1 | /* |
2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "jvm.h" |
27 | #include "asm/assembler.inline.hpp" |
28 | #include "code/codeCache.hpp" |
29 | #include "code/compiledIC.hpp" |
30 | #include "code/compiledMethod.inline.hpp" |
31 | #include "code/dependencies.hpp" |
32 | #include "code/nativeInst.hpp" |
33 | #include "code/nmethod.hpp" |
34 | #include "code/scopeDesc.hpp" |
35 | #include "compiler/abstractCompiler.hpp" |
36 | #include "compiler/compileBroker.hpp" |
37 | #include "compiler/compileLog.hpp" |
38 | #include "compiler/compilerDirectives.hpp" |
39 | #include "compiler/directivesParser.hpp" |
40 | #include "compiler/disassembler.hpp" |
41 | #include "interpreter/bytecode.hpp" |
42 | #include "logging/log.hpp" |
43 | #include "logging/logStream.hpp" |
44 | #include "memory/allocation.inline.hpp" |
45 | #include "memory/resourceArea.hpp" |
46 | #include "memory/universe.hpp" |
47 | #include "oops/access.inline.hpp" |
48 | #include "oops/method.inline.hpp" |
49 | #include "oops/methodData.hpp" |
50 | #include "oops/oop.inline.hpp" |
51 | #include "prims/jvmtiImpl.hpp" |
52 | #include "runtime/atomic.hpp" |
53 | #include "runtime/flags/flagSetting.hpp" |
54 | #include "runtime/frame.inline.hpp" |
55 | #include "runtime/handles.inline.hpp" |
56 | #include "runtime/jniHandles.inline.hpp" |
57 | #include "runtime/orderAccess.hpp" |
58 | #include "runtime/os.hpp" |
59 | #include "runtime/safepointVerifiers.hpp" |
60 | #include "runtime/sharedRuntime.hpp" |
61 | #include "runtime/sweeper.hpp" |
62 | #include "runtime/vmThread.hpp" |
63 | #include "utilities/align.hpp" |
64 | #include "utilities/dtrace.hpp" |
65 | #include "utilities/events.hpp" |
66 | #include "utilities/resourceHash.hpp" |
67 | #include "utilities/xmlstream.hpp" |
68 | #if INCLUDE_JVMCI |
69 | #include "jvmci/jvmciRuntime.hpp" |
70 | #endif |
71 | |
72 | #ifdef DTRACE_ENABLED |
73 | |
74 | // Only bother with this argument setup if dtrace is available |
75 | |
76 | #define DTRACE_METHOD_UNLOAD_PROBE(method) \ |
77 | { \ |
78 | Method* m = (method); \ |
79 | if (m != NULL) { \ |
80 | Symbol* klass_name = m->klass_name(); \ |
81 | Symbol* name = m->name(); \ |
82 | Symbol* signature = m->signature(); \ |
83 | HOTSPOT_COMPILED_METHOD_UNLOAD( \ |
84 | (char *) klass_name->bytes(), klass_name->utf8_length(), \ |
85 | (char *) name->bytes(), name->utf8_length(), \ |
86 | (char *) signature->bytes(), signature->utf8_length()); \ |
87 | } \ |
88 | } |
89 | |
90 | #else // ndef DTRACE_ENABLED |
91 | |
92 | #define DTRACE_METHOD_UNLOAD_PROBE(method) |
93 | |
94 | #endif |
95 | |
96 | //--------------------------------------------------------------------------------- |
97 | // NMethod statistics |
98 | // They are printed under various flags, including: |
99 | // PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation. |
// (In the latter two cases, they, like other stats, are printed to the log only.)
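// For example, running a debug build with -XX:+PrintC1Statistics will print
// the C1 portion of these counters when the VM exits.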
101 | |
102 | #ifndef PRODUCT |
103 | // These variables are put into one block to reduce relocations |
104 | // and make it simpler to print from the debugger. |
105 | struct java_nmethod_stats_struct { |
106 | int nmethod_count; |
107 | int total_size; |
108 | int relocation_size; |
109 | int consts_size; |
110 | int insts_size; |
111 | int stub_size; |
112 | int scopes_data_size; |
113 | int scopes_pcs_size; |
114 | int dependencies_size; |
115 | int handler_table_size; |
116 | int nul_chk_table_size; |
117 | #if INCLUDE_JVMCI |
118 | int speculations_size; |
119 | int jvmci_data_size; |
120 | #endif |
121 | int oops_size; |
122 | int metadata_size; |
123 | |
124 | void note_nmethod(nmethod* nm) { |
125 | nmethod_count += 1; |
126 | total_size += nm->size(); |
127 | relocation_size += nm->relocation_size(); |
128 | consts_size += nm->consts_size(); |
129 | insts_size += nm->insts_size(); |
130 | stub_size += nm->stub_size(); |
131 | oops_size += nm->oops_size(); |
132 | metadata_size += nm->metadata_size(); |
133 | scopes_data_size += nm->scopes_data_size(); |
134 | scopes_pcs_size += nm->scopes_pcs_size(); |
135 | dependencies_size += nm->dependencies_size(); |
136 | handler_table_size += nm->handler_table_size(); |
137 | nul_chk_table_size += nm->nul_chk_table_size(); |
138 | #if INCLUDE_JVMCI |
139 | speculations_size += nm->speculations_size(); |
140 | jvmci_data_size += nm->jvmci_data_size(); |
141 | #endif |
142 | } |
  void print_nmethod_stats(const char* name) {
    if (nmethod_count == 0) return;
    tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name);
    if (total_size != 0)          tty->print_cr(" total in heap = %d", total_size);
    if (nmethod_count != 0)       tty->print_cr(" header = " SIZE_FORMAT, nmethod_count * sizeof(nmethod));
    if (relocation_size != 0)     tty->print_cr(" relocation = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops = %d", oops_size);
    if (metadata_size != 0)       tty->print_cr(" metadata = %d", metadata_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table = %d", nul_chk_table_size);
#if INCLUDE_JVMCI
    if (speculations_size != 0)   tty->print_cr(" speculations = %d", speculations_size);
    if (jvmci_data_size != 0)     tty->print_cr(" JVMCI data = %d", jvmci_data_size);
#endif
  }
164 | }; |
165 | |
166 | struct native_nmethod_stats_struct { |
167 | int native_nmethod_count; |
168 | int native_total_size; |
169 | int native_relocation_size; |
170 | int native_insts_size; |
171 | int native_oops_size; |
172 | int native_metadata_size; |
173 | void note_native_nmethod(nmethod* nm) { |
174 | native_nmethod_count += 1; |
175 | native_total_size += nm->size(); |
176 | native_relocation_size += nm->relocation_size(); |
177 | native_insts_size += nm->insts_size(); |
178 | native_oops_size += nm->oops_size(); |
179 | native_metadata_size += nm->metadata_size(); |
180 | } |
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0) return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops = %d", native_oops_size);
    if (native_metadata_size != 0)    tty->print_cr(" N. metadata = %d", native_metadata_size);
  }
190 | }; |
191 | |
192 | struct pc_nmethod_stats_struct { |
193 | int pc_desc_resets; // number of resets (= number of caches) |
194 | int pc_desc_queries; // queries to nmethod::find_pc_desc |
195 | int pc_desc_approx; // number of those which have approximate true |
196 | int pc_desc_repeats; // number of _pc_descs[0] hits |
197 | int pc_desc_hits; // number of LRU cache hits |
198 | int pc_desc_tests; // total number of PcDesc examinations |
199 | int pc_desc_searches; // total number of quasi-binary search steps |
  int pc_desc_adds;      // number of LRU cache insertions
201 | |
  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics: %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr(" caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
213 | }; |
214 | |
215 | #ifdef COMPILER1 |
216 | static java_nmethod_stats_struct c1_java_nmethod_stats; |
217 | #endif |
218 | #ifdef COMPILER2 |
219 | static java_nmethod_stats_struct c2_java_nmethod_stats; |
220 | #endif |
221 | #if INCLUDE_JVMCI |
222 | static java_nmethod_stats_struct jvmci_java_nmethod_stats; |
223 | #endif |
224 | static java_nmethod_stats_struct unknown_java_nmethod_stats; |
225 | |
226 | static native_nmethod_stats_struct native_nmethod_stats; |
227 | static pc_nmethod_stats_struct pc_nmethod_stats; |
228 | |
229 | static void note_java_nmethod(nmethod* nm) { |
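  // The conditionally compiled '} else' tails below chain the checks into a
  // single if/else-if ladder, so exactly one stats bucket is credited no
  // matter which compilers are configured into the build.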
230 | #ifdef COMPILER1 |
231 | if (nm->is_compiled_by_c1()) { |
232 | c1_java_nmethod_stats.note_nmethod(nm); |
233 | } else |
234 | #endif |
235 | #ifdef COMPILER2 |
236 | if (nm->is_compiled_by_c2()) { |
237 | c2_java_nmethod_stats.note_nmethod(nm); |
238 | } else |
239 | #endif |
240 | #if INCLUDE_JVMCI |
241 | if (nm->is_compiled_by_jvmci()) { |
242 | jvmci_java_nmethod_stats.note_nmethod(nm); |
243 | } else |
244 | #endif |
245 | { |
246 | unknown_java_nmethod_stats.note_nmethod(nm); |
247 | } |
248 | } |
249 | #endif // !PRODUCT |
250 | |
251 | //--------------------------------------------------------------------------------- |
252 | |
253 | |
254 | ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { |
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");
258 | |
259 | _count = 0; |
260 | _exception_type = exception->klass(); |
261 | _next = NULL; |
262 | _purge_list_next = NULL; |
263 | |
  add_address_and_handler(pc, handler);
265 | } |
266 | |
267 | |
268 | address ExceptionCache::match(Handle exception, address pc) { |
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
271 | if (exception->klass() == exception_type()) { |
272 | return (test_address(pc)); |
273 | } |
274 | |
275 | return NULL; |
276 | } |
277 | |
278 | |
279 | bool ExceptionCache::match_exception_with_space(Handle exception) { |
  assert(exception.not_null(), "Must be non null");
281 | if (exception->klass() == exception_type() && count() < cache_size) { |
282 | return true; |
283 | } |
284 | return false; |
285 | } |
286 | |
287 | |
288 | address ExceptionCache::test_address(address addr) { |
289 | int limit = count(); |
290 | for (int i = 0; i < limit; i++) { |
291 | if (pc_at(i) == addr) { |
292 | return handler_at(i); |
293 | } |
294 | } |
295 | return NULL; |
296 | } |
297 | |
298 | |
299 | bool ExceptionCache::add_address_and_handler(address addr, address handler) { |
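  // Append-only: if this (pc, handler) pair is already cached, report success;
  // otherwise record it if there is still room. A full cache returns false,
  // in which case the caller is expected to link a fresh ExceptionCache in
  // front (cf. match_exception_with_space() above).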
300 | if (test_address(addr) == handler) return true; |
301 | |
302 | int index = count(); |
303 | if (index < cache_size) { |
304 | set_pc_at(index, addr); |
305 | set_handler_at(index, handler); |
306 | increment_count(); |
307 | return true; |
308 | } |
309 | return false; |
310 | } |
311 | |
312 | ExceptionCache* ExceptionCache::next() { |
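  // Readers may traverse the exception cache list concurrently with appends,
  // so the '_next' link is always accessed through Atomic loads and stores.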
313 | return Atomic::load(&_next); |
314 | } |
315 | |
316 | void ExceptionCache::set_next(ExceptionCache *ec) { |
317 | Atomic::store(ec, &_next); |
318 | } |
319 | |
320 | //----------------------------------------------------------------------------- |
321 | |
322 | |
323 | // Helper used by both find_pc_desc methods. |
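// An 'approximate' query matches the first PcDesc whose offset is >= the
// requested pc_offset; this relies on the PcDescs being sorted by offset and
// on the negative-offset sentinel that precedes the first real entry.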
324 | static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) { |
325 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests); |
326 | if (!approximate) |
327 | return pc->pc_offset() == pc_offset; |
328 | else |
329 | return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset(); |
330 | } |
331 | |
332 | void PcDescCache::reset_to(PcDesc* initial_pc_desc) { |
333 | if (initial_pc_desc == NULL) { |
334 | _pc_descs[0] = NULL; // native method; no PcDescs at all |
335 | return; |
336 | } |
337 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets); |
338 | // reset the cache by filling it with benign (non-null) values |
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
340 | for (int i = 0; i < cache_size; i++) |
341 | _pc_descs[i] = initial_pc_desc; |
342 | } |
343 | |
344 | PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { |
345 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries); |
346 | NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx); |
347 | |
348 | // Note: one might think that caching the most recently |
349 | // read value separately would be a win, but one would be |
350 | // wrong. When many threads are updating it, the cache |
351 | // line it's in would bounce between caches, negating |
352 | // any benefit. |
353 | |
354 | // In order to prevent race conditions do not load cache elements |
355 | // repeatedly, but use a local copy: |
356 | PcDesc* res; |
357 | |
358 | // Step one: Check the most recently added value. |
359 | res = _pc_descs[0]; |
360 | if (res == NULL) return NULL; // native method; no PcDescs at all |
361 | if (match_desc(res, pc_offset, approximate)) { |
362 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats); |
363 | return res; |
364 | } |
365 | |
366 | // Step two: Check the rest of the LRU cache. |
367 | for (int i = 1; i < cache_size; ++i) { |
368 | res = _pc_descs[i]; |
369 | if (res->pc_offset() < 0) break; // optimization: skip empty cache |
370 | if (match_desc(res, pc_offset, approximate)) { |
371 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits); |
372 | return res; |
373 | } |
374 | } |
375 | |
376 | // Report failure. |
377 | return NULL; |
378 | } |
379 | |
380 | void PcDescCache::add_pc_desc(PcDesc* pc_desc) { |
381 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds); |
382 | // Update the LRU cache by shifting pc_desc forward. |
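  // The new entry lands in slot 0 (the most-recently-used position); each
  // older entry shifts down one slot and the oldest falls off the end.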
383 | for (int i = 0; i < cache_size; i++) { |
384 | PcDesc* next = _pc_descs[i]; |
385 | _pc_descs[i] = pc_desc; |
386 | pc_desc = next; |
387 | } |
388 | } |
389 | |
390 | // adjust pcs_size so that it is a multiple of both oopSize and |
391 | // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple |
392 | // of oopSize, then 2*sizeof(PcDesc) is) |
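// For example, if oopSize were 8 and sizeof(PcDesc) 12, a pcs_size of 36
// would align up to 40, which is not a multiple of 12, so we would return
// 36 + 12 = 48, a multiple of both. (pcs_size itself always arrives as a
// whole number of PcDescs.)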
393 | static int adjust_pcs_size(int pcs_size) { |
394 | int nsize = align_up(pcs_size, oopSize); |
395 | if ((nsize % sizeof(PcDesc)) != 0) { |
396 | nsize = pcs_size + sizeof(PcDesc); |
397 | } |
  assert((nsize % oopSize) == 0, "correct alignment");
399 | return nsize; |
400 | } |
401 | |
402 | |
403 | int nmethod::total_size() const { |
404 | return |
405 | consts_size() + |
406 | insts_size() + |
407 | stub_size() + |
408 | scopes_data_size() + |
409 | scopes_pcs_size() + |
410 | handler_table_size() + |
411 | nul_chk_table_size(); |
412 | } |
413 | |
414 | address* nmethod::orig_pc_addr(const frame* fr) { |
415 | return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); |
416 | } |
417 | |
418 | const char* nmethod::compile_kind() const { |
  if (is_osr_method()) return "osr";
  if (method() != NULL && is_native_method()) return "c2n";
421 | return NULL; |
422 | } |
423 | |
424 | // Fill in default values for various flag fields |
425 | void nmethod::init_defaults() { |
426 | _state = not_installed; |
427 | _has_flushed_dependencies = 0; |
428 | _lock_count = 0; |
429 | _stack_traversal_mark = 0; |
430 | _unload_reported = false; // jvmti state |
431 | _is_far_code = false; // nmethods are located in CodeCache |
432 | |
433 | #ifdef ASSERT |
434 | _oops_are_stale = false; |
435 | #endif |
436 | |
437 | _oops_do_mark_link = NULL; |
438 | _jmethod_id = NULL; |
439 | _osr_link = NULL; |
440 | #if INCLUDE_RTM_OPT |
441 | _rtm_state = NoRTM; |
442 | #endif |
443 | } |
444 | |
445 | nmethod* nmethod::new_native_nmethod(const methodHandle& method, |
446 | int compile_id, |
447 | CodeBuffer *code_buffer, |
448 | int vep_offset, |
449 | int frame_complete, |
450 | int frame_size, |
451 | ByteSize basic_lock_owner_sp_offset, |
452 | ByteSize basic_lock_sp_offset, |
453 | OopMapSet* oop_maps) { |
454 | code_buffer->finalize_oop_references(method); |
455 | // create nmethod |
456 | nmethod* nm = NULL; |
457 | { |
458 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
459 | int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod)); |
460 | |
461 | CodeOffsets offsets; |
462 | offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); |
463 | offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); |
464 | nm = new (native_nmethod_size, CompLevel_none) |
465 | nmethod(method(), compiler_none, native_nmethod_size, |
466 | compile_id, &offsets, |
467 | code_buffer, frame_size, |
468 | basic_lock_owner_sp_offset, |
469 | basic_lock_sp_offset, |
470 | oop_maps); |
471 | NOT_PRODUCT(if (nm != NULL) native_nmethod_stats.note_native_nmethod(nm)); |
472 | } |
473 | |
474 | if (nm != NULL) { |
475 | // verify nmethod |
476 | debug_only(nm->verify();) // might block |
477 | |
478 | nm->log_new_nmethod(); |
479 | nm->make_in_use(); |
480 | } |
481 | return nm; |
482 | } |
483 | |
484 | nmethod* nmethod::new_nmethod(const methodHandle& method, |
485 | int compile_id, |
486 | int entry_bci, |
487 | CodeOffsets* offsets, |
488 | int orig_pc_offset, |
489 | DebugInformationRecorder* debug_info, |
490 | Dependencies* dependencies, |
491 | CodeBuffer* code_buffer, int frame_size, |
492 | OopMapSet* oop_maps, |
493 | ExceptionHandlerTable* handler_table, |
494 | ImplicitExceptionTable* nul_chk_table, |
495 | AbstractCompiler* compiler, |
496 | int comp_level |
497 | #if INCLUDE_JVMCI |
498 | , char* speculations, |
499 | int speculations_len, |
500 | int nmethod_mirror_index, |
501 | const char* nmethod_mirror_name, |
502 | FailedSpeculation** failed_speculations |
503 | #endif |
504 | ) |
505 | { |
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
507 | code_buffer->finalize_oop_references(method); |
508 | // create nmethod |
509 | nmethod* nm = NULL; |
510 | { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
511 | #if INCLUDE_JVMCI |
512 | int jvmci_data_size = !compiler->is_jvmci() ? 0 : JVMCINMethodData::compute_size(nmethod_mirror_name); |
513 | #endif |
514 | int nmethod_size = |
515 | CodeBlob::allocation_size(code_buffer, sizeof(nmethod)) |
516 | + adjust_pcs_size(debug_info->pcs_size()) |
517 | + align_up((int)dependencies->size_in_bytes(), oopSize) |
518 | + align_up(handler_table->size_in_bytes() , oopSize) |
519 | + align_up(nul_chk_table->size_in_bytes() , oopSize) |
520 | #if INCLUDE_JVMCI |
521 | + align_up(speculations_len , oopSize) |
522 | + align_up(jvmci_data_size , oopSize) |
523 | #endif |
524 | + align_up(debug_info->data_size() , oopSize); |
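    // Note: this sum must mirror the section layout computed in the nmethod
    // constructor below, which lays out the sections in the same order.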
525 | |
526 | nm = new (nmethod_size, comp_level) |
527 | nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets, |
528 | orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, |
529 | oop_maps, |
530 | handler_table, |
531 | nul_chk_table, |
532 | compiler, |
533 | comp_level |
534 | #if INCLUDE_JVMCI |
535 | , speculations, |
536 | speculations_len, |
537 | jvmci_data_size |
538 | #endif |
539 | ); |
540 | |
541 | if (nm != NULL) { |
542 | #if INCLUDE_JVMCI |
543 | if (compiler->is_jvmci()) { |
544 | // Initialize the JVMCINMethodData object inlined into nm |
545 | nm->jvmci_nmethod_data()->initialize(nmethod_mirror_index, nmethod_mirror_name, failed_speculations); |
546 | } |
547 | #endif |
548 | // To make dependency checking during class loading fast, record |
549 | // the nmethod dependencies in the classes it is dependent on. |
550 | // This allows the dependency checking code to simply walk the |
551 | // class hierarchy above the loaded class, checking only nmethods |
552 | // which are dependent on those classes. The slow way is to |
553 | // check every nmethod for dependencies which makes it linear in |
      // the number of methods compiled.  For applications with a lot of
      // classes the slow way is too slow.
556 | for (Dependencies::DepStream deps(nm); deps.next(); ) { |
557 | if (deps.type() == Dependencies::call_site_target_value) { |
558 | // CallSite dependencies are managed on per-CallSite instance basis. |
559 | oop call_site = deps.argument_oop(0); |
560 | MethodHandles::add_dependent_nmethod(call_site, nm); |
561 | } else { |
562 | Klass* klass = deps.context_type(); |
563 | if (klass == NULL) { |
564 | continue; // ignore things like evol_method |
565 | } |
566 | // record this nmethod as dependent on this klass |
567 | InstanceKlass::cast(klass)->add_dependent_nmethod(nm); |
568 | } |
569 | } |
570 | NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm)); |
571 | } |
572 | } |
573 | // Do verification and logging outside CodeCache_lock. |
574 | if (nm != NULL) { |
575 | // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. |
576 | DEBUG_ONLY(nm->verify();) |
577 | nm->log_new_nmethod(); |
578 | } |
579 | return nm; |
580 | } |
581 | |
582 | // For native wrappers |
583 | nmethod::nmethod( |
584 | Method* method, |
585 | CompilerType type, |
586 | int nmethod_size, |
587 | int compile_id, |
588 | CodeOffsets* offsets, |
589 | CodeBuffer* code_buffer, |
590 | int frame_size, |
591 | ByteSize basic_lock_owner_sp_offset, |
592 | ByteSize basic_lock_sp_offset, |
593 | OopMapSet* oop_maps ) |
  : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
595 | _is_unloading_state(0), |
596 | _native_receiver_sp_offset(basic_lock_owner_sp_offset), |
597 | _native_basic_lock_sp_offset(basic_lock_sp_offset) |
598 | { |
599 | { |
600 | int scopes_data_offset = 0; |
601 | int deoptimize_offset = 0; |
602 | int deoptimize_mh_offset = 0; |
603 | |
604 | debug_only(NoSafepointVerifier nsv;) |
605 | assert_locked_or_safepoint(CodeCache_lock); |
606 | |
607 | init_defaults(); |
608 | _entry_bci = InvocationEntryBci; |
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
611 | _exception_offset = 0; |
612 | _orig_pc_offset = 0; |
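
    // Native wrappers carry no constants or stubs of their own, and no debug
    // info, dependencies, or exception tables; the chain of equal offsets
    // below records each of those sections as empty (oops and metadata may
    // still be present from the code buffer).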
613 | |
614 | _consts_offset = data_offset(); |
615 | _stub_offset = data_offset(); |
616 | _oops_offset = data_offset(); |
617 | _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); |
618 | scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); |
619 | _scopes_pcs_offset = scopes_data_offset; |
620 | _dependencies_offset = _scopes_pcs_offset; |
621 | _handler_table_offset = _dependencies_offset; |
622 | _nul_chk_table_offset = _handler_table_offset; |
623 | #if INCLUDE_JVMCI |
624 | _speculations_offset = _nul_chk_table_offset; |
625 | _jvmci_data_offset = _speculations_offset; |
626 | _nmethod_end_offset = _jvmci_data_offset; |
627 | #else |
628 | _nmethod_end_offset = _nul_chk_table_offset; |
629 | #endif |
630 | _compile_id = compile_id; |
631 | _comp_level = CompLevel_none; |
632 | _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); |
633 | _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); |
634 | _osr_entry_point = NULL; |
635 | _exception_cache = NULL; |
636 | _pc_desc_container.reset_to(NULL); |
637 | _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); |
638 | |
639 | _scopes_data_begin = (address) this + scopes_data_offset; |
640 | _deopt_handler_begin = (address) this + deoptimize_offset; |
641 | _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset; |
642 | |
643 | code_buffer->copy_code_and_locs_to(this); |
644 | code_buffer->copy_values_to(this); |
645 | |
646 | clear_unloading_state(); |
647 | |
648 | Universe::heap()->register_nmethod(this); |
649 | debug_only(Universe::heap()->verify_nmethod(this)); |
650 | |
651 | CodeCache::commit(this); |
652 | } |
653 | |
654 | if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { |
655 | ttyLocker ttyl; // keep the following output all in one block |
656 | // This output goes directly to the tty, not the compiler log. |
657 | // To enable tools to match it up with the compilation activity, |
658 | // be sure to tag this tty output with the compile ID. |
659 | if (xtty != NULL) { |
660 | xtty->begin_head("print_native_nmethod" ); |
661 | xtty->method(_method); |
662 | xtty->stamp(); |
663 | xtty->end_head(" address='" INTPTR_FORMAT "'" , (intptr_t) this); |
664 | } |
665 | // Print the header part, then print the requested information. |
    // Both are handled in decode2(), called via print_code() -> decode().
667 | if (PrintNativeNMethods) { |
668 | tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------" ); |
669 | print_code(); |
670 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
671 | #if defined(SUPPORT_DATA_STRUCTS) |
672 | if (AbstractDisassembler::show_structs()) { |
673 | if (oop_maps != NULL) { |
674 | tty->print("oop maps:" ); // oop_maps->print_on(tty) outputs a cr() at the beginning |
675 | oop_maps->print_on(tty); |
676 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
677 | } |
678 | } |
679 | #endif |
680 | } else { |
681 | print(); // print the header part only. |
682 | } |
683 | #if defined(SUPPORT_DATA_STRUCTS) |
684 | if (AbstractDisassembler::show_structs()) { |
685 | if (PrintRelocations) { |
686 | print_relocations(); |
687 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
688 | } |
689 | } |
690 | #endif |
691 | if (xtty != NULL) { |
692 | xtty->tail("print_native_nmethod" ); |
693 | } |
694 | } |
695 | } |
696 | |
697 | void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () { |
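  // nmethods are allocated in the CodeCache; when the code cache is
  // segmented, the compilation level selects the target code heap.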
698 | return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level)); |
699 | } |
700 | |
701 | nmethod::nmethod( |
702 | Method* method, |
703 | CompilerType type, |
704 | int nmethod_size, |
705 | int compile_id, |
706 | int entry_bci, |
707 | CodeOffsets* offsets, |
708 | int orig_pc_offset, |
709 | DebugInformationRecorder* debug_info, |
710 | Dependencies* dependencies, |
711 | CodeBuffer *code_buffer, |
712 | int frame_size, |
713 | OopMapSet* oop_maps, |
714 | ExceptionHandlerTable* handler_table, |
715 | ImplicitExceptionTable* nul_chk_table, |
716 | AbstractCompiler* compiler, |
717 | int comp_level |
718 | #if INCLUDE_JVMCI |
719 | , char* speculations, |
720 | int speculations_len, |
721 | int jvmci_data_size |
722 | #endif |
723 | ) |
  : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
725 | _is_unloading_state(0), |
726 | _native_receiver_sp_offset(in_ByteSize(-1)), |
727 | _native_basic_lock_sp_offset(in_ByteSize(-1)) |
728 | { |
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
730 | { |
731 | debug_only(NoSafepointVerifier nsv;) |
732 | assert_locked_or_safepoint(CodeCache_lock); |
733 | |
734 | _deopt_handler_begin = (address) this; |
735 | _deopt_mh_handler_begin = (address) this; |
736 | |
737 | init_defaults(); |
738 | _entry_bci = entry_bci; |
739 | _compile_id = compile_id; |
740 | _comp_level = comp_level; |
741 | _orig_pc_offset = orig_pc_offset; |
742 | _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); |
743 | |
744 | // Section offsets |
745 | _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); |
746 | _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); |
747 | set_ctable_begin(header_begin() + _consts_offset); |
748 | |
749 | #if INCLUDE_JVMCI |
750 | if (compiler->is_jvmci()) { |
751 | // JVMCI might not produce any stub sections |
752 | if (offsets->value(CodeOffsets::Exceptions) != -1) { |
753 | _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); |
754 | } else { |
755 | _exception_offset = -1; |
756 | } |
757 | if (offsets->value(CodeOffsets::Deopt) != -1) { |
758 | _deopt_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::Deopt); |
759 | } else { |
760 | _deopt_handler_begin = NULL; |
761 | } |
762 | if (offsets->value(CodeOffsets::DeoptMH) != -1) { |
763 | _deopt_mh_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::DeoptMH); |
764 | } else { |
765 | _deopt_mh_handler_begin = NULL; |
766 | } |
767 | } else |
768 | #endif |
769 | { |
770 | // Exception handler and deopt handler are in the stub section |
      assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
      assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
773 | |
774 | _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); |
775 | _deopt_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::Deopt); |
776 | if (offsets->value(CodeOffsets::DeoptMH) != -1) { |
777 | _deopt_mh_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::DeoptMH); |
778 | } else { |
779 | _deopt_mh_handler_begin = NULL; |
780 | } |
781 | } |
782 | if (offsets->value(CodeOffsets::UnwindHandler) != -1) { |
783 | _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); |
784 | } else { |
785 | _unwind_handler_offset = -1; |
786 | } |
787 | |
788 | _oops_offset = data_offset(); |
789 | _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); |
790 | int scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); |
791 | |
792 | _scopes_pcs_offset = scopes_data_offset + align_up(debug_info->data_size (), oopSize); |
793 | _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); |
794 | _handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes (), oopSize); |
795 | _nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize); |
796 | #if INCLUDE_JVMCI |
797 | _speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); |
798 | _jvmci_data_offset = _speculations_offset + align_up(speculations_len, oopSize); |
799 | _nmethod_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize); |
800 | #else |
801 | _nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); |
802 | #endif |
803 | _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); |
804 | _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); |
805 | _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); |
806 | _exception_cache = NULL; |
807 | _scopes_data_begin = (address) this + scopes_data_offset; |
808 | |
809 | _pc_desc_container.reset_to(scopes_pcs_begin()); |
810 | |
811 | code_buffer->copy_code_and_locs_to(this); |
812 | // Copy contents of ScopeDescRecorder to nmethod |
813 | code_buffer->copy_values_to(this); |
814 | debug_info->copy_to(this); |
815 | dependencies->copy_to(this); |
816 | clear_unloading_state(); |
817 | |
818 | Universe::heap()->register_nmethod(this); |
819 | debug_only(Universe::heap()->verify_nmethod(this)); |
820 | |
821 | CodeCache::commit(this); |
822 | |
823 | // Copy contents of ExceptionHandlerTable to nmethod |
824 | handler_table->copy_to(this); |
825 | nul_chk_table->copy_to(this); |
826 | |
827 | #if INCLUDE_JVMCI |
828 | // Copy speculations to nmethod |
829 | if (speculations_size() != 0) { |
830 | memcpy(speculations_begin(), speculations, speculations_len); |
831 | } |
832 | #endif |
833 | |
    // We use the entry-point information to find out whether a method is
    // static or not.
    assert(compiler->is_c2() || compiler->is_jvmci() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
839 | } |
840 | } |
841 | |
842 | // Print a short set of xml attributes to identify this nmethod. The |
843 | // output should be embedded in some other element. |
844 | void nmethod::log_identity(xmlStream* log) const { |
845 | log->print(" compile_id='%d'" , compile_id()); |
846 | const char* nm_kind = compile_kind(); |
847 | if (nm_kind != NULL) log->print(" compile_kind='%s'" , nm_kind); |
848 | log->print(" compiler='%s'" , compiler_name()); |
849 | if (TieredCompilation) { |
850 | log->print(" level='%d'" , comp_level()); |
851 | } |
852 | #if INCLUDE_JVMCI |
853 | if (jvmci_nmethod_data() != NULL) { |
854 | const char* jvmci_name = jvmci_nmethod_data()->name(); |
855 | if (jvmci_name != NULL) { |
856 | log->print(" jvmci_mirror_name='" ); |
857 | log->text("%s" , jvmci_name); |
858 | log->print("'" ); |
859 | } |
860 | } |
861 | #endif |
862 | } |
863 | |
864 | |
865 | #define LOG_OFFSET(log, name) \ |
866 | if (p2i(name##_end()) - p2i(name##_begin())) \ |
867 | log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'" , \ |
868 | p2i(name##_begin()) - p2i(this)) |
869 | |
870 | |
871 | void nmethod::log_new_nmethod() const { |
872 | if (LogCompilation && xtty != NULL) { |
873 | ttyLocker ttyl; |
874 | HandleMark hm; |
875 | xtty->begin_elem("nmethod" ); |
876 | log_identity(xtty); |
877 | xtty->print(" entry='" INTPTR_FORMAT "' size='%d'" , p2i(code_begin()), size()); |
878 | xtty->print(" address='" INTPTR_FORMAT "'" , p2i(this)); |
879 | |
880 | LOG_OFFSET(xtty, relocation); |
881 | LOG_OFFSET(xtty, consts); |
882 | LOG_OFFSET(xtty, insts); |
883 | LOG_OFFSET(xtty, stub); |
884 | LOG_OFFSET(xtty, scopes_data); |
885 | LOG_OFFSET(xtty, scopes_pcs); |
886 | LOG_OFFSET(xtty, dependencies); |
887 | LOG_OFFSET(xtty, handler_table); |
888 | LOG_OFFSET(xtty, nul_chk_table); |
889 | LOG_OFFSET(xtty, oops); |
890 | LOG_OFFSET(xtty, metadata); |
891 | |
892 | xtty->method(method()); |
893 | xtty->stamp(); |
894 | xtty->end_elem(); |
895 | } |
896 | } |
897 | |
898 | #undef LOG_OFFSET |
899 | |
900 | |
901 | // Print out more verbose output usually for a newly created nmethod. |
902 | void nmethod::print_on(outputStream* st, const char* msg) const { |
903 | if (st != NULL) { |
904 | ttyLocker ttyl; |
905 | if (WizardMode) { |
906 | CompileTask::print(st, this, msg, /*short_form:*/ true); |
907 | st->print_cr(" (" INTPTR_FORMAT ")" , p2i(this)); |
908 | } else { |
909 | CompileTask::print(st, this, msg, /*short_form:*/ false); |
910 | } |
911 | } |
912 | } |
913 | |
914 | void nmethod::maybe_print_nmethod(DirectiveSet* directive) { |
915 | bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption; |
916 | if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) { |
917 | print_nmethod(printnmethods); |
918 | } |
919 | } |
920 | |
921 | void nmethod::print_nmethod(bool printmethod) { |
922 | ttyLocker ttyl; // keep the following output all in one block |
923 | if (xtty != NULL) { |
924 | xtty->begin_head("print_nmethod" ); |
925 | log_identity(xtty); |
926 | xtty->stamp(); |
927 | xtty->end_head(); |
928 | } |
929 | // Print the header part, then print the requested information. |
  // Both are handled in decode2().
931 | if (printmethod) { |
932 | HandleMark hm; |
933 | ResourceMark m; |
934 | if (is_compiled_by_c1()) { |
935 | tty->cr(); |
936 | tty->print_cr("============================= C1-compiled nmethod ==============================" ); |
937 | } |
938 | if (is_compiled_by_jvmci()) { |
939 | tty->cr(); |
940 | tty->print_cr("=========================== JVMCI-compiled nmethod =============================" ); |
941 | } |
942 | tty->print_cr("----------------------------------- Assembly -----------------------------------" ); |
943 | decode2(tty); |
944 | #if defined(SUPPORT_DATA_STRUCTS) |
945 | if (AbstractDisassembler::show_structs()) { |
946 | // Print the oops from the underlying CodeBlob as well. |
947 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
948 | print_oops(tty); |
949 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
950 | print_metadata(tty); |
951 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
952 | print_pcs(); |
953 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
954 | if (oop_maps() != NULL) { |
955 | tty->print("oop maps:" ); // oop_maps()->print_on(tty) outputs a cr() at the beginning |
956 | oop_maps()->print_on(tty); |
957 | tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - " ); |
958 | } |
959 | } |
960 | #endif |
961 | } else { |
962 | print(); // print the header part only. |
963 | } |
964 | |
965 | #if defined(SUPPORT_DATA_STRUCTS) |
966 | if (AbstractDisassembler::show_structs()) { |
    if (printmethod || PrintDebugInfo || CompilerOracle::has_option_string(_method, "PrintDebugInfo")) {
      print_scopes();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintRelocations || CompilerOracle::has_option_string(_method, "PrintRelocations")) {
      print_relocations();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintDependencies || CompilerOracle::has_option_string(_method, "PrintDependencies")) {
      print_dependencies();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintExceptionHandlers) {
      print_handler_table();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_nul_chk_table();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }

    if (printmethod) {
      print_recorded_oops();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_recorded_metadata();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
991 | } |
992 | } |
993 | #endif |
994 | |
995 | if (xtty != NULL) { |
996 | xtty->tail("print_nmethod" ); |
997 | } |
998 | } |
999 | |
1000 | |
1001 | // Promote one word from an assembly-time handle to a live embedded oop. |
1002 | inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) { |
1003 | if (handle == NULL || |
1004 | // As a special case, IC oops are initialized to 1 or -1. |
1005 | handle == (jobject) Universe::non_oop_word()) { |
1006 | (*dest) = (oop) handle; |
1007 | } else { |
1008 | (*dest) = JNIHandles::resolve_non_null(handle); |
1009 | } |
1010 | } |
1011 | |
1012 | |
1013 | // Have to have the same name because it's called by a template |
1014 | void nmethod::copy_values(GrowableArray<jobject>* array) { |
1015 | int length = array->length(); |
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1017 | oop* dest = oops_begin(); |
1018 | for (int index = 0 ; index < length; index++) { |
1019 | initialize_immediate_oop(&dest[index], array->at(index)); |
1020 | } |
1021 | |
1022 | // Now we can fix up all the oops in the code. We need to do this |
1023 | // in the code because the assembler uses jobjects as placeholders. |
1024 | // The code and relocations have already been initialized by the |
1025 | // CodeBlob constructor, so it is valid even at this early point to |
1026 | // iterate over relocations and patch the code. |
1027 | fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true); |
1028 | } |
1029 | |
1030 | void nmethod::copy_values(GrowableArray<Metadata*>* array) { |
1031 | int length = array->length(); |
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
1033 | Metadata** dest = metadata_begin(); |
1034 | for (int index = 0 ; index < length; index++) { |
1035 | dest[index] = array->at(index); |
1036 | } |
1037 | } |
1038 | |
1039 | void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) { |
1040 | // re-patch all oop-bearing instructions, just in case some oops moved |
1041 | RelocIterator iter(this, begin, end); |
1042 | while (iter.next()) { |
1043 | if (iter.type() == relocInfo::oop_type) { |
1044 | oop_Relocation* reloc = iter.oop_reloc(); |
1045 | if (initialize_immediates && reloc->oop_is_immediate()) { |
1046 | oop* dest = reloc->oop_addr(); |
1047 | initialize_immediate_oop(dest, (jobject) *dest); |
1048 | } |
1049 | // Refresh the oop-related bits of this instruction. |
1050 | reloc->fix_oop_relocation(); |
1051 | } else if (iter.type() == relocInfo::metadata_type) { |
1052 | metadata_Relocation* reloc = iter.metadata_reloc(); |
1053 | reloc->fix_metadata_relocation(); |
1054 | } |
1055 | } |
1056 | } |
1057 | |
1058 | |
1059 | void nmethod::verify_clean_inline_caches() { |
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
1061 | |
1062 | ResourceMark rm; |
1063 | RelocIterator iter(this, oops_reloc_begin()); |
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        assert(cb != NULL, "destination not in CodeBlob?");
        nmethod* nm = cb->as_nmethod_or_null();
        if (nm != NULL) {
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        assert(cb != NULL, "destination not in CodeBlob?");
        nmethod* nm = cb->as_nmethod_or_null();
        if (nm != NULL) {
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
1094 | default: |
1095 | break; |
1096 | } |
1097 | } |
1098 | } |
1099 | |
1100 | // This is a private interface with the sweeper. |
1101 | void nmethod::mark_as_seen_on_stack() { |
  assert(is_alive(), "Must be an alive method");
1103 | // Set the traversal mark to ensure that the sweeper does 2 |
1104 | // cleaning passes before moving to zombie. |
1105 | set_stack_traversal_mark(NMethodSweeper::traversal_count()); |
1106 | } |
1107 | |
1108 | // Tell if a non-entrant method can be converted to a zombie (i.e., |
1109 | // there are no activations on the stack, not in use by the VM, |
1110 | // and not in use by the ServiceThread) |
1111 | bool nmethod::can_convert_to_zombie() { |
1112 | // Note that this is called when the sweeper has observed the nmethod to be |
1113 | // not_entrant. However, with concurrent code cache unloading, the state |
1114 | // might have moved on to unloaded if it is_unloading(), due to racing |
1115 | // concurrent GC threads. |
  assert(is_not_entrant() || is_unloading(), "must be a non-entrant method");
1117 | |
1118 | // Since the nmethod sweeper only does partial sweep the sweeper's traversal |
1119 | // count can be greater than the stack traversal count before it hits the |
1120 | // nmethod for the second time. |
1121 | // If an is_unloading() nmethod is still not_entrant, then it is not safe to |
1122 | // convert it to zombie due to GC unloading interactions. However, if it |
1123 | // has become unloaded, then it is okay to convert such nmethods to zombie. |
1124 | return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() && |
1125 | !is_locked_by_vm() && (!is_unloading() || is_unloaded()); |
1126 | } |
1127 | |
1128 | void nmethod::inc_decompile_count() { |
1129 | if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return; |
1130 | // Could be gated by ProfileTraps, but do not bother... |
1131 | Method* m = method(); |
1132 | if (m == NULL) return; |
1133 | MethodData* mdo = m->method_data(); |
1134 | if (mdo == NULL) return; |
1135 | // There is a benign race here. See comments in methodData.hpp. |
1136 | mdo->inc_decompile_count(); |
1137 | } |
1138 | |
1139 | void nmethod::make_unloaded() { |
1140 | post_compiled_method_unload(); |
1141 | |
1142 | // This nmethod is being unloaded, make sure that dependencies |
1143 | // recorded in instanceKlasses get flushed. |
1144 | // Since this work is being done during a GC, defer deleting dependencies from the |
1145 | // InstanceKlass. |
  assert(Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread(),
         "should only be called during gc");
1148 | flush_dependencies(/*delete_immediately*/false); |
1149 | |
1150 | // Break cycle between nmethod & method |
1151 | LogTarget(Trace, class, unload, nmethod) lt; |
1152 | if (lt.is_enabled()) { |
1153 | LogStream ls(lt); |
1154 | ls.print("making nmethod " INTPTR_FORMAT |
1155 | " unloadable, Method*(" INTPTR_FORMAT |
1156 | ") " , |
1157 | p2i(this), p2i(_method)); |
1158 | ls.cr(); |
1159 | } |
1160 | // Unlink the osr method, so we do not look this up again |
1161 | if (is_osr_method()) { |
1162 | // Invalidate the osr nmethod only once |
1163 | if (is_in_use()) { |
1164 | invalidate_osr_method(); |
1165 | } |
1166 | #ifdef ASSERT |
1167 | if (method() != NULL) { |
1168 | // Make sure osr nmethod is invalidated, i.e. not on the list |
1169 | bool found = method()->method_holder()->remove_osr_nmethod(this); |
      assert(!found, "osr nmethod should have been invalidated");
1171 | } |
1172 | #endif |
1173 | } |
1174 | |
1175 | // If _method is already NULL the Method* is about to be unloaded, |
1176 | // so we don't have to break the cycle. Note that it is possible to |
1177 | // have the Method* live here, in case we unload the nmethod because |
1178 | // it is pointing to some oop (other than the Method*) being unloaded. |
1179 | if (_method != NULL) { |
1180 | // OSR methods point to the Method*, but the Method* does not |
1181 | // point back! |
1182 | if (_method->code() == this) { |
1183 | _method->clear_code(); // Break a cycle |
1184 | } |
1185 | } |
1186 | |
1187 | // Make the class unloaded - i.e., change state and notify sweeper |
  assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
         "must be at safepoint");
1190 | |
1191 | { |
1192 | // Clear ICStubs and release any CompiledICHolders. |
1193 | CompiledICLocker ml(this); |
1194 | clear_ic_callsites(); |
1195 | } |
1196 | |
1197 | // Unregister must be done before the state change |
1198 | { |
1199 | MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, |
1200 | Mutex::_no_safepoint_check_flag); |
1201 | Universe::heap()->unregister_nmethod(this); |
1202 | } |
1203 | |
1204 | // Clear the method of this dead nmethod |
1205 | set_method(NULL); |
1206 | |
1207 | // Log the unloading. |
1208 | log_state_change(); |
1209 | |
1210 | // The Method* is gone at this point |
  assert(_method == NULL, "Tautology");
1212 | |
1213 | set_osr_link(NULL); |
1214 | NMethodSweeper::report_state_change(this); |
1215 | |
1216 | // The release is only needed for compile-time ordering, as accesses |
1217 | // into the nmethod after the store are not safe due to the sweeper |
1218 | // being allowed to free it when the store is observed, during |
1219 | // concurrent nmethod unloading. Therefore, there is no need for |
1220 | // acquire on the loader side. |
1221 | OrderAccess::release_store(&_state, (signed char)unloaded); |
1222 | |
1223 | #if INCLUDE_JVMCI |
1224 | // Clear the link between this nmethod and a HotSpotNmethod mirror |
1225 | JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); |
1226 | if (nmethod_data != NULL) { |
1227 | nmethod_data->invalidate_nmethod_mirror(this); |
1228 | nmethod_data->clear_nmethod_mirror(this); |
1229 | } |
1230 | #endif |
1231 | } |
1232 | |
1233 | void nmethod::invalidate_osr_method() { |
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1235 | // Remove from list of active nmethods |
1236 | if (method() != NULL) { |
1237 | method()->method_holder()->remove_osr_nmethod(this); |
1238 | } |
1239 | } |
1240 | |
1241 | void nmethod::log_state_change() const { |
1242 | if (LogCompilation) { |
1243 | if (xtty != NULL) { |
1244 | ttyLocker ttyl; // keep the following output all in one block |
1245 | if (_state == unloaded) { |
1246 | xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'" , |
1247 | os::current_thread_id()); |
1248 | } else { |
1249 | xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s" , |
1250 | os::current_thread_id(), |
1251 | (_state == zombie ? " zombie='1'" : "" )); |
1252 | } |
1253 | log_identity(xtty); |
1254 | xtty->stamp(); |
1255 | xtty->end_elem(); |
1256 | } |
1257 | } |
1258 | |
  const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1260 | CompileTask::print_ul(this, state_msg); |
1261 | if (PrintCompilation && _state != unloaded) { |
1262 | print_on(tty, state_msg); |
1263 | } |
1264 | } |
1265 | |
1266 | void nmethod::unlink_from_method(bool acquire_lock) { |
1267 | // We need to check if both the _code and _from_compiled_code_entry_point |
1268 | // refer to this nmethod because there is a race in setting these two fields |
1269 | // in Method* as seen in bugid 4947125. |
1270 | // If the vep() points to the zombie nmethod, the memory for the nmethod |
1271 | // could be flushed and the compiler and vtable stubs could still call |
1272 | // through it. |
1273 | if (method() != NULL && (method()->code() == this || |
1274 | method()->from_compiled_entry() == verified_entry_point())) { |
1275 | method()->clear_code(acquire_lock); |
1276 | } |
1277 | } |
1278 | |
1279 | /** |
1280 | * Common functionality for both make_not_entrant and make_zombie |
1281 | */ |
1282 | bool nmethod::make_not_entrant_or_zombie(int state) { |
  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  assert(!is_zombie(), "should not already be a zombie");
1285 | |
1286 | if (_state == state) { |
1287 | // Avoid taking the lock if already in required state. |
1288 | // This is safe from races because the state is an end-state, |
1289 | // which the nmethod cannot back out of once entered. |
1290 | // No need for fencing either. |
1291 | return false; |
1292 | } |
1293 | |
1294 | // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below. |
1295 | nmethodLocker nml(this); |
1296 | methodHandle the_method(method()); |
1297 | // This can be called while the system is already at a safepoint which is ok |
1298 | NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint()); |
1299 | |
1300 | // during patching, depending on the nmethod state we must notify the GC that |
1301 | // code has been unloaded, unregistering it. We cannot do this right while |
1302 | // holding the Patching_lock because we need to use the CodeCache_lock. This |
1303 | // would be prone to deadlocks. |
1304 | // This flag is used to remember whether we need to later lock and unregister. |
1305 | bool nmethod_needs_unregister = false; |
1306 | |
1307 | { |
1308 | // invalidate osr nmethod before acquiring the patching lock since |
1309 | // they both acquire leaf locks and we don't want a deadlock. |
1310 | // This logic is equivalent to the logic below for patching the |
1311 | // verified entry point of regular methods. We check that the |
1312 | // nmethod is in use to ensure that it is invalidated only once. |
1313 | if (is_osr_method() && is_in_use()) { |
1314 | // this effectively makes the osr nmethod not entrant |
1315 | invalidate_osr_method(); |
1316 | } |
1317 | |
1318 | // Enter critical section. Does not block for safepoint. |
1319 | MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag); |
1320 | |
1321 | if (_state == state) { |
1322 | // another thread already performed this transition so nothing |
1323 | // to do, but return false to indicate this. |
1324 | return false; |
1325 | } |
1326 | |
1327 | // The caller can be calling the method statically or through an inline |
1328 | // cache call. |
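    // Patch a jump to the handle_wrong_method stub over the verified entry
    // point, so that subsequent callers re-resolve the call site instead of
    // entering this nmethod.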
1329 | if (!is_osr_method() && !is_not_entrant()) { |
1330 | NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), |
1331 | SharedRuntime::get_handle_wrong_method_stub()); |
1332 | } |
1333 | |
1334 | if (is_in_use() && update_recompile_counts()) { |
1335 | // It's a true state change, so mark the method as decompiled. |
1336 | // Do it only for transition from alive. |
1337 | inc_decompile_count(); |
1338 | } |
1339 | |
1340 | // If the state is becoming a zombie, signal to unregister the nmethod with |
1341 | // the heap. |
1342 | // This nmethod may have already been unloaded during a full GC. |
1343 | if ((state == zombie) && !is_unloaded()) { |
1344 | nmethod_needs_unregister = true; |
1345 | } |
1346 | |
    // Must happen before state change. Otherwise we have a race condition in
    // nmethod::can_convert_to_zombie(). I.e., a method can immediately
    // transition its state from 'not_entrant' to 'zombie' without having to wait
    // for stack scanning.
1351 | if (state == not_entrant) { |
1352 | mark_as_seen_on_stack(); |
1353 | OrderAccess::storestore(); // _stack_traversal_mark and _state |
1354 | } |
1355 | |
1356 | // Change state |
1357 | _state = state; |
1358 | |
1359 | // Log the transition once |
1360 | log_state_change(); |
1361 | |
1362 | // Remove nmethod from method. |
1363 | unlink_from_method(false /* already owns Patching_lock */); |
1364 | } // leave critical region under Patching_lock |
1365 | |
1366 | #if INCLUDE_JVMCI |
1367 | // Invalidate can't occur while holding the Patching lock |
1368 | JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); |
1369 | if (nmethod_data != NULL) { |
1370 | nmethod_data->invalidate_nmethod_mirror(this); |
1371 | } |
1372 | #endif |
1373 | |
1374 | #ifdef ASSERT |
1375 | if (is_osr_method() && method() != NULL) { |
1376 | // Make sure osr nmethod is invalidated, i.e. not on the list |
1377 | bool found = method()->method_holder()->remove_osr_nmethod(this); |
    assert(!found, "osr nmethod should have been invalidated");
1379 | } |
1380 | #endif |
1381 | |
1382 | // When the nmethod becomes zombie it is no longer alive so the |
1383 | // dependencies must be flushed. nmethods in the not_entrant |
1384 | // state will be flushed later when the transition to zombie |
1385 | // happens or they get unloaded. |
1386 | if (state == zombie) { |
1387 | { |
1388 | // Flushing dependencies must be done before any possible |
1389 | // safepoint can sneak in, otherwise the oops used by the |
1390 | // dependency logic could have become stale. |
1391 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
1392 | if (nmethod_needs_unregister) { |
1393 | Universe::heap()->unregister_nmethod(this); |
1394 | } |
1395 | flush_dependencies(/*delete_immediately*/true); |
1396 | } |
1397 | |
1398 | #if INCLUDE_JVMCI |
1399 | // Now that the nmethod has been unregistered, it's |
1400 | // safe to clear the HotSpotNmethod mirror oop. |
1401 | if (nmethod_data != NULL) { |
1402 | nmethod_data->clear_nmethod_mirror(this); |
1403 | } |
1404 | #endif |
1405 | |
1406 | // Clear ICStubs to prevent back patching stubs of zombie or flushed |
1407 | // nmethods during the next safepoint (see ICStub::finalize), as well |
1408 | // as to free up CompiledICHolder resources. |
1409 | { |
1410 | CompiledICLocker ml(this); |
1411 | clear_ic_callsites(); |
1412 | } |
1413 | |
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
// event and it hasn't already been reported for this nmethod then
// report it now. (The event may have been reported earlier if the GC
// marked it for unloading.) JvmtiDeferredEventQueue support means
// we no longer go to a safepoint here.
1419 | post_compiled_method_unload(); |
1420 | |
1421 | #ifdef ASSERT |
1422 | // It's no longer safe to access the oops section since zombie |
1423 | // nmethods aren't scanned for GC. |
1424 | _oops_are_stale = true; |
1425 | #endif |
1426 | // the Method may be reclaimed by class unloading now that the |
1427 | // nmethod is in zombie state |
1428 | set_method(NULL); |
1429 | } else { |
1430 | assert(state == not_entrant, "other cases may need to be handled differently" ); |
1431 | } |
1432 | |
1433 | if (TraceCreateZombies && state == zombie) { |
1434 | ResourceMark m; |
1435 | tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s" , p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null" , (state == not_entrant) ? "not entrant" : "zombie" ); |
1436 | } |
1437 | |
1438 | NMethodSweeper::report_state_change(this); |
1439 | return true; |
1440 | } |
1441 | |
1442 | void nmethod::flush() { |
1443 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
1444 | // Note that there are no valid oops in the nmethod anymore. |
1445 | assert(!is_osr_method() || is_unloaded() || is_zombie(), |
1446 | "osr nmethod must be unloaded or zombie before flushing" ); |
1447 | assert(is_zombie() || is_osr_method(), "must be a zombie method" ); |
1448 | assert (!is_locked_by_vm(), "locked methods shouldn't be flushed" ); |
1449 | assert_locked_or_safepoint(CodeCache_lock); |
1450 | |
1451 | // completely deallocate this method |
1452 | Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this)); |
1453 | if (PrintMethodFlushing) { |
1454 | tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT |
1455 | "/Free CodeCache:" SIZE_FORMAT "Kb" , |
1456 | is_osr_method() ? "osr" : "" ,_compile_id, p2i(this), CodeCache::blob_count(), |
1457 | CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); |
1458 | } |
1459 | |
1460 | // We need to deallocate any ExceptionCache data. |
1461 | // Note that we do not need to grab the nmethod lock for this, it |
1462 | // better be thread safe if we're disposing of it! |
1463 | ExceptionCache* ec = exception_cache(); |
1464 | set_exception_cache(NULL); |
1465 | while(ec != NULL) { |
1466 | ExceptionCache* next = ec->next(); |
1467 | delete ec; |
1468 | ec = next; |
1469 | } |
1470 | |
1471 | Universe::heap()->flush_nmethod(this); |
1472 | CodeCache::unregister_old_nmethod(this); |
1473 | |
1474 | CodeBlob::flush(); |
1475 | CodeCache::free(this); |
1476 | } |
1477 | |
1478 | oop nmethod::oop_at(int index) const { |
1479 | if (index == 0) { |
1480 | return NULL; |
1481 | } |
1482 | return NativeAccess<AS_NO_KEEPALIVE>::oop_load(oop_addr_at(index)); |
1483 | } |
1484 | |
1485 | oop nmethod::oop_at_phantom(int index) const { |
1486 | if (index == 0) { |
1487 | return NULL; |
1488 | } |
1489 | return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(oop_addr_at(index)); |
1490 | } |
1491 | |
1492 | // |
1493 | // Notify all classes this nmethod is dependent on that it is no |
1494 | // longer dependent. This should only be called in two situations. |
1495 | // First, when a nmethod transitions to a zombie all dependents need |
1496 | // to be clear. Since zombification happens at a safepoint there's no |
1497 | // synchronization issues. The second place is a little more tricky. |
1498 | // During phase 1 of mark sweep class unloading may happen and as a |
1499 | // result some nmethods may get unloaded. In this case the flushing |
1500 | // of dependencies must happen during phase 1 since after GC any |
1501 | // dependencies in the unloaded nmethod won't be updated, so |
1502 | // traversing the dependency information in unsafe. In that case this |
1503 | // function is called with a boolean argument and this function only |
1504 | // notifies instanceKlasses that are reachable |
1505 | |
1506 | void nmethod::flush_dependencies(bool delete_immediately) { |
1507 | DEBUG_ONLY(bool called_by_gc = Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread();) |
1508 | assert(called_by_gc != delete_immediately, |
1509 | "delete_immediately is false if and only if we are called during GC" ); |
1510 | if (!has_flushed_dependencies()) { |
1511 | set_has_flushed_dependencies(); |
1512 | for (Dependencies::DepStream deps(this); deps.next(); ) { |
1513 | if (deps.type() == Dependencies::call_site_target_value) { |
1514 | // CallSite dependencies are managed on per-CallSite instance basis. |
1515 | oop call_site = deps.argument_oop(0); |
1516 | if (delete_immediately) { |
1517 | assert_locked_or_safepoint(CodeCache_lock); |
1518 | MethodHandles::remove_dependent_nmethod(call_site, this); |
1519 | } else { |
1520 | MethodHandles::clean_dependency_context(call_site); |
1521 | } |
1522 | } else { |
1523 | Klass* klass = deps.context_type(); |
1524 | if (klass == NULL) { |
1525 | continue; // ignore things like evol_method |
1526 | } |
1527 | // During GC delete_immediately is false, and liveness |
1528 | // of dependee determines class that needs to be updated. |
1529 | if (delete_immediately) { |
1530 | assert_locked_or_safepoint(CodeCache_lock); |
1531 | InstanceKlass::cast(klass)->remove_dependent_nmethod(this); |
1532 | } else if (klass->is_loader_alive()) { |
1533 | // The GC may clean dependency contexts concurrently and in parallel. |
1534 | InstanceKlass::cast(klass)->clean_dependency_context(); |
1535 | } |
1536 | } |
1537 | } |
1538 | } |
1539 | } |
1540 | |
1541 | // ------------------------------------------------------------------ |
1542 | // post_compiled_method_load_event |
1543 | // new method for install_code() path |
1544 | // Transfer information from compilation to jvmti |
1545 | void nmethod::post_compiled_method_load_event() { |
1546 | |
1547 | Method* moop = method(); |
1548 | HOTSPOT_COMPILED_METHOD_LOAD( |
1549 | (char *) moop->klass_name()->bytes(), |
1550 | moop->klass_name()->utf8_length(), |
1551 | (char *) moop->name()->bytes(), |
1552 | moop->name()->utf8_length(), |
1553 | (char *) moop->signature()->bytes(), |
1554 | moop->signature()->utf8_length(), |
1555 | insts_begin(), insts_size()); |
1556 | |
1557 | if (JvmtiExport::should_post_compiled_method_load() || |
1558 | JvmtiExport::should_post_compiled_method_unload()) { |
1559 | get_and_cache_jmethod_id(); |
1560 | } |
1561 | |
1562 | if (JvmtiExport::should_post_compiled_method_load()) { |
1563 | // Let the Service thread (which is a real Java thread) post the event |
1564 | MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); |
1565 | JvmtiDeferredEventQueue::enqueue( |
1566 | JvmtiDeferredEvent::compiled_method_load_event(this)); |
1567 | } |
1568 | } |
1569 | |
1570 | jmethodID nmethod::get_and_cache_jmethod_id() { |
1571 | if (_jmethod_id == NULL) { |
1572 | // Cache the jmethod_id since it can no longer be looked up once the |
1573 | // method itself has been marked for unloading. |
1574 | _jmethod_id = method()->jmethod_id(); |
1575 | } |
1576 | return _jmethod_id; |
1577 | } |
1578 | |
1579 | void nmethod::post_compiled_method_unload() { |
1580 | if (unload_reported()) { |
1581 | // During unloading we transition to unloaded and then to zombie |
1582 | // and the unloading is reported during the first transition. |
1583 | return; |
1584 | } |
1585 | |
1586 | assert(_method != NULL && !is_unloaded(), "just checking" ); |
1587 | DTRACE_METHOD_UNLOAD_PROBE(method()); |
1588 | |
1589 | // If a JVMTI agent has enabled the CompiledMethodUnload event then |
1590 | // post the event. Sometime later this nmethod will be made a zombie |
1591 | // by the sweeper but the Method* will not be valid at that point. |
1592 | // If the _jmethod_id is null then no load event was ever requested |
1593 | // so don't bother posting the unload. The main reason for this is |
1594 | // that the jmethodID is a weak reference to the Method* so if |
1595 | // it's being unloaded there's no way to look it up since the weak |
1596 | // ref will have been cleared. |
1597 | if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { |
1598 | assert(!unload_reported(), "already unloaded" ); |
1599 | JvmtiDeferredEvent event = |
1600 | JvmtiDeferredEvent::compiled_method_unload_event(this, |
1601 | _jmethod_id, insts_begin()); |
1602 | MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); |
1603 | JvmtiDeferredEventQueue::enqueue(event); |
1604 | } |
1605 | |
1606 | // The JVMTI CompiledMethodUnload event can be enabled or disabled at |
// any time. As the nmethod is being unloaded now we mark it as
1608 | // having the unload event reported - this will ensure that we don't |
1609 | // attempt to report the event in the unlikely scenario where the |
1610 | // event is enabled at the time the nmethod is made a zombie. |
1611 | set_unload_reported(); |
1612 | } |
1613 | |
1614 | // Iterate over metadata calling this function. Used by RedefineClasses |
1615 | void nmethod::metadata_do(MetadataClosure* f) { |
1616 | { |
1617 | // Visit all immediate references that are embedded in the instruction stream. |
1618 | RelocIterator iter(this, oops_reloc_begin()); |
1619 | while (iter.next()) { |
1620 | if (iter.type() == relocInfo::metadata_type) { |
1621 | metadata_Relocation* r = iter.metadata_reloc(); |
// In this loop, we must only follow those metadatas directly embedded in
1623 | // the code. Other metadatas (oop_index>0) are seen as part of |
1624 | // the metadata section below. |
1625 | assert(1 == (r->metadata_is_immediate()) + |
1626 | (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()), |
1627 | "metadata must be found in exactly one place" ); |
1628 | if (r->metadata_is_immediate() && r->metadata_value() != NULL) { |
1629 | Metadata* md = r->metadata_value(); |
1630 | if (md != _method) f->do_metadata(md); |
1631 | } |
1632 | } else if (iter.type() == relocInfo::virtual_call_type) { |
1633 | // Check compiledIC holders associated with this nmethod |
1634 | ResourceMark rm; |
1635 | CompiledIC *ic = CompiledIC_at(&iter); |
1636 | if (ic->is_icholder_call()) { |
1637 | CompiledICHolder* cichk = ic->cached_icholder(); |
1638 | f->do_metadata(cichk->holder_metadata()); |
1639 | f->do_metadata(cichk->holder_klass()); |
1640 | } else { |
1641 | Metadata* ic_oop = ic->cached_metadata(); |
1642 | if (ic_oop != NULL) { |
1643 | f->do_metadata(ic_oop); |
1644 | } |
1645 | } |
1646 | } |
1647 | } |
1648 | } |
1649 | |
1650 | // Visit the metadata section |
1651 | for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { |
1652 | if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops |
1653 | Metadata* md = *p; |
1654 | f->do_metadata(md); |
1655 | } |
1656 | |
1657 | // Visit metadata not embedded in the other places. |
1658 | if (_method != NULL) f->do_metadata(_method); |
1659 | } |
1660 | |
1661 | // The _is_unloading_state encodes a tuple comprising the unloading cycle |
// and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
1663 | // This is the bit layout of the _is_unloading_state byte: 00000CCU |
1664 | // CC refers to the cycle, which has 2 bits, and U refers to the result of |
1665 | // IsUnloadingBehaviour::is_unloading() for that unloading cycle. |
1666 | |
1667 | class IsUnloadingState: public AllStatic { |
1668 | static const uint8_t _is_unloading_mask = 1; |
1669 | static const uint8_t _is_unloading_shift = 0; |
1670 | static const uint8_t _unloading_cycle_mask = 6; |
1671 | static const uint8_t _unloading_cycle_shift = 1; |
1672 | |
1673 | static uint8_t set_is_unloading(uint8_t state, bool value) { |
1674 | state &= ~_is_unloading_mask; |
1675 | if (value) { |
1676 | state |= 1 << _is_unloading_shift; |
1677 | } |
1678 | assert(is_unloading(state) == value, "unexpected unloading cycle overflow" ); |
1679 | return state; |
1680 | } |
1681 | |
1682 | static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) { |
1683 | state &= ~_unloading_cycle_mask; |
1684 | state |= value << _unloading_cycle_shift; |
1685 | assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow" ); |
1686 | return state; |
1687 | } |
1688 | |
1689 | public: |
1690 | static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; } |
1691 | static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; } |
1692 | |
1693 | static uint8_t create(bool is_unloading, uint8_t unloading_cycle) { |
1694 | uint8_t state = 0; |
1695 | state = set_is_unloading(state, is_unloading); |
1696 | state = set_unloading_cycle(state, unloading_cycle); |
1697 | return state; |
1698 | } |
1699 | }; |
1700 | |
1701 | bool nmethod::is_unloading() { |
1702 | uint8_t state = RawAccess<MO_RELAXED>::load(&_is_unloading_state); |
1703 | bool state_is_unloading = IsUnloadingState::is_unloading(state); |
1704 | uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state); |
1705 | if (state_is_unloading) { |
1706 | return true; |
1707 | } |
1708 | uint8_t current_cycle = CodeCache::unloading_cycle(); |
1709 | if (state_unloading_cycle == current_cycle) { |
1710 | return false; |
1711 | } |
1712 | |
1713 | // The IsUnloadingBehaviour is responsible for checking if there are any dead |
1714 | // oops in the CompiledMethod, by calling oops_do on it. |
1715 | state_unloading_cycle = current_cycle; |
1716 | |
1717 | if (is_zombie()) { |
1718 | // Zombies without calculated unloading epoch are never unloading due to GC. |
1719 | |
1720 | // There are no races where a previously observed is_unloading() nmethod |
1721 | // suddenly becomes not is_unloading() due to here being observed as zombie. |
1722 | |
1723 | // With STW unloading, all is_alive() && is_unloading() nmethods are unlinked |
1724 | // and unloaded in the safepoint. That makes races where an nmethod is first |
1725 | // observed as is_alive() && is_unloading() and subsequently observed as |
1726 | // is_zombie() impossible. |
1727 | |
1728 | // With concurrent unloading, all references to is_unloading() nmethods are |
1729 | // first unlinked (e.g. IC caches and dependency contexts). Then a global |
1730 | // handshake operation is performed with all JavaThreads before finally |
1731 | // unloading the nmethods. The sweeper never converts is_alive() && is_unloading() |
1732 | // nmethods to zombies; it waits for them to become is_unloaded(). So before |
1733 | // the global handshake, it is impossible for is_unloading() nmethods to |
1734 | // racingly become is_zombie(). And is_unloading() is calculated for all is_alive() |
1735 | // nmethods before taking that global handshake, meaning that it will never |
1736 | // be recalculated after the handshake. |
1737 | |
1738 | // After that global handshake, is_unloading() nmethods are only observable |
1739 | // to the iterators, and they will never trigger recomputation of the cached |
1740 | // is_unloading_state, and hence may not suffer from such races. |
1741 | |
1742 | state_is_unloading = false; |
1743 | } else { |
1744 | state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this); |
1745 | } |
1746 | |
1747 | state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle); |
1748 | |
1749 | RawAccess<MO_RELAXED>::store(&_is_unloading_state, state); |
1750 | |
1751 | return state_is_unloading; |
1752 | } |
1753 | |
1754 | void nmethod::clear_unloading_state() { |
1755 | uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle()); |
1756 | RawAccess<MO_RELAXED>::store(&_is_unloading_state, state); |
1757 | } |
1758 | |
1759 | |
1760 | // This is called at the end of the strong tracing/marking phase of a |
1761 | // GC to unload an nmethod if it contains otherwise unreachable |
1762 | // oops. |
1763 | |
1764 | void nmethod::do_unloading(bool unloading_occurred) { |
// Make sure the oops are ready to receive visitors
1766 | assert(!is_zombie() && !is_unloaded(), |
1767 | "should not call follow on zombie or unloaded nmethod" ); |
1768 | |
1769 | if (is_unloading()) { |
1770 | make_unloaded(); |
1771 | } else { |
1772 | guarantee(unload_nmethod_caches(unloading_occurred), |
1773 | "Should not need transition stubs" ); |
1774 | } |
1775 | } |
1776 | |
1777 | void nmethod::oops_do(OopClosure* f, bool allow_dead) { |
// make sure the oops are ready to receive visitors
1779 | assert(allow_dead || is_alive(), "should not call follow on dead nmethod" ); |
1780 | |
1781 | // Prevent extra code cache walk for platforms that don't have immediate oops. |
1782 | if (relocInfo::mustIterateImmediateOopsInCode()) { |
1783 | RelocIterator iter(this, oops_reloc_begin()); |
1784 | |
1785 | while (iter.next()) { |
1786 | if (iter.type() == relocInfo::oop_type ) { |
1787 | oop_Relocation* r = iter.oop_reloc(); |
1788 | // In this loop, we must only follow those oops directly embedded in |
1789 | // the code. Other oops (oop_index>0) are seen as part of scopes_oops. |
1790 | assert(1 == (r->oop_is_immediate()) + |
1791 | (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), |
1792 | "oop must be found in exactly one place" ); |
1793 | if (r->oop_is_immediate() && r->oop_value() != NULL) { |
1794 | f->do_oop(r->oop_addr()); |
1795 | } |
1796 | } |
1797 | } |
1798 | } |
1799 | |
1800 | // Scopes |
1801 | // This includes oop constants not inlined in the code stream. |
1802 | for (oop* p = oops_begin(); p < oops_end(); p++) { |
1803 | if (*p == Universe::non_oop_word()) continue; // skip non-oops |
1804 | f->do_oop(p); |
1805 | } |
1806 | } |
1807 | |
1808 | #define NMETHOD_SENTINEL ((nmethod*)badAddress) |
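// The sentinel is a non-NULL value that is not a valid nmethod pointer. It
// doubles as the claim marker stored in _oops_do_mark_link and as the
// terminator of the _oops_do_mark_nmethods list (see the prologue and
// epilogue below).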
1809 | |
1810 | nmethod* volatile nmethod::_oops_do_mark_nmethods; |
1811 | |
1812 | // An nmethod is "marked" if its _mark_link is set non-null. |
1813 | // Even if it is the end of the linked list, it will have a non-null link value, |
1814 | // as long as it is on the list. |
1815 | // This code must be MP safe, because it is used from parallel GC passes. |
1816 | bool nmethod::test_set_oops_do_mark() { |
1817 | assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called" ); |
1818 | if (_oops_do_mark_link == NULL) { |
1819 | // Claim this nmethod for this thread to mark. |
1820 | if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) { |
1821 | // Atomically append this nmethod (now claimed) to the head of the list: |
1822 | nmethod* observed_mark_nmethods = _oops_do_mark_nmethods; |
1823 | for (;;) { |
1824 | nmethod* required_mark_nmethods = observed_mark_nmethods; |
1825 | _oops_do_mark_link = required_mark_nmethods; |
1826 | observed_mark_nmethods = |
1827 | Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods); |
1828 | if (observed_mark_nmethods == required_mark_nmethods) |
1829 | break; |
1830 | } |
1831 | // Mark was clear when we first saw this guy. |
1832 | LogTarget(Trace, gc, nmethod) lt; |
1833 | if (lt.is_enabled()) { |
1834 | LogStream ls(lt); |
1835 | CompileTask::print(&ls, this, "oops_do, mark" , /*short_form:*/ true); |
1836 | } |
1837 | return false; |
1838 | } |
1839 | } |
1840 | // On fall through, another racing thread marked this nmethod before we did. |
1841 | return true; |
1842 | } |
1843 | |
1844 | void nmethod::oops_do_marking_prologue() { |
1845 | log_trace(gc, nmethod)("oops_do_marking_prologue" ); |
1846 | assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row" ); |
1847 | // We use cmpxchg instead of regular assignment here because the user |
1848 | // may fork a bunch of threads, and we need them all to see the same state. |
1849 | nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL); |
1850 | guarantee(observed == NULL, "no races in this sequential code" ); |
1851 | } |
1852 | |
1853 | void nmethod::oops_do_marking_epilogue() { |
1854 | assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row" ); |
1855 | nmethod* cur = _oops_do_mark_nmethods; |
1856 | while (cur != NMETHOD_SENTINEL) { |
1857 | assert(cur != NULL, "not NULL-terminated" ); |
1858 | nmethod* next = cur->_oops_do_mark_link; |
1859 | cur->_oops_do_mark_link = NULL; |
1860 | DEBUG_ONLY(cur->verify_oop_relocations()); |
1861 | |
1862 | LogTarget(Trace, gc, nmethod) lt; |
1863 | if (lt.is_enabled()) { |
1864 | LogStream ls(lt); |
1865 | CompileTask::print(&ls, cur, "oops_do, unmark" , /*short_form:*/ true); |
1866 | } |
1867 | cur = next; |
1868 | } |
1869 | nmethod* required = _oops_do_mark_nmethods; |
1870 | nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required); |
1871 | guarantee(observed == required, "no races in this sequential code" ); |
1872 | log_trace(gc, nmethod)("oops_do_marking_epilogue" ); |
1873 | } |
1874 | |
1875 | inline bool includes(void* p, void* from, void* to) { |
1876 | return from <= p && p < to; |
1877 | } |
1878 | |
1879 | |
1880 | void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) { |
1881 | assert(count >= 2, "must be sentinel values, at least" ); |
1882 | |
1883 | #ifdef ASSERT |
1884 | // must be sorted and unique; we do a binary search in find_pc_desc() |
1885 | int prev_offset = pcs[0].pc_offset(); |
1886 | assert(prev_offset == PcDesc::lower_offset_limit, |
1887 | "must start with a sentinel" ); |
1888 | for (int i = 1; i < count; i++) { |
1889 | int this_offset = pcs[i].pc_offset(); |
1890 | assert(this_offset > prev_offset, "offsets must be sorted" ); |
1891 | prev_offset = this_offset; |
1892 | } |
1893 | assert(prev_offset == PcDesc::upper_offset_limit, |
1894 | "must end with a sentinel" ); |
1895 | #endif //ASSERT |
1896 | |
1897 | // Search for MethodHandle invokes and tag the nmethod. |
1898 | for (int i = 0; i < count; i++) { |
1899 | if (pcs[i].is_method_handle_invoke()) { |
1900 | set_has_method_handle_invokes(true); |
1901 | break; |
1902 | } |
1903 | } |
1904 | assert(has_method_handle_invokes() == (_deopt_mh_handler_begin != NULL), "must have deopt mh handler" ); |
1905 | |
1906 | int size = count * sizeof(PcDesc); |
1907 | assert(scopes_pcs_size() >= size, "oob" ); |
1908 | memcpy(scopes_pcs_begin(), pcs, size); |
1909 | |
1910 | // Adjust the final sentinel downward. |
1911 | PcDesc* last_pc = &scopes_pcs_begin()[count-1]; |
1912 | assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity" ); |
1913 | last_pc->set_pc_offset(content_size() + 1); |
1914 | for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) { |
1915 | // Fill any rounding gaps with copies of the last record. |
1916 | last_pc[1] = last_pc[0]; |
1917 | } |
1918 | // The following assert could fail if sizeof(PcDesc) is not |
1919 | // an integral multiple of oopSize (the rounding term). |
1920 | // If it fails, change the logic to always allocate a multiple |
1921 | // of sizeof(PcDesc), and fill unused words with copies of *last_pc. |
1922 | assert(last_pc + 1 == scopes_pcs_end(), "must match exactly" ); |
1923 | } |
1924 | |
1925 | void nmethod::copy_scopes_data(u_char* buffer, int size) { |
1926 | assert(scopes_data_size() >= size, "oob" ); |
1927 | memcpy(scopes_data_begin(), buffer, size); |
1928 | } |
1929 | |
1930 | #ifdef ASSERT |
1931 | static PcDesc* linear_search(const PcDescSearch& search, int pc_offset, bool approximate) { |
1932 | PcDesc* lower = search.scopes_pcs_begin(); |
1933 | PcDesc* upper = search.scopes_pcs_end(); |
1934 | lower += 1; // exclude initial sentinel |
1935 | PcDesc* res = NULL; |
1936 | for (PcDesc* p = lower; p < upper; p++) { |
1937 | NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc |
1938 | if (match_desc(p, pc_offset, approximate)) { |
1939 | if (res == NULL) |
1940 | res = p; |
1941 | else |
1942 | res = (PcDesc*) badAddress; |
1943 | } |
1944 | } |
1945 | return res; |
1946 | } |
1947 | #endif |
1948 | |
1949 | |
1950 | // Finds a PcDesc with real-pc equal to "pc" |
1951 | PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search) { |
1952 | address base_address = search.code_begin(); |
1953 | if ((pc < base_address) || |
1954 | (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) { |
1955 | return NULL; // PC is wildly out of range |
1956 | } |
1957 | int pc_offset = (int) (pc - base_address); |
1958 | |
// Check whether the PcDesc cache contains the desired PcDesc
// (This has an almost 100% hit rate.)
1961 | PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate); |
1962 | if (res != NULL) { |
1963 | assert(res == linear_search(search, pc_offset, approximate), "cache ok" ); |
1964 | return res; |
1965 | } |
1966 | |
1967 | // Fallback algorithm: quasi-linear search for the PcDesc |
1968 | // Find the last pc_offset less than the given offset. |
1969 | // The successor must be the required match, if there is a match at all. |
1970 | // (Use a fixed radix to avoid expensive affine pointer arithmetic.) |
1971 | PcDesc* lower = search.scopes_pcs_begin(); |
1972 | PcDesc* upper = search.scopes_pcs_end(); |
1973 | upper -= 1; // exclude final sentinel |
1974 | if (lower >= upper) return NULL; // native method; no PcDescs at all |
1975 | |
1976 | #define assert_LU_OK \ |
1977 | /* invariant on lower..upper during the following search: */ \ |
1978 | assert(lower->pc_offset() < pc_offset, "sanity"); \ |
1979 | assert(upper->pc_offset() >= pc_offset, "sanity") |
1980 | assert_LU_OK; |
1981 | |
1982 | // Use the last successful return as a split point. |
1983 | PcDesc* mid = _pc_desc_cache.last_pc_desc(); |
1984 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); |
1985 | if (mid->pc_offset() < pc_offset) { |
1986 | lower = mid; |
1987 | } else { |
1988 | upper = mid; |
1989 | } |
1990 | |
1991 | // Take giant steps at first (4096, then 256, then 16, then 1) |
1992 | const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1); |
1993 | const int RADIX = (1 << LOG2_RADIX); |
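// With LOG2_RADIX == 4 (product builds) the loop below probes in strides of
// 4096, 256 and 16 PcDescs before the final linear sweep; in debug builds
// LOG2_RADIX == 3, giving strides of 512, 64 and 8.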
1994 | for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) { |
1995 | while ((mid = lower + step) < upper) { |
1996 | assert_LU_OK; |
1997 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); |
1998 | if (mid->pc_offset() < pc_offset) { |
1999 | lower = mid; |
2000 | } else { |
2001 | upper = mid; |
2002 | break; |
2003 | } |
2004 | } |
2005 | assert_LU_OK; |
2006 | } |
2007 | |
2008 | // Sneak up on the value with a linear search of length ~16. |
2009 | while (true) { |
2010 | assert_LU_OK; |
2011 | mid = lower + 1; |
2012 | NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches); |
2013 | if (mid->pc_offset() < pc_offset) { |
2014 | lower = mid; |
2015 | } else { |
2016 | upper = mid; |
2017 | break; |
2018 | } |
2019 | } |
2020 | #undef assert_LU_OK |
2021 | |
2022 | if (match_desc(upper, pc_offset, approximate)) { |
2023 | assert(upper == linear_search(search, pc_offset, approximate), "search ok" ); |
2024 | _pc_desc_cache.add_pc_desc(upper); |
2025 | return upper; |
2026 | } else { |
2027 | assert(NULL == linear_search(search, pc_offset, approximate), "search ok" ); |
2028 | return NULL; |
2029 | } |
2030 | } |
2031 | |
2032 | |
2033 | void nmethod::check_all_dependencies(DepChange& changes) { |
2034 | // Checked dependencies are allocated into this ResourceMark |
2035 | ResourceMark rm; |
2036 | |
2037 | // Turn off dependency tracing while actually testing dependencies. |
2038 | NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) ); |
2039 | |
2040 | typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash, |
2041 | &DependencySignature::equals, 11027> DepTable; |
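// (11027 is prime; a prime table size helps spread the dependency
// signatures evenly across the hash buckets.)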
2042 | |
2043 | DepTable* table = new DepTable(); |
2044 | |
2045 | // Iterate over live nmethods and check dependencies of all nmethods that are not |
2046 | // marked for deoptimization. A particular dependency is only checked once. |
2047 | NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); |
2048 | while(iter.next()) { |
2049 | nmethod* nm = iter.method(); |
2050 | // Only notify for live nmethods |
2051 | if (!nm->is_marked_for_deoptimization()) { |
2052 | for (Dependencies::DepStream deps(nm); deps.next(); ) { |
2053 | // Construct abstraction of a dependency. |
2054 | DependencySignature* current_sig = new DependencySignature(deps); |
2055 | |
2056 | // Determine if dependency is already checked. table->put(...) returns |
2057 | // 'true' if the dependency is added (i.e., was not in the hashtable). |
2058 | if (table->put(*current_sig, 1)) { |
2059 | if (deps.check_dependency() != NULL) { |
2060 | // Dependency checking failed. Print out information about the failed |
2061 | // dependency and finally fail with an assert. We can fail here, since |
2062 | // dependency checking is never done in a product build. |
2063 | tty->print_cr("Failed dependency:" ); |
2064 | changes.print(); |
2065 | nm->print(); |
2066 | nm->print_dependencies(); |
2067 | assert(false, "Should have been marked for deoptimization" ); |
2068 | } |
2069 | } |
2070 | } |
2071 | } |
2072 | } |
2073 | } |
2074 | |
2075 | bool nmethod::check_dependency_on(DepChange& changes) { |
2076 | // What has happened: |
2077 | // 1) a new class dependee has been added |
2078 | // 2) dependee and all its super classes have been marked |
2079 | bool found_check = false; // set true if we are upset |
2080 | for (Dependencies::DepStream deps(this); deps.next(); ) { |
2081 | // Evaluate only relevant dependencies. |
2082 | if (deps.spot_check_dependency_at(changes) != NULL) { |
2083 | found_check = true; |
2084 | NOT_DEBUG(break); |
2085 | } |
2086 | } |
2087 | return found_check; |
2088 | } |
2089 | |
2090 | // Called from mark_for_deoptimization, when dependee is invalidated. |
2091 | bool nmethod::is_dependent_on_method(Method* dependee) { |
2092 | for (Dependencies::DepStream deps(this); deps.next(); ) { |
2093 | if (deps.type() != Dependencies::evol_method) |
2094 | continue; |
2095 | Method* method = deps.method_argument(0); |
2096 | if (method == dependee) return true; |
2097 | } |
2098 | return false; |
2099 | } |
2100 | |
2101 | |
2102 | bool nmethod::is_patchable_at(address instr_addr) { |
2103 | assert(insts_contains(instr_addr), "wrong nmethod used" ); |
2104 | if (is_zombie()) { |
2105 | // a zombie may never be patched |
2106 | return false; |
2107 | } |
2108 | return true; |
2109 | } |
2110 | |
2111 | |
2112 | void nmethod_init() { |
2113 | // make sure you didn't forget to adjust the filler fields |
2114 | assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word" ); |
2115 | } |
2116 | |
2117 | |
2118 | //------------------------------------------------------------------------------------------- |
2119 | |
2120 | |
2121 | // QQQ might we make this work from a frame?? |
2122 | nmethodLocker::nmethodLocker(address pc) { |
2123 | CodeBlob* cb = CodeCache::find_blob(pc); |
2124 | guarantee(cb != NULL && cb->is_compiled(), "bad pc for a nmethod found" ); |
2125 | _nm = cb->as_compiled_method(); |
2126 | lock_nmethod(_nm); |
2127 | } |
2128 | |
2129 | // Only JvmtiDeferredEvent::compiled_method_unload_event() |
2130 | // should pass zombie_ok == true. |
2131 | void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) { |
2132 | if (cm == NULL) return; |
2133 | if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method |
2134 | nmethod* nm = cm->as_nmethod(); |
2135 | Atomic::inc(&nm->_lock_count); |
2136 | assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p" , nm); |
2137 | } |
2138 | |
2139 | void nmethodLocker::unlock_nmethod(CompiledMethod* cm) { |
2140 | if (cm == NULL) return; |
2141 | if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method |
2142 | nmethod* nm = cm->as_nmethod(); |
2143 | Atomic::dec(&nm->_lock_count); |
2144 | assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock" ); |
2145 | } |
2146 | |
2147 | |
2148 | // ----------------------------------------------------------------------------- |
2149 | // Verification |
2150 | |
2151 | class VerifyOopsClosure: public OopClosure { |
2152 | nmethod* _nm; |
2153 | bool _ok; |
2154 | public: |
2155 | VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { } |
2156 | bool ok() { return _ok; } |
2157 | virtual void do_oop(oop* p) { |
2158 | if (oopDesc::is_oop_or_null(*p)) return; |
2159 | // Print diagnostic information before calling print_nmethod(). |
2160 | // Assertions therein might prevent call from returning. |
2161 | tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)" , |
2162 | p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm)); |
2163 | if (_ok) { |
2164 | _nm->print_nmethod(true); |
2165 | _ok = false; |
2166 | } |
2167 | } |
2168 | virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
2169 | }; |
2170 | |
2171 | void nmethod::verify() { |
2172 | |
// Hmm. That OSR methods can be deopted but not marked as zombie or not_entrant
// seems odd.
2175 | |
2176 | if (is_zombie() || is_not_entrant() || is_unloaded()) |
2177 | return; |
2178 | |
2179 | // Make sure all the entry points are correctly aligned for patching. |
2180 | NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); |
2181 | |
2182 | // assert(oopDesc::is_oop(method()), "must be valid"); |
2183 | |
2184 | ResourceMark rm; |
2185 | |
2186 | if (!CodeCache::contains(this)) { |
2187 | fatal("nmethod at " INTPTR_FORMAT " not in zone" , p2i(this)); |
2188 | } |
2189 | |
2190 | if(is_native_method() ) |
2191 | return; |
2192 | |
2193 | nmethod* nm = CodeCache::find_nmethod(verified_entry_point()); |
2194 | if (nm != this) { |
2195 | fatal("findNMethod did not find this nmethod (" INTPTR_FORMAT ")" , p2i(this)); |
2196 | } |
2197 | |
2198 | for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { |
2199 | if (! p->verify(this)) { |
2200 | tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)" , p2i(this)); |
2201 | } |
2202 | } |
2203 | |
2204 | #ifdef ASSERT |
2205 | #if INCLUDE_JVMCI |
2206 | { |
2207 | // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap |
2208 | ImmutableOopMapSet* oms = oop_maps(); |
2209 | ImplicitExceptionTable implicit_table(this); |
2210 | for (uint i = 0; i < implicit_table.len(); i++) { |
2211 | int exec_offset = (int) implicit_table.get_exec_offset(i); |
2212 | if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) { |
2213 | assert(pc_desc_at(code_begin() + exec_offset) != NULL, "missing PcDesc" ); |
2214 | bool found = false; |
2215 | for (int i = 0, imax = oms->count(); i < imax; i++) { |
2216 | if (oms->pair_at(i)->pc_offset() == exec_offset) { |
2217 | found = true; |
2218 | break; |
2219 | } |
2220 | } |
2221 | assert(found, "missing oopmap" ); |
2222 | } |
2223 | } |
2224 | } |
2225 | #endif |
2226 | #endif |
2227 | |
2228 | VerifyOopsClosure voc(this); |
2229 | oops_do(&voc); |
2230 | assert(voc.ok(), "embedded oops must be OK" ); |
2231 | Universe::heap()->verify_nmethod(this); |
2232 | |
2233 | verify_scopes(); |
2234 | } |
2235 | |
2236 | |
2237 | void nmethod::verify_interrupt_point(address call_site) { |
2238 | // Verify IC only when nmethod installation is finished. |
2239 | if (!is_not_installed()) { |
2240 | if (CompiledICLocker::is_safe(this)) { |
2241 | CompiledIC_at(this, call_site); |
2242 | CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); |
2243 | } else { |
2244 | CompiledICLocker ml_verify(this); |
2245 | CompiledIC_at(this, call_site); |
2246 | } |
2247 | } |
2248 | |
2249 | PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address()); |
2250 | assert(pd != NULL, "PcDesc must exist" ); |
2251 | for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), |
2252 | pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), |
2253 | pd->return_oop()); |
2254 | !sd->is_top(); sd = sd->sender()) { |
2255 | sd->verify(); |
2256 | } |
2257 | } |
2258 | |
2259 | void nmethod::verify_scopes() { |
2260 | if( !method() ) return; // Runtime stubs have no scope |
2261 | if (method()->is_native()) return; // Ignore stub methods. |
2262 | // iterate through all interrupt point |
2263 | // and verify the debug information is valid. |
2264 | RelocIterator iter((nmethod*)this); |
2265 | while (iter.next()) { |
2266 | address stub = NULL; |
2267 | switch (iter.type()) { |
2268 | case relocInfo::virtual_call_type: |
2269 | verify_interrupt_point(iter.addr()); |
2270 | break; |
2271 | case relocInfo::opt_virtual_call_type: |
2272 | stub = iter.opt_virtual_call_reloc()->static_stub(false); |
2273 | verify_interrupt_point(iter.addr()); |
2274 | break; |
2275 | case relocInfo::static_call_type: |
2276 | stub = iter.static_call_reloc()->static_stub(false); |
2277 | //verify_interrupt_point(iter.addr()); |
2278 | break; |
2279 | case relocInfo::runtime_call_type: |
2280 | case relocInfo::runtime_call_w_cp_type: { |
2281 | address destination = iter.reloc()->value(); |
2282 | // Right now there is no way to find out which entries support |
2283 | // an interrupt point. It would be nice if we had this |
2284 | // information in a table. |
2285 | break; |
2286 | } |
2287 | default: |
2288 | break; |
2289 | } |
2290 | assert(stub == NULL || stub_contains(stub), "static call stub outside stub section" ); |
2291 | } |
2292 | } |
2293 | |
2294 | |
2295 | // ----------------------------------------------------------------------------- |
2296 | // Printing operations |
2297 | |
2298 | void nmethod::print() const { |
2299 | ttyLocker ttyl; // keep the following output all in one block |
2300 | print(tty); |
2301 | } |
2302 | |
2303 | void nmethod::print(outputStream* st) const { |
2304 | ResourceMark rm; |
2305 | |
2306 | st->print("Compiled method " ); |
2307 | |
2308 | if (is_compiled_by_c1()) { |
2309 | st->print("(c1) " ); |
2310 | } else if (is_compiled_by_c2()) { |
2311 | st->print("(c2) " ); |
2312 | } else if (is_compiled_by_jvmci()) { |
2313 | st->print("(JVMCI) " ); |
2314 | } else { |
2315 | st->print("(n/a) " ); |
2316 | } |
2317 | |
print_on(st, NULL);
2319 | |
2320 | if (WizardMode) { |
2321 | st->print("((nmethod*) " INTPTR_FORMAT ") " , p2i(this)); |
2322 | st->print(" for method " INTPTR_FORMAT , p2i(method())); |
2323 | st->print(" { " ); |
2324 | st->print_cr("%s " , state()); |
2325 | st->print_cr("}:" ); |
2326 | } |
2327 | if (size () > 0) st->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2328 | p2i(this), |
2329 | p2i(this) + size(), |
2330 | size()); |
2331 | if (relocation_size () > 0) st->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2332 | p2i(relocation_begin()), |
2333 | p2i(relocation_end()), |
2334 | relocation_size()); |
2335 | if (consts_size () > 0) st->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2336 | p2i(consts_begin()), |
2337 | p2i(consts_end()), |
2338 | consts_size()); |
2339 | if (insts_size () > 0) st->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2340 | p2i(insts_begin()), |
2341 | p2i(insts_end()), |
2342 | insts_size()); |
2343 | if (stub_size () > 0) st->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2344 | p2i(stub_begin()), |
2345 | p2i(stub_end()), |
2346 | stub_size()); |
2347 | if (oops_size () > 0) st->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2348 | p2i(oops_begin()), |
2349 | p2i(oops_end()), |
2350 | oops_size()); |
2351 | if (metadata_size () > 0) st->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2352 | p2i(metadata_begin()), |
2353 | p2i(metadata_end()), |
2354 | metadata_size()); |
2355 | if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2356 | p2i(scopes_data_begin()), |
2357 | p2i(scopes_data_end()), |
2358 | scopes_data_size()); |
2359 | if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2360 | p2i(scopes_pcs_begin()), |
2361 | p2i(scopes_pcs_end()), |
2362 | scopes_pcs_size()); |
2363 | if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2364 | p2i(dependencies_begin()), |
2365 | p2i(dependencies_end()), |
2366 | dependencies_size()); |
2367 | if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2368 | p2i(handler_table_begin()), |
2369 | p2i(handler_table_end()), |
2370 | handler_table_size()); |
2371 | if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2372 | p2i(nul_chk_table_begin()), |
2373 | p2i(nul_chk_table_end()), |
2374 | nul_chk_table_size()); |
2375 | #if INCLUDE_JVMCI |
2376 | if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2377 | p2i(speculations_begin()), |
2378 | p2i(speculations_end()), |
2379 | speculations_size()); |
2380 | if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d" , |
2381 | p2i(jvmci_data_begin()), |
2382 | p2i(jvmci_data_end()), |
2383 | jvmci_data_size()); |
2384 | #endif |
2385 | } |
2386 | |
2387 | void nmethod::print_code() { |
2388 | HandleMark hm; |
2389 | ResourceMark m; |
2390 | ttyLocker ttyl; |
2391 | // Call the specialized decode method of this class. |
2392 | decode(tty); |
2393 | } |
2394 | |
2395 | #ifndef PRODUCT // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN |
2396 | |
2397 | void nmethod::print_dependencies() { |
2398 | ResourceMark rm; |
2399 | ttyLocker ttyl; // keep the following output all in one block |
2400 | tty->print_cr("Dependencies:" ); |
2401 | for (Dependencies::DepStream deps(this); deps.next(); ) { |
2402 | deps.print_dependency(); |
2403 | Klass* ctxk = deps.context_type(); |
2404 | if (ctxk != NULL) { |
2405 | if (ctxk->is_instance_klass() && InstanceKlass::cast(ctxk)->is_dependent_nmethod(this)) { |
2406 | tty->print_cr(" [nmethod<=klass]%s" , ctxk->external_name()); |
2407 | } |
2408 | } |
2409 | deps.log_dependency(); // put it into the xml log also |
2410 | } |
2411 | } |
2412 | #endif |
2413 | |
2414 | #if defined(SUPPORT_DATA_STRUCTS) |
2415 | |
2416 | // Print the oops from the underlying CodeBlob. |
2417 | void nmethod::print_oops(outputStream* st) { |
2418 | HandleMark hm; |
2419 | ResourceMark m; |
2420 | st->print("Oops:" ); |
2421 | if (oops_begin() < oops_end()) { |
2422 | st->cr(); |
2423 | for (oop* p = oops_begin(); p < oops_end(); p++) { |
2424 | Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false); |
2425 | st->print(PTR_FORMAT " " , *((uintptr_t*)p)); |
2426 | if (*p == Universe::non_oop_word()) { |
2427 | st->print_cr("NON_OOP" ); |
2428 | continue; // skip non-oops |
2429 | } |
2430 | if (*p == NULL) { |
2431 | st->print_cr("NULL-oop" ); |
2432 | continue; // skip non-oops |
2433 | } |
2434 | (*p)->print_value_on(st); |
2435 | st->cr(); |
2436 | } |
2437 | } else { |
2438 | st->print_cr(" <list empty>" ); |
2439 | } |
2440 | } |
2441 | |
2442 | // Print metadata pool. |
2443 | void nmethod::print_metadata(outputStream* st) { |
2444 | HandleMark hm; |
2445 | ResourceMark m; |
2446 | st->print("Metadata:" ); |
2447 | if (metadata_begin() < metadata_end()) { |
2448 | st->cr(); |
2449 | for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { |
2450 | Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false); |
2451 | st->print(PTR_FORMAT " " , *((uintptr_t*)p)); |
2452 | if (*p && *p != Universe::non_oop_word()) { |
2453 | (*p)->print_value_on(st); |
2454 | } |
2455 | st->cr(); |
2456 | } |
2457 | } else { |
2458 | st->print_cr(" <list empty>" ); |
2459 | } |
2460 | } |
2461 | |
2462 | #ifndef PRODUCT // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN |
2463 | void nmethod::print_scopes_on(outputStream* st) { |
2464 | // Find the first pc desc for all scopes in the code and print it. |
2465 | ResourceMark rm; |
2466 | st->print("scopes:" ); |
2467 | if (scopes_pcs_begin() < scopes_pcs_end()) { |
2468 | st->cr(); |
2469 | for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { |
2470 | if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null) |
2471 | continue; |
2472 | |
2473 | ScopeDesc* sd = scope_desc_at(p->real_pc(this)); |
2474 | while (sd != NULL) { |
2475 | sd->print_on(st, p); // print output ends with a newline |
2476 | sd = sd->sender(); |
2477 | } |
2478 | } |
2479 | } else { |
2480 | st->print_cr(" <list empty>" ); |
2481 | } |
2482 | } |
2483 | #endif |
2484 | |
#ifndef PRODUCT // RelocIterator supports printing only then.
2486 | void nmethod::print_relocations() { |
2487 | ResourceMark m; // in case methods get printed via the debugger |
2488 | tty->print_cr("relocations:" ); |
2489 | RelocIterator iter(this); |
2490 | iter.print(); |
2491 | } |
2492 | #endif |
2493 | |
2494 | void nmethod::print_pcs_on(outputStream* st) { |
2495 | ResourceMark m; // in case methods get printed via debugger |
2496 | st->print("pc-bytecode offsets:" ); |
2497 | if (scopes_pcs_begin() < scopes_pcs_end()) { |
2498 | st->cr(); |
2499 | for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { |
2500 | p->print_on(st, this); // print output ends with a newline |
2501 | } |
2502 | } else { |
2503 | st->print_cr(" <list empty>" ); |
2504 | } |
2505 | } |
2506 | |
2507 | void nmethod::print_handler_table() { |
2508 | ExceptionHandlerTable(this).print(); |
2509 | } |
2510 | |
2511 | void nmethod::print_nul_chk_table() { |
2512 | ImplicitExceptionTable(this).print(code_begin()); |
2513 | } |
2514 | |
2515 | void nmethod::print_recorded_oops() { |
2516 | const int n = oops_count(); |
2517 | const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6; |
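// log_n is the print field width used to right-align the indices 0..n-1
// (indices with 5 digits share the 6-digit width).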
2518 | tty->print("Recorded oops:" ); |
2519 | if (n > 0) { |
2520 | tty->cr(); |
2521 | for (int i = 0; i < n; i++) { |
2522 | oop o = oop_at(i); |
2523 | tty->print("#%*d: " INTPTR_FORMAT " " , log_n, i, p2i(o)); |
2524 | if (o == (oop)Universe::non_oop_word()) { |
2525 | tty->print("non-oop word" ); |
2526 | } else if (o == NULL) { |
2527 | tty->print("NULL-oop" ); |
2528 | } else { |
2529 | o->print_value_on(tty); |
2530 | } |
2531 | tty->cr(); |
2532 | } |
2533 | } else { |
2534 | tty->print_cr(" <list empty>" ); |
2535 | } |
2536 | } |
2537 | |
2538 | void nmethod::print_recorded_metadata() { |
2539 | const int n = metadata_count(); |
2540 | const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6; |
2541 | tty->print("Recorded metadata:" ); |
2542 | if (n > 0) { |
2543 | tty->cr(); |
2544 | for (int i = 0; i < n; i++) { |
2545 | Metadata* m = metadata_at(i); |
2546 | tty->print("#%*d: " INTPTR_FORMAT " " , log_n, i, p2i(m)); |
2547 | if (m == (Metadata*)Universe::non_oop_word()) { |
2548 | tty->print("non-metadata word" ); |
2549 | } else if (m == NULL) { |
2550 | tty->print("NULL-oop" ); |
2551 | } else { |
2552 | Metadata::print_value_on_maybe_null(tty, m); |
2553 | } |
2554 | tty->cr(); |
2555 | } |
2556 | } else { |
2557 | tty->print_cr(" <list empty>" ); |
2558 | } |
2559 | } |
2560 | #endif |
2561 | |
2562 | #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) |
2563 | |
2564 | void nmethod::print_constant_pool(outputStream* st) { |
2565 | //----------------------------------- |
2566 | //---< Print the constant pool >--- |
2567 | //----------------------------------- |
2568 | int consts_size = this->consts_size(); |
2569 | if ( consts_size > 0 ) { |
2570 | unsigned char* cstart = this->consts_begin(); |
2571 | unsigned char* cp = cstart; |
2572 | unsigned char* cend = cp + consts_size; |
2573 | unsigned int bytes_per_line = 4; |
2574 | unsigned int CP_alignment = 8; |
2575 | unsigned int n; |
2576 | |
2577 | st->cr(); |
2578 | |
2579 | //---< print CP header to make clear what's printed >--- |
2580 | if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) { |
2581 | n = bytes_per_line; |
2582 | st->print_cr("[Constant Pool]" ); |
2583 | Disassembler::print_location(cp, cstart, cend, st, true, true); |
2584 | Disassembler::print_hexdata(cp, n, st, true); |
2585 | st->cr(); |
2586 | } else { |
2587 | n = (uintptr_t)cp&(bytes_per_line-1); |
2588 | st->print_cr("[Constant Pool (unaligned)]" ); |
2589 | } |
2590 | |
2591 | //---< print CP contents, bytes_per_line at a time >--- |
2592 | while (cp < cend) { |
2593 | Disassembler::print_location(cp, cstart, cend, st, true, false); |
2594 | Disassembler::print_hexdata(cp, n, st, false); |
2595 | cp += n; |
2596 | n = bytes_per_line; |
2597 | st->cr(); |
2598 | } |
2599 | |
2600 | //---< Show potential alignment gap between constant pool and code >--- |
2601 | cend = code_begin(); |
2602 | if( cp < cend ) { |
2603 | n = 4; |
2604 | st->print_cr("[Code entry alignment]" ); |
2605 | while (cp < cend) { |
2606 | Disassembler::print_location(cp, cstart, cend, st, false, false); |
2607 | cp += n; |
2608 | st->cr(); |
2609 | } |
2610 | } |
2611 | } else { |
2612 | st->print_cr("[Constant Pool (empty)]" ); |
2613 | } |
2614 | st->cr(); |
2615 | } |
2616 | |
2617 | #endif |
2618 | |
2619 | // Disassemble this nmethod. |
2620 | // Print additional debug information, if requested. This could be code |
2621 | // comments, block comments, profiling counters, etc. |
// The undisassembled format is useful when no disassembler library is available.
2623 | // The resulting hex dump (with markers) can be disassembled later, or on |
2624 | // another system, when/where a disassembler library is available. |
2625 | void nmethod::decode2(outputStream* ost) const { |
2626 | |
2627 | // Called from frame::back_trace_with_decode without ResourceMark. |
2628 | ResourceMark rm; |
2629 | |
2630 | // Make sure we have a valid stream to print on. |
2631 | outputStream* st = ost ? ost : tty; |
2632 | |
2633 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY) |
2634 | const bool use_compressed_format = true; |
2635 | const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() || |
2636 | AbstractDisassembler::show_block_comment()); |
2637 | #else |
2638 | const bool use_compressed_format = Disassembler::is_abstract(); |
const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
2640 | AbstractDisassembler::show_block_comment()); |
2641 | #endif |
2642 | |
2643 | st->cr(); |
2644 | this->print(st); |
2645 | st->cr(); |
2646 | |
2647 | #if defined(SUPPORT_ASSEMBLY) |
2648 | //---------------------------------- |
2649 | //---< Print real disassembly >--- |
2650 | //---------------------------------- |
2651 | if (! use_compressed_format) { |
2652 | Disassembler::decode(const_cast<nmethod*>(this), st); |
2653 | return; |
2654 | } |
2655 | #endif |
2656 | |
2657 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) |
2658 | |
2659 | // Compressed undisassembled disassembly format. |
// The following status values are defined/supported:
2661 | // = 0 - currently at bol() position, nothing printed yet on current line. |
2662 | // = 1 - currently at position after print_location(). |
2663 | // > 1 - in the midst of printing instruction stream bytes. |
2664 | int compressed_format_idx = 0; |
2665 | int code_comment_column = 0; |
2666 | const int instr_maxlen = Assembler::instr_maxlen(); |
2667 | const uint tabspacing = 8; |
2668 | unsigned char* start = this->code_begin(); |
2669 | unsigned char* p = this->code_begin(); |
2670 | unsigned char* end = this->code_end(); |
2671 | unsigned char* pss = p; // start of a code section (used for offsets) |
2672 | |
2673 | if ((start == NULL) || (end == NULL)) { |
2674 | st->print_cr("PrintAssembly not possible due to uninitialized section pointers" ); |
2675 | return; |
2676 | } |
2677 | #endif |
2678 | |
2679 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) |
2680 | //---< plain abstract disassembly, no comments or anything, just section headers >--- |
2681 | if (use_compressed_format && ! compressed_with_comments) { |
2682 | const_cast<nmethod*>(this)->print_constant_pool(st); |
2683 | |
2684 | //---< Open the output (Marker for post-mortem disassembler) >--- |
2685 | st->print_cr("[MachCode]" ); |
const char* header = NULL;
2687 | address p0 = p; |
2688 | while (p < end) { |
2689 | address pp = p; |
2690 | while ((p < end) && (header == NULL)) { |
2691 | header = nmethod_section_label(p); |
2692 | pp = p; |
2693 | p += Assembler::instr_len(p); |
2694 | } |
2695 | if (pp > p0) { |
2696 | AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen()); |
2697 | p0 = pp; |
2698 | p = pp; |
2699 | header = NULL; |
2700 | } else if (header != NULL) { |
2701 | st->bol(); |
2702 | st->print_cr("%s" , header); |
2703 | header = NULL; |
2704 | } |
2705 | } |
2706 | //---< Close the output (Marker for post-mortem disassembler) >--- |
2707 | st->bol(); |
2708 | st->print_cr("[/MachCode]" ); |
2709 | return; |
2710 | } |
2711 | #endif |
2712 | |
2713 | #if defined(SUPPORT_ABSTRACT_ASSEMBLY) |
2714 | //---< abstract disassembly with comments and section headers merged in >--- |
2715 | if (compressed_with_comments) { |
2716 | const_cast<nmethod*>(this)->print_constant_pool(st); |
2717 | |
2718 | //---< Open the output (Marker for post-mortem disassembler) >--- |
2719 | st->print_cr("[MachCode]" ); |
2720 | while ((p < end) && (p != NULL)) { |
2721 | const int instruction_size_in_bytes = Assembler::instr_len(p); |
2722 | |
2723 | //---< Block comments for nmethod. Interrupts instruction stream, if any. >--- |
2724 | // Outputs a bol() before and a cr() after, but only if a comment is printed. |
2725 | // Prints nmethod_section_label as well. |
2726 | if (AbstractDisassembler::show_block_comment()) { |
2727 | print_block_comment(st, p); |
2728 | if (st->position() == 0) { |
2729 | compressed_format_idx = 0; |
2730 | } |
2731 | } |
2732 | |
2733 | //---< New location information after line break >--- |
2734 | if (compressed_format_idx == 0) { |
2735 | code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); |
2736 | compressed_format_idx = 1; |
2737 | } |
2738 | |
2739 | //---< Code comment for current instruction. Address range [p..(p+len)) >--- |
2740 | unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes; |
2741 | S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end |
2742 | |
2743 | if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) { |
2744 | //---< interrupt instruction byte stream for code comment >--- |
2745 | if (compressed_format_idx > 1) { |
2746 | st->cr(); // interrupt byte stream |
2747 | st->cr(); // add an empty line |
2748 | code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); |
2749 | } |
2750 | const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end ); |
2751 | st->bol(); |
2752 | compressed_format_idx = 0; |
2753 | } |
2754 | |
2755 | //---< New location information after line break >--- |
2756 | if (compressed_format_idx == 0) { |
2757 | code_comment_column = Disassembler::print_location(p, pss, end, st, false, false); |
2758 | compressed_format_idx = 1; |
2759 | } |
2760 | |
2761 | //---< Nicely align instructions for readability >--- |
2762 | if (compressed_format_idx > 1) { |
2763 | Disassembler::print_delimiter(st); |
2764 | } |
2765 | |
2766 | //---< Now, finally, print the actual instruction bytes >--- |
2767 | unsigned char* p0 = p; |
2768 | p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen); |
2769 | compressed_format_idx += p - p0; |
2770 | |
2771 | if (Disassembler::start_newline(compressed_format_idx-1)) { |
2772 | st->cr(); |
2773 | compressed_format_idx = 0; |
2774 | } |
2775 | } |
2776 | //---< Close the output (Marker for post-mortem disassembler) >--- |
2777 | st->bol(); |
2778 | st->print_cr("[/MachCode]" ); |
2779 | return; |
2780 | } |
2781 | #endif |
2782 | } |
2783 | |
2784 | #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) |
2785 | |
2786 | const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { |
2787 | RelocIterator iter(this, begin, end); |
2788 | bool have_one = false; |
2789 | while (iter.next()) { |
2790 | have_one = true; |
2791 | switch (iter.type()) { |
2792 | case relocInfo::none: return "no_reloc" ; |
2793 | case relocInfo::oop_type: { |
2794 | // Get a non-resizable resource-allocated stringStream. |
2795 | // Our callees make use of (nested) ResourceMarks. |
2796 | stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024); |
2797 | oop_Relocation* r = iter.oop_reloc(); |
2798 | oop obj = r->oop_value(); |
2799 | st.print("oop(" ); |
2800 | if (obj == NULL) st.print("NULL" ); |
2801 | else obj->print_value_on(&st); |
2802 | st.print(")" ); |
2803 | return st.as_string(); |
2804 | } |
2805 | case relocInfo::metadata_type: { |
2806 | stringStream st; |
2807 | metadata_Relocation* r = iter.metadata_reloc(); |
2808 | Metadata* obj = r->metadata_value(); |
2809 | st.print("metadata(" ); |
2810 | if (obj == NULL) st.print("NULL" ); |
2811 | else obj->print_value_on(&st); |
2812 | st.print(")" ); |
2813 | return st.as_string(); |
2814 | } |
2815 | case relocInfo::runtime_call_type: |
2816 | case relocInfo::runtime_call_w_cp_type: { |
2817 | stringStream st; |
2818 | st.print("runtime_call" ); |
2819 | CallRelocation* r = (CallRelocation*)iter.reloc(); |
2820 | address dest = r->destination(); |
2821 | CodeBlob* cb = CodeCache::find_blob(dest); |
2822 | if (cb != NULL) { |
2823 | st.print(" %s" , cb->name()); |
2824 | } else { |
2825 | ResourceMark rm; |
2826 | const int buflen = 1024; |
2827 | char* buf = NEW_RESOURCE_ARRAY(char, buflen); |
2828 | int offset; |
2829 | if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) { |
2830 | st.print(" %s" , buf); |
2831 | if (offset != 0) { |
2832 | st.print("+%d" , offset); |
2833 | } |
2834 | } |
2835 | } |
2836 | return st.as_string(); |
2837 | } |
2838 | case relocInfo::virtual_call_type: { |
2839 | stringStream st; |
2840 | st.print_raw("virtual_call" ); |
2841 | virtual_call_Relocation* r = iter.virtual_call_reloc(); |
2842 | Method* m = r->method_value(); |
2843 | if (m != NULL) { |
2844 | assert(m->is_method(), "" ); |
2845 | m->print_short_name(&st); |
2846 | } |
2847 | return st.as_string(); |
2848 | } |
2849 | case relocInfo::opt_virtual_call_type: { |
2850 | stringStream st; |
2851 | st.print_raw("optimized virtual_call" ); |
2852 | opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc(); |
2853 | Method* m = r->method_value(); |
2854 | if (m != NULL) { |
2855 | assert(m->is_method(), "" ); |
2856 | m->print_short_name(&st); |
2857 | } |
2858 | return st.as_string(); |
2859 | } |
2860 | case relocInfo::static_call_type: { |
2861 | stringStream st; |
2862 | st.print_raw("static_call" ); |
2863 | static_call_Relocation* r = iter.static_call_reloc(); |
2864 | Method* m = r->method_value(); |
2865 | if (m != NULL) { |
2866 | assert(m->is_method(), "" ); |
2867 | m->print_short_name(&st); |
2868 | } |
2869 | return st.as_string(); |
2870 | } |
2871 | case relocInfo::static_stub_type: return "static_stub" ; |
2872 | case relocInfo::external_word_type: return "external_word" ; |
2873 | case relocInfo::internal_word_type: return "internal_word" ; |
2874 | case relocInfo::section_word_type: return "section_word" ; |
2875 | case relocInfo::poll_type: return "poll" ; |
2876 | case relocInfo::poll_return_type: return "poll_return" ; |
2877 | case relocInfo::trampoline_stub_type: return "trampoline_stub" ; |
2878 | case relocInfo::type_mask: return "type_bit_mask" ; |
2879 | |
2880 | default: |
2881 | break; |
2882 | } |
2883 | } |
2884 | return have_one ? "other" : NULL; |
2885 | } |
2886 | |
// Return the last scope in (begin..end]
2888 | ScopeDesc* nmethod::scope_desc_in(address begin, address end) { |
2889 | PcDesc* p = pc_desc_near(begin+1); |
2890 | if (p != NULL && p->real_pc(this) <= end) { |
2891 | return new ScopeDesc(this, p->scope_decode_offset(), |
2892 | p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(), |
2893 | p->return_oop()); |
2894 | } |
2895 | return NULL; |
2896 | } |
2897 | |
2898 | const char* nmethod::nmethod_section_label(address pos) const { |
2899 | const char* label = NULL; |
2900 | if (pos == code_begin()) label = "[Instructions begin]" ; |
2901 | if (pos == entry_point()) label = "[Entry Point]" ; |
2902 | if (pos == verified_entry_point()) label = "[Verified Entry Point]" ; |
2903 | if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]" ; |
2904 | if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]" ; |
2905 | // Check stub_code before checking exception_handler or deopt_handler. |
2906 | if (pos == this->stub_begin()) label = "[Stub Code]" ; |
2907 | if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]" ; |
2908 | if (JVMCI_ONLY(_deopt_handler_begin != NULL &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]" ; |
2909 | return label; |
2910 | } |
2911 | |
2912 | void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const { |
2913 | if (print_section_labels) { |
2914 | const char* label = nmethod_section_label(block_begin); |
2915 | if (label != NULL) { |
2916 | stream->bol(); |
2917 | stream->print_cr("%s" , label); |
2918 | } |
2919 | } |
2920 | |
2921 | if (block_begin == entry_point()) { |
2922 | methodHandle m = method(); |
2923 | if (m.not_null()) { |
2924 | stream->print(" # " ); |
2925 | m->print_value_on(stream); |
2926 | stream->cr(); |
2927 | } |
2928 | if (m.not_null() && !is_osr_method()) { |
2929 | ResourceMark rm; |
2930 | int sizeargs = m->size_of_parameters(); |
2931 | BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs); |
2932 | VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs); |
2933 | { |
2934 | int sig_index = 0; |
2935 | if (!m->is_static()) |
2936 | sig_bt[sig_index++] = T_OBJECT; // 'this' |
2937 | for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) { |
2938 | BasicType t = ss.type(); |
2939 | sig_bt[sig_index++] = t; |
2940 | if (type2size[t] == 2) { |
2941 | sig_bt[sig_index++] = T_VOID; |
2942 | } else { |
2943 | assert(type2size[t] == 1, "size is 1 or 2" ); |
2944 | } |
2945 | } |
2946 | assert(sig_index == sizeargs, "" ); |
2947 | } |
2948 | const char* spname = "sp" ; // make arch-specific? |
2949 | intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false); |
2950 | int stack_slot_offset = this->frame_size() * wordSize; |
2951 | int tab1 = 14, tab2 = 24; |
2952 | int sig_index = 0; |
2953 | int arg_index = (m->is_static() ? 0 : -1); |
2954 | bool did_old_sp = false; |
2955 | for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) { |
2956 | bool at_this = (arg_index == -1); |
2957 | bool at_old_sp = false; |
2958 | BasicType t = (at_this ? T_OBJECT : ss.type()); |
2959 | assert(t == sig_bt[sig_index], "sigs in sync" ); |
2960 | if (at_this) |
2961 | stream->print(" # this: " ); |
2962 | else |
2963 | stream->print(" # parm%d: " , arg_index); |
2964 | stream->move_to(tab1); |
2965 | VMReg fst = regs[sig_index].first(); |
2966 | VMReg snd = regs[sig_index].second(); |
2967 | if (fst->is_reg()) { |
2968 | stream->print("%s" , fst->name()); |
2969 | if (snd->is_valid()) { |
2970 | stream->print(":%s" , snd->name()); |
2971 | } |
2972 | } else if (fst->is_stack()) { |
2973 | int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset; |
2974 | if (offset == stack_slot_offset) at_old_sp = true; |
2975 | stream->print("[%s+0x%x]" , spname, offset); |
2976 | } else { |
2977 | stream->print("reg%d:%d??" , (int)(intptr_t)fst, (int)(intptr_t)snd); |
2978 | } |
2979 | stream->print(" " ); |
2980 | stream->move_to(tab2); |
2981 | stream->print("= " ); |
2982 | if (at_this) { |
2983 | m->method_holder()->print_value_on(stream); |
2984 | } else { |
2985 | bool did_name = false; |
2986 | if (!at_this && ss.is_object()) { |
2987 | Symbol* name = ss.as_symbol_or_null(); |
2988 | if (name != NULL) { |
2989 | name->print_value_on(stream); |
2990 | did_name = true; |
2991 | } |
2992 | } |
2993 | if (!did_name) |
2994 | stream->print("%s" , type2name(t)); |
2995 | } |
2996 | if (at_old_sp) { |
2997 | stream->print(" (%s of caller)" , spname); |
2998 | did_old_sp = true; |
2999 | } |
3000 | stream->cr(); |
3001 | sig_index += type2size[t]; |
3002 | arg_index += 1; |
3003 | if (!at_this) ss.next(); |
3004 | } |
3005 | if (!did_old_sp) { |
3006 | stream->print(" # " ); |
3007 | stream->move_to(tab1); |
3008 | stream->print("[%s+0x%x]" , spname, stack_slot_offset); |
3009 | stream->print(" (%s of caller)" , spname); |
3010 | stream->cr(); |
3011 | } |
3012 | } |
3013 | } |
3014 | } |
3015 | |
// Returns whether this nmethod has code comments (scope descriptors,
// relocations, or implicit exceptions) in the given address range.
bool nmethod::has_code_comment(address begin, address end) {
3018 | // scopes? |
3019 | ScopeDesc* sd = scope_desc_in(begin, end); |
3020 | if (sd != NULL) return true; |
3021 | |
3022 | // relocations? |
3023 | const char* str = reloc_string_for(begin, end); |
3024 | if (str != NULL) return true; |
3025 | |
3026 | // implicit exceptions? |
3027 | int cont_offset = ImplicitExceptionTable(this).continuation_offset(begin - code_begin()); |
3028 | if (cont_offset != 0) return true; |
3029 | |
3030 | return false; |
3031 | } |
3032 | |
void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
3034 | ImplicitExceptionTable implicit_table(this); |
3035 | int pc_offset = begin - code_begin(); |
3036 | int cont_offset = implicit_table.continuation_offset(pc_offset); |
3037 | bool oop_map_required = false; |
3038 | if (cont_offset != 0) { |
3039 | st->move_to(column, 6, 0); |
3040 | if (pc_offset == cont_offset) { |
3041 | st->print("; implicit exception: deoptimizes" ); |
3042 | oop_map_required = true; |
3043 | } else { |
3044 | st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset)); |
3045 | } |
3046 | } |
3047 | |
3048 | // Find an oopmap in (begin, end]. We use the odd half-closed |
3049 | // interval so that oop maps and scope descs which are tied to the |
3050 | // byte after a call are printed with the call itself. OopMaps |
3051 | // associated with implicit exceptions are printed with the implicit |
3052 | // instruction. |
3053 | address base = code_begin(); |
3054 | ImmutableOopMapSet* oms = oop_maps(); |
3055 | if (oms != NULL) { |
3056 | for (int i = 0, imax = oms->count(); i < imax; i++) { |
3057 | const ImmutableOopMapPair* pair = oms->pair_at(i); |
3058 | const ImmutableOopMap* om = pair->get_from(oms); |
3059 | address pc = base + pair->pc_offset(); |
3060 | if (pc >= begin) { |
3061 | #if INCLUDE_JVMCI |
3062 | bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset(); |
3063 | #else |
3064 | bool is_implicit_deopt = false; |
3065 | #endif |
        if (is_implicit_deopt ? pc == begin : (pc > begin && pc <= end)) {
3067 | st->move_to(column, 6, 0); |
3068 | st->print("; " ); |
3069 | om->print_on(st); |
3070 | oop_map_required = false; |
3071 | } |
3072 | } |
3073 | if (pc > end) { |
3074 | break; |
3075 | } |
3076 | } |
3077 | } |
3078 | assert(!oop_map_required, "missed oopmap" ); |
3079 | |
3080 | // Print any debug info present at this pc. |
3081 | ScopeDesc* sd = scope_desc_in(begin, end); |
3082 | if (sd != NULL) { |
3083 | st->move_to(column, 6, 0); |
3084 | if (sd->bci() == SynchronizationEntryBCI) { |
3085 | st->print(";*synchronization entry" ); |
3086 | } else if (sd->bci() == AfterBci) { |
3087 | st->print(";* method exit (unlocked if synchronized)" ); |
3088 | } else if (sd->bci() == UnwindBci) { |
3089 | st->print(";* unwind (locked if synchronized)" ); |
3090 | } else if (sd->bci() == AfterExceptionBci) { |
3091 | st->print(";* unwind (unlocked if synchronized)" ); |
3092 | } else if (sd->bci() == UnknownBci) { |
3093 | st->print(";* unknown" ); |
3094 | } else if (sd->bci() == InvalidFrameStateBci) { |
3095 | st->print(";* invalid frame state" ); |
3096 | } else { |
3097 | if (sd->method() == NULL) { |
3098 | st->print("method is NULL" ); |
3099 | } else if (sd->method()->is_native()) { |
3100 | st->print("method is native" ); |
3101 | } else { |
3102 | Bytecodes::Code bc = sd->method()->java_code_at(sd->bci()); |
3103 | st->print(";*%s" , Bytecodes::name(bc)); |
3104 | switch (bc) { |
3105 | case Bytecodes::_invokevirtual: |
3106 | case Bytecodes::_invokespecial: |
3107 | case Bytecodes::_invokestatic: |
3108 | case Bytecodes::_invokeinterface: |
3109 | { |
3110 | Bytecode_invoke invoke(sd->method(), sd->bci()); |
3111 | st->print(" " ); |
3112 | if (invoke.name() != NULL) |
3113 | invoke.name()->print_symbol_on(st); |
3114 | else |
3115 | st->print("<UNKNOWN>" ); |
3116 | break; |
3117 | } |
3118 | case Bytecodes::_getfield: |
3119 | case Bytecodes::_putfield: |
3120 | case Bytecodes::_getstatic: |
3121 | case Bytecodes::_putstatic: |
3122 | { |
3123 | Bytecode_field field(sd->method(), sd->bci()); |
3124 | st->print(" " ); |
3125 | if (field.name() != NULL) |
3126 | field.name()->print_symbol_on(st); |
3127 | else |
3128 | st->print("<UNKNOWN>" ); |
3129 | } |
3130 | default: |
3131 | break; |
3132 | } |
3133 | } |
3134 | st->print(" {reexecute=%d rethrow=%d return_oop=%d}" , sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop()); |
3135 | } |
3136 | |
3137 | // Print all scopes |
3138 | for (;sd != NULL; sd = sd->sender()) { |
3139 | st->move_to(column, 6, 0); |
3140 | st->print("; -" ); |
3141 | if (sd->should_reexecute()) { |
3142 | st->print(" (reexecute)" ); |
3143 | } |
      if (sd->method() == NULL) {
        st->print("method is NULL");
        st->print("@%d", sd->bci());
      } else {
        sd->method()->print_short_name(st);
        int lineno = sd->method()->line_number_from_bci(sd->bci());
        if (lineno != -1) {
          st->print("@%d (line %d)", sd->bci(), lineno);
        } else {
          st->print("@%d", sd->bci());
        }
      }
3155 | st->cr(); |
3156 | } |
3157 | } |
3158 | |
  // Print relocation information.
  // reloc_string_for() allocates from the resource area; scope the allocation
  // with a ResourceMark to prevent a leak.
3161 | ResourceMark rm; |
3162 | const char* str = reloc_string_for(begin, end); |
3163 | if (str != NULL) { |
3164 | if (sd != NULL) st->cr(); |
3165 | st->move_to(column, 6, 0); |
3166 | st->print("; {%s}" , str); |
3167 | } |
3168 | } |
3169 | |
3170 | #endif |
3171 | |
3172 | class DirectNativeCallWrapper: public NativeCallWrapper { |
3173 | private: |
3174 | NativeCall* _call; |
3175 | |
3176 | public: |
3177 | DirectNativeCallWrapper(NativeCall* call) : _call(call) {} |
3178 | |
3179 | virtual address destination() const { return _call->destination(); } |
3180 | virtual address instruction_address() const { return _call->instruction_address(); } |
3181 | virtual address next_instruction_address() const { return _call->next_instruction_address(); } |
3182 | virtual address return_address() const { return _call->return_address(); } |
3183 | |
3184 | virtual address get_resolve_call_stub(bool is_optimized) const { |
3185 | if (is_optimized) { |
3186 | return SharedRuntime::get_resolve_opt_virtual_call_stub(); |
3187 | } |
3188 | return SharedRuntime::get_resolve_virtual_call_stub(); |
3189 | } |
3190 | |
3191 | virtual void set_destination_mt_safe(address dest) { |
3192 | #if INCLUDE_AOT |
3193 | if (UseAOT) { |
3194 | CodeBlob* callee = CodeCache::find_blob(dest); |
3195 | CompiledMethod* cm = callee->as_compiled_method_or_null(); |
3196 | if (cm != NULL && cm->is_far_code()) { |
3197 | // Temporary fix, see JDK-8143106 |
3198 | CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address()); |
3199 | csc->set_to_far(methodHandle(cm->method()), dest); |
3200 | return; |
3201 | } |
3202 | } |
3203 | #endif |
3204 | _call->set_destination_mt_safe(dest); |
3205 | } |
3206 | |
3207 | virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) { |
3208 | CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address()); |
3209 | #if INCLUDE_AOT |
3210 | if (info.to_aot()) { |
3211 | csc->set_to_far(method, info.entry()); |
3212 | } else |
3213 | #endif |
3214 | { |
3215 | csc->set_to_interpreted(method, info.entry()); |
3216 | } |
3217 | } |
3218 | |
3219 | virtual void verify() const { |
3220 | // make sure code pattern is actually a call imm32 instruction |
3221 | _call->verify(); |
3222 | _call->verify_alignment(); |
3223 | } |
3224 | |
3225 | virtual void verify_resolve_call(address dest) const { |
3226 | CodeBlob* db = CodeCache::find_blob_unsafe(dest); |
3227 | assert(db != NULL && !db->is_adapter_blob(), "must use stub!" ); |
3228 | } |
3229 | |
3230 | virtual bool is_call_to_interpreted(address dest) const { |
3231 | CodeBlob* cb = CodeCache::find_blob(_call->instruction_address()); |
3232 | return cb->contains(dest); |
3233 | } |
3234 | |
3235 | virtual bool is_safe_for_patching() const { return false; } |
3236 | |
3237 | virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const { |
3238 | return nativeMovConstReg_at(r->cached_value()); |
3239 | } |
3240 | |
3241 | virtual void *get_data(NativeInstruction* instruction) const { |
3242 | return (void*)((NativeMovConstReg*) instruction)->data(); |
3243 | } |
3244 | |
3245 | virtual void set_data(NativeInstruction* instruction, intptr_t data) { |
3246 | ((NativeMovConstReg*) instruction)->set_data(data); |
3247 | } |
3248 | }; |
3249 | |
3250 | NativeCallWrapper* nmethod::call_wrapper_at(address call) const { |
3251 | return new DirectNativeCallWrapper((NativeCall*) call); |
3252 | } |
3253 | |
3254 | NativeCallWrapper* nmethod::call_wrapper_before(address return_pc) const { |
3255 | return new DirectNativeCallWrapper(nativeCall_before(return_pc)); |
3256 | } |
3257 | |
3258 | address nmethod::call_instruction_address(address pc) const { |
3259 | if (NativeCall::is_call_before(pc)) { |
3260 | NativeCall *ncall = nativeCall_before(pc); |
3261 | return ncall->instruction_address(); |
3262 | } |
3263 | return NULL; |
3264 | } |
3265 | |
3266 | CompiledStaticCall* nmethod::compiledStaticCall_at(Relocation* call_site) const { |
3267 | return CompiledDirectStaticCall::at(call_site); |
3268 | } |
3269 | |
3270 | CompiledStaticCall* nmethod::compiledStaticCall_at(address call_site) const { |
3271 | return CompiledDirectStaticCall::at(call_site); |
3272 | } |
3273 | |
3274 | CompiledStaticCall* nmethod::compiledStaticCall_before(address return_addr) const { |
3275 | return CompiledDirectStaticCall::before(return_addr); |
3276 | } |
3277 | |
3278 | #if defined(SUPPORT_DATA_STRUCTS) |
3279 | void nmethod::print_value_on(outputStream* st) const { |
3280 | st->print("nmethod" ); |
3281 | print_on(st, NULL); |
3282 | } |
3283 | #endif |
3284 | |
3285 | #ifndef PRODUCT |
3286 | |
3287 | void nmethod::print_calls(outputStream* st) { |
3288 | RelocIterator iter(this); |
3289 | while (iter.next()) { |
3290 | switch (iter.type()) { |
3291 | case relocInfo::virtual_call_type: |
3292 | case relocInfo::opt_virtual_call_type: { |
3293 | CompiledICLocker ml_verify(this); |
3294 | CompiledIC_at(&iter)->print(); |
3295 | break; |
3296 | } |
3297 | case relocInfo::static_call_type: |
3298 | st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr())); |
3299 | CompiledDirectStaticCall::at(iter.reloc())->print(); |
3300 | break; |
3301 | default: |
3302 | break; |
3303 | } |
3304 | } |
3305 | } |
3306 | |
3307 | void nmethod::print_statistics() { |
3308 | ttyLocker ttyl; |
3309 | if (xtty != NULL) xtty->head("statistics type='nmethod'" ); |
3310 | native_nmethod_stats.print_native_nmethod_stats(); |
3311 | #ifdef COMPILER1 |
3312 | c1_java_nmethod_stats.print_nmethod_stats("C1" ); |
3313 | #endif |
3314 | #ifdef COMPILER2 |
3315 | c2_java_nmethod_stats.print_nmethod_stats("C2" ); |
3316 | #endif |
3317 | #if INCLUDE_JVMCI |
3318 | jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI" ); |
3319 | #endif |
3320 | unknown_java_nmethod_stats.print_nmethod_stats("Unknown" ); |
3321 | DebugInformationRecorder::print_statistics(); |
3322 | #ifndef PRODUCT |
3323 | pc_nmethod_stats.print_pc_stats(); |
3324 | #endif |
3325 | Dependencies::print_statistics(); |
3326 | if (xtty != NULL) xtty->tail("statistics" ); |
3327 | } |
3328 | |
3329 | #endif // !PRODUCT |
3330 | |
3331 | #if INCLUDE_JVMCI |
3332 | void nmethod::update_speculation(JavaThread* thread) { |
3333 | jlong speculation = thread->pending_failed_speculation(); |
3334 | if (speculation != 0) { |
3335 | guarantee(jvmci_nmethod_data() != NULL, "failed speculation in nmethod without failed speculation list" ); |
3336 | jvmci_nmethod_data()->add_failed_speculation(this, speculation); |
3337 | thread->set_pending_failed_speculation(0); |
3338 | } |
3339 | } |
3340 | |
3341 | const char* nmethod::jvmci_name() { |
3342 | if (jvmci_nmethod_data() != NULL) { |
3343 | return jvmci_nmethod_data()->name(); |
3344 | } |
3345 | return NULL; |
3346 | } |
3347 | #endif |
3348 | |