/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP

#include "code/compiledMethod.hpp"

class DepChange;
class DirectiveSet;
class DebugInformationRecorder;

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
//  [Speculations]
//  - encoded speculations array
//  [JVMCINMethodData]
//  - meta data for JVMCI compiled nmethod
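//
// All parts are laid out contiguously after the header, and the _*_offset
// fields below record where each part starts relative to header_begin().
// As an illustrative sketch (mirroring the boundary accessors declared
// later in this class), a section boundary is computed as
//
//   address stub_begin() const { return header_begin() + _stub_offset; }
//
// so the size of a part is simply the difference of two adjacent boundaries.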

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;
 private:

  // Shared fields for all nmethods
  int _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id; // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod* volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;          // entry point with class check
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point;      // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;     // offset to where embedded oop table begins (inside data)
  int _metadata_offset; // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
#if INCLUDE_JVMCI
  int _speculations_offset;
  int _jvmci_data_offset;
#endif
  int _nmethod_end_offset;

  int code_offset() const { return (address) code_begin() - header_begin(); }

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id; // which compilation made this nmethod
  int _comp_level; // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // Protected by Patching_lock
  volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif
#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  volatile long _stack_traversal_mark;
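  // Illustrative example of the mechanism above: if the sweeper's traversal
  // count is 17 when an activation of this nmethod is found on a stack,
  // _stack_traversal_mark becomes 17; once the traversal index has advanced
  // past 17 and no activations remain, the not_entrant nmethod can be flushed.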

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;
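  // For example (illustrative): with ReservedCodeCacheSize = 240M (the
  // default when tiered compilation is enabled), the counter is reset to
  // 240 * 2 = 480 on each stack scan and then decays by one per sweep.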

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , char* speculations,
          int speculations_len,
          int jvmci_data_size
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();
  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const { return content_begin() - header_begin(); }
  int data_offset() const    { return _data_offset; }

  address header_end() const { return (address) header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , char* speculations = NULL,
                              int speculations_len = 0,
                              int nmethod_mirror_index = -1,
                              const char* nmethod_mirror_name = NULL,
                              FailedSpeculation** failed_speculations = NULL
#endif
                              );

  // Only used for unit tests.
  nmethod()
    : CompiledMethod(),
      _is_unloading_state(0),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1)) {}


  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

  // type info
  bool is_nmethod() const    { return true; }
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin        () const { return header_begin() + _consts_offset; }
  address consts_end          () const { return code_begin(); }
  address stub_begin          () const { return header_begin() + _stub_offset; }
  address stub_end            () const { return header_begin() + _oops_offset; }
  address exception_begin     () const { return header_begin() + _exception_offset; }
  address unwind_handler_begin() const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin          () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end            () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin   () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end     () const { return (Metadata**) _scopes_data_begin; }

  address scopes_data_end     () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin    () const { return (PcDesc*) (header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end      () const { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin  () const { return header_begin() + _dependencies_offset; }
  address dependencies_end    () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin () const { return header_begin() + _handler_table_offset; }
  address handler_table_end   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset; }
#if INCLUDE_JVMCI
  address nul_chk_table_end   () const { return header_begin() + _speculations_offset; }
  address speculations_begin  () const { return header_begin() + _speculations_offset; }
  address speculations_end    () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_begin    () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_end      () const { return header_begin() + _nmethod_end_offset; }
#else
  address nul_chk_table_end   () const { return header_begin() + _nmethod_end_offset; }
#endif

  // Sizes
  int oops_size        () const { return (address) oops_end() - (address) oops_begin(); }
  int metadata_size    () const { return (address) metadata_end() - (address) metadata_begin(); }
  int dependencies_size() const { return dependencies_end() - dependencies_begin(); }
#if INCLUDE_JVMCI
  int speculations_size() const { return speculations_end() - speculations_begin(); }
  int jvmci_data_size  () const { return jvmci_data_end() - jvmci_data_begin(); }
#endif

  int oops_count() const     { assert(oops_size() % oopSize == 0, "");      return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size() const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool oops_contains        (oop* addr)       const { return oops_begin() <= addr && addr < oops_end(); }
  bool metadata_contains    (Metadata** addr) const { return metadata_begin() <= addr && addr < metadata_end(); }
  bool scopes_data_contains (address addr)    const { return scopes_data_begin() <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains  (PcDesc* addr)    const { return scopes_pcs_begin() <= addr && addr < scopes_pcs_end(); }

  // entry points
  address entry_point() const          { return _entry_point; }          // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use() const        { return _state <= in_use; }
  bool is_alive() const         { return _state < zombie; }
  bool is_not_entrant() const   { return _state == not_entrant; }
  bool is_zombie() const        { return _state == zombie; }
  bool is_unloaded() const      { return _state == unloaded; }
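  // Note (for clarity): the predicates above rely on the numeric ordering of
  // the state values listed at the _state declaration (not_installed < in_use
  // < not_entrant < zombie < unloaded); e.g. is_alive() holds for
  // not_installed, in_use and not_entrant nmethods.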

  void clear_unloading_state();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  void make_in_use() { _state = in_use; }
  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_not_used() { return make_not_entrant(); }
  bool make_zombie()   { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  int get_state() const {
    return _state;
  }

  void make_unloaded();

  bool has_dependencies() { return dependencies_size() != 0; }
  void print_dependencies() PRODUCT_RETURN;
  void flush_dependencies(bool delete_immediately);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int comp_level() const { return _comp_level; }

  void unlink_from_method(bool acquire_lock);

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const;
  oop  oop_at_phantom(int index) const; // phantom reference
  oop* oop_addr_at(int index) const {   // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata* metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const { // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }
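  // Illustrative example of the 1-biased indexing used above: oop_addr_at(1)
  // and metadata_addr_at(1) return the first slot of their respective
  // embedded tables (oops_begin()[0] and metadata_begin()[0]); index 0 never
  // maps to a slot because it is reserved for null.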

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // On-stack replacement support
  int  osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const  { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod* n) { _osr_link = n; }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-NULL value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
  }
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_dead);

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print() const;
  void print(outputStream* st) const;
  void print_code();

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations() PRODUCT_RETURN;
  void print_pcs() { print_pcs_on(tty); }
  void print_pcs_on(outputStream* st);
  void print_scopes() { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st) PRODUCT_RETURN;
  void print_value_on(outputStream* st) const;
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  void print_pcs() { return; }
#endif

  void print_calls(outputStream* st) PRODUCT_RETURN;
  static void print_statistics() PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // Need to re-define this from CodeBlob, else the overload hides it.
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod-specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
#endif
  }
  bool has_block_comment(address block_begin) {
    return CodeBlob::has_block_comment(block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels = true) const;
  const char* nmethod_section_label(address pos) const;

  // Returns whether this nmethod has code comments.
  bool has_code_comment(address begin, address end);
  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                { return offset_of(nmethod, _state); }

  virtual void metadata_do(MetadataClosure* f);

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
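//
// A typical (illustrative) use is scoped RAII locking:
//
//   {
//     nmethodLocker nl(pc);  // finds and locks the nmethod containing pc
//     // ... use nl.code() safely; it cannot be flushed or zombied ...
//   }                        // unlocked by the destructor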
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod* nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method) {
    if (method == NULL) return;
    lock_nmethod(method);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm) {
    unlock(_nm); // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm);
  }
};

#endif // SHARE_CODE_NMETHOD_HPP