/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_COMPILEDMETHOD_HPP
#define SHARE_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;
class ScopeDesc;
class CompiledIC;
class MetadataClosure;

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, "index out of bounds"); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, "index out of bounds"); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass* exception_type()                           { return _exception_type; }
  ExceptionCache* next();
  void set_next(ExceptionCache *ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void set_purge_list_next(ExceptionCache *ec)      { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
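
// Illustrative sketch (not part of the original header; 'ec', 'exception',
// 'pc' and 'computed_handler' are hypothetical): the per-entry protocol as
// implied by the declarations above. Readers probe lock-free; a writer,
// holding the lock, appends a pc/handler pair and publishes it via the
// _count increment.
//
//   address handler = ec->match(exception, pc);            // fast path: already cached?
//   if (handler == NULL && ec->match_exception_with_space(exception)) {
//     ec->add_address_and_handler(pc, computed_handler);   // slow path: cache it
//   }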

class nmethod;

// cache pc descs found in earlier inquiries
class PcDescCache {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, a C++
  // compiler (namely xlC 12) has been observed to duplicate the field
  // accesses, causing find_pc_desc_internal to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
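
// Illustrative sketch (not part of the original header; 'cache', 'offset'
// and search_full_pc_desc_table are hypothetical): the cache holds the last
// few PcDescs found, so a lookup probes it before falling back to a search
// of the full PcDesc table, then remembers the result.
//
//   PcDesc* d = cache.find_pc_desc(offset, /*approximate*/ false);
//   if (d == NULL) {
//     d = search_full_pc_desc_table(offset);  // hypothetical slow path
//     cache.add_pc_desc(d);                   // remember it for the next inquiry
//   }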

class PcDescSearch {
 private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
 public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const       { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const   { return _upper; }
};

class PcDescContainer {
 private:
  PcDescCache _pc_desc_cache;
 public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};
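
// Illustrative sketch (not part of the original header; 'nm', 'pc' and
// 'container' are hypothetical): a caller describes its PcDesc window with
// a PcDescSearch so the container can service the lookup, hitting the
// one-element fast path above when the same pc is queried repeatedly.
//
//   PcDescSearch search(nm->code_begin(), nm->scopes_pcs_begin(), nm->scopes_pcs_end());
//   PcDesc* d = container.find_pc_desc(pc, /*approximate*/ true, search);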

class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
 protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from CodeCache.
                     // Have to use far call instructions to call it from code in CodeCache.

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  Method* _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at the location described by
  // this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache* volatile _exception_cache;

  void* _gc_data;

  virtual void flush() = 0;
 protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);

 public:
  // Only used by unit tests.
  CompiledMethod() {}

  virtual bool is_compiled() const { return true; }

  template<typename T>
  T* gc_data() const           { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }

  bool has_unsafe_access() const             { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z)         { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool is_lazy_critical_native() const       { return _lazy_critical_native; }
  void set_lazy_critical_native(bool z)      { _lazy_critical_native = z; }

  bool has_wide_vectors() const              { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z)          { _has_wide_vectors = z; }

  enum { not_installed = -1, // in construction, only the owner doing the construction is
                             // allowed to advance state
         in_use        = 0,  // executable nmethod
         not_used      = 1,  // not entrant, but revivable
         not_entrant   = 2,  // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie        = 3,  // no activations exist, nmethod is ready for purge
         unloaded      = 4   // there should be no activations, should not be called,
                             // will be transformed to zombie immediately
  };
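
  // Illustrative sketch (not part of the original header): the life cycle
  // implied by the comments above.
  //
  //   not_installed -> in_use -> {not_used | not_entrant} -> zombie -> (purged)
  //   unloaded -> zombie (immediately; no activations are expected)
  //
  // A typical guard (hypothetical 'nm'):
  //   if (nm->get_state() == in_use) { /* safe to dispatch to entry_point() */ }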
| 215 | |
| 216 | virtual bool is_in_use() const = 0; |
| 217 | virtual int comp_level() const = 0; |
| 218 | virtual int compile_id() const = 0; |
| 219 | |
| 220 | virtual address verified_entry_point() const = 0; |
| 221 | virtual void log_identity(xmlStream* log) const = 0; |
| 222 | virtual void log_state_change() const = 0; |
| 223 | virtual bool make_not_used() = 0; |
| 224 | virtual bool make_not_entrant() = 0; |
| 225 | virtual bool make_entrant() = 0; |
| 226 | virtual address entry_point() const = 0; |
| 227 | virtual bool make_zombie() = 0; |
| 228 | virtual bool is_osr_method() const = 0; |
| 229 | virtual int osr_entry_bci() const = 0; |
| 230 | Method* method() const { return _method; } |
| 231 | virtual void print_pcs() = 0; |
| 232 | bool is_native_method() const { return _method != NULL && _method->is_native(); } |
| 233 | bool is_java_method() const { return _method != NULL && !_method->is_native(); } |
| 234 | |
| 235 | // ScopeDesc retrieval operation |
| 236 | PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); } |
| 237 | // pc_desc_near returns the first PcDesc at or after the given pc. |
| 238 | PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); } |
| 239 | |
| 240 | // ScopeDesc for an instruction |
| 241 | ScopeDesc* scope_desc_at(address pc); |
| 242 | ScopeDesc* scope_desc_near(address pc); |
| 243 | |
| 244 | bool is_at_poll_return(address pc); |
| 245 | bool is_at_poll_or_poll_return(address pc); |
| 246 | |
| 247 | bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; } |
| 248 | void mark_for_deoptimization(bool inc_recompile_counts = true) { |
| 249 | _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate); |
| 250 | } |
| 251 | bool update_recompile_counts() const { |
| 252 | // Update recompile counts when either the update is explicitly requested (deoptimize) |
| 253 | // or the nmethod is not marked for deoptimization at all (not_marked). |
| 254 | // The latter happens during uncommon traps when deoptimized nmethod is made not entrant. |
| 255 | return _mark_for_deoptimization_status != deoptimize_noupdate; |
| 256 | } |
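
  // Illustrative sketch (not part of the original header; 'nm' is
  // hypothetical): marking without bumping recompile counts, for
  // deoptimizations that should not influence the compilation policy.
  //
  //   nm->mark_for_deoptimization(/*inc_recompile_counts*/ false);
  //   assert(!nm->update_recompile_counts(), "noupdate mark suppresses counting");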

  static bool nmethod_access_is_safe(nmethod* nm);

  // Tells whether frames described by this nmethod can be deoptimized.
  // Note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const   { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const           { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }

  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
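
  // Illustrative sketch (not part of the original header; 'nm' and 'addr'
  // are hypothetical): classifying an address against the blob's sections
  // using the half-open [begin, end) predicates above.
  //
  //   if      (nm->insts_contains(addr))  { /* generated instructions */ }
  //   else if (nm->stub_contains(addr))   { /* stub section */ }
  //   else if (nm->consts_contains(addr)) { /* constants section */ }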

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;
  virtual void set_original_pc(const frame* fr, address pc) = 0;

 protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }

 public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);
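
  // Illustrative sketch (not part of the original header; 'nm', 'exception',
  // 'pc' and search_exception_handler_table are hypothetical): the intended
  // fast path during exception dispatch. Only on a cache miss is the handler
  // table searched, and the result is then cached for subsequent throws.
  //
  //   address handler = nm->handler_for_exception_and_pc(exception, pc);
  //   if (handler == NULL) {
  //     handler = search_exception_handler_table(nm, exception, pc);
  //     nm->add_handler_for_exception_and_pc(exception, pc, handler);
  //   }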

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  virtual address get_original_pc(const frame* fr) = 0;
  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  inline bool is_deopt_entry(address pc);

  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
  address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
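
  // Illustrative sketch (not part of the original header; 'nm' and
  // 'faulting_pc' are hypothetical): on an implicit exception (e.g. SIGSEGV
  // from a compiled null check, SIGFPE from an integer division) the signal
  // handler asks the compiled method where execution should continue.
  //
  //   address cont = nm->continuation_for_implicit_null_exception(faulting_pc);
  //   if (cont != NULL) { /* resume at 'cont', which raises the Java exception */ }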

  static address get_deopt_original_pc(const frame* fr);

  // Inline cache support for class unloading and nmethod unloading
 private:
  bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

  address continuation_for_implicit_exception(address pc, bool for_div0_check);

 public:
  // Serial version used by sweeper and whitebox test
  void cleanup_inline_caches(bool clean_all);

  virtual void clear_inline_caches();
  void clear_ic_callsites();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  bool has_evol_metadata();

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(MetadataClosure* f) = 0;

  // GC support
 protected:
  address oops_reloc_begin() const;

 private:
  static bool clean_ic_if_metadata_is_dead(CompiledIC *ic);

 public:
  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  virtual bool is_unloading() = 0;

  bool unload_nmethod_caches(bool class_unloading_occurred);
  virtual void do_unloading(bool unloading_occurred) = 0;

 private:
  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }
};

#endif // SHARE_CODE_COMPILEDMETHOD_HPP