/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_COMPILEDIC_HPP
#define SHARE_CODE_COMPILEDIC_HPP

#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointVerifiers.hpp"

//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//           [1]     --<--     Clean     -->---     [1]
//          /                  (null)                  \
//         /                                            \       /-<-\
//        /   [2]                                         \    /     \
//   Interpreted  --------->  Monomorphic                    |   [3]
//  (CompiledICHolder*)          (Klass*)                    |
//          \                                            /    \     /
//       [4] \                                          / [4]  \->-/
//            \->-------  Megamorphic  -------<-/
//                     (CompiledICHolder*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. The receiver is found from the debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only the entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
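// A minimal sketch (illustrative pseudo-assembly; the register and label names are not from any
// particular platform) of the call site this class patches. The inline cache consists of a load
// of the cached value plus the call itself, and the transitions above patch one or both:
//
//     mov   reg, <cached value>     // NULL when clean, Klass* or CompiledICHolder* otherwise
//     call  <entry>                 // resolve stub, compiled entry point, or megamorphic stub
//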
class CompiledIC;
class CompiledICProtectionBehaviour;
class CompiledMethod;
class ICStub;

class CompiledICLocker: public StackObj {
  CompiledMethod* _method;
  CompiledICProtectionBehaviour* _behaviour;
  bool _locked;
  NoSafepointVerifier _nsv;

 public:
  CompiledICLocker(CompiledMethod* method);
  ~CompiledICLocker();
  static bool is_safe(CompiledMethod* method);
  static bool is_safe(address code);
};
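
// A minimal usage sketch (the surrounding variables are hypothetical): hold a CompiledICLocker
// for the whole inspect-and-patch sequence, so the is_xxx query and the transition observe and
// mutate the inline cache under the same protection.
//
//     CompiledICLocker ml(cm);                 // cm: the CompiledMethod owning the call site
//     CompiledIC* ic = CompiledIC_at(&iter);   // iter: RelocIterator positioned at the call
//     if (!ic->is_clean()) {
//       ic->set_to_clean();
//     }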

class CompiledICInfo : public StackObj {
 private:
  address _entry;            // entry point for call
  void*   _cached_value;     // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;      // Is the cached value a CompiledICHolder*
  bool    _is_optimized;     // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;   // Call to the interpreter
  bool    _to_aot;           // Call to aot code
  bool    _release_icholder;
 public:
  address entry() const { return _entry; }
  Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder* claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool is_optimized() const { return _is_optimized; }
  bool to_interpreter() const { return _to_interpreter; }
  bool to_aot() const { return _to_aot; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry = entry;
    _cached_value = (void*)klass;
    _to_interpreter = false;
    _to_aot = false;
    _is_icholder = false;
    _is_optimized = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry = entry;
    _cached_value = (void*)method;
    _to_interpreter = true;
    _to_aot = false;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  void set_aot_entry(address entry, Method* method) {
    _entry = entry;
    _cached_value = (void*)method;
    _to_interpreter = false;
    _to_aot = true;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry = entry;
    _cached_value = (void*)icholder;
    _to_interpreter = true;
    _to_aot = false;
    _is_icholder = true;
    _is_optimized = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _is_optimized(false), _to_interpreter(false), _to_aot(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};

class NativeCallWrapper: public ResourceObj {
 public:
  virtual address destination() const = 0;
  virtual address instruction_address() const = 0;
  virtual address next_instruction_address() const = 0;
  virtual address return_address() const = 0;
  virtual address get_resolve_call_stub(bool is_optimized) const = 0;
  virtual void set_destination_mt_safe(address dest) = 0;
  virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) = 0;
  virtual void verify() const = 0;
  virtual void verify_resolve_call(address dest) const = 0;

  virtual bool is_call_to_interpreted(address dest) const = 0;
  virtual bool is_safe_for_patching() const = 0;

  virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const = 0;

  virtual void *get_data(NativeInstruction* instruction) const = 0;
  virtual void set_data(NativeInstruction* instruction, intptr_t data) = 0;
};

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;

 private:
  NativeCallWrapper* _call;
  NativeInstruction* _value;         // patchable value cell for this IC
  bool               _is_optimized;  // an optimized virtual call (i.e., no compiled IC)
  CompiledMethod*    _method;

  CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline-cache. These change the underlying inline cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no transition stub
  // is associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  static bool is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  void* get_data() const {
    return _call->get_data(_value);
  }

  void set_data(intptr_t data) {
    _call->set_data(_value, data);
  }

  address ic_destination() const;

  bool is_optimized() const { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call is_xxx when holding the
  // CompiledIC_lock, so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  bool set_to_clean(bool in_use = true);
  bool set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails, or ic stub refill is required.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, TRAPS);

  static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass,
                                        bool is_optimized, bool static_bound, bool caller_is_nmethod,
                                        CompiledICInfo& info, TRAPS);
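
  // A hedged sketch of the intended pairing (caller-side variables such as receiver_klass and
  // caller_is_nmethod are illustrative): compute a CompiledICInfo first, then apply it, checking
  // whether the transition could actually be made.
  //
  //     CompiledICInfo info;
  //     CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
  //                                           /*is_optimized*/ false, /*static_bound*/ false,
  //                                           caller_is_nmethod, info, CHECK);
  //     if (!ic->set_to_monomorphic(info)) {
  //       // could not transition now (e.g. an IC stub could not be allocated); the caller may retry
  //     }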

  // Location
  address instruction_address() const { return _call->instruction_address(); }

  // Misc
  void print() PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};

inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
         reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}
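
// A hedged sketch (the iteration variables are illustrative) of how the converters above are
// typically reached: walk the relocations of a CompiledMethod and hand matching call sites to
// CompiledIC_at.
//
//     RelocIterator iter(cm);        // cm: the CompiledMethod being scanned
//     while (iter.next()) {
//       if (iter.type() == relocInfo::virtual_call_type ||
//           iter.type() == relocInfo::opt_virtual_call_type) {
//         CompiledIC* ic = CompiledIC_at(&iter);
//         // inspect or patch ic under a CompiledICLocker
//       }
//     }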

//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than that of an inline cache:
//
//
//            -----<----- Clean ----->-----
//           /                              \
//          /                                \
//   compiled code  <------------>  interpreted code
//
// Clean:            Calls directly to runtime method for fixup
// Compiled code:    Calls directly to compiled code
// Interpreted code: Calls to a stub that sets the Method* reference
//
//

class StaticCallInfo {
 private:
  address      _entry;           // Entrypoint
  methodHandle _callee;          // Callee (used when calling interpreter)
  bool         _to_interpreter;  // call to interpreted method (otherwise compiled)
  bool         _to_aot;          // call to aot method (otherwise compiled)

  friend class CompiledStaticCall;
  friend class CompiledDirectStaticCall;
  friend class CompiledPltStaticCall;
 public:
  address      entry() const  { return _entry;  }
  methodHandle callee() const { return _callee; }
};

class CompiledStaticCall : public ResourceObj {
 public:
  // Code
  static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
  static int to_interp_stub_size();
  static int to_trampoline_stub_size();
  static int reloc_to_interp_stub();
  static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
  static int to_aot_stub_size();
  static int reloc_to_aot_stub();

  // Compute entry point given a method
  static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info);

 public:
  // Returns the current destination of the call
  virtual address destination() const = 0;

  // Clean static call (will force resolving on next use)
  bool set_to_clean(bool in_use = true);

  // Set state. The entry must be the same, as computed by compute_entry.
  // Computation and setting is split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);
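
  // A hedged sketch of that split (the call-site variables are illustrative): compute the entry
  // once, then apply it to the static call site found at the relocation.
  //
  //     StaticCallInfo info;
  //     CompiledStaticCall::compute_entry(callee_method, caller_is_nmethod, info);
  //     CompiledStaticCall* csc = CompiledDirectStaticCall::at(call_site);
  //     csc->set(info);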

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  virtual bool is_call_to_interpreted() const = 0;

  virtual address instruction_address() const = 0;
 protected:
  virtual address resolve_call_stub() const = 0;
  virtual void set_destination_mt_safe(address dest) = 0;
#if INCLUDE_AOT
  virtual void set_to_far(const methodHandle& callee, address entry) = 0;
#endif
  virtual void set_to_interpreted(const methodHandle& callee, address entry) = 0;
  virtual const char* name() const = 0;

  void set_to_compiled(address entry);
};

class CompiledDirectStaticCall : public CompiledStaticCall {
 private:
  friend class CompiledIC;
  friend class DirectNativeCallWrapper;

  // Also used by CompiledIC
  void set_to_interpreted(const methodHandle& callee, address entry);
#if INCLUDE_AOT
  void set_to_far(const methodHandle& callee, address entry);
#endif
  address instruction_address() const { return _call->instruction_address(); }
  void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }

  NativeCall* _call;

  CompiledDirectStaticCall(NativeCall* call) : _call(call) {}

 public:
  static inline CompiledDirectStaticCall* before(address return_addr) {
    CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_before(return_addr));
    st->verify();
    return st;
  }

  static inline CompiledDirectStaticCall* at(address native_call) {
    CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_at(native_call));
    st->verify();
    return st;
  }

  static inline CompiledDirectStaticCall* at(Relocation* call_site) {
    return at(call_site->addr());
  }

  // Delegation
  address destination() const { return _call->destination(); }

  // State
  virtual bool is_call_to_interpreted() const;
  bool is_call_to_far() const;

  // Stub support
  static address find_stub_for(address instruction, bool is_aot);
  address find_stub(bool is_aot);
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print() PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;

 protected:
  virtual address resolve_call_stub() const;
  virtual const char* name() const { return "CompiledDirectStaticCall"; }
};

#endif // SHARE_CODE_COMPILEDIC_HPP