/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is accessed, either the
// CompiledIC_lock must be held or we must be at a safepoint.

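// Takes the IC protection lock for the given compiled method via the current
// CompiledICProtectionBehaviour, and remembers whether the lock was actually
// taken so that the destructor only unlocks in that case.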
CompiledICLocker::CompiledICLocker(CompiledMethod* method)
  : _method(method),
    _behaviour(CompiledICProtectionBehaviour::current()),
    _locked(_behaviour->lock(_method)),
    _nsv(true, !SafepointSynchronize::is_at_safepoint()) {
}

CompiledICLocker::~CompiledICLocker() {
  if (_locked) {
    _behaviour->unlock(_method);
  }
}

bool CompiledICLocker::is_safe(CompiledMethod* method) {
  return CompiledICProtectionBehaviour::current()->is_safe(method);
}

bool CompiledICLocker::is_safe(address code) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(code);
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
  CompiledMethod* cm = cb->as_compiled_method();
  return CompiledICProtectionBehaviour::current()->is_safe(cm);
}

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

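// Returns the raw cached value of the inline cache, translating the non-oop
// sentinel back to NULL. While the IC is in a transition state, the value is
// read from the pending ICStub rather than from the call site itself.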
void* CompiledIC::cached_value() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


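// Patches the call destination MT-safely and, unless this is an optimized
// call site or an ICStub transition, also updates the cached value slot.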
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert(cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test, since that forwards
  // through the ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case, the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point, since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


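// Returns the destination the inline cache will eventually call: the raw call
// destination, or, while in a transition state, the destination recorded in
// the pending ICStub.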
address CompiledIC::ic_destination() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


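// An IC is in a transition state while its current destination points into
// the InlineCacheBuffer, i.e. an ICStub still has to be copied into place.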
bool CompiledIC::is_in_transition_state() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return InlineCacheBuffer::contains(_call->destination());
}


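// True for a non-optimized IC whose current destination expects a
// CompiledICHolder* as its cached argument (a c2i adapter or an itable stub).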
bool CompiledIC::is_icholder_call() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in the inline cache. Used
// by the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in a transition state.
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

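// Determines from the relocation at the call site whether this is a normal
// virtual call (with a cached-value load) or an optimized virtual call, and
// initializes _is_optimized and _value accordingly.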
void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

// This function may fail for two reasons: either due to running out of vtable
// stubs, or due to running out of IC stubs in an attempted transition to a
// transitional state. The needs_ic_stub_refill value will be set if the failure
// was due to running out of IC stubs, in which case the caller will refill IC
// stubs and retry.
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
                                    bool& needs_ic_stub_refill, TRAPS) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
      delete holder;
      needs_ic_stub_refill = true;
      return false;
    }
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      needs_ic_stub_refill = true;
      return false;
    }
  }

  if (TraceICs) {
    ResourceMark rm;
    assert(!call_info->selected_method().is_null(), "Unexpected null selected method");
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// True if the destination is a megamorphic (vtable or itable) stub.
bool CompiledIC::is_megamorphic() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::entry_point(ic_destination()) != NULL;
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub)
  // can be used for calling directly to the verified entry point without using the inline cache
  // (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could
  // be optimized virtuals (because there are no currently loaded subclasses of a type) are left
  // as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert(is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}

bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // This is a call to the interpreter if the destination is either a stub
  // (for an optimized call) or an I2C adapter blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use an unsafe lookup because the destination can be a zombie (and we're cleaning),
    // and the print_compiled_ic code wants to know whether the site (in the non-zombie case)
    // calls into the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

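// Resets the IC to the clean state: destination set to the resolve stub and,
// for non-optimized calls, the cached value cleared. Returns false if the
// transition needed an ICStub but the InlineCacheBuffer was out of space, so
// that the caller can refill IC stubs and retry.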
bool CompiledIC::set_to_clean(bool in_use) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      return false;
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
  return true;
}

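// An IC is clean when its destination is the resolve stub for this kind of
// call; a clean non-optimized IC has no cached value.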
bool CompiledIC::is_clean() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

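// Transitions the IC to the monomorphic state described by 'info', either
// directly (when the transition is MT-safe) or via an ICStub. Returns false
// if an ICStub was needed but the InlineCacheBuffer was out of space, so that
// the caller can refill IC stubs and retry.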
bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, the ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target, and these
  // transitions are mt_safe.

  Thread *thread = Thread::current();
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      // The call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or because the static target is final).
      // At code generation time, this call has been emitted as a static call,
      // so we call via the stub.
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method(thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
                      p2i(instruction_address()),
                      (info.to_aot() ? "aot" : "interpreter"),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      CompiledICHolder* holder = info.claim_cached_icholder();
      if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
        delete holder;
        return false;
      }
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
        return false;
      }
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass = %s) %s",
                    p2i(instruction_address()),
                    (info.cached_metadata() != NULL) ? ((Klass*)info.cached_metadata())->print_value_string() : "NULL",
                    (safe) ? "" : " via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
  return true;
}


// is_optimized: the compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: the call can be statically bound. If it isn't also optimized, the property
// wasn't provable at the time of compilation. An optimized call will have any necessary
// null check, while a static_bound call won't. A static_bound (but not optimized) call must
// therefore use the unverified entry point.
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - if we notice here that the call is static bound, we convert it into
    //     what looks like an optimized virtual call, but we must use the
    //     unverified entry point (since there will be no null check on a call
    //     when the target isn't loaded).
    //     This causes problems when verifying the IC because it looks vanilla
    //     but is optimized. Code in is_call_to_interpreted is aware of this
    //     and weakens its asserts.
    if (is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
  if (entry != NULL && !far_c2a) {
    // Call to near compiled code (nmethod or aot).
    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      if (far_c2a) {
        // Call to aot code from nmethod.
        info.set_aot_entry(entry, method());
      } else {
        // Use stub entry
        info.set_interpreter_entry(method()->get_c2i_entry(), method());
      }
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


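// Returns true if the given entry expects a CompiledICHolder* as its cached
// argument: either a c2i adapter entry (call to interpreted code) or an
// itable stub.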
bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (cb != NULL && cb->is_vtable_blob()) {
    VtableStub* s = VtableStubs::entry_point(entry);
    return (s != NULL) && s->is_itable_stub();
  }

  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// ----------------------------------------------------------------------------

bool CompiledStaticCall::set_to_clean(bool in_use) {
  // in_use is unused but needed to match template function in CompiledMethod
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Reset call site
  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
  return true;
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

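// A static call is considered a call to compiled code when its destination
// lies inside the code cache.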
bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_far() const {
  // It is a call to an aot method if it calls a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CodeBlob* desc = CodeCache::find_blob(instruction_address());
  return desc->as_compiled_method()->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                  name(),
                  p2i(instruction_address()),
                  p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
  } else if (info._to_aot) {
    // Call to far code
    set_to_far(info.callee(), info.entry());
#endif
  } else {
    set_to_compiled(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    if (caller_is_nmethod && m_code->is_far_code()) {
      // Call to far aot code from nmethod.
      info._to_aot = true;
    } else {
      info._to_aot = false;
    }
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub(is_aot);
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub(is_aot);
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub(bool is_aot) {
  return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_far()) {
    tty->print("far");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT