1/*
2 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24#include "precompiled.hpp"
25#include "classfile/symbolTable.hpp"
26#include "compiler/compileBroker.hpp"
27#include "jvmci/jniAccessMark.inline.hpp"
28#include "jvmci/jvmciCompilerToVM.hpp"
29#include "jvmci/jvmciRuntime.hpp"
30#include "logging/log.hpp"
31#include "memory/oopFactory.hpp"
32#include "memory/universe.hpp"
33#include "oops/constantPool.inline.hpp"
34#include "oops/method.inline.hpp"
35#include "oops/objArrayKlass.hpp"
36#include "oops/oop.inline.hpp"
37#include "runtime/biasedLocking.hpp"
38#include "runtime/deoptimization.hpp"
39#include "runtime/fieldDescriptor.inline.hpp"
40#include "runtime/frame.inline.hpp"
41#include "runtime/sharedRuntime.hpp"
42#if INCLUDE_G1GC
43#include "gc/g1/g1ThreadLocalData.hpp"
44#endif // INCLUDE_G1GC
45
46// Simple helper to see if the caller of a runtime stub which
47// entered the VM has been deoptimized
48
49static bool caller_is_deopted() {
50 JavaThread* thread = JavaThread::current();
51 RegisterMap reg_map(thread, false);
52 frame runtime_frame = thread->last_frame();
53 frame caller_frame = runtime_frame.sender(&reg_map);
54 assert(caller_frame.is_compiled_frame(), "must be compiled");
55 return caller_frame.is_deoptimized_frame();
56}
57
58// Stress deoptimization
59static void deopt_caller() {
  if (!caller_is_deopted()) {
61 JavaThread* thread = JavaThread::current();
62 RegisterMap reg_map(thread, false);
63 frame runtime_frame = thread->last_frame();
64 frame caller_frame = runtime_frame.sender(&reg_map);
65 Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
66 assert(caller_is_deopted(), "Must be deoptimized");
67 }
68}
69
70// Manages a scope for a JVMCI runtime call that attempts a heap allocation.
71// If there is a pending exception upon closing the scope and the runtime
72// call is of the variety where allocation failure returns NULL without an
73// exception, the following action is taken:
74// 1. The pending exception is cleared
75// 2. NULL is written to JavaThread::_vm_result
// 3. If the pending exception is an OutOfMemoryError, it must be Universe::out_of_memory_error_retry().
77class RetryableAllocationMark: public StackObj {
78 private:
79 JavaThread* _thread;
80 public:
81 RetryableAllocationMark(JavaThread* thread, bool activate) {
82 if (activate) {
83 assert(!thread->in_retryable_allocation(), "retryable allocation scope is non-reentrant");
84 _thread = thread;
85 _thread->set_in_retryable_allocation(true);
86 } else {
87 _thread = NULL;
88 }
89 }
90 ~RetryableAllocationMark() {
91 if (_thread != NULL) {
92 _thread->set_in_retryable_allocation(false);
93 JavaThread* THREAD = _thread;
94 if (HAS_PENDING_EXCEPTION) {
95 oop ex = PENDING_EXCEPTION;
96 CLEAR_PENDING_EXCEPTION;
97 oop retry_oome = Universe::out_of_memory_error_retry();
98 if (ex->is_a(retry_oome->klass()) && retry_oome != ex) {
99 ResourceMark rm;
100 fatal("Unexpected exception in scope of retryable allocation: " INTPTR_FORMAT " of type %s", p2i(ex), ex->klass()->external_name());
101 }
102 _thread->set_vm_result(NULL);
103 }
104 }
105 }
106};
107
108JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance_common(JavaThread* thread, Klass* klass, bool null_on_fail))
109 JRT_BLOCK;
110 assert(klass->is_klass(), "not a class");
111 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
112 InstanceKlass* h = InstanceKlass::cast(klass);
113 {
114 RetryableAllocationMark ram(thread, null_on_fail);
115 h->check_valid_for_instantiation(true, CHECK);
116 oop obj;
117 if (null_on_fail) {
118 if (!h->is_initialized()) {
119 // Cannot re-execute class initialization without side effects
120 // so return without attempting the initialization
121 return;
122 }
123 } else {
124 // make sure klass is initialized
125 h->initialize(CHECK);
126 }
127 // allocate instance and return via TLS
128 obj = h->allocate_instance(CHECK);
129 thread->set_vm_result(obj);
130 }
131 JRT_BLOCK_END;
132 SharedRuntime::on_slowpath_allocation_exit(thread);
133JRT_END
134
135JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array_common(JavaThread* thread, Klass* array_klass, jint length, bool null_on_fail))
136 JRT_BLOCK;
137 // Note: no handle for klass needed since they are not used
138 // anymore after new_objArray() and no GC can happen before.
139 // (This may have to change if this code changes!)
140 assert(array_klass->is_klass(), "not a class");
141 oop obj;
142 if (array_klass->is_typeArray_klass()) {
143 BasicType elt_type = TypeArrayKlass::cast(array_klass)->element_type();
144 RetryableAllocationMark ram(thread, null_on_fail);
145 obj = oopFactory::new_typeArray(elt_type, length, CHECK);
146 } else {
147 Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
148 Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
149 RetryableAllocationMark ram(thread, null_on_fail);
150 obj = oopFactory::new_objArray(elem_klass, length, CHECK);
151 }
152 thread->set_vm_result(obj);
  // This path is rarely taken, but it is a stress point for deoptimization,
  // so when DeoptimizeALot is set, force a deopt here to exercise that path.
155 if (DeoptimizeALot) {
156 static int deopts = 0;
157 // Alternate between deoptimizing and raising an error (which will also cause a deopt)
158 if (deopts++ % 2 == 0) {
159 if (null_on_fail) {
160 return;
161 } else {
162 ResourceMark rm(THREAD);
163 THROW(vmSymbols::java_lang_OutOfMemoryError());
164 }
165 } else {
166 deopt_caller();
167 }
168 }
169 JRT_BLOCK_END;
170 SharedRuntime::on_slowpath_allocation_exit(thread);
171JRT_END
172
173JRT_ENTRY(void, JVMCIRuntime::new_multi_array_common(JavaThread* thread, Klass* klass, int rank, jint* dims, bool null_on_fail))
174 assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be at least 1");
176 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
177 RetryableAllocationMark ram(thread, null_on_fail);
178 oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
179 thread->set_vm_result(obj);
180JRT_END
181
182JRT_ENTRY(void, JVMCIRuntime::dynamic_new_array_common(JavaThread* thread, oopDesc* element_mirror, jint length, bool null_on_fail))
183 RetryableAllocationMark ram(thread, null_on_fail);
184 oop obj = Reflection::reflect_new_array(element_mirror, length, CHECK);
185 thread->set_vm_result(obj);
186JRT_END
187
188JRT_ENTRY(void, JVMCIRuntime::dynamic_new_instance_common(JavaThread* thread, oopDesc* type_mirror, bool null_on_fail))
189 InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(type_mirror));
190
191 if (klass == NULL) {
192 ResourceMark rm(THREAD);
193 THROW(vmSymbols::java_lang_InstantiationException());
194 }
195 RetryableAllocationMark ram(thread, null_on_fail);
196
197 // Create new instance (the receiver)
198 klass->check_valid_for_instantiation(false, CHECK);
199
200 if (null_on_fail) {
201 if (!klass->is_initialized()) {
202 // Cannot re-execute class initialization without side effects
203 // so return without attempting the initialization
204 return;
205 }
206 } else {
207 // Make sure klass gets initialized
208 klass->initialize(CHECK);
209 }
210
211 oop obj = klass->allocate_instance(CHECK);
212 thread->set_vm_result(obj);
213JRT_END
214
215extern void vm_exit(int code);
216
// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the VM. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return to is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob's
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
233JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, CompiledMethod*& cm))
234 // Reset method handle flag.
235 thread->set_is_method_handle_return(false);
236
237 Handle exception(thread, ex);
238 cm = CodeCache::find_compiled(pc);
239 assert(cm != NULL, "this is not a compiled method");
  // Adjust the pc as needed.
241 if (cm->is_deopt_pc(pc)) {
242 RegisterMap map(thread, false);
243 frame exception_frame = thread->last_frame().sender(&map);
244 // if the frame isn't deopted then pc must not correspond to the caller of last_frame
245 assert(exception_frame.is_deoptimized_frame(), "must be deopted");
246 pc = exception_frame.pc();
247 }
248#ifdef ASSERT
249 assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
250 assert(oopDesc::is_oop(exception()), "just checking");
251 // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
252 if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
253 if (ExitVMOnVerifyError) vm_exit(-1);
254 ShouldNotReachHere();
255 }
256#endif
257
258 // Check the stack guard pages and reenable them if necessary and there is
259 // enough space on the stack to do so. Use fast exceptions only if the guard
260 // pages are enabled.
261 bool guard_pages_enabled = thread->stack_guards_enabled();
262 if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
263
264 if (JvmtiExport::can_post_on_exceptions()) {
265 // To ensure correct notification of exception catches and throws
266 // we have to deoptimize here. If we attempted to notify the
267 // catches and throws during this exception lookup it's possible
268 // we could deoptimize on the way out of the VM and end back in
269 // the interpreter at the throw site. This would result in double
270 // notifications since the interpreter would also notify about
271 // these same catches and throws as it unwound the frame.
272
273 RegisterMap reg_map(thread);
274 frame stub_frame = thread->last_frame();
275 frame caller_frame = stub_frame.sender(&reg_map);
276
277 // We don't really want to deoptimize the nmethod itself since we
278 // can actually continue in the exception handler ourselves but I
279 // don't see an easy way to have the desired effect.
280 Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
281 assert(caller_is_deopted(), "Must be deoptimized");
282
283 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
284 }
285
286 // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
287 if (guard_pages_enabled) {
288 address fast_continuation = cm->handler_for_exception_and_pc(exception, pc);
289 if (fast_continuation != NULL) {
290 // Set flag if return address is a method handle call site.
291 thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
292 return fast_continuation;
293 }
294 }
295
296 // If the stack guard pages are enabled, check whether there is a handler in
297 // the current method. Otherwise (guard pages disabled), force an unwind and
298 // skip the exception cache update (i.e., just leave continuation==NULL).
299 address continuation = NULL;
300 if (guard_pages_enabled) {
301
302 // New exception handling mechanism can support inlined methods
303 // with exception handlers since the mappings are from PC to PC
304
305 // debugging support
306 // tracing
307 if (log_is_enabled(Info, exceptions)) {
308 ResourceMark rm;
309 stringStream tempst;
310 assert(cm->method() != NULL, "Unexpected null method()");
311 tempst.print("compiled method <%s>\n"
312 " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
313 cm->method()->print_value_string(), p2i(pc), p2i(thread));
314 Exceptions::log_exception(exception, tempst.as_string());
315 }
316 // for AbortVMOnException flag
317 NOT_PRODUCT(Exceptions::debug_check_abort(exception));
318
319 // Clear out the exception oop and pc since looking up an
320 // exception handler can cause class loading, which might throw an
321 // exception and those fields are expected to be clear during
322 // normal bytecode execution.
323 thread->clear_exception_oop_and_pc();
324
325 bool recursive_exception = false;
326 continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false, recursive_exception);
327 // If an exception was thrown during exception dispatch, the exception oop may have changed
328 thread->set_exception_oop(exception());
329 thread->set_exception_pc(pc);
330
331 // The exception cache is used only for non-implicit exceptions
332 // Update the exception cache only when another exception did
333 // occur during the computation of the compiled exception handler
334 // (e.g., when loading the class of the catch type).
335 // Checking for exception oop equality is not
336 // sufficient because some exceptions are pre-allocated and reused.
337 if (continuation != NULL && !recursive_exception && !SharedRuntime::deopt_blob()->contains(continuation)) {
338 cm->add_handler_for_exception_and_pc(exception, pc, continuation);
339 }
340 }
341
342 // Set flag if return address is a method handle call site.
343 thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
344
345 if (log_is_enabled(Info, exceptions)) {
346 ResourceMark rm;
347 log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
348 " for exception thrown at PC " PTR_FORMAT,
349 p2i(thread), p2i(continuation), p2i(pc));
350 }
351
352 return continuation;
353JRT_END
354
355// Enter this method from compiled code only if there is a Java exception handler
356// in the method handling the exception.
357// We are entering here from exception stub. We don't do a normal VM transition here.
358// We do it in a helper. This is so we can check to see if the nmethod we have just
359// searched for an exception handler has been deoptimized in the meantime.
360address JVMCIRuntime::exception_handler_for_pc(JavaThread* thread) {
361 oop exception = thread->exception_oop();
362 address pc = thread->exception_pc();
363 // Still in Java mode
364 DEBUG_ONLY(ResetNoHandleMark rnhm);
365 CompiledMethod* cm = NULL;
366 address continuation = NULL;
367 {
368 // Enter VM mode by calling the helper
369 ResetNoHandleMark rnhm;
370 continuation = exception_handler_for_pc_helper(thread, exception, pc, cm);
371 }
372 // Back in JAVA, use no oops DON'T safepoint
373
374 // Now check to see if the compiled method we were called from is now deoptimized.
375 // If so we must return to the deopt blob and deoptimize the nmethod
376 if (cm != NULL && caller_is_deopted()) {
377 continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
378 }
379
380 assert(continuation != NULL, "no handler found");
381 return continuation;
382}
383
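// Slow-path monitor enter called from JVMCI compiled code.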
384JRT_ENTRY_NO_ASYNC(void, JVMCIRuntime::monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
385 IF_TRACE_jvmci_3 {
386 char type[O_BUFLEN];
387 obj->klass()->name()->as_C_string(type, O_BUFLEN);
388 markOop mark = obj->mark();
389 TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(mark), p2i(lock));
390 tty->flush();
391 }
392 if (PrintBiasedLockingStatistics) {
393 Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
394 }
395 Handle h_obj(thread, obj);
  assert(oopDesc::is_oop(h_obj()), "must be an object");
397 if (UseBiasedLocking) {
398 // Retry fast entry if bias is revoked to avoid unnecessary inflation
399 ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
400 } else {
401 if (JVMCIUseFastLocking) {
402 // When using fast locking, the compiled code has already tried the fast case
403 ObjectSynchronizer::slow_enter(h_obj, lock, THREAD);
404 } else {
405 ObjectSynchronizer::fast_enter(h_obj, lock, false, THREAD);
406 }
407 }
408 TRACE_jvmci_3("%s: exiting locking slow with obj=" INTPTR_FORMAT, thread->name(), p2i(obj));
409JRT_END
410
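// Slow-path monitor exit called from JVMCI compiled code. This is a leaf
// routine, so it must not block and cannot throw exceptions.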
411JRT_LEAF(void, JVMCIRuntime::monitorexit(JavaThread* thread, oopDesc* obj, BasicLock* lock))
412 assert(thread == JavaThread::current(), "threads must correspond");
413 assert(thread->last_Java_sp(), "last_Java_sp must be set");
414 // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
415 EXCEPTION_MARK;
416
417#ifdef ASSERT
418 if (!oopDesc::is_oop(obj)) {
419 ResetNoHandleMark rhm;
420 nmethod* method = thread->last_frame().cb()->as_nmethod_or_null();
421 if (method != NULL) {
422 tty->print_cr("ERROR in monitorexit in method %s wrong obj " INTPTR_FORMAT, method->name(), p2i(obj));
423 }
424 thread->print_stack_on(tty);
    assert(false, "invalid lock object pointer detected");
426 }
427#endif
428
429 if (JVMCIUseFastLocking) {
430 // When using fast locking, the compiled code has already tried the fast case
431 ObjectSynchronizer::slow_exit(obj, lock, THREAD);
432 } else {
433 ObjectSynchronizer::fast_exit(obj, lock, THREAD);
434 }
435 IF_TRACE_jvmci_3 {
436 char type[O_BUFLEN];
437 obj->klass()->name()->as_C_string(type, O_BUFLEN);
438 TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(obj->mark()), p2i(lock));
439 tty->flush();
440 }
441JRT_END
442
443// Object.notify() fast path, caller does slow path
444JRT_LEAF(jboolean, JVMCIRuntime::object_notify(JavaThread *thread, oopDesc* obj))
445
446 // Very few notify/notifyAll operations find any threads on the waitset, so
447 // the dominant fast-path is to simply return.
448 // Relatedly, it's critical that notify/notifyAll be fast in order to
449 // reduce lock hold times.
450 if (!SafepointSynchronize::is_synchronizing()) {
451 if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
452 return true;
453 }
454 }
455 return false; // caller must perform slow path
456
457JRT_END
458
459// Object.notifyAll() fast path, caller does slow path
460JRT_LEAF(jboolean, JVMCIRuntime::object_notifyAll(JavaThread *thread, oopDesc* obj))
461
  if (!SafepointSynchronize::is_synchronizing()) {
463 if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
464 return true;
465 }
466 }
467 return false; // caller must perform slow path
468
469JRT_END
470
471JRT_ENTRY(void, JVMCIRuntime::throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message))
472 TempNewSymbol symbol = SymbolTable::new_symbol(exception);
473 SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);
474JRT_END
475
476JRT_ENTRY(void, JVMCIRuntime::throw_klass_external_name_exception(JavaThread* thread, const char* exception, Klass* klass))
477 ResourceMark rm(thread);
478 TempNewSymbol symbol = SymbolTable::new_symbol(exception);
479 SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, klass->external_name());
480JRT_END
481
482JRT_ENTRY(void, JVMCIRuntime::throw_class_cast_exception(JavaThread* thread, const char* exception, Klass* caster_klass, Klass* target_klass))
483 ResourceMark rm(thread);
484 const char* message = SharedRuntime::generate_class_cast_message(caster_klass, target_klass);
485 TempNewSymbol symbol = SymbolTable::new_symbol(exception);
486 SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);
487JRT_END
488
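// Prints an object on behalf of compiled code: "NULL" for a null reference,
// the UTF-8 contents if as_string is true and the object is a java.lang.String,
// otherwise the object's class name and address.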
489JRT_LEAF(void, JVMCIRuntime::log_object(JavaThread* thread, oopDesc* obj, bool as_string, bool newline))
490 ttyLocker ttyl;
491
492 if (obj == NULL) {
493 tty->print("NULL");
494 } else if (oopDesc::is_oop_or_null(obj, true) && (!as_string || !java_lang_String::is_instance(obj))) {
495 if (oopDesc::is_oop_or_null(obj, true)) {
496 char buf[O_BUFLEN];
497 tty->print("%s@" INTPTR_FORMAT, obj->klass()->name()->as_C_string(buf, O_BUFLEN), p2i(obj));
498 } else {
499 tty->print(INTPTR_FORMAT, p2i(obj));
500 }
501 } else {
502 ResourceMark rm;
503 assert(obj != NULL && java_lang_String::is_instance(obj), "must be");
504 char *buf = java_lang_String::as_utf8_string(obj);
505 tty->print_raw(buf);
506 }
507 if (newline) {
508 tty->cr();
509 }
510JRT_END
511
512#if INCLUDE_G1GC
513
514JRT_LEAF(void, JVMCIRuntime::write_barrier_pre(JavaThread* thread, oopDesc* obj))
515 G1ThreadLocalData::satb_mark_queue(thread).enqueue(obj);
516JRT_END
517
518JRT_LEAF(void, JVMCIRuntime::write_barrier_post(JavaThread* thread, void* card_addr))
519 G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
520JRT_END
521
522#endif // INCLUDE_G1GC
523
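// Debugging aid: checks that both parent and child lie within the Java heap,
// printing any object that does not. Returns false if either check fails.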
524JRT_LEAF(jboolean, JVMCIRuntime::validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child))
  bool ret = true;
  if (!Universe::heap()->is_in(parent)) {
    tty->print_cr("Parent Object " INTPTR_FORMAT " not in heap", p2i(parent));
    parent->print();
    ret = false;
  }
  if (!Universe::heap()->is_in(child)) {
    tty->print_cr("Child Object " INTPTR_FORMAT " not in heap", p2i(child));
    child->print();
    ret = false;
  }
  return (jboolean) ret;
537JRT_END
538
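// Reports a VM error on behalf of compiled code. 'where' and 'format' are
// addresses of C strings (0 if absent) and 'value' is substituted into the
// format string.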
539JRT_ENTRY(void, JVMCIRuntime::vm_error(JavaThread* thread, jlong where, jlong format, jlong value))
540 ResourceMark rm;
541 const char *error_msg = where == 0L ? "<internal JVMCI error>" : (char*) (address) where;
542 char *detail_msg = NULL;
543 if (format != 0L) {
544 const char* buf = (char*) (address) format;
545 size_t detail_msg_length = strlen(buf) * 2;
546 detail_msg = (char *) NEW_RESOURCE_ARRAY(u_char, detail_msg_length);
547 jio_snprintf(detail_msg, detail_msg_length, buf, value);
548 }
549 report_vm_error(__FILE__, __LINE__, error_msg, "%s", detail_msg);
550JRT_END
551
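// Returns the thread's pending exception oop and clears the thread's
// exception oop and pc fields.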
552JRT_LEAF(oopDesc*, JVMCIRuntime::load_and_clear_exception(JavaThread* thread))
553 oop exception = thread->exception_oop();
554 assert(exception != NULL, "npe");
555 thread->set_exception_oop(NULL);
556 thread->set_exception_pc(0);
557 return exception;
558JRT_END
559
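// Prints a format string with up to three jlong arguments on behalf of
// compiled code.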
560PRAGMA_DIAG_PUSH
561PRAGMA_FORMAT_NONLITERAL_IGNORED
562JRT_LEAF(void, JVMCIRuntime::log_printf(JavaThread* thread, const char* format, jlong v1, jlong v2, jlong v3))
563 ResourceMark rm;
564 tty->print(format, v1, v2, v3);
565JRT_END
566PRAGMA_DIAG_POP
567
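// Best-effort printing of a raw value: as a method or code blob if it is a
// code address, as an oop if it points into the heap, otherwise as raw
// long/double/char interpretations. Zero is skipped when ignoreZero is true.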
568static void decipher(jlong v, bool ignoreZero) {
569 if (v != 0 || !ignoreZero) {
570 void* p = (void *)(address) v;
571 CodeBlob* cb = CodeCache::find_blob(p);
572 if (cb) {
573 if (cb->is_nmethod()) {
574 char buf[O_BUFLEN];
575 tty->print("%s [" INTPTR_FORMAT "+" JLONG_FORMAT "]", cb->as_nmethod_or_null()->method()->name_and_sig_as_C_string(buf, O_BUFLEN), p2i(cb->code_begin()), (jlong)((address)v - cb->code_begin()));
576 return;
577 }
578 cb->print_value_on(tty);
579 return;
580 }
581 if (Universe::heap()->is_in(p)) {
582 oop obj = oop(p);
583 obj->print_value_on(tty);
584 return;
585 }
586 tty->print(INTPTR_FORMAT " [long: " JLONG_FORMAT ", double %lf, char %c]",p2i((void *)v), (jlong)v, (jdouble)v, (char)v);
587 }
588}
589
590PRAGMA_DIAG_PUSH
591PRAGMA_FORMAT_NONLITERAL_IGNORED
592JRT_LEAF(void, JVMCIRuntime::vm_message(jboolean vmError, jlong format, jlong v1, jlong v2, jlong v3))
593 ResourceMark rm;
594 const char *buf = (const char*) (address) format;
595 if (vmError) {
596 if (buf != NULL) {
597 fatal(buf, v1, v2, v3);
598 } else {
599 fatal("<anonymous error>");
600 }
601 } else if (buf != NULL) {
602 tty->print(buf, v1, v2, v3);
603 } else {
604 assert(v2 == 0, "v2 != 0");
605 assert(v3 == 0, "v3 != 0");
606 decipher(v1, false);
607 }
608JRT_END
609PRAGMA_DIAG_POP
610
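// Prints a primitive value; typeChar selects the Java type
// ('Z', 'B', 'C', 'S', 'I', 'F', 'J' or 'D').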
611JRT_LEAF(void, JVMCIRuntime::log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline))
612 union {
613 jlong l;
614 jdouble d;
615 jfloat f;
616 } uu;
617 uu.l = value;
618 switch (typeChar) {
619 case 'Z': tty->print(value == 0 ? "false" : "true"); break;
620 case 'B': tty->print("%d", (jbyte) value); break;
621 case 'C': tty->print("%c", (jchar) value); break;
622 case 'S': tty->print("%d", (jshort) value); break;
623 case 'I': tty->print("%d", (jint) value); break;
624 case 'F': tty->print("%f", uu.f); break;
625 case 'J': tty->print(JLONG_FORMAT, value); break;
626 case 'D': tty->print("%lf", uu.d); break;
627 default: assert(false, "unknown typeChar"); break;
628 }
629 if (newline) {
630 tty->cr();
631 }
632JRT_END
633
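// Computes the identity hash code of an object on behalf of compiled code.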
634JRT_ENTRY(jint, JVMCIRuntime::identity_hash_code(JavaThread* thread, oopDesc* obj))
635 return (jint) obj->identity_hash();
636JRT_END
637
638JRT_ENTRY(jboolean, JVMCIRuntime::thread_is_interrupted(JavaThread* thread, oopDesc* receiver, jboolean clear_interrupted))
639 Handle receiverHandle(thread, receiver);
640 // A nested ThreadsListHandle may require the Threads_lock which
641 // requires thread_in_vm which is why this method cannot be JRT_LEAF.
642 ThreadsListHandle tlh;
643
644 JavaThread* receiverThread = java_lang_Thread::thread(receiverHandle());
645 if (receiverThread == NULL || (EnableThreadSMRExtraValidityChecks && !tlh.includes(receiverThread))) {
646 // The other thread may exit during this process, which is ok so return false.
647 return JNI_FALSE;
648 } else {
649 return (jint) Thread::is_interrupted(receiverThread, clear_interrupted != 0);
650 }
651JRT_END
652
653JRT_ENTRY(jint, JVMCIRuntime::test_deoptimize_call_int(JavaThread* thread, int value))
654 deopt_caller();
655 return (jint) value;
656JRT_END
657
658
659// private static JVMCIRuntime JVMCI.initializeRuntime()
660JVM_ENTRY_NO_ENV(jobject, JVM_GetJVMCIRuntime(JNIEnv *env, jclass c))
661 JNI_JVMCIENV(thread, env);
662 if (!EnableJVMCI) {
663 JVMCI_THROW_MSG_NULL(InternalError, "JVMCI is not enabled");
664 }
665 JVMCIENV->runtime()->initialize_HotSpotJVMCIRuntime(JVMCI_CHECK_NULL);
666 JVMCIObject runtime = JVMCIENV->runtime()->get_HotSpotJVMCIRuntime(JVMCI_CHECK_NULL);
667 return JVMCIENV->get_jobject(runtime);
668JVM_END
669
670void JVMCIRuntime::call_getCompiler(TRAPS) {
671 THREAD_JVMCIENV(JavaThread::current());
672 JVMCIObject jvmciRuntime = JVMCIRuntime::get_HotSpotJVMCIRuntime(JVMCI_CHECK);
673 initialize(JVMCIENV);
674 JVMCIENV->call_HotSpotJVMCIRuntime_getCompiler(jvmciRuntime, JVMCI_CHECK);
675}
676
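// Records the oops-table index of the HotSpotNmethod mirror, the failed
// speculation list and an optional name which is copied into the storage
// addressed by name().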
677void JVMCINMethodData::initialize(
678 int nmethod_mirror_index,
679 const char* name,
680 FailedSpeculation** failed_speculations)
681{
682 _failed_speculations = failed_speculations;
683 _nmethod_mirror_index = nmethod_mirror_index;
684 if (name != NULL) {
685 _has_name = true;
686 char* dest = (char*) this->name();
687 strcpy(dest, name);
688 } else {
689 _has_name = false;
690 }
691}
692
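// A speculation id encodes an offset into the nmethod's speculations section
// in its high 32 bits and the speculation length in its low 32 bits.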
693void JVMCINMethodData::add_failed_speculation(nmethod* nm, jlong speculation) {
694 uint index = (speculation >> 32) & 0xFFFFFFFF;
695 int length = (int) speculation;
696 if (index + length > (uint) nm->speculations_size()) {
697 fatal(INTPTR_FORMAT "[index: %d, length: %d] out of bounds wrt encoded speculations of length %u", speculation, index, length, nm->speculations_size());
698 }
699 address data = nm->speculations_begin() + index;
700 FailedSpeculation::add_failed_speculation(nm, _failed_speculations, data, length);
701}
702
703oop JVMCINMethodData::get_nmethod_mirror(nmethod* nm, bool phantom_ref) {
704 if (_nmethod_mirror_index == -1) {
705 return NULL;
706 }
707 if (phantom_ref) {
708 return nm->oop_at_phantom(_nmethod_mirror_index);
709 } else {
710 return nm->oop_at(_nmethod_mirror_index);
711 }
712}
713
714void JVMCINMethodData::set_nmethod_mirror(nmethod* nm, oop new_mirror) {
715 assert(_nmethod_mirror_index != -1, "cannot set JVMCI mirror for nmethod");
716 oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
717 assert(new_mirror != NULL, "use clear_nmethod_mirror to clear the mirror");
718 assert(*addr == NULL, "cannot overwrite non-null mirror");
719
720 *addr = new_mirror;
721
722 // Since we've patched some oops in the nmethod,
723 // (re)register it with the heap.
724 Universe::heap()->register_nmethod(nm);
725}
726
727void JVMCINMethodData::clear_nmethod_mirror(nmethod* nm) {
728 if (_nmethod_mirror_index != -1) {
729 oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
730 *addr = NULL;
731 }
732}
733
734void JVMCINMethodData::invalidate_nmethod_mirror(nmethod* nm) {
735 oop nmethod_mirror = get_nmethod_mirror(nm, /* phantom_ref */ true);
736 if (nmethod_mirror == NULL) {
737 return;
738 }
739
740 // Update the values in the mirror if it still refers to nm.
741 // We cannot use JVMCIObject to wrap the mirror as this is called
742 // during GC, forbidding the creation of JNIHandles.
743 JVMCIEnv* jvmciEnv = NULL;
744 nmethod* current = (nmethod*) HotSpotJVMCI::InstalledCode::address(jvmciEnv, nmethod_mirror);
745 if (nm == current) {
746 if (!nm->is_alive()) {
747 // Break the link from the mirror to nm such that
748 // future invocations via the mirror will result in
749 // an InvalidInstalledCodeException.
750 HotSpotJVMCI::InstalledCode::set_address(jvmciEnv, nmethod_mirror, 0);
751 HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
752 } else if (nm->is_not_entrant()) {
753 // Zero the entry point so any new invocation will fail but keep
      // the address link around so that existing activations can
755 // be deoptimized via the mirror (i.e. JVMCIEnv::invalidate_installed_code).
756 HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
757 }
758 }
759}
760
761void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(JVMCI_TRAPS) {
762 if (is_HotSpotJVMCIRuntime_initialized()) {
763 if (JVMCIENV->is_hotspot() && UseJVMCINativeLibrary) {
764 JVMCI_THROW_MSG(InternalError, "JVMCI has already been enabled in the JVMCI shared library");
765 }
766 }
767
768 initialize(JVMCIENV);
769
770 // This should only be called in the context of the JVMCI class being initialized
771 JVMCIObject result = JVMCIENV->call_HotSpotJVMCIRuntime_runtime(JVMCI_CHECK);
772
773 _HotSpotJVMCIRuntime_instance = JVMCIENV->make_global(result);
774}
775
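// Initializes this runtime at most once. The _initialized flag is checked
// before and after acquiring JVMCI_lock, and _being_initialized makes
// concurrent initializers wait on the lock until the first one completes.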
776void JVMCIRuntime::initialize(JVMCIEnv* JVMCIENV) {
777 assert(this != NULL, "sanity");
778 // Check first without JVMCI_lock
779 if (_initialized) {
780 return;
781 }
782
783 MutexLocker locker(JVMCI_lock);
784 // Check again under JVMCI_lock
785 if (_initialized) {
786 return;
787 }
788
789 while (_being_initialized) {
790 JVMCI_lock->wait();
791 if (_initialized) {
792 return;
793 }
794 }
795
796 _being_initialized = true;
797
798 {
799 MutexUnlocker unlock(JVMCI_lock);
800
801 HandleMark hm;
802 ResourceMark rm;
803 JavaThread* THREAD = JavaThread::current();
804 if (JVMCIENV->is_hotspot()) {
805 HotSpotJVMCI::compute_offsets(CHECK_EXIT);
806 } else {
807 JNIAccessMark jni(JVMCIENV);
808
809 JNIJVMCI::initialize_ids(jni.env());
810 if (jni()->ExceptionCheck()) {
811 jni()->ExceptionDescribe();
812 fatal("JNI exception during init");
813 }
814 }
815 create_jvmci_primitive_type(T_BOOLEAN, JVMCI_CHECK_EXIT_((void)0));
816 create_jvmci_primitive_type(T_BYTE, JVMCI_CHECK_EXIT_((void)0));
817 create_jvmci_primitive_type(T_CHAR, JVMCI_CHECK_EXIT_((void)0));
818 create_jvmci_primitive_type(T_SHORT, JVMCI_CHECK_EXIT_((void)0));
819 create_jvmci_primitive_type(T_INT, JVMCI_CHECK_EXIT_((void)0));
820 create_jvmci_primitive_type(T_LONG, JVMCI_CHECK_EXIT_((void)0));
821 create_jvmci_primitive_type(T_FLOAT, JVMCI_CHECK_EXIT_((void)0));
822 create_jvmci_primitive_type(T_DOUBLE, JVMCI_CHECK_EXIT_((void)0));
823 create_jvmci_primitive_type(T_VOID, JVMCI_CHECK_EXIT_((void)0));
824
825 if (!JVMCIENV->is_hotspot()) {
826 JVMCIENV->copy_saved_properties();
827 }
828 }
829
830 _initialized = true;
831 _being_initialized = false;
832 JVMCI_lock->notify_all();
833}
834
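// Creates the JVMCI mirror for a primitive type by calling
// HotSpotResolvedPrimitiveType.fromMetaspace, either via a Java call
// (HotSpot heap) or via JNI (JVMCI shared library).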
835JVMCIObject JVMCIRuntime::create_jvmci_primitive_type(BasicType type, JVMCI_TRAPS) {
836 Thread* THREAD = Thread::current();
837 // These primitive types are long lived and are created before the runtime is fully set up
838 // so skip registering them for scanning.
839 JVMCIObject mirror = JVMCIENV->get_object_constant(java_lang_Class::primitive_mirror(type), false, true);
840 if (JVMCIENV->is_hotspot()) {
841 JavaValue result(T_OBJECT);
842 JavaCallArguments args;
843 args.push_oop(Handle(THREAD, HotSpotJVMCI::resolve(mirror)));
844 args.push_int(type2char(type));
845 JavaCalls::call_static(&result, HotSpotJVMCI::HotSpotResolvedPrimitiveType::klass(), vmSymbols::fromMetaspace_name(), vmSymbols::primitive_fromMetaspace_signature(), &args, CHECK_(JVMCIObject()));
846
847 return JVMCIENV->wrap(JNIHandles::make_local((oop)result.get_jobject()));
848 } else {
849 JNIAccessMark jni(JVMCIENV);
850 jobject result = jni()->CallStaticObjectMethod(JNIJVMCI::HotSpotResolvedPrimitiveType::clazz(),
851 JNIJVMCI::HotSpotResolvedPrimitiveType_fromMetaspace_method(),
852 mirror.as_jobject(), type2char(type));
853 if (jni()->ExceptionCheck()) {
854 return JVMCIObject();
855 }
856 return JVMCIENV->wrap(result);
857 }
858}
859
860void JVMCIRuntime::initialize_JVMCI(JVMCI_TRAPS) {
861 if (!is_HotSpotJVMCIRuntime_initialized()) {
862 initialize(JVMCI_CHECK);
863 JVMCIENV->call_JVMCI_getRuntime(JVMCI_CHECK);
864 }
865}
866
867JVMCIObject JVMCIRuntime::get_HotSpotJVMCIRuntime(JVMCI_TRAPS) {
868 initialize(JVMCIENV);
869 initialize_JVMCI(JVMCI_CHECK_(JVMCIObject()));
870 return _HotSpotJVMCIRuntime_instance;
871}
872
873
874// private void CompilerToVM.registerNatives()
875JVM_ENTRY_NO_ENV(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass))
876
877#ifdef _LP64
878#ifndef TARGET_ARCH_sparc
879 uintptr_t heap_end = (uintptr_t) Universe::heap()->reserved_region().end();
880 uintptr_t allocation_end = heap_end + ((uintptr_t)16) * 1024 * 1024 * 1024;
881 guarantee(heap_end < allocation_end, "heap end too close to end of address space (might lead to erroneous TLAB allocations)");
882#endif // TARGET_ARCH_sparc
883#else
884 fatal("check TLAB allocation code for address space conflicts");
885#endif
886
887 JNI_JVMCIENV(thread, env);
888
889 if (!EnableJVMCI) {
890 JVMCI_THROW_MSG(InternalError, "JVMCI is not enabled");
891 }
892
893 JVMCIENV->runtime()->initialize(JVMCIENV);
894
895 {
896 ResourceMark rm;
897 HandleMark hm(thread);
898 ThreadToNativeFromVM trans(thread);
899
900 // Ensure _non_oop_bits is initialized
901 Universe::non_oop_word();
902
903 if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods, CompilerToVM::methods_count())) {
904 if (!env->ExceptionCheck()) {
905 for (int i = 0; i < CompilerToVM::methods_count(); i++) {
906 if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods + i, 1)) {
907 guarantee(false, "Error registering JNI method %s%s", CompilerToVM::methods[i].name, CompilerToVM::methods[i].signature);
908 break;
909 }
910 }
911 } else {
912 env->ExceptionDescribe();
913 }
914 guarantee(false, "Failed registering CompilerToVM native methods");
915 }
916 }
917JVM_END
918
919
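// Notifies HotSpotJVMCIRuntime.shutdown() of VM shutdown if the runtime
// has been initialized.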
920void JVMCIRuntime::shutdown() {
921 if (is_HotSpotJVMCIRuntime_initialized()) {
922 _shutdown_called = true;
923
924 THREAD_JVMCIENV(JavaThread::current());
925 JVMCIENV->call_HotSpotJVMCIRuntime_shutdown(_HotSpotJVMCIRuntime_instance);
926 }
927}
928
929void JVMCIRuntime::bootstrap_finished(TRAPS) {
930 if (is_HotSpotJVMCIRuntime_initialized()) {
931 THREAD_JVMCIENV(JavaThread::current());
932 JVMCIENV->call_HotSpotJVMCIRuntime_bootstrapFinished(_HotSpotJVMCIRuntime_instance, JVMCIENV);
933 }
934}
935
936void JVMCIRuntime::describe_pending_hotspot_exception(JavaThread* THREAD, bool clear) {
937 if (HAS_PENDING_EXCEPTION) {
938 Handle exception(THREAD, PENDING_EXCEPTION);
939 const char* exception_file = THREAD->exception_file();
940 int exception_line = THREAD->exception_line();
941 CLEAR_PENDING_EXCEPTION;
942 if (exception->is_a(SystemDictionary::ThreadDeath_klass())) {
943 // Don't print anything if we are being killed.
944 } else {
945 java_lang_Throwable::print_stack_trace(exception, tty);
946
947 // Clear and ignore any exceptions raised during printing
948 CLEAR_PENDING_EXCEPTION;
949 }
950 if (!clear) {
951 THREAD->set_pending_exception(exception(), exception_file, exception_line);
952 }
953 }
954}
955
956
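// Prints the given message and any pending exception (only the first thread
// to report an error does the printing) and then exits the VM.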
957void JVMCIRuntime::exit_on_pending_exception(JVMCIEnv* JVMCIENV, const char* message) {
958 JavaThread* THREAD = JavaThread::current();
959
960 static volatile int report_error = 0;
961 if (!report_error && Atomic::cmpxchg(1, &report_error, 0) == 0) {
962 // Only report an error once
963 tty->print_raw_cr(message);
964 if (JVMCIENV != NULL) {
965 JVMCIENV->describe_pending_exception(true);
966 } else {
967 describe_pending_hotspot_exception(THREAD, true);
968 }
969 } else {
970 // Allow error reporting thread to print the stack trace. Windows
971 // doesn't allow uninterruptible wait for JavaThreads
972 const bool interruptible = true;
973 os::sleep(THREAD, 200, interruptible);
974 }
975
976 before_exit(THREAD);
977 vm_exit(-1);
978}
979
980// ------------------------------------------------------------------
981// Note: the logic of this method should mirror the logic of
982// constantPoolOopDesc::verify_constant_pool_resolve.
983bool JVMCIRuntime::check_klass_accessibility(Klass* accessing_klass, Klass* resolved_klass) {
984 if (accessing_klass->is_objArray_klass()) {
985 accessing_klass = ObjArrayKlass::cast(accessing_klass)->bottom_klass();
986 }
987 if (!accessing_klass->is_instance_klass()) {
988 return true;
989 }
990
991 if (resolved_klass->is_objArray_klass()) {
992 // Find the element klass, if this is an array.
993 resolved_klass = ObjArrayKlass::cast(resolved_klass)->bottom_klass();
994 }
995 if (resolved_klass->is_instance_klass()) {
996 Reflection::VerifyClassAccessResults result =
997 Reflection::verify_class_access(accessing_klass, InstanceKlass::cast(resolved_klass), true);
998 return result == Reflection::ACCESS_OK;
999 }
1000 return true;
1001}
1002
1003// ------------------------------------------------------------------
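// Looks up an already loaded klass by name on behalf of accessing_klass.
// Accepts signature form ("Lname;") and array names; for an array, the
// element klass is resolved first and the array klass is built around it.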
1004Klass* JVMCIRuntime::get_klass_by_name_impl(Klass*& accessing_klass,
1005 const constantPoolHandle& cpool,
1006 Symbol* sym,
1007 bool require_local) {
1008 JVMCI_EXCEPTION_CONTEXT;
1009
1010 // Now we need to check the SystemDictionary
1011 if (sym->char_at(0) == 'L' &&
1012 sym->char_at(sym->utf8_length()-1) == ';') {
1013 // This is a name from a signature. Strip off the trimmings.
    // Call recursively to keep the scope of strippedsym local.
1015 TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
1016 sym->utf8_length()-2);
1017 return get_klass_by_name_impl(accessing_klass, cpool, strippedsym, require_local);
1018 }
1019
1020 Handle loader(THREAD, (oop)NULL);
1021 Handle domain(THREAD, (oop)NULL);
1022 if (accessing_klass != NULL) {
1023 loader = Handle(THREAD, accessing_klass->class_loader());
1024 domain = Handle(THREAD, accessing_klass->protection_domain());
1025 }
1026
1027 Klass* found_klass;
1028 {
1029 ttyUnlocker ttyul; // release tty lock to avoid ordering problems
1030 MutexLocker ml(Compile_lock);
1031 if (!require_local) {
1032 found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, CHECK_NULL);
1033 } else {
1034 found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain, CHECK_NULL);
1035 }
1036 }
1037
1038 // If we fail to find an array klass, look again for its element type.
1039 // The element type may be available either locally or via constraints.
1040 // In either case, if we can find the element type in the system dictionary,
1041 // we must build an array type around it. The CI requires array klasses
1042 // to be loaded if their element klasses are loaded, except when memory
1043 // is exhausted.
1044 if (sym->char_at(0) == '[' &&
1045 (sym->char_at(1) == '[' || sym->char_at(1) == 'L')) {
1046 // We have an unloaded array.
1047 // Build it on the fly if the element class exists.
1048 TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
1049 sym->utf8_length()-1);
1050
1051 // Get element Klass recursively.
1052 Klass* elem_klass =
1053 get_klass_by_name_impl(accessing_klass,
1054 cpool,
1055 elem_sym,
1056 require_local);
1057 if (elem_klass != NULL) {
1058 // Now make an array for it
1059 return elem_klass->array_klass(THREAD);
1060 }
1061 }
1062
1063 if (found_klass == NULL && !cpool.is_null() && cpool->has_preresolution()) {
1064 // Look inside the constant pool for pre-resolved class entries.
1065 for (int i = cpool->length() - 1; i >= 1; i--) {
1066 if (cpool->tag_at(i).is_klass()) {
1067 Klass* kls = cpool->resolved_klass_at(i);
1068 if (kls->name() == sym) {
1069 return kls;
1070 }
1071 }
1072 }
1073 }
1074
1075 return found_klass;
1076}
1077
1078// ------------------------------------------------------------------
1079Klass* JVMCIRuntime::get_klass_by_name(Klass* accessing_klass,
1080 Symbol* klass_name,
1081 bool require_local) {
1082 ResourceMark rm;
1083 constantPoolHandle cpool;
1084 return get_klass_by_name_impl(accessing_klass,
1085 cpool,
1086 klass_name,
1087 require_local);
1088}
1089
1090// ------------------------------------------------------------------
1091// Implementation of get_klass_by_index.
1092Klass* JVMCIRuntime::get_klass_by_index_impl(const constantPoolHandle& cpool,
1093 int index,
1094 bool& is_accessible,
1095 Klass* accessor) {
1096 JVMCI_EXCEPTION_CONTEXT;
1097 Klass* klass = ConstantPool::klass_at_if_loaded(cpool, index);
1098 Symbol* klass_name = NULL;
1099 if (klass == NULL) {
1100 klass_name = cpool->klass_name_at(index);
1101 }
1102
1103 if (klass == NULL) {
1104 // Not found in constant pool. Use the name to do the lookup.
1105 Klass* k = get_klass_by_name_impl(accessor,
1106 cpool,
1107 klass_name,
1108 false);
1109 // Calculate accessibility the hard way.
1110 if (k == NULL) {
1111 is_accessible = false;
1112 } else if (k->class_loader() != accessor->class_loader() &&
1113 get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
1114 // Loaded only remotely. Not linked yet.
1115 is_accessible = false;
1116 } else {
1117 // Linked locally, and we must also check public/private, etc.
1118 is_accessible = check_klass_accessibility(accessor, k);
1119 }
1120 if (!is_accessible) {
1121 return NULL;
1122 }
1123 return k;
1124 }
1125
1126 // It is known to be accessible, since it was found in the constant pool.
1127 is_accessible = true;
1128 return klass;
1129}
1130
1131// ------------------------------------------------------------------
1132// Get a klass from the constant pool.
1133Klass* JVMCIRuntime::get_klass_by_index(const constantPoolHandle& cpool,
1134 int index,
1135 bool& is_accessible,
1136 Klass* accessor) {
1137 ResourceMark rm;
1138 Klass* result = get_klass_by_index_impl(cpool, index, is_accessible, accessor);
1139 return result;
1140}
1141
1142// ------------------------------------------------------------------
1143// Implementation of get_field_by_index.
1144//
1145// Implementation note: the results of field lookups are cached
1146// in the accessor klass.
1147void JVMCIRuntime::get_field_by_index_impl(InstanceKlass* klass, fieldDescriptor& field_desc,
1148 int index) {
1149 JVMCI_EXCEPTION_CONTEXT;
1150
1151 assert(klass->is_linked(), "must be linked before using its constant-pool");
1152
1153 constantPoolHandle cpool(thread, klass->constants());
1154
1155 // Get the field's name, signature, and type.
1156 Symbol* name = cpool->name_ref_at(index);
1157
1158 int nt_index = cpool->name_and_type_ref_index_at(index);
1159 int sig_index = cpool->signature_ref_index_at(nt_index);
1160 Symbol* signature = cpool->symbol_at(sig_index);
1161
1162 // Get the field's declared holder.
1163 int holder_index = cpool->klass_ref_index_at(index);
1164 bool holder_is_accessible;
1165 Klass* declared_holder = get_klass_by_index(cpool, holder_index,
1166 holder_is_accessible,
1167 klass);
1168
1169 // The declared holder of this field may not have been loaded.
1170 // Bail out with partial field information.
1171 if (!holder_is_accessible) {
1172 return;
1173 }
1174
1175
1176 // Perform the field lookup.
1177 Klass* canonical_holder =
1178 InstanceKlass::cast(declared_holder)->find_field(name, signature, &field_desc);
1179 if (canonical_holder == NULL) {
1180 return;
1181 }
1182
1183 assert(canonical_holder == field_desc.field_holder(), "just checking");
1184}
1185
1186// ------------------------------------------------------------------
1187// Get a field by index from a klass's constant pool.
1188void JVMCIRuntime::get_field_by_index(InstanceKlass* accessor, fieldDescriptor& fd, int index) {
1189 ResourceMark rm;
1190 return get_field_by_index_impl(accessor, fd, index);
1191}
1192
1193// ------------------------------------------------------------------
1194// Perform an appropriate method lookup based on accessor, holder,
1195// name, signature, and bytecode.
1196methodHandle JVMCIRuntime::lookup_method(InstanceKlass* accessor,
1197 Klass* holder,
1198 Symbol* name,
1199 Symbol* sig,
1200 Bytecodes::Code bc,
1201 constantTag tag) {
1202 // Accessibility checks are performed in JVMCIEnv::get_method_by_index_impl().
1203 assert(check_klass_accessibility(accessor, holder), "holder not accessible");
1204
1205 methodHandle dest_method;
1206 LinkInfo link_info(holder, name, sig, accessor, LinkInfo::needs_access_check, tag);
1207 switch (bc) {
1208 case Bytecodes::_invokestatic:
1209 dest_method =
1210 LinkResolver::resolve_static_call_or_null(link_info);
1211 break;
1212 case Bytecodes::_invokespecial:
1213 dest_method =
1214 LinkResolver::resolve_special_call_or_null(link_info);
1215 break;
1216 case Bytecodes::_invokeinterface:
1217 dest_method =
1218 LinkResolver::linktime_resolve_interface_method_or_null(link_info);
1219 break;
1220 case Bytecodes::_invokevirtual:
1221 dest_method =
1222 LinkResolver::linktime_resolve_virtual_method_or_null(link_info);
1223 break;
1224 default: ShouldNotReachHere();
1225 }
1226
1227 return dest_method;
1228}
1229
1230
1231// ------------------------------------------------------------------
1232methodHandle JVMCIRuntime::get_method_by_index_impl(const constantPoolHandle& cpool,
1233 int index, Bytecodes::Code bc,
1234 InstanceKlass* accessor) {
1235 if (bc == Bytecodes::_invokedynamic) {
1236 ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
1237 bool is_resolved = !cpce->is_f1_null();
1238 if (is_resolved) {
1239 // Get the invoker Method* from the constant pool.
1240 // (The appendix argument, if any, will be noted in the method's signature.)
1241 Method* adapter = cpce->f1_as_method();
1242 return methodHandle(adapter);
1243 }
1244
1245 return NULL;
1246 }
1247
1248 int holder_index = cpool->klass_ref_index_at(index);
1249 bool holder_is_accessible;
1250 Klass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
1251
1252 // Get the method's name and signature.
1253 Symbol* name_sym = cpool->name_ref_at(index);
1254 Symbol* sig_sym = cpool->signature_ref_at(index);
1255
1256 if (cpool->has_preresolution()
1257 || ((holder == SystemDictionary::MethodHandle_klass() || holder == SystemDictionary::VarHandle_klass()) &&
1258 MethodHandles::is_signature_polymorphic_name(holder, name_sym))) {
1259 // Short-circuit lookups for JSR 292-related call sites.
1260 // That is, do not rely only on name-based lookups, because they may fail
1261 // if the names are not resolvable in the boot class loader (7056328).
1262 switch (bc) {
1263 case Bytecodes::_invokevirtual:
1264 case Bytecodes::_invokeinterface:
1265 case Bytecodes::_invokespecial:
1266 case Bytecodes::_invokestatic:
1267 {
1268 Method* m = ConstantPool::method_at_if_loaded(cpool, index);
1269 if (m != NULL) {
1270 return m;
1271 }
1272 }
1273 break;
1274 default:
1275 break;
1276 }
1277 }
1278
1279 if (holder_is_accessible) { // Our declared holder is loaded.
1280 constantTag tag = cpool->tag_ref_at(index);
1281 methodHandle m = lookup_method(accessor, holder, name_sym, sig_sym, bc, tag);
1282 if (!m.is_null()) {
1283 // We found the method.
1284 return m;
1285 }
1286 }
1287
1288 // Either the declared holder was not loaded, or the method could
1289 // not be found.
1290
1291 return NULL;
1292}
1293
1294// ------------------------------------------------------------------
1295InstanceKlass* JVMCIRuntime::get_instance_klass_for_declared_method_holder(Klass* method_holder) {
1296 // For the case of <array>.clone(), the method holder can be an ArrayKlass*
1297 // instead of an InstanceKlass*. For that case simply pretend that the
1298 // declared holder is Object.clone since that's where the call will bottom out.
1299 if (method_holder->is_instance_klass()) {
1300 return InstanceKlass::cast(method_holder);
1301 } else if (method_holder->is_array_klass()) {
1302 return InstanceKlass::cast(SystemDictionary::Object_klass());
1303 } else {
1304 ShouldNotReachHere();
1305 }
1306 return NULL;
1307}
1308
1309
1310// ------------------------------------------------------------------
1311methodHandle JVMCIRuntime::get_method_by_index(const constantPoolHandle& cpool,
1312 int index, Bytecodes::Code bc,
1313 InstanceKlass* accessor) {
1314 ResourceMark rm;
1315 return get_method_by_index_impl(cpool, index, bc, accessor);
1316}
1317
1318// ------------------------------------------------------------------
1319// Check for changes to the system dictionary during compilation
1320// class loads, evolution, breakpoints
1321JVMCI::CodeInstallResult JVMCIRuntime::validate_compile_task_dependencies(Dependencies* dependencies, JVMCICompileState* compile_state, char** failure_detail) {
1322 // If JVMTI capabilities were enabled during compile, the compilation is invalidated.
1323 if (compile_state != NULL && compile_state->jvmti_state_changed()) {
1324 *failure_detail = (char*) "Jvmti state change during compilation invalidated dependencies";
1325 return JVMCI::dependencies_failed;
1326 }
1327
1328 // Dependencies must be checked when the system dictionary changes
1329 // or if we don't know whether it has changed (i.e., compile_state == NULL).
1330 bool counter_changed = compile_state == NULL || compile_state->system_dictionary_modification_counter() != SystemDictionary::number_of_modifications();
1331 CompileTask* task = compile_state == NULL ? NULL : compile_state->task();
1332 Dependencies::DepType result = dependencies->validate_dependencies(task, counter_changed, failure_detail);
1333 if (result == Dependencies::end_marker) {
1334 return JVMCI::ok;
1335 }
1336
1337 if (!Dependencies::is_klass_type(result) || counter_changed) {
1338 return JVMCI::dependencies_failed;
1339 }
1340 // The dependencies were invalid at the time of installation
1341 // without any intervening modification of the system
1342 // dictionary. That means they were invalidly constructed.
1343 return JVMCI::dependencies_invalid;
1344}
1345
1346// Reports a pending exception and exits the VM.
1347static void fatal_exception_in_compile(JVMCIEnv* JVMCIENV, JavaThread* thread, const char* msg) {
1348 // Only report a fatal JVMCI compilation exception once
1349 static volatile int report_init_failure = 0;
1350 if (!report_init_failure && Atomic::cmpxchg(1, &report_init_failure, 0) == 0) {
1351 tty->print_cr("%s:", msg);
1352 JVMCIENV->describe_pending_exception(true);
1353 }
1354 JVMCIENV->clear_pending_exception();
1355 before_exit(thread);
1356 vm_exit(-1);
1357}
1358
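// Requests compilation of a method by calling
// HotSpotJVMCIRuntime.compileMethod and records the outcome in the
// JVMCICompileState.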
1359void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, const methodHandle& method, int entry_bci) {
1360 JVMCI_EXCEPTION_CONTEXT
1361
1362 JVMCICompileState* compile_state = JVMCIENV->compile_state();
1363
1364 bool is_osr = entry_bci != InvocationEntryBci;
1365 if (compiler->is_bootstrapping() && is_osr) {
1366 // no OSR compilations during bootstrap - the compiler is just too slow at this point,
1367 // and we know that there are no endless loops
    compile_state->set_failure(true, "No OSR during bootstrap");
1369 return;
1370 }
1371 if (JVMCI::shutdown_called()) {
1372 compile_state->set_failure(false, "Avoiding compilation during shutdown");
1373 return;
1374 }
1375
1376 HandleMark hm;
1377 JVMCIObject receiver = get_HotSpotJVMCIRuntime(JVMCIENV);
1378 if (JVMCIENV->has_pending_exception()) {
1379 fatal_exception_in_compile(JVMCIENV, thread, "Exception during HotSpotJVMCIRuntime initialization");
1380 }
1381 JVMCIObject jvmci_method = JVMCIENV->get_jvmci_method(method, JVMCIENV);
1382 if (JVMCIENV->has_pending_exception()) {
1383 JVMCIENV->describe_pending_exception(true);
1384 compile_state->set_failure(false, "exception getting JVMCI wrapper method");
1385 return;
1386 }
1387
1388 JVMCIObject result_object = JVMCIENV->call_HotSpotJVMCIRuntime_compileMethod(receiver, jvmci_method, entry_bci,
1389 (jlong) compile_state, compile_state->task()->compile_id());
1390 if (!JVMCIENV->has_pending_exception()) {
1391 if (result_object.is_non_null()) {
1392 JVMCIObject failure_message = JVMCIENV->get_HotSpotCompilationRequestResult_failureMessage(result_object);
1393 if (failure_message.is_non_null()) {
1394 // Copy failure reason into resource memory first ...
1395 const char* failure_reason = JVMCIENV->as_utf8_string(failure_message);
1396 // ... and then into the C heap.
1397 failure_reason = os::strdup(failure_reason, mtJVMCI);
1398 bool retryable = JVMCIENV->get_HotSpotCompilationRequestResult_retry(result_object) != 0;
1399 compile_state->set_failure(retryable, failure_reason, true);
1400 } else {
1401 if (compile_state->task()->code() == NULL) {
1402 compile_state->set_failure(true, "no nmethod produced");
1403 } else {
1404 compile_state->task()->set_num_inlined_bytecodes(JVMCIENV->get_HotSpotCompilationRequestResult_inlinedBytecodes(result_object));
1405 compiler->inc_methods_compiled();
1406 }
1407 }
1408 } else {
1409 assert(false, "JVMCICompiler.compileMethod should always return non-null");
1410 }
1411 } else {
1412 // An uncaught exception here implies failure during compiler initialization.
1413 // The only sensible thing to do here is to exit the VM.
1414 fatal_exception_in_compile(JVMCIENV, thread, "Exception during JVMCI compiler initialization");
1415 }
1416 if (compiler->is_bootstrapping()) {
1417 compiler->set_bootstrap_compilation_request_handled();
1418 }
1419}
1420
1421
1422// ------------------------------------------------------------------
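// Installs JVMCI compiled code as an nmethod. Dependencies are validated
// under Compile_lock and, on success, the nmethod is made in use and, for a
// default mirror, set as the method's code.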
1423JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
1424 const methodHandle& method,
1425 nmethod*& nm,
1426 int entry_bci,
1427 CodeOffsets* offsets,
1428 int orig_pc_offset,
1429 CodeBuffer* code_buffer,
1430 int frame_words,
1431 OopMapSet* oop_map_set,
1432 ExceptionHandlerTable* handler_table,
1433 ImplicitExceptionTable* implicit_exception_table,
1434 AbstractCompiler* compiler,
1435 DebugInformationRecorder* debug_info,
1436 Dependencies* dependencies,
1437 int compile_id,
1438 bool has_unsafe_access,
1439 bool has_wide_vector,
1440 JVMCIObject compiled_code,
1441 JVMCIObject nmethod_mirror,
1442 FailedSpeculation** failed_speculations,
1443 char* speculations,
1444 int speculations_len) {
1445 JVMCI_EXCEPTION_CONTEXT;
1446 nm = NULL;
1447 int comp_level = CompLevel_full_optimization;
1448 char* failure_detail = NULL;
1449
1450 bool install_default = JVMCIENV->get_HotSpotNmethod_isDefault(nmethod_mirror) != 0;
1451 assert(JVMCIENV->isa_HotSpotNmethod(nmethod_mirror), "must be");
1452 JVMCIObject name = JVMCIENV->get_InstalledCode_name(nmethod_mirror);
1453 const char* nmethod_mirror_name = name.is_null() ? NULL : JVMCIENV->as_utf8_string(name);
1454 int nmethod_mirror_index;
1455 if (!install_default) {
1456 // Reserve or initialize mirror slot in the oops table.
1457 OopRecorder* oop_recorder = debug_info->oop_recorder();
1458 nmethod_mirror_index = oop_recorder->allocate_oop_index(nmethod_mirror.is_hotspot() ? nmethod_mirror.as_jobject() : NULL);
1459 } else {
1460 // A default HotSpotNmethod mirror is never tracked by the nmethod
1461 nmethod_mirror_index = -1;
1462 }
1463
1464 JVMCI::CodeInstallResult result;
1465 {
1466 // To prevent compile queue updates.
1467 MutexLocker locker(MethodCompileQueue_lock, THREAD);
1468
1469 // Prevent SystemDictionary::add_to_hierarchy from running
1470 // and invalidating our dependencies until we install this method.
1471 MutexLocker ml(Compile_lock);
1472
1473 // Encode the dependencies now, so we can check them right away.
1474 dependencies->encode_content_bytes();
1475
1476 // Record the dependencies for the current compile in the log
1477 if (LogCompilation) {
1478 for (Dependencies::DepStream deps(dependencies); deps.next(); ) {
1479 deps.log_dependency();
1480 }
1481 }
1482
1483 // Check for {class loads, evolution, breakpoints} during compilation
1484 result = validate_compile_task_dependencies(dependencies, JVMCIENV->compile_state(), &failure_detail);
1485 if (result != JVMCI::ok) {
1486 // While not a true deoptimization, it is a preemptive decompile.
1487 MethodData* mdp = method()->method_data();
1488 if (mdp != NULL) {
1489 mdp->inc_decompile_count();
1490#ifdef ASSERT
1491 if (mdp->decompile_count() > (uint)PerMethodRecompilationCutoff) {
1492 ResourceMark m;
1493 tty->print_cr("WARN: endless recompilation of %s. Method was set to not compilable.", method()->name_and_sig_as_C_string());
1494 }
1495#endif
1496 }
1497
1498 // All buffers in the CodeBuffer are allocated in the CodeCache.
1499 // If the code buffer is created on each compile attempt
1500 // as in C2, then it must be freed.
1501 //code_buffer->free_blob();
1502 } else {
1503 nm = nmethod::new_nmethod(method,
1504 compile_id,
1505 entry_bci,
1506 offsets,
1507 orig_pc_offset,
1508 debug_info, dependencies, code_buffer,
1509 frame_words, oop_map_set,
1510 handler_table, implicit_exception_table,
1511 compiler, comp_level,
1512 speculations, speculations_len,
1513 nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
1514
1515
1516 // Free codeBlobs
1517 if (nm == NULL) {
1518 // The CodeCache is full. Print out warning and disable compilation.
1519 {
1520 MutexUnlocker ml(Compile_lock);
1521 MutexUnlocker locker(MethodCompileQueue_lock);
1522 CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
1523 }
1524 } else {
1525 nm->set_has_unsafe_access(has_unsafe_access);
1526 nm->set_has_wide_vectors(has_wide_vector);
1527
1528 // Record successful registration.
1529 // (Put nm into the task handle *before* publishing to the Java heap.)
1530 if (JVMCIENV->compile_state() != NULL) {
1531 JVMCIENV->compile_state()->task()->set_code(nm);
1532 }
1533
1534 JVMCINMethodData* data = nm->jvmci_nmethod_data();
1535 assert(data != NULL, "must be");
1536 if (install_default) {
1537 assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == NULL, "must be");
1538 if (entry_bci == InvocationEntryBci) {
1539 if (TieredCompilation) {
1540 // If there is an old version we're done with it
1541 CompiledMethod* old = method->code();
1542 if (TraceMethodReplacement && old != NULL) {
1543 ResourceMark rm;
1544 char *method_name = method->name_and_sig_as_C_string();
1545 tty->print_cr("Replacing method %s", method_name);
1546 }
1547 if (old != NULL ) {
1548 old->make_not_entrant();
1549 }
1550 }
1551
1552 LogTarget(Info, nmethod, install) lt;
1553 if (lt.is_enabled()) {
1554 ResourceMark rm;
1555 char *method_name = method->name_and_sig_as_C_string();
1556 lt.print("Installing method (%d) %s [entry point: %p]",
1557 comp_level, method_name, nm->entry_point());
1558 }
1559 // Allow the code to be executed
1560 method->set_code(method, nm);
1561 } else {
1562 LogTarget(Info, nmethod, install) lt;
1563 if (lt.is_enabled()) {
1564 ResourceMark rm;
1565 char *method_name = method->name_and_sig_as_C_string();
1566 lt.print("Installing osr method (%d) %s @ %d",
1567 comp_level, method_name, entry_bci);
1568 }
1569 InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
1570 }
1571 } else {
1572 assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == HotSpotJVMCI::resolve(nmethod_mirror), "must be");
1573 }
1574 nm->make_in_use();
1575 }
      result = nm != NULL ? JVMCI::ok : JVMCI::cache_full;
1577 }
1578 }
1579
1580 // String creation must be done outside lock
1581 if (failure_detail != NULL) {
1582 // A failure to allocate the string is silently ignored.
1583 JVMCIObject message = JVMCIENV->create_string(failure_detail, JVMCIENV);
1584 JVMCIENV->set_HotSpotCompiledNmethod_installationFailureMessage(compiled_code, message);
1585 }
1586
1587 // JVMTI -- compiled method notification (must be done outside lock)
1588 if (nm != NULL) {
1589 nm->post_compiled_method_load_event();
1590 }
1591
1592 return result;
1593}
1594