1/*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "jvm.h"
27#include "classfile/symbolTable.hpp"
28#include "classfile/systemDictionary.hpp"
29#include "code/codeCache.hpp"
30#include "code/debugInfoRec.hpp"
31#include "code/nmethod.hpp"
32#include "code/pcDesc.hpp"
33#include "code/scopeDesc.hpp"
34#include "interpreter/bytecode.hpp"
35#include "interpreter/interpreter.hpp"
36#include "interpreter/oopMapCache.hpp"
37#include "memory/allocation.inline.hpp"
38#include "memory/oopFactory.hpp"
39#include "memory/resourceArea.hpp"
40#include "memory/universe.hpp"
41#include "oops/constantPool.hpp"
42#include "oops/method.hpp"
43#include "oops/objArrayKlass.hpp"
44#include "oops/objArrayOop.inline.hpp"
45#include "oops/oop.inline.hpp"
46#include "oops/fieldStreams.hpp"
47#include "oops/typeArrayOop.inline.hpp"
48#include "oops/verifyOopClosure.hpp"
49#include "prims/jvmtiThreadState.hpp"
50#include "runtime/biasedLocking.hpp"
51#include "runtime/compilationPolicy.hpp"
52#include "runtime/deoptimization.hpp"
53#include "runtime/fieldDescriptor.hpp"
54#include "runtime/fieldDescriptor.inline.hpp"
55#include "runtime/frame.inline.hpp"
56#include "runtime/jniHandles.inline.hpp"
57#include "runtime/handles.inline.hpp"
58#include "runtime/interfaceSupport.inline.hpp"
59#include "runtime/safepointVerifiers.hpp"
60#include "runtime/sharedRuntime.hpp"
61#include "runtime/signature.hpp"
62#include "runtime/stubRoutines.hpp"
63#include "runtime/thread.hpp"
64#include "runtime/threadSMR.hpp"
65#include "runtime/vframe.hpp"
66#include "runtime/vframeArray.hpp"
67#include "runtime/vframe_hp.hpp"
68#include "utilities/events.hpp"
69#include "utilities/preserveException.hpp"
70#include "utilities/xmlstream.hpp"
71
72
73bool DeoptimizationMarker::_is_active = false;
74
75Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame,
76 int caller_adjustment,
77 int caller_actual_parameters,
78 int number_of_frames,
79 intptr_t* frame_sizes,
80 address* frame_pcs,
81 BasicType return_type,
82 int exec_mode) {
83 _size_of_deoptimized_frame = size_of_deoptimized_frame;
84 _caller_adjustment = caller_adjustment;
85 _caller_actual_parameters = caller_actual_parameters;
86 _number_of_frames = number_of_frames;
87 _frame_sizes = frame_sizes;
88 _frame_pcs = frame_pcs;
89 _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
90 _return_type = return_type;
91 _initial_info = 0;
92 // PD (x86 only)
93 _counter_temp = 0;
94 _unpack_kind = exec_mode;
95 _sender_sp_temp = 0;
96
97 _total_frame_sizes = size_of_frames();
98 assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
99}
100
101
102Deoptimization::UnrollBlock::~UnrollBlock() {
103 FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
104 FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
105 FREE_C_HEAP_ARRAY(intptr_t, _register_block);
106}
107
108
109intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
110 assert(register_number < RegisterMap::reg_count, "checking register number");
111 return &_register_block[register_number * 2];
112}
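// Note: the constructor above allocates _register_block as reg_count * 2 words,
// so each register owns a two-word slot; value_addr_at() hands back the address
// of the first word of that slot.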
113
114
115
116int Deoptimization::UnrollBlock::size_of_frames() const {
117 // Account first for the adjustment of the initial frame
118 int result = _caller_adjustment;
119 for (int index = 0; index < number_of_frames(); index++) {
120 result += frame_sizes()[index];
121 }
122 return result;
123}
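// Note: the result is in bytes, not words -- both _caller_adjustment and the
// frame_sizes[] entries are already scaled by BytesPerWord when the UnrollBlock
// is built in fetch_unroll_info_helper() below.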
124
125
126void Deoptimization::UnrollBlock::print() {
127 ttyLocker ttyl;
128 tty->print_cr("UnrollBlock");
129 tty->print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
130 tty->print( " frame_sizes: ");
131 for (int index = 0; index < number_of_frames(); index++) {
132 tty->print(INTX_FORMAT " ", frame_sizes()[index]);
133 }
134 tty->cr();
135}
136
137
138// In order to make fetch_unroll_info work properly with escape
139// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
140// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
141// of previously eliminated objects occurs in realloc_objects, which is
142// called from the method fetch_unroll_info_helper below.
143JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
144 // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
145 // but makes the entry a little slower. There is however a little dance we have to
146 // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
147
148 // fetch_unroll_info() is called at the beginning of the deoptimization
149 // handler. Note this fact before we start generating temporary frames
150 // that can confuse an asynchronous stack walker. This counter is
151 // decremented at the end of unpack_frames().
152 if (TraceDeoptimization) {
153 tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
154 }
155 thread->inc_in_deopt_handler();
156
157 return fetch_unroll_info_helper(thread, exec_mode);
158JRT_END
159
160
161// This is factored, since it is called both from a JRT_BLOCK_ENTRY (deoptimization) and a JRT_ENTRY (uncommon_trap)
162Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
163
164 // Note: there is a safepoint safety issue here. No matter whether we enter
165 // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
166 // the vframeArray is created.
167 //
168
169 // Allocate our special deoptimization ResourceMark
170 DeoptResourceMark* dmark = new DeoptResourceMark(thread);
171 assert(thread->deopt_mark() == NULL, "Pending deopt!");
172 thread->set_deopt_mark(dmark);
173
174 frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
175 RegisterMap map(thread, true);
176 RegisterMap dummy_map(thread, false);
177 // Now get the deoptee with a valid map
178 frame deoptee = stub_frame.sender(&map);
179 // Set the deoptee nmethod
180 assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
181 CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
182 thread->set_deopt_compiled_method(cm);
183
184 if (VerifyStack) {
185 thread->validate_frame_layout();
186 }
187
188 // Create a growable array of VFrames where each VFrame represents an inlined
189 // Java frame. This storage is allocated with the usual system arena.
190 assert(deoptee.is_compiled_frame(), "Wrong frame type");
191 GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
192 vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
193 while (!vf->is_top()) {
194 assert(vf->is_compiled_frame(), "Wrong frame type");
195 chunk->push(compiledVFrame::cast(vf));
196 vf = vf->sender();
197 }
198 assert(vf->is_compiled_frame(), "Wrong frame type");
199 chunk->push(compiledVFrame::cast(vf));
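 // At this point chunk holds one compiledVFrame per inlined scope of the deoptee
 // frame, innermost scope first: chunk->at(0) is the scope in which the trap was
 // taken (see the "innermost first" listing printed by create_vframeArray()).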
200
201 bool realloc_failures = false;
202
203#if COMPILER2_OR_JVMCI
204 // Reallocate the non-escaping objects and restore their fields. Then
205 // relock objects if synchronization on them was eliminated.
206#if !INCLUDE_JVMCI
207 if (DoEscapeAnalysis || EliminateNestedLocks) {
208 if (EliminateAllocations) {
209#endif // INCLUDE_JVMCI
210 assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
211 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
212
213 // The flag return_oop() indicates call sites which return an oop
214 // in compiled code. Such sites include Java method calls,
215 // runtime calls (for example, used to allocate new objects/arrays
216 // on the slow code path) and any other calls generated in compiled code.
217 // It is not guaranteed that we can get such information here only
218 // by analyzing bytecode in deoptimized frames. This is why this flag
219 // is set during method compilation (see Compile::Process_OopMap_Node()).
220 // If the previous frame was popped or if we are dispatching an exception,
221 // we don't have an oop result.
222 bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Unpack_deopt);
223 Handle return_value;
224 if (save_oop_result) {
225 // Reallocation may trigger GC. If deoptimization happened on return from
226 // a call which returns an oop, we need to save it since it is not in the oopmap.
227 oop result = deoptee.saved_oop_result(&map);
228 assert(oopDesc::is_oop_or_null(result), "must be oop");
229 return_value = Handle(thread, result);
230 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
231 if (TraceDeoptimization) {
232 ttyLocker ttyl;
233 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
234 }
235 }
236 if (objects != NULL) {
237 JRT_BLOCK
238 realloc_failures = realloc_objects(thread, &deoptee, &map, objects, THREAD);
239 JRT_END
240 bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
241 reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
242#ifndef PRODUCT
243 if (TraceDeoptimization) {
244 ttyLocker ttyl;
245 tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
246 print_objects(objects, realloc_failures);
247 }
248#endif
249 }
250 if (save_oop_result) {
251 // Restore result.
252 deoptee.set_saved_oop_result(&map, return_value());
253 }
254#if !INCLUDE_JVMCI
255 }
256 if (EliminateLocks) {
257#endif // INCLUDE_JVMCI
258#ifndef PRODUCT
259 bool first = true;
260#endif
261 for (int i = 0; i < chunk->length(); i++) {
262 compiledVFrame* cvf = chunk->at(i);
263 assert (cvf->scope() != NULL,"expect only compiled java frames");
264 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
265 if (monitors->is_nonempty()) {
266 relock_objects(monitors, thread, realloc_failures);
267#ifndef PRODUCT
268 if (PrintDeoptimizationDetails) {
269 ttyLocker ttyl;
270 for (int j = 0; j < monitors->length(); j++) {
271 MonitorInfo* mi = monitors->at(j);
272 if (mi->eliminated()) {
273 if (first) {
274 first = false;
275 tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
276 }
277 if (mi->owner_is_scalar_replaced()) {
278 Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
279 tty->print_cr(" failed reallocation for klass %s", k->external_name());
280 } else {
281 tty->print_cr(" object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
282 }
283 }
284 }
285 }
286#endif // !PRODUCT
287 }
288 }
289#if !INCLUDE_JVMCI
290 }
291 }
292#endif // INCLUDE_JVMCI
293#endif // COMPILER2_OR_JVMCI
294
295 ScopeDesc* trap_scope = chunk->at(0)->scope();
296 Handle exceptionObject;
297 if (trap_scope->rethrow_exception()) {
298 if (PrintDeoptimizationDetails) {
299 tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
300 }
301 GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
302 guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
303 ScopeValue* topOfStack = expressions->top();
304 exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
305 guarantee(exceptionObject() != NULL, "exception oop can not be null");
306 }
307
308 // Ensure that no safepoint is taken after pointers have been stored
309 // in fields of rematerialized objects. If a safepoint occurs from here on
310 // out the java state residing in the vframeArray will be missed.
311 NoSafepointVerifier no_safepoint;
312
313 vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
314#if COMPILER2_OR_JVMCI
315 if (realloc_failures) {
316 pop_frames_failed_reallocs(thread, array);
317 }
318#endif
319
320 assert(thread->vframe_array_head() == NULL, "Pending deopt!");
321 thread->set_vframe_array_head(array);
322
323 // Now that the vframeArray has been created, if we have any deferred local writes
324 // added by JVMTI we can free up that structure, as the data is now in the
325 // vframeArray.
326
327 if (thread->deferred_locals() != NULL) {
328 GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
329 int i = 0;
330 do {
331 // Because of inlining we could have multiple vframes for a single frame
332 // and several of the vframes could have deferred writes. Find them all.
333 if (list->at(i)->id() == array->original().id()) {
334 jvmtiDeferredLocalVariableSet* dlv = list->at(i);
335 list->remove_at(i);
336 // individual jvmtiDeferredLocalVariableSet are CHeapObj's
337 delete dlv;
338 } else {
339 i++;
340 }
341 } while ( i < list->length() );
342 if (list->length() == 0) {
343 thread->set_deferred_locals(NULL);
344 // free the list and elements back to C heap.
345 delete list;
346 }
347
348 }
349
350 // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
351 CodeBlob* cb = stub_frame.cb();
352 // Verify we have the right vframeArray
353 assert(cb->frame_size() >= 0, "Unexpected frame size");
354 intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
355
356 // If the deopt call site is a MethodHandle invoke call site we have
357 // to adjust the unpack_sp.
358 nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
359 if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
360 unpack_sp = deoptee.unextended_sp();
361
362#ifdef ASSERT
363 assert(cb->is_deoptimization_stub() ||
364 cb->is_uncommon_trap_stub() ||
365 strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
366 strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
367 "unexpected code blob: %s", cb->name());
368#endif
369
370 // This is a guarantee instead of an assert because if vframe doesn't match
371 // we will unpack the wrong deoptimized frame and wind up in strange places
372 // where it will be very difficult to figure out what went wrong. Better
373 // to die an early death here than some very obscure death later when the
374 // trail is cold.
375 // Note: on ia64 this guarantee can be fooled by frames with no memory stack
376 // in that it will fail to detect a problem when there is one. This needs
377 // more work in tiger timeframe.
378 guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
379
380 int number_of_frames = array->frames();
381
382 // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
383 // virtual activation, which is the reverse of the elements in the vframes array.
384 intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
385 // +1 because we always have an interpreter return address for the final slot.
386 address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
387 int popframe_extra_args = 0;
388 // Create an interpreter return address for the stub to use as its return
389 // address so the skeletal frames are perfectly walkable
390 frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
391
392 // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
393 // activation be put back on the expression stack of the caller for reexecution
394 if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
395 popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
396 }
397
398 // Find the current pc for the sender of the deoptee. Since the sender may have been deoptimized
399 // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
400 // than simply use array->sender.pc(). This requires us to walk the current set of frames.
401 //
402 frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
403 deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller
404
405 // It's possible that the number of parameters at the call site is
406 // different from the number of arguments in the callee when method
407 // handles are used. If the caller is interpreted, get the real
408 // value so that the proper amount of space can be added to its
409 // frame.
410 bool caller_was_method_handle = false;
411 if (deopt_sender.is_interpreted_frame()) {
412 methodHandle method = deopt_sender.interpreter_frame_method();
413 Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
414 if (cur.is_invokedynamic() || cur.is_invokehandle()) {
415 // Method handle invokes may involve fairly arbitrary chains of
416 // calls so it's impossible to know how much actual space the
417 // caller has for locals.
418 caller_was_method_handle = true;
419 }
420 }
421
422 //
423 // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
424 // frame_sizes/frame_pcs[1] next oldest frame (int)
425 // frame_sizes/frame_pcs[n] youngest frame (int)
426 //
427 // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
428 // owns the space for the return address to its caller). Confusing, ain't it?
429 //
430 // The vframe array can address vframes with indices running from
431 // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
432 // When we create the skeletal frames we need the oldest frame to be in the zero slot
433 // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
434 // This is why the indexing in the loop below looks a little strange.
435 //
436 int callee_parameters = 0;
437 int callee_locals = 0;
438 for (int index = 0; index < array->frames(); index++ ) {
439 // frame[number_of_frames - 1 ] = on_stack_size(youngest)
440 // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
441 // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
442 frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
443 callee_locals,
444 index == 0,
445 popframe_extra_args);
446 // This pc doesn't have to be perfect just good enough to identify the frame
447 // as interpreted so the skeleton frame will be walkable
448 // The correct pc will be set when the skeleton frame is completely filled out
449 // The final pc we store in the loop is wrong and will be overwritten below
450 frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
451
452 callee_parameters = array->element(index)->method()->size_of_parameters();
453 callee_locals = array->element(index)->method()->max_locals();
454 popframe_extra_args = 0;
455 }
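 // For example, with three inlined scopes A -> B -> C (C innermost) the loop above
 // fills frame_sizes[0] and a placeholder pc for A (the oldest frame) and
 // frame_sizes[2] for C (the youngest); frame_pcs[0] is overwritten with the real
 // caller pc below, while frame_pcs[3] keeps the interpreter deopt entry stored
 // earlier as the stub's return address.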
456
457 // Compute whether the root vframe returns a float or double value.
458 BasicType return_type;
459 {
460 methodHandle method(thread, array->element(0)->method());
461 Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
462 return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
463 }
464
465 // Compute information for handling adapters and adjusting the frame size of the caller.
466 int caller_adjustment = 0;
467
468 // Compute the amount the oldest interpreter frame will have to adjust
469 // its caller's stack by. If the caller is a compiled frame then
470 // we pretend that the callee has no parameters so that the
471 // extension counts for the full amount of locals and not just
472 // locals-parms. This is because without a c2i adapter the parm
473 // area as created by the compiled frame will not be usable by
474 // the interpreter. (Depending on the calling convention there
475 // may not even be enough space).
476
477 // QQQ I'd rather see this pushed down into last_frame_adjust
478 // and have it take the sender (aka caller).
479
480 if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
481 caller_adjustment = last_frame_adjust(0, callee_locals);
482 } else if (callee_locals > callee_parameters) {
483 // The caller frame may need extending to accommodate
484 // non-parameter locals of the first unpacked interpreted frame.
485 // Compute that adjustment.
486 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
487 }
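 // For instance, if the oldest unpacked method has max_locals() == 5 of which 3 are
 // parameters: an interpreted caller only needs room for the 2 non-parameter locals
 // (last_frame_adjust(3, 5)), whereas a compiled or method-handle caller is charged
 // for all 5 (last_frame_adjust(0, 5)) because its outgoing-argument area cannot be
 // reused by the interpreter.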
488
489 // If the sender is deoptimized, we must retrieve the address of the handler
490 // since the frame will "magically" show the original pc before the deopt
491 // and we'd undo the deopt.
492
493 frame_pcs[0] = deopt_sender.raw_pc();
494
495 assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
496
497#if INCLUDE_JVMCI
498 if (exceptionObject() != NULL) {
499 thread->set_exception_oop(exceptionObject());
500 exec_mode = Unpack_exception;
501 }
502#endif
503
504 if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
505 assert(thread->has_pending_exception(), "should have thrown OOME");
506 thread->set_exception_oop(thread->pending_exception());
507 thread->clear_pending_exception();
508 exec_mode = Unpack_exception;
509 }
510
511#if INCLUDE_JVMCI
512 if (thread->frames_to_pop_failed_realloc() > 0) {
513 thread->set_pending_monitorenter(false);
514 }
515#endif
516
517 UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
518 caller_adjustment * BytesPerWord,
519 caller_was_method_handle ? 0 : callee_parameters,
520 number_of_frames,
521 frame_sizes,
522 frame_pcs,
523 return_type,
524 exec_mode);
525 // On some platforms, we need a way to pass some platform dependent
526 // information to the unpacking code so the skeletal frames come out
527 // correct (initial fp value, unextended sp, ...)
528 info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
529
530 if (array->frames() > 1) {
531 if (VerifyStack && TraceDeoptimization) {
532 ttyLocker ttyl;
533 tty->print_cr("Deoptimizing method containing inlining");
534 }
535 }
536
537 array->set_unroll_block(info);
538 return info;
539}
540
541// Called to clean up deoptimization data structures in the normal case
542// after unpacking to the stack, and also when a stack overflow error occurs
543void Deoptimization::cleanup_deopt_info(JavaThread *thread,
544 vframeArray *array) {
545
546 // Get array if coming from exception
547 if (array == NULL) {
548 array = thread->vframe_array_head();
549 }
550 thread->set_vframe_array_head(NULL);
551
552 // Free the previous UnrollBlock
553 vframeArray* old_array = thread->vframe_array_last();
554 thread->set_vframe_array_last(array);
555
556 if (old_array != NULL) {
557 UnrollBlock* old_info = old_array->unroll_block();
558 old_array->set_unroll_block(NULL);
559 delete old_info;
560 delete old_array;
561 }
562
563 // Deallocate any resources created in this routine and any ResourceObjs allocated
564 // inside the vframeArray (StackValueCollections)
565
566 delete thread->deopt_mark();
567 thread->set_deopt_mark(NULL);
568 thread->set_deopt_compiled_method(NULL);
569
570
571 if (JvmtiExport::can_pop_frame()) {
572#ifndef CC_INTERP
573 // Regardless of whether we entered this routine with the pending
574 // popframe condition bit set, we should always clear it now
575 thread->clear_popframe_condition();
576#else
577 // C++ interpreter will clear has_pending_popframe when it enters
578 // with method_resume. For deopt_resume2 we clear it now.
579 if (thread->popframe_forcing_deopt_reexecution())
580 thread->clear_popframe_condition();
581#endif /* CC_INTERP */
582 }
583
584 // unpack_frames() is called at the end of the deoptimization handler
585 // and (in C2) at the end of the uncommon trap handler. Note this fact
586 // so that an asynchronous stack walker can work again. This counter is
587 // incremented at the beginning of fetch_unroll_info() and (in C2) at
588 // the beginning of uncommon_trap().
589 thread->dec_in_deopt_handler();
590}
591
592// Moved from cpu directories because none of the cpus has callee save values.
593// If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
594void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
595
596 // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
597 // the days we had adapter frames. When we deoptimize a situation where a
598 // compiled caller calls a compiled callee, the caller will have registers it expects
599 // to survive the call to the callee. If we deoptimize the callee, the only
600 // way we can restore these registers is to have the oldest interpreter
601 // frame that we create restore these values. That is what this routine
602 // will accomplish.
603
604 // At the moment we have modified c2 to not have any callee save registers
605 // so this problem does not exist and this routine is just a place holder.
606
607 assert(f->is_interpreted_frame(), "must be interpreted");
608}
609
610// Return BasicType of value being returned
611JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
612
613 // We are already active in the special DeoptResourceMark; any ResourceObj's we
614 // allocate will be freed at the end of the routine.
615
616 // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
617 // but makes the entry a little slower. There is however a little dance we have to
618 // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
619 ResetNoHandleMark rnhm; // No-op in release/product versions
620 HandleMark hm;
621
622 frame stub_frame = thread->last_frame();
623
624 // Since the frame to unpack is the top frame of this thread, the vframe_array_head
625 // must point to the vframeArray for the unpack frame.
626 vframeArray* array = thread->vframe_array_head();
627
628#ifndef PRODUCT
629 if (TraceDeoptimization) {
630 ttyLocker ttyl;
631 tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
632 p2i(thread), p2i(array), exec_mode);
633 }
634#endif
635 Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
636 p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
637
638 UnrollBlock* info = array->unroll_block();
639
640 // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
641 array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
642
643 BasicType bt = info->return_type();
644
645 // If we have an exception pending, claim that the return type is an oop
646 // so the deopt_blob does not overwrite the exception_oop.
647
648 if (exec_mode == Unpack_exception)
649 bt = T_OBJECT;
650
651 // Cleanup thread deopt data
652 cleanup_deopt_info(thread, array);
653
654#ifndef PRODUCT
655 if (VerifyStack) {
656 ResourceMark res_mark;
657 // Clear pending exception to not break verification code (restored afterwards)
658 PRESERVE_EXCEPTION_MARK;
659
660 thread->validate_frame_layout();
661
662 // Verify that the just-unpacked frames match the interpreter's
663 // notions of expression stack and locals
664 vframeArray* cur_array = thread->vframe_array_last();
665 RegisterMap rm(thread, false);
666 rm.set_include_argument_oops(false);
667 bool is_top_frame = true;
668 int callee_size_of_parameters = 0;
669 int callee_max_locals = 0;
670 for (int i = 0; i < cur_array->frames(); i++) {
671 vframeArrayElement* el = cur_array->element(i);
672 frame* iframe = el->iframe();
673 guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
674
675 // Get the oop map for this bci
676 InterpreterOopMap mask;
677 int cur_invoke_parameter_size = 0;
678 bool try_next_mask = false;
679 int next_mask_expression_stack_size = -1;
680 int top_frame_expression_stack_adjustment = 0;
681 methodHandle mh(thread, iframe->interpreter_frame_method());
682 OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
683 BytecodeStream str(mh, iframe->interpreter_frame_bci());
684 int max_bci = mh->code_size();
685 // Get to the next bytecode if possible
686 assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
687 // Check to see if we can grab the number of outgoing arguments
688 // at an uncommon trap for an invoke (where the compiler
689 // generates debug info before the invoke has executed)
690 Bytecodes::Code cur_code = str.next();
691 if (Bytecodes::is_invoke(cur_code)) {
692 Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
693 cur_invoke_parameter_size = invoke.size_of_parameters();
694 if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
695 callee_size_of_parameters++;
696 }
697 }
698 if (str.bci() < max_bci) {
699 Bytecodes::Code next_code = str.next();
700 if (next_code >= 0) {
701 // The interpreter oop map generator reports results before
702 // the current bytecode has executed except in the case of
703 // calls. It seems to be hard to tell whether the compiler
704 // has emitted debug information matching the "state before"
705 // a given bytecode or the state after, so we try both
706 if (!Bytecodes::is_invoke(cur_code) && cur_code != Bytecodes::_athrow) {
707 // Get expression stack size for the next bytecode
708 InterpreterOopMap next_mask;
709 OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
710 next_mask_expression_stack_size = next_mask.expression_stack_size();
711 if (Bytecodes::is_invoke(next_code)) {
712 Bytecode_invoke invoke(mh, str.bci());
713 next_mask_expression_stack_size += invoke.size_of_parameters();
714 }
715 // Need to subtract off the size of the result type of
716 // the bytecode because this is not described in the
717 // debug info but returned to the interpreter in the TOS
718 // caching register
719 BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
720 if (bytecode_result_type != T_ILLEGAL) {
721 top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
722 }
723 assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be non-negative");
724 try_next_mask = true;
725 }
726 }
727 }
728
729 // Verify stack depth and oops in frame
730 // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
731 if (!(
732 /* SPARC */
733 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
734 /* x86 */
735 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
736 (try_next_mask &&
737 (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
738 top_frame_expression_stack_adjustment))) ||
739 (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
740 (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
741 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
742 )) {
743 {
744 ttyLocker ttyl;
745
746 // Print out some information that will help us debug the problem
747 tty->print_cr("Wrong number of expression stack elements during deoptimization");
748 tty->print_cr(" Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
749 tty->print_cr(" Fabricated interpreter frame had %d expression stack elements",
750 iframe->interpreter_frame_expression_stack_size());
751 tty->print_cr(" Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
752 tty->print_cr(" try_next_mask = %d", try_next_mask);
753 tty->print_cr(" next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
754 tty->print_cr(" callee_size_of_parameters = %d", callee_size_of_parameters);
755 tty->print_cr(" callee_max_locals = %d", callee_max_locals);
756 tty->print_cr(" top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
757 tty->print_cr(" exec_mode = %d", exec_mode);
758 tty->print_cr(" cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
759 tty->print_cr(" Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
760 tty->print_cr(" Interpreted frames:");
761 for (int k = 0; k < cur_array->frames(); k++) {
762 vframeArrayElement* el = cur_array->element(k);
763 tty->print_cr(" %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
764 }
765 cur_array->print_on_2(tty);
766 } // release tty lock before calling guarantee
767 guarantee(false, "wrong number of expression stack elements during deopt");
768 }
769 VerifyOopClosure verify;
770 iframe->oops_interpreted_do(&verify, &rm, false);
771 callee_size_of_parameters = mh->size_of_parameters();
772 callee_max_locals = mh->max_locals();
773 is_top_frame = false;
774 }
775 }
776#endif /* !PRODUCT */
777
778
779 return bt;
780JRT_END
781
782
783int Deoptimization::deoptimize_dependents() {
784 Threads::deoptimized_wrt_marked_nmethods();
785 return 0;
786}
787
788Deoptimization::DeoptAction Deoptimization::_unloaded_action
789 = Deoptimization::Action_reinterpret;
790
791
792
793#if INCLUDE_JVMCI || INCLUDE_AOT
794template<typename CacheType>
795class BoxCacheBase : public CHeapObj<mtCompiler> {
796protected:
797 static InstanceKlass* find_cache_klass(Symbol* klass_name, TRAPS) {
798 ResourceMark rm;
799 char* klass_name_str = klass_name->as_C_string();
800 Klass* k = SystemDictionary::find(klass_name, Handle(), Handle(), THREAD);
801 guarantee(k != NULL, "%s must be loaded", klass_name_str);
802 InstanceKlass* ik = InstanceKlass::cast(k);
803 guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
804 CacheType::compute_offsets(ik);
805 return ik;
806 }
807};
808
809template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache : public BoxCacheBase<CacheType> {
810 PrimitiveType _low;
811 PrimitiveType _high;
812 jobject _cache;
813protected:
814 static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
815 BoxCache(Thread* thread) {
816 InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(CacheType::symbol(), thread);
817 objArrayOop cache = CacheType::cache(ik);
818 assert(cache->length() > 0, "Empty cache");
819 _low = BoxType::value(cache->obj_at(0));
820 _high = _low + cache->length() - 1;
821 _cache = JNIHandles::make_global(Handle(thread, cache));
822 }
823 ~BoxCache() {
824 JNIHandles::destroy_global(_cache);
825 }
826public:
827 static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
828 if (_singleton == NULL) {
829 BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
830 if (!Atomic::replace_if_null(s, &_singleton)) {
831 delete s;
832 }
833 }
834 return _singleton;
835 }
836 oop lookup(PrimitiveType value) {
837 if (_low <= value && value <= _high) {
838 int offset = value - _low;
839 return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
840 }
841 return NULL;
842 }
843 oop lookup_raw(intptr_t raw_value) {
844 // Have to cast to avoid little/big-endian problems.
845 if (sizeof(PrimitiveType) > sizeof(jint)) {
846 jlong value = (jlong)raw_value;
847 return lookup(value);
848 }
849 PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
850 return lookup(value);
851 }
852};
853
854typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
855typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
856typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character> CharacterBoxCache;
857typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
858typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;
859
860template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = NULL;
861template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = NULL;
862template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = NULL;
863template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = NULL;
864template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = NULL;
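// Typical use (see get_cached_box() below): IntegerBoxCache::singleton(thread)->lookup_raw(raw)
// returns the cached java.lang.Integer for values inside the cache range and NULL
// otherwise, in which case the caller falls back to allocating a fresh box instance.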
865
866class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
867 jobject _true_cache;
868 jobject _false_cache;
869protected:
870 static BooleanBoxCache *_singleton;
871 BooleanBoxCache(Thread *thread) {
872 InstanceKlass* ik = find_cache_klass(java_lang_Boolean::symbol(), thread);
873 _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik)));
874 _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik)));
875 }
876 ~BooleanBoxCache() {
877 JNIHandles::destroy_global(_true_cache);
878 JNIHandles::destroy_global(_false_cache);
879 }
880public:
881 static BooleanBoxCache* singleton(Thread* thread) {
882 if (_singleton == NULL) {
883 BooleanBoxCache* s = new BooleanBoxCache(thread);
884 if (!Atomic::replace_if_null(s, &_singleton)) {
885 delete s;
886 }
887 }
888 return _singleton;
889 }
890 oop lookup_raw(intptr_t raw_value) {
891 // Have to cast to avoid little/big-endian problems.
892 jboolean value = (jboolean)*((jint*)&raw_value);
893 return lookup(value);
894 }
895 oop lookup(jboolean value) {
896 if (value != 0) {
897 return JNIHandles::resolve_non_null(_true_cache);
898 }
899 return JNIHandles::resolve_non_null(_false_cache);
900 }
901};
902
903BooleanBoxCache* BooleanBoxCache::_singleton = NULL;
904
905oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, TRAPS) {
906 Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
907 BasicType box_type = SystemDictionary::box_klass_type(k);
908 if (box_type != T_OBJECT) {
909 StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
910 switch(box_type) {
911 case T_INT: return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
912 case T_CHAR: return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
913 case T_SHORT: return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
914 case T_BYTE: return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
915 case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
916 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
917 default:;
918 }
919 }
920 return NULL;
921}
922#endif // INCLUDE_JVMCI || INCLUDE_AOT
923
924#if COMPILER2_OR_JVMCI
925bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
926 Handle pending_exception(THREAD, thread->pending_exception());
927 const char* exception_file = thread->exception_file();
928 int exception_line = thread->exception_line();
929 thread->clear_pending_exception();
930
931 bool failures = false;
932
933 for (int i = 0; i < objects->length(); i++) {
934 assert(objects->at(i)->is_object(), "invalid debug information");
935 ObjectValue* sv = (ObjectValue*) objects->at(i);
936
937 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
938 oop obj = NULL;
939
940 if (k->is_instance_klass()) {
941#if INCLUDE_JVMCI || INCLUDE_AOT
942 CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
943 if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
944 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
945 obj = get_cached_box(abv, fr, reg_map, THREAD);
946 if (obj != NULL) {
947 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
948 abv->set_cached(true);
949 }
950 }
951#endif // INCLUDE_JVMCI || INCLUDE_AOT
952 InstanceKlass* ik = InstanceKlass::cast(k);
953 if (obj == NULL) {
954 obj = ik->allocate_instance(THREAD);
955 }
956 } else if (k->is_typeArray_klass()) {
957 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
958 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
959 int len = sv->field_size() / type2size[ak->element_type()];
960 obj = ak->allocate(len, THREAD);
961 } else if (k->is_objArray_klass()) {
962 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
963 obj = ak->allocate(sv->field_size(), THREAD);
964 }
965
966 if (obj == NULL) {
967 failures = true;
968 }
969
970 assert(sv->value().is_null(), "redundant reallocation");
971 assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
972 CLEAR_PENDING_EXCEPTION;
973 sv->set_value(obj);
974 }
975
976 if (failures) {
977 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
978 } else if (pending_exception.not_null()) {
979 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
980 }
981
982 return failures;
983}
984
985// restore elements of an eliminated type array
986void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
987 int index = 0;
988 intptr_t val;
989
990 for (int i = 0; i < sv->field_size(); i++) {
991 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
992 switch(type) {
993 case T_LONG: case T_DOUBLE: {
994 assert(value->type() == T_INT, "Agreement.");
995 StackValue* low =
996 StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
997#ifdef _LP64
998 jlong res = (jlong)low->get_int();
999#else
1000#ifdef SPARC
1001 // For SPARC we have to swap high and low words.
1002 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
1003#else
1004 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1005#endif //SPARC
1006#endif
1007 obj->long_at_put(index, res);
1008 break;
1009 }
1010
1011 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1012 case T_INT: case T_FLOAT: { // 4 bytes.
1013 assert(value->type() == T_INT, "Agreement.");
1014 bool big_value = false;
1015 if (i + 1 < sv->field_size() && type == T_INT) {
1016 if (sv->field_at(i)->is_location()) {
1017 Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
1018 if (type == Location::dbl || type == Location::lng) {
1019 big_value = true;
1020 }
1021 } else if (sv->field_at(i)->is_constant_int()) {
1022 ScopeValue* next_scope_field = sv->field_at(i + 1);
1023 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1024 big_value = true;
1025 }
1026 }
1027 }
1028
1029 if (big_value) {
1030 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
1031 #ifdef _LP64
1032 jlong res = (jlong)low->get_int();
1033 #else
1034 #ifdef SPARC
1035 // For SPARC we have to swap high and low words.
1036 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
1037 #else
1038 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1039 #endif //SPARC
1040 #endif
1041 obj->int_at_put(index, (jint)*((jint*)&res));
1042 obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
1043 } else {
1044 val = value->get_int();
1045 obj->int_at_put(index, (jint)*((jint*)&val));
1046 }
1047 break;
1048 }
1049
1050 case T_SHORT:
1051 assert(value->type() == T_INT, "Agreement.");
1052 val = value->get_int();
1053 obj->short_at_put(index, (jshort)*((jint*)&val));
1054 break;
1055
1056 case T_CHAR:
1057 assert(value->type() == T_INT, "Agreement.");
1058 val = value->get_int();
1059 obj->char_at_put(index, (jchar)*((jint*)&val));
1060 break;
1061
1062 case T_BYTE:
1063 assert(value->type() == T_INT, "Agreement.");
1064 val = value->get_int();
1065 obj->byte_at_put(index, (jbyte)*((jint*)&val));
1066 break;
1067
1068 case T_BOOLEAN:
1069 assert(value->type() == T_INT, "Agreement.");
1070 val = value->get_int();
1071 obj->bool_at_put(index, (jboolean)*((jint*)&val));
1072 break;
1073
1074 default:
1075 ShouldNotReachHere();
1076 }
1077 index++;
1078 }
1079}
1080
1081
1082// restore fields of an eliminated object array
1083void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1084 for (int i = 0; i < sv->field_size(); i++) {
1085 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1086 assert(value->type() == T_OBJECT, "object element expected");
1087 obj->obj_at_put(i, value->get_obj()());
1088 }
1089}
1090
1091class ReassignedField {
1092public:
1093 int _offset;
1094 BasicType _type;
1095public:
1096 ReassignedField() {
1097 _offset = 0;
1098 _type = T_ILLEGAL;
1099 }
1100};
1101
1102int compare(ReassignedField* left, ReassignedField* right) {
1103 return left->_offset - right->_offset;
1104}
1105
1106// Restore fields of an eliminated instance object using the same field order
1107// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1108static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1109 if (klass->superklass() != NULL) {
1110 svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal);
1111 }
1112
1113 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1114 for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1115 if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1116 ReassignedField field;
1117 field._offset = fs.offset();
1118 field._type = FieldType::basic_type(fs.signature());
1119 fields->append(field);
1120 }
1121 }
1122 fields->sort(compare);
1123 for (int i = 0; i < fields->length(); i++) {
1124 intptr_t val;
1125 ScopeValue* scope_field = sv->field_at(svIndex);
1126 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1127 int offset = fields->at(i)._offset;
1128 BasicType type = fields->at(i)._type;
1129 switch (type) {
1130 case T_OBJECT: case T_ARRAY:
1131 assert(value->type() == T_OBJECT, "Agreement.");
1132 obj->obj_field_put(offset, value->get_obj()());
1133 break;
1134
1135 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1136 case T_INT: case T_FLOAT: { // 4 bytes.
1137 assert(value->type() == T_INT, "Agreement.");
1138 bool big_value = false;
1139 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1140 if (scope_field->is_location()) {
1141 Location::Type type = ((LocationValue*) scope_field)->location().type();
1142 if (type == Location::dbl || type == Location::lng) {
1143 big_value = true;
1144 }
1145 }
1146 if (scope_field->is_constant_int()) {
1147 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1148 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1149 big_value = true;
1150 }
1151 }
1152 }
1153
1154 if (big_value) {
1155 i++;
1156 assert(i < fields->length(), "second T_INT field needed");
1157 assert(fields->at(i)._type == T_INT, "T_INT field needed");
1158 } else {
1159 val = value->get_int();
1160 obj->int_field_put(offset, (jint)*((jint*)&val));
1161 break;
1162 }
1163 }
1164 /* no break */
1165
1166 case T_LONG: case T_DOUBLE: {
1167 assert(value->type() == T_INT, "Agreement.");
1168 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
1169#ifdef _LP64
1170 jlong res = (jlong)low->get_int();
1171#else
1172#ifdef SPARC
1173 // For SPARC we have to swap high and low words.
1174 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
1175#else
1176 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1177#endif //SPARC
1178#endif
1179 obj->long_field_put(offset, res);
1180 break;
1181 }
1182
1183 case T_SHORT:
1184 assert(value->type() == T_INT, "Agreement.");
1185 val = value->get_int();
1186 obj->short_field_put(offset, (jshort)*((jint*)&val));
1187 break;
1188
1189 case T_CHAR:
1190 assert(value->type() == T_INT, "Agreement.");
1191 val = value->get_int();
1192 obj->char_field_put(offset, (jchar)*((jint*)&val));
1193 break;
1194
1195 case T_BYTE:
1196 assert(value->type() == T_INT, "Agreement.");
1197 val = value->get_int();
1198 obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1199 break;
1200
1201 case T_BOOLEAN:
1202 assert(value->type() == T_INT, "Agreement.");
1203 val = value->get_int();
1204 obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1205 break;
1206
1207 default:
1208 ShouldNotReachHere();
1209 }
1210 svIndex++;
1211 }
1212 return svIndex;
1213}
1214
1215// restore fields of all eliminated objects and arrays
1216void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1217 for (int i = 0; i < objects->length(); i++) {
1218 ObjectValue* sv = (ObjectValue*) objects->at(i);
1219 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1220 Handle obj = sv->value();
1221 assert(obj.not_null() || realloc_failures, "reallocation was missed");
1222 if (PrintDeoptimizationDetails) {
1223 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1224 }
1225 if (obj.is_null()) {
1226 continue;
1227 }
1228#if INCLUDE_JVMCI || INCLUDE_AOT
1229 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1230 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1231 continue;
1232 }
1233#endif // INCLUDE_JVMCI || INCLUDE_AOT
1234 if (k->is_instance_klass()) {
1235 InstanceKlass* ik = InstanceKlass::cast(k);
1236 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1237 } else if (k->is_typeArray_klass()) {
1238 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1239 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1240 } else if (k->is_objArray_klass()) {
1241 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1242 }
1243 }
1244}
1245
1246
1247// relock objects for which synchronization was eliminated
1248void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
1249 for (int i = 0; i < monitors->length(); i++) {
1250 MonitorInfo* mon_info = monitors->at(i);
1251 if (mon_info->eliminated()) {
1252 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1253 if (!mon_info->owner_is_scalar_replaced()) {
1254 Handle obj(thread, mon_info->owner());
1255 markOop mark = obj->mark();
1256 if (UseBiasedLocking && mark->has_bias_pattern()) {
1257 // Newly allocated objects may have the mark set to anonymously biased.
1258 // Also, the deoptimized method may have called methods with synchronization
1259 // where the thread-local object is biased to the current thread.
1260 assert(mark->is_biased_anonymously() ||
1261 mark->biased_locker() == thread, "should be locked to current thread");
1262 // Reset mark word to unbiased prototype.
1263 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
1264 obj->set_mark(unbiased_prototype);
1265 }
1266 BasicLock* lock = mon_info->lock();
1267 ObjectSynchronizer::slow_enter(obj, lock, thread);
1268 assert(mon_info->owner()->is_locked(), "object must be locked now");
1269 }
1270 }
1271 }
1272}
1273
1274
1275#ifndef PRODUCT
1276// print information about reallocated objects
1277void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
1278 fieldDescriptor fd;
1279
1280 for (int i = 0; i < objects->length(); i++) {
1281 ObjectValue* sv = (ObjectValue*) objects->at(i);
1282 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1283 Handle obj = sv->value();
1284
1285 tty->print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
1286 k->print_value();
1287 assert(obj.not_null() || realloc_failures, "reallocation was missed");
1288 if (obj.is_null()) {
1289 tty->print(" allocation failed");
1290 } else {
1291 tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
1292 }
1293 tty->cr();
1294
1295 if (Verbose && !obj.is_null()) {
1296 k->oop_print_on(obj(), tty);
1297 }
1298 }
1299}
1300#endif
1301#endif // COMPILER2_OR_JVMCI
1302
1303vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1304 Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1305
1306#ifndef PRODUCT
1307 if (PrintDeoptimizationDetails) {
1308 ttyLocker ttyl;
1309 tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
1310 fr.print_on(tty);
1311 tty->print_cr(" Virtual frames (innermost first):");
1312 for (int index = 0; index < chunk->length(); index++) {
1313 compiledVFrame* vf = chunk->at(index);
1314 tty->print(" %2d - ", index);
1315 vf->print_value();
1316 int bci = chunk->at(index)->raw_bci();
1317 const char* code_name;
1318 if (bci == SynchronizationEntryBCI) {
1319 code_name = "sync entry";
1320 } else {
1321 Bytecodes::Code code = vf->method()->code_at(bci);
1322 code_name = Bytecodes::name(code);
1323 }
1324 tty->print(" - %s", code_name);
1325 tty->print_cr(" @ bci %d ", bci);
1326 if (Verbose) {
1327 vf->print();
1328 tty->cr();
1329 }
1330 }
1331 }
1332#endif
1333
1334 // Register map for next frame (used for stack crawl). We capture
1335 // the state of the deopt'ing frame's caller. Thus if we need to
1336 // stuff a C2I adapter we can properly fill in the callee-save
1337 // register locations.
1338 frame caller = fr.sender(reg_map);
1339 int frame_size = caller.sp() - fr.sp();
1340
1341 frame sender = caller;
1342
1343 // Since the Java thread being deoptimized will eventually adjust its own stack,
1344 // the vframeArray containing the unpacking information is allocated in the C heap.
1345 // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1346 vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
1347
1348 // Compare the vframeArray to the collected vframes
1349 assert(array->structural_compare(thread, chunk), "just checking");
1350
1351#ifndef PRODUCT
1352 if (PrintDeoptimizationDetails) {
1353 ttyLocker ttyl;
1354 tty->print_cr(" Created vframeArray " INTPTR_FORMAT, p2i(array));
1355 }
1356#endif // PRODUCT
1357
1358 return array;
1359}
1360
1361#if COMPILER2_OR_JVMCI
1362void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1363 // Reallocation of some scalar replaced objects failed. Record
1364 // that we need to pop all the interpreter frames for the
1365 // deoptimized compiled frame.
1366 assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1367 thread->set_frames_to_pop_failed_realloc(array->frames());
1368 // Unlock all monitors here otherwise the interpreter will see a
1369 // mix of locked and unlocked monitors (because of failed
1370 // reallocations of synchronized objects) and be confused.
1371 for (int i = 0; i < array->frames(); i++) {
1372 MonitorChunk* monitors = array->element(i)->monitors();
1373 if (monitors != NULL) {
1374 for (int j = 0; j < monitors->number_of_monitors(); j++) {
1375 BasicObjectLock* src = monitors->at(j);
1376 if (src->obj() != NULL) {
1377 ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
1378 }
1379 }
1380 array->element(i)->free_monitors(thread);
1381#ifdef ASSERT
1382 array->element(i)->set_removed_monitors();
1383#endif
1384 }
1385 }
1386}
1387#endif
1388
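// Collect the owner objects of all non-eliminated, owned monitors in the given
// scope so that their biases can be revoked before deoptimization.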
1389static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
1390 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
1391 Thread* thread = Thread::current();
1392 for (int i = 0; i < monitors->length(); i++) {
1393 MonitorInfo* mon_info = monitors->at(i);
1394 if (!mon_info->eliminated() && mon_info->owner() != NULL) {
1395 objects_to_revoke->append(Handle(thread, mon_info->owner()));
1396 }
1397 }
1398}
1399
1400
1401void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
1402 if (!UseBiasedLocking) {
1403 return;
1404 }
1405
1406 GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1407
1408 // Unfortunately we don't have a RegisterMap available in most of
1409 // the places we want to call this routine so we need to walk the
1410 // stack again to update the register map.
1411 if (map == NULL || !map->update_map()) {
1412 StackFrameStream sfs(thread, true);
1413 bool found = false;
1414 while (!found && !sfs.is_done()) {
1415 frame* cur = sfs.current();
1416 sfs.next();
1417 found = cur->id() == fr.id();
1418 }
1419 assert(found, "frame to be deoptimized not found on target thread's stack");
1420 map = sfs.register_map();
1421 }
1422
1423 vframe* vf = vframe::new_vframe(&fr, map, thread);
1424 compiledVFrame* cvf = compiledVFrame::cast(vf);
1425 // Revoke monitors' biases in all scopes
1426 while (!cvf->is_top()) {
1427 collect_monitors(cvf, objects_to_revoke);
1428 cvf = compiledVFrame::cast(cvf->sender());
1429 }
1430 collect_monitors(cvf, objects_to_revoke);
1431
1432 if (SafepointSynchronize::is_at_safepoint()) {
1433 BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1434 } else {
1435 BiasedLocking::revoke(objects_to_revoke);
1436 }
1437}
1438
1439
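// Mark a single compiled frame for deoptimization: record statistics, emit an
// optional compilation-log entry, and patch the frame so that returning to it
// enters the deoptimization handler instead of the compiled code.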
1440void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1441 assert(fr.can_be_deoptimized(), "checking frame type");
1442
1443 gather_statistics(reason, Action_none, Bytecodes::_illegal);
1444
1445 if (LogCompilation && xtty != NULL) {
1446 CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1447 assert(cm != NULL, "only compiled methods can deopt");
1448
1449 ttyLocker ttyl;
1450 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1451 cm->log_identity(xtty);
1452 xtty->end_head();
1453 for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1454 xtty->begin_elem("jvms bci='%d'", sd->bci());
1455 xtty->method(sd->method());
1456 xtty->end_elem();
1457 if (sd->is_top()) break;
1458 }
1459 xtty->tail("deoptimized");
1460 }
1461
1462 // Patch the compiled method so that when execution returns to it we will
1463 // deopt the execution state and return to the interpreter.
1464 fr.deoptimize(thread);
1465}
1466
1467void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
1468 deoptimize(thread, fr, map, Reason_constraint);
1469}
1470
1471void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
1472 // Deoptimize only if the frame comes from compiled code.
1473 // Do not deoptimize a frame that has already been patched
1474 // during the execution of the loops below.
1475 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1476 return;
1477 }
1478 ResourceMark rm;
1479 DeoptimizationMarker dm;
1480 if (UseBiasedLocking) {
1481 revoke_biases_of_monitors(thread, fr, map);
1482 }
1483 deoptimize_single_frame(thread, fr, reason);
1484
1485}
1486
1487#if INCLUDE_JVMCI
1488address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1489 // there is no exception handler for this pc => deoptimize
1490 cm->make_not_entrant();
1491
1492 // Use Deoptimization::deoptimize for all of its side-effects:
1493 // revoking biases of monitors, gathering traps statistics, logging...
1494 // it also patches the return pc but we do not care about that
1495 // since we return a continuation to the deopt_blob below.
1496 JavaThread* thread = JavaThread::current();
1497 RegisterMap reg_map(thread, UseBiasedLocking);
1498 frame runtime_frame = thread->last_frame();
1499 frame caller_frame = runtime_frame.sender(&reg_map);
1500 assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
1501 Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
1502
1503 MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
1504 if (trap_mdo != NULL) {
1505 trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
1506 }
1507
1508 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
1509}
1510#endif
1511
1512void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1513 assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
1514 "can only deoptimize other thread at a safepoint");
1515 // Compute frame and register map based on thread and sp.
1516 RegisterMap reg_map(thread, UseBiasedLocking);
1517 frame fr = thread->last_frame();
1518 while (fr.id() != id) {
1519 fr = fr.sender(&reg_map);
1520 }
1521 deoptimize(thread, fr, &reg_map, reason);
1522}
1523
1524
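// Deoptimize the frame identified by 'id' on the given thread. If the target
// is the current thread the frame is patched directly; otherwise a
// VM_DeoptimizeFrame operation is scheduled so the patching happens at a
// safepoint.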
1525void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1526 if (thread == Thread::current()) {
1527 Deoptimization::deoptimize_frame_internal(thread, id, reason);
1528 } else {
1529 VM_DeoptimizeFrame deopt(thread, id, reason);
1530 VMThread::execute(&deopt);
1531 }
1532}
1533
1534void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1535 deoptimize_frame(thread, id, Reason_constraint);
1536}
1537
1538// JVMTI PopFrame support
1539JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
1540{
1541 thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
1542}
1543JRT_END
1544
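// Return the MethodData* for the given method, optionally building one if it
// does not exist yet. An OutOfMemoryError during allocation is swallowed and
// NULL is returned in that case.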
1545MethodData*
1546Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m,
1547 bool create_if_missing) {
1548 Thread* THREAD = thread;
1549 MethodData* mdo = m()->method_data();
1550 if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1551 // Build an MDO. Ignore errors like OutOfMemory;
1552 // that simply means we won't have an MDO to update.
1553 Method::build_interpreter_method_data(m, THREAD);
1554 if (HAS_PENDING_EXCEPTION) {
1555 assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
1556 CLEAR_PENDING_EXCEPTION;
1557 }
1558 mdo = m()->method_data();
1559 }
1560 return mdo;
1561}
1562
1563#if COMPILER2_OR_JVMCI
1564void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
1565 // in case of an unresolved klass entry, load the class.
1566 if (constant_pool->tag_at(index).is_unresolved_klass()) {
1567 Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
1568 return;
1569 }
1570
1571 if (!constant_pool->tag_at(index).is_symbol()) return;
1572
1573 Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
1574 Symbol* symbol = constant_pool->symbol_at(index);
1575
1576 // class name?
1577 if (symbol->char_at(0) != '(') {
1578 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1579 SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
1580 return;
1581 }
1582
1583 // then it must be a signature!
1584 ResourceMark rm(THREAD);
1585 for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
1586 if (ss.is_object()) {
1587 Symbol* class_name = ss.as_symbol();
1588 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1589 SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
1590 }
1591 }
1592}
1593
1594
1595void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) {
1596 EXCEPTION_MARK;
1597 load_class_by_index(constant_pool, index, THREAD);
1598 if (HAS_PENDING_EXCEPTION) {
1599 // Exception happened during classloading. We ignore the exception here, since it
1600 // will be rethrown once the current activation is deoptimized and the
1601 // interpreter re-executes the bytecode.
1602 CLEAR_PENDING_EXCEPTION;
1603 // Class loading called java code which may have caused a stack
1604 // overflow. If the exception was thrown right before the return
1605 // to the runtime, the stack is no longer guarded. Reguard the
1606 // stack; otherwise, if we return to the uncommon trap blob and the
1607 // stack bang causes a stack overflow, we crash.
1608 assert(THREAD->is_Java_thread(), "only a java thread can be here");
1609 JavaThread* thread = (JavaThread*)THREAD;
1610 bool guard_pages_enabled = thread->stack_guards_enabled();
1611 if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
1612 assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
1613 }
1614}
1615
1616JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
1617 HandleMark hm;
1618
1619 // uncommon_trap() is called at the beginning of the uncommon trap
1620 // handler. Note this fact before we start generating temporary frames
1621 // that can confuse an asynchronous stack walker. This counter is
1622 // decremented at the end of unpack_frames().
1623 thread->inc_in_deopt_handler();
1624
1625 // We need to update the map if we have biased locking.
1626#if INCLUDE_JVMCI
1627 // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
1628 RegisterMap reg_map(thread, true);
1629#else
1630 RegisterMap reg_map(thread, UseBiasedLocking);
1631#endif
1632 frame stub_frame = thread->last_frame();
1633 frame fr = stub_frame.sender(&reg_map);
1634 // Make sure the calling nmethod is not getting deoptimized and removed
1635 // before we are done with it.
1636 nmethodLocker nl(fr.pc());
1637
1638 // Log a message
1639 Events::log_deopt_message(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1640 trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1641
1642 {
1643 ResourceMark rm;
1644
1645 // Revoke biases of any monitors in the frame to ensure we can migrate them
1646 revoke_biases_of_monitors(thread, fr, &reg_map);
1647
1648 DeoptReason reason = trap_request_reason(trap_request);
1649 DeoptAction action = trap_request_action(trap_request);
1650#if INCLUDE_JVMCI
1651 int debug_id = trap_request_debug_id(trap_request);
1652#endif
1653 jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
1654
1655 vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
1656 compiledVFrame* cvf = compiledVFrame::cast(vf);
1657
1658 CompiledMethod* nm = cvf->code();
1659
1660 ScopeDesc* trap_scope = cvf->scope();
1661
1662 if (TraceDeoptimization) {
1663 ttyLocker ttyl;
1664 tty->print_cr(" bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1665#if INCLUDE_JVMCI
1666 , debug_id
1667#endif
1668 );
1669 }
1670
1671 methodHandle trap_method = trap_scope->method();
1672 int trap_bci = trap_scope->bci();
1673#if INCLUDE_JVMCI
1674 jlong speculation = thread->pending_failed_speculation();
1675 if (nm->is_compiled_by_jvmci() && nm->is_nmethod()) { // Exclude AOTed methods
1676 nm->as_nmethod()->update_speculation(thread);
1677 } else {
1678 assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
1679 }
1680
1681 if (trap_bci == SynchronizationEntryBCI) {
1682 trap_bci = 0;
1683 thread->set_pending_monitorenter(true);
1684 }
1685
1686 if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1687 thread->set_pending_transfer_to_interpreter(true);
1688 }
1689#endif
1690
1691 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);
1692 // Record this event in the histogram.
1693 gather_statistics(reason, action, trap_bc);
1694
1695 // Ensure that we can record deopt. history:
1696 // Need MDO to record RTM code generation state.
1697 bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
1698
1699 methodHandle profiled_method;
1700#if INCLUDE_JVMCI
1701 if (nm->is_compiled_by_jvmci()) {
1702 profiled_method = nm->method();
1703 } else {
1704 profiled_method = trap_method;
1705 }
1706#else
1707 profiled_method = trap_method;
1708#endif
1709
1710 MethodData* trap_mdo =
1711 get_method_data(thread, profiled_method, create_if_missing);
1712
1713 // Log a message
1714 Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s",
1715 trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()),
1716 trap_method->name_and_sig_as_C_string(), trap_bci, nm->compiler_name());
1717
1718 // Print a bunch of diagnostics, if requested.
1719 if (TraceDeoptimization || LogCompilation) {
1720 ResourceMark rm;
1721 ttyLocker ttyl;
1722 char buf[100];
1723 if (xtty != NULL) {
1724 xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
1725 os::current_thread_id(),
1726 format_trap_request(buf, sizeof(buf), trap_request));
1727#if INCLUDE_JVMCI
1728 if (speculation != 0) {
1729 xtty->print(" speculation='" JLONG_FORMAT "'", speculation);
1730 }
1731#endif
1732 nm->log_identity(xtty);
1733 }
1734 Symbol* class_name = NULL;
1735 bool unresolved = false;
1736 if (unloaded_class_index >= 0) {
1737 constantPoolHandle constants (THREAD, trap_method->constants());
1738 if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1739 class_name = constants->klass_name_at(unloaded_class_index);
1740 unresolved = true;
1741 if (xtty != NULL)
1742 xtty->print(" unresolved='1'");
1743 } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1744 class_name = constants->symbol_at(unloaded_class_index);
1745 }
1746 if (xtty != NULL)
1747 xtty->name(class_name);
1748 }
1749 if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
1750 // Dump the relevant MDO state.
1751 // This is the deopt count for the current reason, any previous
1752 // reasons or recompiles seen at this point.
1753 int dcnt = trap_mdo->trap_count(reason);
1754 if (dcnt != 0)
1755 xtty->print(" count='%d'", dcnt);
1756 ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1757 int dos = (pdata == NULL)? 0: pdata->trap_state();
1758 if (dos != 0) {
1759 xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1760 if (trap_state_is_recompiled(dos)) {
1761 int recnt2 = trap_mdo->overflow_recompile_count();
1762 if (recnt2 != 0)
1763 xtty->print(" recompiles2='%d'", recnt2);
1764 }
1765 }
1766 }
1767 if (xtty != NULL) {
1768 xtty->stamp();
1769 xtty->end_head();
1770 }
1771 if (TraceDeoptimization) { // make noise on the tty
1772 tty->print("Uncommon trap occurred in");
1773 nm->method()->print_short_name(tty);
1774 tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
1775#if INCLUDE_JVMCI
1776 if (nm->is_nmethod()) {
1777 const char* installed_code_name = nm->as_nmethod()->jvmci_name();
1778 if (installed_code_name != NULL) {
1779 tty->print(" (JVMCI: installed code name=%s) ", installed_code_name);
1780 }
1781 }
1782#endif
1783 tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
1784 p2i(fr.pc()),
1785 os::current_thread_id(),
1786 trap_reason_name(reason),
1787 trap_action_name(action),
1788 unloaded_class_index
1789#if INCLUDE_JVMCI
1790 , debug_id
1791#endif
1792 );
1793 if (class_name != NULL) {
1794 tty->print(unresolved ? " unresolved class: " : " symbol: ");
1795 class_name->print_symbol_on(tty);
1796 }
1797 tty->cr();
1798 }
1799 if (xtty != NULL) {
1800 // Log the precise location of the trap.
1801 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
1802 xtty->begin_elem("jvms bci='%d'", sd->bci());
1803 xtty->method(sd->method());
1804 xtty->end_elem();
1805 if (sd->is_top()) break;
1806 }
1807 xtty->tail("uncommon_trap");
1808 }
1809 }
1810 // (End diagnostic printout.)
1811
1812 // Load class if necessary
1813 if (unloaded_class_index >= 0) {
1814 constantPoolHandle constants(THREAD, trap_method->constants());
1815 load_class_by_index(constants, unloaded_class_index);
1816 }
1817
1818 // Flush the nmethod if necessary and desirable.
1819 //
1820 // We need to avoid situations where we are re-flushing the nmethod
1821 // because of a hot deoptimization site. Repeated flushes at the same
1822 // point need to be detected by the compiler and avoided. If the compiler
1823 // cannot avoid them (or has a bug and "refuses" to avoid them), this
1824 // module must take measures to avoid an infinite cycle of recompilation
1825 // and deoptimization. There are several such measures:
1826 //
1827 // 1. If a recompilation is ordered a second time at some site X
1828 // and for the same reason R, the action is adjusted to 'reinterpret',
1829 // to give the interpreter time to exercise the method more thoroughly.
1830 // If this happens, the method's overflow_recompile_count is incremented.
1831 //
1832 // 2. If the compiler fails to reduce the deoptimization rate, then
1833 // the method's overflow_recompile_count will begin to exceed the set
1834 // limit PerBytecodeRecompilationCutoff. If this happens, the action
1835 // is adjusted to 'make_not_compilable', and the method is abandoned
1836 // to the interpreter. This is a performance hit for hot methods,
1837 // but is better than a disastrous infinite cycle of recompilations.
1838 // (Actually, only the method containing the site X is abandoned.)
1839 //
1840 // 3. In parallel with the previous measures, if the total number of
1841 // recompilations of a method exceeds the much larger set limit
1842 // PerMethodRecompilationCutoff, the method is abandoned.
1843 // This should only happen if the method is very large and has
1844 // many "lukewarm" deoptimizations. The code which enforces this
1845 // limit is elsewhere (class nmethod, class Method).
1846 //
1847 // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1848 // to recompile at each bytecode independently of the per-BCI cutoff.
1849 //
1850 // The decision to update code is up to the compiler, and is encoded
1851 // in the Action_xxx code. If the compiler requests Action_none
1852 // no trap state is changed, no compiled code is changed, and the
1853 // computation suffers along in the interpreter.
1854 //
1855 // The other action codes specify various tactics for decompilation
1856 // and recompilation. Action_maybe_recompile is the loosest, and
1857 // allows the compiled code to stay around until enough traps are seen,
1858 // and until the compiler gets around to recompiling the trapping method.
1859 //
1860 // The other actions cause immediate removal of the present code.
1861
1862 // Traps caused by injected profile shouldn't pollute trap counts.
1863 bool injected_profile_trap = trap_method->has_injected_profile() &&
1864 (reason == Reason_intrinsic || reason == Reason_unreached);
1865
1866 bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
1867 bool make_not_entrant = false;
1868 bool make_not_compilable = false;
1869 bool reprofile = false;
1870 switch (action) {
1871 case Action_none:
1872 // Keep the old code.
1873 update_trap_state = false;
1874 break;
1875 case Action_maybe_recompile:
1876 // We do not need to invalidate the present code, but we can
1877 // initiate another compilation.
1878 // Start compiler without (necessarily) invalidating the nmethod.
1879 // The system will tolerate the old code, but new code should be
1880 // generated when possible.
1881 break;
1882 case Action_reinterpret:
1883 // Go back into the interpreter for a while, and then consider
1884 // recompiling from scratch.
1885 make_not_entrant = true;
1886 // Reset the invocation counter for the outermost method.
1887 // This will allow the interpreter to exercise the bytecodes
1888 // for a while before recompiling.
1889 // By contrast, Action_make_not_entrant is immediate.
1890 //
1891 // Note that the compiler will track null_check, null_assert,
1892 // range_check, and class_check events and log them as if they
1893 // had been traps taken from compiled code. This will update
1894 // the MDO trap history so that the next compilation will
1895 // properly detect hot trap sites.
1896 reprofile = true;
1897 break;
1898 case Action_make_not_entrant:
1899 // Request immediate recompilation, and get rid of the old code.
1900 // Make them not entrant, so next time they are called they get
1901 // recompiled. Unloaded classes are loaded now so recompile before next
1902 // time they are called. Same for uninitialized. The interpreter will
1903 // link the missing class, if any.
1904 make_not_entrant = true;
1905 break;
1906 case Action_make_not_compilable:
1907 // Give up on compiling this method at all.
1908 make_not_entrant = true;
1909 make_not_compilable = true;
1910 break;
1911 default:
1912 ShouldNotReachHere();
1913 }
1914
1915 // Setting +ProfileTraps fixes the following, on all platforms:
1916 // 4852688: ProfileInterpreter is off by default for ia64. The result is
1917 // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1918 // recompile relies on a MethodData* to record heroic opt failures.
1919
1920 // Whether the interpreter is producing MDO data or not, we also need
1921 // to use the MDO to detect hot deoptimization points and control
1922 // aggressive optimization.
1923 bool inc_recompile_count = false;
1924 ProfileData* pdata = NULL;
1925 if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) {
1926 assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
1927 uint this_trap_count = 0;
1928 bool maybe_prior_trap = false;
1929 bool maybe_prior_recompile = false;
1930 pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
1931#if INCLUDE_JVMCI
1932 nm->is_compiled_by_jvmci() && nm->is_osr_method(),
1933#endif
1934 nm->method(),
1935 //outputs:
1936 this_trap_count,
1937 maybe_prior_trap,
1938 maybe_prior_recompile);
1939 // Because the interpreter also counts null, div0, range, and class
1940 // checks, these traps from compiled code are double-counted.
1941 // This is harmless; it just means that the PerXTrapLimit values
1942 // are in effect a little smaller than they look.
1943
1944 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1945 if (per_bc_reason != Reason_none) {
1946 // Now take action based on the partially known per-BCI history.
1947 if (maybe_prior_trap
1948 && this_trap_count >= (uint)PerBytecodeTrapLimit) {
1949 // If there are too many traps at this BCI, force a recompile.
1950 // This will allow the compiler to see the limit overflow, and
1951 // take corrective action, if possible. The compiler generally
1952 // does not use the exact PerBytecodeTrapLimit value, but instead
1953 // changes its tactics if it sees any traps at all. This provides
1954 // a little hysteresis, delaying a recompile until a trap happens
1955 // several times.
1956 //
1957 // Actually, since there is only one bit of counter per BCI,
1958 // the possible per-BCI counts are {0,1,(per-method count)}.
1959 // This produces accurate results if in fact there is only
1960 // one hot trap site, but begins to get fuzzy if there are
1961 // many sites. For example, if there are ten sites each
1962 // trapping two or more times, they each get the blame for
1963 // all of their traps.
1964 make_not_entrant = true;
1965 }
1966
1967 // Detect repeated recompilation at the same BCI, and enforce a limit.
1968 if (make_not_entrant && maybe_prior_recompile) {
1969 // More than one recompile at this point.
1970 inc_recompile_count = maybe_prior_trap;
1971 }
1972 } else {
1973 // For reasons which are not recorded per-bytecode, we simply
1974 // force recompiles unconditionally.
1975 // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
1976 make_not_entrant = true;
1977 }
1978
1979 // Go back to the compiler if there are too many traps in this method.
1980 if (this_trap_count >= per_method_trap_limit(reason)) {
1981 // If there are too many traps in this method, force a recompile.
1982 // This will allow the compiler to see the limit overflow, and
1983 // take corrective action, if possible.
1984 // (This condition is an unlikely backstop only, because the
1985 // PerBytecodeTrapLimit is more likely to take effect first,
1986 // if it is applicable.)
1987 make_not_entrant = true;
1988 }
1989
1990 // Here's more hysteresis: If there has been a recompile at
1991 // this trap point already, run the method in the interpreter
1992 // for a while to exercise it more thoroughly.
1993 if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
1994 reprofile = true;
1995 }
1996 }
1997
1998 // Take requested actions on the method:
1999
2000 // Recompile
2001 if (make_not_entrant) {
2002 if (!nm->make_not_entrant()) {
2003 return; // the call did not change nmethod's state
2004 }
2005
2006 if (pdata != NULL) {
2007 // Record the recompilation event, if any.
2008 int tstate0 = pdata->trap_state();
2009 int tstate1 = trap_state_set_recompiled(tstate0, true);
2010 if (tstate1 != tstate0)
2011 pdata->set_trap_state(tstate1);
2012 }
2013
2014#if INCLUDE_RTM_OPT
2015 // Restart collecting RTM locking abort statistic if the method
2016 // is recompiled for a reason other than RTM state change.
2017 // Assume that in new recompiled code the statistic could be different,
2018 // for example, due to different inlining.
2019 if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
2020 UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
2021 trap_mdo->atomic_set_rtm_state(ProfileRTM);
2022 }
2023#endif
2024 // For code aging we count traps separately here, using make_not_entrant()
2025 // as a guard against simultaneous deopts in multiple threads.
2026 if (reason == Reason_tenured && trap_mdo != NULL) {
2027 trap_mdo->inc_tenure_traps();
2028 }
2029 }
2030
2031 if (inc_recompile_count) {
2032 trap_mdo->inc_overflow_recompile_count();
2033 if ((uint)trap_mdo->overflow_recompile_count() >
2034 (uint)PerBytecodeRecompilationCutoff) {
2035 // Give up on the method containing the bad BCI.
2036 if (trap_method() == nm->method()) {
2037 make_not_compilable = true;
2038 } else {
2039 trap_method->set_not_compilable("overflow_recompile_count > PerBytecodeRecompilationCutoff", CompLevel_full_optimization);
2040 // But give grace to the enclosing nm->method().
2041 }
2042 }
2043 }
2044
2045 // Reprofile
2046 if (reprofile) {
2047 CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
2048 }
2049
2050 // Give up compiling
2051 if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
2052 assert(make_not_entrant, "consistent");
2053 nm->method()->set_not_compilable("give up compiling", CompLevel_full_optimization);
2054 }
2055
2056 } // Free marked resources
2057
2058}
2059JRT_END
2060
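// Update the MDO for this trap: bump the per-method trap counter (when
// requested) and, for reasons recorded per bytecode, merge the reason into the
// per-BCI trap state. Returns the ProfileData for trap_bci, or NULL if none
// could be found or allocated. The out-parameters report the new trap count
// and whether a prior trap or recompile may already have occurred here.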
2061ProfileData*
2062Deoptimization::query_update_method_data(MethodData* trap_mdo,
2063 int trap_bci,
2064 Deoptimization::DeoptReason reason,
2065 bool update_total_trap_count,
2066#if INCLUDE_JVMCI
2067 bool is_osr,
2068#endif
2069 Method* compiled_method,
2070 //outputs:
2071 uint& ret_this_trap_count,
2072 bool& ret_maybe_prior_trap,
2073 bool& ret_maybe_prior_recompile) {
2074 bool maybe_prior_trap = false;
2075 bool maybe_prior_recompile = false;
2076 uint this_trap_count = 0;
2077 if (update_total_trap_count) {
2078 uint idx = reason;
2079#if INCLUDE_JVMCI
2080 if (is_osr) {
2081 idx += Reason_LIMIT;
2082 }
2083#endif
2084 uint prior_trap_count = trap_mdo->trap_count(idx);
2085 this_trap_count = trap_mdo->inc_trap_count(idx);
2086
2087 // If the runtime cannot find a place to store trap history,
2088 // it is estimated based on the general condition of the method.
2089 // If the method has ever been recompiled, or has ever incurred
2090 // a trap with the present reason, then this BCI is assumed
2091 // (pessimistically) to be the culprit.
2092 maybe_prior_trap = (prior_trap_count != 0);
2093 maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
2094 }
2095 ProfileData* pdata = NULL;
2096
2097
2098 // For reasons which are recorded per bytecode, we check per-BCI data.
2099 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2100 assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
2101 if (per_bc_reason != Reason_none) {
2102 // Find the profile data for this BCI. If there isn't one,
2103 // try to allocate one from the MDO's set of spares.
2104 // This will let us detect a repeated trap at this point.
2105 pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
2106
2107 if (pdata != NULL) {
2108 if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
2109 if (LogCompilation && xtty != NULL) {
2110 ttyLocker ttyl;
2111 // no more room for speculative traps in this MDO
2112 xtty->elem("speculative_traps_oom");
2113 }
2114 }
2115 // Query the trap state of this profile datum.
2116 int tstate0 = pdata->trap_state();
2117 if (!trap_state_has_reason(tstate0, per_bc_reason))
2118 maybe_prior_trap = false;
2119 if (!trap_state_is_recompiled(tstate0))
2120 maybe_prior_recompile = false;
2121
2122 // Update the trap state of this profile datum.
2123 int tstate1 = tstate0;
2124 // Record the reason.
2125 tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
2126 // Store the updated state on the MDO, for next time.
2127 if (tstate1 != tstate0)
2128 pdata->set_trap_state(tstate1);
2129 } else {
2130 if (LogCompilation && xtty != NULL) {
2131 ttyLocker ttyl;
2132 // Missing MDP? Leave a small complaint in the log.
2133 xtty->elem("missing_mdp bci='%d'", trap_bci);
2134 }
2135 }
2136 }
2137
2138 // Return results:
2139 ret_this_trap_count = this_trap_count;
2140 ret_maybe_prior_trap = maybe_prior_trap;
2141 ret_maybe_prior_recompile = maybe_prior_recompile;
2142 return pdata;
2143}
2144
2145void
2146Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2147 ResourceMark rm;
2148 // Ignored outputs:
2149 uint ignore_this_trap_count;
2150 bool ignore_maybe_prior_trap;
2151 bool ignore_maybe_prior_recompile;
2152 assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2153 // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
2154 bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
2155 query_update_method_data(trap_mdo, trap_bci,
2156 (DeoptReason)reason,
2157 update_total_counts,
2158#if INCLUDE_JVMCI
2159 false,
2160#endif
2161 NULL,
2162 ignore_this_trap_count,
2163 ignore_maybe_prior_trap,
2164 ignore_maybe_prior_recompile);
2165}
2166
2167Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) {
2168 if (TraceDeoptimization) {
2169 tty->print("Uncommon trap ");
2170 }
2171 // Still in Java code; no safepoints have occurred yet
2172 {
2173 // This enters VM and may safepoint
2174 uncommon_trap_inner(thread, trap_request);
2175 }
2176 return fetch_unroll_info_helper(thread, exec_mode);
2177}
2178
2179// Local derived constants.
2180// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2181const int DS_REASON_MASK = ((uint)DataLayout::trap_mask) >> 1;
2182const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
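// Assuming DataLayout::trap_mask is a contiguous mask of low-order bits, the
// reason value occupies all but its top bit, and that top bit
// (DS_RECOMPILE_BIT) records whether a recompile has already happened at this
// BCI. For example, with an 8-bit mask of 0xFF this yields
// DS_REASON_MASK == 0x7F and DS_RECOMPILE_BIT == 0x80.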
2183
2184//---------------------------trap_state_reason---------------------------------
2185Deoptimization::DeoptReason
2186Deoptimization::trap_state_reason(int trap_state) {
2187 // This assert provides the link between the width of DataLayout::trap_bits
2188 // and the encoding of "recorded" reasons. It ensures there are enough
2189 // bits to store all needed reasons in the per-BCI MDO profile.
2190 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2191 int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2192 trap_state -= recompile_bit;
2193 if (trap_state == DS_REASON_MASK) {
2194 return Reason_many;
2195 } else {
2196 assert((int)Reason_none == 0, "state=0 => Reason_none");
2197 return (DeoptReason)trap_state;
2198 }
2199}
2200//-------------------------trap_state_has_reason-------------------------------
2201int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2202 assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
2203 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2204 int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2205 trap_state -= recompile_bit;
2206 if (trap_state == DS_REASON_MASK) {
2207 return -1; // true, unspecifically (bottom of state lattice)
2208 } else if (trap_state == reason) {
2209 return 1; // true, definitely
2210 } else if (trap_state == 0) {
2211 return 0; // false, definitely (top of state lattice)
2212 } else {
2213 return 0; // false, definitely
2214 }
2215}
2216//-------------------------trap_state_add_reason-------------------------------
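// Merge 'reason' into the per-BCI trap state lattice: 0 means no reason seen
// yet, a single reason value records exactly that reason, and DS_REASON_MASK
// ("many") is the bottom element reached once two different reasons have been
// seen. The recompile bit is preserved unchanged.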
2217int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
2218 assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
2219 int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2220 trap_state -= recompile_bit;
2221 if (trap_state == DS_REASON_MASK) {
2222 return trap_state + recompile_bit; // already at state lattice bottom
2223 } else if (trap_state == reason) {
2224 return trap_state + recompile_bit; // the condition is already true
2225 } else if (trap_state == 0) {
2226 return reason + recompile_bit; // no condition has yet been true
2227 } else {
2228 return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom
2229 }
2230}
2231//-----------------------trap_state_is_recompiled------------------------------
2232bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2233 return (trap_state & DS_RECOMPILE_BIT) != 0;
2234}
2235//-----------------------trap_state_set_recompiled-----------------------------
2236int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
2237 if (z) return trap_state | DS_RECOMPILE_BIT;
2238 else return trap_state & ~DS_RECOMPILE_BIT;
2239}
2240//---------------------------format_trap_state---------------------------------
2241// This is used for debugging and diagnostics, including LogFile output.
2242const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2243 int trap_state) {
2244 assert(buflen > 0, "sanity");
2245 DeoptReason reason = trap_state_reason(trap_state);
2246 bool recomp_flag = trap_state_is_recompiled(trap_state);
2247 // Re-encode the state from its decoded components.
2248 int decoded_state = 0;
2249 if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
2250 decoded_state = trap_state_add_reason(decoded_state, reason);
2251 if (recomp_flag)
2252 decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
2253 // If the state re-encodes properly, format it symbolically.
2254 // Because this routine is used for debugging and diagnostics,
2255 // be robust even if the state is a strange value.
2256 size_t len;
2257 if (decoded_state != trap_state) {
2258 // Random buggy state that doesn't decode??
2259 len = jio_snprintf(buf, buflen, "#%d", trap_state);
2260 } else {
2261 len = jio_snprintf(buf, buflen, "%s%s",
2262 trap_reason_name(reason),
2263 recomp_flag ? " recompiled" : "");
2264 }
2265 return buf;
2266}
2267
2268
2269//--------------------------------statics--------------------------------------
2270const char* Deoptimization::_trap_reason_name[] = {
2271 // Note: Keep this in sync. with enum DeoptReason.
2272 "none",
2273 "null_check",
2274 "null_assert" JVMCI_ONLY("_or_unreached0"),
2275 "range_check",
2276 "class_check",
2277 "array_check",
2278 "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
2279 "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
2280 "profile_predicate",
2281 "unloaded",
2282 "uninitialized",
2283 "initialized",
2284 "unreached",
2285 "unhandled",
2286 "constraint",
2287 "div0_check",
2288 "age",
2289 "predicate",
2290 "loop_limit_check",
2291 "speculate_class_check",
2292 "speculate_null_check",
2293 "speculate_null_assert",
2294 "rtm_state_change",
2295 "unstable_if",
2296 "unstable_fused_if",
2297#if INCLUDE_JVMCI
2298 "aliasing",
2299 "transfer_to_interpreter",
2300 "not_compiled_exception_handler",
2301 "unresolved",
2302 "jsr_mismatch",
2303#endif
2304 "tenured"
2305};
2306const char* Deoptimization::_trap_action_name[] = {
2307 // Note: Keep this in sync. with enum DeoptAction.
2308 "none",
2309 "maybe_recompile",
2310 "reinterpret",
2311 "make_not_entrant",
2312 "make_not_compilable"
2313};
2314
2315const char* Deoptimization::trap_reason_name(int reason) {
2316 // Check that every reason has a name
2317 STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);
2318
2319 if (reason == Reason_many) return "many";
2320 if ((uint)reason < Reason_LIMIT)
2321 return _trap_reason_name[reason];
2322 static char buf[20];
2323 sprintf(buf, "reason%d", reason);
2324 return buf;
2325}
2326const char* Deoptimization::trap_action_name(int action) {
2327 // Check that every action has a name
2328 STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);
2329
2330 if ((uint)action < Action_LIMIT)
2331 return _trap_action_name[action];
2332 static char buf[20];
2333 sprintf(buf, "action%d", action);
2334 return buf;
2335}
2336
2337// This is used for debugging and diagnostics, including LogFile output.
2338const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
2339 int trap_request) {
2340 jint unloaded_class_index = trap_request_index(trap_request);
2341 const char* reason = trap_reason_name(trap_request_reason(trap_request));
2342 const char* action = trap_action_name(trap_request_action(trap_request));
2343#if INCLUDE_JVMCI
2344 int debug_id = trap_request_debug_id(trap_request);
2345#endif
2346 size_t len;
2347 if (unloaded_class_index < 0) {
2348 len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2349 reason, action
2350#if INCLUDE_JVMCI
2351 ,debug_id
2352#endif
2353 );
2354 } else {
2355 len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2356 reason, action, unloaded_class_index
2357#if INCLUDE_JVMCI
2358 ,debug_id
2359#endif
2360 );
2361 }
2362 return buf;
2363}
2364
2365juint Deoptimization::_deoptimization_hist
2366 [Deoptimization::Reason_LIMIT]
2367 [1 + Deoptimization::Action_LIMIT]
2368 [Deoptimization::BC_CASE_LIMIT]
2369 = {0};
2370
2371enum {
2372 LSB_BITS = 8,
2373 LSB_MASK = right_n_bits(LSB_BITS)
2374};
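// Each histogram counter packs its data into one juint: the low LSB_BITS hold
// the bytecode most recently associated with the counter and the remaining
// high bits hold the occurrence count, so a hit bumps the counter by
// (1 << LSB_BITS). Row [reason][0][0] accumulates the per-reason total and
// [Reason_none][0][0] the grand total.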
2375
2376void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2377 Bytecodes::Code bc) {
2378 assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2379 assert(action >= 0 && action < Action_LIMIT, "oob");
2380 _deoptimization_hist[Reason_none][0][0] += 1; // total
2381 _deoptimization_hist[reason][0][0] += 1; // per-reason total
2382 juint* cases = _deoptimization_hist[reason][1+action];
2383 juint* bc_counter_addr = NULL;
2384 juint bc_counter = 0;
2385 // Look for an unused counter, or an exact match to this BC.
2386 if (bc != Bytecodes::_illegal) {
2387 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2388 juint* counter_addr = &cases[bc_case];
2389 juint counter = *counter_addr;
2390 if ((counter == 0 && bc_counter_addr == NULL)
2391 || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2392 // this counter is either free or is already devoted to this BC
2393 bc_counter_addr = counter_addr;
2394 bc_counter = counter | bc;
2395 }
2396 }
2397 }
2398 if (bc_counter_addr == NULL) {
2399 // Overflow, or no given bytecode.
2400 bc_counter_addr = &cases[BC_CASE_LIMIT-1];
2401 bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB
2402 }
2403 *bc_counter_addr = bc_counter + (1 << LSB_BITS);
2404}
2405
2406jint Deoptimization::total_deoptimization_count() {
2407 return _deoptimization_hist[Reason_none][0][0];
2408}
2409
2410void Deoptimization::print_statistics() {
2411 juint total = total_deoptimization_count();
2412 juint account = total;
2413 if (total != 0) {
2414 ttyLocker ttyl;
2415 if (xtty != NULL) xtty->head("statistics type='deoptimization'");
2416 tty->print_cr("Deoptimization traps recorded:");
2417 #define PRINT_STAT_LINE(name, r) \
2418 tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
2419 PRINT_STAT_LINE("total", total);
2420 // For each non-zero entry in the histogram, print the reason,
2421 // the action, and (if specifically known) the type of bytecode.
2422 for (int reason = 0; reason < Reason_LIMIT; reason++) {
2423 for (int action = 0; action < Action_LIMIT; action++) {
2424 juint* cases = _deoptimization_hist[reason][1+action];
2425 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2426 juint counter = cases[bc_case];
2427 if (counter != 0) {
2428 char name[1*K];
2429 Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2430 if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
2431 bc = Bytecodes::_illegal;
2432 sprintf(name, "%s/%s/%s",
2433 trap_reason_name(reason),
2434 trap_action_name(action),
2435 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2436 juint r = counter >> LSB_BITS;
2437 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2438 account -= r;
2439 }
2440 }
2441 }
2442 }
2443 if (account != 0) {
2444 PRINT_STAT_LINE("unaccounted", account);
2445 }
2446 #undef PRINT_STAT_LINE
2447 if (xtty != NULL) xtty->tail("statistics");
2448 }
2449}
2450#else // COMPILER2_OR_JVMCI
2451
2452
2453// Stubs for C1 only system.
2454bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2455 return false;
2456}
2457
2458const char* Deoptimization::trap_reason_name(int reason) {
2459 return "unknown";
2460}
2461
2462void Deoptimization::print_statistics() {
2463 // no output
2464}
2465
2466void
2467Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2468 // no update
2469}
2470
2471int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2472 return 0;
2473}
2474
2475void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2476 Bytecodes::Code bc) {
2477 // no update
2478}
2479
2480const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2481 int trap_state) {
2482 jio_snprintf(buf, buflen, "#%d", trap_state);
2483 return buf;
2484}
2485
2486#endif // COMPILER2_OR_JVMCI
2487