1 | /* |
2 | * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "asm/macroAssembler.hpp" |
27 | #include "compiler/disassembler.hpp" |
28 | #include "gc/shared/barrierSetAssembler.hpp" |
29 | #include "interpreter/bytecodeHistogram.hpp" |
30 | #include "interpreter/interp_masm.hpp" |
31 | #include "interpreter/interpreter.hpp" |
32 | #include "interpreter/interpreterRuntime.hpp" |
33 | #include "interpreter/templateInterpreterGenerator.hpp" |
34 | #include "interpreter/templateTable.hpp" |
35 | #include "oops/arrayOop.hpp" |
36 | #include "oops/methodData.hpp" |
37 | #include "oops/method.hpp" |
38 | #include "oops/oop.inline.hpp" |
39 | #include "prims/jvmtiExport.hpp" |
40 | #include "prims/jvmtiThreadState.hpp" |
41 | #include "runtime/arguments.hpp" |
42 | #include "runtime/deoptimization.hpp" |
43 | #include "runtime/frame.inline.hpp" |
44 | #include "runtime/sharedRuntime.hpp" |
45 | #include "runtime/stubRoutines.hpp" |
46 | #include "runtime/synchronizer.hpp" |
47 | #include "runtime/timer.hpp" |
48 | #include "runtime/vframeArray.hpp" |
49 | #include "utilities/debug.hpp" |
50 | #include "utilities/macros.hpp" |
51 | |
52 | #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)-> |
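| // The '__' shorthand above routes every assembler call through |
| // Disassembler::hook, which records the emitting C++ file and line so that |
| // disassembly of the generated interpreter code (e.g. with -XX:+PrintInterpreter) |
| // can be annotated with the source location that produced each instruction. |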
53 | |
54 | // Size of interpreter code. Increase if too small. Interpreter will |
55 | // fail with a guarantee ("not enough space for interpreter generation") |
56 | // if too small. |
57 | // Run with +PrintInterpreter to get the VM to print out the size. |
58 | // Max size with JVMTI |
59 | #ifdef AMD64 |
60 | int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024; |
61 | #else |
62 | int TemplateInterpreter::InterpreterCodeSize = 224 * 1024; |
63 | #endif // AMD64 |
64 | |
65 | // Global Register Names |
66 | static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi); |
67 | static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi); |
68 | |
69 | const int method_offset = frame::interpreter_frame_method_offset * wordSize; |
70 | const int bcp_offset = frame::interpreter_frame_bcp_offset * wordSize; |
71 | const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; |
72 | |
73 | |
74 | //----------------------------------------------------------------------------- |
75 | |
76 | address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { |
77 | address entry = __ pc(); |
78 | |
79 | #ifdef ASSERT |
80 | { |
81 | Label L; |
82 | __ lea(rax, Address(rbp, |
83 | frame::interpreter_frame_monitor_block_top_offset * |
84 | wordSize)); |
85 | __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack |
86 | // grows negative) |
87 | __ jcc(Assembler::aboveEqual, L); // check if frame is complete |
88 | __ stop("interpreter frame not set up"); |
89 | __ bind(L); |
90 | } |
91 | #endif // ASSERT |
92 | // Restore bcp under the assumption that the current frame is still |
93 | // interpreted |
94 | __ restore_bcp(); |
95 | |
96 | // expression stack must be empty before entering the VM if an |
97 | // exception happened |
98 | __ empty_expression_stack(); |
99 | // throw exception |
100 | __ call_VM(noreg, |
101 | CAST_FROM_FN_PTR(address, |
102 | InterpreterRuntime::throw_StackOverflowError)); |
103 | return entry; |
104 | } |
105 | |
106 | address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { |
107 | address entry = __ pc(); |
108 | // The expression stack must be empty before entering the VM if an |
109 | // exception happened. |
110 | __ empty_expression_stack(); |
111 | |
112 | // Setup parameters. |
113 | // Convention: the aberrant index is expected in register ebx/rbx. |
114 | // Pass array to create more detailed exceptions. |
115 | Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); |
116 | __ call_VM(noreg, |
117 | CAST_FROM_FN_PTR(address, |
118 | InterpreterRuntime:: |
119 | throw_ArrayIndexOutOfBoundsException), |
120 | rarg, rbx); |
121 | return entry; |
122 | } |
123 | |
124 | address TemplateInterpreterGenerator::generate_ClassCastException_handler() { |
125 | address entry = __ pc(); |
126 | |
127 | // object is at TOS |
128 | Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); |
129 | __ pop(rarg); |
130 | |
131 | // expression stack must be empty before entering the VM if an |
132 | // exception happened |
133 | __ empty_expression_stack(); |
134 | |
135 | __ call_VM(noreg, |
136 | CAST_FROM_FN_PTR(address, |
137 | InterpreterRuntime:: |
138 | throw_ClassCastException), |
139 | rarg); |
140 | return entry; |
141 | } |
142 | |
143 | address TemplateInterpreterGenerator::generate_exception_handler_common( |
144 | const char* name, const char* message, bool pass_oop) { |
145 | assert(!pass_oop || message == NULL, "either oop or message but not both"); |
146 | address entry = __ pc(); |
147 | |
148 | Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); |
149 | Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2); |
150 | |
151 | if (pass_oop) { |
152 | // object is at TOS |
153 | __ pop(rarg2); |
154 | } |
155 | // expression stack must be empty before entering the VM if an |
156 | // exception happened |
157 | __ empty_expression_stack(); |
158 | // setup parameters |
159 | __ lea(rarg, ExternalAddress((address)name)); |
160 | if (pass_oop) { |
161 | __ call_VM(rax, CAST_FROM_FN_PTR(address, |
162 | InterpreterRuntime:: |
163 | create_klass_exception), |
164 | rarg, rarg2); |
165 | } else { |
166 | __ lea(rarg2, ExternalAddress((address)message)); |
167 | __ call_VM(rax, |
168 | CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), |
169 | rarg, rarg2); |
170 | } |
171 | // throw exception |
172 | __ jump(ExternalAddress(Interpreter::throw_exception_entry())); |
173 | return entry; |
174 | } |
175 | |
176 | address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { |
177 | address entry = __ pc(); |
178 | |
179 | #ifndef _LP64 |
180 | #ifdef COMPILER2 |
181 | // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases |
182 | if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { |
183 | for (int i = 1; i < 8; i++) { |
184 | __ ffree(i); |
185 | } |
186 | } else if (UseSSE < 2) { |
187 | __ empty_FPU_stack(); |
188 | } |
189 | #endif // COMPILER2 |
190 | if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { |
191 | __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled"); |
192 | } else { |
193 | __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); |
194 | } |
195 | |
196 | if (state == ftos) { |
197 | __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter"); |
198 | } else if (state == dtos) { |
199 | __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter"); |
200 | } |
201 | #endif // _LP64 |
202 | |
203 | // Restore stack bottom in case i2c adjusted stack |
204 | __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); |
205 | // and NULL it as marker that esp is now tos until next java call |
206 | __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); |
207 | |
208 | __ restore_bcp(); |
209 | __ restore_locals(); |
210 | |
211 | if (state == atos) { |
212 | Register mdp = rbx; |
213 | Register tmp = rcx; |
214 | __ profile_return_type(mdp, rax, tmp); |
215 | } |
216 | |
217 | const Register cache = rbx; |
218 | const Register index = rcx; |
219 | __ get_cache_and_index_at_bcp(cache, index, 1, index_size); |
220 | |
221 | const Register flags = cache; |
222 | __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); |
223 | __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); |
224 | __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); |
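| // flags now holds the call site's parameter size in stack slots, so the lea |
| // above pops the arguments of the invoke we are returning from off the |
| // caller's expression stack. |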
225 | |
226 | const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); |
227 | if (JvmtiExport::can_pop_frame()) { |
228 | NOT_LP64(__ get_thread(java_thread)); |
229 | __ check_and_handle_popframe(java_thread); |
230 | } |
231 | if (JvmtiExport::can_force_early_return()) { |
232 | NOT_LP64(__ get_thread(java_thread)); |
233 | __ check_and_handle_earlyret(java_thread); |
234 | } |
235 | |
236 | __ dispatch_next(state, step); |
237 | |
238 | return entry; |
239 | } |
240 | |
241 | |
242 | address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) { |
243 | address entry = __ pc(); |
244 | |
245 | #ifndef _LP64 |
246 | if (state == ftos) { |
247 | __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter"); |
248 | } else if (state == dtos) { |
249 | __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter"); |
250 | } |
251 | #endif // _LP64 |
252 | |
253 | // NULL last_sp until next java call |
254 | __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); |
255 | __ restore_bcp(); |
256 | __ restore_locals(); |
257 | const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); |
258 | NOT_LP64(__ get_thread(thread)); |
259 | #if INCLUDE_JVMCI |
260 | // Check if we need to take lock at entry of synchronized method. This can |
261 | // only occur on method entry so emit it only for vtos with step 0. |
262 | if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) { |
263 | Label L; |
264 | __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0); |
265 | __ jcc(Assembler::zero, L); |
266 | // Clear flag. |
267 | __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0); |
268 | // Satisfy calling convention for lock_method(). |
269 | __ get_method(rbx); |
270 | // Take lock. |
271 | lock_method(); |
272 | __ bind(L); |
273 | } else { |
274 | #ifdef ASSERT |
275 | if (EnableJVMCI) { |
276 | Label L; |
277 | __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0); |
278 | __ jcc(Assembler::zero, L); |
279 | __ stop("unexpected pending monitor in deopt entry"); |
280 | __ bind(L); |
281 | } |
282 | #endif |
283 | } |
284 | #endif |
285 | // handle exceptions |
286 | { |
287 | Label L; |
288 | __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); |
289 | __ jcc(Assembler::zero, L); |
290 | __ call_VM(noreg, |
291 | CAST_FROM_FN_PTR(address, |
292 | InterpreterRuntime::throw_pending_exception)); |
293 | __ should_not_reach_here(); |
294 | __ bind(L); |
295 | } |
296 | if (continuation == NULL) { |
297 | __ dispatch_next(state, step); |
298 | } else { |
299 | __ jump_to_entry(continuation); |
300 | } |
301 | return entry; |
302 | } |
303 | |
304 | address TemplateInterpreterGenerator::generate_result_handler_for( |
305 | BasicType type) { |
306 | address entry = __ pc(); |
307 | switch (type) { |
308 | case T_BOOLEAN: __ c2bool(rax); break; |
309 | #ifndef _LP64 |
310 | case T_CHAR : __ andptr(rax, 0xFFFF); break; |
311 | #else |
312 | case T_CHAR : __ movzwl(rax, rax); break; |
313 | #endif // _LP64 |
314 | case T_BYTE : __ sign_extend_byte(rax); break; |
315 | case T_SHORT : __ sign_extend_short(rax); break; |
316 | case T_INT : /* nothing to do */ break; |
317 | case T_LONG : /* nothing to do */ break; |
318 | case T_VOID : /* nothing to do */ break; |
319 | #ifndef _LP64 |
320 | case T_DOUBLE : |
321 | case T_FLOAT : |
322 | { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); |
323 | __ pop(t); // remove return address first |
324 | // Must return a result for interpreter or compiler. In SSE |
325 | // mode, results are returned in xmm0 and the FPU stack must |
326 | // be empty. |
327 | if (type == T_FLOAT && UseSSE >= 1) { |
328 | // Load ST0 |
329 | __ fld_d(Address(rsp, 0)); |
330 | // Store as float and empty fpu stack |
331 | __ fstp_s(Address(rsp, 0)); |
332 | // and reload |
333 | __ movflt(xmm0, Address(rsp, 0)); |
334 | } else if (type == T_DOUBLE && UseSSE >= 2 ) { |
335 | __ movdbl(xmm0, Address(rsp, 0)); |
336 | } else { |
337 | // restore ST0 |
338 | __ fld_d(Address(rsp, 0)); |
339 | } |
340 | // and pop the temp |
341 | __ addptr(rsp, 2 * wordSize); |
342 | __ push(t); // restore return address |
343 | } |
344 | break; |
345 | #else |
346 | case T_FLOAT : /* nothing to do */ break; |
347 | case T_DOUBLE : /* nothing to do */ break; |
348 | #endif // _LP64 |
349 | |
350 | case T_OBJECT : |
351 | // retrieve result from frame |
352 | __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); |
353 | // and verify it |
354 | __ verify_oop(rax); |
355 | break; |
356 | default : ShouldNotReachHere(); |
357 | } |
358 | __ ret(0); // return from result handler |
359 | return entry; |
360 | } |
361 | |
362 | address TemplateInterpreterGenerator::generate_safept_entry_for( |
363 | TosState state, |
364 | address runtime_entry) { |
365 | address entry = __ pc(); |
366 | __ push(state); |
367 | __ call_VM(noreg, runtime_entry); |
368 | __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos)); |
369 | return entry; |
370 | } |
371 | |
372 | |
373 | |
374 | // Helpers for commoning out cases in the various types of method entries. |
375 | // |
376 | |
377 | |
378 | // increment invocation count & check for overflow |
379 | // |
380 | // Note: checking for negative value instead of overflow |
381 | // so we have a 'sticky' overflow test |
382 | // |
383 | // rbx: method |
384 | // rcx: invocation counter |
385 | // |
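| // Rough shape of the tiered check emitted below (a sketch, not the literal |
| // code; see increment_mask_and_jump in the interpreter macro assembler): |
| //   counter += InvocationCounter::count_increment; |
| //   if ((counter & invoke_mask) == 0) goto *overflow; |
| // The mask is loaded from MethodCounters/MethodData, so the effective |
| // overflow threshold is per-method state rather than a compile-time constant. |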
386 | void TemplateInterpreterGenerator::generate_counter_incr( |
387 | Label* overflow, |
388 | Label* profile_method, |
389 | Label* profile_method_continue) { |
390 | Label done; |
391 | // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. |
392 | if (TieredCompilation) { |
393 | int increment = InvocationCounter::count_increment; |
394 | Label no_mdo; |
395 | if (ProfileInterpreter) { |
396 | // Are we profiling? |
397 | __ movptr(rax, Address(rbx, Method::method_data_offset())); |
398 | __ testptr(rax, rax); |
399 | __ jccb(Assembler::zero, no_mdo); |
400 | // Increment counter in the MDO |
401 | const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + |
402 | in_bytes(InvocationCounter::counter_offset())); |
403 | const Address mask(rax, in_bytes(MethodData::invoke_mask_offset())); |
404 | __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); |
405 | __ jmp(done); |
406 | } |
407 | __ bind(no_mdo); |
408 | // Increment counter in MethodCounters |
409 | const Address invocation_counter(rax, |
410 | MethodCounters::invocation_counter_offset() + |
411 | InvocationCounter::counter_offset()); |
412 | __ get_method_counters(rbx, rax, done); |
413 | const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset())); |
414 | __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, |
415 | false, Assembler::zero, overflow); |
416 | __ bind(done); |
417 | } else { // not TieredCompilation |
418 | const Address backedge_counter(rax, |
419 | MethodCounters::backedge_counter_offset() + |
420 | InvocationCounter::counter_offset()); |
421 | const Address invocation_counter(rax, |
422 | MethodCounters::invocation_counter_offset() + |
423 | InvocationCounter::counter_offset()); |
424 | |
425 | __ get_method_counters(rbx, rax, done); |
426 | |
427 | if (ProfileInterpreter) { |
428 | __ incrementl(Address(rax, |
429 | MethodCounters::interpreter_invocation_counter_offset())); |
430 | } |
431 | // Update standard invocation counters |
432 | __ movl(rcx, invocation_counter); |
433 | __ incrementl(rcx, InvocationCounter::count_increment); |
434 | __ movl(invocation_counter, rcx); // save invocation count |
435 | |
436 | __ movl(rax, backedge_counter); // load backedge counter |
437 | __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits |
438 | |
439 | __ addl(rcx, rax); // add both counters |
440 | |
441 | // profile_method is non-null only for interpreted methods, so |
442 | // profile_method != NULL implies !native_call. |
443 | |
444 | if (ProfileInterpreter && profile_method != NULL) { |
445 | // Test to see if we should create a method data oop |
446 | __ movptr(rax, Address(rbx, Method::method_counters_offset())); |
447 | __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset()))); |
448 | __ jcc(Assembler::less, *profile_method_continue); |
449 | |
450 | // if no method data exists, go to profile_method |
451 | __ test_method_data_pointer(rax, *profile_method); |
452 | } |
453 | |
454 | __ movptr(rax, Address(rbx, Method::method_counters_offset())); |
455 | __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset()))); |
456 | __ jcc(Assembler::aboveEqual, *overflow); |
457 | __ bind(done); |
458 | } |
459 | } |
460 | |
461 | void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) { |
462 | |
463 | // Asm interpreter on entry |
464 | // r14/rdi - locals |
465 | // r13/rsi - bcp |
466 | // rbx - method |
467 | // rdx - cpool --- DOES NOT APPEAR TO BE TRUE |
468 | // rbp - interpreter frame |
469 | |
470 | // On return (i.e. jump to entry_point) [ back to invocation of interpreter ] |
471 | // Everything as it was on entry |
472 | // rdx is not restored. Doesn't appear to really be set. |
473 | |
474 | // InterpreterRuntime::frequency_counter_overflow takes two |
475 | // arguments, the first (thread) is passed by call_VM, the second |
476 | // indicates if the counter overflow occurs at a backwards branch |
477 | // (NULL bcp). We pass zero for it. The call returns the address |
478 | // of the verified entry point for the method or NULL if the |
479 | // compilation did not complete (either went background or bailed |
480 | // out). |
481 | Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); |
482 | __ movl(rarg, 0); |
483 | __ call_VM(noreg, |
484 | CAST_FROM_FN_PTR(address, |
485 | InterpreterRuntime::frequency_counter_overflow), |
486 | rarg); |
487 | |
488 | __ movptr(rbx, Address(rbp, method_offset)); // restore Method* |
489 | // Preserve invariant that r13/r14 contain bcp/locals of sender frame |
490 | // and jump to the interpreted entry. |
491 | __ jmp(do_continue, relocInfo::none); |
492 | } |
493 | |
494 | // See if we've got enough room on the stack for locals plus overhead below |
495 | // JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError |
496 | // without going through the signal handler, i.e., reserved and yellow zones |
497 | // will not be made usable. The shadow zone must suffice to handle the |
498 | // overflow. |
499 | // The expression stack grows down incrementally, so the normal guard |
500 | // page mechanism will work for that. |
501 | // |
502 | // NOTE: Since the additional locals are also always pushed (this was not |
503 | // obvious in generate_fixed_frame), the guard should work for them |
504 | // too. |
505 | // |
506 | // Args: |
507 | // rdx: number of additional locals this frame needs (what we must check) |
508 | // rbx: Method* |
509 | // |
510 | // Kills: |
511 | // rax |
512 | void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { |
513 | |
514 | // monitor entry size: see picture of stack in frame_x86.hpp |
515 | const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; |
516 | |
517 | // total overhead size: entry_size + (saved rbp through expr stack |
518 | // bottom). be sure to change this if you add/subtract anything |
519 | // to/from the overhead area |
520 | const int overhead_size = |
521 | -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size; |
522 | |
523 | const int page_size = os::vm_page_size(); |
524 | |
525 | Label after_frame_check; |
526 | |
527 | // see if the frame is greater than one page in size. If so, |
528 | // then we need to verify there is enough stack space remaining |
529 | // for the additional locals. |
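| // (Illustrative arithmetic only: with a 4K page, an 8-byte stack element and |
| // a few dozen bytes of fixed overhead, roughly five hundred additional locals |
| // still fit under the ordinary guard-page check and skip the explicit test below.) |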
530 | __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize); |
531 | __ jcc(Assembler::belowEqual, after_frame_check); |
532 | |
533 | // compute rsp as if this were going to be the last frame on |
534 | // the stack before the red zone |
535 | |
536 | Label after_frame_check_pop; |
537 | const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); |
538 | #ifndef _LP64 |
539 | __ push(thread); |
540 | __ get_thread(thread); |
541 | #endif |
542 | |
543 | const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset()); |
544 | |
545 | // locals + overhead, in bytes |
546 | __ mov(rax, rdx); |
547 | __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes. |
548 | __ addptr(rax, overhead_size); |
549 | |
550 | #ifdef ASSERT |
551 | Label limit_okay; |
552 | // Verify that thread stack overflow limit is non-zero. |
553 | __ cmpptr(stack_limit, (int32_t)NULL_WORD); |
554 | __ jcc(Assembler::notEqual, limit_okay); |
555 | __ stop("stack overflow limit is zero"); |
556 | __ bind(limit_okay); |
557 | #endif |
558 | |
559 | // Add locals/frame size to stack limit. |
560 | __ addptr(rax, stack_limit); |
561 | |
562 | // Check against the current stack bottom. |
563 | __ cmpptr(rsp, rax); |
564 | |
565 | __ jcc(Assembler::above, after_frame_check_pop); |
566 | NOT_LP64(__ pop(rsi)); // get saved bcp |
567 | |
568 | // Restore sender's sp as SP. This is necessary if the sender's |
569 | // frame is an extended compiled frame (see gen_c2i_adapter()) |
570 | // and safer anyway in case of JSR292 adaptations. |
571 | |
572 | __ pop(rax); // return address must be moved if SP is changed |
573 | __ mov(rsp, rbcp); |
574 | __ push(rax); |
575 | |
576 | // Note: the restored frame is not necessarily interpreted. |
577 | // Use the shared runtime version of the StackOverflowError. |
578 | assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); |
579 | __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry())); |
580 | // all done with frame size check |
581 | __ bind(after_frame_check_pop); |
582 | NOT_LP64(__ pop(rsi)); |
583 | |
584 | // all done with frame size check |
585 | __ bind(after_frame_check); |
586 | } |
587 | |
588 | // Allocate monitor and lock method (asm interpreter) |
589 | // |
590 | // Args: |
591 | // rbx: Method* |
592 | // r14/rdi: locals |
593 | // |
594 | // Kills: |
595 | // rax |
596 | // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs) |
597 | // rscratch1, rscratch2 (scratch regs) |
598 | void TemplateInterpreterGenerator::lock_method() { |
599 | // synchronize method |
600 | const Address access_flags(rbx, Method::access_flags_offset()); |
601 | const Address monitor_block_top( |
602 | rbp, |
603 | frame::interpreter_frame_monitor_block_top_offset * wordSize); |
604 | const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; |
605 | |
606 | #ifdef ASSERT |
607 | { |
608 | Label L; |
609 | __ movl(rax, access_flags); |
610 | __ testl(rax, JVM_ACC_SYNCHRONIZED); |
611 | __ jcc(Assembler::notZero, L); |
612 | __ stop("method doesn't need synchronization"); |
613 | __ bind(L); |
614 | } |
615 | #endif // ASSERT |
616 | |
617 | // get synchronization object |
618 | { |
619 | Label done; |
620 | __ movl(rax, access_flags); |
621 | __ testl(rax, JVM_ACC_STATIC); |
622 | // get receiver (assume this is frequent case) |
623 | __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0))); |
624 | __ jcc(Assembler::zero, done); |
625 | __ load_mirror(rax, rbx); |
626 | |
627 | #ifdef ASSERT |
628 | { |
629 | Label L; |
630 | __ testptr(rax, rax); |
631 | __ jcc(Assembler::notZero, L); |
632 | __ stop("synchronization object is NULL"); |
633 | __ bind(L); |
634 | } |
635 | #endif // ASSERT |
636 | |
637 | __ bind(done); |
638 | __ resolve(IS_NOT_NULL, rax); |
639 | } |
640 | |
641 | // add space for monitor & lock |
642 | __ subptr(rsp, entry_size); // add space for a monitor entry |
643 | __ movptr(monitor_block_top, rsp); // set new monitor block top |
644 | // store object |
645 | __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); |
646 | const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1); |
647 | __ movptr(lockreg, rsp); // object address |
648 | __ lock_object(lockreg); |
649 | } |
650 | |
651 | // Generate a fixed interpreter frame. This is identical setup for |
652 | // interpreted methods and for native methods hence the shared code. |
653 | // |
654 | // Args: |
655 | // rax: return address |
656 | // rbx: Method* |
657 | // r14/rdi: pointer to locals |
658 | // r13/rsi: sender sp |
659 | // rdx: cp cache |
660 | void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { |
661 | // initialize fixed part of activation frame |
662 | __ push(rax); // save return address |
663 | __ enter(); // save old & set new rbp |
664 | __ push(rbcp); // set sender sp |
665 | __ push((int)NULL_WORD); // leave last_sp as null |
666 | __ movptr(rbcp, Address(rbx, Method::const_offset())); // get ConstMethod* |
667 | __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase |
668 | __ push(rbx); // save Method* |
669 | // Get mirror and store it in the frame as GC root for this Method* |
670 | __ load_mirror(rdx, rbx); |
671 | __ push(rdx); |
672 | if (ProfileInterpreter) { |
673 | Label method_data_continue; |
674 | __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset()))); |
675 | __ testptr(rdx, rdx); |
676 | __ jcc(Assembler::zero, method_data_continue); |
677 | __ addptr(rdx, in_bytes(MethodData::data_offset())); |
678 | __ bind(method_data_continue); |
679 | __ push(rdx); // set the mdp (method data pointer) |
680 | } else { |
681 | __ push(0); |
682 | } |
683 | |
684 | __ movptr(rdx, Address(rbx, Method::const_offset())); |
685 | __ movptr(rdx, Address(rdx, ConstMethod::constants_offset())); |
686 | __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes())); |
687 | __ push(rdx); // set constant pool cache |
688 | __ push(rlocals); // set locals pointer |
689 | if (native_call) { |
690 | __ push(0); // no bcp |
691 | } else { |
692 | __ push(rbcp); // set bcp |
693 | } |
694 | __ push(0); // reserve word for pointer to expression stack bottom |
695 | __ movptr(Address(rsp, 0), rsp); // set expression stack bottom |
696 | } |
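| // Resulting fixed frame layout (one word per push above, rbp-relative): |
| //   rbp + 1*wordSize : return address |
| //   rbp + 0          : saved caller rbp |
| //   rbp - 1*wordSize : sender sp |
| //   rbp - 2*wordSize : last_sp (NULL except across Java calls) |
| //   rbp - 3*wordSize : Method* |
| //   rbp - 4*wordSize : mirror (kept in the frame as a GC root for this Method*) |
| //   rbp - 5*wordSize : mdp, or 0 if not profiling |
| //   rbp - 6*wordSize : ConstantPoolCache* |
| //   rbp - 7*wordSize : locals pointer |
| //   rbp - 8*wordSize : bcp, or 0 for native methods |
| //   rbp - 9*wordSize : expression stack bottom (initially points to itself) |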
697 | |
698 | // End of helpers |
699 | |
700 | // Method entry for java.lang.ref.Reference.get. |
701 | address TemplateInterpreterGenerator::generate_Reference_get_entry(void) { |
702 | // Code: _aload_0, _getfield, _areturn |
703 | // parameter size = 1 |
704 | // |
705 | // The code that gets generated by this routine is split into 2 parts: |
706 | // 1. The "intrinsified" code performing an ON_WEAK_OOP_REF load, |
707 | // 2. The slow path - which is an expansion of the regular method entry. |
708 | // |
709 | // Notes:- |
710 | // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed. |
711 | // * We may jump to the slow path iff the receiver is null. If the |
712 | // Reference object is null then we no longer perform an ON_WEAK_OOP_REF load. |
713 | // Thus we can use the regular method entry code to generate the NPE. |
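| // |
| // Conceptually (ignoring the barrier plumbing) the fast path implements |
| //   public T get() { return this.referent; } |
| // with the referent load done as an ON_WEAK_OOP_REF access so that the GC's |
| // weak-reference handling (e.g. SATB pre-barriers) stays correct. |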
714 | // |
715 | // rbx: Method* |
716 | |
717 | // r13: senderSP must preserve for slow path, set SP to it on fast path |
718 | |
719 | address entry = __ pc(); |
720 | |
721 | const int referent_offset = java_lang_ref_Reference::referent_offset; |
722 | guarantee(referent_offset > 0, "referent offset not initialized"); |
723 | |
724 | Label slow_path; |
725 | // rbx: method |
726 | |
727 | // Check if local 0 != NULL |
728 | // If the receiver is null then it is OK to jump to the slow path. |
729 | __ movptr(rax, Address(rsp, wordSize)); |
730 | |
731 | __ testptr(rax, rax); |
732 | __ jcc(Assembler::zero, slow_path); |
733 | |
734 | // rax: local 0 |
735 | // rbx: method (but can be used as scratch now) |
736 | // rdx: scratch |
737 | // rdi: scratch |
738 | |
739 | // Preserve the sender sp in case the load barrier |
740 | // calls the runtime |
741 | NOT_LP64(__ push(rsi)); |
742 | |
743 | // Load the value of the referent field. |
744 | const Address field_address(rax, referent_offset); |
745 | __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF); |
746 | |
747 | // _areturn |
748 | const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13); |
749 | NOT_LP64(__ pop(rsi)); // get sender sp |
750 | __ pop(rdi); // get return address |
751 | __ mov(rsp, sender_sp); // set sp to sender sp |
752 | __ jmp(rdi); |
753 | __ ret(0); |
754 | |
755 | // generate a vanilla interpreter entry as the slow path |
756 | __ bind(slow_path); |
757 | __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals)); |
758 | return entry; |
759 | } |
760 | |
761 | void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { |
762 | // Quick & dirty stack overflow checking: bang the stack & handle trap. |
763 | // Note that we do the banging after the frame is setup, since the exception |
764 | // handling code expects to find a valid interpreter frame on the stack. |
765 | // Doing the banging earlier fails if the caller frame is not an interpreter |
766 | // frame. |
767 | // (Also, the exception throwing code expects to unlock any synchronized |
768 | // method receiver, so do the banging after locking the receiver.) |
769 | |
770 | // Bang each page in the shadow zone. We can't assume it's been done for |
771 | // an interpreter frame with greater than a page of locals, so each page |
772 | // needs to be checked. Only true for non-native. |
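| // For example (sizes are illustrative): with 4K pages and a 20-page shadow |
| // zone, a Java entry (native_call == false) bangs the pages at offsets |
| // 1*4K .. 20*4K below rsp, while a native entry (start_page == n_shadow_pages) |
| // only bangs the single page at the far end of the shadow zone. |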
773 | if (UseStackBanging) { |
774 | const int page_size = os::vm_page_size(); |
775 | const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size; |
776 | const int start_page = native_call ? n_shadow_pages : 1; |
777 | for (int pages = start_page; pages <= n_shadow_pages; pages++) { |
778 | __ bang_stack_with_offset(pages*page_size); |
779 | } |
780 | } |
781 | } |
782 | |
783 | // Interpreter stub for calling a native method. (asm interpreter) |
784 | // This sets up a somewhat different looking stack for calling the |
785 | // native method than the typical interpreter frame setup. |
786 | address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { |
787 | // determine code generation flags |
788 | bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; |
789 | |
790 | // rbx: Method* |
791 | // rbcp: sender sp |
792 | |
793 | address entry_point = __ pc(); |
794 | |
795 | const Address constMethod (rbx, Method::const_offset()); |
796 | const Address access_flags (rbx, Method::access_flags_offset()); |
797 | const Address size_of_parameters(rcx, ConstMethod:: |
798 | size_of_parameters_offset()); |
799 | |
800 | |
801 | // get parameter size (always needed) |
802 | __ movptr(rcx, constMethod); |
803 | __ load_unsigned_short(rcx, size_of_parameters); |
804 | |
805 | // native calls don't need the stack size check since they have no |
806 | // expression stack, the arguments are already on the stack, and |
807 | // we only add a handful of words to the stack |
808 | |
809 | // rbx: Method* |
810 | // rcx: size of parameters |
811 | // rbcp: sender sp |
812 | __ pop(rax); // get return address |
813 | |
814 | // for natives the size of locals is zero |
815 | |
816 | // compute beginning of parameters |
817 | __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); |
818 | |
819 | // add 2 zero-initialized slots for native calls |
820 | // initialize result_handler slot |
821 | __ push((int) NULL_WORD); |
822 | // slot for oop temp |
823 | // (static native method holder mirror/jni oop result) |
824 | __ push((int) NULL_WORD); |
825 | |
826 | // initialize fixed part of activation frame |
827 | generate_fixed_frame(true); |
828 | |
829 | // make sure method is native & not abstract |
830 | #ifdef ASSERT |
831 | __ movl(rax, access_flags); |
832 | { |
833 | Label L; |
834 | __ testl(rax, JVM_ACC_NATIVE); |
835 | __ jcc(Assembler::notZero, L); |
836 | __ stop("tried to execute non-native method as native"); |
837 | __ bind(L); |
838 | } |
839 | { |
840 | Label L; |
841 | __ testl(rax, JVM_ACC_ABSTRACT); |
842 | __ jcc(Assembler::zero, L); |
843 | __ stop("tried to execute abstract method in interpreter"); |
844 | __ bind(L); |
845 | } |
846 | #endif |
847 | |
848 | // Since at this point in the method invocation the exception handler |
849 | // would try to exit the monitor of a synchronized method which hasn't |
850 | // been entered yet, we set the thread local variable |
851 | // _do_not_unlock_if_synchronized to true. The remove_activation will |
852 | // check this flag. |
853 | |
854 | const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread); |
855 | NOT_LP64(__ get_thread(thread1)); |
856 | const Address do_not_unlock_if_synchronized(thread1, |
857 | in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); |
858 | __ movbool(do_not_unlock_if_synchronized, true); |
859 | |
860 | // increment invocation count & check for overflow |
861 | Label invocation_counter_overflow; |
862 | if (inc_counter) { |
863 | generate_counter_incr(&invocation_counter_overflow, NULL, NULL); |
864 | } |
865 | |
866 | Label continue_after_compile; |
867 | __ bind(continue_after_compile); |
868 | |
869 | bang_stack_shadow_pages(true); |
870 | |
871 | // reset the _do_not_unlock_if_synchronized flag |
872 | NOT_LP64(__ get_thread(thread1)); |
873 | __ movbool(do_not_unlock_if_synchronized, false); |
874 | |
875 | // check for synchronized methods |
876 | // Must happen AFTER invocation_counter check and stack overflow check, |
877 | // so the method is not locked if the counter overflows. |
878 | if (synchronized) { |
879 | lock_method(); |
880 | } else { |
881 | // no synchronization necessary |
882 | #ifdef ASSERT |
883 | { |
884 | Label L; |
885 | __ movl(rax, access_flags); |
886 | __ testl(rax, JVM_ACC_SYNCHRONIZED); |
887 | __ jcc(Assembler::zero, L); |
888 | __ stop("method needs synchronization"); |
889 | __ bind(L); |
890 | } |
891 | #endif |
892 | } |
893 | |
894 | // start execution |
895 | #ifdef ASSERT |
896 | { |
897 | Label L; |
898 | const Address monitor_block_top(rbp, |
899 | frame::interpreter_frame_monitor_block_top_offset * wordSize); |
900 | __ movptr(rax, monitor_block_top); |
901 | __ cmpptr(rax, rsp); |
902 | __ jcc(Assembler::equal, L); |
903 | __ stop("broken stack frame setup in interpreter"); |
904 | __ bind(L); |
905 | } |
906 | #endif |
907 | |
908 | // jvmti support |
909 | __ notify_method_entry(); |
910 | |
911 | // work registers |
912 | const Register method = rbx; |
913 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); |
914 | const Register t = NOT_LP64(rcx) LP64_ONLY(r11); |
915 | |
916 | // allocate space for parameters |
917 | __ get_method(method); |
918 | __ movptr(t, Address(method, Method::const_offset())); |
919 | __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset())); |
920 | |
921 | #ifndef _LP64 |
922 | __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes. |
923 | __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror |
924 | __ subptr(rsp, t); |
925 | __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics |
926 | #else |
927 | __ shll(t, Interpreter::logStackElementSize); |
928 | |
929 | __ subptr(rsp, t); |
930 | __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows |
931 | __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) |
932 | #endif // _LP64 |
933 | |
934 | // get signature handler |
935 | { |
936 | Label L; |
937 | __ movptr(t, Address(method, Method::signature_handler_offset())); |
938 | __ testptr(t, t); |
939 | __ jcc(Assembler::notZero, L); |
940 | __ call_VM(noreg, |
941 | CAST_FROM_FN_PTR(address, |
942 | InterpreterRuntime::prepare_native_call), |
943 | method); |
944 | __ get_method(method); |
945 | __ movptr(t, Address(method, Method::signature_handler_offset())); |
946 | __ bind(L); |
947 | } |
948 | |
949 | // call signature handler |
950 | assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals, |
951 | "adjust this code"); |
952 | assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, |
953 | "adjust this code"); |
954 | assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1), |
955 | "adjust this code"); |
956 | |
957 | // The generated handlers do not touch RBX (the method oop). |
958 | // However, large signatures cannot be cached and are generated |
959 | // each time here. The slow-path generator can do a GC on return, |
960 | // so we must reload it after the call. |
961 | __ call(t); |
962 | __ get_method(method); // slow path can do a GC, reload RBX |
963 | |
964 | |
965 | // result handler is in rax |
966 | // set result handler |
967 | __ movptr(Address(rbp, |
968 | (frame::interpreter_frame_result_handler_offset) * wordSize), |
969 | rax); |
970 | |
971 | // pass mirror handle if static call |
972 | { |
973 | Label L; |
974 | __ movl(t, Address(method, Method::access_flags_offset())); |
975 | __ testl(t, JVM_ACC_STATIC); |
976 | __ jcc(Assembler::zero, L); |
977 | // get mirror |
978 | __ load_mirror(t, method, rax); |
979 | // copy mirror into activation frame |
980 | __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), |
981 | t); |
982 | // pass handle to mirror |
983 | #ifndef _LP64 |
984 | __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); |
985 | __ movptr(Address(rsp, wordSize), t); |
986 | #else |
987 | __ lea(c_rarg1, |
988 | Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); |
989 | #endif // _LP64 |
990 | __ bind(L); |
991 | } |
992 | |
993 | // get native function entry point |
994 | { |
995 | Label L; |
996 | __ movptr(rax, Address(method, Method::native_function_offset())); |
997 | ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); |
998 | __ cmpptr(rax, unsatisfied.addr()); |
999 | __ jcc(Assembler::notEqual, L); |
1000 | __ call_VM(noreg, |
1001 | CAST_FROM_FN_PTR(address, |
1002 | InterpreterRuntime::prepare_native_call), |
1003 | method); |
1004 | __ get_method(method); |
1005 | __ movptr(rax, Address(method, Method::native_function_offset())); |
1006 | __ bind(L); |
1007 | } |
1008 | |
1009 | // pass JNIEnv |
1010 | #ifndef _LP64 |
1011 | __ get_thread(thread); |
1012 | __ lea(t, Address(thread, JavaThread::jni_environment_offset())); |
1013 | __ movptr(Address(rsp, 0), t); |
1014 | |
1015 | // set_last_Java_frame_before_call |
1016 | // It is enough that the pc() |
1017 | // points into the right code segment. It does not have to be the correct return pc. |
1018 | __ set_last_Java_frame(thread, noreg, rbp, __ pc()); |
1019 | #else |
1020 | __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); |
1021 | |
1022 | // It is enough that the pc() points into the right code |
1023 | // segment. It does not have to be the correct return pc. |
1024 | __ set_last_Java_frame(rsp, rbp, (address) __ pc()); |
1025 | #endif // _LP64 |
1026 | |
1027 | // change thread state |
1028 | #ifdef ASSERT |
1029 | { |
1030 | Label L; |
1031 | __ movl(t, Address(thread, JavaThread::thread_state_offset())); |
1032 | __ cmpl(t, _thread_in_Java); |
1033 | __ jcc(Assembler::equal, L); |
1034 | __ stop("Wrong thread state in native stub"); |
1035 | __ bind(L); |
1036 | } |
1037 | #endif |
1038 | |
1039 | // Change state to native |
1040 | |
1041 | __ movl(Address(thread, JavaThread::thread_state_offset()), |
1042 | _thread_in_native); |
1043 | |
1044 | // Call the native method. |
1045 | __ call(rax); |
1046 | // 32: result potentially in rdx:rax or ST0 |
1047 | // 64: result potentially in rax or xmm0 |
1048 | |
1049 | // Verify or restore cpu control state after JNI call |
1050 | __ restore_cpu_control_state_after_jni(); |
1051 | |
1052 | // NOTE: The order of these pushes is known to frame::interpreter_frame_result |
1053 | // in order to extract the result of a method call. If the order of these |
1054 | // pushes changes or anything else is added to the stack then the code in |
1055 | // interpreter_frame_result must also change. |
1056 | |
1057 | #ifndef _LP64 |
1058 | // save potential result in ST(0) & rdx:rax |
1059 | // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 - |
1060 | // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers) |
1061 | // It is safe to do this push because state is _thread_in_native and return address will be found |
1062 | // via _last_native_pc and not via _last_java_sp |
1063 | |
1064 | // NOTE: the order of these push(es) is known to frame::interpreter_frame_result. |
1065 | // If the order changes or anything else is added to the stack the code in |
1066 | // interpreter_frame_result will have to be changed. |
1067 | |
1068 | { Label L; |
1069 | Label push_double; |
1070 | ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT)); |
1071 | ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE)); |
1072 | __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), |
1073 | float_handler.addr()); |
1074 | __ jcc(Assembler::equal, push_double); |
1075 | __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), |
1076 | double_handler.addr()); |
1077 | __ jcc(Assembler::notEqual, L); |
1078 | __ bind(push_double); |
1079 | __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0). |
1080 | __ bind(L); |
1081 | } |
1082 | #else |
1083 | __ push(dtos); |
1084 | #endif // _LP64 |
1085 | |
1086 | __ push(ltos); |
1087 | |
1088 | // change thread state |
1089 | NOT_LP64(__ get_thread(thread)); |
1090 | __ movl(Address(thread, JavaThread::thread_state_offset()), |
1091 | _thread_in_native_trans); |
1092 | |
1093 | // Force this write out before the read below |
1094 | __ membar(Assembler::Membar_mask_bits( |
1095 | Assembler::LoadLoad | Assembler::LoadStore | |
1096 | Assembler::StoreLoad | Assembler::StoreStore)); |
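| // (The fence makes the _thread_in_native_trans store above visible before we |
| // read the safepoint state and suspend flags below; otherwise the VM thread |
| // could miss this thread's transition during a safepoint operation.) |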
1097 | |
1098 | #ifndef _LP64 |
1099 | if (AlwaysRestoreFPU) { |
1100 | // Make sure the control word is correct. |
1101 | __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); |
1102 | } |
1103 | #endif // _LP64 |
1104 | |
1105 | // check for safepoint operation in progress and/or pending suspend requests |
1106 | { |
1107 | Label Continue; |
1108 | Label slow_path; |
1109 | |
1110 | #ifndef _LP64 |
1111 | __ safepoint_poll(slow_path, thread, noreg); |
1112 | #else |
1113 | __ safepoint_poll(slow_path, r15_thread, rscratch1); |
1114 | #endif |
1115 | |
1116 | __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0); |
1117 | __ jcc(Assembler::equal, Continue); |
1118 | __ bind(slow_path); |
1119 | |
1120 | // Don't use call_VM as it will see a possible pending exception |
1121 | // and forward it and never return here preventing us from |
1122 | // clearing _last_native_pc down below. Also can't use |
1123 | // call_VM_leaf either as it will check to see if r13 & r14 are |
1124 | // preserved and correspond to the bcp/locals pointers. So we do a |
1125 | // runtime call by hand. |
1126 | // |
1127 | #ifndef _LP64 |
1128 | __ push(thread); |
1129 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, |
1130 | JavaThread::check_special_condition_for_native_trans))); |
1131 | __ increment(rsp, wordSize); |
1132 | __ get_thread(thread); |
1133 | #else |
1134 | __ mov(c_rarg0, r15_thread); |
1135 | __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) |
1136 | __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows |
1137 | __ andptr(rsp, -16); // align stack as required by ABI |
1138 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); |
1139 | __ mov(rsp, r12); // restore sp |
1140 | __ reinit_heapbase(); |
1141 | #endif // _LP64 |
1142 | __ bind(Continue); |
1143 | } |
1144 | |
1145 | // change thread state |
1146 | __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); |
1147 | |
1148 | // reset_last_Java_frame |
1149 | __ reset_last_Java_frame(thread, true); |
1150 | |
1151 | if (CheckJNICalls) { |
1152 | // clear_pending_jni_exception_check |
1153 | __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD); |
1154 | } |
1155 | |
1156 | // reset handle block |
1157 | __ movptr(t, Address(thread, JavaThread::active_handles_offset())); |
1158 | __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); |
1159 | |
1160 | // If the result is an oop, unbox it and store it in the frame where the gc |
1161 | // will see it and the result handler will pick it up |
1162 | |
1163 | { |
1164 | Label no_oop; |
1165 | __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); |
1166 | __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); |
1167 | __ jcc(Assembler::notEqual, no_oop); |
1168 | // retrieve result |
1169 | __ pop(ltos); |
1170 | // Unbox oop result, e.g. JNIHandles::resolve value. |
1171 | __ resolve_jobject(rax /* value */, |
1172 | thread /* thread */, |
1173 | t /* tmp */); |
1174 | __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); |
1175 | // keep stack depth as expected by pushing oop which will eventually be discarded |
1176 | __ push(ltos); |
1177 | __ bind(no_oop); |
1178 | } |
1179 | |
1180 | |
1181 | { |
1182 | Label no_reguard; |
1183 | __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), |
1184 | JavaThread::stack_guard_yellow_reserved_disabled); |
1185 | __ jcc(Assembler::notEqual, no_reguard); |
1186 | |
1187 | __ pusha(); // XXX only save smashed registers |
1188 | #ifndef _LP64 |
1189 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); |
1190 | __ popa(); |
1191 | #else |
1192 | __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) |
1193 | __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows |
1194 | __ andptr(rsp, -16); // align stack as required by ABI |
1195 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); |
1196 | __ mov(rsp, r12); // restore sp |
1197 | __ popa(); // XXX only restore smashed registers |
1198 | __ reinit_heapbase(); |
1199 | #endif // _LP64 |
1200 | |
1201 | __ bind(no_reguard); |
1202 | } |
1203 | |
1204 | |
1205 | // The method register is junk from after the thread_in_native transition |
1206 | // until here. Also can't call_VM until the bcp has been |
1207 | // restored. Need bcp for throwing exception below so get it now. |
1208 | __ get_method(method); |
1209 | |
1210 | // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base() |
1211 | __ movptr(rbcp, Address(method, Method::const_offset())); // get ConstMethod* |
1212 | __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase |
1213 | |
1214 | // handle exceptions (exception handling will handle unlocking!) |
1215 | { |
1216 | Label L; |
1217 | __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); |
1218 | __ jcc(Assembler::zero, L); |
1219 | // Note: At some point we may want to unify this with the code |
1220 | // used in call_VM_base(); i.e., we should use the |
1221 | // StubRoutines::forward_exception code. For now this doesn't work |
1222 | // here because the rsp is not correctly set at this point. |
1223 | __ MacroAssembler::call_VM(noreg, |
1224 | CAST_FROM_FN_PTR(address, |
1225 | InterpreterRuntime::throw_pending_exception)); |
1226 | __ should_not_reach_here(); |
1227 | __ bind(L); |
1228 | } |
1229 | |
1230 | // do unlocking if necessary |
1231 | { |
1232 | Label L; |
1233 | __ movl(t, Address(method, Method::access_flags_offset())); |
1234 | __ testl(t, JVM_ACC_SYNCHRONIZED); |
1235 | __ jcc(Assembler::zero, L); |
1236 | // the code below should be shared with interpreter macro |
1237 | // assembler implementation |
1238 | { |
1239 | Label unlock; |
1240 | // BasicObjectLock will be first in list, since this is a |
1241 | // synchronized method. However, we need to check that the object |
1242 | // has not been unlocked by an explicit monitorexit bytecode. |
1243 | const Address monitor(rbp, |
1244 | (intptr_t)(frame::interpreter_frame_initial_sp_offset * |
1245 | wordSize - (int)sizeof(BasicObjectLock))); |
1246 | |
1247 | const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1); |
1248 | |
1249 | // the monitor is expected in c_rarg1 for the slow unlock path |
1250 | __ lea(regmon, monitor); // address of first monitor |
1251 | |
1252 | __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes())); |
1253 | __ testptr(t, t); |
1254 | __ jcc(Assembler::notZero, unlock); |
1255 | |
1256 | // Entry already unlocked, need to throw exception |
1257 | __ MacroAssembler::call_VM(noreg, |
1258 | CAST_FROM_FN_PTR(address, |
1259 | InterpreterRuntime::throw_illegal_monitor_state_exception)); |
1260 | __ should_not_reach_here(); |
1261 | |
1262 | __ bind(unlock); |
1263 | __ unlock_object(regmon); |
1264 | } |
1265 | __ bind(L); |
1266 | } |
1267 | |
1268 | // jvmti support |
1269 | // Note: This must happen _after_ handling/throwing any exceptions since |
1270 | // the exception handler code notifies the runtime of method exits |
1271 | // too. If this happens before, method entry/exit notifications are |
1272 | // not properly paired (was bug - gri 11/22/99). |
1273 | __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); |
1274 | |
1275 | // restore potential result in edx:eax, call result handler to |
1276 | // restore potential result in ST0 & handle result |
1277 | |
1278 | __ pop(ltos); |
1279 | LP64_ONLY( __ pop(dtos)); |
1280 | |
1281 | __ movptr(t, Address(rbp, |
1282 | (frame::interpreter_frame_result_handler_offset) * wordSize)); |
1283 | __ call(t); |
1284 | |
1285 | // remove activation |
1286 | __ movptr(t, Address(rbp, |
1287 | frame::interpreter_frame_sender_sp_offset * |
1288 | wordSize)); // get sender sp |
1289 | __ leave(); // remove frame anchor |
1290 | __ pop(rdi); // get return address |
1291 | __ mov(rsp, t); // set sp to sender sp |
1292 | __ jmp(rdi); |
1293 | |
1294 | if (inc_counter) { |
1295 | // Handle overflow of counter and compile method |
1296 | __ bind(invocation_counter_overflow); |
1297 | generate_counter_overflow(continue_after_compile); |
1298 | } |
1299 | |
1300 | return entry_point; |
1301 | } |
1302 | |
1303 | // Abstract method entry |
1304 | // Attempt to execute abstract method. Throw exception |
1305 | address TemplateInterpreterGenerator::generate_abstract_entry(void) { |
1306 | |
1307 | address entry_point = __ pc(); |
1308 | |
1309 | // abstract method entry |
1310 | |
1311 | // pop return address, reset last_sp to NULL |
1312 | __ empty_expression_stack(); |
1313 | __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) |
1314 | __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) |
1315 | |
1316 | // throw exception |
1317 | __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx); |
1318 | // the call_VM checks for exception, so we should never return here. |
1319 | __ should_not_reach_here(); |
1320 | |
1321 | return entry_point; |
1322 | } |
1323 | |
1324 | // |
1325 | // Generic interpreted method entry to (asm) interpreter |
1326 | // |
1327 | address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { |
1328 | // determine code generation flags |
1329 | bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; |
1330 | |
1331 | // ebx: Method* |
1332 | // rbcp: sender sp |
1333 | address entry_point = __ pc(); |
1334 | |
1335 | const Address constMethod(rbx, Method::const_offset()); |
1336 | const Address access_flags(rbx, Method::access_flags_offset()); |
1337 | const Address size_of_parameters(rdx, |
1338 | ConstMethod::size_of_parameters_offset()); |
1339 | const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset()); |
1340 | |
1341 | |
1342 | // get parameter size (always needed) |
1343 | __ movptr(rdx, constMethod); |
1344 | __ load_unsigned_short(rcx, size_of_parameters); |
1345 | |
1346 | // rbx: Method* |
1347 | // rcx: size of parameters |
1348 | // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i) |
1349 | |
1350 | __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words |
1351 | __ subl(rdx, rcx); // rdx = no. of additional locals |
1352 | |
1353 | // YYY |
1354 | // __ incrementl(rdx); |
1355 | // __ andl(rdx, -2); |
1356 | |
1357 | // see if we've got enough room on the stack for locals plus overhead. |
1358 | generate_stack_overflow_check(); |
1359 | |
1360 | // get return address |
1361 | __ pop(rax); |
1362 | |
1363 | // compute beginning of parameters |
1364 | __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); |
1365 | |
1366 | // rdx - # of additional locals |
1367 | // allocate space for locals |
1368 | // explicitly initialize locals |
1369 | { |
1370 | Label exit, loop; |
1371 | __ testl(rdx, rdx); |
1372 | __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 |
1373 | __ bind(loop); |
1374 | __ push((int) NULL_WORD); // initialize local variables |
1375 | __ decrementl(rdx); // until everything initialized |
1376 | __ jcc(Assembler::greater, loop); |
1377 | __ bind(exit); |
1378 | } |
1379 | |
1380 | // initialize fixed part of activation frame |
1381 | generate_fixed_frame(false); |
1382 | |
1383 | // make sure method is not native & not abstract |
1384 | #ifdef ASSERT |
1385 | __ movl(rax, access_flags); |
1386 | { |
1387 | Label L; |
1388 | __ testl(rax, JVM_ACC_NATIVE); |
1389 | __ jcc(Assembler::zero, L); |
1390 | __ stop("tried to execute native method as non-native"); |
1391 | __ bind(L); |
1392 | } |
1393 | { |
1394 | Label L; |
1395 | __ testl(rax, JVM_ACC_ABSTRACT); |
1396 | __ jcc(Assembler::zero, L); |
1397 | __ stop("tried to execute abstract method in interpreter"); |
1398 | __ bind(L); |
1399 | } |
1400 | #endif |
1401 | |
1402 | // Since at this point in the method invocation the exception |
1403 | // handler would try to exit the monitor of a synchronized method |
1404 | // which hasn't been entered yet, we set the thread local variable |
1405 | // _do_not_unlock_if_synchronized to true. The remove_activation |
1406 | // will check this flag. |
1407 | |
1408 | const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); |
1409 | NOT_LP64(__ get_thread(thread)); |
1410 | const Address do_not_unlock_if_synchronized(thread, |
1411 | in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); |
1412 | __ movbool(do_not_unlock_if_synchronized, true); |
1413 | |
1414 | __ profile_parameters_type(rax, rcx, rdx); |
1415 | // increment invocation count & check for overflow |
1416 | Label invocation_counter_overflow; |
1417 | Label profile_method; |
1418 | Label profile_method_continue; |
1419 | if (inc_counter) { |
1420 | generate_counter_incr(&invocation_counter_overflow, |
1421 | &profile_method, |
1422 | &profile_method_continue); |
1423 | if (ProfileInterpreter) { |
1424 | __ bind(profile_method_continue); |
1425 | } |
1426 | } |
1427 | |
1428 | Label continue_after_compile; |
1429 | __ bind(continue_after_compile); |
1430 | |
1431 | // bang the stack shadow pages |
1432 | bang_stack_shadow_pages(false); |

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER the invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }
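  // lock_method() (defined earlier in this file) allocates the frame's first
  // monitor slot and locks the synchronization object: the receiver for
  // instance methods, or the holder's Class mirror for static methods.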

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
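  // With an empty expression stack, monitor_block_top (the low boundary of the
  // monitor area) must coincide with rsp; the debug-only check above verifies
  // that frame invariant before the first bytecode is dispatched.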

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13/rsi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
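  // The three instructions above are the read-modify-write
  //   thread->_popframe_condition |= popframe_processing_bit;
  // spelled out with an explicit load and store through rdx.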

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                      size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
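    // After the adjustments above, rax holds the size of the parameter area in
    // bytes and rlocals points at its lowest-addressed slot, i.e. the start of
    // the region that popframe_preserve_args copies aside below.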
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // on 32-bit a long value is loaded into rdx:rax, so don't reuse rdx below

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();     // atos entry point
  __ push_ptr();
  __ jmp(L);
#ifndef _LP64
  fep = __ pc();     // ftos entry point
  __ push(ftos);
  __ jmp(L);
  dep = __ pc();     // dtos entry point
  __ push(dtos);
  __ jmp(L);
#else
  fep = __ pc();     // ftos entry point
  __ push_f(xmm0);
  __ jmp(L);
  dep = __ pc();     // dtos entry point
  __ push_d(xmm0);
  __ jmp(L);
#endif // _LP64
  lep = __ pc();     // ltos entry point
  __ push_l();
  __ jmp(L);
  bep = cep = sep = iep = __ pc();      // [bcsi]tos entry point
  __ push_i();
  vep = __ pc();     // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}
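// The entry points generated above let a template that expects an empty (vtos)
// top-of-stack state be reached while a value is still cached in registers:
// each non-void entry first pushes the cached value onto the expression stack
// and then falls through to the shared vtos entry. For example, dispatching
// with an int cached in rax enters through iep, which pushes the int before
// generate_and_dispatch(t) emits the template body.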

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);            // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}
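// The same counter is compared against StopInterpreterAt in
// stop_interpreter_at() below, so when bytecode counting is enabled it
// effectively numbers the bytecodes executed by the interpreter.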

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}
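// In effect the generated code maintains a sliding two-bytecode index:
//   _index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes);
//   _counters[_index]++;
// so the low bits encode the previous bytecode and the high bits the current one.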


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
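// A development-only aid: with -XX:StopInterpreterAt=<n> (and bytecode counting
// active) the int3 above triggers when the global bytecode counter reaches n,
// giving a reproducible point at which to attach a native debugger.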
#endif // !PRODUCT
