1/*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "asm/macroAssembler.hpp"
27#include "compiler/disassembler.hpp"
28#include "interpreter/interpreter.hpp"
29#include "interpreter/interpreterRuntime.hpp"
30#include "interpreter/interp_masm.hpp"
31#include "interpreter/templateTable.hpp"
32#include "memory/universe.hpp"
33#include "oops/methodData.hpp"
34#include "oops/objArrayKlass.hpp"
35#include "oops/oop.inline.hpp"
36#include "prims/methodHandles.hpp"
37#include "runtime/frame.inline.hpp"
38#include "runtime/safepointMechanism.hpp"
39#include "runtime/sharedRuntime.hpp"
40#include "runtime/stubRoutines.hpp"
41#include "runtime/synchronizer.hpp"
42#include "utilities/macros.hpp"
43
44#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
45
46// Global Register Names
47static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
48static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
49
50// Platform-dependent initialization
51void TemplateTable::pd_initialize() {
52 // No x86 specific initialization
53}
54
55// Address Computation: local variables
56static inline Address iaddress(int n) {
57 return Address(rlocals, Interpreter::local_offset_in_bytes(n));
58}
59
60static inline Address laddress(int n) {
61 return iaddress(n + 1);
62}
63
64#ifndef _LP64
65static inline Address haddress(int n) {
66 return iaddress(n + 0);
67}
68#endif
69
70static inline Address faddress(int n) {
71 return iaddress(n);
72}
73
74static inline Address daddress(int n) {
75 return laddress(n);
76}
77
78static inline Address aaddress(int n) {
79 return iaddress(n);
80}
81
82static inline Address iaddress(Register r) {
83 return Address(rlocals, r, Address::times_ptr);
84}
85
86static inline Address laddress(Register r) {
87 return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
88}
89
90#ifndef _LP64
91static inline Address haddress(Register r) {
92 return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
93}
94#endif
95
96static inline Address faddress(Register r) {
97 return iaddress(r);
98}
99
100static inline Address daddress(Register r) {
101 return laddress(r);
102}
103
104static inline Address aaddress(Register r) {
105 return iaddress(r);
106}
107
108
109// expression stack
110// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
111// data beyond the rsp which is potentially unsafe in an MT environment;
112// an interrupt may overwrite that data.)
113static inline Address at_rsp () {
114 return Address(rsp, 0);
115}
116
117// At top of Java expression stack which may be different than esp(). It
118// isn't for category 1 objects.
119static inline Address at_tos () {
120 return Address(rsp, Interpreter::expr_offset_in_bytes(0));
121}
122
123static inline Address at_tos_p1() {
124 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
125}
126
127static inline Address at_tos_p2() {
128 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
129}
130
131// Condition conversion
132static Assembler::Condition j_not(TemplateTable::Condition cc) {
133 switch (cc) {
134 case TemplateTable::equal : return Assembler::notEqual;
135 case TemplateTable::not_equal : return Assembler::equal;
136 case TemplateTable::less : return Assembler::greaterEqual;
137 case TemplateTable::less_equal : return Assembler::greater;
138 case TemplateTable::greater : return Assembler::lessEqual;
139 case TemplateTable::greater_equal: return Assembler::less;
140 }
141 ShouldNotReachHere();
142 return Assembler::zero;
143}
144
145
146
147// Miscelaneous helper routines
148// Store an oop (or NULL) at the address described by obj.
149// If val == noreg this means store a NULL
150
151
152static void do_oop_store(InterpreterMacroAssembler* _masm,
153 Address dst,
154 Register val,
155 DecoratorSet decorators = 0) {
156 assert(val == noreg || val == rax, "parameter is just for looks");
157 __ store_heap_oop(dst, val, rdx, rbx, decorators);
158}
159
160static void do_oop_load(InterpreterMacroAssembler* _masm,
161 Address src,
162 Register dst,
163 DecoratorSet decorators = 0) {
164 __ load_heap_oop(dst, src, rdx, rbx, decorators);
165}
166
167Address TemplateTable::at_bcp(int offset) {
168 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
169 return Address(rbcp, offset);
170}
171
172
173void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
174 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
175 int byte_no) {
176 if (!RewriteBytecodes) return;
177 Label L_patch_done;
178
179 switch (bc) {
180 case Bytecodes::_fast_aputfield:
181 case Bytecodes::_fast_bputfield:
182 case Bytecodes::_fast_zputfield:
183 case Bytecodes::_fast_cputfield:
184 case Bytecodes::_fast_dputfield:
185 case Bytecodes::_fast_fputfield:
186 case Bytecodes::_fast_iputfield:
187 case Bytecodes::_fast_lputfield:
188 case Bytecodes::_fast_sputfield:
189 {
190 // We skip bytecode quickening for putfield instructions when
191 // the put_code written to the constant pool cache is zero.
192 // This is required so that every execution of this instruction
193 // calls out to InterpreterRuntime::resolve_get_put to do
194 // additional, required work.
195 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
196 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
197 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
198 __ movl(bc_reg, bc);
199 __ cmpl(temp_reg, (int) 0);
200 __ jcc(Assembler::zero, L_patch_done); // don't patch
201 }
202 break;
203 default:
204 assert(byte_no == -1, "sanity");
205 // the pair bytecodes have already done the load.
206 if (load_bc_into_bc_reg) {
207 __ movl(bc_reg, bc);
208 }
209 }
210
211 if (JvmtiExport::can_post_breakpoint()) {
212 Label L_fast_patch;
213 // if a breakpoint is present we can't rewrite the stream directly
214 __ movzbl(temp_reg, at_bcp(0));
215 __ cmpl(temp_reg, Bytecodes::_breakpoint);
216 __ jcc(Assembler::notEqual, L_fast_patch);
217 __ get_method(temp_reg);
218 // Let breakpoint table handling rewrite to quicker bytecode
219 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
220#ifndef ASSERT
221 __ jmpb(L_patch_done);
222#else
223 __ jmp(L_patch_done);
224#endif
225 __ bind(L_fast_patch);
226 }
227
228#ifdef ASSERT
229 Label L_okay;
230 __ load_unsigned_byte(temp_reg, at_bcp(0));
231 __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
232 __ jcc(Assembler::equal, L_okay);
233 __ cmpl(temp_reg, bc_reg);
234 __ jcc(Assembler::equal, L_okay);
235 __ stop("patching the wrong bytecode");
236 __ bind(L_okay);
237#endif
238
239 // patch bytecode
240 __ movb(at_bcp(0), bc_reg);
241 __ bind(L_patch_done);
242}
243// Individual instructions
244
245
246void TemplateTable::nop() {
247 transition(vtos, vtos);
248 // nothing to do
249}
250
251void TemplateTable::shouldnotreachhere() {
252 transition(vtos, vtos);
253 __ stop("shouldnotreachhere bytecode");
254}
255
256void TemplateTable::aconst_null() {
257 transition(vtos, atos);
258 __ xorl(rax, rax);
259}
260
261void TemplateTable::iconst(int value) {
262 transition(vtos, itos);
263 if (value == 0) {
264 __ xorl(rax, rax);
265 } else {
266 __ movl(rax, value);
267 }
268}
269
270void TemplateTable::lconst(int value) {
271 transition(vtos, ltos);
272 if (value == 0) {
273 __ xorl(rax, rax);
274 } else {
275 __ movl(rax, value);
276 }
277#ifndef _LP64
278 assert(value >= 0, "check this code");
279 __ xorptr(rdx, rdx);
280#endif
281}
282
283
284
285void TemplateTable::fconst(int value) {
286 transition(vtos, ftos);
287 if (UseSSE >= 1) {
288 static float one = 1.0f, two = 2.0f;
289 switch (value) {
290 case 0:
291 __ xorps(xmm0, xmm0);
292 break;
293 case 1:
294 __ movflt(xmm0, ExternalAddress((address) &one));
295 break;
296 case 2:
297 __ movflt(xmm0, ExternalAddress((address) &two));
298 break;
299 default:
300 ShouldNotReachHere();
301 break;
302 }
303 } else {
304#ifdef _LP64
305 ShouldNotReachHere();
306#else
307 if (value == 0) { __ fldz();
308 } else if (value == 1) { __ fld1();
309 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
310 } else { ShouldNotReachHere();
311 }
312#endif // _LP64
313 }
314}
315
316void TemplateTable::dconst(int value) {
317 transition(vtos, dtos);
318 if (UseSSE >= 2) {
319 static double one = 1.0;
320 switch (value) {
321 case 0:
322 __ xorpd(xmm0, xmm0);
323 break;
324 case 1:
325 __ movdbl(xmm0, ExternalAddress((address) &one));
326 break;
327 default:
328 ShouldNotReachHere();
329 break;
330 }
331 } else {
332#ifdef _LP64
333 ShouldNotReachHere();
334#else
335 if (value == 0) { __ fldz();
336 } else if (value == 1) { __ fld1();
337 } else { ShouldNotReachHere();
338 }
339#endif
340 }
341}
342
343void TemplateTable::bipush() {
344 transition(vtos, itos);
345 __ load_signed_byte(rax, at_bcp(1));
346}
347
348void TemplateTable::sipush() {
349 transition(vtos, itos);
350 __ load_unsigned_short(rax, at_bcp(1));
351 __ bswapl(rax);
352 __ sarl(rax, 16);
353}
354
355void TemplateTable::ldc(bool wide) {
356 transition(vtos, vtos);
357 Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
358 Label call_ldc, notFloat, notClass, notInt, Done;
359
360 if (wide) {
361 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
362 } else {
363 __ load_unsigned_byte(rbx, at_bcp(1));
364 }
365
366 __ get_cpool_and_tags(rcx, rax);
367 const int base_offset = ConstantPool::header_size() * wordSize;
368 const int tags_offset = Array<u1>::base_offset_in_bytes();
369
370 // get type
371 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
372
373 // unresolved class - get the resolved class
374 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
375 __ jccb(Assembler::equal, call_ldc);
376
377 // unresolved class in error state - call into runtime to throw the error
378 // from the first resolution attempt
379 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
380 __ jccb(Assembler::equal, call_ldc);
381
382 // resolved class - need to call vm to get java mirror of the class
383 __ cmpl(rdx, JVM_CONSTANT_Class);
384 __ jcc(Assembler::notEqual, notClass);
385
386 __ bind(call_ldc);
387
388 __ movl(rarg, wide);
389 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);
390
391 __ push(atos);
392 __ jmp(Done);
393
394 __ bind(notClass);
395 __ cmpl(rdx, JVM_CONSTANT_Float);
396 __ jccb(Assembler::notEqual, notFloat);
397
398 // ftos
399 __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
400 __ push(ftos);
401 __ jmp(Done);
402
403 __ bind(notFloat);
404 __ cmpl(rdx, JVM_CONSTANT_Integer);
405 __ jccb(Assembler::notEqual, notInt);
406
407 // itos
408 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
409 __ push(itos);
410 __ jmp(Done);
411
412 // assume the tag is for condy; if not, the VM runtime will tell us
413 __ bind(notInt);
414 condy_helper(Done);
415
416 __ bind(Done);
417}
418
419// Fast path for caching oop constants.
420void TemplateTable::fast_aldc(bool wide) {
421 transition(vtos, atos);
422
423 Register result = rax;
424 Register tmp = rdx;
425 Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
426 int index_size = wide ? sizeof(u2) : sizeof(u1);
427
428 Label resolved;
429
430 // We are resolved if the resolved reference cache entry contains a
431 // non-null object (String, MethodType, etc.)
432 assert_different_registers(result, tmp);
433 __ get_cache_index_at_bcp(tmp, 1, index_size);
434 __ load_resolved_reference_at_index(result, tmp);
435 __ testptr(result, result);
436 __ jcc(Assembler::notZero, resolved);
437
438 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
439
440 // first time invocation - must resolve first
441 __ movl(rarg, (int)bytecode());
442 __ call_VM(result, entry, rarg);
443 __ bind(resolved);
444
445 { // Check for the null sentinel.
446 // If we just called the VM, it already did the mapping for us,
447 // but it's harmless to retry.
448 Label notNull;
449 ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
450 __ movptr(tmp, null_sentinel);
451 __ cmpoop(tmp, result);
452 __ jccb(Assembler::notEqual, notNull);
453 __ xorptr(result, result); // NULL object reference
454 __ bind(notNull);
455 }
456
457 if (VerifyOops) {
458 __ verify_oop(result);
459 }
460}
461
462void TemplateTable::ldc2_w() {
463 transition(vtos, vtos);
464 Label notDouble, notLong, Done;
465 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
466
467 __ get_cpool_and_tags(rcx, rax);
468 const int base_offset = ConstantPool::header_size() * wordSize;
469 const int tags_offset = Array<u1>::base_offset_in_bytes();
470
471 // get type
472 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
473 __ cmpl(rdx, JVM_CONSTANT_Double);
474 __ jccb(Assembler::notEqual, notDouble);
475
476 // dtos
477 __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
478 __ push(dtos);
479
480 __ jmp(Done);
481 __ bind(notDouble);
482 __ cmpl(rdx, JVM_CONSTANT_Long);
483 __ jccb(Assembler::notEqual, notLong);
484
485 // ltos
486 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
487 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
488 __ push(ltos);
489 __ jmp(Done);
490
491 __ bind(notLong);
492 condy_helper(Done);
493
494 __ bind(Done);
495}
496
497void TemplateTable::condy_helper(Label& Done) {
498 const Register obj = rax;
499 const Register off = rbx;
500 const Register flags = rcx;
501 const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
502 __ movl(rarg, (int)bytecode());
503 call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
504#ifndef _LP64
505 // borrow rdi from locals
506 __ get_thread(rdi);
507 __ get_vm_result_2(flags, rdi);
508 __ restore_locals();
509#else
510 __ get_vm_result_2(flags, r15_thread);
511#endif
512 // VMr = obj = base address to find primitive value to push
513 // VMr2 = flags = (tos, off) using format of CPCE::_flags
514 __ movl(off, flags);
515 __ andl(off, ConstantPoolCacheEntry::field_index_mask);
516 const Address field(obj, off, Address::times_1, 0*wordSize);
517
518 // What sort of thing are we loading?
519 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
520 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
521
522 switch (bytecode()) {
523 case Bytecodes::_ldc:
524 case Bytecodes::_ldc_w:
525 {
526 // tos in (itos, ftos, stos, btos, ctos, ztos)
527 Label notInt, notFloat, notShort, notByte, notChar, notBool;
528 __ cmpl(flags, itos);
529 __ jcc(Assembler::notEqual, notInt);
530 // itos
531 __ movl(rax, field);
532 __ push(itos);
533 __ jmp(Done);
534
535 __ bind(notInt);
536 __ cmpl(flags, ftos);
537 __ jcc(Assembler::notEqual, notFloat);
538 // ftos
539 __ load_float(field);
540 __ push(ftos);
541 __ jmp(Done);
542
543 __ bind(notFloat);
544 __ cmpl(flags, stos);
545 __ jcc(Assembler::notEqual, notShort);
546 // stos
547 __ load_signed_short(rax, field);
548 __ push(stos);
549 __ jmp(Done);
550
551 __ bind(notShort);
552 __ cmpl(flags, btos);
553 __ jcc(Assembler::notEqual, notByte);
554 // btos
555 __ load_signed_byte(rax, field);
556 __ push(btos);
557 __ jmp(Done);
558
559 __ bind(notByte);
560 __ cmpl(flags, ctos);
561 __ jcc(Assembler::notEqual, notChar);
562 // ctos
563 __ load_unsigned_short(rax, field);
564 __ push(ctos);
565 __ jmp(Done);
566
567 __ bind(notChar);
568 __ cmpl(flags, ztos);
569 __ jcc(Assembler::notEqual, notBool);
570 // ztos
571 __ load_signed_byte(rax, field);
572 __ push(ztos);
573 __ jmp(Done);
574
575 __ bind(notBool);
576 break;
577 }
578
579 case Bytecodes::_ldc2_w:
580 {
581 Label notLong, notDouble;
582 __ cmpl(flags, ltos);
583 __ jcc(Assembler::notEqual, notLong);
584 // ltos
585 // Loading high word first because movptr clobbers rax
586 NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
587 __ movptr(rax, field);
588 __ push(ltos);
589 __ jmp(Done);
590
591 __ bind(notLong);
592 __ cmpl(flags, dtos);
593 __ jcc(Assembler::notEqual, notDouble);
594 // dtos
595 __ load_double(field);
596 __ push(dtos);
597 __ jmp(Done);
598
599 __ bind(notDouble);
600 break;
601 }
602
603 default:
604 ShouldNotReachHere();
605 }
606
607 __ stop("bad ldc/condy");
608}
609
610void TemplateTable::locals_index(Register reg, int offset) {
611 __ load_unsigned_byte(reg, at_bcp(offset));
612 __ negptr(reg);
613}
614
615void TemplateTable::iload() {
616 iload_internal();
617}
618
619void TemplateTable::nofast_iload() {
620 iload_internal(may_not_rewrite);
621}
622
623void TemplateTable::iload_internal(RewriteControl rc) {
624 transition(vtos, itos);
625 if (RewriteFrequentPairs && rc == may_rewrite) {
626 Label rewrite, done;
627 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
628 LP64_ONLY(assert(rbx != bc, "register damaged"));
629
630 // get next byte
631 __ load_unsigned_byte(rbx,
632 at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
633 // if _iload, wait to rewrite to iload2. We only want to rewrite the
634 // last two iloads in a pair. Comparing against fast_iload means that
635 // the next bytecode is neither an iload or a caload, and therefore
636 // an iload pair.
637 __ cmpl(rbx, Bytecodes::_iload);
638 __ jcc(Assembler::equal, done);
639
640 __ cmpl(rbx, Bytecodes::_fast_iload);
641 __ movl(bc, Bytecodes::_fast_iload2);
642
643 __ jccb(Assembler::equal, rewrite);
644
645 // if _caload, rewrite to fast_icaload
646 __ cmpl(rbx, Bytecodes::_caload);
647 __ movl(bc, Bytecodes::_fast_icaload);
648 __ jccb(Assembler::equal, rewrite);
649
650 // rewrite so iload doesn't check again.
651 __ movl(bc, Bytecodes::_fast_iload);
652
653 // rewrite
654 // bc: fast bytecode
655 __ bind(rewrite);
656 patch_bytecode(Bytecodes::_iload, bc, rbx, false);
657 __ bind(done);
658 }
659
660 // Get the local value into tos
661 locals_index(rbx);
662 __ movl(rax, iaddress(rbx));
663}
664
665void TemplateTable::fast_iload2() {
666 transition(vtos, itos);
667 locals_index(rbx);
668 __ movl(rax, iaddress(rbx));
669 __ push(itos);
670 locals_index(rbx, 3);
671 __ movl(rax, iaddress(rbx));
672}
673
674void TemplateTable::fast_iload() {
675 transition(vtos, itos);
676 locals_index(rbx);
677 __ movl(rax, iaddress(rbx));
678}
679
680void TemplateTable::lload() {
681 transition(vtos, ltos);
682 locals_index(rbx);
683 __ movptr(rax, laddress(rbx));
684 NOT_LP64(__ movl(rdx, haddress(rbx)));
685}
686
687void TemplateTable::fload() {
688 transition(vtos, ftos);
689 locals_index(rbx);
690 __ load_float(faddress(rbx));
691}
692
693void TemplateTable::dload() {
694 transition(vtos, dtos);
695 locals_index(rbx);
696 __ load_double(daddress(rbx));
697}
698
699void TemplateTable::aload() {
700 transition(vtos, atos);
701 locals_index(rbx);
702 __ movptr(rax, aaddress(rbx));
703}
704
705void TemplateTable::locals_index_wide(Register reg) {
706 __ load_unsigned_short(reg, at_bcp(2));
707 __ bswapl(reg);
708 __ shrl(reg, 16);
709 __ negptr(reg);
710}
711
712void TemplateTable::wide_iload() {
713 transition(vtos, itos);
714 locals_index_wide(rbx);
715 __ movl(rax, iaddress(rbx));
716}
717
718void TemplateTable::wide_lload() {
719 transition(vtos, ltos);
720 locals_index_wide(rbx);
721 __ movptr(rax, laddress(rbx));
722 NOT_LP64(__ movl(rdx, haddress(rbx)));
723}
724
725void TemplateTable::wide_fload() {
726 transition(vtos, ftos);
727 locals_index_wide(rbx);
728 __ load_float(faddress(rbx));
729}
730
731void TemplateTable::wide_dload() {
732 transition(vtos, dtos);
733 locals_index_wide(rbx);
734 __ load_double(daddress(rbx));
735}
736
737void TemplateTable::wide_aload() {
738 transition(vtos, atos);
739 locals_index_wide(rbx);
740 __ movptr(rax, aaddress(rbx));
741}
742
743void TemplateTable::index_check(Register array, Register index) {
744 // Pop ptr into array
745 __ pop_ptr(array);
746 index_check_without_pop(array, index);
747}
748
749void TemplateTable::index_check_without_pop(Register array, Register index) {
750 // destroys rbx
751 // check array
752 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
753 // sign extend index for use by indexed load
754 __ movl2ptr(index, index);
755 // check index
756 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
757 if (index != rbx) {
758 // ??? convention: move aberrant index into rbx for exception message
759 assert(rbx != array, "different registers");
760 __ movl(rbx, index);
761 }
762 Label skip;
763 __ jccb(Assembler::below, skip);
764 // Pass array to create more detailed exceptions.
765 __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
766 __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
767 __ bind(skip);
768}
769
770void TemplateTable::iaload() {
771 transition(itos, itos);
772 // rax: index
773 // rdx: array
774 index_check(rdx, rax); // kills rbx
775 __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
776 Address(rdx, rax, Address::times_4,
777 arrayOopDesc::base_offset_in_bytes(T_INT)),
778 noreg, noreg);
779}
780
781void TemplateTable::laload() {
782 transition(itos, ltos);
783 // rax: index
784 // rdx: array
785 index_check(rdx, rax); // kills rbx
786 NOT_LP64(__ mov(rbx, rax));
787 // rbx,: index
788 __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
789 Address(rdx, rbx, Address::times_8,
790 arrayOopDesc::base_offset_in_bytes(T_LONG)),
791 noreg, noreg);
792}
793
794
795
796void TemplateTable::faload() {
797 transition(itos, ftos);
798 // rax: index
799 // rdx: array
800 index_check(rdx, rax); // kills rbx
801 __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
802 Address(rdx, rax,
803 Address::times_4,
804 arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
805 noreg, noreg);
806}
807
808void TemplateTable::daload() {
809 transition(itos, dtos);
810 // rax: index
811 // rdx: array
812 index_check(rdx, rax); // kills rbx
813 __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
814 Address(rdx, rax,
815 Address::times_8,
816 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
817 noreg, noreg);
818}
819
820void TemplateTable::aaload() {
821 transition(itos, atos);
822 // rax: index
823 // rdx: array
824 index_check(rdx, rax); // kills rbx
825 do_oop_load(_masm,
826 Address(rdx, rax,
827 UseCompressedOops ? Address::times_4 : Address::times_ptr,
828 arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
829 rax,
830 IS_ARRAY);
831}
832
833void TemplateTable::baload() {
834 transition(itos, itos);
835 // rax: index
836 // rdx: array
837 index_check(rdx, rax); // kills rbx
838 __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
839 Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
840 noreg, noreg);
841}
842
843void TemplateTable::caload() {
844 transition(itos, itos);
845 // rax: index
846 // rdx: array
847 index_check(rdx, rax); // kills rbx
848 __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
849 Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
850 noreg, noreg);
851}
852
853// iload followed by caload frequent pair
854void TemplateTable::fast_icaload() {
855 transition(vtos, itos);
856 // load index out of locals
857 locals_index(rbx);
858 __ movl(rax, iaddress(rbx));
859
860 // rax: index
861 // rdx: array
862 index_check(rdx, rax); // kills rbx
863 __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
864 Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
865 noreg, noreg);
866}
867
868
869void TemplateTable::saload() {
870 transition(itos, itos);
871 // rax: index
872 // rdx: array
873 index_check(rdx, rax); // kills rbx
874 __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
875 Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
876 noreg, noreg);
877}
878
879void TemplateTable::iload(int n) {
880 transition(vtos, itos);
881 __ movl(rax, iaddress(n));
882}
883
884void TemplateTable::lload(int n) {
885 transition(vtos, ltos);
886 __ movptr(rax, laddress(n));
887 NOT_LP64(__ movptr(rdx, haddress(n)));
888}
889
890void TemplateTable::fload(int n) {
891 transition(vtos, ftos);
892 __ load_float(faddress(n));
893}
894
895void TemplateTable::dload(int n) {
896 transition(vtos, dtos);
897 __ load_double(daddress(n));
898}
899
900void TemplateTable::aload(int n) {
901 transition(vtos, atos);
902 __ movptr(rax, aaddress(n));
903}
904
905void TemplateTable::aload_0() {
906 aload_0_internal();
907}
908
909void TemplateTable::nofast_aload_0() {
910 aload_0_internal(may_not_rewrite);
911}
912
913void TemplateTable::aload_0_internal(RewriteControl rc) {
914 transition(vtos, atos);
915 // According to bytecode histograms, the pairs:
916 //
917 // _aload_0, _fast_igetfield
918 // _aload_0, _fast_agetfield
919 // _aload_0, _fast_fgetfield
920 //
921 // occur frequently. If RewriteFrequentPairs is set, the (slow)
922 // _aload_0 bytecode checks if the next bytecode is either
923 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
924 // rewrites the current bytecode into a pair bytecode; otherwise it
925 // rewrites the current bytecode into _fast_aload_0 that doesn't do
926 // the pair check anymore.
927 //
928 // Note: If the next bytecode is _getfield, the rewrite must be
929 // delayed, otherwise we may miss an opportunity for a pair.
930 //
931 // Also rewrite frequent pairs
932 // aload_0, aload_1
933 // aload_0, iload_1
934 // These bytecodes with a small amount of code are most profitable
935 // to rewrite
936 if (RewriteFrequentPairs && rc == may_rewrite) {
937 Label rewrite, done;
938
939 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
940 LP64_ONLY(assert(rbx != bc, "register damaged"));
941
942 // get next byte
943 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
944
945 // if _getfield then wait with rewrite
946 __ cmpl(rbx, Bytecodes::_getfield);
947 __ jcc(Assembler::equal, done);
948
949 // if _igetfield then rewrite to _fast_iaccess_0
950 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
951 __ cmpl(rbx, Bytecodes::_fast_igetfield);
952 __ movl(bc, Bytecodes::_fast_iaccess_0);
953 __ jccb(Assembler::equal, rewrite);
954
955 // if _agetfield then rewrite to _fast_aaccess_0
956 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
957 __ cmpl(rbx, Bytecodes::_fast_agetfield);
958 __ movl(bc, Bytecodes::_fast_aaccess_0);
959 __ jccb(Assembler::equal, rewrite);
960
961 // if _fgetfield then rewrite to _fast_faccess_0
962 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
963 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
964 __ movl(bc, Bytecodes::_fast_faccess_0);
965 __ jccb(Assembler::equal, rewrite);
966
967 // else rewrite to _fast_aload0
968 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
969 __ movl(bc, Bytecodes::_fast_aload_0);
970
971 // rewrite
972 // bc: fast bytecode
973 __ bind(rewrite);
974 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
975
976 __ bind(done);
977 }
978
979 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
980 aload(0);
981}
982
983void TemplateTable::istore() {
984 transition(itos, vtos);
985 locals_index(rbx);
986 __ movl(iaddress(rbx), rax);
987}
988
989
990void TemplateTable::lstore() {
991 transition(ltos, vtos);
992 locals_index(rbx);
993 __ movptr(laddress(rbx), rax);
994 NOT_LP64(__ movptr(haddress(rbx), rdx));
995}
996
997void TemplateTable::fstore() {
998 transition(ftos, vtos);
999 locals_index(rbx);
1000 __ store_float(faddress(rbx));
1001}
1002
1003void TemplateTable::dstore() {
1004 transition(dtos, vtos);
1005 locals_index(rbx);
1006 __ store_double(daddress(rbx));
1007}
1008
1009void TemplateTable::astore() {
1010 transition(vtos, vtos);
1011 __ pop_ptr(rax);
1012 locals_index(rbx);
1013 __ movptr(aaddress(rbx), rax);
1014}
1015
1016void TemplateTable::wide_istore() {
1017 transition(vtos, vtos);
1018 __ pop_i();
1019 locals_index_wide(rbx);
1020 __ movl(iaddress(rbx), rax);
1021}
1022
1023void TemplateTable::wide_lstore() {
1024 transition(vtos, vtos);
1025 NOT_LP64(__ pop_l(rax, rdx));
1026 LP64_ONLY(__ pop_l());
1027 locals_index_wide(rbx);
1028 __ movptr(laddress(rbx), rax);
1029 NOT_LP64(__ movl(haddress(rbx), rdx));
1030}
1031
1032void TemplateTable::wide_fstore() {
1033#ifdef _LP64
1034 transition(vtos, vtos);
1035 __ pop_f(xmm0);
1036 locals_index_wide(rbx);
1037 __ movflt(faddress(rbx), xmm0);
1038#else
1039 wide_istore();
1040#endif
1041}
1042
1043void TemplateTable::wide_dstore() {
1044#ifdef _LP64
1045 transition(vtos, vtos);
1046 __ pop_d(xmm0);
1047 locals_index_wide(rbx);
1048 __ movdbl(daddress(rbx), xmm0);
1049#else
1050 wide_lstore();
1051#endif
1052}
1053
1054void TemplateTable::wide_astore() {
1055 transition(vtos, vtos);
1056 __ pop_ptr(rax);
1057 locals_index_wide(rbx);
1058 __ movptr(aaddress(rbx), rax);
1059}
1060
1061void TemplateTable::iastore() {
1062 transition(itos, vtos);
1063 __ pop_i(rbx);
1064 // rax: value
1065 // rbx: index
1066 // rdx: array
1067 index_check(rdx, rbx); // prefer index in rbx
1068 __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
1069 Address(rdx, rbx, Address::times_4,
1070 arrayOopDesc::base_offset_in_bytes(T_INT)),
1071 rax, noreg, noreg);
1072}
1073
1074void TemplateTable::lastore() {
1075 transition(ltos, vtos);
1076 __ pop_i(rbx);
1077 // rax,: low(value)
1078 // rcx: array
1079 // rdx: high(value)
1080 index_check(rcx, rbx); // prefer index in rbx,
1081 // rbx,: index
1082 __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
1083 Address(rcx, rbx, Address::times_8,
1084 arrayOopDesc::base_offset_in_bytes(T_LONG)),
1085 noreg /* ltos */, noreg, noreg);
1086}
1087
1088
1089void TemplateTable::fastore() {
1090 transition(ftos, vtos);
1091 __ pop_i(rbx);
1092 // value is in UseSSE >= 1 ? xmm0 : ST(0)
1093 // rbx: index
1094 // rdx: array
1095 index_check(rdx, rbx); // prefer index in rbx
1096 __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
1097 Address(rdx, rbx, Address::times_4,
1098 arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
1099 noreg /* ftos */, noreg, noreg);
1100}
1101
1102void TemplateTable::dastore() {
1103 transition(dtos, vtos);
1104 __ pop_i(rbx);
1105 // value is in UseSSE >= 2 ? xmm0 : ST(0)
1106 // rbx: index
1107 // rdx: array
1108 index_check(rdx, rbx); // prefer index in rbx
1109 __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
1110 Address(rdx, rbx, Address::times_8,
1111 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
1112 noreg /* dtos */, noreg, noreg);
1113}
1114
1115void TemplateTable::aastore() {
1116 Label is_null, ok_is_subtype, done;
1117 transition(vtos, vtos);
1118 // stack: ..., array, index, value
1119 __ movptr(rax, at_tos()); // value
1120 __ movl(rcx, at_tos_p1()); // index
1121 __ movptr(rdx, at_tos_p2()); // array
1122
1123 Address element_address(rdx, rcx,
1124 UseCompressedOops? Address::times_4 : Address::times_ptr,
1125 arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1126
1127 index_check_without_pop(rdx, rcx); // kills rbx
1128 __ testptr(rax, rax);
1129 __ jcc(Assembler::zero, is_null);
1130
1131 // Move subklass into rbx
1132 __ load_klass(rbx, rax);
1133 // Move superklass into rax
1134 __ load_klass(rax, rdx);
1135 __ movptr(rax, Address(rax,
1136 ObjArrayKlass::element_klass_offset()));
1137
1138 // Generate subtype check. Blows rcx, rdi
1139 // Superklass in rax. Subklass in rbx.
1140 __ gen_subtype_check(rbx, ok_is_subtype);
1141
1142 // Come here on failure
1143 // object is at TOS
1144 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1145
1146 // Come here on success
1147 __ bind(ok_is_subtype);
1148
1149 // Get the value we will store
1150 __ movptr(rax, at_tos());
1151 __ movl(rcx, at_tos_p1()); // index
1152 // Now store using the appropriate barrier
1153 do_oop_store(_masm, element_address, rax, IS_ARRAY);
1154 __ jmp(done);
1155
1156 // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
1157 __ bind(is_null);
1158 __ profile_null_seen(rbx);
1159
1160 // Store a NULL
1161 do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1162
1163 // Pop stack arguments
1164 __ bind(done);
1165 __ addptr(rsp, 3 * Interpreter::stackElementSize);
1166}
1167
1168void TemplateTable::bastore() {
1169 transition(itos, vtos);
1170 __ pop_i(rbx);
1171 // rax: value
1172 // rbx: index
1173 // rdx: array
1174 index_check(rdx, rbx); // prefer index in rbx
1175 // Need to check whether array is boolean or byte
1176 // since both types share the bastore bytecode.
1177 __ load_klass(rcx, rdx);
1178 __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
1179 int diffbit = Klass::layout_helper_boolean_diffbit();
1180 __ testl(rcx, diffbit);
1181 Label L_skip;
1182 __ jccb(Assembler::zero, L_skip);
1183 __ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1184 __ bind(L_skip);
1185 __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
1186 Address(rdx, rbx,Address::times_1,
1187 arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1188 rax, noreg, noreg);
1189}
1190
1191void TemplateTable::castore() {
1192 transition(itos, vtos);
1193 __ pop_i(rbx);
1194 // rax: value
1195 // rbx: index
1196 // rdx: array
1197 index_check(rdx, rbx); // prefer index in rbx
1198 __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
1199 Address(rdx, rbx, Address::times_2,
1200 arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1201 rax, noreg, noreg);
1202}
1203
1204
1205void TemplateTable::sastore() {
1206 castore();
1207}
1208
1209void TemplateTable::istore(int n) {
1210 transition(itos, vtos);
1211 __ movl(iaddress(n), rax);
1212}
1213
1214void TemplateTable::lstore(int n) {
1215 transition(ltos, vtos);
1216 __ movptr(laddress(n), rax);
1217 NOT_LP64(__ movptr(haddress(n), rdx));
1218}
1219
1220void TemplateTable::fstore(int n) {
1221 transition(ftos, vtos);
1222 __ store_float(faddress(n));
1223}
1224
1225void TemplateTable::dstore(int n) {
1226 transition(dtos, vtos);
1227 __ store_double(daddress(n));
1228}
1229
1230
1231void TemplateTable::astore(int n) {
1232 transition(vtos, vtos);
1233 __ pop_ptr(rax);
1234 __ movptr(aaddress(n), rax);
1235}
1236
1237void TemplateTable::pop() {
1238 transition(vtos, vtos);
1239 __ addptr(rsp, Interpreter::stackElementSize);
1240}
1241
1242void TemplateTable::pop2() {
1243 transition(vtos, vtos);
1244 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1245}
1246
1247
1248void TemplateTable::dup() {
1249 transition(vtos, vtos);
1250 __ load_ptr(0, rax);
1251 __ push_ptr(rax);
1252 // stack: ..., a, a
1253}
1254
1255void TemplateTable::dup_x1() {
1256 transition(vtos, vtos);
1257 // stack: ..., a, b
1258 __ load_ptr( 0, rax); // load b
1259 __ load_ptr( 1, rcx); // load a
1260 __ store_ptr(1, rax); // store b
1261 __ store_ptr(0, rcx); // store a
1262 __ push_ptr(rax); // push b
1263 // stack: ..., b, a, b
1264}
1265
1266void TemplateTable::dup_x2() {
1267 transition(vtos, vtos);
1268 // stack: ..., a, b, c
1269 __ load_ptr( 0, rax); // load c
1270 __ load_ptr( 2, rcx); // load a
1271 __ store_ptr(2, rax); // store c in a
1272 __ push_ptr(rax); // push c
1273 // stack: ..., c, b, c, c
1274 __ load_ptr( 2, rax); // load b
1275 __ store_ptr(2, rcx); // store a in b
1276 // stack: ..., c, a, c, c
1277 __ store_ptr(1, rax); // store b in c
1278 // stack: ..., c, a, b, c
1279}
1280
1281void TemplateTable::dup2() {
1282 transition(vtos, vtos);
1283 // stack: ..., a, b
1284 __ load_ptr(1, rax); // load a
1285 __ push_ptr(rax); // push a
1286 __ load_ptr(1, rax); // load b
1287 __ push_ptr(rax); // push b
1288 // stack: ..., a, b, a, b
1289}
1290
1291
1292void TemplateTable::dup2_x1() {
1293 transition(vtos, vtos);
1294 // stack: ..., a, b, c
1295 __ load_ptr( 0, rcx); // load c
1296 __ load_ptr( 1, rax); // load b
1297 __ push_ptr(rax); // push b
1298 __ push_ptr(rcx); // push c
1299 // stack: ..., a, b, c, b, c
1300 __ store_ptr(3, rcx); // store c in b
1301 // stack: ..., a, c, c, b, c
1302 __ load_ptr( 4, rcx); // load a
1303 __ store_ptr(2, rcx); // store a in 2nd c
1304 // stack: ..., a, c, a, b, c
1305 __ store_ptr(4, rax); // store b in a
1306 // stack: ..., b, c, a, b, c
1307}
1308
1309void TemplateTable::dup2_x2() {
1310 transition(vtos, vtos);
1311 // stack: ..., a, b, c, d
1312 __ load_ptr( 0, rcx); // load d
1313 __ load_ptr( 1, rax); // load c
1314 __ push_ptr(rax); // push c
1315 __ push_ptr(rcx); // push d
1316 // stack: ..., a, b, c, d, c, d
1317 __ load_ptr( 4, rax); // load b
1318 __ store_ptr(2, rax); // store b in d
1319 __ store_ptr(4, rcx); // store d in b
1320 // stack: ..., a, d, c, b, c, d
1321 __ load_ptr( 5, rcx); // load a
1322 __ load_ptr( 3, rax); // load c
1323 __ store_ptr(3, rcx); // store a in c
1324 __ store_ptr(5, rax); // store c in a
1325 // stack: ..., c, d, a, b, c, d
1326}
1327
1328void TemplateTable::swap() {
1329 transition(vtos, vtos);
1330 // stack: ..., a, b
1331 __ load_ptr( 1, rcx); // load a
1332 __ load_ptr( 0, rax); // load b
1333 __ store_ptr(0, rcx); // store a in b
1334 __ store_ptr(1, rax); // store b in a
1335 // stack: ..., b, a
1336}
1337
1338void TemplateTable::iop2(Operation op) {
1339 transition(itos, itos);
1340 switch (op) {
1341 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1342 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1343 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1344 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1345 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1346 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1347 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
1348 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
1349 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
1350 default : ShouldNotReachHere();
1351 }
1352}
1353
1354void TemplateTable::lop2(Operation op) {
1355 transition(ltos, ltos);
1356#ifdef _LP64
1357 switch (op) {
1358 case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
1359 case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
1360 case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
1361 case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
1362 case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
1363 default : ShouldNotReachHere();
1364 }
1365#else
1366 __ pop_l(rbx, rcx);
1367 switch (op) {
1368 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1369 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1370 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1371 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1372 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1373 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1374 default : ShouldNotReachHere();
1375 }
1376#endif
1377}
1378
1379void TemplateTable::idiv() {
1380 transition(itos, itos);
1381 __ movl(rcx, rax);
1382 __ pop_i(rax);
1383 // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1384 // they are not equal, one could do a normal division (no correction
1385 // needed), which may speed up this implementation for the common case.
1386 // (see also JVM spec., p.243 & p.271)
1387 __ corrected_idivl(rcx);
1388}
1389
1390void TemplateTable::irem() {
1391 transition(itos, itos);
1392 __ movl(rcx, rax);
1393 __ pop_i(rax);
1394 // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1395 // they are not equal, one could do a normal division (no correction
1396 // needed), which may speed up this implementation for the common case.
1397 // (see also JVM spec., p.243 & p.271)
1398 __ corrected_idivl(rcx);
1399 __ movl(rax, rdx);
1400}
1401
1402void TemplateTable::lmul() {
1403 transition(ltos, ltos);
1404#ifdef _LP64
1405 __ pop_l(rdx);
1406 __ imulq(rax, rdx);
1407#else
1408 __ pop_l(rbx, rcx);
1409 __ push(rcx); __ push(rbx);
1410 __ push(rdx); __ push(rax);
1411 __ lmul(2 * wordSize, 0);
1412 __ addptr(rsp, 4 * wordSize); // take off temporaries
1413#endif
1414}
1415
1416void TemplateTable::ldiv() {
1417 transition(ltos, ltos);
1418#ifdef _LP64
1419 __ mov(rcx, rax);
1420 __ pop_l(rax);
1421 // generate explicit div0 check
1422 __ testq(rcx, rcx);
1423 __ jump_cc(Assembler::zero,
1424 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1425 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1426 // they are not equal, one could do a normal division (no correction
1427 // needed), which may speed up this implementation for the common case.
1428 // (see also JVM spec., p.243 & p.271)
1429 __ corrected_idivq(rcx); // kills rbx
1430#else
1431 __ pop_l(rbx, rcx);
1432 __ push(rcx); __ push(rbx);
1433 __ push(rdx); __ push(rax);
1434 // check if y = 0
1435 __ orl(rax, rdx);
1436 __ jump_cc(Assembler::zero,
1437 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1438 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1439 __ addptr(rsp, 4 * wordSize); // take off temporaries
1440#endif
1441}
1442
1443void TemplateTable::lrem() {
1444 transition(ltos, ltos);
1445#ifdef _LP64
1446 __ mov(rcx, rax);
1447 __ pop_l(rax);
1448 __ testq(rcx, rcx);
1449 __ jump_cc(Assembler::zero,
1450 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1451 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1452 // they are not equal, one could do a normal division (no correction
1453 // needed), which may speed up this implementation for the common case.
1454 // (see also JVM spec., p.243 & p.271)
1455 __ corrected_idivq(rcx); // kills rbx
1456 __ mov(rax, rdx);
1457#else
1458 __ pop_l(rbx, rcx);
1459 __ push(rcx); __ push(rbx);
1460 __ push(rdx); __ push(rax);
1461 // check if y = 0
1462 __ orl(rax, rdx);
1463 __ jump_cc(Assembler::zero,
1464 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1465 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1466 __ addptr(rsp, 4 * wordSize);
1467#endif
1468}
1469
1470void TemplateTable::lshl() {
1471 transition(itos, ltos);
1472 __ movl(rcx, rax); // get shift count
1473 #ifdef _LP64
1474 __ pop_l(rax); // get shift value
1475 __ shlq(rax);
1476#else
1477 __ pop_l(rax, rdx); // get shift value
1478 __ lshl(rdx, rax);
1479#endif
1480}
1481
1482void TemplateTable::lshr() {
1483#ifdef _LP64
1484 transition(itos, ltos);
1485 __ movl(rcx, rax); // get shift count
1486 __ pop_l(rax); // get shift value
1487 __ sarq(rax);
1488#else
1489 transition(itos, ltos);
1490 __ mov(rcx, rax); // get shift count
1491 __ pop_l(rax, rdx); // get shift value
1492 __ lshr(rdx, rax, true);
1493#endif
1494}
1495
1496void TemplateTable::lushr() {
1497 transition(itos, ltos);
1498#ifdef _LP64
1499 __ movl(rcx, rax); // get shift count
1500 __ pop_l(rax); // get shift value
1501 __ shrq(rax);
1502#else
1503 __ mov(rcx, rax); // get shift count
1504 __ pop_l(rax, rdx); // get shift value
1505 __ lshr(rdx, rax);
1506#endif
1507}
1508
1509void TemplateTable::fop2(Operation op) {
1510 transition(ftos, ftos);
1511
1512 if (UseSSE >= 1) {
1513 switch (op) {
1514 case add:
1515 __ addss(xmm0, at_rsp());
1516 __ addptr(rsp, Interpreter::stackElementSize);
1517 break;
1518 case sub:
1519 __ movflt(xmm1, xmm0);
1520 __ pop_f(xmm0);
1521 __ subss(xmm0, xmm1);
1522 break;
1523 case mul:
1524 __ mulss(xmm0, at_rsp());
1525 __ addptr(rsp, Interpreter::stackElementSize);
1526 break;
1527 case div:
1528 __ movflt(xmm1, xmm0);
1529 __ pop_f(xmm0);
1530 __ divss(xmm0, xmm1);
1531 break;
1532 case rem:
1533 // On x86_64 platforms the SharedRuntime::frem method is called to perform the
1534 // modulo operation. The frem method calls the function
1535 // double fmod(double x, double y) in math.h. The documentation of fmod states:
1536 // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
1537 // (signalling or quiet) is returned.
1538 //
1539 // On x86_32 platforms the FPU is used to perform the modulo operation. The
1540 // reason is that on 32-bit Windows the sign of modulo operations diverges from
1541 // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f (and not -0.0f).
1542 // The fprem instruction used on x86_32 is functionally equivalent to
1543 // SharedRuntime::frem in that it returns a NaN.
1544#ifdef _LP64
1545 __ movflt(xmm1, xmm0);
1546 __ pop_f(xmm0);
1547 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1548#else
1549 __ push_f(xmm0);
1550 __ pop_f();
1551 __ fld_s(at_rsp());
1552 __ fremr(rax);
1553 __ f2ieee();
1554 __ pop(rax); // pop second operand off the stack
1555 __ push_f();
1556 __ pop_f(xmm0);
1557#endif
1558 break;
1559 default:
1560 ShouldNotReachHere();
1561 break;
1562 }
1563 } else {
1564#ifdef _LP64
1565 ShouldNotReachHere();
1566#else
1567 switch (op) {
1568 case add: __ fadd_s (at_rsp()); break;
1569 case sub: __ fsubr_s(at_rsp()); break;
1570 case mul: __ fmul_s (at_rsp()); break;
1571 case div: __ fdivr_s(at_rsp()); break;
1572 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1573 default : ShouldNotReachHere();
1574 }
1575 __ f2ieee();
1576 __ pop(rax); // pop second operand off the stack
1577#endif // _LP64
1578 }
1579}
1580
1581void TemplateTable::dop2(Operation op) {
1582 transition(dtos, dtos);
1583 if (UseSSE >= 2) {
1584 switch (op) {
1585 case add:
1586 __ addsd(xmm0, at_rsp());
1587 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1588 break;
1589 case sub:
1590 __ movdbl(xmm1, xmm0);
1591 __ pop_d(xmm0);
1592 __ subsd(xmm0, xmm1);
1593 break;
1594 case mul:
1595 __ mulsd(xmm0, at_rsp());
1596 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1597 break;
1598 case div:
1599 __ movdbl(xmm1, xmm0);
1600 __ pop_d(xmm0);
1601 __ divsd(xmm0, xmm1);
1602 break;
1603 case rem:
1604 // Similar to fop2(), the modulo operation is performed using the
1605 // SharedRuntime::drem method (on x86_64 platforms) or using the
1606 // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
1607#ifdef _LP64
1608 __ movdbl(xmm1, xmm0);
1609 __ pop_d(xmm0);
1610 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1611#else
1612 __ push_d(xmm0);
1613 __ pop_d();
1614 __ fld_d(at_rsp());
1615 __ fremr(rax);
1616 __ d2ieee();
1617 __ pop(rax);
1618 __ pop(rdx);
1619 __ push_d();
1620 __ pop_d(xmm0);
1621#endif
1622 break;
1623 default:
1624 ShouldNotReachHere();
1625 break;
1626 }
1627 } else {
1628#ifdef _LP64
1629 ShouldNotReachHere();
1630#else
1631 switch (op) {
1632 case add: __ fadd_d (at_rsp()); break;
1633 case sub: __ fsubr_d(at_rsp()); break;
1634 case mul: {
1635 Label L_strict;
1636 Label L_join;
1637 const Address access_flags (rcx, Method::access_flags_offset());
1638 __ get_method(rcx);
1639 __ movl(rcx, access_flags);
1640 __ testl(rcx, JVM_ACC_STRICT);
1641 __ jccb(Assembler::notZero, L_strict);
1642 __ fmul_d (at_rsp());
1643 __ jmpb(L_join);
1644 __ bind(L_strict);
1645 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1646 __ fmulp();
1647 __ fmul_d (at_rsp());
1648 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1649 __ fmulp();
1650 __ bind(L_join);
1651 break;
1652 }
1653 case div: {
1654 Label L_strict;
1655 Label L_join;
1656 const Address access_flags (rcx, Method::access_flags_offset());
1657 __ get_method(rcx);
1658 __ movl(rcx, access_flags);
1659 __ testl(rcx, JVM_ACC_STRICT);
1660 __ jccb(Assembler::notZero, L_strict);
1661 __ fdivr_d(at_rsp());
1662 __ jmp(L_join);
1663 __ bind(L_strict);
1664 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1665 __ fmul_d (at_rsp());
1666 __ fdivrp();
1667 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1668 __ fmulp();
1669 __ bind(L_join);
1670 break;
1671 }
1672 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1673 default : ShouldNotReachHere();
1674 }
1675 __ d2ieee();
1676 // Pop double precision number from rsp.
1677 __ pop(rax);
1678 __ pop(rdx);
1679#endif
1680 }
1681}
1682
1683void TemplateTable::ineg() {
1684 transition(itos, itos);
1685 __ negl(rax);
1686}
1687
1688void TemplateTable::lneg() {
1689 transition(ltos, ltos);
1690 LP64_ONLY(__ negq(rax));
1691 NOT_LP64(__ lneg(rdx, rax));
1692}
1693
1694// Note: 'double' and 'long long' have 32-bits alignment on x86.
1695static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1696 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
1697 // of 128-bits operands for SSE instructions.
1698 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1699 // Store the value to a 128-bits operand.
1700 operand[0] = lo;
1701 operand[1] = hi;
1702 return operand;
1703}
1704
1705// Buffer for 128-bits masks used by SSE instructions.
1706static jlong float_signflip_pool[2*2];
1707static jlong double_signflip_pool[2*2];
1708
1709void TemplateTable::fneg() {
1710 transition(ftos, ftos);
1711 if (UseSSE >= 1) {
1712 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
1713 __ xorps(xmm0, ExternalAddress((address) float_signflip));
1714 } else {
1715 LP64_ONLY(ShouldNotReachHere());
1716 NOT_LP64(__ fchs());
1717 }
1718}
1719
1720void TemplateTable::dneg() {
1721 transition(dtos, dtos);
1722 if (UseSSE >= 2) {
1723 static jlong *double_signflip =
1724 double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1725 __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1726 } else {
1727#ifdef _LP64
1728 ShouldNotReachHere();
1729#else
1730 __ fchs();
1731#endif
1732 }
1733}
1734
1735void TemplateTable::iinc() {
1736 transition(vtos, vtos);
1737 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1738 locals_index(rbx);
1739 __ addl(iaddress(rbx), rdx);
1740}
1741
1742void TemplateTable::wide_iinc() {
1743 transition(vtos, vtos);
1744 __ movl(rdx, at_bcp(4)); // get constant
1745 locals_index_wide(rbx);
1746 __ bswapl(rdx); // swap bytes & sign-extend constant
1747 __ sarl(rdx, 16);
1748 __ addl(iaddress(rbx), rdx);
1749 // Note: should probably use only one movl to get both
1750 // the index and the constant -> fix this
1751}
1752
1753void TemplateTable::convert() {
1754#ifdef _LP64
1755 // Checking
1756#ifdef ASSERT
1757 {
1758 TosState tos_in = ilgl;
1759 TosState tos_out = ilgl;
1760 switch (bytecode()) {
1761 case Bytecodes::_i2l: // fall through
1762 case Bytecodes::_i2f: // fall through
1763 case Bytecodes::_i2d: // fall through
1764 case Bytecodes::_i2b: // fall through
1765 case Bytecodes::_i2c: // fall through
1766 case Bytecodes::_i2s: tos_in = itos; break;
1767 case Bytecodes::_l2i: // fall through
1768 case Bytecodes::_l2f: // fall through
1769 case Bytecodes::_l2d: tos_in = ltos; break;
1770 case Bytecodes::_f2i: // fall through
1771 case Bytecodes::_f2l: // fall through
1772 case Bytecodes::_f2d: tos_in = ftos; break;
1773 case Bytecodes::_d2i: // fall through
1774 case Bytecodes::_d2l: // fall through
1775 case Bytecodes::_d2f: tos_in = dtos; break;
1776 default : ShouldNotReachHere();
1777 }
1778 switch (bytecode()) {
1779 case Bytecodes::_l2i: // fall through
1780 case Bytecodes::_f2i: // fall through
1781 case Bytecodes::_d2i: // fall through
1782 case Bytecodes::_i2b: // fall through
1783 case Bytecodes::_i2c: // fall through
1784 case Bytecodes::_i2s: tos_out = itos; break;
1785 case Bytecodes::_i2l: // fall through
1786 case Bytecodes::_f2l: // fall through
1787 case Bytecodes::_d2l: tos_out = ltos; break;
1788 case Bytecodes::_i2f: // fall through
1789 case Bytecodes::_l2f: // fall through
1790 case Bytecodes::_d2f: tos_out = ftos; break;
1791 case Bytecodes::_i2d: // fall through
1792 case Bytecodes::_l2d: // fall through
1793 case Bytecodes::_f2d: tos_out = dtos; break;
1794 default : ShouldNotReachHere();
1795 }
1796 transition(tos_in, tos_out);
1797 }
1798#endif // ASSERT
1799
1800 static const int64_t is_nan = 0x8000000000000000L;
1801
1802 // Conversion
1803 switch (bytecode()) {
1804 case Bytecodes::_i2l:
1805 __ movslq(rax, rax);
1806 break;
1807 case Bytecodes::_i2f:
1808 __ cvtsi2ssl(xmm0, rax);
1809 break;
1810 case Bytecodes::_i2d:
1811 __ cvtsi2sdl(xmm0, rax);
1812 break;
1813 case Bytecodes::_i2b:
1814 __ movsbl(rax, rax);
1815 break;
1816 case Bytecodes::_i2c:
1817 __ movzwl(rax, rax);
1818 break;
1819 case Bytecodes::_i2s:
1820 __ movswl(rax, rax);
1821 break;
1822 case Bytecodes::_l2i:
1823 __ movl(rax, rax);
1824 break;
1825 case Bytecodes::_l2f:
1826 __ cvtsi2ssq(xmm0, rax);
1827 break;
1828 case Bytecodes::_l2d:
1829 __ cvtsi2sdq(xmm0, rax);
1830 break;
1831 case Bytecodes::_f2i:
1832 {
1833 Label L;
1834 __ cvttss2sil(rax, xmm0);
1835 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1836 __ jcc(Assembler::notEqual, L);
1837 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1838 __ bind(L);
1839 }
1840 break;
1841 case Bytecodes::_f2l:
1842 {
1843 Label L;
1844 __ cvttss2siq(rax, xmm0);
1845 // NaN or overflow/underflow?
1846 __ cmp64(rax, ExternalAddress((address) &is_nan));
1847 __ jcc(Assembler::notEqual, L);
1848 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1849 __ bind(L);
1850 }
1851 break;
1852 case Bytecodes::_f2d:
1853 __ cvtss2sd(xmm0, xmm0);
1854 break;
1855 case Bytecodes::_d2i:
1856 {
1857 Label L;
1858 __ cvttsd2sil(rax, xmm0);
1859 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1860 __ jcc(Assembler::notEqual, L);
1861 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1862 __ bind(L);
1863 }
1864 break;
1865 case Bytecodes::_d2l:
1866 {
1867 Label L;
1868 __ cvttsd2siq(rax, xmm0);
1869 // NaN or overflow/underflow?
1870 __ cmp64(rax, ExternalAddress((address) &is_nan));
1871 __ jcc(Assembler::notEqual, L);
1872 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1873 __ bind(L);
1874 }
1875 break;
1876 case Bytecodes::_d2f:
1877 __ cvtsd2ss(xmm0, xmm0);
1878 break;
1879 default:
1880 ShouldNotReachHere();
1881 }
1882#else
1883 // Checking
1884#ifdef ASSERT
1885 { TosState tos_in = ilgl;
1886 TosState tos_out = ilgl;
1887 switch (bytecode()) {
1888 case Bytecodes::_i2l: // fall through
1889 case Bytecodes::_i2f: // fall through
1890 case Bytecodes::_i2d: // fall through
1891 case Bytecodes::_i2b: // fall through
1892 case Bytecodes::_i2c: // fall through
1893 case Bytecodes::_i2s: tos_in = itos; break;
1894 case Bytecodes::_l2i: // fall through
1895 case Bytecodes::_l2f: // fall through
1896 case Bytecodes::_l2d: tos_in = ltos; break;
1897 case Bytecodes::_f2i: // fall through
1898 case Bytecodes::_f2l: // fall through
1899 case Bytecodes::_f2d: tos_in = ftos; break;
1900 case Bytecodes::_d2i: // fall through
1901 case Bytecodes::_d2l: // fall through
1902 case Bytecodes::_d2f: tos_in = dtos; break;
1903 default : ShouldNotReachHere();
1904 }
1905 switch (bytecode()) {
1906 case Bytecodes::_l2i: // fall through
1907 case Bytecodes::_f2i: // fall through
1908 case Bytecodes::_d2i: // fall through
1909 case Bytecodes::_i2b: // fall through
1910 case Bytecodes::_i2c: // fall through
1911 case Bytecodes::_i2s: tos_out = itos; break;
1912 case Bytecodes::_i2l: // fall through
1913 case Bytecodes::_f2l: // fall through
1914 case Bytecodes::_d2l: tos_out = ltos; break;
1915 case Bytecodes::_i2f: // fall through
1916 case Bytecodes::_l2f: // fall through
1917 case Bytecodes::_d2f: tos_out = ftos; break;
1918 case Bytecodes::_i2d: // fall through
1919 case Bytecodes::_l2d: // fall through
1920 case Bytecodes::_f2d: tos_out = dtos; break;
1921 default : ShouldNotReachHere();
1922 }
1923 transition(tos_in, tos_out);
1924 }
1925#endif // ASSERT
1926
1927 // Conversion
1928 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1929 switch (bytecode()) {
1930 case Bytecodes::_i2l:
1931 __ extend_sign(rdx, rax);
1932 break;
1933 case Bytecodes::_i2f:
1934 if (UseSSE >= 1) {
1935 __ cvtsi2ssl(xmm0, rax);
1936 } else {
1937 __ push(rax); // store int on tos
1938 __ fild_s(at_rsp()); // load int to ST0
1939 __ f2ieee(); // truncate to float size
1940 __ pop(rcx); // adjust rsp
1941 }
1942 break;
1943 case Bytecodes::_i2d:
1944 if (UseSSE >= 2) {
1945 __ cvtsi2sdl(xmm0, rax);
1946 } else {
1947 __ push(rax); // add one slot for d2ieee()
1948 __ push(rax); // store int on tos
1949 __ fild_s(at_rsp()); // load int to ST0
1950 __ d2ieee(); // truncate to double size
1951 __ pop(rcx); // adjust rsp
1952 __ pop(rcx);
1953 }
1954 break;
1955 case Bytecodes::_i2b:
1956 __ shll(rax, 24); // truncate upper 24 bits
1957 __ sarl(rax, 24); // and sign-extend byte
1958 LP64_ONLY(__ movsbl(rax, rax));
1959 break;
1960 case Bytecodes::_i2c:
1961 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1962 LP64_ONLY(__ movzwl(rax, rax));
1963 break;
1964 case Bytecodes::_i2s:
1965 __ shll(rax, 16); // truncate upper 16 bits
1966 __ sarl(rax, 16); // and sign-extend short
1967 LP64_ONLY(__ movswl(rax, rax));
1968 break;
1969 case Bytecodes::_l2i:
1970 /* nothing to do */
1971 break;
1972 case Bytecodes::_l2f:
1973 // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
1974 // 64-bit long values to floats. On 32-bit platforms it is not possible
1975 // to use that instruction with 64-bit operands, therefore the FPU is
1976 // used to perform the conversion.
1977 __ push(rdx); // store long on tos
1978 __ push(rax);
1979 __ fild_d(at_rsp()); // load long to ST0
1980 __ f2ieee(); // truncate to float size
1981 __ pop(rcx); // adjust rsp
1982 __ pop(rcx);
1983 if (UseSSE >= 1) {
1984 __ push_f();
1985 __ pop_f(xmm0);
1986 }
1987 break;
1988 case Bytecodes::_l2d:
1989 // On 32-bit platforms the FPU is used for conversion because on
1990 // 32-bit platforms it is not not possible to use the cvtsi2sdq
1991 // instruction with 64-bit operands.
1992 __ push(rdx); // store long on tos
1993 __ push(rax);
1994 __ fild_d(at_rsp()); // load long to ST0
1995 __ d2ieee(); // truncate to double size
1996 __ pop(rcx); // adjust rsp
1997 __ pop(rcx);
1998 if (UseSSE >= 2) {
1999 __ push_d();
2000 __ pop_d(xmm0);
2001 }
2002 break;
2003 case Bytecodes::_f2i:
2004 // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
2005 // as it returns 0 for any NaN.
2006 if (UseSSE >= 1) {
2007 __ push_f(xmm0);
2008 } else {
2009 __ push(rcx); // reserve space for argument
2010 __ fstp_s(at_rsp()); // pass float argument on stack
2011 }
2012 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2013 break;
2014 case Bytecodes::_f2l:
2015 // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
2016 // as it returns 0 for any NaN.
2017 if (UseSSE >= 1) {
2018 __ push_f(xmm0);
2019 } else {
2020 __ push(rcx); // reserve space for argument
2021 __ fstp_s(at_rsp()); // pass float argument on stack
2022 }
2023 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2024 break;
2025 case Bytecodes::_f2d:
2026 if (UseSSE < 1) {
2027 /* nothing to do */
2028 } else if (UseSSE == 1) {
2029 __ push_f(xmm0);
2030 __ pop_f();
2031 } else { // UseSSE >= 2
2032 __ cvtss2sd(xmm0, xmm0);
2033 }
2034 break;
2035 case Bytecodes::_d2i:
2036 if (UseSSE >= 2) {
2037 __ push_d(xmm0);
2038 } else {
2039 __ push(rcx); // reserve space for argument
2040 __ push(rcx);
2041 __ fstp_d(at_rsp()); // pass double argument on stack
2042 }
2043 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2044 break;
2045 case Bytecodes::_d2l:
2046 if (UseSSE >= 2) {
2047 __ push_d(xmm0);
2048 } else {
2049 __ push(rcx); // reserve space for argument
2050 __ push(rcx);
2051 __ fstp_d(at_rsp()); // pass double argument on stack
2052 }
2053 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2054 break;
2055 case Bytecodes::_d2f:
2056 if (UseSSE <= 1) {
2057 __ push(rcx); // reserve space for f2ieee()
2058 __ f2ieee(); // truncate to float size
2059 __ pop(rcx); // adjust rsp
2060 if (UseSSE == 1) {
2061 // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2062 // the conversion is performed using the FPU in this case.
2063 __ push_f();
2064 __ pop_f(xmm0);
2065 }
2066 } else { // UseSSE >= 2
2067 __ cvtsd2ss(xmm0, xmm0);
2068 }
2069 break;
2070 default :
2071 ShouldNotReachHere();
2072 }
2073#endif
2074}
2075
2076void TemplateTable::lcmp() {
2077 transition(ltos, itos);
2078#ifdef _LP64
2079 Label done;
2080 __ pop_l(rdx);
2081 __ cmpq(rdx, rax);
2082 __ movl(rax, -1);
2083 __ jccb(Assembler::less, done);
2084 __ setb(Assembler::notEqual, rax);
2085 __ movzbl(rax, rax);
2086 __ bind(done);
2087#else
2088
2089 // y = rdx:rax
2090 __ pop_l(rbx, rcx); // get x = rcx:rbx
2091 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2092 __ mov(rax, rcx);
2093#endif
2094}
2095
2096void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2097 if ((is_float && UseSSE >= 1) ||
2098 (!is_float && UseSSE >= 2)) {
2099 Label done;
2100 if (is_float) {
2101 // XXX get rid of pop here, use ... reg, mem32
2102 __ pop_f(xmm1);
2103 __ ucomiss(xmm1, xmm0);
2104 } else {
2105 // XXX get rid of pop here, use ... reg, mem64
2106 __ pop_d(xmm1);
2107 __ ucomisd(xmm1, xmm0);
2108 }
2109 if (unordered_result < 0) {
2110 __ movl(rax, -1);
2111 __ jccb(Assembler::parity, done);
2112 __ jccb(Assembler::below, done);
2113 __ setb(Assembler::notEqual, rdx);
2114 __ movzbl(rax, rdx);
2115 } else {
2116 __ movl(rax, 1);
2117 __ jccb(Assembler::parity, done);
2118 __ jccb(Assembler::above, done);
2119 __ movl(rax, 0);
2120 __ jccb(Assembler::equal, done);
2121 __ decrementl(rax);
2122 }
2123 __ bind(done);
2124 } else {
2125#ifdef _LP64
2126 ShouldNotReachHere();
2127#else
2128 if (is_float) {
2129 __ fld_s(at_rsp());
2130 } else {
2131 __ fld_d(at_rsp());
2132 __ pop(rdx);
2133 }
2134 __ pop(rcx);
2135 __ fcmp2int(rax, unordered_result < 0);
2136#endif // _LP64
2137 }
2138}
2139
2140void TemplateTable::branch(bool is_jsr, bool is_wide) {
2141 __ get_method(rcx); // rcx holds method
2142 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2143 // holds bumped taken count
2144
2145 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2146 InvocationCounter::counter_offset();
2147 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2148 InvocationCounter::counter_offset();
2149
2150 // Load up edx with the branch displacement
2151 if (is_wide) {
2152 __ movl(rdx, at_bcp(1));
2153 } else {
2154 __ load_signed_short(rdx, at_bcp(1));
2155 }
2156 __ bswapl(rdx);
2157
2158 if (!is_wide) {
2159 __ sarl(rdx, 16);
2160 }
2161 LP64_ONLY(__ movl2ptr(rdx, rdx));
2162
2163 // Handle all the JSR stuff here, then exit.
2164 // It's much shorter and cleaner than intermingling with the non-JSR
2165 // normal-branch stuff occurring below.
2166 if (is_jsr) {
2167 // Pre-load the next target bytecode into rbx
2168 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2169
2170 // compute return address as bci in rax
2171 __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2172 in_bytes(ConstMethod::codes_offset())));
2173 __ subptr(rax, Address(rcx, Method::const_offset()));
2174 // Adjust the bcp in r13 by the displacement in rdx
2175 __ addptr(rbcp, rdx);
2176 // jsr returns atos that is not an oop
2177 __ push_i(rax);
2178 __ dispatch_only(vtos, true);
2179 return;
2180 }
2181
2182 // Normal (non-jsr) branch handling
2183
2184 // Adjust the bcp in r13 by the displacement in rdx
2185 __ addptr(rbcp, rdx);
2186
2187 assert(UseLoopCounter || !UseOnStackReplacement,
2188 "on-stack-replacement requires loop counters");
2189 Label backedge_counter_overflow;
2190 Label profile_method;
2191 Label dispatch;
2192 if (UseLoopCounter) {
2193 // increment backedge counter for backward branches
2194 // rax: MDO
2195 // rbx: MDO bumped taken-count
2196 // rcx: method
2197 // rdx: target offset
2198 // r13: target bcp
2199 // r14: locals pointer
2200 __ testl(rdx, rdx); // check if forward or backward branch
2201 __ jcc(Assembler::positive, dispatch); // count only if backward branch
2202
2203 // check if MethodCounters exists
2204 Label has_counters;
2205 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2206 __ testptr(rax, rax);
2207 __ jcc(Assembler::notZero, has_counters);
2208 __ push(rdx);
2209 __ push(rcx);
2210 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2211 rcx);
2212 __ pop(rcx);
2213 __ pop(rdx);
2214 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2215 __ testptr(rax, rax);
2216 __ jcc(Assembler::zero, dispatch);
2217 __ bind(has_counters);
2218
2219 if (TieredCompilation) {
2220 Label no_mdo;
2221 int increment = InvocationCounter::count_increment;
2222 if (ProfileInterpreter) {
2223 // Are we profiling?
2224 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2225 __ testptr(rbx, rbx);
2226 __ jccb(Assembler::zero, no_mdo);
2227 // Increment the MDO backedge counter
2228 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2229 in_bytes(InvocationCounter::counter_offset()));
2230 const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2231 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
2232 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2233 __ jmp(dispatch);
2234 }
2235 __ bind(no_mdo);
2236 // Increment backedge counter in MethodCounters*
2237 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2238 const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2239 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2240 rax, false, Assembler::zero,
2241 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2242 } else { // not TieredCompilation
2243 // increment counter
2244 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2245 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
2246 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
2247 __ movl(Address(rcx, be_offset), rax); // store counter
2248
2249 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
2250
2251 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
2252 __ addl(rax, Address(rcx, be_offset)); // add both counters
2253
2254 if (ProfileInterpreter) {
2255 // Test to see if we should create a method data oop
2256 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
2257 __ jcc(Assembler::less, dispatch);
2258
2259 // if no method data exists, go to profile method
2260 __ test_method_data_pointer(rax, profile_method);
2261
2262 if (UseOnStackReplacement) {
2263 // check for overflow against rbx which is the MDO taken count
2264 __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2265 __ jcc(Assembler::below, dispatch);
2266
2267 // When ProfileInterpreter is on, the backedge_count comes
2268 // from the MethodData*, which value does not get reset on
2269 // the call to frequency_counter_overflow(). To avoid
2270 // excessive calls to the overflow routine while the method is
2271 // being compiled, add a second test to make sure the overflow
2272 // function is called only once every overflow_frequency.
2273 const int overflow_frequency = 1024;
2274 __ andl(rbx, overflow_frequency - 1);
2275 __ jcc(Assembler::zero, backedge_counter_overflow);
2276
2277 }
2278 } else {
2279 if (UseOnStackReplacement) {
2280 // check for overflow against rax, which is the sum of the
2281 // counters
2282 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2283 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
2284
2285 }
2286 }
2287 }
2288 __ bind(dispatch);
2289 }
2290
2291 // Pre-load the next target bytecode into rbx
2292 __ load_unsigned_byte(rbx, Address(rbcp, 0));
2293
2294 // continue with the bytecode @ target
2295 // rax: return bci for jsr's, unused otherwise
2296 // rbx: target bytecode
2297 // r13: target bcp
2298 __ dispatch_only(vtos, true);
2299
2300 if (UseLoopCounter) {
2301 if (ProfileInterpreter) {
2302 // Out-of-line code to allocate method data oop.
2303 __ bind(profile_method);
2304 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2305 __ set_method_data_pointer_for_bcp();
2306 __ jmp(dispatch);
2307 }
2308
2309 if (UseOnStackReplacement) {
2310 // invocation counter overflow
2311 __ bind(backedge_counter_overflow);
2312 __ negptr(rdx);
2313 __ addptr(rdx, rbcp); // branch bcp
2314 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2315 __ call_VM(noreg,
2316 CAST_FROM_FN_PTR(address,
2317 InterpreterRuntime::frequency_counter_overflow),
2318 rdx);
2319
2320 // rax: osr nmethod (osr ok) or NULL (osr not possible)
2321 // rdx: scratch
2322 // r14: locals pointer
2323 // r13: bcp
2324 __ testptr(rax, rax); // test result
2325 __ jcc(Assembler::zero, dispatch); // no osr if null
2326 // nmethod may have been invalidated (VM may block upon call_VM return)
2327 __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2328 __ jcc(Assembler::notEqual, dispatch);
2329
2330 // We have the address of an on stack replacement routine in rax.
2331 // In preparation of invoking it, first we must migrate the locals
2332 // and monitors from off the interpreter frame on the stack.
2333 // Ensure to save the osr nmethod over the migration call,
2334 // it will be preserved in rbx.
2335 __ mov(rbx, rax);
2336
2337 NOT_LP64(__ get_thread(rcx));
2338
2339 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2340
2341 // rax is OSR buffer, move it to expected parameter location
2342 LP64_ONLY(__ mov(j_rarg0, rax));
2343 NOT_LP64(__ mov(rcx, rax));
2344 // We use j_rarg definitions here so that registers don't conflict as parameter
2345 // registers change across platforms as we are in the midst of a calling
2346 // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.
2347
2348 const Register retaddr = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2349 const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2350
2351 // pop the interpreter frame
2352 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2353 __ leave(); // remove frame anchor
2354 __ pop(retaddr); // get return address
2355 __ mov(rsp, sender_sp); // set sp to sender sp
2356 // Ensure compiled code always sees stack at proper alignment
2357 __ andptr(rsp, -(StackAlignmentInBytes));
2358
2359 // unlike x86 we need no specialized return from compiled code
2360 // to the interpreter or the call stub.
2361
2362 // push the return address
2363 __ push(retaddr);
2364
2365 // and begin the OSR nmethod
2366 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2367 }
2368 }
2369}
2370
2371void TemplateTable::if_0cmp(Condition cc) {
2372 transition(itos, vtos);
2373 // assume branch is more often taken than not (loops use backward branches)
2374 Label not_taken;
2375 __ testl(rax, rax);
2376 __ jcc(j_not(cc), not_taken);
2377 branch(false, false);
2378 __ bind(not_taken);
2379 __ profile_not_taken_branch(rax);
2380}
2381
2382void TemplateTable::if_icmp(Condition cc) {
2383 transition(itos, vtos);
2384 // assume branch is more often taken than not (loops use backward branches)
2385 Label not_taken;
2386 __ pop_i(rdx);
2387 __ cmpl(rdx, rax);
2388 __ jcc(j_not(cc), not_taken);
2389 branch(false, false);
2390 __ bind(not_taken);
2391 __ profile_not_taken_branch(rax);
2392}
2393
2394void TemplateTable::if_nullcmp(Condition cc) {
2395 transition(atos, vtos);
2396 // assume branch is more often taken than not (loops use backward branches)
2397 Label not_taken;
2398 __ testptr(rax, rax);
2399 __ jcc(j_not(cc), not_taken);
2400 branch(false, false);
2401 __ bind(not_taken);
2402 __ profile_not_taken_branch(rax);
2403}
2404
2405void TemplateTable::if_acmp(Condition cc) {
2406 transition(atos, vtos);
2407 // assume branch is more often taken than not (loops use backward branches)
2408 Label not_taken;
2409 __ pop_ptr(rdx);
2410 __ cmpoop(rdx, rax);
2411 __ jcc(j_not(cc), not_taken);
2412 branch(false, false);
2413 __ bind(not_taken);
2414 __ profile_not_taken_branch(rax);
2415}
2416
2417void TemplateTable::ret() {
2418 transition(vtos, vtos);
2419 locals_index(rbx);
2420 LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2421 NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2422 __ profile_ret(rbx, rcx);
2423 __ get_method(rax);
2424 __ movptr(rbcp, Address(rax, Method::const_offset()));
2425 __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2426 ConstMethod::codes_offset()));
2427 __ dispatch_next(vtos, 0, true);
2428}
2429
2430void TemplateTable::wide_ret() {
2431 transition(vtos, vtos);
2432 locals_index_wide(rbx);
2433 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2434 __ profile_ret(rbx, rcx);
2435 __ get_method(rax);
2436 __ movptr(rbcp, Address(rax, Method::const_offset()));
2437 __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2438 __ dispatch_next(vtos, 0, true);
2439}
2440
2441void TemplateTable::tableswitch() {
2442 Label default_case, continue_execution;
2443 transition(itos, vtos);
2444
2445 // align r13/rsi
2446 __ lea(rbx, at_bcp(BytesPerInt));
2447 __ andptr(rbx, -BytesPerInt);
2448 // load lo & hi
2449 __ movl(rcx, Address(rbx, BytesPerInt));
2450 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2451 __ bswapl(rcx);
2452 __ bswapl(rdx);
2453 // check against lo & hi
2454 __ cmpl(rax, rcx);
2455 __ jcc(Assembler::less, default_case);
2456 __ cmpl(rax, rdx);
2457 __ jcc(Assembler::greater, default_case);
2458 // lookup dispatch offset
2459 __ subl(rax, rcx);
2460 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2461 __ profile_switch_case(rax, rbx, rcx);
2462 // continue execution
2463 __ bind(continue_execution);
2464 __ bswapl(rdx);
2465 LP64_ONLY(__ movl2ptr(rdx, rdx));
2466 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2467 __ addptr(rbcp, rdx);
2468 __ dispatch_only(vtos, true);
2469 // handle default
2470 __ bind(default_case);
2471 __ profile_switch_default(rax);
2472 __ movl(rdx, Address(rbx, 0));
2473 __ jmp(continue_execution);
2474}
2475
2476void TemplateTable::lookupswitch() {
2477 transition(itos, itos);
2478 __ stop("lookupswitch bytecode should have been rewritten");
2479}
2480
2481void TemplateTable::fast_linearswitch() {
2482 transition(itos, vtos);
2483 Label loop_entry, loop, found, continue_execution;
2484 // bswap rax so we can avoid bswapping the table entries
2485 __ bswapl(rax);
2486 // align r13
2487 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2488 // this instruction (change offsets
2489 // below)
2490 __ andptr(rbx, -BytesPerInt);
2491 // set counter
2492 __ movl(rcx, Address(rbx, BytesPerInt));
2493 __ bswapl(rcx);
2494 __ jmpb(loop_entry);
2495 // table search
2496 __ bind(loop);
2497 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2498 __ jcc(Assembler::equal, found);
2499 __ bind(loop_entry);
2500 __ decrementl(rcx);
2501 __ jcc(Assembler::greaterEqual, loop);
2502 // default case
2503 __ profile_switch_default(rax);
2504 __ movl(rdx, Address(rbx, 0));
2505 __ jmp(continue_execution);
2506 // entry found -> get offset
2507 __ bind(found);
2508 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2509 __ profile_switch_case(rcx, rax, rbx);
2510 // continue execution
2511 __ bind(continue_execution);
2512 __ bswapl(rdx);
2513 __ movl2ptr(rdx, rdx);
2514 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2515 __ addptr(rbcp, rdx);
2516 __ dispatch_only(vtos, true);
2517}
2518
2519void TemplateTable::fast_binaryswitch() {
2520 transition(itos, vtos);
2521 // Implementation using the following core algorithm:
2522 //
2523 // int binary_search(int key, LookupswitchPair* array, int n) {
2524 // // Binary search according to "Methodik des Programmierens" by
2525 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2526 // int i = 0;
2527 // int j = n;
2528 // while (i+1 < j) {
2529 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2530 // // with Q: for all i: 0 <= i < n: key < a[i]
2531 // // where a stands for the array and assuming that the (inexisting)
2532 // // element a[n] is infinitely big.
2533 // int h = (i + j) >> 1;
2534 // // i < h < j
2535 // if (key < array[h].fast_match()) {
2536 // j = h;
2537 // } else {
2538 // i = h;
2539 // }
2540 // }
2541 // // R: a[i] <= key < a[i+1] or Q
2542 // // (i.e., if key is within array, i is the correct index)
2543 // return i;
2544 // }
2545
2546 // Register allocation
2547 const Register key = rax; // already set (tosca)
2548 const Register array = rbx;
2549 const Register i = rcx;
2550 const Register j = rdx;
2551 const Register h = rdi;
2552 const Register temp = rsi;
2553
2554 // Find array start
2555 NOT_LP64(__ save_bcp());
2556
2557 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2558 // get rid of this
2559 // instruction (change
2560 // offsets below)
2561 __ andptr(array, -BytesPerInt);
2562
2563 // Initialize i & j
2564 __ xorl(i, i); // i = 0;
2565 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2566
2567 // Convert j into native byteordering
2568 __ bswapl(j);
2569
2570 // And start
2571 Label entry;
2572 __ jmp(entry);
2573
2574 // binary search loop
2575 {
2576 Label loop;
2577 __ bind(loop);
2578 // int h = (i + j) >> 1;
2579 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2580 __ sarl(h, 1); // h = (i + j) >> 1;
2581 // if (key < array[h].fast_match()) {
2582 // j = h;
2583 // } else {
2584 // i = h;
2585 // }
2586 // Convert array[h].match to native byte-ordering before compare
2587 __ movl(temp, Address(array, h, Address::times_8));
2588 __ bswapl(temp);
2589 __ cmpl(key, temp);
2590 // j = h if (key < array[h].fast_match())
2591 __ cmov32(Assembler::less, j, h);
2592 // i = h if (key >= array[h].fast_match())
2593 __ cmov32(Assembler::greaterEqual, i, h);
2594 // while (i+1 < j)
2595 __ bind(entry);
2596 __ leal(h, Address(i, 1)); // i+1
2597 __ cmpl(h, j); // i+1 < j
2598 __ jcc(Assembler::less, loop);
2599 }
2600
2601 // end of binary search, result index is i (must check again!)
2602 Label default_case;
2603 // Convert array[i].match to native byte-ordering before compare
2604 __ movl(temp, Address(array, i, Address::times_8));
2605 __ bswapl(temp);
2606 __ cmpl(key, temp);
2607 __ jcc(Assembler::notEqual, default_case);
2608
2609 // entry found -> j = offset
2610 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2611 __ profile_switch_case(i, key, array);
2612 __ bswapl(j);
2613 LP64_ONLY(__ movslq(j, j));
2614
2615 NOT_LP64(__ restore_bcp());
2616 NOT_LP64(__ restore_locals()); // restore rdi
2617
2618 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2619 __ addptr(rbcp, j);
2620 __ dispatch_only(vtos, true);
2621
2622 // default case -> j = default offset
2623 __ bind(default_case);
2624 __ profile_switch_default(i);
2625 __ movl(j, Address(array, -2 * BytesPerInt));
2626 __ bswapl(j);
2627 LP64_ONLY(__ movslq(j, j));
2628
2629 NOT_LP64(__ restore_bcp());
2630 NOT_LP64(__ restore_locals());
2631
2632 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2633 __ addptr(rbcp, j);
2634 __ dispatch_only(vtos, true);
2635}
2636
2637void TemplateTable::_return(TosState state) {
2638 transition(state, state);
2639
2640 assert(_desc->calls_vm(),
2641 "inconsistent calls_vm information"); // call in remove_activation
2642
2643 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2644 assert(state == vtos, "only valid state");
2645 Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2646 __ movptr(robj, aaddress(0));
2647 __ load_klass(rdi, robj);
2648 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2649 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2650 Label skip_register_finalizer;
2651 __ jcc(Assembler::zero, skip_register_finalizer);
2652
2653 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2654
2655 __ bind(skip_register_finalizer);
2656 }
2657
2658 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2659 Label no_safepoint;
2660 NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2661#ifdef _LP64
2662 __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2663#else
2664 const Register thread = rdi;
2665 __ get_thread(thread);
2666 __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2667#endif
2668 __ jcc(Assembler::zero, no_safepoint);
2669 __ push(state);
2670 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2671 InterpreterRuntime::at_safepoint));
2672 __ pop(state);
2673 __ bind(no_safepoint);
2674 }
2675
2676 // Narrow result if state is itos but result type is smaller.
2677 // Need to narrow in the return bytecode rather than in generate_return_entry
2678 // since compiled code callers expect the result to already be narrowed.
2679 if (state == itos) {
2680 __ narrow(rax);
2681 }
2682 __ remove_activation(state, rbcp);
2683
2684 __ jmp(rbcp);
2685}
2686
2687// ----------------------------------------------------------------------------
2688// Volatile variables demand their effects be made known to all CPU's
2689// in order. Store buffers on most chips allow reads & writes to
2690// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2691// without some kind of memory barrier (i.e., it's not sufficient that
2692// the interpreter does not reorder volatile references, the hardware
2693// also must not reorder them).
2694//
2695// According to the new Java Memory Model (JMM):
2696// (1) All volatiles are serialized wrt to each other. ALSO reads &
2697// writes act as aquire & release, so:
2698// (2) A read cannot let unrelated NON-volatile memory refs that
2699// happen after the read float up to before the read. It's OK for
2700// non-volatile memory refs that happen before the volatile read to
2701// float down below it.
2702// (3) Similar a volatile write cannot let unrelated NON-volatile
2703// memory refs that happen BEFORE the write float down to after the
2704// write. It's OK for non-volatile memory refs that happen after the
2705// volatile write to float up before it.
2706//
2707// We only put in barriers around volatile refs (they are expensive),
2708// not _between_ memory refs (that would require us to track the
2709// flavor of the previous memory refs). Requirements (2) and (3)
2710// require some barriers before volatile stores and after volatile
2711// loads. These nearly cover requirement (1) but miss the
2712// volatile-store-volatile-load case. This final case is placed after
2713// volatile-stores although it could just as well go before
2714// volatile-loads.
2715
2716void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2717 // Helper function to insert a is-volatile test and memory barrier
2718 __ membar(order_constraint);
2719}
2720
2721void TemplateTable::resolve_cache_and_index(int byte_no,
2722 Register cache,
2723 Register index,
2724 size_t index_size) {
2725 const Register temp = rbx;
2726 assert_different_registers(cache, index, temp);
2727
2728 Label L_clinit_barrier_slow;
2729 Label resolved;
2730
2731 Bytecodes::Code code = bytecode();
2732 switch (code) {
2733 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2734 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2735 default: break;
2736 }
2737
2738 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2739 __ get_cache_and_index_and_bytecode_at_bcp(cache, index, temp, byte_no, 1, index_size);
2740 __ cmpl(temp, code); // have we resolved this bytecode?
2741 __ jcc(Assembler::equal, resolved);
2742
2743 // resolve first time through
2744 // Class initialization barrier slow path lands here as well.
2745 __ bind(L_clinit_barrier_slow);
2746 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2747 __ movl(temp, code);
2748 __ call_VM(noreg, entry, temp);
2749 // Update registers with resolved info
2750 __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
2751
2752 __ bind(resolved);
2753
2754 // Class initialization barrier for static methods
2755 if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2756 const Register method = temp;
2757 const Register klass = temp;
2758 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2759 assert(thread != noreg, "x86_32 not supported");
2760
2761 __ load_resolved_method_at_index(byte_no, method, cache, index);
2762 __ load_method_holder(klass, method);
2763 __ clinit_barrier(klass, thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
2764 }
2765}
2766
2767// The cache and index registers must be set before call
2768void TemplateTable::load_field_cp_cache_entry(Register obj,
2769 Register cache,
2770 Register index,
2771 Register off,
2772 Register flags,
2773 bool is_static = false) {
2774 assert_different_registers(cache, index, flags, off);
2775
2776 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2777 // Field offset
2778 __ movptr(off, Address(cache, index, Address::times_ptr,
2779 in_bytes(cp_base_offset +
2780 ConstantPoolCacheEntry::f2_offset())));
2781 // Flags
2782 __ movl(flags, Address(cache, index, Address::times_ptr,
2783 in_bytes(cp_base_offset +
2784 ConstantPoolCacheEntry::flags_offset())));
2785
2786 // klass overwrite register
2787 if (is_static) {
2788 __ movptr(obj, Address(cache, index, Address::times_ptr,
2789 in_bytes(cp_base_offset +
2790 ConstantPoolCacheEntry::f1_offset())));
2791 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2792 __ movptr(obj, Address(obj, mirror_offset));
2793 __ resolve_oop_handle(obj);
2794 }
2795}
2796
2797void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2798 Register method,
2799 Register itable_index,
2800 Register flags,
2801 bool is_invokevirtual,
2802 bool is_invokevfinal, /*unused*/
2803 bool is_invokedynamic) {
2804 // setup registers
2805 const Register cache = rcx;
2806 const Register index = rdx;
2807 assert_different_registers(method, flags);
2808 assert_different_registers(method, cache, index);
2809 assert_different_registers(itable_index, flags);
2810 assert_different_registers(itable_index, cache, index);
2811 // determine constant pool cache field offsets
2812 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2813 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2814 ConstantPoolCacheEntry::flags_offset());
2815 // access constant pool cache fields
2816 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2817 ConstantPoolCacheEntry::f2_offset());
2818
2819 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2820 resolve_cache_and_index(byte_no, cache, index, index_size);
2821 __ load_resolved_method_at_index(byte_no, method, cache, index);
2822
2823 if (itable_index != noreg) {
2824 // pick up itable or appendix index from f2 also:
2825 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2826 }
2827 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2828}
2829
2830// The registers cache and index expected to be set before call.
2831// Correct values of the cache and index registers are preserved.
2832void TemplateTable::jvmti_post_field_access(Register cache,
2833 Register index,
2834 bool is_static,
2835 bool has_tos) {
2836 if (JvmtiExport::can_post_field_access()) {
2837 // Check to see if a field access watch has been set before we take
2838 // the time to call into the VM.
2839 Label L1;
2840 assert_different_registers(cache, index, rax);
2841 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2842 __ testl(rax,rax);
2843 __ jcc(Assembler::zero, L1);
2844
2845 // cache entry pointer
2846 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2847 __ shll(index, LogBytesPerWord);
2848 __ addptr(cache, index);
2849 if (is_static) {
2850 __ xorptr(rax, rax); // NULL object reference
2851 } else {
2852 __ pop(atos); // Get the object
2853 __ verify_oop(rax);
2854 __ push(atos); // Restore stack state
2855 }
2856 // rax,: object pointer or NULL
2857 // cache: cache entry pointer
2858 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2859 rax, cache);
2860 __ get_cache_and_index_at_bcp(cache, index, 1);
2861 __ bind(L1);
2862 }
2863}
2864
2865void TemplateTable::pop_and_check_object(Register r) {
2866 __ pop_ptr(r);
2867 __ null_check(r); // for field access must check obj.
2868 __ verify_oop(r);
2869}
2870
2871void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2872 transition(vtos, vtos);
2873
2874 const Register cache = rcx;
2875 const Register index = rdx;
2876 const Register obj = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2877 const Register off = rbx;
2878 const Register flags = rax;
2879 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2880
2881 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2882 jvmti_post_field_access(cache, index, is_static, false);
2883 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2884
2885 if (!is_static) pop_and_check_object(obj);
2886
2887 const Address field(obj, off, Address::times_1, 0*wordSize);
2888
2889 Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
2890
2891 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2892 // Make sure we don't need to mask edx after the above shift
2893 assert(btos == 0, "change code, btos != 0");
2894
2895 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2896
2897 __ jcc(Assembler::notZero, notByte);
2898 // btos
2899 __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
2900 __ push(btos);
2901 // Rewrite bytecode to be faster
2902 if (!is_static && rc == may_rewrite) {
2903 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2904 }
2905 __ jmp(Done);
2906
2907 __ bind(notByte);
2908 __ cmpl(flags, ztos);
2909 __ jcc(Assembler::notEqual, notBool);
2910
2911 // ztos (same code as btos)
2912 __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
2913 __ push(ztos);
2914 // Rewrite bytecode to be faster
2915 if (!is_static && rc == may_rewrite) {
2916 // use btos rewriting, no truncating to t/f bit is needed for getfield.
2917 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2918 }
2919 __ jmp(Done);
2920
2921 __ bind(notBool);
2922 __ cmpl(flags, atos);
2923 __ jcc(Assembler::notEqual, notObj);
2924 // atos
2925 do_oop_load(_masm, field, rax);
2926 __ push(atos);
2927 if (!is_static && rc == may_rewrite) {
2928 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2929 }
2930 __ jmp(Done);
2931
2932 __ bind(notObj);
2933 __ cmpl(flags, itos);
2934 __ jcc(Assembler::notEqual, notInt);
2935 // itos
2936 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
2937 __ push(itos);
2938 // Rewrite bytecode to be faster
2939 if (!is_static && rc == may_rewrite) {
2940 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2941 }
2942 __ jmp(Done);
2943
2944 __ bind(notInt);
2945 __ cmpl(flags, ctos);
2946 __ jcc(Assembler::notEqual, notChar);
2947 // ctos
2948 __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
2949 __ push(ctos);
2950 // Rewrite bytecode to be faster
2951 if (!is_static && rc == may_rewrite) {
2952 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2953 }
2954 __ jmp(Done);
2955
2956 __ bind(notChar);
2957 __ cmpl(flags, stos);
2958 __ jcc(Assembler::notEqual, notShort);
2959 // stos
2960 __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
2961 __ push(stos);
2962 // Rewrite bytecode to be faster
2963 if (!is_static && rc == may_rewrite) {
2964 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2965 }
2966 __ jmp(Done);
2967
2968 __ bind(notShort);
2969 __ cmpl(flags, ltos);
2970 __ jcc(Assembler::notEqual, notLong);
2971 // ltos
2972 // Generate code as if volatile (x86_32). There just aren't enough registers to
2973 // save that information and this code is faster than the test.
2974 __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
2975 __ push(ltos);
2976 // Rewrite bytecode to be faster
2977 LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
2978 __ jmp(Done);
2979
2980 __ bind(notLong);
2981 __ cmpl(flags, ftos);
2982 __ jcc(Assembler::notEqual, notFloat);
2983 // ftos
2984
2985 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2986 __ push(ftos);
2987 // Rewrite bytecode to be faster
2988 if (!is_static && rc == may_rewrite) {
2989 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2990 }
2991 __ jmp(Done);
2992
2993 __ bind(notFloat);
2994#ifdef ASSERT
2995 Label notDouble;
2996 __ cmpl(flags, dtos);
2997 __ jcc(Assembler::notEqual, notDouble);
2998#endif
2999 // dtos
3000 // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
3001 __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
3002 __ push(dtos);
3003 // Rewrite bytecode to be faster
3004 if (!is_static && rc == may_rewrite) {
3005 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
3006 }
3007#ifdef ASSERT
3008 __ jmp(Done);
3009
3010 __ bind(notDouble);
3011 __ stop("Bad state");
3012#endif
3013
3014 __ bind(Done);
3015 // [jk] not needed currently
3016 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
3017 // Assembler::LoadStore));
3018}
3019
3020void TemplateTable::getfield(int byte_no) {
3021 getfield_or_static(byte_no, false);
3022}
3023
3024void TemplateTable::nofast_getfield(int byte_no) {
3025 getfield_or_static(byte_no, false, may_not_rewrite);
3026}
3027
3028void TemplateTable::getstatic(int byte_no) {
3029 getfield_or_static(byte_no, true);
3030}
3031
3032
3033// The registers cache and index expected to be set before call.
3034// The function may destroy various registers, just not the cache and index registers.
3035void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
3036
3037 const Register robj = LP64_ONLY(c_rarg2) NOT_LP64(rax);
3038 const Register RBX = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
3039 const Register RCX = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3040 const Register RDX = LP64_ONLY(rscratch1) NOT_LP64(rdx);
3041
3042 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3043
3044 if (JvmtiExport::can_post_field_modification()) {
3045 // Check to see if a field modification watch has been set before
3046 // we take the time to call into the VM.
3047 Label L1;
3048 assert_different_registers(cache, index, rax);
3049 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3050 __ testl(rax, rax);
3051 __ jcc(Assembler::zero, L1);
3052
3053 __ get_cache_and_index_at_bcp(robj, RDX, 1);
3054
3055
3056 if (is_static) {
3057 // Life is simple. Null out the object pointer.
3058 __ xorl(RBX, RBX);
3059
3060 } else {
3061 // Life is harder. The stack holds the value on top, followed by
3062 // the object. We don't know the size of the value, though; it
3063 // could be one or two words depending on its type. As a result,
3064 // we must find the type to determine where the object is.
3065#ifndef _LP64
3066 Label two_word, valsize_known;
3067#endif
3068 __ movl(RCX, Address(robj, RDX,
3069 Address::times_ptr,
3070 in_bytes(cp_base_offset +
3071 ConstantPoolCacheEntry::flags_offset())));
3072 NOT_LP64(__ mov(rbx, rsp));
3073 __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3074
3075 // Make sure we don't need to mask rcx after the above shift
3076 ConstantPoolCacheEntry::verify_tos_state_shift();
3077#ifdef _LP64
3078 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
3079 __ cmpl(c_rarg3, ltos);
3080 __ cmovptr(Assembler::equal,
3081 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3082 __ cmpl(c_rarg3, dtos);
3083 __ cmovptr(Assembler::equal,
3084 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3085#else
3086 __ cmpl(rcx, ltos);
3087 __ jccb(Assembler::equal, two_word);
3088 __ cmpl(rcx, dtos);
3089 __ jccb(Assembler::equal, two_word);
3090 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3091 __ jmpb(valsize_known);
3092
3093 __ bind(two_word);
3094 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3095
3096 __ bind(valsize_known);
3097 // setup object pointer
3098 __ movptr(rbx, Address(rbx, 0));
3099#endif
3100 }
3101 // cache entry pointer
3102 __ addptr(robj, in_bytes(cp_base_offset));
3103 __ shll(RDX, LogBytesPerWord);
3104 __ addptr(robj, RDX);
3105 // object (tos)
3106 __ mov(RCX, rsp);
3107 // c_rarg1: object pointer set up above (NULL if static)
3108 // c_rarg2: cache entry pointer
3109 // c_rarg3: jvalue object on the stack
3110 __ call_VM(noreg,
3111 CAST_FROM_FN_PTR(address,
3112 InterpreterRuntime::post_field_modification),
3113 RBX, robj, RCX);
3114 __ get_cache_and_index_at_bcp(cache, index, 1);
3115 __ bind(L1);
3116 }
3117}
3118
3119void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3120 transition(vtos, vtos);
3121
3122 const Register cache = rcx;
3123 const Register index = rdx;
3124 const Register obj = rcx;
3125 const Register off = rbx;
3126 const Register flags = rax;
3127
3128 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3129 jvmti_post_field_mod(cache, index, is_static);
3130 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3131
3132 // [jk] not needed currently
3133 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3134 // Assembler::StoreStore));
3135
3136 Label notVolatile, Done;
3137 __ movl(rdx, flags);
3138 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3139 __ andl(rdx, 0x1);
3140
3141 // Check for volatile store
3142 __ testl(rdx, rdx);
3143 __ jcc(Assembler::zero, notVolatile);
3144
3145 putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3146 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3147 Assembler::StoreStore));
3148 __ jmp(Done);
3149 __ bind(notVolatile);
3150
3151 putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3152
3153 __ bind(Done);
3154}
3155
3156void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3157 Register obj, Register off, Register flags) {
3158
3159 // field addresses
3160 const Address field(obj, off, Address::times_1, 0*wordSize);
3161 NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3162
3163 Label notByte, notBool, notInt, notShort, notChar,
3164 notLong, notFloat, notObj;
3165 Label Done;
3166
3167 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3168
3169 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3170
3171 assert(btos == 0, "change code, btos != 0");
3172 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3173 __ jcc(Assembler::notZero, notByte);
3174
3175 // btos
3176 {
3177 __ pop(btos);
3178 if (!is_static) pop_and_check_object(obj);
3179 __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3180 if (!is_static && rc == may_rewrite) {
3181 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3182 }
3183 __ jmp(Done);
3184 }
3185
3186 __ bind(notByte);
3187 __ cmpl(flags, ztos);
3188 __ jcc(Assembler::notEqual, notBool);
3189
3190 // ztos
3191 {
3192 __ pop(ztos);
3193 if (!is_static) pop_and_check_object(obj);
3194 __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3195 if (!is_static && rc == may_rewrite) {
3196 patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3197 }
3198 __ jmp(Done);
3199 }
3200
3201 __ bind(notBool);
3202 __ cmpl(flags, atos);
3203 __ jcc(Assembler::notEqual, notObj);
3204
3205 // atos
3206 {
3207 __ pop(atos);
3208 if (!is_static) pop_and_check_object(obj);
3209 // Store into the field
3210 do_oop_store(_masm, field, rax);
3211 if (!is_static && rc == may_rewrite) {
3212 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3213 }
3214 __ jmp(Done);
3215 }
3216
3217 __ bind(notObj);
3218 __ cmpl(flags, itos);
3219 __ jcc(Assembler::notEqual, notInt);
3220
3221 // itos
3222 {
3223 __ pop(itos);
3224 if (!is_static) pop_and_check_object(obj);
3225 __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3226 if (!is_static && rc == may_rewrite) {
3227 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3228 }
3229 __ jmp(Done);
3230 }
3231
3232 __ bind(notInt);
3233 __ cmpl(flags, ctos);
3234 __ jcc(Assembler::notEqual, notChar);
3235
3236 // ctos
3237 {
3238 __ pop(ctos);
3239 if (!is_static) pop_and_check_object(obj);
3240 __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3241 if (!is_static && rc == may_rewrite) {
3242 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3243 }
3244 __ jmp(Done);
3245 }
3246
3247 __ bind(notChar);
3248 __ cmpl(flags, stos);
3249 __ jcc(Assembler::notEqual, notShort);
3250
3251 // stos
3252 {
3253 __ pop(stos);
3254 if (!is_static) pop_and_check_object(obj);
3255 __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3256 if (!is_static && rc == may_rewrite) {
3257 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3258 }
3259 __ jmp(Done);
3260 }
3261
3262 __ bind(notShort);
3263 __ cmpl(flags, ltos);
3264 __ jcc(Assembler::notEqual, notLong);
3265
3266 // ltos
3267 {
3268 __ pop(ltos);
3269 if (!is_static) pop_and_check_object(obj);
3270 // MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
3271 __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg);
3272#ifdef _LP64
3273 if (!is_static && rc == may_rewrite) {
3274 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3275 }
3276#endif // _LP64
3277 __ jmp(Done);
3278 }
3279
3280 __ bind(notLong);
3281 __ cmpl(flags, ftos);
3282 __ jcc(Assembler::notEqual, notFloat);
3283
3284 // ftos
3285 {
3286 __ pop(ftos);
3287 if (!is_static) pop_and_check_object(obj);
3288 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
3289 if (!is_static && rc == may_rewrite) {
3290 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3291 }
3292 __ jmp(Done);
3293 }
3294
3295 __ bind(notFloat);
3296#ifdef ASSERT
3297 Label notDouble;
3298 __ cmpl(flags, dtos);
3299 __ jcc(Assembler::notEqual, notDouble);
3300#endif
3301
3302 // dtos
3303 {
3304 __ pop(dtos);
3305 if (!is_static) pop_and_check_object(obj);
3306 // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
3307 __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg);
3308 if (!is_static && rc == may_rewrite) {
3309 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3310 }
3311 }
3312
3313#ifdef ASSERT
3314 __ jmp(Done);
3315
3316 __ bind(notDouble);
3317 __ stop("Bad state");
3318#endif
3319
3320 __ bind(Done);
3321}
3322
3323void TemplateTable::putfield(int byte_no) {
3324 putfield_or_static(byte_no, false);
3325}
3326
3327void TemplateTable::nofast_putfield(int byte_no) {
3328 putfield_or_static(byte_no, false, may_not_rewrite);
3329}
3330
3331void TemplateTable::putstatic(int byte_no) {
3332 putfield_or_static(byte_no, true);
3333}
3334
3335void TemplateTable::jvmti_post_fast_field_mod() {
3336
3337 const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3338
3339 if (JvmtiExport::can_post_field_modification()) {
3340 // Check to see if a field modification watch has been set before
3341 // we take the time to call into the VM.
3342 Label L2;
3343 __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3344 __ testl(scratch, scratch);
3345 __ jcc(Assembler::zero, L2);
3346 __ pop_ptr(rbx); // copy the object pointer from tos
3347 __ verify_oop(rbx);
3348 __ push_ptr(rbx); // put the object pointer back on tos
3349 // Save tos values before call_VM() clobbers them. Since we have
3350 // to do it for every data type, we use the saved values as the
3351 // jvalue object.
3352 switch (bytecode()) { // load values into the jvalue object
3353 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3354 case Bytecodes::_fast_bputfield: // fall through
3355 case Bytecodes::_fast_zputfield: // fall through
3356 case Bytecodes::_fast_sputfield: // fall through
3357 case Bytecodes::_fast_cputfield: // fall through
3358 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3359 case Bytecodes::_fast_dputfield: __ push(dtos); break;
3360 case Bytecodes::_fast_fputfield: __ push(ftos); break;
3361 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3362
3363 default:
3364 ShouldNotReachHere();
3365 }
3366 __ mov(scratch, rsp); // points to jvalue on the stack
3367 // access constant pool cache entry
3368 LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3369 NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3370 __ verify_oop(rbx);
3371 // rbx: object pointer copied above
3372 // c_rarg2: cache entry pointer
3373 // c_rarg3: jvalue object on the stack
3374 LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3375 NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3376
3377 switch (bytecode()) { // restore tos values
3378 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3379 case Bytecodes::_fast_bputfield: // fall through
3380 case Bytecodes::_fast_zputfield: // fall through
3381 case Bytecodes::_fast_sputfield: // fall through
3382 case Bytecodes::_fast_cputfield: // fall through
3383 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3384 case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3385 case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3386 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3387 default: break;
3388 }
3389 __ bind(L2);
3390 }
3391}
3392
3393void TemplateTable::fast_storefield(TosState state) {
3394 transition(state, vtos);
3395
3396 ByteSize base = ConstantPoolCache::base_offset();
3397
3398 jvmti_post_fast_field_mod();
3399
3400 // access constant pool cache
3401 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3402
3403 // test for volatile with rdx but rdx is tos register for lputfield.
3404 __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3405 in_bytes(base +
3406 ConstantPoolCacheEntry::flags_offset())));
3407
3408 // replace index with field offset from cache entry
3409 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3410 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3411
3412 // [jk] not needed currently
3413 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3414 // Assembler::StoreStore));
3415
3416 Label notVolatile, Done;
3417 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3418 __ andl(rdx, 0x1);
3419
3420 // Get object from stack
3421 pop_and_check_object(rcx);
3422
3423 // field address
3424 const Address field(rcx, rbx, Address::times_1);
3425
3426 // Check for volatile store
3427 __ testl(rdx, rdx);
3428 __ jcc(Assembler::zero, notVolatile);
3429
3430 fast_storefield_helper(field, rax);
3431 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3432 Assembler::StoreStore));
3433 __ jmp(Done);
3434 __ bind(notVolatile);
3435
3436 fast_storefield_helper(field, rax);
3437
3438 __ bind(Done);
3439}
3440
3441void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3442
3443 // access field
3444 switch (bytecode()) {
3445 case Bytecodes::_fast_aputfield:
3446 do_oop_store(_masm, field, rax);
3447 break;
3448 case Bytecodes::_fast_lputfield:
3449#ifdef _LP64
3450 __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
3451#else
3452 __ stop("should not be rewritten");
3453#endif
3454 break;
3455 case Bytecodes::_fast_iputfield:
3456 __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3457 break;
3458 case Bytecodes::_fast_zputfield:
3459 __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3460 break;
3461 case Bytecodes::_fast_bputfield:
3462 __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3463 break;
3464 case Bytecodes::_fast_sputfield:
3465 __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3466 break;
3467 case Bytecodes::_fast_cputfield:
3468 __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3469 break;
3470 case Bytecodes::_fast_fputfield:
3471 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos*/, noreg, noreg);
3472 break;
3473 case Bytecodes::_fast_dputfield:
3474 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos*/, noreg, noreg);
3475 break;
3476 default:
3477 ShouldNotReachHere();
3478 }
3479}
3480
3481void TemplateTable::fast_accessfield(TosState state) {
3482 transition(atos, state);
3483
3484 // Do the JVMTI work here to avoid disturbing the register state below
3485 if (JvmtiExport::can_post_field_access()) {
3486 // Check to see if a field access watch has been set before we
3487 // take the time to call into the VM.
3488 Label L1;
3489 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3490 __ testl(rcx, rcx);
3491 __ jcc(Assembler::zero, L1);
3492 // access constant pool cache entry
3493 LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3494 NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3495 __ verify_oop(rax);
3496 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
3497 LP64_ONLY(__ mov(c_rarg1, rax));
3498 // c_rarg1: object pointer copied above
3499 // c_rarg2: cache entry pointer
3500 LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3501 NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3502 __ pop_ptr(rax); // restore object pointer
3503 __ bind(L1);
3504 }
3505
3506 // access constant pool cache
3507 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3508 // replace index with field offset from cache entry
3509 // [jk] not needed currently
3510 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
3511 // in_bytes(ConstantPoolCache::base_offset() +
3512 // ConstantPoolCacheEntry::flags_offset())));
3513 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3514 // __ andl(rdx, 0x1);
3515 //
3516 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3517 in_bytes(ConstantPoolCache::base_offset() +
3518 ConstantPoolCacheEntry::f2_offset())));
3519
3520 // rax: object
3521 __ verify_oop(rax);
3522 __ null_check(rax);
3523 Address field(rax, rbx, Address::times_1);
3524
3525 // access field
3526 switch (bytecode()) {
3527 case Bytecodes::_fast_agetfield:
3528 do_oop_load(_masm, field, rax);
3529 __ verify_oop(rax);
3530 break;
3531 case Bytecodes::_fast_lgetfield:
3532#ifdef _LP64
3533 __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3534#else
3535 __ stop("should not be rewritten");
3536#endif
3537 break;
3538 case Bytecodes::_fast_igetfield:
3539 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3540 break;
3541 case Bytecodes::_fast_bgetfield:
3542 __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3543 break;
3544 case Bytecodes::_fast_sgetfield:
3545 __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3546 break;
3547 case Bytecodes::_fast_cgetfield:
3548 __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3549 break;
3550 case Bytecodes::_fast_fgetfield:
3551 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3552 break;
3553 case Bytecodes::_fast_dgetfield:
3554 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3555 break;
3556 default:
3557 ShouldNotReachHere();
3558 }
3559 // [jk] not needed currently
3560 // Label notVolatile;
3561 // __ testl(rdx, rdx);
3562 // __ jcc(Assembler::zero, notVolatile);
3563 // __ membar(Assembler::LoadLoad);
3564 // __ bind(notVolatile);
3565}
3566
3567void TemplateTable::fast_xaccess(TosState state) {
3568 transition(vtos, state);
3569
3570 // get receiver
3571 __ movptr(rax, aaddress(0));
3572 // access constant pool cache
3573 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
3574 __ movptr(rbx,
3575 Address(rcx, rdx, Address::times_ptr,
3576 in_bytes(ConstantPoolCache::base_offset() +
3577 ConstantPoolCacheEntry::f2_offset())));
3578 // make sure exception is reported in correct bcp range (getfield is
3579 // next instruction)
3580 __ increment(rbcp);
3581 __ null_check(rax);
3582 const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3583 switch (state) {
3584 case itos:
3585 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3586 break;
3587 case atos:
3588 do_oop_load(_masm, field, rax);
3589 __ verify_oop(rax);
3590 break;
3591 case ftos:
3592 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3593 break;
3594 default:
3595 ShouldNotReachHere();
3596 }
3597
3598 // [jk] not needed currently
3599 // Label notVolatile;
3600 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3601 // in_bytes(ConstantPoolCache::base_offset() +
3602 // ConstantPoolCacheEntry::flags_offset())));
3603 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3604 // __ testl(rdx, 0x1);
3605 // __ jcc(Assembler::zero, notVolatile);
3606 // __ membar(Assembler::LoadLoad);
3607 // __ bind(notVolatile);
3608
3609 __ decrement(rbcp);
3610}
3611
3612//-----------------------------------------------------------------------------
3613// Calls
3614
3615void TemplateTable::count_calls(Register method, Register temp) {
3616 // implemented elsewhere
3617 ShouldNotReachHere();
3618}
3619
3620void TemplateTable::prepare_invoke(int byte_no,
3621 Register method, // linked method (or i-klass)
3622 Register index, // itable index, MethodType, etc.
3623 Register recv, // if caller wants to see it
3624 Register flags // if caller wants to test it
3625 ) {
3626 // determine flags
3627 const Bytecodes::Code code = bytecode();
3628 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3629 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3630 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3631 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3632 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3633 const bool load_receiver = (recv != noreg);
3634 const bool save_flags = (flags != noreg);
3635 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3636 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3637 assert(flags == noreg || flags == rdx, "");
3638 assert(recv == noreg || recv == rcx, "");
3639
3640 // setup registers & access constant pool cache
3641 if (recv == noreg) recv = rcx;
3642 if (flags == noreg) flags = rdx;
3643 assert_different_registers(method, index, recv, flags);
3644
3645 // save 'interpreter return address'
3646 __ save_bcp();
3647
3648 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3649
3650 // maybe push appendix to arguments (just before return address)
3651 if (is_invokedynamic || is_invokehandle) {
3652 Label L_no_push;
3653 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3654 __ jcc(Assembler::zero, L_no_push);
3655 // Push the appendix as a trailing parameter.
3656 // This must be done before we get the receiver,
3657 // since the parameter_size includes it.
3658 __ push(rbx);
3659 __ mov(rbx, index);
3660 __ load_resolved_reference_at_index(index, rbx);
3661 __ pop(rbx);
3662 __ push(index); // push appendix (MethodType, CallSite, etc.)
3663 __ bind(L_no_push);
3664 }
3665
3666 // load receiver if needed (after appendix is pushed so parameter size is correct)
3667 // Note: no return address pushed yet
3668 if (load_receiver) {
3669 __ movl(recv, flags);
3670 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3671 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3672 const int receiver_is_at_end = -1; // back off one slot to get receiver
3673 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3674 __ movptr(recv, recv_addr);
3675 __ verify_oop(recv);
3676 }
3677
3678 if (save_flags) {
3679 __ movl(rbcp, flags);
3680 }
3681
3682 // compute return type
3683 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3684 // Make sure we don't need to mask flags after the above shift
3685 ConstantPoolCacheEntry::verify_tos_state_shift();
3686 // load return address
3687 {
3688 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3689 ExternalAddress table(table_addr);
3690 LP64_ONLY(__ lea(rscratch1, table));
3691 LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
3692 NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
3693 }
3694
3695 // push return address
3696 __ push(flags);
3697
3698 // Restore flags value from the constant pool cache, and restore rsi
3699 // for later null checks. r13 is the bytecode pointer
3700 if (save_flags) {
3701 __ movl(flags, rbcp);
3702 __ restore_bcp();
3703 }
3704}
3705
3706void TemplateTable::invokevirtual_helper(Register index,
3707 Register recv,
3708 Register flags) {
3709 // Uses temporary registers rax, rdx
3710 assert_different_registers(index, recv, rax, rdx);
3711 assert(index == rbx, "");
3712 assert(recv == rcx, "");
3713
3714 // Test for an invoke of a final method
3715 Label notFinal;
3716 __ movl(rax, flags);
3717 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3718 __ jcc(Assembler::zero, notFinal);
3719
3720 const Register method = index; // method must be rbx
3721 assert(method == rbx,
3722 "Method* must be rbx for interpreter calling convention");
3723
3724 // do the call - the index is actually the method to call
3725 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3726
3727 // It's final, need a null check here!
3728 __ null_check(recv);
3729
3730 // profile this call
3731 __ profile_final_call(rax);
3732 __ profile_arguments_type(rax, method, rbcp, true);
3733
3734 __ jump_from_interpreted(method, rax);
3735
3736 __ bind(notFinal);
3737
3738 // get receiver klass
3739 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3740 __ load_klass(rax, recv);
3741
3742 // profile this call
3743 __ profile_virtual_call(rax, rlocals, rdx);
3744 // get target Method* & entry point
3745 __ lookup_virtual_method(rax, index, method);
3746 __ profile_called_method(method, rdx, rbcp);
3747
3748 __ profile_arguments_type(rdx, method, rbcp, true);
3749 __ jump_from_interpreted(method, rdx);
3750}
3751
3752void TemplateTable::invokevirtual(int byte_no) {
3753 transition(vtos, vtos);
3754 assert(byte_no == f2_byte, "use this argument");
3755 prepare_invoke(byte_no,
3756 rbx, // method or vtable index
3757 noreg, // unused itable index
3758 rcx, rdx); // recv, flags
3759
3760 // rbx: index
3761 // rcx: receiver
3762 // rdx: flags
3763
3764 invokevirtual_helper(rbx, rcx, rdx);
3765}
3766
3767void TemplateTable::invokespecial(int byte_no) {
3768 transition(vtos, vtos);
3769 assert(byte_no == f1_byte, "use this argument");
3770 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3771 rcx); // get receiver also for null check
3772 __ verify_oop(rcx);
3773 __ null_check(rcx);
3774 // do the call
3775 __ profile_call(rax);
3776 __ profile_arguments_type(rax, rbx, rbcp, false);
3777 __ jump_from_interpreted(rbx, rax);
3778}
3779
3780void TemplateTable::invokestatic(int byte_no) {
3781 transition(vtos, vtos);
3782 assert(byte_no == f1_byte, "use this argument");
3783 prepare_invoke(byte_no, rbx); // get f1 Method*
3784 // do the call
3785 __ profile_call(rax);
3786 __ profile_arguments_type(rax, rbx, rbcp, false);
3787 __ jump_from_interpreted(rbx, rax);
3788}
3789
3790
3791void TemplateTable::fast_invokevfinal(int byte_no) {
3792 transition(vtos, vtos);
3793 assert(byte_no == f2_byte, "use this argument");
3794 __ stop("fast_invokevfinal not used on x86");
3795}
3796
3797
3798void TemplateTable::invokeinterface(int byte_no) {
3799 transition(vtos, vtos);
3800 assert(byte_no == f1_byte, "use this argument");
3801 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 Method*
3802 rcx, rdx); // recv, flags
3803
3804 // rax: reference klass (from f1) if interface method
3805 // rbx: method (from f2)
3806 // rcx: receiver
3807 // rdx: flags
3808
3809 // First check for Object case, then private interface method,
3810 // then regular interface method.
3811
3812 // Special case of invokeinterface called for virtual method of
3813 // java.lang.Object. See cpCache.cpp for details.
3814 Label notObjectMethod;
3815 __ movl(rlocals, rdx);
3816 __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3817 __ jcc(Assembler::zero, notObjectMethod);
3818 invokevirtual_helper(rbx, rcx, rdx);
3819 // no return from above
3820 __ bind(notObjectMethod);
3821
3822 Label no_such_interface; // for receiver subtype check
3823 Register recvKlass; // used for exception processing
3824
3825 // Check for private method invocation - indicated by vfinal
3826 Label notVFinal;
3827 __ movl(rlocals, rdx);
3828 __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3829 __ jcc(Assembler::zero, notVFinal);
3830
3831 // Get receiver klass into rlocals - also a null check
3832 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3833 __ load_klass(rlocals, rcx);
3834
3835 Label subtype;
3836 __ check_klass_subtype(rlocals, rax, rbcp, subtype);
3837 // If we get here the typecheck failed
3838 recvKlass = rdx;
3839 __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
3840 __ jmp(no_such_interface);
3841
3842 __ bind(subtype);
3843
3844 // do the call - rbx is actually the method to call
3845
3846 __ profile_final_call(rdx);
3847 __ profile_arguments_type(rdx, rbx, rbcp, true);
3848
3849 __ jump_from_interpreted(rbx, rdx);
3850 // no return from above
3851 __ bind(notVFinal);
3852
3853 // Get receiver klass into rdx - also a null check
3854 __ restore_locals(); // restore r14
3855 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3856 __ load_klass(rdx, rcx);
3857
3858 Label no_such_method;
3859
3860 // Preserve method for throw_AbstractMethodErrorVerbose.
3861 __ mov(rcx, rbx);
3862 // Receiver subtype check against REFC.
3863 // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
3864 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3865 rdx, rax, noreg,
3866 // outputs: scan temp. reg, scan temp. reg
3867 rbcp, rlocals,
3868 no_such_interface,
3869 /*return_method=*/false);
3870
3871 // profile this call
3872 __ restore_bcp(); // rbcp was destroyed by receiver type check
3873 __ profile_virtual_call(rdx, rbcp, rlocals);
3874
3875 // Get declaring interface class from method, and itable index
3876 __ load_method_holder(rax, rbx);
3877 __ movl(rbx, Address(rbx, Method::itable_index_offset()));
3878 __ subl(rbx, Method::itable_index_max);
3879 __ negl(rbx);
3880
3881 // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
3882 __ mov(rlocals, rdx);
3883 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3884 rlocals, rax, rbx,
3885 // outputs: method, scan temp. reg
3886 rbx, rbcp,
3887 no_such_interface);
3888
3889 // rbx: Method* to call
3890 // rcx: receiver
3891 // Check for abstract method error
3892 // Note: This should be done more efficiently via a throw_abstract_method_error
3893 // interpreter entry point and a conditional jump to it in case of a null
3894 // method.
3895 __ testptr(rbx, rbx);
3896 __ jcc(Assembler::zero, no_such_method);
3897
3898 __ profile_called_method(rbx, rbcp, rdx);
3899 __ profile_arguments_type(rdx, rbx, rbcp, true);
3900
3901 // do the call
3902 // rcx: receiver
3903 // rbx,: Method*
3904 __ jump_from_interpreted(rbx, rdx);
3905 __ should_not_reach_here();
3906
3907 // exception handling code follows...
3908 // note: must restore interpreter registers to canonical
3909 // state for exception handling to work correctly!
3910
3911 __ bind(no_such_method);
3912 // throw exception
3913 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3914 __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
3915 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3916 // Pass arguments for generating a verbose error message.
3917#ifdef _LP64
3918 recvKlass = c_rarg1;
3919 Register method = c_rarg2;
3920 if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
3921 if (method != rcx) { __ movq(method, rcx); }
3922#else
3923 recvKlass = rdx;
3924 Register method = rcx;
3925#endif
3926 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3927 recvKlass, method);
3928 // The call_VM checks for exception, so we should never return here.
3929 __ should_not_reach_here();
3930
3931 __ bind(no_such_interface);
3932 // throw exception
3933 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3934 __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
3935 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3936 // Pass arguments for generating a verbose error message.
3937 LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
3938 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3939 recvKlass, rax);
3940 // the call_VM checks for exception, so we should never return here.
3941 __ should_not_reach_here();
3942}
3943
3944void TemplateTable::invokehandle(int byte_no) {
3945 transition(vtos, vtos);
3946 assert(byte_no == f1_byte, "use this argument");
3947 const Register rbx_method = rbx;
3948 const Register rax_mtype = rax;
3949 const Register rcx_recv = rcx;
3950 const Register rdx_flags = rdx;
3951
3952 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3953 __ verify_method_ptr(rbx_method);
3954 __ verify_oop(rcx_recv);
3955 __ null_check(rcx_recv);
3956
3957 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3958 // rbx: MH.invokeExact_MT method (from f2)
3959
3960 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3961
3962 // FIXME: profile the LambdaForm also
3963 __ profile_final_call(rax);
3964 __ profile_arguments_type(rdx, rbx_method, rbcp, true);
3965
3966 __ jump_from_interpreted(rbx_method, rdx);
3967}
3968
3969void TemplateTable::invokedynamic(int byte_no) {
3970 transition(vtos, vtos);
3971 assert(byte_no == f1_byte, "use this argument");
3972
3973 const Register rbx_method = rbx;
3974 const Register rax_callsite = rax;
3975
3976 prepare_invoke(byte_no, rbx_method, rax_callsite);
3977
3978 // rax: CallSite object (from cpool->resolved_references[f1])
3979 // rbx: MH.linkToCallSite method (from f2)
3980
3981 // Note: rax_callsite is already pushed by prepare_invoke
3982
3983 // %%% should make a type profile for any invokedynamic that takes a ref argument
3984 // profile this call
3985 __ profile_call(rbcp);
3986 __ profile_arguments_type(rdx, rbx_method, rbcp, false);
3987
3988 __ verify_oop(rax_callsite);
3989
3990 __ jump_from_interpreted(rbx_method, rdx);
3991}
3992
3993//-----------------------------------------------------------------------------
3994// Allocation
3995
3996void TemplateTable::_new() {
3997 transition(vtos, atos);
3998 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3999 Label slow_case;
4000 Label slow_case_no_pop;
4001 Label done;
4002 Label initialize_header;
4003 Label initialize_object; // including clearing the fields
4004
4005 __ get_cpool_and_tags(rcx, rax);
4006
4007 // Make sure the class we're about to instantiate has been resolved.
4008 // This is done before loading InstanceKlass to be consistent with the order
4009 // how Constant Pool is updated (see ConstantPool::klass_at_put)
4010 const int tags_offset = Array<u1>::base_offset_in_bytes();
4011 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
4012 __ jcc(Assembler::notEqual, slow_case_no_pop);
4013
4014 // get InstanceKlass
4015 __ load_resolved_klass_at_index(rcx, rcx, rdx);
4016 __ push(rcx); // save the contexts of klass for initializing the header
4017
4018 // make sure klass is initialized & doesn't have finalizer
4019 // make sure klass is fully initialized
4020 __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4021 __ jcc(Assembler::notEqual, slow_case);
4022
4023 // get instance_size in InstanceKlass (scaled to a count of bytes)
4024 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
4025 // test to see if it has a finalizer or is malformed in some way
4026 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
4027 __ jcc(Assembler::notZero, slow_case);
4028
4029 // Allocate the instance:
4030 // If TLAB is enabled:
4031 // Try to allocate in the TLAB.
4032 // If fails, go to the slow path.
4033 // Else If inline contiguous allocations are enabled:
4034 // Try to allocate in eden.
4035 // If fails due to heap end, go to slow path.
4036 //
4037 // If TLAB is enabled OR inline contiguous is enabled:
4038 // Initialize the allocation.
4039 // Exit.
4040 //
4041 // Go to slow path.
4042
4043 const bool allow_shared_alloc =
4044 Universe::heap()->supports_inline_contig_alloc();
4045
4046 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
4047#ifndef _LP64
4048 if (UseTLAB || allow_shared_alloc) {
4049 __ get_thread(thread);
4050 }
4051#endif // _LP64
4052
4053 if (UseTLAB) {
4054 __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
4055 if (ZeroTLAB) {
4056 // the fields have been already cleared
4057 __ jmp(initialize_header);
4058 } else {
4059 // initialize both the header and fields
4060 __ jmp(initialize_object);
4061 }
4062 } else {
4063 // Allocation in the shared Eden, if allowed.
4064 //
4065 // rdx: instance size in bytes
4066 __ eden_allocate(thread, rax, rdx, 0, rbx, slow_case);
4067 }
4068
4069 // If UseTLAB or allow_shared_alloc are true, the object is created above and
4070 // there is an initialize need. Otherwise, skip and go to the slow path.
4071 if (UseTLAB || allow_shared_alloc) {
4072 // The object is initialized before the header. If the object size is
4073 // zero, go directly to the header initialization.
4074 __ bind(initialize_object);
4075 __ decrement(rdx, sizeof(oopDesc));
4076 __ jcc(Assembler::zero, initialize_header);
4077
4078 // Initialize topmost object field, divide rdx by 8, check if odd and
4079 // test if zero.
4080 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
4081 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
4082
4083 // rdx must have been multiple of 8
4084#ifdef ASSERT
4085 // make sure rdx was multiple of 8
4086 Label L;
4087 // Ignore partial flag stall after shrl() since it is debug VM
4088 __ jcc(Assembler::carryClear, L);
4089 __ stop("object size is not multiple of 2 - adjust this code");
4090 __ bind(L);
4091 // rdx must be > 0, no extra check needed here
4092#endif
4093
4094 // initialize remaining object fields: rdx was a multiple of 8
4095 { Label loop;
4096 __ bind(loop);
4097 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
4098 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
4099 __ decrement(rdx);
4100 __ jcc(Assembler::notZero, loop);
4101 }
4102
4103 // initialize object header only.
4104 __ bind(initialize_header);
4105 if (UseBiasedLocking) {
4106 __ pop(rcx); // get saved klass back in the register.
4107 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
4108 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
4109 } else {
4110 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
4111 (intptr_t)markOopDesc::prototype()); // header
4112 __ pop(rcx); // get saved klass back in the register.
4113 }
4114#ifdef _LP64
4115 __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4116 __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops
4117#endif
4118 __ store_klass(rax, rcx); // klass
4119
4120 {
4121 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
4122 // Trigger dtrace event for fastpath
4123 __ push(atos);
4124 __ call_VM_leaf(
4125 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
4126 __ pop(atos);
4127 }
4128
4129 __ jmp(done);
4130 }
4131
4132 // slow case
4133 __ bind(slow_case);
4134 __ pop(rcx); // restore stack pointer to what it was when we came in.
4135 __ bind(slow_case_no_pop);
4136
4137 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4138 Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4139
4140 __ get_constant_pool(rarg1);
4141 __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4142 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4143 __ verify_oop(rax);
4144
4145 // continue
4146 __ bind(done);
4147}
4148
4149void TemplateTable::newarray() {
4150 transition(itos, atos);
4151 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4152 __ load_unsigned_byte(rarg1, at_bcp(1));
4153 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4154 rarg1, rax);
4155}
4156
4157void TemplateTable::anewarray() {
4158 transition(itos, atos);
4159
4160 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4161 Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4162
4163 __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4164 __ get_constant_pool(rarg1);
4165 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4166 rarg1, rarg2, rax);
4167}
4168
4169void TemplateTable::arraylength() {
4170 transition(atos, itos);
4171 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
4172 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4173}
4174
4175void TemplateTable::checkcast() {
4176 transition(atos, atos);
4177 Label done, is_null, ok_is_subtype, quicked, resolved;
4178 __ testptr(rax, rax); // object is in rax
4179 __ jcc(Assembler::zero, is_null);
4180
4181 // Get cpool & tags index
4182 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4183 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4184 // See if bytecode has already been quicked
4185 __ cmpb(Address(rdx, rbx,
4186 Address::times_1,
4187 Array<u1>::base_offset_in_bytes()),
4188 JVM_CONSTANT_Class);
4189 __ jcc(Assembler::equal, quicked);
4190 __ push(atos); // save receiver for result, and for GC
4191 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4192
4193 // vm_result_2 has metadata result
4194#ifndef _LP64
4195 // borrow rdi from locals
4196 __ get_thread(rdi);
4197 __ get_vm_result_2(rax, rdi);
4198 __ restore_locals();
4199#else
4200 __ get_vm_result_2(rax, r15_thread);
4201#endif
4202
4203 __ pop_ptr(rdx); // restore receiver
4204 __ jmpb(resolved);
4205
4206 // Get superklass in rax and subklass in rbx
4207 __ bind(quicked);
4208 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4209 __ load_resolved_klass_at_index(rax, rcx, rbx);
4210
4211 __ bind(resolved);
4212 __ load_klass(rbx, rdx);
4213
4214 // Generate subtype check. Blows rcx, rdi. Object in rdx.
4215 // Superklass in rax. Subklass in rbx.
4216 __ gen_subtype_check(rbx, ok_is_subtype);
4217
4218 // Come here on failure
4219 __ push_ptr(rdx);
4220 // object is at TOS
4221 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
4222
4223 // Come here on success
4224 __ bind(ok_is_subtype);
4225 __ mov(rax, rdx); // Restore object in rdx
4226
4227 // Collect counts on whether this check-cast sees NULLs a lot or not.
4228 if (ProfileInterpreter) {
4229 __ jmp(done);
4230 __ bind(is_null);
4231 __ profile_null_seen(rcx);
4232 } else {
4233 __ bind(is_null); // same as 'done'
4234 }
4235 __ bind(done);
4236}
4237
4238void TemplateTable::instanceof() {
4239 transition(atos, itos);
4240 Label done, is_null, ok_is_subtype, quicked, resolved;
4241 __ testptr(rax, rax);
4242 __ jcc(Assembler::zero, is_null);
4243
4244 // Get cpool & tags index
4245 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4246 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4247 // See if bytecode has already been quicked
4248 __ cmpb(Address(rdx, rbx,
4249 Address::times_1,
4250 Array<u1>::base_offset_in_bytes()),
4251 JVM_CONSTANT_Class);
4252 __ jcc(Assembler::equal, quicked);
4253
4254 __ push(atos); // save receiver for result, and for GC
4255 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4256 // vm_result_2 has metadata result
4257
4258#ifndef _LP64
4259 // borrow rdi from locals
4260 __ get_thread(rdi);
4261 __ get_vm_result_2(rax, rdi);
4262 __ restore_locals();
4263#else
4264 __ get_vm_result_2(rax, r15_thread);
4265#endif
4266
4267 __ pop_ptr(rdx); // restore receiver
4268 __ verify_oop(rdx);
4269 __ load_klass(rdx, rdx);
4270 __ jmpb(resolved);
4271
4272 // Get superklass in rax and subklass in rdx
4273 __ bind(quicked);
4274 __ load_klass(rdx, rax);
4275 __ load_resolved_klass_at_index(rax, rcx, rbx);
4276
4277 __ bind(resolved);
4278
4279 // Generate subtype check. Blows rcx, rdi
4280 // Superklass in rax. Subklass in rdx.
4281 __ gen_subtype_check(rdx, ok_is_subtype);
4282
4283 // Come here on failure
4284 __ xorl(rax, rax);
4285 __ jmpb(done);
4286 // Come here on success
4287 __ bind(ok_is_subtype);
4288 __ movl(rax, 1);
4289
4290 // Collect counts on whether this test sees NULLs a lot or not.
4291 if (ProfileInterpreter) {
4292 __ jmp(done);
4293 __ bind(is_null);
4294 __ profile_null_seen(rcx);
4295 } else {
4296 __ bind(is_null); // same as 'done'
4297 }
4298 __ bind(done);
4299 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
4300 // rax = 1: obj != NULL and obj is an instanceof the specified klass
4301}
4302
4303
4304//----------------------------------------------------------------------------------------------------
4305// Breakpoints
4306void TemplateTable::_breakpoint() {
4307 // Note: We get here even if we are single stepping..
4308 // jbug insists on setting breakpoints at every bytecode
4309 // even if we are in single step mode.
4310
4311 transition(vtos, vtos);
4312
4313 Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4314
4315 // get the unpatched byte code
4316 __ get_method(rarg);
4317 __ call_VM(noreg,
4318 CAST_FROM_FN_PTR(address,
4319 InterpreterRuntime::get_original_bytecode_at),
4320 rarg, rbcp);
4321 __ mov(rbx, rax); // why?
4322
4323 // post the breakpoint event
4324 __ get_method(rarg);
4325 __ call_VM(noreg,
4326 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4327 rarg, rbcp);
4328
4329 // complete the execution of original bytecode
4330 __ dispatch_only_normal(vtos);
4331}
4332
4333//-----------------------------------------------------------------------------
4334// Exceptions
4335
4336void TemplateTable::athrow() {
4337 transition(atos, vtos);
4338 __ null_check(rax);
4339 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
4340}
4341
4342//-----------------------------------------------------------------------------
4343// Synchronization
4344//
4345// Note: monitorenter & exit are symmetric routines; which is reflected
4346// in the assembly code structure as well
4347//
4348// Stack layout:
4349//
4350// [expressions ] <--- rsp = expression stack top
4351// ..
4352// [expressions ]
4353// [monitor entry] <--- monitor block top = expression stack bot
4354// ..
4355// [monitor entry]
4356// [frame data ] <--- monitor block bot
4357// ...
4358// [saved rbp ] <--- rbp
4359void TemplateTable::monitorenter() {
4360 transition(atos, vtos);
4361
4362 // check for NULL object
4363 __ null_check(rax);
4364
4365 __ resolve(IS_NOT_NULL, rax);
4366
4367 const Address monitor_block_top(
4368 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4369 const Address monitor_block_bot(
4370 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4371 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4372
4373 Label allocated;
4374
4375 Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4376 Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4377 Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4378
4379 // initialize entry pointer
4380 __ xorl(rmon, rmon); // points to free slot or NULL
4381
4382 // find a free slot in the monitor block (result in rmon)
4383 {
4384 Label entry, loop, exit;
4385 __ movptr(rtop, monitor_block_top); // points to current entry,
4386 // starting with top-most entry
4387 __ lea(rbot, monitor_block_bot); // points to word before bottom
4388 // of monitor block
4389 __ jmpb(entry);
4390
4391 __ bind(loop);
4392 // check if current entry is used
4393 __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
4394 // if not used then remember entry in rmon
4395 __ cmovptr(Assembler::equal, rmon, rtop); // cmov => cmovptr
4396 // check if current entry is for same object
4397 __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4398 // if same object then stop searching
4399 __ jccb(Assembler::equal, exit);
4400 // otherwise advance to next entry
4401 __ addptr(rtop, entry_size);
4402 __ bind(entry);
4403 // check if bottom reached
4404 __ cmpptr(rtop, rbot);
4405 // if not at bottom then check this entry
4406 __ jcc(Assembler::notEqual, loop);
4407 __ bind(exit);
4408 }
4409
4410 __ testptr(rmon, rmon); // check if a slot has been found
4411 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4412
4413 // allocate one if there's no free slot
4414 {
4415 Label entry, loop;
4416 // 1. compute new pointers // rsp: old expression stack top
4417 __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4418 __ subptr(rsp, entry_size); // move expression stack top
4419 __ subptr(rmon, entry_size); // move expression stack bottom
4420 __ mov(rtop, rsp); // set start value for copy loop
4421 __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
4422 __ jmp(entry);
4423 // 2. move expression stack contents
4424 __ bind(loop);
4425 __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4426 // word from old location
4427 __ movptr(Address(rtop, 0), rbot); // and store it at new location
4428 __ addptr(rtop, wordSize); // advance to next word
4429 __ bind(entry);
4430 __ cmpptr(rtop, rmon); // check if bottom reached
4431 __ jcc(Assembler::notEqual, loop); // if not at bottom then
4432 // copy next word
4433 }
4434
4435 // call run-time routine
4436 // rmon: points to monitor entry
4437 __ bind(allocated);
4438
4439 // Increment bcp to point to the next bytecode, so exception
4440 // handling for async. exceptions work correctly.
4441 // The object has already been poped from the stack, so the
4442 // expression stack looks correct.
4443 __ increment(rbcp);
4444
4445 // store object
4446 __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
4447 __ lock_object(rmon);
4448
4449 // check to make sure this monitor doesn't cause stack overflow after locking
4450 __ save_bcp(); // in case of exception
4451 __ generate_stack_overflow_check(0);
4452
4453 // The bcp has already been incremented. Just need to dispatch to
4454 // next instruction.
4455 __ dispatch_next(vtos);
4456}
4457
4458void TemplateTable::monitorexit() {
4459 transition(atos, vtos);
4460
4461 // check for NULL object
4462 __ null_check(rax);
4463
4464 __ resolve(IS_NOT_NULL, rax);
4465
4466 const Address monitor_block_top(
4467 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4468 const Address monitor_block_bot(
4469 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4470 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4471
4472 Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4473 Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4474
4475 Label found;
4476
4477 // find matching slot
4478 {
4479 Label entry, loop;
4480 __ movptr(rtop, monitor_block_top); // points to current entry,
4481 // starting with top-most entry
4482 __ lea(rbot, monitor_block_bot); // points to word before bottom
4483 // of monitor block
4484 __ jmpb(entry);
4485
4486 __ bind(loop);
4487 // check if current entry is for same object
4488 __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4489 // if same object then stop searching
4490 __ jcc(Assembler::equal, found);
4491 // otherwise advance to next entry
4492 __ addptr(rtop, entry_size);
4493 __ bind(entry);
4494 // check if bottom reached
4495 __ cmpptr(rtop, rbot);
4496 // if not at bottom then check this entry
4497 __ jcc(Assembler::notEqual, loop);
4498 }
4499
4500 // error handling. Unlocking was not block-structured
4501 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4502 InterpreterRuntime::throw_illegal_monitor_state_exception));
4503 __ should_not_reach_here();
4504
4505 // call run-time routine
4506 __ bind(found);
4507 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4508 __ unlock_object(rtop);
4509 __ pop_ptr(rax); // discard object
4510}
4511
4512// Wide instructions
4513void TemplateTable::wide() {
4514 transition(vtos, vtos);
4515 __ load_unsigned_byte(rbx, at_bcp(1));
4516 ExternalAddress wtable((address)Interpreter::_wentry_point);
4517 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
4518 // Note: the rbcp increment step is part of the individual wide bytecode implementations
4519}
4520
4521// Multi arrays
4522void TemplateTable::multianewarray() {
4523 transition(vtos, atos);
4524
4525 Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4526 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4527 // last dim is on top of stack; we want address of first one:
4528 // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
4529 // the latter wordSize to point to the beginning of the array.
4530 __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
4531 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4532 __ load_unsigned_byte(rbx, at_bcp(3));
4533 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
4534}
4535