1/*
2 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "c1/c1_Compilation.hpp"
27#include "c1/c1_Defs.hpp"
28#include "c1/c1_FrameMap.hpp"
29#include "c1/c1_Instruction.hpp"
30#include "c1/c1_LIRAssembler.hpp"
31#include "c1/c1_LIRGenerator.hpp"
32#include "c1/c1_ValueStack.hpp"
33#include "ci/ciArrayKlass.hpp"
34#include "ci/ciInstance.hpp"
35#include "ci/ciObjArray.hpp"
36#include "ci/ciUtilities.hpp"
37#include "gc/shared/barrierSet.hpp"
38#include "gc/shared/c1/barrierSetC1.hpp"
39#include "runtime/arguments.hpp"
40#include "runtime/sharedRuntime.hpp"
41#include "runtime/stubRoutines.hpp"
42#include "runtime/vm_version.hpp"
43#include "utilities/bitMap.inline.hpp"
44#include "utilities/macros.hpp"
45
46#ifdef ASSERT
47#define __ gen()->lir(__FILE__, __LINE__)->
48#else
49#define __ gen()->lir()->
50#endif
51
52#ifndef PATCHED_ADDR
53#define PATCHED_ADDR (max_jint)
54#endif
55
56void PhiResolverState::reset() {
57 _virtual_operands.clear();
58 _other_operands.clear();
59 _vreg_table.clear();
60}
61
62
63//--------------------------------------------------------------
64// PhiResolver
65
66// Resolves cycles:
67//
68// r1 := r2 becomes temp := r1
69// r2 := r1 r1 := r2
70// r2 := temp
71// and orders moves:
72//
73// r2 := r3 becomes r1 := r2
74// r1 := r2 r2 := r3
75
76PhiResolver::PhiResolver(LIRGenerator* gen)
77 : _gen(gen)
78 , _state(gen->resolver_state())
79 , _temp(LIR_OprFact::illegalOpr)
80{
81 // reinitialize the shared state arrays
82 _state.reset();
83}
84
85
86void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
87 assert(src->is_valid(), "");
88 assert(dest->is_valid(), "");
89 __ move(src, dest);
90}
91
92
93void PhiResolver::move_temp_to(LIR_Opr dest) {
94 assert(_temp->is_valid(), "");
95 emit_move(_temp, dest);
96 NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
97}
98
99
100void PhiResolver::move_to_temp(LIR_Opr src) {
101 assert(_temp->is_illegal(), "");
102 _temp = _gen->new_register(src->type());
103 emit_move(src, _temp);
104}
105
106
107// Traverse assignment graph in depth first order and generate moves in post order
108// ie. two assignments: b := c, a := b start with node c:
109// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
110// Generates moves in this order: move b to a and move c to b
111// ie. cycle a := b, b := a start with node a
112// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
113// Generates moves in this order: move b to temp, move a to b, move temp to a
114void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
115 if (!dest->visited()) {
116 dest->set_visited();
117 for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
118 move(dest, dest->destination_at(i));
119 }
120 } else if (!dest->start_node()) {
121 // cylce in graph detected
122 assert(_loop == NULL, "only one loop valid!");
123 _loop = dest;
124 move_to_temp(src->operand());
125 return;
126 } // else dest is a start node
127
128 if (!dest->assigned()) {
129 if (_loop == dest) {
130 move_temp_to(dest->operand());
131 dest->set_assigned();
132 } else if (src != NULL) {
133 emit_move(src->operand(), dest->operand());
134 dest->set_assigned();
135 }
136 }
137}
138
139
140PhiResolver::~PhiResolver() {
141 int i;
142 // resolve any cycles in moves from and to virtual registers
143 for (i = virtual_operands().length() - 1; i >= 0; i --) {
144 ResolveNode* node = virtual_operands().at(i);
145 if (!node->visited()) {
146 _loop = NULL;
147 move(NULL, node);
148 node->set_start_node();
149 assert(_temp->is_illegal(), "move_temp_to() call missing");
150 }
151 }
152
153 // generate move for move from non virtual register to abitrary destination
154 for (i = other_operands().length() - 1; i >= 0; i --) {
155 ResolveNode* node = other_operands().at(i);
156 for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
157 emit_move(node->operand(), node->destination_at(j)->operand());
158 }
159 }
160}
161
162
163ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
164 ResolveNode* node;
165 if (opr->is_virtual()) {
166 int vreg_num = opr->vreg_number();
167 node = vreg_table().at_grow(vreg_num, NULL);
168 assert(node == NULL || node->operand() == opr, "");
169 if (node == NULL) {
170 node = new ResolveNode(opr);
171 vreg_table().at_put(vreg_num, node);
172 }
173 // Make sure that all virtual operands show up in the list when
174 // they are used as the source of a move.
175 if (source && !virtual_operands().contains(node)) {
176 virtual_operands().append(node);
177 }
178 } else {
179 assert(source, "");
180 node = new ResolveNode(opr);
181 other_operands().append(node);
182 }
183 return node;
184}
185
186
187void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
188 assert(dest->is_virtual(), "");
189 // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
190 assert(src->is_valid(), "");
191 assert(dest->is_valid(), "");
192 ResolveNode* source = source_node(src);
193 source->append(destination_node(dest));
194}
195
196
197//--------------------------------------------------------------
198// LIRItem
199
200void LIRItem::set_result(LIR_Opr opr) {
201 assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
202 value()->set_operand(opr);
203
204 if (opr->is_virtual()) {
205 _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
206 }
207
208 _result = opr;
209}
210
211void LIRItem::load_item() {
212 if (result()->is_illegal()) {
213 // update the items result
214 _result = value()->operand();
215 }
216 if (!result()->is_register()) {
217 LIR_Opr reg = _gen->new_register(value()->type());
218 __ move(result(), reg);
219 if (result()->is_constant()) {
220 _result = reg;
221 } else {
222 set_result(reg);
223 }
224 }
225}
226
227
228void LIRItem::load_for_store(BasicType type) {
229 if (_gen->can_store_as_constant(value(), type)) {
230 _result = value()->operand();
231 if (!_result->is_constant()) {
232 _result = LIR_OprFact::value_type(value()->type());
233 }
234 } else if (type == T_BYTE || type == T_BOOLEAN) {
235 load_byte_item();
236 } else {
237 load_item();
238 }
239}
240
241void LIRItem::load_item_force(LIR_Opr reg) {
242 LIR_Opr r = result();
243 if (r != reg) {
244#if !defined(ARM) && !defined(E500V2)
245 if (r->type() != reg->type()) {
246 // moves between different types need an intervening spill slot
247 r = _gen->force_to_spill(r, reg->type());
248 }
249#endif
250 __ move(r, reg);
251 _result = reg;
252 }
253}
254
255ciObject* LIRItem::get_jobject_constant() const {
256 ObjectType* oc = type()->as_ObjectType();
257 if (oc) {
258 return oc->constant_value();
259 }
260 return NULL;
261}
262
263
264jint LIRItem::get_jint_constant() const {
265 assert(is_constant() && value() != NULL, "");
266 assert(type()->as_IntConstant() != NULL, "type check");
267 return type()->as_IntConstant()->value();
268}
269
270
271jint LIRItem::get_address_constant() const {
272 assert(is_constant() && value() != NULL, "");
273 assert(type()->as_AddressConstant() != NULL, "type check");
274 return type()->as_AddressConstant()->value();
275}
276
277
278jfloat LIRItem::get_jfloat_constant() const {
279 assert(is_constant() && value() != NULL, "");
280 assert(type()->as_FloatConstant() != NULL, "type check");
281 return type()->as_FloatConstant()->value();
282}
283
284
285jdouble LIRItem::get_jdouble_constant() const {
286 assert(is_constant() && value() != NULL, "");
287 assert(type()->as_DoubleConstant() != NULL, "type check");
288 return type()->as_DoubleConstant()->value();
289}
290
291
292jlong LIRItem::get_jlong_constant() const {
293 assert(is_constant() && value() != NULL, "");
294 assert(type()->as_LongConstant() != NULL, "type check");
295 return type()->as_LongConstant()->value();
296}
297
298
299
300//--------------------------------------------------------------
301
302
303void LIRGenerator::block_do_prolog(BlockBegin* block) {
304#ifndef PRODUCT
305 if (PrintIRWithLIR) {
306 block->print();
307 }
308#endif
309
310 // set up the list of LIR instructions
311 assert(block->lir() == NULL, "LIR list already computed for this block");
312 _lir = new LIR_List(compilation(), block);
313 block->set_lir(_lir);
314
315 __ branch_destination(block->label());
316
317 if (LIRTraceExecution &&
318 Compilation::current()->hir()->start()->block_id() != block->block_id() &&
319 !block->is_set(BlockBegin::exception_entry_flag)) {
320 assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
321 trace_block_entry(block);
322 }
323}
324
325
326void LIRGenerator::block_do_epilog(BlockBegin* block) {
327#ifndef PRODUCT
328 if (PrintIRWithLIR) {
329 tty->cr();
330 }
331#endif
332
333 // LIR_Opr for unpinned constants shouldn't be referenced by other
334 // blocks so clear them out after processing the block.
335 for (int i = 0; i < _unpinned_constants.length(); i++) {
336 _unpinned_constants.at(i)->clear_operand();
337 }
338 _unpinned_constants.trunc_to(0);
339
340 // clear our any registers for other local constants
341 _constants.trunc_to(0);
342 _reg_for_constants.trunc_to(0);
343}
344
345
346void LIRGenerator::block_do(BlockBegin* block) {
347 CHECK_BAILOUT();
348
349 block_do_prolog(block);
350 set_block(block);
351
352 for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
353 if (instr->is_pinned()) do_root(instr);
354 }
355
356 set_block(NULL);
357 block_do_epilog(block);
358}
359
360
361//-------------------------LIRGenerator-----------------------------
362
363// This is where the tree-walk starts; instr must be root;
364void LIRGenerator::do_root(Value instr) {
365 CHECK_BAILOUT();
366
367 InstructionMark im(compilation(), instr);
368
369 assert(instr->is_pinned(), "use only with roots");
370 assert(instr->subst() == instr, "shouldn't have missed substitution");
371
372 instr->visit(this);
373
374 assert(!instr->has_uses() || instr->operand()->is_valid() ||
375 instr->as_Constant() != NULL || bailed_out(), "invalid item set");
376}
377
378
379// This is called for each node in tree; the walk stops if a root is reached
380void LIRGenerator::walk(Value instr) {
381 InstructionMark im(compilation(), instr);
382 //stop walk when encounter a root
383 if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
384 assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
385 } else {
386 assert(instr->subst() == instr, "shouldn't have missed substitution");
387 instr->visit(this);
388 // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
389 }
390}
391
392
393CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
394 assert(state != NULL, "state must be defined");
395
396#ifndef PRODUCT
397 state->verify();
398#endif
399
400 ValueStack* s = state;
401 for_each_state(s) {
402 if (s->kind() == ValueStack::EmptyExceptionState) {
403 assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
404 continue;
405 }
406
407 int index;
408 Value value;
409 for_each_stack_value(s, index, value) {
410 assert(value->subst() == value, "missed substitution");
411 if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
412 walk(value);
413 assert(value->operand()->is_valid(), "must be evaluated now");
414 }
415 }
416
417 int bci = s->bci();
418 IRScope* scope = s->scope();
419 ciMethod* method = scope->method();
420
421 MethodLivenessResult liveness = method->liveness_at_bci(bci);
422 if (bci == SynchronizationEntryBCI) {
423 if (x->as_ExceptionObject() || x->as_Throw()) {
424 // all locals are dead on exit from the synthetic unlocker
425 liveness.clear();
426 } else {
427 assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
428 }
429 }
430 if (!liveness.is_valid()) {
431 // Degenerate or breakpointed method.
432 bailout("Degenerate or breakpointed method");
433 } else {
434 assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
435 for_each_local_value(s, index, value) {
436 assert(value->subst() == value, "missed substition");
437 if (liveness.at(index) && !value->type()->is_illegal()) {
438 if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
439 walk(value);
440 assert(value->operand()->is_valid(), "must be evaluated now");
441 }
442 } else {
443 // NULL out this local so that linear scan can assume that all non-NULL values are live.
444 s->invalidate_local(index);
445 }
446 }
447 }
448 }
449
450 return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
451}
452
453
454CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
455 return state_for(x, x->exception_state());
456}
457
458
459void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
460 /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
461 * is active and the class hasn't yet been resolved we need to emit a patch that resolves
462 * the class. */
463 if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
464 assert(info != NULL, "info must be set if class is not loaded");
465 __ klass2reg_patch(NULL, r, info);
466 } else {
467 // no patching needed
468 __ metadata2reg(obj->constant_encoding(), r);
469 }
470}
471
472
473void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
474 CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
475 CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
476 if (index->is_constant()) {
477 cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
478 index->as_jint(), null_check_info);
479 __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
480 } else {
481 cmp_reg_mem(lir_cond_aboveEqual, index, array,
482 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
483 __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
484 }
485}
486
487
488void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
489 CodeStub* stub = new RangeCheckStub(info, index);
490 if (index->is_constant()) {
491 cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
492 __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
493 } else {
494 cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
495 java_nio_Buffer::limit_offset(), T_INT, info);
496 __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
497 }
498 __ move(index, result);
499}
500
501
502
503void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
504 LIR_Opr result_op = result;
505 LIR_Opr left_op = left;
506 LIR_Opr right_op = right;
507
508 if (TwoOperandLIRForm && left_op != result_op) {
509 assert(right_op != result_op, "malformed");
510 __ move(left_op, result_op);
511 left_op = result_op;
512 }
513
514 switch(code) {
515 case Bytecodes::_dadd:
516 case Bytecodes::_fadd:
517 case Bytecodes::_ladd:
518 case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
519 case Bytecodes::_fmul:
520 case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;
521
522 case Bytecodes::_dmul:
523 {
524 if (is_strictfp) {
525 __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
526 } else {
527 __ mul(left_op, right_op, result_op); break;
528 }
529 }
530 break;
531
532 case Bytecodes::_imul:
533 {
534 bool did_strength_reduce = false;
535
536 if (right->is_constant()) {
537 jint c = right->as_jint();
538 if (c > 0 && is_power_of_2(c)) {
539 // do not need tmp here
540 __ shift_left(left_op, exact_log2(c), result_op);
541 did_strength_reduce = true;
542 } else {
543 did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
544 }
545 }
546 // we couldn't strength reduce so just emit the multiply
547 if (!did_strength_reduce) {
548 __ mul(left_op, right_op, result_op);
549 }
550 }
551 break;
552
553 case Bytecodes::_dsub:
554 case Bytecodes::_fsub:
555 case Bytecodes::_lsub:
556 case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
557
558 case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
559 // ldiv and lrem are implemented with a direct runtime call
560
561 case Bytecodes::_ddiv:
562 {
563 if (is_strictfp) {
564 __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
565 } else {
566 __ div (left_op, right_op, result_op); break;
567 }
568 }
569 break;
570
571 case Bytecodes::_drem:
572 case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
573
574 default: ShouldNotReachHere();
575 }
576}
577
578
579void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
580 arithmetic_op(code, result, left, right, false, tmp);
581}
582
583
584void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
585 arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
586}
587
588
589void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
590 arithmetic_op(code, result, left, right, is_strictfp, tmp);
591}
592
593
594void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
595
596 if (TwoOperandLIRForm && value != result_op
597 // Only 32bit right shifts require two operand form on S390.
598 S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
599 assert(count != result_op, "malformed");
600 __ move(value, result_op);
601 value = result_op;
602 }
603
604 assert(count->is_constant() || count->is_register(), "must be");
605 switch(code) {
606 case Bytecodes::_ishl:
607 case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
608 case Bytecodes::_ishr:
609 case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
610 case Bytecodes::_iushr:
611 case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
612 default: ShouldNotReachHere();
613 }
614}
615
616
617void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
618 if (TwoOperandLIRForm && left_op != result_op) {
619 assert(right_op != result_op, "malformed");
620 __ move(left_op, result_op);
621 left_op = result_op;
622 }
623
624 switch(code) {
625 case Bytecodes::_iand:
626 case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;
627
628 case Bytecodes::_ior:
629 case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;
630
631 case Bytecodes::_ixor:
632 case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;
633
634 default: ShouldNotReachHere();
635 }
636}
637
638
639void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
640 if (!GenerateSynchronizationCode) return;
641 // for slow path, use debug info for state after successful locking
642 CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
643 __ load_stack_address_monitor(monitor_no, lock);
644 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
645 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
646}
647
648
649void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
650 if (!GenerateSynchronizationCode) return;
651 // setup registers
652 LIR_Opr hdr = lock;
653 lock = new_hdr;
654 CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
655 __ load_stack_address_monitor(monitor_no, lock);
656 __ unlock_object(hdr, object, lock, scratch, slow_path);
657}
658
659#ifndef PRODUCT
660void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
661 if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
662 tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
663 } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
664 tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
665 }
666}
667#endif
668
669void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
670 klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
671 // If klass is not loaded we do not know if the klass has finalizers:
672 if (UseFastNewInstance && klass->is_loaded()
673 && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
674
675 Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
676
677 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
678
679 assert(klass->is_loaded(), "must be loaded");
680 // allocate space for instance
681 assert(klass->size_helper() >= 0, "illegal instance size");
682 const int instance_size = align_object_size(klass->size_helper());
683 __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
684 oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
685 } else {
686 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
687 __ branch(lir_cond_always, T_ILLEGAL, slow_path);
688 __ branch_destination(slow_path->continuation());
689 }
690}
691
692
693static bool is_constant_zero(Instruction* inst) {
694 IntConstant* c = inst->type()->as_IntConstant();
695 if (c) {
696 return (c->value() == 0);
697 }
698 return false;
699}
700
701
702static bool positive_constant(Instruction* inst) {
703 IntConstant* c = inst->type()->as_IntConstant();
704 if (c) {
705 return (c->value() >= 0);
706 }
707 return false;
708}
709
710
711static ciArrayKlass* as_array_klass(ciType* type) {
712 if (type != NULL && type->is_array_klass() && type->is_loaded()) {
713 return (ciArrayKlass*)type;
714 } else {
715 return NULL;
716 }
717}
718
719static ciType* phi_declared_type(Phi* phi) {
720 ciType* t = phi->operand_at(0)->declared_type();
721 if (t == NULL) {
722 return NULL;
723 }
724 for(int i = 1; i < phi->operand_count(); i++) {
725 if (t != phi->operand_at(i)->declared_type()) {
726 return NULL;
727 }
728 }
729 return t;
730}
731
732void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
733 Instruction* src = x->argument_at(0);
734 Instruction* src_pos = x->argument_at(1);
735 Instruction* dst = x->argument_at(2);
736 Instruction* dst_pos = x->argument_at(3);
737 Instruction* length = x->argument_at(4);
738
739 // first try to identify the likely type of the arrays involved
740 ciArrayKlass* expected_type = NULL;
741 bool is_exact = false, src_objarray = false, dst_objarray = false;
742 {
743 ciArrayKlass* src_exact_type = as_array_klass(src->exact_type());
744 ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
745 Phi* phi;
746 if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
747 src_declared_type = as_array_klass(phi_declared_type(phi));
748 }
749 ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type());
750 ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
751 if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
752 dst_declared_type = as_array_klass(phi_declared_type(phi));
753 }
754
755 if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
756 // the types exactly match so the type is fully known
757 is_exact = true;
758 expected_type = src_exact_type;
759 } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
760 ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
761 ciArrayKlass* src_type = NULL;
762 if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
763 src_type = (ciArrayKlass*) src_exact_type;
764 } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
765 src_type = (ciArrayKlass*) src_declared_type;
766 }
767 if (src_type != NULL) {
768 if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
769 is_exact = true;
770 expected_type = dst_type;
771 }
772 }
773 }
774 // at least pass along a good guess
775 if (expected_type == NULL) expected_type = dst_exact_type;
776 if (expected_type == NULL) expected_type = src_declared_type;
777 if (expected_type == NULL) expected_type = dst_declared_type;
778
779 src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
780 dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
781 }
782
783 // if a probable array type has been identified, figure out if any
784 // of the required checks for a fast case can be elided.
785 int flags = LIR_OpArrayCopy::all_flags;
786
787 if (!src_objarray)
788 flags &= ~LIR_OpArrayCopy::src_objarray;
789 if (!dst_objarray)
790 flags &= ~LIR_OpArrayCopy::dst_objarray;
791
792 if (!x->arg_needs_null_check(0))
793 flags &= ~LIR_OpArrayCopy::src_null_check;
794 if (!x->arg_needs_null_check(2))
795 flags &= ~LIR_OpArrayCopy::dst_null_check;
796
797
798 if (expected_type != NULL) {
799 Value length_limit = NULL;
800
801 IfOp* ifop = length->as_IfOp();
802 if (ifop != NULL) {
803 // look for expressions like min(v, a.length) which ends up as
804 // x > y ? y : x or x >= y ? y : x
805 if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
806 ifop->x() == ifop->fval() &&
807 ifop->y() == ifop->tval()) {
808 length_limit = ifop->y();
809 }
810 }
811
812 // try to skip null checks and range checks
813 NewArray* src_array = src->as_NewArray();
814 if (src_array != NULL) {
815 flags &= ~LIR_OpArrayCopy::src_null_check;
816 if (length_limit != NULL &&
817 src_array->length() == length_limit &&
818 is_constant_zero(src_pos)) {
819 flags &= ~LIR_OpArrayCopy::src_range_check;
820 }
821 }
822
823 NewArray* dst_array = dst->as_NewArray();
824 if (dst_array != NULL) {
825 flags &= ~LIR_OpArrayCopy::dst_null_check;
826 if (length_limit != NULL &&
827 dst_array->length() == length_limit &&
828 is_constant_zero(dst_pos)) {
829 flags &= ~LIR_OpArrayCopy::dst_range_check;
830 }
831 }
832
833 // check from incoming constant values
834 if (positive_constant(src_pos))
835 flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
836 if (positive_constant(dst_pos))
837 flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
838 if (positive_constant(length))
839 flags &= ~LIR_OpArrayCopy::length_positive_check;
840
841 // see if the range check can be elided, which might also imply
842 // that src or dst is non-null.
843 ArrayLength* al = length->as_ArrayLength();
844 if (al != NULL) {
845 if (al->array() == src) {
846 // it's the length of the source array
847 flags &= ~LIR_OpArrayCopy::length_positive_check;
848 flags &= ~LIR_OpArrayCopy::src_null_check;
849 if (is_constant_zero(src_pos))
850 flags &= ~LIR_OpArrayCopy::src_range_check;
851 }
852 if (al->array() == dst) {
853 // it's the length of the destination array
854 flags &= ~LIR_OpArrayCopy::length_positive_check;
855 flags &= ~LIR_OpArrayCopy::dst_null_check;
856 if (is_constant_zero(dst_pos))
857 flags &= ~LIR_OpArrayCopy::dst_range_check;
858 }
859 }
860 if (is_exact) {
861 flags &= ~LIR_OpArrayCopy::type_check;
862 }
863 }
864
865 IntConstant* src_int = src_pos->type()->as_IntConstant();
866 IntConstant* dst_int = dst_pos->type()->as_IntConstant();
867 if (src_int && dst_int) {
868 int s_offs = src_int->value();
869 int d_offs = dst_int->value();
870 if (src_int->value() >= dst_int->value()) {
871 flags &= ~LIR_OpArrayCopy::overlapping;
872 }
873 if (expected_type != NULL) {
874 BasicType t = expected_type->element_type()->basic_type();
875 int element_size = type2aelembytes(t);
876 if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
877 ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
878 flags &= ~LIR_OpArrayCopy::unaligned;
879 }
880 }
881 } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
882 // src and dest positions are the same, or dst is zero so assume
883 // nonoverlapping copy.
884 flags &= ~LIR_OpArrayCopy::overlapping;
885 }
886
887 if (src == dst) {
888 // moving within a single array so no type checks are needed
889 if (flags & LIR_OpArrayCopy::type_check) {
890 flags &= ~LIR_OpArrayCopy::type_check;
891 }
892 }
893 *flagsp = flags;
894 *expected_typep = (ciArrayKlass*)expected_type;
895}
896
897
898LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
899 assert(opr->is_register(), "why spill if item is not register?");
900
901 if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
902 LIR_Opr result = new_register(T_FLOAT);
903 set_vreg_flag(result, must_start_in_memory);
904 assert(opr->is_register(), "only a register can be spilled");
905 assert(opr->value_type()->is_float(), "rounding only for floats available");
906 __ roundfp(opr, LIR_OprFact::illegalOpr, result);
907 return result;
908 }
909 return opr;
910}
911
912
913LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
914 assert(type2size[t] == type2size[value->type()],
915 "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
916 if (!value->is_register()) {
917 // force into a register
918 LIR_Opr r = new_register(value->type());
919 __ move(value, r);
920 value = r;
921 }
922
923 // create a spill location
924 LIR_Opr tmp = new_register(t);
925 set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
926
927 // move from register to spill
928 __ move(value, tmp);
929 return tmp;
930}
931
932void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
933 if (if_instr->should_profile()) {
934 ciMethod* method = if_instr->profiled_method();
935 assert(method != NULL, "method should be set if branch is profiled");
936 ciMethodData* md = method->method_data_or_null();
937 assert(md != NULL, "Sanity");
938 ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
939 assert(data != NULL, "must have profiling data");
940 assert(data->is_BranchData(), "need BranchData for two-way branches");
941 int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
942 int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
943 if (if_instr->is_swapped()) {
944 int t = taken_count_offset;
945 taken_count_offset = not_taken_count_offset;
946 not_taken_count_offset = t;
947 }
948
949 LIR_Opr md_reg = new_register(T_METADATA);
950 __ metadata2reg(md->constant_encoding(), md_reg);
951
952 LIR_Opr data_offset_reg = new_pointer_register();
953 __ cmove(lir_cond(cond),
954 LIR_OprFact::intptrConst(taken_count_offset),
955 LIR_OprFact::intptrConst(not_taken_count_offset),
956 data_offset_reg, as_BasicType(if_instr->x()->type()));
957
958 // MDO cells are intptr_t, so the data_reg width is arch-dependent.
959 LIR_Opr data_reg = new_pointer_register();
960 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
961 __ move(data_addr, data_reg);
962 // Use leal instead of add to avoid destroying condition codes on x86
963 LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
964 __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
965 __ move(data_reg, data_addr);
966 }
967}
968
969// Phi technique:
970// This is about passing live values from one basic block to the other.
971// In code generated with Java it is rather rare that more than one
972// value is on the stack from one basic block to the other.
973// We optimize our technique for efficient passing of one value
974// (of type long, int, double..) but it can be extended.
975// When entering or leaving a basic block, all registers and all spill
976// slots are release and empty. We use the released registers
977// and spill slots to pass the live values from one block
978// to the other. The topmost value, i.e., the value on TOS of expression
979// stack is passed in registers. All other values are stored in spilling
980// area. Every Phi has an index which designates its spill slot
981// At exit of a basic block, we fill the register(s) and spill slots.
982// At entry of a basic block, the block_prolog sets up the content of phi nodes
983// and locks necessary registers and spilling slots.
984
985
986// move current value to referenced phi function
987void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
988 Phi* phi = sux_val->as_Phi();
989 // cur_val can be null without phi being null in conjunction with inlining
990 if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
991 Phi* cur_phi = cur_val->as_Phi();
992 if (cur_phi != NULL && cur_phi->is_illegal()) {
993 // Phi and local would need to get invalidated
994 // (which is unexpected for Linear Scan).
995 // But this case is very rare so we simply bail out.
996 bailout("propagation of illegal phi");
997 return;
998 }
999 LIR_Opr operand = cur_val->operand();
1000 if (operand->is_illegal()) {
1001 assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
1002 "these can be produced lazily");
1003 operand = operand_for_instruction(cur_val);
1004 }
1005 resolver->move(operand, operand_for_instruction(phi));
1006 }
1007}
1008
1009
1010// Moves all stack values into their PHI position
1011void LIRGenerator::move_to_phi(ValueStack* cur_state) {
1012 BlockBegin* bb = block();
1013 if (bb->number_of_sux() == 1) {
1014 BlockBegin* sux = bb->sux_at(0);
1015 assert(sux->number_of_preds() > 0, "invalid CFG");
1016
1017 // a block with only one predecessor never has phi functions
1018 if (sux->number_of_preds() > 1) {
1019 PhiResolver resolver(this);
1020
1021 ValueStack* sux_state = sux->state();
1022 Value sux_value;
1023 int index;
1024
1025 assert(cur_state->scope() == sux_state->scope(), "not matching");
1026 assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
1027 assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
1028
1029 for_each_stack_value(sux_state, index, sux_value) {
1030 move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
1031 }
1032
1033 for_each_local_value(sux_state, index, sux_value) {
1034 move_to_phi(&resolver, cur_state->local_at(index), sux_value);
1035 }
1036
1037 assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
1038 }
1039 }
1040}
1041
1042
1043LIR_Opr LIRGenerator::new_register(BasicType type) {
1044 int vreg = _virtual_register_number;
1045 // add a little fudge factor for the bailout, since the bailout is
1046 // only checked periodically. This gives a few extra registers to
1047 // hand out before we really run out, which helps us keep from
1048 // tripping over assertions.
1049 if (vreg + 20 >= LIR_OprDesc::vreg_max) {
1050 bailout("out of virtual registers");
1051 if (vreg + 2 >= LIR_OprDesc::vreg_max) {
1052 // wrap it around
1053 _virtual_register_number = LIR_OprDesc::vreg_base;
1054 }
1055 }
1056 _virtual_register_number += 1;
1057 return LIR_OprFact::virtual_register(vreg, type);
1058}
1059
1060
1061// Try to lock using register in hint
1062LIR_Opr LIRGenerator::rlock(Value instr) {
1063 return new_register(instr->type());
1064}
1065
1066
1067// does an rlock and sets result
1068LIR_Opr LIRGenerator::rlock_result(Value x) {
1069 LIR_Opr reg = rlock(x);
1070 set_result(x, reg);
1071 return reg;
1072}
1073
1074
1075// does an rlock and sets result
1076LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1077 LIR_Opr reg;
1078 switch (type) {
1079 case T_BYTE:
1080 case T_BOOLEAN:
1081 reg = rlock_byte(type);
1082 break;
1083 default:
1084 reg = rlock(x);
1085 break;
1086 }
1087
1088 set_result(x, reg);
1089 return reg;
1090}
1091
1092
1093//---------------------------------------------------------------------
1094ciObject* LIRGenerator::get_jobject_constant(Value value) {
1095 ObjectType* oc = value->type()->as_ObjectType();
1096 if (oc) {
1097 return oc->constant_value();
1098 }
1099 return NULL;
1100}
1101
1102
1103void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
1104 assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
1105 assert(block()->next() == x, "ExceptionObject must be first instruction of block");
1106
1107 // no moves are created for phi functions at the begin of exception
1108 // handlers, so assign operands manually here
1109 for_each_phi_fun(block(), phi,
1110 if (!phi->is_illegal()) { operand_for_instruction(phi); });
1111
1112 LIR_Opr thread_reg = getThreadPointer();
1113 __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
1114 exceptionOopOpr());
1115 __ move_wide(LIR_OprFact::oopConst(NULL),
1116 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
1117 __ move_wide(LIR_OprFact::oopConst(NULL),
1118 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
1119
1120 LIR_Opr result = new_register(T_OBJECT);
1121 __ move(exceptionOopOpr(), result);
1122 set_result(x, result);
1123}
1124
1125
1126//----------------------------------------------------------------------
1127//----------------------------------------------------------------------
1128//----------------------------------------------------------------------
1129//----------------------------------------------------------------------
1130// visitor functions
1131//----------------------------------------------------------------------
1132//----------------------------------------------------------------------
1133//----------------------------------------------------------------------
1134//----------------------------------------------------------------------
1135
1136void LIRGenerator::do_Phi(Phi* x) {
1137 // phi functions are never visited directly
1138 ShouldNotReachHere();
1139}
1140
1141
1142// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
1143void LIRGenerator::do_Constant(Constant* x) {
1144 if (x->state_before() != NULL) {
1145 // Any constant with a ValueStack requires patching so emit the patch here
1146 LIR_Opr reg = rlock_result(x);
1147 CodeEmitInfo* info = state_for(x, x->state_before());
1148 __ oop2reg_patch(NULL, reg, info);
1149 } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
1150 if (!x->is_pinned()) {
1151 // unpinned constants are handled specially so that they can be
1152 // put into registers when they are used multiple times within a
1153 // block. After the block completes their operand will be
1154 // cleared so that other blocks can't refer to that register.
1155 set_result(x, load_constant(x));
1156 } else {
1157 LIR_Opr res = x->operand();
1158 if (!res->is_valid()) {
1159 res = LIR_OprFact::value_type(x->type());
1160 }
1161 if (res->is_constant()) {
1162 LIR_Opr reg = rlock_result(x);
1163 __ move(res, reg);
1164 } else {
1165 set_result(x, res);
1166 }
1167 }
1168 } else {
1169 set_result(x, LIR_OprFact::value_type(x->type()));
1170 }
1171}
1172
1173
1174void LIRGenerator::do_Local(Local* x) {
1175 // operand_for_instruction has the side effect of setting the result
1176 // so there's no need to do it here.
1177 operand_for_instruction(x);
1178}
1179
1180
1181void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
1182 Unimplemented();
1183}
1184
1185
1186void LIRGenerator::do_Return(Return* x) {
1187 if (compilation()->env()->dtrace_method_probes()) {
1188 BasicTypeList signature;
1189 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
1190 signature.append(T_METADATA); // Method*
1191 LIR_OprList* args = new LIR_OprList();
1192 args->append(getThreadPointer());
1193 LIR_Opr meth = new_register(T_METADATA);
1194 __ metadata2reg(method()->constant_encoding(), meth);
1195 args->append(meth);
1196 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
1197 }
1198
1199 if (x->type()->is_void()) {
1200 __ return_op(LIR_OprFact::illegalOpr);
1201 } else {
1202 LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
1203 LIRItem result(x->result(), this);
1204
1205 result.load_item_force(reg);
1206 __ return_op(result.result());
1207 }
1208 set_no_result(x);
1209}
1210
1211// Examble: ref.get()
1212// Combination of LoadField and g1 pre-write barrier
1213void LIRGenerator::do_Reference_get(Intrinsic* x) {
1214
1215 const int referent_offset = java_lang_ref_Reference::referent_offset;
1216 guarantee(referent_offset > 0, "referent offset not initialized");
1217
1218 assert(x->number_of_arguments() == 1, "wrong type");
1219
1220 LIRItem reference(x->argument_at(0), this);
1221 reference.load_item();
1222
1223 // need to perform the null check on the reference objecy
1224 CodeEmitInfo* info = NULL;
1225 if (x->needs_null_check()) {
1226 info = state_for(x);
1227 }
1228
1229 LIR_Opr result = rlock_result(x, T_OBJECT);
1230 access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
1231 reference, LIR_OprFact::intConst(referent_offset), result);
1232}
1233
1234// Example: clazz.isInstance(object)
1235void LIRGenerator::do_isInstance(Intrinsic* x) {
1236 assert(x->number_of_arguments() == 2, "wrong type");
1237
1238 // TODO could try to substitute this node with an equivalent InstanceOf
1239 // if clazz is known to be a constant Class. This will pick up newly found
1240 // constants after HIR construction. I'll leave this to a future change.
1241
1242 // as a first cut, make a simple leaf call to runtime to stay platform independent.
1243 // could follow the aastore example in a future change.
1244
1245 LIRItem clazz(x->argument_at(0), this);
1246 LIRItem object(x->argument_at(1), this);
1247 clazz.load_item();
1248 object.load_item();
1249 LIR_Opr result = rlock_result(x);
1250
1251 // need to perform null check on clazz
1252 if (x->needs_null_check()) {
1253 CodeEmitInfo* info = state_for(x);
1254 __ null_check(clazz.result(), info);
1255 }
1256
1257 LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
1258 CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
1259 x->type(),
1260 NULL); // NULL CodeEmitInfo results in a leaf call
1261 __ move(call_result, result);
1262}
1263
1264// Example: object.getClass ()
1265void LIRGenerator::do_getClass(Intrinsic* x) {
1266 assert(x->number_of_arguments() == 1, "wrong type");
1267
1268 LIRItem rcvr(x->argument_at(0), this);
1269 rcvr.load_item();
1270 LIR_Opr temp = new_register(T_METADATA);
1271 LIR_Opr result = rlock_result(x);
1272
1273 // need to perform the null check on the rcvr
1274 CodeEmitInfo* info = NULL;
1275 if (x->needs_null_check()) {
1276 info = state_for(x);
1277 }
1278
1279 // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
1280 // meaning of these two is mixed up (see JDK-8026837).
1281 __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
1282 __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
1283 // mirror = ((OopHandle)mirror)->resolve();
1284 access_load(IN_NATIVE, T_OBJECT,
1285 LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
1286}
1287
1288// java.lang.Class::isPrimitive()
1289void LIRGenerator::do_isPrimitive(Intrinsic* x) {
1290 assert(x->number_of_arguments() == 1, "wrong type");
1291
1292 LIRItem rcvr(x->argument_at(0), this);
1293 rcvr.load_item();
1294 LIR_Opr temp = new_register(T_METADATA);
1295 LIR_Opr result = rlock_result(x);
1296
1297 CodeEmitInfo* info = NULL;
1298 if (x->needs_null_check()) {
1299 info = state_for(x);
1300 }
1301
1302 __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), temp, info);
1303 __ cmp(lir_cond_notEqual, temp, LIR_OprFact::intConst(0));
1304 __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
1305}
1306
1307
1308// Example: Thread.currentThread()
1309void LIRGenerator::do_currentThread(Intrinsic* x) {
1310 assert(x->number_of_arguments() == 0, "wrong type");
1311 LIR_Opr reg = rlock_result(x);
1312 __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
1313}
1314
1315
1316void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1317 assert(x->number_of_arguments() == 1, "wrong type");
1318 LIRItem receiver(x->argument_at(0), this);
1319
1320 receiver.load_item();
1321 BasicTypeList signature;
1322 signature.append(T_OBJECT); // receiver
1323 LIR_OprList* args = new LIR_OprList();
1324 args->append(receiver.result());
1325 CodeEmitInfo* info = state_for(x, x->state());
1326 call_runtime(&signature, args,
1327 CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1328 voidType, info);
1329
1330 set_no_result(x);
1331}
1332
1333
1334//------------------------local access--------------------------------------
1335
1336LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1337 if (x->operand()->is_illegal()) {
1338 Constant* c = x->as_Constant();
1339 if (c != NULL) {
1340 x->set_operand(LIR_OprFact::value_type(c->type()));
1341 } else {
1342 assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
1343 // allocate a virtual register for this local or phi
1344 x->set_operand(rlock(x));
1345 _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
1346 }
1347 }
1348 return x->operand();
1349}
1350
1351
1352Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1353 if (opr->is_virtual()) {
1354 return instruction_for_vreg(opr->vreg_number());
1355 }
1356 return NULL;
1357}
1358
1359
1360Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1361 if (reg_num < _instruction_for_operand.length()) {
1362 return _instruction_for_operand.at(reg_num);
1363 }
1364 return NULL;
1365}
1366
1367
1368void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1369 if (_vreg_flags.size_in_bits() == 0) {
1370 BitMap2D temp(100, num_vreg_flags);
1371 _vreg_flags = temp;
1372 }
1373 _vreg_flags.at_put_grow(vreg_num, f, true);
1374}
1375
1376bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1377 if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1378 return false;
1379 }
1380 return _vreg_flags.at(vreg_num, f);
1381}
1382
1383
1384// Block local constant handling. This code is useful for keeping
1385// unpinned constants and constants which aren't exposed in the IR in
1386// registers. Unpinned Constant instructions have their operands
1387// cleared when the block is finished so that other blocks can't end
1388// up referring to their registers.
1389
1390LIR_Opr LIRGenerator::load_constant(Constant* x) {
1391 assert(!x->is_pinned(), "only for unpinned constants");
1392 _unpinned_constants.append(x);
1393 return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1394}
1395
1396
1397LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1398 BasicType t = c->type();
1399 for (int i = 0; i < _constants.length(); i++) {
1400 LIR_Const* other = _constants.at(i);
1401 if (t == other->type()) {
1402 switch (t) {
1403 case T_INT:
1404 case T_FLOAT:
1405 if (c->as_jint_bits() != other->as_jint_bits()) continue;
1406 break;
1407 case T_LONG:
1408 case T_DOUBLE:
1409 if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1410 if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1411 break;
1412 case T_OBJECT:
1413 if (c->as_jobject() != other->as_jobject()) continue;
1414 break;
1415 default:
1416 break;
1417 }
1418 return _reg_for_constants.at(i);
1419 }
1420 }
1421
1422 LIR_Opr result = new_register(t);
1423 __ move((LIR_Opr)c, result);
1424 _constants.append(c);
1425 _reg_for_constants.append(result);
1426 return result;
1427}
1428
1429//------------------------field access--------------------------------------
1430
1431void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1432 assert(x->number_of_arguments() == 4, "wrong type");
1433 LIRItem obj (x->argument_at(0), this); // object
1434 LIRItem offset(x->argument_at(1), this); // offset of field
1435 LIRItem cmp (x->argument_at(2), this); // value to compare with field
1436 LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
1437 assert(obj.type()->tag() == objectTag, "invalid type");
1438
1439 // In 64bit the type can be long, sparc doesn't have this assert
1440 // assert(offset.type()->tag() == intTag, "invalid type");
1441
1442 assert(cmp.type()->tag() == type->tag(), "invalid type");
1443 assert(val.type()->tag() == type->tag(), "invalid type");
1444
1445 LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1446 obj, offset, cmp, val);
1447 set_result(x, result);
1448}
1449
1450// Comment copied form templateTable_i486.cpp
1451// ----------------------------------------------------------------------------
1452// Volatile variables demand their effects be made known to all CPU's in
1453// order. Store buffers on most chips allow reads & writes to reorder; the
1454// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1455// memory barrier (i.e., it's not sufficient that the interpreter does not
1456// reorder volatile references, the hardware also must not reorder them).
1457//
1458// According to the new Java Memory Model (JMM):
1459// (1) All volatiles are serialized wrt to each other.
1460// ALSO reads & writes act as aquire & release, so:
1461// (2) A read cannot let unrelated NON-volatile memory refs that happen after
1462// the read float up to before the read. It's OK for non-volatile memory refs
1463// that happen before the volatile read to float down below it.
1464// (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1465// that happen BEFORE the write float down to after the write. It's OK for
1466// non-volatile memory refs that happen after the volatile write to float up
1467// before it.
1468//
1469// We only put in barriers around volatile refs (they are expensive), not
1470// _between_ memory refs (that would require us to track the flavor of the
1471// previous memory refs). Requirements (2) and (3) require some barriers
1472// before volatile stores and after volatile loads. These nearly cover
1473// requirement (1) but miss the volatile-store-volatile-load case. This final
1474// case is placed after volatile-stores although it could just as well go
1475// before volatile-loads.
1476
1477
1478void LIRGenerator::do_StoreField(StoreField* x) {
1479 bool needs_patching = x->needs_patching();
1480 bool is_volatile = x->field()->is_volatile();
1481 BasicType field_type = x->field_type();
1482
1483 CodeEmitInfo* info = NULL;
1484 if (needs_patching) {
1485 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1486 info = state_for(x, x->state_before());
1487 } else if (x->needs_null_check()) {
1488 NullCheck* nc = x->explicit_null_check();
1489 if (nc == NULL) {
1490 info = state_for(x);
1491 } else {
1492 info = state_for(nc);
1493 }
1494 }
1495
1496 LIRItem object(x->obj(), this);
1497 LIRItem value(x->value(), this);
1498
1499 object.load_item();
1500
1501 if (is_volatile || needs_patching) {
1502 // load item if field is volatile (fewer special cases for volatiles)
1503 // load item if field not initialized
1504 // load item if field not constant
1505 // because of code patching we cannot inline constants
1506 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1507 value.load_byte_item();
1508 } else {
1509 value.load_item();
1510 }
1511 } else {
1512 value.load_for_store(field_type);
1513 }
1514
1515 set_no_result(x);
1516
1517#ifndef PRODUCT
1518 if (PrintNotLoaded && needs_patching) {
1519 tty->print_cr(" ###class not loaded at store_%s bci %d",
1520 x->is_static() ? "static" : "field", x->printable_bci());
1521 }
1522#endif
1523
1524 if (x->needs_null_check() &&
1525 (needs_patching ||
1526 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1527 // Emit an explicit null check because the offset is too large.
1528 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1529 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1530 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1531 }
1532
1533 DecoratorSet decorators = IN_HEAP;
1534 if (is_volatile) {
1535 decorators |= MO_SEQ_CST;
1536 }
1537 if (needs_patching) {
1538 decorators |= C1_NEEDS_PATCHING;
1539 }
1540
1541 access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1542 value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1543}
1544
1545void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1546 assert(x->is_pinned(),"");
1547 bool needs_range_check = x->compute_needs_range_check();
1548 bool use_length = x->length() != NULL;
1549 bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
1550 bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1551 !get_jobject_constant(x->value())->is_null_object() ||
1552 x->should_profile());
1553
1554 LIRItem array(x->array(), this);
1555 LIRItem index(x->index(), this);
1556 LIRItem value(x->value(), this);
1557 LIRItem length(this);
1558
1559 array.load_item();
1560 index.load_nonconstant();
1561
1562 if (use_length && needs_range_check) {
1563 length.set_instruction(x->length());
1564 length.load_item();
1565
1566 }
1567 if (needs_store_check || x->check_boolean()) {
1568 value.load_item();
1569 } else {
1570 value.load_for_store(x->elt_type());
1571 }
1572
1573 set_no_result(x);
1574
1575 // the CodeEmitInfo must be duplicated for each different
1576 // LIR-instruction because spilling can occur anywhere between two
1577 // instructions and so the debug information must be different
1578 CodeEmitInfo* range_check_info = state_for(x);
1579 CodeEmitInfo* null_check_info = NULL;
1580 if (x->needs_null_check()) {
1581 null_check_info = new CodeEmitInfo(range_check_info);
1582 }
1583
1584 if (GenerateRangeChecks && needs_range_check) {
1585 if (use_length) {
1586 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1587 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1588 } else {
1589 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1590 // range_check also does the null check
1591 null_check_info = NULL;
1592 }
1593 }
1594
1595 if (GenerateArrayStoreCheck && needs_store_check) {
1596 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1597 array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1598 }
1599
1600 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1601 if (x->check_boolean()) {
1602 decorators |= C1_MASK_BOOLEAN;
1603 }
1604
1605 access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1606 NULL, null_check_info);
1607}
1608
1609void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1610 LIRItem& base, LIR_Opr offset, LIR_Opr result,
1611 CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1612 decorators |= ACCESS_READ;
1613 LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1614 if (access.is_raw()) {
1615 _barrier_set->BarrierSetC1::load_at(access, result);
1616 } else {
1617 _barrier_set->load_at(access, result);
1618 }
1619}
1620
1621void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1622 LIR_Opr addr, LIR_Opr result) {
1623 decorators |= ACCESS_READ;
1624 LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1625 access.set_resolved_addr(addr);
1626 if (access.is_raw()) {
1627 _barrier_set->BarrierSetC1::load(access, result);
1628 } else {
1629 _barrier_set->load(access, result);
1630 }
1631}
1632
1633void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
1634 LIRItem& base, LIR_Opr offset, LIR_Opr value,
1635 CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1636 decorators |= ACCESS_WRITE;
1637 LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
1638 if (access.is_raw()) {
1639 _barrier_set->BarrierSetC1::store_at(access, value);
1640 } else {
1641 _barrier_set->store_at(access, value);
1642 }
1643}
1644
1645LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
1646 LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1647 decorators |= ACCESS_READ;
1648 decorators |= ACCESS_WRITE;
1649 // Atomic operations are SEQ_CST by default
1650 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1651 LIRAccess access(this, decorators, base, offset, type);
1652 if (access.is_raw()) {
1653 return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
1654 } else {
1655 return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
1656 }
1657}
1658
1659LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
1660 LIRItem& base, LIRItem& offset, LIRItem& value) {
1661 decorators |= ACCESS_READ;
1662 decorators |= ACCESS_WRITE;
1663 // Atomic operations are SEQ_CST by default
1664 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1665 LIRAccess access(this, decorators, base, offset, type);
1666 if (access.is_raw()) {
1667 return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1668 } else {
1669 return _barrier_set->atomic_xchg_at(access, value);
1670 }
1671}
1672
1673LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
1674 LIRItem& base, LIRItem& offset, LIRItem& value) {
1675 decorators |= ACCESS_READ;
1676 decorators |= ACCESS_WRITE;
1677 // Atomic operations are SEQ_CST by default
1678 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1679 LIRAccess access(this, decorators, base, offset, type);
1680 if (access.is_raw()) {
1681 return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1682 } else {
1683 return _barrier_set->atomic_add_at(access, value);
1684 }
1685}
1686
1687LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
1688 // Use stronger ACCESS_WRITE|ACCESS_READ by default.
1689 if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
1690 decorators |= ACCESS_READ | ACCESS_WRITE;
1691 }
1692
1693 return _barrier_set->resolve(this, decorators, obj);
1694}
1695
1696void LIRGenerator::do_LoadField(LoadField* x) {
1697 bool needs_patching = x->needs_patching();
1698 bool is_volatile = x->field()->is_volatile();
1699 BasicType field_type = x->field_type();
1700
1701 CodeEmitInfo* info = NULL;
1702 if (needs_patching) {
1703 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1704 info = state_for(x, x->state_before());
1705 } else if (x->needs_null_check()) {
1706 NullCheck* nc = x->explicit_null_check();
1707 if (nc == NULL) {
1708 info = state_for(x);
1709 } else {
1710 info = state_for(nc);
1711 }
1712 }
1713
1714 LIRItem object(x->obj(), this);
1715
1716 object.load_item();
1717
1718#ifndef PRODUCT
1719 if (PrintNotLoaded && needs_patching) {
1720 tty->print_cr(" ###class not loaded at load_%s bci %d",
1721 x->is_static() ? "static" : "field", x->printable_bci());
1722 }
1723#endif
1724
1725 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1726 if (x->needs_null_check() &&
1727 (needs_patching ||
1728 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1729 stress_deopt)) {
1730 LIR_Opr obj = object.result();
1731 if (stress_deopt) {
1732 obj = new_register(T_OBJECT);
1733 __ move(LIR_OprFact::oopConst(NULL), obj);
1734 }
1735 // Emit an explicit null check because the offset is too large.
1736 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1737 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1738 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1739 }
1740
1741 DecoratorSet decorators = IN_HEAP;
1742 if (is_volatile) {
1743 decorators |= MO_SEQ_CST;
1744 }
1745 if (needs_patching) {
1746 decorators |= C1_NEEDS_PATCHING;
1747 }
1748
1749 LIR_Opr result = rlock_result(x, field_type);
1750 access_load_at(decorators, field_type,
1751 object, LIR_OprFact::intConst(x->offset()), result,
1752 info ? new CodeEmitInfo(info) : NULL, info);
1753}
1754
1755
1756//------------------------java.nio.Buffer.checkIndex------------------------
1757
1758// int java.nio.Buffer.checkIndex(int)
1759void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1760 // NOTE: by the time we are in checkIndex() we are guaranteed that
1761 // the buffer is non-null (because checkIndex is package-private and
1762 // only called from within other methods in the buffer).
1763 assert(x->number_of_arguments() == 2, "wrong type");
1764 LIRItem buf (x->argument_at(0), this);
1765 LIRItem index(x->argument_at(1), this);
1766 buf.load_item();
1767 index.load_item();
1768
1769 LIR_Opr result = rlock_result(x);
1770 if (GenerateRangeChecks) {
1771 CodeEmitInfo* info = state_for(x);
1772 CodeStub* stub = new RangeCheckStub(info, index.result());
1773 LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
1774 if (index.result()->is_constant()) {
1775 cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1776 __ branch(lir_cond_belowEqual, T_INT, stub);
1777 } else {
1778 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
1779 java_nio_Buffer::limit_offset(), T_INT, info);
1780 __ branch(lir_cond_aboveEqual, T_INT, stub);
1781 }
1782 __ move(index.result(), result);
1783 } else {
1784 // Just load the index into the result register
1785 __ move(index.result(), result);
1786 }
1787}
1788
1789
1790//------------------------array access--------------------------------------
1791
1792
1793void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1794 LIRItem array(x->array(), this);
1795 array.load_item();
1796 LIR_Opr reg = rlock_result(x);
1797
1798 CodeEmitInfo* info = NULL;
1799 if (x->needs_null_check()) {
1800 NullCheck* nc = x->explicit_null_check();
1801 if (nc == NULL) {
1802 info = state_for(x);
1803 } else {
1804 info = state_for(nc);
1805 }
1806 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1807 LIR_Opr obj = new_register(T_OBJECT);
1808 __ move(LIR_OprFact::oopConst(NULL), obj);
1809 __ null_check(obj, new CodeEmitInfo(info));
1810 }
1811 }
1812 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1813}
1814
1815
1816void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1817 bool use_length = x->length() != NULL;
1818 LIRItem array(x->array(), this);
1819 LIRItem index(x->index(), this);
1820 LIRItem length(this);
1821 bool needs_range_check = x->compute_needs_range_check();
1822
1823 if (use_length && needs_range_check) {
1824 length.set_instruction(x->length());
1825 length.load_item();
1826 }
1827
1828 array.load_item();
1829 if (index.is_constant() && can_inline_as_constant(x->index())) {
1830 // let it be a constant
1831 index.dont_load_item();
1832 } else {
1833 index.load_item();
1834 }
1835
1836 CodeEmitInfo* range_check_info = state_for(x);
1837 CodeEmitInfo* null_check_info = NULL;
1838 if (x->needs_null_check()) {
1839 NullCheck* nc = x->explicit_null_check();
1840 if (nc != NULL) {
1841 null_check_info = state_for(nc);
1842 } else {
1843 null_check_info = range_check_info;
1844 }
1845 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1846 LIR_Opr obj = new_register(T_OBJECT);
1847 __ move(LIR_OprFact::oopConst(NULL), obj);
1848 __ null_check(obj, new CodeEmitInfo(null_check_info));
1849 }
1850 }
1851
1852 if (GenerateRangeChecks && needs_range_check) {
1853 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1854 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
1855 } else if (use_length) {
1856 // TODO: use a (modified) version of array_range_check that does not require a
1857 // constant length to be loaded to a register
1858 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1859 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1860 } else {
1861 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1862 // The range check performs the null check, so clear it out for the load
1863 null_check_info = NULL;
1864 }
1865 }
1866
1867 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1868
1869 LIR_Opr result = rlock_result(x, x->elt_type());
1870 access_load_at(decorators, x->elt_type(),
1871 array, index.result(), result,
1872 NULL, null_check_info);
1873}
1874
1875
1876void LIRGenerator::do_NullCheck(NullCheck* x) {
1877 if (x->can_trap()) {
1878 LIRItem value(x->obj(), this);
1879 value.load_item();
1880 CodeEmitInfo* info = state_for(x);
1881 __ null_check(value.result(), info);
1882 }
1883}
1884
1885
1886void LIRGenerator::do_TypeCast(TypeCast* x) {
1887 LIRItem value(x->obj(), this);
1888 value.load_item();
1889 // the result is the same as from the node we are casting
1890 set_result(x, value.result());
1891}
1892
1893
1894void LIRGenerator::do_Throw(Throw* x) {
1895 LIRItem exception(x->exception(), this);
1896 exception.load_item();
1897 set_no_result(x);
1898 LIR_Opr exception_opr = exception.result();
1899 CodeEmitInfo* info = state_for(x, x->state());
1900
1901#ifndef PRODUCT
1902 if (PrintC1Statistics) {
1903 increment_counter(Runtime1::throw_count_address(), T_INT);
1904 }
1905#endif
1906
1907 // check if the instruction has an xhandler in any of the nested scopes
1908 bool unwind = false;
1909 if (info->exception_handlers()->length() == 0) {
1910 // this throw is not inside an xhandler
1911 unwind = true;
1912 } else {
1913 // get some idea of the throw type
1914 bool type_is_exact = true;
1915 ciType* throw_type = x->exception()->exact_type();
1916 if (throw_type == NULL) {
1917 type_is_exact = false;
1918 throw_type = x->exception()->declared_type();
1919 }
1920 if (throw_type != NULL && throw_type->is_instance_klass()) {
1921 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1922 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1923 }
1924 }
1925
1926 // do null check before moving exception oop into fixed register
1927 // to avoid a fixed interval with an oop during the null check.
1928 // Use a copy of the CodeEmitInfo because debug information is
1929 // different for null_check and throw.
1930 if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) {
1931 // if the exception object wasn't created using new then it might be null.
1932 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1933 }
1934
1935 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1936 // we need to go through the exception lookup path to get JVMTI
1937 // notification done
1938 unwind = false;
1939 }
1940
1941 // move exception oop into fixed register
1942 __ move(exception_opr, exceptionOopOpr());
1943
1944 if (unwind) {
1945 __ unwind_exception(exceptionOopOpr());
1946 } else {
1947 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
1948 }
1949}
1950
1951
1952void LIRGenerator::do_RoundFP(RoundFP* x) {
1953 LIRItem input(x->input(), this);
1954 input.load_item();
1955 LIR_Opr input_opr = input.result();
1956 assert(input_opr->is_register(), "why round if value is not in a register?");
1957 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
1958 if (input_opr->is_single_fpu()) {
1959 set_result(x, round_item(input_opr)); // This code path not currently taken
1960 } else {
1961 LIR_Opr result = new_register(T_DOUBLE);
1962 set_vreg_flag(result, must_start_in_memory);
1963 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
1964 set_result(x, result);
1965 }
1966}
1967
1968// Here UnsafeGetRaw may have x->base() and x->index() be int or long
1969// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit.
1970void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
1971 LIRItem base(x->base(), this);
1972 LIRItem idx(this);
1973
1974 base.load_item();
1975 if (x->has_index()) {
1976 idx.set_instruction(x->index());
1977 idx.load_nonconstant();
1978 }
1979
1980 LIR_Opr reg = rlock_result(x, x->basic_type());
1981
1982 int log2_scale = 0;
1983 if (x->has_index()) {
1984 log2_scale = x->log2_scale();
1985 }
1986
1987 assert(!x->has_index() || idx.value() == x->index(), "should match");
1988
1989 LIR_Opr base_op = base.result();
1990 LIR_Opr index_op = idx.result();
1991#ifndef _LP64
1992 if (base_op->type() == T_LONG) {
1993 base_op = new_register(T_INT);
1994 __ convert(Bytecodes::_l2i, base.result(), base_op);
1995 }
1996 if (x->has_index()) {
1997 if (index_op->type() == T_LONG) {
1998 LIR_Opr long_index_op = index_op;
1999 if (index_op->is_constant()) {
2000 long_index_op = new_register(T_LONG);
2001 __ move(index_op, long_index_op);
2002 }
2003 index_op = new_register(T_INT);
2004 __ convert(Bytecodes::_l2i, long_index_op, index_op);
2005 } else {
2006 assert(x->index()->type()->tag() == intTag, "must be");
2007 }
2008 }
2009 // At this point base and index should be all ints.
2010 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2011 assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2012#else
2013 if (x->has_index()) {
2014 if (index_op->type() == T_INT) {
2015 if (!index_op->is_constant()) {
2016 index_op = new_register(T_LONG);
2017 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2018 }
2019 } else {
2020 assert(index_op->type() == T_LONG, "must be");
2021 if (index_op->is_constant()) {
2022 index_op = new_register(T_LONG);
2023 __ move(idx.result(), index_op);
2024 }
2025 }
2026 }
2027 // At this point base is a long non-constant
2028 // Index is a long register or a int constant.
2029 // We allow the constant to stay an int because that would allow us a more compact encoding by
2030 // embedding an immediate offset in the address expression. If we have a long constant, we have to
2031 // move it into a register first.
2032 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2033 assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2034 (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2035#endif
2036
2037 BasicType dst_type = x->basic_type();
2038
2039 LIR_Address* addr;
2040 if (index_op->is_constant()) {
2041 assert(log2_scale == 0, "must not have a scale");
2042 assert(index_op->type() == T_INT, "only int constants supported");
2043 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2044 } else {
2045#ifdef X86
2046 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2047#elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2048 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2049#else
2050 if (index_op->is_illegal() || log2_scale == 0) {
2051 addr = new LIR_Address(base_op, index_op, dst_type);
2052 } else {
2053 LIR_Opr tmp = new_pointer_register();
2054 __ shift_left(index_op, log2_scale, tmp);
2055 addr = new LIR_Address(base_op, tmp, dst_type);
2056 }
2057#endif
2058 }
2059
2060 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2061 __ unaligned_move(addr, reg);
2062 } else {
2063 if (dst_type == T_OBJECT && x->is_wide()) {
2064 __ move_wide(addr, reg);
2065 } else {
2066 __ move(addr, reg);
2067 }
2068 }
2069}
2070
2071
2072void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2073 int log2_scale = 0;
2074 BasicType type = x->basic_type();
2075
2076 if (x->has_index()) {
2077 log2_scale = x->log2_scale();
2078 }
2079
2080 LIRItem base(x->base(), this);
2081 LIRItem value(x->value(), this);
2082 LIRItem idx(this);
2083
2084 base.load_item();
2085 if (x->has_index()) {
2086 idx.set_instruction(x->index());
2087 idx.load_item();
2088 }
2089
2090 if (type == T_BYTE || type == T_BOOLEAN) {
2091 value.load_byte_item();
2092 } else {
2093 value.load_item();
2094 }
2095
2096 set_no_result(x);
2097
2098 LIR_Opr base_op = base.result();
2099 LIR_Opr index_op = idx.result();
2100
2101#ifdef GENERATE_ADDRESS_IS_PREFERRED
2102 LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2103#else
2104#ifndef _LP64
2105 if (base_op->type() == T_LONG) {
2106 base_op = new_register(T_INT);
2107 __ convert(Bytecodes::_l2i, base.result(), base_op);
2108 }
2109 if (x->has_index()) {
2110 if (index_op->type() == T_LONG) {
2111 index_op = new_register(T_INT);
2112 __ convert(Bytecodes::_l2i, idx.result(), index_op);
2113 }
2114 }
2115 // At this point base and index should be all ints and not constants
2116 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2117 assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
2118#else
2119 if (x->has_index()) {
2120 if (index_op->type() == T_INT) {
2121 index_op = new_register(T_LONG);
2122 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2123 }
2124 }
2125 // At this point base and index are long and non-constant
2126 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2127 assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2128#endif
2129
2130 if (log2_scale != 0) {
2131 // temporary fix (platform dependent code without shift on Intel would be better)
2132 // TODO: ARM also allows embedded shift in the address
2133 LIR_Opr tmp = new_pointer_register();
2134 if (TwoOperandLIRForm) {
2135 __ move(index_op, tmp);
2136 index_op = tmp;
2137 }
2138 __ shift_left(index_op, log2_scale, tmp);
2139 if (!TwoOperandLIRForm) {
2140 index_op = tmp;
2141 }
2142 }
2143
2144 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2145#endif // !GENERATE_ADDRESS_IS_PREFERRED
2146 __ move(value.result(), addr);
2147}
2148
2149
2150void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2151 BasicType type = x->basic_type();
2152 LIRItem src(x->object(), this);
2153 LIRItem off(x->offset(), this);
2154
2155 off.load_item();
2156 src.load_item();
2157
2158 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2159
2160 if (x->is_volatile()) {
2161 decorators |= MO_SEQ_CST;
2162 }
2163 if (type == T_BOOLEAN) {
2164 decorators |= C1_MASK_BOOLEAN;
2165 }
2166 if (type == T_ARRAY || type == T_OBJECT) {
2167 decorators |= ON_UNKNOWN_OOP_REF;
2168 }
2169
2170 LIR_Opr result = rlock_result(x, type);
2171 access_load_at(decorators, type,
2172 src, off.result(), result);
2173}
2174
2175
2176void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2177 BasicType type = x->basic_type();
2178 LIRItem src(x->object(), this);
2179 LIRItem off(x->offset(), this);
2180 LIRItem data(x->value(), this);
2181
2182 src.load_item();
2183 if (type == T_BOOLEAN || type == T_BYTE) {
2184 data.load_byte_item();
2185 } else {
2186 data.load_item();
2187 }
2188 off.load_item();
2189
2190 set_no_result(x);
2191
2192 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2193 if (type == T_ARRAY || type == T_OBJECT) {
2194 decorators |= ON_UNKNOWN_OOP_REF;
2195 }
2196 if (x->is_volatile()) {
2197 decorators |= MO_SEQ_CST;
2198 }
2199 access_store_at(decorators, type, src, off.result(), data.result());
2200}
2201
2202void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
2203 BasicType type = x->basic_type();
2204 LIRItem src(x->object(), this);
2205 LIRItem off(x->offset(), this);
2206 LIRItem value(x->value(), this);
2207
2208 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
2209
2210 if (type == T_ARRAY || type == T_OBJECT) {
2211 decorators |= ON_UNKNOWN_OOP_REF;
2212 }
2213
2214 LIR_Opr result;
2215 if (x->is_add()) {
2216 result = access_atomic_add_at(decorators, type, src, off, value);
2217 } else {
2218 result = access_atomic_xchg_at(decorators, type, src, off, value);
2219 }
2220 set_result(x, result);
2221}
2222
2223void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2224 int lng = x->length();
2225
2226 for (int i = 0; i < lng; i++) {
2227 SwitchRange* one_range = x->at(i);
2228 int low_key = one_range->low_key();
2229 int high_key = one_range->high_key();
2230 BlockBegin* dest = one_range->sux();
2231 if (low_key == high_key) {
2232 __ cmp(lir_cond_equal, value, low_key);
2233 __ branch(lir_cond_equal, T_INT, dest);
2234 } else if (high_key - low_key == 1) {
2235 __ cmp(lir_cond_equal, value, low_key);
2236 __ branch(lir_cond_equal, T_INT, dest);
2237 __ cmp(lir_cond_equal, value, high_key);
2238 __ branch(lir_cond_equal, T_INT, dest);
2239 } else {
2240 LabelObj* L = new LabelObj();
2241 __ cmp(lir_cond_less, value, low_key);
2242 __ branch(lir_cond_less, T_INT, L->label());
2243 __ cmp(lir_cond_lessEqual, value, high_key);
2244 __ branch(lir_cond_lessEqual, T_INT, dest);
2245 __ branch_destination(L->label());
2246 }
2247 }
2248 __ jump(default_sux);
2249}
2250
2251
2252SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2253 SwitchRangeList* res = new SwitchRangeList();
2254 int len = x->length();
2255 if (len > 0) {
2256 BlockBegin* sux = x->sux_at(0);
2257 int key = x->lo_key();
2258 BlockBegin* default_sux = x->default_sux();
2259 SwitchRange* range = new SwitchRange(key, sux);
2260 for (int i = 0; i < len; i++, key++) {
2261 BlockBegin* new_sux = x->sux_at(i);
2262 if (sux == new_sux) {
2263 // still in same range
2264 range->set_high_key(key);
2265 } else {
2266 // skip tests which explicitly dispatch to the default
2267 if (sux != default_sux) {
2268 res->append(range);
2269 }
2270 range = new SwitchRange(key, new_sux);
2271 }
2272 sux = new_sux;
2273 }
2274 if (res->length() == 0 || res->last() != range) res->append(range);
2275 }
2276 return res;
2277}
2278
2279
2280// we expect the keys to be sorted by increasing value
2281SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2282 SwitchRangeList* res = new SwitchRangeList();
2283 int len = x->length();
2284 if (len > 0) {
2285 BlockBegin* default_sux = x->default_sux();
2286 int key = x->key_at(0);
2287 BlockBegin* sux = x->sux_at(0);
2288 SwitchRange* range = new SwitchRange(key, sux);
2289 for (int i = 1; i < len; i++) {
2290 int new_key = x->key_at(i);
2291 BlockBegin* new_sux = x->sux_at(i);
2292 if (key+1 == new_key && sux == new_sux) {
2293 // still in same range
2294 range->set_high_key(new_key);
2295 } else {
2296 // skip tests which explicitly dispatch to the default
2297 if (range->sux() != default_sux) {
2298 res->append(range);
2299 }
2300 range = new SwitchRange(new_key, new_sux);
2301 }
2302 key = new_key;
2303 sux = new_sux;
2304 }
2305 if (res->length() == 0 || res->last() != range) res->append(range);
2306 }
2307 return res;
2308}
2309
2310
2311void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2312 LIRItem tag(x->tag(), this);
2313 tag.load_item();
2314 set_no_result(x);
2315
2316 if (x->is_safepoint()) {
2317 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2318 }
2319
2320 // move values into phi locations
2321 move_to_phi(x->state());
2322
2323 int lo_key = x->lo_key();
2324 int len = x->length();
2325 assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2326 LIR_Opr value = tag.result();
2327
2328 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2329 ciMethod* method = x->state()->scope()->method();
2330 ciMethodData* md = method->method_data_or_null();
2331 assert(md != NULL, "Sanity");
2332 ciProfileData* data = md->bci_to_data(x->state()->bci());
2333 assert(data != NULL, "must have profiling data");
2334 assert(data->is_MultiBranchData(), "bad profile data?");
2335 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2336 LIR_Opr md_reg = new_register(T_METADATA);
2337 __ metadata2reg(md->constant_encoding(), md_reg);
2338 LIR_Opr data_offset_reg = new_pointer_register();
2339 LIR_Opr tmp_reg = new_pointer_register();
2340
2341 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2342 for (int i = 0; i < len; i++) {
2343 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2344 __ cmp(lir_cond_equal, value, i + lo_key);
2345 __ move(data_offset_reg, tmp_reg);
2346 __ cmove(lir_cond_equal,
2347 LIR_OprFact::intptrConst(count_offset),
2348 tmp_reg,
2349 data_offset_reg, T_INT);
2350 }
2351
2352 LIR_Opr data_reg = new_pointer_register();
2353 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2354 __ move(data_addr, data_reg);
2355 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2356 __ move(data_reg, data_addr);
2357 }
2358
2359 if (UseTableRanges) {
2360 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2361 } else {
2362 for (int i = 0; i < len; i++) {
2363 __ cmp(lir_cond_equal, value, i + lo_key);
2364 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2365 }
2366 __ jump(x->default_sux());
2367 }
2368}
2369
2370
2371void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2372 LIRItem tag(x->tag(), this);
2373 tag.load_item();
2374 set_no_result(x);
2375
2376 if (x->is_safepoint()) {
2377 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2378 }
2379
2380 // move values into phi locations
2381 move_to_phi(x->state());
2382
2383 LIR_Opr value = tag.result();
2384 int len = x->length();
2385
2386 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2387 ciMethod* method = x->state()->scope()->method();
2388 ciMethodData* md = method->method_data_or_null();
2389 assert(md != NULL, "Sanity");
2390 ciProfileData* data = md->bci_to_data(x->state()->bci());
2391 assert(data != NULL, "must have profiling data");
2392 assert(data->is_MultiBranchData(), "bad profile data?");
2393 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2394 LIR_Opr md_reg = new_register(T_METADATA);
2395 __ metadata2reg(md->constant_encoding(), md_reg);
2396 LIR_Opr data_offset_reg = new_pointer_register();
2397 LIR_Opr tmp_reg = new_pointer_register();
2398
2399 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2400 for (int i = 0; i < len; i++) {
2401 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2402 __ cmp(lir_cond_equal, value, x->key_at(i));
2403 __ move(data_offset_reg, tmp_reg);
2404 __ cmove(lir_cond_equal,
2405 LIR_OprFact::intptrConst(count_offset),
2406 tmp_reg,
2407 data_offset_reg, T_INT);
2408 }
2409
2410 LIR_Opr data_reg = new_pointer_register();
2411 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2412 __ move(data_addr, data_reg);
2413 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2414 __ move(data_reg, data_addr);
2415 }
2416
2417 if (UseTableRanges) {
2418 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2419 } else {
2420 int len = x->length();
2421 for (int i = 0; i < len; i++) {
2422 __ cmp(lir_cond_equal, value, x->key_at(i));
2423 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2424 }
2425 __ jump(x->default_sux());
2426 }
2427}
2428
2429
2430void LIRGenerator::do_Goto(Goto* x) {
2431 set_no_result(x);
2432
2433 if (block()->next()->as_OsrEntry()) {
2434 // need to free up storage used for OSR entry point
2435 LIR_Opr osrBuffer = block()->next()->operand();
2436 BasicTypeList signature;
2437 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2438 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2439 __ move(osrBuffer, cc->args()->at(0));
2440 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2441 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2442 }
2443
2444 if (x->is_safepoint()) {
2445 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2446
2447 // increment backedge counter if needed
2448 CodeEmitInfo* info = state_for(x, state);
2449 increment_backedge_counter(info, x->profiled_bci());
2450 CodeEmitInfo* safepoint_info = state_for(x, state);
2451 __ safepoint(safepoint_poll_register(), safepoint_info);
2452 }
2453
2454 // Gotos can be folded Ifs, handle this case.
2455 if (x->should_profile()) {
2456 ciMethod* method = x->profiled_method();
2457 assert(method != NULL, "method should be set if branch is profiled");
2458 ciMethodData* md = method->method_data_or_null();
2459 assert(md != NULL, "Sanity");
2460 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2461 assert(data != NULL, "must have profiling data");
2462 int offset;
2463 if (x->direction() == Goto::taken) {
2464 assert(data->is_BranchData(), "need BranchData for two-way branches");
2465 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2466 } else if (x->direction() == Goto::not_taken) {
2467 assert(data->is_BranchData(), "need BranchData for two-way branches");
2468 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2469 } else {
2470 assert(data->is_JumpData(), "need JumpData for branches");
2471 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2472 }
2473 LIR_Opr md_reg = new_register(T_METADATA);
2474 __ metadata2reg(md->constant_encoding(), md_reg);
2475
2476 increment_counter(new LIR_Address(md_reg, offset,
2477 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2478 }
2479
2480 // emit phi-instruction move after safepoint since this simplifies
2481 // describing the state as the safepoint.
2482 move_to_phi(x->state());
2483
2484 __ jump(x->default_sux());
2485}
2486
2487/**
2488 * Emit profiling code if needed for arguments, parameters, return value types
2489 *
2490 * @param md MDO the code will update at runtime
2491 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2492 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2493 * @param profiled_k current profile
2494 * @param obj IR node for the object to be profiled
2495 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2496 * Set once we find an update to make and use for next ones.
2497 * @param not_null true if we know obj cannot be null
2498 * @param signature_at_call_k signature at call for obj
2499 * @param callee_signature_k signature of callee for obj
2500 * at call and callee signatures differ at method handle call
2501 * @return the only klass we know will ever be seen at this profile point
2502 */
2503ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2504 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2505 ciKlass* callee_signature_k) {
2506 ciKlass* result = NULL;
2507 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2508 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2509 // known not to be null or null bit already set and already set to
2510 // unknown: nothing we can do to improve profiling
2511 if (!do_null && !do_update) {
2512 return result;
2513 }
2514
2515 ciKlass* exact_klass = NULL;
2516 Compilation* comp = Compilation::current();
2517 if (do_update) {
2518 // try to find exact type, using CHA if possible, so that loading
2519 // the klass from the object can be avoided
2520 ciType* type = obj->exact_type();
2521 if (type == NULL) {
2522 type = obj->declared_type();
2523 type = comp->cha_exact_type(type);
2524 }
2525 assert(type == NULL || type->is_klass(), "type should be class");
2526 exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2527
2528 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2529 }
2530
2531 if (!do_null && !do_update) {
2532 return result;
2533 }
2534
2535 ciKlass* exact_signature_k = NULL;
2536 if (do_update) {
2537 // Is the type from the signature exact (the only one possible)?
2538 exact_signature_k = signature_at_call_k->exact_klass();
2539 if (exact_signature_k == NULL) {
2540 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2541 } else {
2542 result = exact_signature_k;
2543 // Known statically. No need to emit any code: prevent
2544 // LIR_Assembler::emit_profile_type() from emitting useless code
2545 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2546 }
2547 // exact_klass and exact_signature_k can be both non NULL but
2548 // different if exact_klass is loaded after the ciObject for
2549 // exact_signature_k is created.
2550 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2551 // sometimes the type of the signature is better than the best type
2552 // the compiler has
2553 exact_klass = exact_signature_k;
2554 }
2555 if (callee_signature_k != NULL &&
2556 callee_signature_k != signature_at_call_k) {
2557 ciKlass* improved_klass = callee_signature_k->exact_klass();
2558 if (improved_klass == NULL) {
2559 improved_klass = comp->cha_exact_type(callee_signature_k);
2560 }
2561 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2562 exact_klass = exact_signature_k;
2563 }
2564 }
2565 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2566 }
2567
2568 if (!do_null && !do_update) {
2569 return result;
2570 }
2571
2572 if (mdp == LIR_OprFact::illegalOpr) {
2573 mdp = new_register(T_METADATA);
2574 __ metadata2reg(md->constant_encoding(), mdp);
2575 if (md_base_offset != 0) {
2576 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2577 mdp = new_pointer_register();
2578 __ leal(LIR_OprFact::address(base_type_address), mdp);
2579 }
2580 }
2581 LIRItem value(obj, this);
2582 value.load_item();
2583 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2584 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2585 return result;
2586}
2587
2588// profile parameters on entry to the root of the compilation
2589void LIRGenerator::profile_parameters(Base* x) {
2590 if (compilation()->profile_parameters()) {
2591 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2592 ciMethodData* md = scope()->method()->method_data_or_null();
2593 assert(md != NULL, "Sanity");
2594
2595 if (md->parameters_type_data() != NULL) {
2596 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2597 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2598 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2599 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2600 LIR_Opr src = args->at(i);
2601 assert(!src->is_illegal(), "check");
2602 BasicType t = src->type();
2603 if (t == T_OBJECT || t == T_ARRAY) {
2604 intptr_t profiled_k = parameters->type(j);
2605 Local* local = x->state()->local_at(java_index)->as_Local();
2606 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2607 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2608 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2609 // If the profile is known statically set it once for all and do not emit any code
2610 if (exact != NULL) {
2611 md->set_parameter_type(j, exact);
2612 }
2613 j++;
2614 }
2615 java_index += type2size[t];
2616 }
2617 }
2618 }
2619}
2620
2621void LIRGenerator::do_Base(Base* x) {
2622 __ std_entry(LIR_OprFact::illegalOpr);
2623 // Emit moves from physical registers / stack slots to virtual registers
2624 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2625 IRScope* irScope = compilation()->hir()->top_scope();
2626 int java_index = 0;
2627 for (int i = 0; i < args->length(); i++) {
2628 LIR_Opr src = args->at(i);
2629 assert(!src->is_illegal(), "check");
2630 BasicType t = src->type();
2631
2632 // Types which are smaller than int are passed as int, so
2633 // correct the type which passed.
2634 switch (t) {
2635 case T_BYTE:
2636 case T_BOOLEAN:
2637 case T_SHORT:
2638 case T_CHAR:
2639 t = T_INT;
2640 break;
2641 default:
2642 break;
2643 }
2644
2645 LIR_Opr dest = new_register(t);
2646 __ move(src, dest);
2647
2648 // Assign new location to Local instruction for this local
2649 Local* local = x->state()->local_at(java_index)->as_Local();
2650 assert(local != NULL, "Locals for incoming arguments must have been created");
2651#ifndef __SOFTFP__
2652 // The java calling convention passes double as long and float as int.
2653 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2654#endif // __SOFTFP__
2655 local->set_operand(dest);
2656 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2657 java_index += type2size[t];
2658 }
2659
2660 if (compilation()->env()->dtrace_method_probes()) {
2661 BasicTypeList signature;
2662 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2663 signature.append(T_METADATA); // Method*
2664 LIR_OprList* args = new LIR_OprList();
2665 args->append(getThreadPointer());
2666 LIR_Opr meth = new_register(T_METADATA);
2667 __ metadata2reg(method()->constant_encoding(), meth);
2668 args->append(meth);
2669 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2670 }
2671
2672 if (method()->is_synchronized()) {
2673 LIR_Opr obj;
2674 if (method()->is_static()) {
2675 obj = new_register(T_OBJECT);
2676 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2677 } else {
2678 Local* receiver = x->state()->local_at(0)->as_Local();
2679 assert(receiver != NULL, "must already exist");
2680 obj = receiver->operand();
2681 }
2682 assert(obj->is_valid(), "must be valid");
2683
2684 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2685 LIR_Opr lock = syncLockOpr();
2686 __ load_stack_address_monitor(0, lock);
2687
2688 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2689 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2690
2691 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2692 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2693 }
2694 }
2695 if (compilation()->age_code()) {
2696 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2697 decrement_age(info);
2698 }
2699 // increment invocation counters if needed
2700 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2701 profile_parameters(x);
2702 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2703 increment_invocation_counter(info);
2704 }
2705
2706 // all blocks with a successor must end with an unconditional jump
2707 // to the successor even if they are consecutive
2708 __ jump(x->default_sux());
2709}
2710
2711
2712void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2713 // construct our frame and model the production of incoming pointer
2714 // to the OSR buffer.
2715 __ osr_entry(LIR_Assembler::osrBufferPointer());
2716 LIR_Opr result = rlock_result(x);
2717 __ move(LIR_Assembler::osrBufferPointer(), result);
2718}
2719
2720
2721void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2722 assert(args->length() == arg_list->length(),
2723 "args=%d, arg_list=%d", args->length(), arg_list->length());
2724 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2725 LIRItem* param = args->at(i);
2726 LIR_Opr loc = arg_list->at(i);
2727 if (loc->is_register()) {
2728 param->load_item_force(loc);
2729 } else {
2730 LIR_Address* addr = loc->as_address_ptr();
2731 param->load_for_store(addr->type());
2732 if (addr->type() == T_OBJECT) {
2733 __ move_wide(param->result(), addr);
2734 } else
2735 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2736 __ unaligned_move(param->result(), addr);
2737 } else {
2738 __ move(param->result(), addr);
2739 }
2740 }
2741 }
2742
2743 if (x->has_receiver()) {
2744 LIRItem* receiver = args->at(0);
2745 LIR_Opr loc = arg_list->at(0);
2746 if (loc->is_register()) {
2747 receiver->load_item_force(loc);
2748 } else {
2749 assert(loc->is_address(), "just checking");
2750 receiver->load_for_store(T_OBJECT);
2751 __ move_wide(receiver->result(), loc->as_address_ptr());
2752 }
2753 }
2754}
2755
2756
2757// Visits all arguments, returns appropriate items without loading them
2758LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2759 LIRItemList* argument_items = new LIRItemList();
2760 if (x->has_receiver()) {
2761 LIRItem* receiver = new LIRItem(x->receiver(), this);
2762 argument_items->append(receiver);
2763 }
2764 for (int i = 0; i < x->number_of_arguments(); i++) {
2765 LIRItem* param = new LIRItem(x->argument_at(i), this);
2766 argument_items->append(param);
2767 }
2768 return argument_items;
2769}
2770
2771
2772// The invoke with receiver has following phases:
2773// a) traverse and load/lock receiver;
2774// b) traverse all arguments -> item-array (invoke_visit_argument)
2775// c) push receiver on stack
2776// d) load each of the items and push on stack
2777// e) unlock receiver
2778// f) move receiver into receiver-register %o0
2779// g) lock result registers and emit call operation
2780//
2781// Before issuing a call, we must spill-save all values on stack
2782// that are in caller-save register. "spill-save" moves those registers
2783// either in a free callee-save register or spills them if no free
2784// callee save register is available.
2785//
2786// The problem is where to invoke spill-save.
2787// - if invoked between e) and f), we may lock callee save
2788// register in "spill-save" that destroys the receiver register
2789// before f) is executed
2790// - if we rearrange f) to be earlier (by loading %o0) it
2791// may destroy a value on the stack that is currently in %o0
2792// and is waiting to be spilled
2793// - if we keep the receiver locked while doing spill-save,
2794// we cannot spill it as it is spill-locked
2795//
2796void LIRGenerator::do_Invoke(Invoke* x) {
2797 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2798
2799 LIR_OprList* arg_list = cc->args();
2800 LIRItemList* args = invoke_visit_arguments(x);
2801 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2802
2803 // setup result register
2804 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2805 if (x->type() != voidType) {
2806 result_register = result_register_for(x->type());
2807 }
2808
2809 CodeEmitInfo* info = state_for(x, x->state());
2810
2811 invoke_load_arguments(x, args, arg_list);
2812
2813 if (x->has_receiver()) {
2814 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2815 receiver = args->at(0)->result();
2816 }
2817
2818 // emit invoke code
2819 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2820
2821 // JSR 292
2822 // Preserve the SP over MethodHandle call sites, if needed.
2823 ciMethod* target = x->target();
2824 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2825 target->is_method_handle_intrinsic() ||
2826 target->is_compiled_lambda_form());
2827 if (is_method_handle_invoke) {
2828 info->set_is_method_handle_invoke(true);
2829 if(FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2830 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2831 }
2832 }
2833
2834 switch (x->code()) {
2835 case Bytecodes::_invokestatic:
2836 __ call_static(target, result_register,
2837 SharedRuntime::get_resolve_static_call_stub(),
2838 arg_list, info);
2839 break;
2840 case Bytecodes::_invokespecial:
2841 case Bytecodes::_invokevirtual:
2842 case Bytecodes::_invokeinterface:
2843 // for loaded and final (method or class) target we still produce an inline cache,
2844 // in order to be able to call mixed mode
2845 if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
2846 __ call_opt_virtual(target, receiver, result_register,
2847 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2848 arg_list, info);
2849 } else if (x->vtable_index() < 0) {
2850 __ call_icvirtual(target, receiver, result_register,
2851 SharedRuntime::get_resolve_virtual_call_stub(),
2852 arg_list, info);
2853 } else {
2854 int entry_offset = in_bytes(Klass::vtable_start_offset()) + x->vtable_index() * vtableEntry::size_in_bytes();
2855 int vtable_offset = entry_offset + vtableEntry::method_offset_in_bytes();
2856 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2857 }
2858 break;
2859 case Bytecodes::_invokedynamic: {
2860 __ call_dynamic(target, receiver, result_register,
2861 SharedRuntime::get_resolve_static_call_stub(),
2862 arg_list, info);
2863 break;
2864 }
2865 default:
2866 fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
2867 break;
2868 }
2869
2870 // JSR 292
2871 // Restore the SP after MethodHandle call sites, if needed.
2872 if (is_method_handle_invoke
2873 && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2874 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2875 }
2876
2877 if (x->type()->is_float() || x->type()->is_double()) {
2878 // Force rounding of results from non-strictfp when in strictfp
2879 // scope (or when we don't know the strictness of the callee, to
2880 // be safe.)
2881 if (method()->is_strict()) {
2882 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2883 result_register = round_item(result_register);
2884 }
2885 }
2886 }
2887
2888 if (result_register->is_valid()) {
2889 LIR_Opr result = rlock_result(x);
2890 __ move(result_register, result);
2891 }
2892}
2893
2894
2895void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2896 assert(x->number_of_arguments() == 1, "wrong type");
2897 LIRItem value (x->argument_at(0), this);
2898 LIR_Opr reg = rlock_result(x);
2899 value.load_item();
2900 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2901 __ move(tmp, reg);
2902}
2903
2904
2905
2906// Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2907void LIRGenerator::do_IfOp(IfOp* x) {
2908#ifdef ASSERT
2909 {
2910 ValueTag xtag = x->x()->type()->tag();
2911 ValueTag ttag = x->tval()->type()->tag();
2912 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2913 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2914 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2915 }
2916#endif
2917
2918 LIRItem left(x->x(), this);
2919 LIRItem right(x->y(), this);
2920 left.load_item();
2921 if (can_inline_as_constant(right.value())) {
2922 right.dont_load_item();
2923 } else {
2924 right.load_item();
2925 }
2926
2927 LIRItem t_val(x->tval(), this);
2928 LIRItem f_val(x->fval(), this);
2929 t_val.dont_load_item();
2930 f_val.dont_load_item();
2931 LIR_Opr reg = rlock_result(x);
2932
2933 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2934 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2935}
2936
2937#ifdef JFR_HAVE_INTRINSICS
2938void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2939 CodeEmitInfo* info = state_for(x);
2940 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2941
2942 assert(info != NULL, "must have info");
2943 LIRItem arg(x->argument_at(0), this);
2944
2945 arg.load_item();
2946 LIR_Opr klass = new_register(T_METADATA);
2947 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
2948 LIR_Opr id = new_register(T_LONG);
2949 ByteSize offset = KLASS_TRACE_ID_OFFSET;
2950 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2951
2952 __ move(trace_id_addr, id);
2953 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2954 __ store(id, trace_id_addr);
2955
2956#ifdef TRACE_ID_META_BITS
2957 __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
2958#endif
2959#ifdef TRACE_ID_SHIFT
2960 __ unsigned_shift_right(id, TRACE_ID_SHIFT, id);
2961#endif
2962
2963 __ move(id, rlock_result(x));
2964}
2965
2966void LIRGenerator::do_getEventWriter(Intrinsic* x) {
2967 LabelObj* L_end = new LabelObj();
2968
2969 LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
2970 in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
2971 T_OBJECT);
2972 LIR_Opr result = rlock_result(x);
2973 __ move_wide(jobj_addr, result);
2974 __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
2975 __ branch(lir_cond_equal, T_OBJECT, L_end->label());
2976
2977 LIR_Opr jobj = new_register(T_OBJECT);
2978 __ move(result, jobj);
2979 access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result);
2980
2981 __ branch_destination(L_end->label());
2982}
2983
2984#endif
2985
2986
2987void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
2988 assert(x->number_of_arguments() == 0, "wrong type");
2989 // Enforce computation of _reserved_argument_area_size which is required on some platforms.
2990 BasicTypeList signature;
2991 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2992 LIR_Opr reg = result_register_for(x->type());
2993 __ call_runtime_leaf(routine, getThreadTemp(),
2994 reg, new LIR_OprList());
2995 LIR_Opr result = rlock_result(x);
2996 __ move(reg, result);
2997}
2998
2999
3000
3001void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3002 switch (x->id()) {
3003 case vmIntrinsics::_intBitsToFloat :
3004 case vmIntrinsics::_doubleToRawLongBits :
3005 case vmIntrinsics::_longBitsToDouble :
3006 case vmIntrinsics::_floatToRawIntBits : {
3007 do_FPIntrinsics(x);
3008 break;
3009 }
3010
3011#ifdef JFR_HAVE_INTRINSICS
3012 case vmIntrinsics::_getClassId:
3013 do_ClassIDIntrinsic(x);
3014 break;
3015 case vmIntrinsics::_getEventWriter:
3016 do_getEventWriter(x);
3017 break;
3018 case vmIntrinsics::_counterTime:
3019 do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), x);
3020 break;
3021#endif
3022
3023 case vmIntrinsics::_currentTimeMillis:
3024 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
3025 break;
3026
3027 case vmIntrinsics::_nanoTime:
3028 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
3029 break;
3030
3031 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
3032 case vmIntrinsics::_isInstance: do_isInstance(x); break;
3033 case vmIntrinsics::_isPrimitive: do_isPrimitive(x); break;
3034 case vmIntrinsics::_getClass: do_getClass(x); break;
3035 case vmIntrinsics::_currentThread: do_currentThread(x); break;
3036
3037 case vmIntrinsics::_dlog: // fall through
3038 case vmIntrinsics::_dlog10: // fall through
3039 case vmIntrinsics::_dabs: // fall through
3040 case vmIntrinsics::_dsqrt: // fall through
3041 case vmIntrinsics::_dtan: // fall through
3042 case vmIntrinsics::_dsin : // fall through
3043 case vmIntrinsics::_dcos : // fall through
3044 case vmIntrinsics::_dexp : // fall through
3045 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3046 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3047
3048 case vmIntrinsics::_fmaD: do_FmaIntrinsic(x); break;
3049 case vmIntrinsics::_fmaF: do_FmaIntrinsic(x); break;
3050
3051 // java.nio.Buffer.checkIndex
3052 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
3053
3054 case vmIntrinsics::_compareAndSetReference:
3055 do_CompareAndSwap(x, objectType);
3056 break;
3057 case vmIntrinsics::_compareAndSetInt:
3058 do_CompareAndSwap(x, intType);
3059 break;
3060 case vmIntrinsics::_compareAndSetLong:
3061 do_CompareAndSwap(x, longType);
3062 break;
3063
3064 case vmIntrinsics::_loadFence :
3065 __ membar_acquire();
3066 break;
3067 case vmIntrinsics::_storeFence:
3068 __ membar_release();
3069 break;
3070 case vmIntrinsics::_fullFence :
3071 __ membar();
3072 break;
3073 case vmIntrinsics::_onSpinWait:
3074 __ on_spin_wait();
3075 break;
3076 case vmIntrinsics::_Reference_get:
3077 do_Reference_get(x);
3078 break;
3079
3080 case vmIntrinsics::_updateCRC32:
3081 case vmIntrinsics::_updateBytesCRC32:
3082 case vmIntrinsics::_updateByteBufferCRC32:
3083 do_update_CRC32(x);
3084 break;
3085
3086 case vmIntrinsics::_updateBytesCRC32C:
3087 case vmIntrinsics::_updateDirectByteBufferCRC32C:
3088 do_update_CRC32C(x);
3089 break;
3090
3091 case vmIntrinsics::_vectorizedMismatch:
3092 do_vectorizedMismatch(x);
3093 break;
3094
3095 default: ShouldNotReachHere(); break;
3096 }
3097}
3098
3099void LIRGenerator::profile_arguments(ProfileCall* x) {
3100 if (compilation()->profile_arguments()) {
3101 int bci = x->bci_of_invoke();
3102 ciMethodData* md = x->method()->method_data_or_null();
3103 assert(md != NULL, "Sanity");
3104 ciProfileData* data = md->bci_to_data(bci);
3105 if (data != NULL) {
3106 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3107 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3108 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3109 int base_offset = md->byte_offset_of_slot(data, extra);
3110 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3111 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3112
3113 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3114 int start = 0;
3115 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3116 if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3117 // first argument is not profiled at call (method handle invoke)
3118 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3119 start = 1;
3120 }
3121 ciSignature* callee_signature = x->callee()->signature();
3122 // method handle call to virtual method
3123 bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3124 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3125
3126 bool ignored_will_link;
3127 ciSignature* signature_at_call = NULL;
3128 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3129 ciSignatureStream signature_at_call_stream(signature_at_call);
3130
3131 // if called through method handle invoke, some arguments may have been popped
3132 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3133 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3134 ciKlass* exact = profile_type(md, base_offset, off,
3135 args->type(i), x->profiled_arg_at(i+start), mdp,
3136 !x->arg_needs_null_check(i+start),
3137 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3138 if (exact != NULL) {
3139 md->set_argument_type(bci, i, exact);
3140 }
3141 }
3142 } else {
3143#ifdef ASSERT
3144 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3145 int n = x->nb_profiled_args();
3146 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3147 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3148 "only at JSR292 bytecodes");
3149#endif
3150 }
3151 }
3152 }
3153}
3154
3155// profile parameters on entry to an inlined method
3156void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3157 if (compilation()->profile_parameters() && x->inlined()) {
3158 ciMethodData* md = x->callee()->method_data_or_null();
3159 if (md != NULL) {
3160 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3161 if (parameters_type_data != NULL) {
3162 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3163 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3164 bool has_receiver = !x->callee()->is_static();
3165 ciSignature* sig = x->callee()->signature();
3166 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3167 int i = 0; // to iterate on the Instructions
3168 Value arg = x->recv();
3169 bool not_null = false;
3170 int bci = x->bci_of_invoke();
3171 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3172 // The first parameter is the receiver so that's what we start
3173 // with if it exists. One exception is method handle call to
3174 // virtual method: the receiver is in the args list
3175 if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3176 i = 1;
3177 arg = x->profiled_arg_at(0);
3178 not_null = !x->arg_needs_null_check(0);
3179 }
3180 int k = 0; // to iterate on the profile data
3181 for (;;) {
3182 intptr_t profiled_k = parameters->type(k);
3183 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3184 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3185 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3186 // If the profile is known statically set it once for all and do not emit any code
3187 if (exact != NULL) {
3188 md->set_parameter_type(k, exact);
3189 }
3190 k++;
3191 if (k >= parameters_type_data->number_of_parameters()) {
3192#ifdef ASSERT
3193 int extra = 0;
3194 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3195 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3196 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3197 extra += 1;
3198 }
3199 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3200#endif
3201 break;
3202 }
3203 arg = x->profiled_arg_at(i);
3204 not_null = !x->arg_needs_null_check(i);
3205 i++;
3206 }
3207 }
3208 }
3209 }
3210}
3211
3212void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3213 // Need recv in a temporary register so it interferes with the other temporaries
3214 LIR_Opr recv = LIR_OprFact::illegalOpr;
3215 LIR_Opr mdo = new_register(T_METADATA);
3216 // tmp is used to hold the counters on SPARC
3217 LIR_Opr tmp = new_pointer_register();
3218
3219 if (x->nb_profiled_args() > 0) {
3220 profile_arguments(x);
3221 }
3222
3223 // profile parameters on inlined method entry including receiver
3224 if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3225 profile_parameters_at_call(x);
3226 }
3227
3228 if (x->recv() != NULL) {
3229 LIRItem value(x->recv(), this);
3230 value.load_item();
3231 recv = new_register(T_OBJECT);
3232 __ move(value.result(), recv);
3233 }
3234 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3235}
3236
3237void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3238 int bci = x->bci_of_invoke();
3239 ciMethodData* md = x->method()->method_data_or_null();
3240 assert(md != NULL, "Sanity");
3241 ciProfileData* data = md->bci_to_data(bci);
3242 if (data != NULL) {
3243 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3244 ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3245 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3246
3247 bool ignored_will_link;
3248 ciSignature* signature_at_call = NULL;
3249 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3250
3251 // The offset within the MDO of the entry to update may be too large
3252 // to be used in load/store instructions on some platforms. So have
3253 // profile_type() compute the address of the profile in a register.
3254 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3255 ret->type(), x->ret(), mdp,
3256 !x->needs_null_check(),
3257 signature_at_call->return_type()->as_klass(),
3258 x->callee()->signature()->return_type()->as_klass());
3259 if (exact != NULL) {
3260 md->set_return_type(bci, exact);
3261 }
3262 }
3263}
3264
3265void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3266 // We can safely ignore accessors here, since c2 will inline them anyway,
3267 // accessors are also always mature.
3268 if (!x->inlinee()->is_accessor()) {
3269 CodeEmitInfo* info = state_for(x, x->state(), true);
3270 // Notify the runtime very infrequently only to take care of counter overflows
3271 int freq_log = Tier23InlineeNotifyFreqLog;
3272 double scale;
3273 if (_method->has_option_value("CompileThresholdScaling", scale)) {
3274 freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3275 }
3276 increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
3277 }
3278}
3279
3280void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
3281 if (compilation()->count_backedges()) {
3282#if defined(X86) && !defined(_LP64)
3283 // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
3284 LIR_Opr left_copy = new_register(left->type());
3285 __ move(left, left_copy);
3286 __ cmp(cond, left_copy, right);
3287#else
3288 __ cmp(cond, left, right);
3289#endif
3290 LIR_Opr step = new_register(T_INT);
3291 LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
3292 LIR_Opr zero = LIR_OprFact::intConst(0);
3293 __ cmove(cond,
3294 (left_bci < bci) ? plus_one : zero,
3295 (right_bci < bci) ? plus_one : zero,
3296 step, left->type());
3297 increment_backedge_counter(info, step, bci);
3298 }
3299}
3300
3301
3302void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
3303 int freq_log = 0;
3304 int level = compilation()->env()->comp_level();
3305 if (level == CompLevel_limited_profile) {
3306 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3307 } else if (level == CompLevel_full_profile) {
3308 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3309 } else {
3310 ShouldNotReachHere();
3311 }
3312 // Increment the appropriate invocation/backedge counter and notify the runtime.
3313 double scale;
3314 if (_method->has_option_value("CompileThresholdScaling", scale)) {
3315 freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
3316 }
3317 increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
3318}
3319
3320void LIRGenerator::decrement_age(CodeEmitInfo* info) {
3321 ciMethod* method = info->scope()->method();
3322 MethodCounters* mc_adr = method->ensure_method_counters();
3323 if (mc_adr != NULL) {
3324 LIR_Opr mc = new_pointer_register();
3325 __ move(LIR_OprFact::intptrConst(mc_adr), mc);
3326 int offset = in_bytes(MethodCounters::nmethod_age_offset());
3327 LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
3328 LIR_Opr result = new_register(T_INT);
3329 __ load(counter, result);
3330 __ sub(result, LIR_OprFact::intConst(1), result);
3331 __ store(result, counter);
3332 // DeoptimizeStub will reexecute from the current state in code info.
3333 CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
3334 Deoptimization::Action_make_not_entrant);
3335 __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
3336 __ branch(lir_cond_lessEqual, T_INT, deopt);
3337 }
3338}
3339
3340
3341void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3342 ciMethod *method, LIR_Opr step, int frequency,
3343 int bci, bool backedge, bool notify) {
3344 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
3345 int level = _compilation->env()->comp_level();
3346 assert(level > CompLevel_simple, "Shouldn't be here");
3347
3348 int offset = -1;
3349 LIR_Opr counter_holder = NULL;
3350 if (level == CompLevel_limited_profile) {
3351 MethodCounters* counters_adr = method->ensure_method_counters();
3352 if (counters_adr == NULL) {
3353 bailout("method counters allocation failed");
3354 return;
3355 }
3356 counter_holder = new_pointer_register();
3357 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3358 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3359 MethodCounters::invocation_counter_offset());
3360 } else if (level == CompLevel_full_profile) {
3361 counter_holder = new_register(T_METADATA);
3362 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3363 MethodData::invocation_counter_offset());
3364 ciMethodData* md = method->method_data_or_null();
3365 assert(md != NULL, "Sanity");
3366 __ metadata2reg(md->constant_encoding(), counter_holder);
3367 } else {
3368 ShouldNotReachHere();
3369 }
3370 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3371 LIR_Opr result = new_register(T_INT);
3372 __ load(counter, result);
3373 __ add(result, step, result);
3374 __ store(result, counter);
3375 if (notify && (!backedge || UseOnStackReplacement)) {
3376 LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
3377 // The bci for info can point to cmp for if's we want the if bci
3378 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3379 int freq = frequency << InvocationCounter::count_shift;
3380 if (freq == 0) {
3381 if (!step->is_constant()) {
3382 __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3383 __ branch(lir_cond_notEqual, T_ILLEGAL, overflow);
3384 } else {
3385 __ branch(lir_cond_always, T_ILLEGAL, overflow);
3386 }
3387 } else {
3388 LIR_Opr mask = load_immediate(freq, T_INT);
3389 if (!step->is_constant()) {
3390 // If step is 0, make sure the overflow check below always fails
3391 __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
3392 __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
3393 }
3394 __ logical_and(result, mask, result);
3395 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3396 __ branch(lir_cond_equal, T_INT, overflow);
3397 }
3398 __ branch_destination(overflow->continuation());
3399 }
3400}
3401
3402void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3403 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3404 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3405
3406 if (x->pass_thread()) {
3407 signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3408 args->append(getThreadPointer());
3409 }
3410
3411 for (int i = 0; i < x->number_of_arguments(); i++) {
3412 Value a = x->argument_at(i);
3413 LIRItem* item = new LIRItem(a, this);
3414 item->load_item();
3415 args->append(item->result());
3416 signature->append(as_BasicType(a->type()));
3417 }
3418
3419 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3420 if (x->type() == voidType) {
3421 set_no_result(x);
3422 } else {
3423 __ move(result, rlock_result(x));
3424 }
3425}
3426
3427#ifdef ASSERT
3428void LIRGenerator::do_Assert(Assert *x) {
3429 ValueTag tag = x->x()->type()->tag();
3430 If::Condition cond = x->cond();
3431
3432 LIRItem xitem(x->x(), this);
3433 LIRItem yitem(x->y(), this);
3434 LIRItem* xin = &xitem;
3435 LIRItem* yin = &yitem;
3436
3437 assert(tag == intTag, "Only integer assertions are valid!");
3438
3439 xin->load_item();
3440 yin->dont_load_item();
3441
3442 set_no_result(x);
3443
3444 LIR_Opr left = xin->result();
3445 LIR_Opr right = yin->result();
3446
3447 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3448}
3449#endif
3450
3451void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3452
3453
3454 Instruction *a = x->x();
3455 Instruction *b = x->y();
3456 if (!a || StressRangeCheckElimination) {
3457 assert(!b || StressRangeCheckElimination, "B must also be null");
3458
3459 CodeEmitInfo *info = state_for(x, x->state());
3460 CodeStub* stub = new PredicateFailedStub(info);
3461
3462 __ jump(stub);
3463 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3464 int a_int = a->type()->as_IntConstant()->value();
3465 int b_int = b->type()->as_IntConstant()->value();
3466
3467 bool ok = false;
3468
3469 switch(x->cond()) {
3470 case Instruction::eql: ok = (a_int == b_int); break;
3471 case Instruction::neq: ok = (a_int != b_int); break;
3472 case Instruction::lss: ok = (a_int < b_int); break;
3473 case Instruction::leq: ok = (a_int <= b_int); break;
3474 case Instruction::gtr: ok = (a_int > b_int); break;
3475 case Instruction::geq: ok = (a_int >= b_int); break;
3476 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3477 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3478 default: ShouldNotReachHere();
3479 }
3480
3481 if (ok) {
3482
3483 CodeEmitInfo *info = state_for(x, x->state());
3484 CodeStub* stub = new PredicateFailedStub(info);
3485
3486 __ jump(stub);
3487 }
3488 } else {
3489
3490 ValueTag tag = x->x()->type()->tag();
3491 If::Condition cond = x->cond();
3492 LIRItem xitem(x->x(), this);
3493 LIRItem yitem(x->y(), this);
3494 LIRItem* xin = &xitem;
3495 LIRItem* yin = &yitem;
3496
3497 assert(tag == intTag, "Only integer deoptimizations are valid!");
3498
3499 xin->load_item();
3500 yin->dont_load_item();
3501 set_no_result(x);
3502
3503 LIR_Opr left = xin->result();
3504 LIR_Opr right = yin->result();
3505
3506 CodeEmitInfo *info = state_for(x, x->state());
3507 CodeStub* stub = new PredicateFailedStub(info);
3508
3509 __ cmp(lir_cond(cond), left, right);
3510 __ branch(lir_cond(cond), right->type(), stub);
3511 }
3512}
3513
3514
3515LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3516 LIRItemList args(1);
3517 LIRItem value(arg1, this);
3518 args.append(&value);
3519 BasicTypeList signature;
3520 signature.append(as_BasicType(arg1->type()));
3521
3522 return call_runtime(&signature, &args, entry, result_type, info);
3523}
3524
3525
3526LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3527 LIRItemList args(2);
3528 LIRItem value1(arg1, this);
3529 LIRItem value2(arg2, this);
3530 args.append(&value1);
3531 args.append(&value2);
3532 BasicTypeList signature;
3533 signature.append(as_BasicType(arg1->type()));
3534 signature.append(as_BasicType(arg2->type()));
3535
3536 return call_runtime(&signature, &args, entry, result_type, info);
3537}
3538
3539
3540LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3541 address entry, ValueType* result_type, CodeEmitInfo* info) {
3542 // get a result register
3543 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3544 LIR_Opr result = LIR_OprFact::illegalOpr;
3545 if (result_type->tag() != voidTag) {
3546 result = new_register(result_type);
3547 phys_reg = result_register_for(result_type);
3548 }
3549
3550 // move the arguments into the correct location
3551 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3552 assert(cc->length() == args->length(), "argument mismatch");
3553 for (int i = 0; i < args->length(); i++) {
3554 LIR_Opr arg = args->at(i);
3555 LIR_Opr loc = cc->at(i);
3556 if (loc->is_register()) {
3557 __ move(arg, loc);
3558 } else {
3559 LIR_Address* addr = loc->as_address_ptr();
3560// if (!can_store_as_constant(arg)) {
3561// LIR_Opr tmp = new_register(arg->type());
3562// __ move(arg, tmp);
3563// arg = tmp;
3564// }
3565 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3566 __ unaligned_move(arg, addr);
3567 } else {
3568 __ move(arg, addr);
3569 }
3570 }
3571 }
3572
3573 if (info) {
3574 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3575 } else {
3576 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3577 }
3578 if (result->is_valid()) {
3579 __ move(phys_reg, result);
3580 }
3581 return result;
3582}
3583
3584
3585LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3586 address entry, ValueType* result_type, CodeEmitInfo* info) {
3587 // get a result register
3588 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3589 LIR_Opr result = LIR_OprFact::illegalOpr;
3590 if (result_type->tag() != voidTag) {
3591 result = new_register(result_type);
3592 phys_reg = result_register_for(result_type);
3593 }
3594
3595 // move the arguments into the correct location
3596 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3597
3598 assert(cc->length() == args->length(), "argument mismatch");
3599 for (int i = 0; i < args->length(); i++) {
3600 LIRItem* arg = args->at(i);
3601 LIR_Opr loc = cc->at(i);
3602 if (loc->is_register()) {
3603 arg->load_item_force(loc);
3604 } else {
3605 LIR_Address* addr = loc->as_address_ptr();
3606 arg->load_for_store(addr->type());
3607 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3608 __ unaligned_move(arg->result(), addr);
3609 } else {
3610 __ move(arg->result(), addr);
3611 }
3612 }
3613 }
3614
3615 if (info) {
3616 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3617 } else {
3618 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3619 }
3620 if (result->is_valid()) {
3621 __ move(phys_reg, result);
3622 }
3623 return result;
3624}
3625
3626void LIRGenerator::do_MemBar(MemBar* x) {
3627 LIR_Code code = x->code();
3628 switch(code) {
3629 case lir_membar_acquire : __ membar_acquire(); break;
3630 case lir_membar_release : __ membar_release(); break;
3631 case lir_membar : __ membar(); break;
3632 case lir_membar_loadload : __ membar_loadload(); break;
3633 case lir_membar_storestore: __ membar_storestore(); break;
3634 case lir_membar_loadstore : __ membar_loadstore(); break;
3635 case lir_membar_storeload : __ membar_storeload(); break;
3636 default : ShouldNotReachHere(); break;
3637 }
3638}
3639
3640LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3641 LIR_Opr value_fixed = rlock_byte(T_BYTE);
3642 if (TwoOperandLIRForm) {
3643 __ move(value, value_fixed);
3644 __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3645 } else {
3646 __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3647 }
3648 LIR_Opr klass = new_register(T_METADATA);
3649 __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
3650 null_check_info = NULL;
3651 LIR_Opr layout = new_register(T_INT);
3652 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3653 int diffbit = Klass::layout_helper_boolean_diffbit();
3654 __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3655 __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3656 __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3657 value = value_fixed;
3658 return value;
3659}
3660
3661LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3662 if (x->check_boolean()) {
3663 value = mask_boolean(array, value, null_check_info);
3664 }
3665 return value;
3666}
3667