1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6#if defined(TARGET_ARCH_ARM)
7
8#include "vm/compiler/backend/il.h"
9
10#include "vm/compiler/backend/flow_graph.h"
11#include "vm/compiler/backend/flow_graph_compiler.h"
12#include "vm/compiler/backend/locations.h"
13#include "vm/compiler/backend/locations_helpers.h"
14#include "vm/compiler/backend/range_analysis.h"
15#include "vm/compiler/compiler_state.h"
16#include "vm/compiler/ffi/native_calling_convention.h"
17#include "vm/compiler/jit/compiler.h"
18#include "vm/cpu.h"
19#include "vm/dart_entry.h"
20#include "vm/instructions.h"
21#include "vm/object_store.h"
22#include "vm/parser.h"
23#include "vm/simulator.h"
24#include "vm/stack_frame.h"
25#include "vm/stub_code.h"
26#include "vm/symbols.h"
27#include "vm/type_testing_stubs.h"
28
29#define __ compiler->assembler()->
30#define Z (compiler->zone())
31
32namespace dart {
33
34// Generic summary for call instructions that have all arguments pushed
35// on the stack and return the result in a fixed location depending on
36// the return value (R0, Location::Pair(R0, R1) or Q0).
37LocationSummary* Instruction::MakeCallSummary(Zone* zone,
38 const Instruction* instr,
39 LocationSummary* locs) {
40 ASSERT(locs == nullptr || locs->always_calls());
41 LocationSummary* result =
42 ((locs == nullptr)
43 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
44 : locs);
45 const auto representation = instr->representation();
46 switch (representation) {
47 case kTagged:
48 result->set_out(
49 0, Location::RegisterLocation(CallingConventions::kReturnReg));
50 break;
51 case kUnboxedInt64:
52 result->set_out(
53 0, Location::Pair(
54 Location::RegisterLocation(CallingConventions::kReturnReg),
55 Location::RegisterLocation(
56 CallingConventions::kSecondReturnReg)));
57 break;
58 case kUnboxedDouble:
59 result->set_out(
60 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
61 break;
62 default:
63 UNREACHABLE();
64 break;
65 }
66 return result;
67}
68
69LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
70 bool opt) const {
71 const intptr_t kNumInputs = 1;
72 const intptr_t kNumTemps = ((representation() == kUnboxedDouble) ? 1 : 0);
73 LocationSummary* locs = new (zone)
74 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
75
76 locs->set_in(0, Location::RequiresRegister());
77 switch (representation()) {
78 case kTagged:
79 locs->set_out(0, Location::RequiresRegister());
80 break;
81 case kUnboxedInt64:
82 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
83 Location::RequiresRegister()));
84 break;
85 case kUnboxedDouble:
86 locs->set_temp(0, Location::RequiresRegister());
87 locs->set_out(0, Location::RequiresFpuRegister());
88 break;
89 default:
90 UNREACHABLE();
91 break;
92 }
93 return locs;
94}
95
96void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
97 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
98 ASSERT(kSmiTag == 0);
99 ASSERT(kSmiTagSize == 1);
100
101 const Register index = locs()->in(0).reg();
102
103 switch (representation()) {
104 case kTagged: {
105 const auto out = locs()->out(0).reg();
106 __ add(out, base_reg(), compiler::Operand(index, LSL, 1));
107 __ ldr(out, compiler::Address(out, offset()));
108 break;
109 }
110 case kUnboxedInt64: {
111 const auto out_lo = locs()->out(0).AsPairLocation()->At(0).reg();
112 const auto out_hi = locs()->out(0).AsPairLocation()->At(1).reg();
113
114 __ add(out_hi, base_reg(), compiler::Operand(index, LSL, 1));
115 __ ldr(out_lo, compiler::Address(out_hi, offset()));
116 __ ldr(out_hi,
117 compiler::Address(out_hi, offset() + compiler::target::kWordSize));
118 break;
119 }
120 case kUnboxedDouble: {
121 const auto tmp = locs()->temp(0).reg();
122 const auto out = EvenDRegisterOf(locs()->out(0).fpu_reg());
123 __ add(tmp, base_reg(), compiler::Operand(index, LSL, 1));
124 __ LoadDFromOffset(out, tmp, offset());
125 break;
126 }
127 default:
128 UNREACHABLE();
129 break;
130 }
131}
132
133DEFINE_BACKEND(StoreIndexedUnsafe,
134 (NoLocation, Register index, Register value)) {
135 ASSERT(instr->RequiredInputRepresentation(
136 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
137 __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 1));
138 __ str(value, compiler::Address(TMP, instr->offset()));
139
140 ASSERT(kSmiTag == 0);
141 ASSERT(kSmiTagSize == 1);
142}
143
144DEFINE_BACKEND(TailCall,
145 (NoLocation,
146 Fixed<Register, ARGS_DESC_REG>,
147 Temp<Register> temp)) {
148 compiler->EmitTailCallToStub(instr->code());
149
150 // Even though the TailCallInstr will be the last instruction in a basic
151 // block, the flow graph compiler will emit native code for other blocks after
152 // the one containing this instruction and needs to be able to use the pool.
153 // (The `LeaveDartFrame` above disables usages of the pool.)
154 __ set_constant_pool_allowed(true);
155}
156
157LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
158 bool opt) const {
159 const intptr_t kNumInputs = 5;
160 const intptr_t kNumTemps =
161 element_size_ == 16 ? 4 : element_size_ == 8 ? 2 : 1;
162 LocationSummary* locs = new (zone)
163 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
164 locs->set_in(kSrcPos, Location::WritableRegister());
165 locs->set_in(kDestPos, Location::WritableRegister());
166 locs->set_in(kSrcStartPos, Location::RequiresRegister());
167 locs->set_in(kDestStartPos, Location::RequiresRegister());
168 locs->set_in(kLengthPos, Location::WritableRegister());
169 for (intptr_t i = 0; i < kNumTemps; i++) {
170 locs->set_temp(i, Location::RequiresRegister());
171 }
172 return locs;
173}
174
175void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
176 const Register src_reg = locs()->in(kSrcPos).reg();
177 const Register dest_reg = locs()->in(kDestPos).reg();
178 const Register src_start_reg = locs()->in(kSrcStartPos).reg();
179 const Register dest_start_reg = locs()->in(kDestStartPos).reg();
180 const Register length_reg = locs()->in(kLengthPos).reg();
181
182 const Register temp_reg = locs()->temp(0).reg();
183 RegList temp_regs = 0;
184 for (intptr_t i = 0; i < locs()->temp_count(); i++) {
185 temp_regs |= 1 << locs()->temp(i).reg();
186 }
187
188 EmitComputeStartPointer(compiler, src_cid_, src_start(), src_reg,
189 src_start_reg);
190 EmitComputeStartPointer(compiler, dest_cid_, dest_start(), dest_reg,
191 dest_start_reg);
192
193 compiler::Label loop, done;
194
195 compiler::Address src_address =
196 compiler::Address(src_reg, element_size_, compiler::Address::PostIndex);
197 compiler::Address dest_address =
198 compiler::Address(dest_reg, element_size_, compiler::Address::PostIndex);
199
200 // Untag length and skip copy if length is zero.
201 __ movs(length_reg, compiler::Operand(length_reg, ASR, 1));
202 __ b(&done, ZERO);
203
204 __ Bind(&loop);
205 switch (element_size_) {
206 case 1:
207 __ ldrb(temp_reg, src_address);
208 __ strb(temp_reg, dest_address);
209 break;
210 case 2:
211 __ ldrh(temp_reg, src_address);
212 __ strh(temp_reg, dest_address);
213 break;
214 case 4:
215 __ ldr(temp_reg, src_address);
216 __ str(temp_reg, dest_address);
217 break;
218 case 8:
219 case 16:
220 __ ldm(BlockAddressMode::IA_W, src_reg, temp_regs);
221 __ stm(BlockAddressMode::IA_W, dest_reg, temp_regs);
222 break;
223 }
224 __ subs(length_reg, length_reg, compiler::Operand(1));
225 __ b(&loop, NOT_ZERO);
226 __ Bind(&done);
227}
228
229void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
230 classid_t array_cid,
231 Value* start,
232 Register array_reg,
233 Register start_reg) {
234 if (IsTypedDataBaseClassId(array_cid)) {
235 __ ldr(
236 array_reg,
237 compiler::FieldAddress(
238 array_reg, compiler::target::TypedDataBase::data_field_offset()));
239 } else {
240 switch (array_cid) {
241 case kOneByteStringCid:
242 __ add(
243 array_reg, array_reg,
244 compiler::Operand(compiler::target::OneByteString::data_offset() -
245 kHeapObjectTag));
246 break;
247 case kTwoByteStringCid:
248 __ add(
249 array_reg, array_reg,
250 compiler::Operand(compiler::target::OneByteString::data_offset() -
251 kHeapObjectTag));
252 break;
253 case kExternalOneByteStringCid:
254 __ ldr(array_reg,
255 compiler::FieldAddress(array_reg,
256 compiler::target::ExternalOneByteString::
257 external_data_offset()));
258 break;
259 case kExternalTwoByteStringCid:
260 __ ldr(array_reg,
261 compiler::FieldAddress(array_reg,
262 compiler::target::ExternalTwoByteString::
263 external_data_offset()));
264 break;
265 default:
266 UNREACHABLE();
267 break;
268 }
269 }
270 intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) - 1;
271 if (shift < 0) {
272 __ add(array_reg, array_reg, compiler::Operand(start_reg, ASR, -shift));
273 } else {
274 __ add(array_reg, array_reg, compiler::Operand(start_reg, LSL, shift));
275 }
276}
277
278LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
279 bool opt) const {
280 const intptr_t kNumInputs = 1;
281 const intptr_t kNumTemps = 0;
282 LocationSummary* locs = new (zone)
283 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
284 if (representation() == kUnboxedDouble) {
285 locs->set_in(0, Location::RequiresFpuRegister());
286 } else if (representation() == kUnboxedInt64) {
287 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
288 Location::RequiresRegister()));
289 } else {
290 locs->set_in(0, LocationAnyOrConstant(value()));
291 }
292 return locs;
293}
294
295// Buffers registers to use STMDB in order to push
296// multiple registers at once.
297class ArgumentsPusher : public ValueObject {
298 public:
299 ArgumentsPusher() {}
300
301 // Flush all buffered registers.
302 void Flush(FlowGraphCompiler* compiler) {
303 if (pending_regs_ != 0) {
304 if (is_single_register_) {
305 __ Push(lowest_register_);
306 } else {
307 __ PushList(pending_regs_);
308 }
309 pending_regs_ = 0;
310 lowest_register_ = kNoRegister;
311 is_single_register_ = false;
312 }
313 }
314
315 // Buffer given register. May push previously buffered registers if needed.
316 void PushRegister(FlowGraphCompiler* compiler, Register reg) {
317 if (pending_regs_ != 0) {
318 ASSERT(lowest_register_ != kNoRegister);
319 // STMDB pushes higher registers first, so we can only buffer
320 // lower registers.
321 if (reg < lowest_register_) {
322 pending_regs_ |= (1 << reg);
323 lowest_register_ = reg;
324 is_single_register_ = false;
325 return;
326 }
327 Flush(compiler);
328 }
329 pending_regs_ = (1 << reg);
330 lowest_register_ = reg;
331 is_single_register_ = true;
332 }
333
334 // Return a register which can be used to hold a value of an argument.
335 Register FindFreeRegister(FlowGraphCompiler* compiler,
336 Instruction* push_arg) {
337 // Dart calling conventions do not have callee-save registers,
338 // so arguments pushing can clobber all allocatable registers
339 // except registers used in arguments which were not pushed yet,
340 // as well as ParallelMove and inputs of a call instruction.
341 intptr_t busy = kReservedCpuRegisters;
342 for (Instruction* instr = push_arg;; instr = instr->next()) {
343 ASSERT(instr != nullptr);
344 if (ParallelMoveInstr* parallel_move = instr->AsParallelMove()) {
345 for (intptr_t i = 0, n = parallel_move->NumMoves(); i < n; ++i) {
346 const auto src_loc = parallel_move->MoveOperandsAt(i)->src();
347 if (src_loc.IsRegister()) {
348 busy |= (1 << src_loc.reg());
349 } else if (src_loc.IsPairLocation()) {
350 busy |= (1 << src_loc.AsPairLocation()->At(0).reg());
351 busy |= (1 << src_loc.AsPairLocation()->At(1).reg());
352 }
353 }
354 } else {
355 ASSERT(instr->IsPushArgument() || (instr->ArgumentCount() > 0));
356 for (intptr_t i = 0, n = instr->locs()->input_count(); i < n; ++i) {
357 const auto in_loc = instr->locs()->in(i);
358 if (in_loc.IsRegister()) {
359 busy |= (1 << in_loc.reg());
360 } else if (in_loc.IsPairLocation()) {
361 const auto pair_location = in_loc.AsPairLocation();
362 busy |= (1 << pair_location->At(0).reg());
363 busy |= (1 << pair_location->At(1).reg());
364 }
365 }
366 if (instr->ArgumentCount() > 0) {
367 break;
368 }
369 }
370 }
371 if (pending_regs_ != 0) {
372 // Find the highest available register which can be pushed along with
373 // pending registers.
374 Register reg = HighestAvailableRegister(busy, lowest_register_);
375 if (reg != kNoRegister) {
376 return reg;
377 }
378 Flush(compiler);
379 }
380 // At this point there are no pending buffered registers.
381 // Use LR as it's the highest free register, it is not allocatable and
382 // it is clobbered by the call.
383 static_assert(((1 << LR) & kDartAvailableCpuRegs) == 0,
384 "LR should not be allocatable");
385 return LR;
386 }
387
388 private:
389 RegList pending_regs_ = 0;
390 Register lowest_register_ = kNoRegister;
391 bool is_single_register_ = false;
392
393 Register HighestAvailableRegister(intptr_t busy, Register upper_bound) {
394 for (intptr_t i = upper_bound - 1; i >= 0; --i) {
395 if ((busy & (1 << i)) == 0) {
396 return static_cast<Register>(i);
397 }
398 }
399 return kNoRegister;
400 }
401};
402
403void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
404 // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode
405 // where arguments are pushed by their definitions.
406 if (compiler->is_optimizing()) {
407 if (previous()->IsPushArgument()) {
408 // Already generated.
409 return;
410 }
411 ArgumentsPusher pusher;
412 for (PushArgumentInstr* push_arg = this; push_arg != nullptr;
413 push_arg = push_arg->next()->AsPushArgument()) {
414 const Location value = push_arg->locs()->in(0);
415 if (value.IsRegister()) {
416 pusher.PushRegister(compiler, value.reg());
417 } else if (value.IsPairLocation()) {
418 pusher.PushRegister(compiler, value.AsPairLocation()->At(1).reg());
419 pusher.PushRegister(compiler, value.AsPairLocation()->At(0).reg());
420 } else if (value.IsFpuRegister()) {
421 pusher.Flush(compiler);
422 __ vstmd(DB_W, SP, EvenDRegisterOf(value.fpu_reg()), 1);
423 } else {
424 const Register reg = pusher.FindFreeRegister(compiler, push_arg);
425 ASSERT(reg != kNoRegister);
426 if (value.IsConstant()) {
427 __ LoadObject(reg, value.constant());
428 } else {
429 ASSERT(value.IsStackSlot());
430 const intptr_t value_offset = value.ToStackSlotOffset();
431 __ LoadFromOffset(kWord, reg, value.base_reg(), value_offset);
432 }
433 pusher.PushRegister(compiler, reg);
434 }
435 }
436 pusher.Flush(compiler);
437 }
438}
439
440LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const {
441 const intptr_t kNumInputs = 1;
442 const intptr_t kNumTemps = 0;
443 LocationSummary* locs = new (zone)
444 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
445 switch (representation()) {
446 case kTagged:
447 locs->set_in(0,
448 Location::RegisterLocation(CallingConventions::kReturnReg));
449 break;
450 case kUnboxedInt64:
451 locs->set_in(
452 0, Location::Pair(
453 Location::RegisterLocation(CallingConventions::kReturnReg),
454 Location::RegisterLocation(
455 CallingConventions::kSecondReturnReg)));
456 break;
457 case kUnboxedDouble:
458 locs->set_in(
459 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
460 break;
461 default:
462 UNREACHABLE();
463 break;
464 }
465 return locs;
466}
467
468// Attempt optimized compilation at return instruction instead of at the entry.
469// The entry needs to be patchable, no inlined objects are allowed in the area
470// that will be overwritten by the patch instructions: a branch macro sequence.
471void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
472 if (locs()->in(0).IsRegister()) {
473 const Register result = locs()->in(0).reg();
474 ASSERT(result == CallingConventions::kReturnReg);
475 } else if (locs()->in(0).IsPairLocation()) {
476 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
477 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
478 ASSERT(result_lo == CallingConventions::kReturnReg);
479 ASSERT(result_hi == CallingConventions::kSecondReturnReg);
480 } else {
481 ASSERT(locs()->in(0).IsFpuRegister());
482 const FpuRegister result = locs()->in(0).fpu_reg();
483 ASSERT(result == CallingConventions::kReturnFpuReg);
484 }
485
486 if (compiler->intrinsic_mode()) {
487 // Intrinsics don't have a frame.
488 __ Ret();
489 return;
490 }
491
492#if defined(DEBUG)
493 compiler::Label stack_ok;
494 __ Comment("Stack Check");
495 const intptr_t fp_sp_dist =
496 (compiler::target::frame_layout.first_local_from_fp + 1 -
497 compiler->StackSize()) *
498 compiler::target::kWordSize;
499 ASSERT(fp_sp_dist <= 0);
500 __ sub(R2, SP, compiler::Operand(FP));
501 __ CompareImmediate(R2, fp_sp_dist);
502 __ b(&stack_ok, EQ);
503 __ bkpt(0);
504 __ Bind(&stack_ok);
505#endif
506 ASSERT(__ constant_pool_allowed());
507 if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
508 compiler->EmitYieldPositionMetadata(token_pos(), yield_index());
509 }
510 __ LeaveDartFrameAndReturn(); // Disallows constant pool use.
511 // This ReturnInstr may be emitted out of order by the optimizer. The next
512 // block may be a target expecting a properly set constant pool pointer.
513 __ set_constant_pool_allowed(true);
514}
515
516// Detect pattern when one value is zero and another is a power of 2.
517static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
518 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
519 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
520}
521
522LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
523 bool opt) const {
524 comparison()->InitializeLocationSummary(zone, opt);
525 return comparison()->locs();
526}
527
528void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
529 const Register result = locs()->out(0).reg();
530
531 Location left = locs()->in(0);
532 Location right = locs()->in(1);
533 ASSERT(!left.IsConstant() || !right.IsConstant());
534
535 // Clear out register.
536 __ eor(result, result, compiler::Operand(result));
537
538 // Emit comparison code. This must not overwrite the result register.
539 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
540 // the labels or returning an invalid condition.
541 BranchLabels labels = {NULL, NULL, NULL};
542 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
543 ASSERT(true_condition != kInvalidCondition);
544
545 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
546
547 intptr_t true_value = if_true_;
548 intptr_t false_value = if_false_;
549
550 if (is_power_of_two_kind) {
551 if (true_value == 0) {
552 // We need to have zero in result on true_condition.
553 true_condition = InvertCondition(true_condition);
554 }
555 } else {
556 if (true_value == 0) {
557 // Swap values so that false_value is zero.
558 intptr_t temp = true_value;
559 true_value = false_value;
560 false_value = temp;
561 } else {
562 true_condition = InvertCondition(true_condition);
563 }
564 }
565
566 __ mov(result, compiler::Operand(1), true_condition);
567
568 if (is_power_of_two_kind) {
569 const intptr_t shift =
570 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
571 __ Lsl(result, result, compiler::Operand(shift + kSmiTagSize));
572 } else {
573 __ sub(result, result, compiler::Operand(1));
574 const int32_t val = compiler::target::ToRawSmi(true_value) -
575 compiler::target::ToRawSmi(false_value);
576 __ AndImmediate(result, result, val);
577 if (false_value != 0) {
578 __ AddImmediate(result, compiler::target::ToRawSmi(false_value));
579 }
580 }
581}
582
583LocationSummary* DispatchTableCallInstr::MakeLocationSummary(Zone* zone,
584 bool opt) const {
585 const intptr_t kNumInputs = 1;
586 const intptr_t kNumTemps = 0;
587 LocationSummary* summary = new (zone)
588 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
589 summary->set_in(0, Location::RegisterLocation(R0)); // ClassId
590 return MakeCallSummary(zone, this, summary);
591}
592
593LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
594 bool opt) const {
595 const intptr_t kNumInputs = 1;
596 const intptr_t kNumTemps = 0;
597 LocationSummary* summary = new (zone)
598 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
599 summary->set_in(0, Location::RegisterLocation(R0)); // Function.
600 return MakeCallSummary(zone, this, summary);
601}
602
603void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
604 // Load arguments descriptor in R4.
605 const intptr_t argument_count = ArgumentCount(); // Includes type args.
606 const Array& arguments_descriptor =
607 Array::ZoneHandle(Z, GetArgumentsDescriptor());
608 __ LoadObject(R4, arguments_descriptor);
609
610 // R4: Arguments descriptor.
611 // R0: Function.
612 ASSERT(locs()->in(0).reg() == R0);
613 if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) {
614 __ ldr(CODE_REG, compiler::FieldAddress(
615 R0, compiler::target::Function::code_offset()));
616 }
617 __ ldr(R2,
618 compiler::FieldAddress(
619 R0, compiler::target::Function::entry_point_offset(entry_kind())));
620
621 // R2: instructions entry point.
622 if (!FLAG_precompiled_mode) {
623 // R9: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
624 __ LoadImmediate(R9, 0);
625 }
626 __ blx(R2);
627 compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
628 PcDescriptorsLayout::kOther, locs());
629 __ Drop(argument_count);
630}
631
632LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
633 bool opt) const {
634 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
635 LocationSummary::kNoCall);
636}
637
638void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
639 const Register result = locs()->out(0).reg();
640 __ LoadFromOffset(kWord, result, FP,
641 compiler::target::FrameOffsetInBytesForVariable(&local()));
642}
643
644LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
645 bool opt) const {
646 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
647 LocationSummary::kNoCall);
648}
649
650void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
651 const Register value = locs()->in(0).reg();
652 const Register result = locs()->out(0).reg();
653 ASSERT(result == value); // Assert that register assignment is correct.
654 __ StoreToOffset(kWord, value, FP,
655 compiler::target::FrameOffsetInBytesForVariable(&local()));
656}
657
658LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
659 bool opt) const {
660 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
661 LocationSummary::kNoCall);
662}
663
664void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
665 // The register allocator drops constant definitions that have no uses.
666 if (!locs()->out(0).IsInvalid()) {
667 const Register result = locs()->out(0).reg();
668 __ LoadObject(result, value());
669 }
670}
671
672void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
673 const Location& destination,
674 Register tmp) {
675 if (destination.IsRegister()) {
676 if (representation() == kUnboxedInt32) {
677 int64_t v;
678 const bool ok = compiler::HasIntegerValue(value_, &v);
679 RELEASE_ASSERT(ok);
680 __ LoadImmediate(destination.reg(), v);
681 } else {
682 ASSERT(representation() == kTagged);
683 __ LoadObject(destination.reg(), value_);
684 }
685 } else if (destination.IsFpuRegister()) {
686 const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
687 if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0) &&
688 TargetCPUFeatures::neon_supported()) {
689 QRegister qdst = destination.fpu_reg();
690 __ veorq(qdst, qdst, qdst);
691 } else {
692 ASSERT(tmp != kNoRegister);
693 __ LoadDImmediate(dst, Double::Cast(value_).value(), tmp);
694 }
695 } else if (destination.IsDoubleStackSlot()) {
696 if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0) &&
697 TargetCPUFeatures::neon_supported()) {
698 __ veorq(QTMP, QTMP, QTMP);
699 } else {
700 ASSERT(tmp != kNoRegister);
701 __ LoadDImmediate(DTMP, Double::Cast(value_).value(), tmp);
702 }
703 const intptr_t dest_offset = destination.ToStackSlotOffset();
704 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
705 } else {
706 ASSERT(destination.IsStackSlot());
707 ASSERT(tmp != kNoRegister);
708 const intptr_t dest_offset = destination.ToStackSlotOffset();
709 if (representation() == kUnboxedInt32) {
710 int64_t v;
711 const bool ok = compiler::HasIntegerValue(value_, &v);
712 RELEASE_ASSERT(ok);
713 __ LoadImmediate(tmp, v);
714 } else {
715 __ LoadObject(tmp, value_);
716 }
717 __ StoreToOffset(kWord, tmp, destination.base_reg(), dest_offset);
718 }
719}
720
721LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
722 bool opt) const {
723 const intptr_t kNumInputs = 0;
724 const intptr_t kNumTemps = (representation_ == kUnboxedInt32) ? 0 : 1;
725 LocationSummary* locs = new (zone)
726 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
727 if (representation_ == kUnboxedInt32) {
728 locs->set_out(0, Location::RequiresRegister());
729 } else {
730 ASSERT(representation_ == kUnboxedDouble);
731 locs->set_out(0, Location::RequiresFpuRegister());
732 }
733 if (kNumTemps > 0) {
734 locs->set_temp(0, Location::RequiresRegister());
735 }
736 return locs;
737}
738
739void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
740 // The register allocator drops constant definitions that have no uses.
741 if (!locs()->out(0).IsInvalid()) {
742 const Register scratch =
743 locs()->temp_count() == 0 ? kNoRegister : locs()->temp(0).reg();
744 EmitMoveToLocation(compiler, locs()->out(0), scratch);
745 }
746}
747
748LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
749 bool opt) const {
750 auto const dst_type_loc =
751 LocationFixedRegisterOrConstant(dst_type(), TypeTestABI::kDstTypeReg);
752
753 // When using a type testing stub, we want to prevent spilling of the
754 // function/instantiator type argument vectors, since stub preserves them. So
755 // we make this a `kNoCall` summary, even though most other registers can be
756 // modified by the stub. To tell the register allocator about it, we reserve
757 // all the other registers as temporary registers.
758 // TODO(http://dartbug.com/32788): Simplify this.
759 const bool using_stub = dst_type_loc.IsConstant() &&
760 FlowGraphCompiler::ShouldUseTypeTestingStubFor(
761 opt, AbstractType::Cast(dst_type_loc.constant()));
762
763 const intptr_t kNonChangeableInputRegs =
764 (1 << TypeTestABI::kInstanceReg) |
765 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
766 (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
767 (1 << TypeTestABI::kFunctionTypeArgumentsReg);
768
769 const intptr_t kNumInputs = 4;
770
771 // We invoke a stub that can potentially clobber any CPU register
772 // but can only clobber FPU registers on the slow path when
773 // entering runtime. Preserve all FPU registers that are
774 // not guarateed to be preserved by the ABI.
775 const intptr_t kCpuRegistersToPreserve =
776 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
777 const intptr_t kFpuRegistersToPreserve =
778 Utils::SignedNBitMask(kNumberOfFpuRegisters) &
779 ~(Utils::SignedNBitMask(kAbiPreservedFpuRegCount)
780 << kAbiFirstPreservedFpuReg) &
781 ~(1 << FpuTMP);
782
783 const intptr_t kNumTemps =
784 using_stub ? (Utils::CountOneBits64(kCpuRegistersToPreserve) +
785 Utils::CountOneBits64(kFpuRegistersToPreserve))
786 : 0;
787
788 LocationSummary* summary = new (zone) LocationSummary(
789 zone, kNumInputs, kNumTemps,
790 using_stub ? LocationSummary::kCallCalleeSafe : LocationSummary::kCall);
791 summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg));
792 summary->set_in(1, dst_type_loc);
793 summary->set_in(2, Location::RegisterLocation(
794 TypeTestABI::kInstantiatorTypeArgumentsReg));
795 summary->set_in(
796 3, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
797 summary->set_out(0, Location::SameAsFirstInput());
798
799 if (using_stub) {
800 // Let's reserve all registers except for the input ones.
801 intptr_t next_temp = 0;
802 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
803 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
804 if (should_preserve) {
805 summary->set_temp(next_temp++,
806 Location::RegisterLocation(static_cast<Register>(i)));
807 }
808 }
809
810 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
811 const bool should_preserve = ((1 << i) & kFpuRegistersToPreserve) != 0;
812 if (should_preserve) {
813 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
814 static_cast<FpuRegister>(i)));
815 }
816 }
817 }
818
819 return summary;
820}
821
822static Condition TokenKindToSmiCondition(Token::Kind kind) {
823 switch (kind) {
824 case Token::kEQ:
825 return EQ;
826 case Token::kNE:
827 return NE;
828 case Token::kLT:
829 return LT;
830 case Token::kGT:
831 return GT;
832 case Token::kLTE:
833 return LE;
834 case Token::kGTE:
835 return GE;
836 default:
837 UNREACHABLE();
838 return VS;
839 }
840}
841
842LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
843 bool opt) const {
844 const intptr_t kNumInputs = 2;
845 if (operation_cid() == kMintCid) {
846 const intptr_t kNumTemps = 0;
847 LocationSummary* locs = new (zone)
848 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
849 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
850 Location::RequiresRegister()));
851 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
852 Location::RequiresRegister()));
853 locs->set_out(0, Location::RequiresRegister());
854 return locs;
855 }
856 if (operation_cid() == kDoubleCid) {
857 const intptr_t kNumTemps = 0;
858 LocationSummary* locs = new (zone)
859 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
860 locs->set_in(0, Location::RequiresFpuRegister());
861 locs->set_in(1, Location::RequiresFpuRegister());
862 locs->set_out(0, Location::RequiresRegister());
863 return locs;
864 }
865 if (operation_cid() == kSmiCid) {
866 const intptr_t kNumTemps = 0;
867 LocationSummary* locs = new (zone)
868 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
869 locs->set_in(0, LocationRegisterOrConstant(left()));
870 // Only one input can be a constant operand. The case of two constant
871 // operands should be handled by constant propagation.
872 locs->set_in(1, locs->in(0).IsConstant()
873 ? Location::RequiresRegister()
874 : LocationRegisterOrConstant(right()));
875 locs->set_out(0, Location::RequiresRegister());
876 return locs;
877 }
878 UNREACHABLE();
879 return NULL;
880}
881
882static void LoadValueCid(FlowGraphCompiler* compiler,
883 Register value_cid_reg,
884 Register value_reg,
885 compiler::Label* value_is_smi = NULL) {
886 if (value_is_smi == NULL) {
887 __ mov(value_cid_reg, compiler::Operand(kSmiCid));
888 }
889 __ tst(value_reg, compiler::Operand(kSmiTagMask));
890 if (value_is_smi == NULL) {
891 __ LoadClassId(value_cid_reg, value_reg, NE);
892 } else {
893 __ b(value_is_smi, EQ);
894 __ LoadClassId(value_cid_reg, value_reg);
895 }
896}
897
898static Condition FlipCondition(Condition condition) {
899 switch (condition) {
900 case EQ:
901 return EQ;
902 case NE:
903 return NE;
904 case LT:
905 return GT;
906 case LE:
907 return GE;
908 case GT:
909 return LT;
910 case GE:
911 return LE;
912 case CC:
913 return HI;
914 case LS:
915 return CS;
916 case HI:
917 return CC;
918 case CS:
919 return LS;
920 default:
921 UNREACHABLE();
922 return EQ;
923 }
924}
925
926static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
927 Condition true_condition,
928 BranchLabels labels) {
929 if (labels.fall_through == labels.false_label) {
930 // If the next block is the false successor we will fall through to it.
931 __ b(labels.true_label, true_condition);
932 } else {
933 // If the next block is not the false successor we will branch to it.
934 Condition false_condition = InvertCondition(true_condition);
935 __ b(labels.false_label, false_condition);
936
937 // Fall through or jump to the true successor.
938 if (labels.fall_through != labels.true_label) {
939 __ b(labels.true_label);
940 }
941 }
942}
943
944static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
945 LocationSummary* locs,
946 Token::Kind kind) {
947 Location left = locs->in(0);
948 Location right = locs->in(1);
949 ASSERT(!left.IsConstant() || !right.IsConstant());
950
951 Condition true_condition = TokenKindToSmiCondition(kind);
952
953 if (left.IsConstant()) {
954 __ CompareObject(right.reg(), left.constant());
955 true_condition = FlipCondition(true_condition);
956 } else if (right.IsConstant()) {
957 __ CompareObject(left.reg(), right.constant());
958 } else {
959 __ cmp(left.reg(), compiler::Operand(right.reg()));
960 }
961 return true_condition;
962}
963
964static Condition TokenKindToMintCondition(Token::Kind kind) {
965 switch (kind) {
966 case Token::kEQ:
967 return EQ;
968 case Token::kNE:
969 return NE;
970 case Token::kLT:
971 return LT;
972 case Token::kGT:
973 return GT;
974 case Token::kLTE:
975 return LE;
976 case Token::kGTE:
977 return GE;
978 default:
979 UNREACHABLE();
980 return VS;
981 }
982}
983
984static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
985 LocationSummary* locs,
986 Token::Kind kind) {
987 ASSERT(Token::IsEqualityOperator(kind));
988 PairLocation* left_pair = locs->in(0).AsPairLocation();
989 Register left_lo = left_pair->At(0).reg();
990 Register left_hi = left_pair->At(1).reg();
991 PairLocation* right_pair = locs->in(1).AsPairLocation();
992 Register right_lo = right_pair->At(0).reg();
993 Register right_hi = right_pair->At(1).reg();
994
995 // Compare lower.
996 __ cmp(left_lo, compiler::Operand(right_lo));
997 // Compare upper if lower is equal.
998 __ cmp(left_hi, compiler::Operand(right_hi), EQ);
999 return TokenKindToMintCondition(kind);
1000}
1001
1002static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
1003 LocationSummary* locs,
1004 Token::Kind kind,
1005 BranchLabels labels) {
1006 PairLocation* left_pair = locs->in(0).AsPairLocation();
1007 Register left_lo = left_pair->At(0).reg();
1008 Register left_hi = left_pair->At(1).reg();
1009 PairLocation* right_pair = locs->in(1).AsPairLocation();
1010 Register right_lo = right_pair->At(0).reg();
1011 Register right_hi = right_pair->At(1).reg();
1012
1013 // 64-bit comparison.
1014 Condition hi_cond, lo_cond;
1015 switch (kind) {
1016 case Token::kLT:
1017 hi_cond = LT;
1018 lo_cond = CC;
1019 break;
1020 case Token::kGT:
1021 hi_cond = GT;
1022 lo_cond = HI;
1023 break;
1024 case Token::kLTE:
1025 hi_cond = LT;
1026 lo_cond = LS;
1027 break;
1028 case Token::kGTE:
1029 hi_cond = GT;
1030 lo_cond = CS;
1031 break;
1032 default:
1033 UNREACHABLE();
1034 hi_cond = lo_cond = VS;
1035 }
1036 // Compare upper halves first.
1037 __ cmp(left_hi, compiler::Operand(right_hi));
1038 __ b(labels.true_label, hi_cond);
1039 __ b(labels.false_label, FlipCondition(hi_cond));
1040
1041 // If higher words are equal, compare lower words.
1042 __ cmp(left_lo, compiler::Operand(right_lo));
1043 return lo_cond;
1044}
1045
1046static Condition TokenKindToDoubleCondition(Token::Kind kind) {
1047 switch (kind) {
1048 case Token::kEQ:
1049 return EQ;
1050 case Token::kNE:
1051 return NE;
1052 case Token::kLT:
1053 return LT;
1054 case Token::kGT:
1055 return GT;
1056 case Token::kLTE:
1057 return LE;
1058 case Token::kGTE:
1059 return GE;
1060 default:
1061 UNREACHABLE();
1062 return VS;
1063 }
1064}
1065
1066static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1067 LocationSummary* locs,
1068 BranchLabels labels,
1069 Token::Kind kind) {
1070 const QRegister left = locs->in(0).fpu_reg();
1071 const QRegister right = locs->in(1).fpu_reg();
1072 const DRegister dleft = EvenDRegisterOf(left);
1073 const DRegister dright = EvenDRegisterOf(right);
1074 __ vcmpd(dleft, dright);
1075 __ vmstat();
1076 Condition true_condition = TokenKindToDoubleCondition(kind);
1077 if (true_condition != NE) {
1078 // Special case for NaN comparison. Result is always false unless
1079 // relational operator is !=.
1080 __ b(labels.false_label, VS);
1081 }
1082 return true_condition;
1083}
1084
1085Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1086 BranchLabels labels) {
1087 if (operation_cid() == kSmiCid) {
1088 return EmitSmiComparisonOp(compiler, locs(), kind());
1089 } else if (operation_cid() == kMintCid) {
1090 return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
1091 } else {
1092 ASSERT(operation_cid() == kDoubleCid);
1093 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1094 }
1095}
1096
1097LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1098 const intptr_t kNumInputs = 2;
1099 const intptr_t kNumTemps = 0;
1100 LocationSummary* locs = new (zone)
1101 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1102 locs->set_in(0, Location::RequiresRegister());
1103 // Only one input can be a constant operand. The case of two constant
1104 // operands should be handled by constant propagation.
1105 locs->set_in(1, LocationRegisterOrConstant(right()));
1106 return locs;
1107}
1108
1109Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1110 BranchLabels labels) {
1111 const Register left = locs()->in(0).reg();
1112 Location right = locs()->in(1);
1113 if (right.IsConstant()) {
1114 ASSERT(compiler::target::IsSmi(right.constant()));
1115 const int32_t imm = compiler::target::ToRawSmi(right.constant());
1116 __ TestImmediate(left, imm);
1117 } else {
1118 __ tst(left, compiler::Operand(right.reg()));
1119 }
1120 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
1121 return true_condition;
1122}
1123
1124LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1125 bool opt) const {
1126 const intptr_t kNumInputs = 1;
1127 const intptr_t kNumTemps = 1;
1128 LocationSummary* locs = new (zone)
1129 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1130 locs->set_in(0, Location::RequiresRegister());
1131 locs->set_temp(0, Location::RequiresRegister());
1132 locs->set_out(0, Location::RequiresRegister());
1133 return locs;
1134}
1135
1136Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1137 BranchLabels labels) {
1138 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1139 const Register val_reg = locs()->in(0).reg();
1140 const Register cid_reg = locs()->temp(0).reg();
1141
1142 compiler::Label* deopt =
1143 CanDeoptimize()
1144 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids,
1145 licm_hoisted_ ? ICData::kHoisted : 0)
1146 : NULL;
1147
1148 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1149 const ZoneGrowableArray<intptr_t>& data = cid_results();
1150 ASSERT(data[0] == kSmiCid);
1151 bool result = data[1] == true_result;
1152 __ tst(val_reg, compiler::Operand(kSmiTagMask));
1153 __ b(result ? labels.true_label : labels.false_label, EQ);
1154 __ LoadClassId(cid_reg, val_reg);
1155
1156 for (intptr_t i = 2; i < data.length(); i += 2) {
1157 const intptr_t test_cid = data[i];
1158 ASSERT(test_cid != kSmiCid);
1159 result = data[i + 1] == true_result;
1160 __ CompareImmediate(cid_reg, test_cid);
1161 __ b(result ? labels.true_label : labels.false_label, EQ);
1162 }
1163 // No match found, deoptimize or default action.
1164 if (deopt == NULL) {
1165 // If the cid is not in the list, jump to the opposite label from the cids
1166 // that are in the list. These must be all the same (see asserts in the
1167 // constructor).
1168 compiler::Label* target = result ? labels.false_label : labels.true_label;
1169 if (target != labels.fall_through) {
1170 __ b(target);
1171 }
1172 } else {
1173 __ b(deopt);
1174 }
1175 // Dummy result as this method already did the jump, there's no need
1176 // for the caller to branch on a condition.
1177 return kInvalidCondition;
1178}
1179
1180LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1181 bool opt) const {
1182 const intptr_t kNumInputs = 2;
1183 const intptr_t kNumTemps = 0;
1184 if (operation_cid() == kMintCid) {
1185 const intptr_t kNumTemps = 0;
1186 LocationSummary* locs = new (zone)
1187 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1188 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1189 Location::RequiresRegister()));
1190 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1191 Location::RequiresRegister()));
1192 locs->set_out(0, Location::RequiresRegister());
1193 return locs;
1194 }
1195 if (operation_cid() == kDoubleCid) {
1196 LocationSummary* summary = new (zone)
1197 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1198 summary->set_in(0, Location::RequiresFpuRegister());
1199 summary->set_in(1, Location::RequiresFpuRegister());
1200 summary->set_out(0, Location::RequiresRegister());
1201 return summary;
1202 }
1203 ASSERT(operation_cid() == kSmiCid);
1204 LocationSummary* summary = new (zone)
1205 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1206 summary->set_in(0, LocationRegisterOrConstant(left()));
1207 // Only one input can be a constant operand. The case of two constant
1208 // operands should be handled by constant propagation.
1209 summary->set_in(1, summary->in(0).IsConstant()
1210 ? Location::RequiresRegister()
1211 : LocationRegisterOrConstant(right()));
1212 summary->set_out(0, Location::RequiresRegister());
1213 return summary;
1214}
1215
1216Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1217 BranchLabels labels) {
1218 if (operation_cid() == kSmiCid) {
1219 return EmitSmiComparisonOp(compiler, locs(), kind());
1220 } else if (operation_cid() == kMintCid) {
1221 return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);
1222 } else {
1223 ASSERT(operation_cid() == kDoubleCid);
1224 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1225 }
1226}
1227
1228void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1229 SetupNative();
1230 const Register result = locs()->out(0).reg();
1231
1232 // All arguments are already @SP due to preceding PushArgument()s.
1233 ASSERT(ArgumentCount() ==
1234 function().NumParameters() + (function().IsGeneric() ? 1 : 0));
1235
1236 // Push the result place holder initialized to NULL.
1237 __ PushObject(Object::null_object());
1238
1239 // Pass a pointer to the first argument in R2.
1240 __ add(R2, SP,
1241 compiler::Operand(ArgumentCount() * compiler::target::kWordSize));
1242
1243 // Compute the effective address. When running under the simulator,
1244 // this is a redirection address that forces the simulator to call
1245 // into the runtime system.
1246 uword entry;
1247 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1248 const Code* stub;
1249 if (link_lazily()) {
1250 stub = &StubCode::CallBootstrapNative();
1251 entry = NativeEntry::LinkNativeCallEntry();
1252 } else {
1253 entry = reinterpret_cast<uword>(native_c_function());
1254 if (is_bootstrap_native()) {
1255 stub = &StubCode::CallBootstrapNative();
1256 } else if (is_auto_scope()) {
1257 stub = &StubCode::CallAutoScopeNative();
1258 } else {
1259 stub = &StubCode::CallNoScopeNative();
1260 }
1261 }
1262 __ LoadImmediate(R1, argc_tag);
1263 compiler::ExternalLabel label(entry);
1264 __ LoadNativeEntry(R9, &label,
1265 link_lazily()
1266 ? compiler::ObjectPoolBuilderEntry::kPatchable
1267 : compiler::ObjectPoolBuilderEntry::kNotPatchable);
1268 if (link_lazily()) {
1269 compiler->GeneratePatchableCall(token_pos(), *stub,
1270 PcDescriptorsLayout::kOther, locs());
1271 } else {
1272 compiler->GenerateStubCall(token_pos(), *stub, PcDescriptorsLayout::kOther,
1273 locs());
1274 }
1275 __ Pop(result);
1276
1277 __ Drop(ArgumentCount()); // Drop the arguments.
1278}
1279
1280void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1281 const Register saved_fp = locs()->temp(0).reg();
1282 const Register temp = locs()->temp(1).reg();
1283 const Register branch = locs()->in(TargetAddressIndex()).reg();
1284
1285 // Save frame pointer because we're going to update it when we enter the exit
1286 // frame.
1287 __ mov(saved_fp, compiler::Operand(FPREG));
1288
1289 // Make a space to put the return address.
1290 __ PushImmediate(0);
1291
1292 // We need to create a dummy "exit frame". It will have a null code object.
1293 __ LoadObject(CODE_REG, Object::null_object());
1294 __ set_constant_pool_allowed(false);
1295 __ EnterDartFrame(0, /*load_pool_pointer=*/false);
1296
1297 // Reserve space for arguments and align frame before entering C++ world.
1298 __ ReserveAlignedFrameSpace(marshaller_.StackTopInBytes());
1299
1300 EmitParamMoves(compiler);
1301
1302 // We need to copy the return address up into the dummy stack frame so the
1303 // stack walker will know which safepoint to use.
1304 __ mov(TMP, compiler::Operand(PC));
1305 __ str(TMP, compiler::Address(FPREG, kSavedCallerPcSlotFromFp *
1306 compiler::target::kWordSize));
1307
1308 // For historical reasons, the PC on ARM points 8 bytes past the current
1309 // instruction. Therefore we emit the metadata here, 8 bytes (2 instructions)
1310 // after the original mov.
1311 compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, deopt_id(),
1312 PcDescriptorsLayout::Kind::kOther, locs());
1313
1314 // Update information in the thread object and enter a safepoint.
1315 if (CanExecuteGeneratedCodeInSafepoint()) {
1316 __ LoadImmediate(temp, compiler::target::Thread::exit_through_ffi());
1317 __ TransitionGeneratedToNative(branch, FPREG, temp, saved_fp,
1318 /*enter_safepoint=*/true);
1319
1320 __ blx(branch);
1321
1322 // Update information in the thread object and leave the safepoint.
1323 __ TransitionNativeToGenerated(saved_fp, temp, /*leave_safepoint=*/true);
1324 } else {
1325 // We cannot trust that this code will be executable within a safepoint.
1326 // Therefore we delegate the responsibility of entering/exiting the
1327 // safepoint to a stub which in the VM isolate's heap, which will never lose
1328 // execute permission.
1329 __ ldr(TMP,
1330 compiler::Address(
1331 THR, compiler::target::Thread::
1332 call_native_through_safepoint_entry_point_offset()));
1333
1334 // Calls R8 in a safepoint and clobbers R4 and NOTFP.
1335 ASSERT(branch == R8 && temp == R4);
1336 static_assert((kReservedCpuRegisters & (1 << NOTFP)) != 0,
1337 "NOTFP should be a reserved register");
1338 __ blx(TMP);
1339 }
1340
1341 // Restore the global object pool after returning from runtime (old space is
1342 // moving, so the GOP could have been relocated).
1343 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
1344 __ SetupGlobalPoolAndDispatchTable();
1345 }
1346
1347 EmitReturnMoves(compiler);
1348
1349 // Leave dummy exit frame.
1350 __ LeaveDartFrame();
1351 __ set_constant_pool_allowed(true);
1352
1353 // Instead of returning to the "fake" return address, we just pop it.
1354 __ PopRegister(TMP);
1355}
1356
1357void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1358 EmitReturnMoves(compiler);
1359
1360 __ LeaveDartFrame();
1361
1362 // The dummy return address is in LR, no need to pop it as on Intel.
1363
1364 // These can be anything besides the return registers (R0 and R1) and THR
1365 // (R10).
1366 const Register vm_tag_reg = R2;
1367 const Register old_exit_frame_reg = R3;
1368 const Register old_exit_through_ffi_reg = R4;
1369 const Register tmp = R5;
1370
1371 __ Pop(old_exit_frame_reg);
1372 __ Pop(old_exit_through_ffi_reg);
1373
1374 // Restore top_resource.
1375 __ Pop(tmp);
1376 __ StoreToOffset(kWord, tmp, THR,
1377 compiler::target::Thread::top_resource_offset());
1378
1379 __ Pop(vm_tag_reg);
1380
1381 // If we were called by a trampoline, it will enter the safepoint on our
1382 // behalf.
1383 __ TransitionGeneratedToNative(
1384 vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg, tmp,
1385 /*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
1386
1387 __ PopNativeCalleeSavedRegisters();
1388
1389#if defined(TARGET_OS_FUCHSIA)
1390 UNREACHABLE(); // Fuchsia does not allow dart:ffi.
1391#elif defined(USING_SHADOW_CALL_STACK)
1392#error Unimplemented
1393#endif
1394
1395 // Leave the entry frame.
1396 __ LeaveFrame(1 << LR | 1 << FP);
1397
1398 // Leave the dummy frame holding the pushed arguments.
1399 __ LeaveFrame(1 << LR | 1 << FP);
1400
1401 __ Ret();
1402
1403 // For following blocks.
1404 __ set_constant_pool_allowed(true);
1405}
1406
1407void NativeEntryInstr::SaveArgument(
1408 FlowGraphCompiler* compiler,
1409 const compiler::ffi::NativeLocation& nloc) const {
1410 if (nloc.IsFpuRegisters()) {
1411 auto const& fpu_loc = nloc.AsFpuRegisters();
1412 ASSERT(fpu_loc.fpu_reg_kind() != compiler::ffi::kQuadFpuReg);
1413 const intptr_t size = fpu_loc.payload_type().SizeInBytes();
1414 // TODO(dartbug.com/40469): Reduce code size.
1415 __ SubImmediate(SPREG, SPREG, 8);
1416 if (size == 8) {
1417 __ StoreDToOffset(fpu_loc.fpu_d_reg(), SPREG, 0);
1418 } else {
1419 ASSERT(size == 4);
1420 __ StoreSToOffset(fpu_loc.fpu_s_reg(), SPREG, 0);
1421 }
1422
1423 } else if (nloc.IsRegisters()) {
1424 const auto& reg_loc = nloc.WidenTo4Bytes(compiler->zone()).AsRegisters();
1425 const intptr_t num_regs = reg_loc.num_regs();
1426 // Save higher-order component first, so bytes are in little-endian layout
1427 // overall.
1428 for (intptr_t i = num_regs - 1; i >= 0; i--) {
1429 __ Push(reg_loc.reg_at(i));
1430 }
1431 }
1432}
1433
1434void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1435 // Constant pool cannot be used until we enter the actual Dart frame.
1436 __ set_constant_pool_allowed(false);
1437
1438 __ Bind(compiler->GetJumpLabel(this));
1439
1440 // Create a dummy frame holding the pushed arguments. This simplifies
1441 // NativeReturnInstr::EmitNativeCode.
1442 __ EnterFrame((1 << FP) | (1 << LR), 0);
1443
1444 // Save the argument registers, in reverse order.
1445 for (intptr_t i = marshaller_.num_args(); i-- > 0;) {
1446 SaveArgument(compiler, marshaller_.Location(i));
1447 }
1448
1449 // Enter the entry frame.
1450 __ EnterFrame((1 << FP) | (1 << LR), 0);
1451
1452 // Save a space for the code object.
1453 __ PushImmediate(0);
1454
1455#if defined(TARGET_OS_FUCHSIA)
1456 UNREACHABLE(); // Fuchsia does not allow dart:ffi.
1457#elif defined(USING_SHADOW_CALL_STACK)
1458#error Unimplemented
1459#endif
1460
1461 __ PushNativeCalleeSavedRegisters();
1462
1463 // Load the thread object. If we were called by a trampoline, the thread is
1464 // already loaded.
1465 if (FLAG_precompiled_mode) {
1466 compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, R1,
1467 R0);
1468 } else if (!NativeCallbackTrampolines::Enabled()) {
1469 // In JIT mode, we can just paste the address of the runtime entry into the
1470 // generated code directly. This is not a problem since we don't save
1471 // callbacks into JIT snapshots.
1472 ASSERT(kWordSize == compiler::target::kWordSize);
1473 __ LoadImmediate(
1474 R1, static_cast<compiler::target::uword>(
1475 reinterpret_cast<uword>(DLRT_GetThreadForNativeCallback)));
1476 }
1477
1478 // Load the thread object. If we were called by a trampoline, the thread is
1479 // already loaded.
1480 if (!NativeCallbackTrampolines::Enabled()) {
1481 // Create another frame to align the frame before continuing in "native"
1482 // code.
1483 __ EnterFrame(1 << FP, 0);
1484 __ ReserveAlignedFrameSpace(0);
1485
1486 __ LoadImmediate(R0, callback_id_);
1487 __ blx(R1);
1488 __ mov(THR, compiler::Operand(R0));
1489
1490 __ LeaveFrame(1 << FP);
1491 }
1492
1493 // Save the current VMTag on the stack.
1494 __ LoadFromOffset(kWord, R0, THR, compiler::target::Thread::vm_tag_offset());
1495 __ Push(R0);
1496
1497 // Save top resource.
1498 const intptr_t top_resource_offset =
1499 compiler::target::Thread::top_resource_offset();
1500 __ LoadFromOffset(kWord, R0, THR, top_resource_offset);
1501 __ Push(R0);
1502 __ LoadImmediate(R0, 0);
1503 __ StoreToOffset(kWord, R0, THR, top_resource_offset);
1504
1505 __ LoadFromOffset(kWord, R0, THR,
1506 compiler::target::Thread::exit_through_ffi_offset());
1507 __ Push(R0);
1508
1509 // Save top exit frame info. Don't set it to 0 yet,
1510 // TransitionNativeToGenerated will handle that.
1511 __ LoadFromOffset(kWord, R0, THR,
1512 compiler::target::Thread::top_exit_frame_info_offset());
1513 __ Push(R0);
1514
1515 __ EmitEntryFrameVerification(R0);
1516
1517 // Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller)
1518 // will leave the safepoint for us.
1519 __ TransitionNativeToGenerated(/*scratch0=*/R0, /*scratch1=*/R1,
1520 /*exit_safepoint=*/false);
1521
1522 // Now that the safepoint has ended, we can touch Dart objects without
1523 // handles.
1524
1525 // Load the code object.
1526 __ LoadFromOffset(kWord, R0, THR,
1527 compiler::target::Thread::callback_code_offset());
1528 __ LoadFieldFromOffset(kWord, R0, R0,
1529 compiler::target::GrowableObjectArray::data_offset());
1530 __ LoadFieldFromOffset(kWord, CODE_REG, R0,
1531 compiler::target::Array::data_offset() +
1532 callback_id_ * compiler::target::kWordSize);
1533
1534 // Put the code object in the reserved slot.
1535 __ StoreToOffset(kWord, CODE_REG, FPREG,
1536 kPcMarkerSlotFromFp * compiler::target::kWordSize);
1537 if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
1538 __ SetupGlobalPoolAndDispatchTable();
1539 } else {
1540 __ LoadImmediate(PP, 0); // GC safe value into PP.
1541 }
1542
1543 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1544 __ LoadImmediate(ARGS_DESC_REG, 0);
1545
1546 // Load a dummy return address which suggests that we are inside of
1547 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1548 __ LoadFromOffset(kWord, LR, THR,
1549 compiler::target::Thread::invoke_dart_code_stub_offset());
1550 __ LoadFieldFromOffset(kWord, LR, LR,
1551 compiler::target::Code::entry_point_offset());
1552
1553 FunctionEntryInstr::EmitNativeCode(compiler);
1554}
1555
1556LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
1557 Zone* zone,
1558 bool opt) const {
1559 const intptr_t kNumInputs = 1;
1560 // TODO(fschneider): Allow immediate operands for the char code.
1561 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1562 LocationSummary::kNoCall);
1563}
1564
1565void OneByteStringFromCharCodeInstr::EmitNativeCode(
1566 FlowGraphCompiler* compiler) {
1567 ASSERT(compiler->is_optimizing());
1568 const Register char_code = locs()->in(0).reg();
1569 const Register result = locs()->out(0).reg();
1570
1571 __ ldr(
1572 result,
1573 compiler::Address(
1574 THR, compiler::target::Thread::predefined_symbols_address_offset()));
1575 __ AddImmediate(
1576 result, Symbols::kNullCharCodeSymbolOffset * compiler::target::kWordSize);
1577 __ ldr(result,
1578 compiler::Address(result, char_code, LSL, 1)); // Char code is a smi.
1579}
1580
1581LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
1582 bool opt) const {
1583 const intptr_t kNumInputs = 1;
1584 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1585 LocationSummary::kNoCall);
1586}
1587
1588void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1589 ASSERT(cid_ == kOneByteStringCid);
1590 const Register str = locs()->in(0).reg();
1591 const Register result = locs()->out(0).reg();
1592 __ ldr(result, compiler::FieldAddress(
1593 str, compiler::target::String::length_offset()));
1594 __ cmp(result, compiler::Operand(compiler::target::ToRawSmi(1)));
1595 __ LoadImmediate(result, -1, NE);
1596 __ ldrb(result,
1597 compiler::FieldAddress(
1598 str, compiler::target::OneByteString::data_offset()),
1599 EQ);
1600 __ SmiTag(result);
1601}
1602
1603LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone,
1604 bool opt) const {
1605 const intptr_t kNumInputs = 1;
1606 const intptr_t kNumTemps = 0;
1607 LocationSummary* summary = new (zone)
1608 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
1609 summary->set_in(0, Location::RegisterLocation(R0));
1610 summary->set_out(0, Location::RegisterLocation(R0));
1611 return summary;
1612}
1613
1614void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1615 const Register array = locs()->in(0).reg();
1616 __ Push(array);
1617 const int kTypeArgsLen = 0;
1618 const int kNumberOfArguments = 1;
1619 constexpr int kSizeOfArguments = 1;
1620 const Array& kNoArgumentNames = Object::null_array();
1621 ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments,
1622 kNoArgumentNames);
1623 compiler->GenerateStaticCall(deopt_id(), token_pos(), CallFunction(),
1624 args_info, locs(), ICData::Handle(),
1625 ICData::kStatic);
1626 ASSERT(locs()->out(0).reg() == R0);
1627}
1628
1629LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
1630 bool opt) const {
1631 const intptr_t kNumInputs = 5;
1632 const intptr_t kNumTemps = 0;
1633 LocationSummary* summary = new (zone)
1634 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1635 summary->set_in(0, Location::Any()); // decoder
1636 summary->set_in(1, Location::WritableRegister()); // bytes
1637 summary->set_in(2, Location::WritableRegister()); // start
1638 summary->set_in(3, Location::WritableRegister()); // end
1639 summary->set_in(4, Location::WritableRegister()); // table
1640 summary->set_out(0, Location::RequiresRegister());
1641 return summary;
1642}
1643
1644void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1645 const Register bytes_reg = locs()->in(1).reg();
1646 const Register start_reg = locs()->in(2).reg();
1647 const Register end_reg = locs()->in(3).reg();
1648 const Register table_reg = locs()->in(4).reg();
1649 const Register size_reg = locs()->out(0).reg();
1650
1651 const Register bytes_ptr_reg = start_reg;
1652 const Register bytes_end_reg = end_reg;
1653 const Register flags_reg = bytes_reg;
1654 const Register temp_reg = TMP;
1655 const Register decoder_temp_reg = start_reg;
1656 const Register flags_temp_reg = end_reg;
1657
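// Each table entry describes one input byte: the low bits (kSizeMask) give
// its contribution to the decoded length, the remaining bits (kFlagsMask)
// give its scan flags. The loop below sums the sizes and ORs the flags.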
1658 static const intptr_t kSizeMask = 0x03;
1659 static const intptr_t kFlagsMask = 0x3C;
1660
1661 compiler::Label loop, loop_in;
1662
1663 // Address of input bytes.
1664 __ LoadFieldFromOffset(kWord, bytes_reg, bytes_reg,
1665 compiler::target::TypedDataBase::data_field_offset());
1666
1667 // Table.
1668 __ AddImmediate(
1669 table_reg, table_reg,
1670 compiler::target::OneByteString::data_offset() - kHeapObjectTag);
1671
1672 // Pointers to start and end.
1673 __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg));
1674 __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg));
1675
1676 // Initialize size and flags.
1677 __ LoadImmediate(size_reg, 0);
1678 __ LoadImmediate(flags_reg, 0);
1679
1680 __ b(&loop_in);
1681 __ Bind(&loop);
1682
1683 // Read byte and increment pointer.
1684 __ ldrb(temp_reg,
1685 compiler::Address(bytes_ptr_reg, 1, compiler::Address::PostIndex));
1686
1687 // Update size and flags based on byte value.
1688 __ ldrb(temp_reg, compiler::Address(table_reg, temp_reg));
1689 __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
1690 __ and_(temp_reg, temp_reg, compiler::Operand(kSizeMask));
1691 __ add(size_reg, size_reg, compiler::Operand(temp_reg));
1692
1693 // Stop if end is reached.
1694 __ Bind(&loop_in);
1695 __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg));
1696 __ b(&loop, UNSIGNED_LESS);
1697
1698 // Write flags to field.
1699 __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
1700 if (!IsScanFlagsUnboxed()) {
1701 __ SmiTag(flags_reg);
1702 }
1703 Register decoder_reg;
1704 const Location decoder_location = locs()->in(0);
1705 if (decoder_location.IsStackSlot()) {
1706 __ ldr(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
1707 decoder_reg = decoder_temp_reg;
1708 } else {
1709 decoder_reg = decoder_location.reg();
1710 }
1711 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
1712 __ LoadFieldFromOffset(kWord, flags_temp_reg, decoder_reg,
1713 scan_flags_field_offset);
1714 __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
1715 __ StoreFieldToOffset(kWord, flags_temp_reg, decoder_reg,
1716 scan_flags_field_offset);
1717}
1718
1719LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
1720 bool opt) const {
1721 const intptr_t kNumInputs = 1;
1722 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1723 LocationSummary::kNoCall);
1724}
1725
1726void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1727 const Register obj = locs()->in(0).reg();
1728 const Register result = locs()->out(0).reg();
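// An untagged input is a raw pointer, so the offset is applied directly;
// a tagged input needs the heap-object tag compensated by the field load.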
1729 if (object()->definition()->representation() == kUntagged) {
1730 __ LoadFromOffset(kWord, result, obj, offset());
1731 } else {
1732 ASSERT(object()->definition()->representation() == kTagged);
1733 __ LoadFieldFromOffset(kWord, result, obj, offset());
1734 }
1735}
1736
1737DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
1738 __ StoreToOffset(kWord, value, obj, instr->offset_from_tagged());
1739}
1740
1741Representation LoadIndexedInstr::representation() const {
1742 switch (class_id_) {
1743 case kArrayCid:
1744 case kImmutableArrayCid:
1745 return kTagged;
1746 case kOneByteStringCid:
1747 case kTwoByteStringCid:
1748 case kTypedDataInt8ArrayCid:
1749 case kTypedDataInt16ArrayCid:
1750 case kTypedDataUint8ArrayCid:
1751 case kTypedDataUint8ClampedArrayCid:
1752 case kTypedDataUint16ArrayCid:
1753 case kExternalOneByteStringCid:
1754 case kExternalTwoByteStringCid:
1755 case kExternalTypedDataUint8ArrayCid:
1756 case kExternalTypedDataUint8ClampedArrayCid:
1757 return kUnboxedIntPtr;
1758 case kTypedDataInt32ArrayCid:
1759 return kUnboxedInt32;
1760 case kTypedDataUint32ArrayCid:
1761 return kUnboxedUint32;
1762 case kTypedDataInt64ArrayCid:
1763 case kTypedDataUint64ArrayCid:
1764 return kUnboxedInt64;
1765 case kTypedDataFloat32ArrayCid:
1766 case kTypedDataFloat64ArrayCid:
1767 return kUnboxedDouble;
1768 case kTypedDataInt32x4ArrayCid:
1769 return kUnboxedInt32x4;
1770 case kTypedDataFloat32x4ArrayCid:
1771 return kUnboxedFloat32x4;
1772 case kTypedDataFloat64x2ArrayCid:
1773 return kUnboxedFloat64x2;
1774 default:
1775 UNREACHABLE();
1776 return kTagged;
1777 }
1778}
1779
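// Returns true if the index is a constant whose scaled offset can be encoded
// as an immediate in the memory operand. If only the offset relative to the
// start of the element data fits, *needs_base is set so that the caller first
// materializes the data base address in a temporary register.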
1780static bool CanBeImmediateIndex(Value* value,
1781 intptr_t cid,
1782 bool is_external,
1783 bool is_load,
1784 bool* needs_base) {
1785 if ((cid == kTypedDataInt32x4ArrayCid) ||
1786 (cid == kTypedDataFloat32x4ArrayCid) ||
1787 (cid == kTypedDataFloat64x2ArrayCid)) {
1788 // We are using vldmd/vstmd, which do not support an offset.
1789 return false;
1790 }
1791
1792 ConstantInstr* constant = value->definition()->AsConstant();
1793 if ((constant == NULL) ||
1794 !compiler::Assembler::IsSafeSmi(constant->value())) {
1795 return false;
1796 }
1797 const int64_t index = compiler::target::SmiValue(constant->value());
1798 const intptr_t scale = compiler::target::Instance::ElementSizeFor(cid);
1799 const intptr_t base_offset =
1800 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
1801 const int64_t offset = index * scale + base_offset;
1802 if (!Utils::IsAbsoluteUint(12, offset)) {
1803 return false;
1804 }
1805 if (compiler::Address::CanHoldImmediateOffset(is_load, cid, offset)) {
1806 *needs_base = false;
1807 return true;
1808 }
1809
1810 if (compiler::Address::CanHoldImmediateOffset(is_load, cid,
1811 offset - base_offset)) {
1812 *needs_base = true;
1813 return true;
1814 }
1815
1816 return false;
1817}
1818
1819LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
1820 bool opt) const {
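// Unaligned accesses and 64-bit element loads cannot use a plain addressing
// mode; they need the element address materialized in a temp register.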
1821 const bool directly_addressable =
1822 aligned() && representation() != kUnboxedInt64;
1823 const intptr_t kNumInputs = 2;
1824 intptr_t kNumTemps = 0;
1825
1826 if (!directly_addressable) {
1827 kNumTemps += 1;
1828 if (representation() == kUnboxedDouble) {
1829 kNumTemps += 1;
1830 }
1831 }
1832 LocationSummary* locs = new (zone)
1833 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1834 locs->set_in(0, Location::RequiresRegister());
1835 bool needs_base = false;
1836 if (CanBeImmediateIndex(index(), class_id(), IsExternal(),
1837 true, // Load.
1838 &needs_base)) {
1839 // CanBeImmediateIndex must return false for unsafe smis.
1840 locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
1841 } else {
1842 locs->set_in(1, Location::RequiresRegister());
1843 }
1844 if ((representation() == kUnboxedDouble) ||
1845 (representation() == kUnboxedFloat32x4) ||
1846 (representation() == kUnboxedInt32x4) ||
1847 (representation() == kUnboxedFloat64x2)) {
1848 if (class_id() == kTypedDataFloat32ArrayCid) {
1849 // Need register < Q7 for float operations.
1850 // TODO(30953): Support register range constraints in the regalloc.
1851 locs->set_out(0, Location::FpuRegisterLocation(Q6));
1852 } else {
1853 locs->set_out(0, Location::RequiresFpuRegister());
1854 }
1855 } else if (representation() == kUnboxedInt64) {
1856 ASSERT(class_id() == kTypedDataInt64ArrayCid ||
1857 class_id() == kTypedDataUint64ArrayCid);
1858 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
1859 Location::RequiresRegister()));
1860 } else {
1861 locs->set_out(0, Location::RequiresRegister());
1862 }
1863 if (!directly_addressable) {
1864 locs->set_temp(0, Location::RequiresRegister());
1865 if (representation() == kUnboxedDouble) {
1866 locs->set_temp(1, Location::RequiresRegister());
1867 }
1868 }
1869 return locs;
1870}
1871
1872void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1873 const bool directly_addressable =
1874 aligned() && representation() != kUnboxedInt64;
1875 // The array register points to the backing store for external arrays.
1876 const Register array = locs()->in(0).reg();
1877 const Location index = locs()->in(1);
1878 const Register address =
1879 directly_addressable ? kNoRegister : locs()->temp(0).reg();
1880
1881 compiler::Address element_address(kNoRegister);
1882 if (directly_addressable) {
1883 element_address =
1884 index.IsRegister()
1885 ? __ ElementAddressForRegIndex(true, // Load.
1886 IsExternal(), class_id(),
1887 index_scale(), index_unboxed_, array,
1888 index.reg())
1889 : __ ElementAddressForIntIndex(
1890 true, // Load.
1891 IsExternal(), class_id(), index_scale(), array,
1892 compiler::target::SmiValue(index.constant()),
1893 IP); // Temp register.
1894 // Warning: element_address may use register IP as base.
1895 } else {
1896 if (index.IsRegister()) {
1897 __ LoadElementAddressForRegIndex(address,
1898 true, // Load.
1899 IsExternal(), class_id(), index_scale(),
1900 index_unboxed_, array, index.reg());
1901 } else {
1902 __ LoadElementAddressForIntIndex(
1903 address,
1904 true, // Load.
1905 IsExternal(), class_id(), index_scale(), array,
1906 compiler::target::SmiValue(index.constant()));
1907 }
1908 }
1909
1910 if ((representation() == kUnboxedDouble) ||
1911 (representation() == kUnboxedFloat32x4) ||
1912 (representation() == kUnboxedInt32x4) ||
1913 (representation() == kUnboxedFloat64x2)) {
1914 const QRegister result = locs()->out(0).fpu_reg();
1915 const DRegister dresult0 = EvenDRegisterOf(result);
1916 switch (class_id()) {
1917 case kTypedDataFloat32ArrayCid:
1918 // Load single precision float.
1919 // vldrs does not support indexed addressing.
1920 if (aligned()) {
1921 __ vldrs(EvenSRegisterOf(dresult0), element_address);
1922 } else {
1923 const Register value = locs()->temp(1).reg();
1924 __ LoadWordUnaligned(value, address, TMP);
1925 __ vmovsr(EvenSRegisterOf(dresult0), value);
1926 }
1927 break;
1928 case kTypedDataFloat64ArrayCid:
1929 // vldrd does not support indexed addressing.
1930 if (aligned()) {
1931 __ vldrd(dresult0, element_address);
1932 } else {
1933 const Register value = locs()->temp(1).reg();
1934 __ LoadWordUnaligned(value, address, TMP);
1935 __ vmovdr(dresult0, 0, value);
1936 __ AddImmediate(address, address, 4);
1937 __ LoadWordUnaligned(value, address, TMP);
1938 __ vmovdr(dresult0, 1, value);
1939 }
1940 break;
1941 case kTypedDataFloat64x2ArrayCid:
1942 case kTypedDataInt32x4ArrayCid:
1943 case kTypedDataFloat32x4ArrayCid:
1944 ASSERT(element_address.Equals(compiler::Address(IP)));
1945 ASSERT(aligned());
1946 __ vldmd(IA, IP, dresult0, 2);
1947 break;
1948 default:
1949 UNREACHABLE();
1950 }
1951 return;
1952 }
1953
1954 switch (class_id()) {
1955 case kTypedDataInt32ArrayCid: {
1956 const Register result = locs()->out(0).reg();
1957 ASSERT(representation() == kUnboxedInt32);
1958 if (aligned()) {
1959 __ ldr(result, element_address);
1960 } else {
1961 __ LoadWordUnaligned(result, address, TMP);
1962 }
1963 break;
1964 }
1965 case kTypedDataUint32ArrayCid: {
1966 const Register result = locs()->out(0).reg();
1967 ASSERT(representation() == kUnboxedUint32);
1968 if (aligned()) {
1969 __ ldr(result, element_address);
1970 } else {
1971 __ LoadWordUnaligned(result, address, TMP);
1972 }
1973 break;
1974 }
1975 case kTypedDataInt64ArrayCid:
1976 case kTypedDataUint64ArrayCid: {
1977 ASSERT(representation() == kUnboxedInt64);
1978 ASSERT(!directly_addressable); // need to add to register
1979 ASSERT(locs()->out(0).IsPairLocation());
1980 PairLocation* result_pair = locs()->out(0).AsPairLocation();
1981 const Register result_lo = result_pair->At(0).reg();
1982 const Register result_hi = result_pair->At(1).reg();
1983 if (aligned()) {
1984 __ ldr(result_lo, compiler::Address(address));
1985 __ ldr(result_hi,
1986 compiler::Address(address, compiler::target::kWordSize));
1987 } else {
1988 __ LoadWordUnaligned(result_lo, address, TMP);
1989 __ AddImmediate(address, address, compiler::target::kWordSize);
1990 __ LoadWordUnaligned(result_hi, address, TMP);
1991 }
1992 break;
1993 }
1994 case kTypedDataInt8ArrayCid: {
1995 const Register result = locs()->out(0).reg();
1996 ASSERT(representation() == kUnboxedIntPtr);
1997 ASSERT(index_scale() == 1);
1998 ASSERT(aligned());
1999 __ ldrsb(result, element_address);
2000 break;
2001 }
2002 case kTypedDataUint8ArrayCid:
2003 case kTypedDataUint8ClampedArrayCid:
2004 case kExternalTypedDataUint8ArrayCid:
2005 case kExternalTypedDataUint8ClampedArrayCid:
2006 case kOneByteStringCid:
2007 case kExternalOneByteStringCid: {
2008 const Register result = locs()->out(0).reg();
2009 ASSERT(representation() == kUnboxedIntPtr);
2010 ASSERT(index_scale() == 1);
2011 ASSERT(aligned());
2012 __ ldrb(result, element_address);
2013 break;
2014 }
2015 case kTypedDataInt16ArrayCid: {
2016 const Register result = locs()->out(0).reg();
2017 ASSERT(representation() == kUnboxedIntPtr);
2018 if (aligned()) {
2019 __ ldrsh(result, element_address);
2020 } else {
2021 __ LoadHalfWordUnaligned(result, address, TMP);
2022 }
2023 break;
2024 }
2025 case kTypedDataUint16ArrayCid:
2026 case kTwoByteStringCid:
2027 case kExternalTwoByteStringCid: {
2028 const Register result = locs()->out(0).reg();
2029 ASSERT(representation() == kUnboxedIntPtr);
2030 if (aligned()) {
2031 __ ldrh(result, element_address);
2032 } else {
2033 __ LoadHalfWordUnsignedUnaligned(result, address, TMP);
2034 }
2035 break;
2036 }
2037 default: {
2038 const Register result = locs()->out(0).reg();
2039 ASSERT(representation() == kTagged);
2040 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid));
2041 __ ldr(result, element_address);
2042 break;
2043 }
2044 }
2045}
2046
2047Representation StoreIndexedInstr::RequiredInputRepresentation(
2048 intptr_t idx) const {
2049 // Array can be a Dart object or a pointer to external data.
2050 if (idx == 0) return kNoRepresentation; // Flexible input representation.
2051 if (idx == 1) {
2052 if (index_unboxed_) {
2053 // TODO(dartbug.com/39432): kUnboxedInt32 || kUnboxedUint32.
2054 return kNoRepresentation;
2055 } else {
2056 return kTagged; // Index is a smi.
2057 }
2058 }
2059 ASSERT(idx == 2);
2060 switch (class_id_) {
2061 case kArrayCid:
2062 return kTagged;
2063 case kOneByteStringCid:
2064 case kTwoByteStringCid:
2065 case kTypedDataInt8ArrayCid:
2066 case kTypedDataInt16ArrayCid:
2067 case kTypedDataUint8ArrayCid:
2068 case kTypedDataUint8ClampedArrayCid:
2069 case kTypedDataUint16ArrayCid:
2070 case kExternalTypedDataUint8ArrayCid:
2071 case kExternalTypedDataUint8ClampedArrayCid:
2072 return kUnboxedIntPtr;
2073 case kTypedDataInt32ArrayCid:
2074 return kUnboxedInt32;
2075 case kTypedDataUint32ArrayCid:
2076 return kUnboxedUint32;
2077 case kTypedDataInt64ArrayCid:
2078 case kTypedDataUint64ArrayCid:
2079 return kUnboxedInt64;
2080 case kTypedDataFloat32ArrayCid:
2081 case kTypedDataFloat64ArrayCid:
2082 return kUnboxedDouble;
2083 case kTypedDataFloat32x4ArrayCid:
2084 return kUnboxedFloat32x4;
2085 case kTypedDataInt32x4ArrayCid:
2086 return kUnboxedInt32x4;
2087 case kTypedDataFloat64x2ArrayCid:
2088 return kUnboxedFloat64x2;
2089 default:
2090 UNREACHABLE();
2091 return kTagged;
2092 }
2093}
2094
2095LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2096 bool opt) const {
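// Unaligned accesses, 64-bit element stores and stores into Dart arrays
// (which may need a write barrier) go through an explicit element address
// held in a temp register.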
2097 const bool directly_addressable =
2098 aligned() && class_id() != kTypedDataInt64ArrayCid &&
2099 class_id() != kTypedDataUint64ArrayCid && class_id() != kArrayCid;
2100 const intptr_t kNumInputs = 3;
2101 LocationSummary* locs;
2102
2103 bool needs_base = false;
2104 intptr_t kNumTemps = 0;
2105 if (CanBeImmediateIndex(index(), class_id(), IsExternal(),
2106 false, // Store.
2107 &needs_base)) {
2108 if (!directly_addressable) {
2109 kNumTemps += 2;
2110 } else if (needs_base) {
2111 kNumTemps += 1;
2112 }
2113
2114 locs = new (zone)
2115 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2116
2117 // CanBeImmediateIndex must return false for unsafe smis.
2118 locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
2119 } else {
2120 if (!directly_addressable) {
2121 kNumTemps += 2;
2122 }
2123
2124 locs = new (zone)
2125 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2126
2127 locs->set_in(1, Location::WritableRegister());
2128 }
2129 locs->set_in(0, Location::RequiresRegister());
2130 for (intptr_t i = 0; i < kNumTemps; i++) {
2131 locs->set_temp(i, Location::RequiresRegister());
2132 }
2133
2134 switch (class_id()) {
2135 case kArrayCid:
2136 locs->set_in(2, ShouldEmitStoreBarrier()
2137 ? Location::RegisterLocation(kWriteBarrierValueReg)
2138 : LocationRegisterOrConstant(value()));
2139 if (ShouldEmitStoreBarrier()) {
2140 locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
2141 locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
2142 }
2143 break;
2144 case kExternalTypedDataUint8ArrayCid:
2145 case kExternalTypedDataUint8ClampedArrayCid:
2146 case kTypedDataInt8ArrayCid:
2147 case kTypedDataUint8ArrayCid:
2148 case kTypedDataUint8ClampedArrayCid:
2149 case kOneByteStringCid:
2150 case kTwoByteStringCid:
2151 case kTypedDataInt16ArrayCid:
2152 case kTypedDataUint16ArrayCid:
2153 case kTypedDataInt32ArrayCid:
2154 case kTypedDataUint32ArrayCid:
2155 locs->set_in(2, Location::RequiresRegister());
2156 break;
2157 case kTypedDataInt64ArrayCid:
2158 case kTypedDataUint64ArrayCid:
2159 locs->set_in(2, Location::Pair(Location::RequiresRegister(),
2160 Location::RequiresRegister()));
2161 break;
2162 case kTypedDataFloat32ArrayCid:
2163 // Need low register (< Q7).
2164 locs->set_in(2, Location::FpuRegisterLocation(Q6));
2165 break;
2166 case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants.
2167 case kTypedDataInt32x4ArrayCid:
2168 case kTypedDataFloat32x4ArrayCid:
2169 case kTypedDataFloat64x2ArrayCid:
2170 locs->set_in(2, Location::RequiresFpuRegister());
2171 break;
2172 default:
2173 UNREACHABLE();
2174 return NULL;
2175 }
2176
2177 return locs;
2178}
2179
2180void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2181 const bool directly_addressable =
2182 aligned() && class_id() != kTypedDataInt64ArrayCid &&
2183 class_id() != kTypedDataUint64ArrayCid && class_id() != kArrayCid;
2184 // The array register points to the backing store for external arrays.
2185 const Register array = locs()->in(0).reg();
2186 const Location index = locs()->in(1);
2187 const Register temp =
2188 (locs()->temp_count() > 0) ? locs()->temp(0).reg() : kNoRegister;
2189 const Register temp2 =
2190 (locs()->temp_count() > 1) ? locs()->temp(1).reg() : kNoRegister;
2191
2192 compiler::Address element_address(kNoRegister);
2193 if (directly_addressable) {
2194 element_address =
2195 index.IsRegister()
2196 ? __ ElementAddressForRegIndex(false, // Store.
2197 IsExternal(), class_id(),
2198 index_scale(), index_unboxed_, array,
2199 index.reg())
2200 : __ ElementAddressForIntIndex(
2201 false, // Store.
2202 IsExternal(), class_id(), index_scale(), array,
2203 compiler::target::SmiValue(index.constant()), temp);
2204 } else {
2205 if (index.IsRegister()) {
2206 __ LoadElementAddressForRegIndex(temp,
2207 false, // Store.
2208 IsExternal(), class_id(), index_scale(),
2209 index_unboxed_, array, index.reg());
2210 } else {
2211 __ LoadElementAddressForIntIndex(
2212 temp,
2213 false, // Store.
2214 IsExternal(), class_id(), index_scale(), array,
2215 compiler::target::SmiValue(index.constant()));
2216 }
2217 }
2218
2219 switch (class_id()) {
2220 case kArrayCid:
2221 if (ShouldEmitStoreBarrier()) {
2222 const Register value = locs()->in(2).reg();
2223 __ StoreIntoArray(array, temp, value, CanValueBeSmi(),
2224 /*lr_reserved=*/!compiler->intrinsic_mode());
2225 } else if (locs()->in(2).IsConstant()) {
2226 const Object& constant = locs()->in(2).constant();
2227 __ StoreIntoObjectNoBarrier(array, compiler::Address(temp), constant);
2228 } else {
2229 const Register value = locs()->in(2).reg();
2230 __ StoreIntoObjectNoBarrier(array, compiler::Address(temp), value);
2231 }
2232 break;
2233 case kTypedDataInt8ArrayCid:
2234 case kTypedDataUint8ArrayCid:
2235 case kExternalTypedDataUint8ArrayCid:
2236 case kOneByteStringCid: {
2237 ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
2238 if (locs()->in(2).IsConstant()) {
2239 __ LoadImmediate(IP,
2240 compiler::target::SmiValue(locs()->in(2).constant()));
2241 __ strb(IP, element_address);
2242 } else {
2243 const Register value = locs()->in(2).reg();
2244 __ strb(value, element_address);
2245 }
2246 break;
2247 }
2248 case kTypedDataUint8ClampedArrayCid:
2249 case kExternalTypedDataUint8ClampedArrayCid: {
2250 ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
2251 if (locs()->in(2).IsConstant()) {
2252 intptr_t value = compiler::target::SmiValue(locs()->in(2).constant());
2253 // Clamp to 0x00 or 0xFF respectively.
2254 if (value > 0xFF) {
2255 value = 0xFF;
2256 } else if (value < 0) {
2257 value = 0;
2258 }
2259 __ LoadImmediate(IP, static_cast<int8_t>(value));
2260 __ strb(IP, element_address);
2261 } else {
2262 const Register value = locs()->in(2).reg();
2263 // Clamp to 0x00 or 0xFF respectively.
2264 __ LoadImmediate(IP, 0xFF);
2265 __ cmp(value,
2266 compiler::Operand(IP)); // Compare Smi value and smi 0xFF.
2267 __ mov(IP, compiler::Operand(0), LE); // IP = value <= 0xFF ? 0 : 0xFF.
2268 __ mov(IP, compiler::Operand(value),
2269 LS); // IP = value in range ? value : IP.
2270 __ strb(IP, element_address);
2271 }
2272 break;
2273 }
2274 case kTwoByteStringCid:
2275 case kTypedDataInt16ArrayCid:
2276 case kTypedDataUint16ArrayCid: {
2277 ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
2278 const Register value = locs()->in(2).reg();
2279 if (aligned()) {
2280 __ strh(value, element_address);
2281 } else {
2282 __ StoreHalfWordUnaligned(value, temp, temp2);
2283 }
2284 break;
2285 }
2286 case kTypedDataInt32ArrayCid:
2287 case kTypedDataUint32ArrayCid: {
2288 const Register value = locs()->in(2).reg();
2289 if (aligned()) {
2290 __ str(value, element_address);
2291 } else {
2292 __ StoreWordUnaligned(value, temp, temp2);
2293 }
2294 break;
2295 }
2296 case kTypedDataInt64ArrayCid:
2297 case kTypedDataUint64ArrayCid: {
2298 ASSERT(!directly_addressable); // need to add to register
2299 ASSERT(locs()->in(2).IsPairLocation());
2300 PairLocation* value_pair = locs()->in(2).AsPairLocation();
2301 Register value_lo = value_pair->At(0).reg();
2302 Register value_hi = value_pair->At(1).reg();
2303 if (aligned()) {
2304 __ str(value_lo, compiler::Address(temp));
2305 __ str(value_hi, compiler::Address(temp, compiler::target::kWordSize));
2306 } else {
2307 __ StoreWordUnaligned(value_lo, temp, temp2);
2308 __ AddImmediate(temp, temp, compiler::target::kWordSize);
2309 __ StoreWordUnaligned(value_hi, temp, temp2);
2310 }
2311 break;
2312 }
2313 case kTypedDataFloat32ArrayCid: {
2314 const SRegister value_reg =
2315 EvenSRegisterOf(EvenDRegisterOf(locs()->in(2).fpu_reg()));
2316 if (aligned()) {
2317 __ vstrs(value_reg, element_address);
2318 } else {
2319 const Register address = temp;
2320 const Register value = temp2;
2321 __ vmovrs(value, value_reg);
2322 __ StoreWordUnaligned(value, address, TMP);
2323 }
2324 break;
2325 }
2326 case kTypedDataFloat64ArrayCid: {
2327 const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
2328 if (aligned()) {
2329 __ vstrd(value_reg, element_address);
2330 } else {
2331 const Register address = temp;
2332 const Register value = temp2;
2333 __ vmovrs(value, EvenSRegisterOf(value_reg));
2334 __ StoreWordUnaligned(value, address, TMP);
2335 __ AddImmediate(address, address, 4);
2336 __ vmovrs(value, OddSRegisterOf(value_reg));
2337 __ StoreWordUnaligned(value, address, TMP);
2338 }
2339 break;
2340 }
2341 case kTypedDataFloat64x2ArrayCid:
2342 case kTypedDataInt32x4ArrayCid:
2343 case kTypedDataFloat32x4ArrayCid: {
2344 ASSERT(element_address.Equals(compiler::Address(index.reg())));
2345 ASSERT(aligned());
2346 const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
2347 __ vstmd(IA, index.reg(), value_reg, 2);
2348 break;
2349 }
2350 default:
2351 UNREACHABLE();
2352 }
2353}
2354
2355LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2356 bool opt) const {
2357 const intptr_t kNumInputs = 1;
2358
2359 const intptr_t value_cid = value()->Type()->ToCid();
2360 const intptr_t field_cid = field().guarded_cid();
2361
2362 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2363
2364 const bool needs_value_cid_temp_reg =
2365 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2366
2367 const bool needs_field_temp_reg = emit_full_guard;
2368
2369 intptr_t num_temps = 0;
2370 if (needs_value_cid_temp_reg) {
2371 num_temps++;
2372 }
2373 if (needs_field_temp_reg) {
2374 num_temps++;
2375 }
2376
2377 LocationSummary* summary = new (zone)
2378 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2379 summary->set_in(0, Location::RequiresRegister());
2380
2381 for (intptr_t i = 0; i < num_temps; i++) {
2382 summary->set_temp(i, Location::RequiresRegister());
2383 }
2384
2385 return summary;
2386}
2387
2388void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2389 ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
2390 ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
2391 ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
2392
2393 const intptr_t value_cid = value()->Type()->ToCid();
2394 const intptr_t field_cid = field().guarded_cid();
2395 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2396
2397 if (field_cid == kDynamicCid) {
2398 return; // Nothing to emit.
2399 }
2400
2401 const bool emit_full_guard =
2402 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2403
2404 const bool needs_value_cid_temp_reg =
2405 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2406
2407 const bool needs_field_temp_reg = emit_full_guard;
2408
2409 const Register value_reg = locs()->in(0).reg();
2410
2411 const Register value_cid_reg =
2412 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2413
2414 const Register field_reg = needs_field_temp_reg
2415 ? locs()->temp(locs()->temp_count() - 1).reg()
2416 : kNoRegister;
2417
2418 compiler::Label ok, fail_label;
2419
2420 compiler::Label* deopt =
2421 compiler->is_optimizing()
2422 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2423 : NULL;
2424
2425 compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label;
2426
2427 if (emit_full_guard) {
2428 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2429
2430 compiler::FieldAddress field_cid_operand(
2431 field_reg, compiler::target::Field::guarded_cid_offset());
2432 compiler::FieldAddress field_nullability_operand(
2433 field_reg, compiler::target::Field::is_nullable_offset());
2434
2435 if (value_cid == kDynamicCid) {
2436 LoadValueCid(compiler, value_cid_reg, value_reg);
2437 __ ldrh(IP, field_cid_operand);
2438 __ cmp(value_cid_reg, compiler::Operand(IP));
2439 __ b(&ok, EQ);
2440 __ ldrh(IP, field_nullability_operand);
2441 __ cmp(value_cid_reg, compiler::Operand(IP));
2442 } else if (value_cid == kNullCid) {
2443 __ ldrh(value_cid_reg, field_nullability_operand);
2444 __ CompareImmediate(value_cid_reg, value_cid);
2445 } else {
2446 __ ldrh(value_cid_reg, field_cid_operand);
2447 __ CompareImmediate(value_cid_reg, value_cid);
2448 }
2449 __ b(&ok, EQ);
2450
2451 // Check if the tracked state of the guarded field can be initialized
2452 // inline. If the field needs a length check we fall through to the runtime,
2453 // which is responsible for computing the offset of the length field
2454 // based on the class id.
2455 // A length guard will be emitted separately when needed via the
2456 // GuardFieldLength instruction after GuardFieldClass.
2457 if (!field().needs_length_check()) {
2458 // Uninitialized field can be handled inline. Check if the
2459 // field is still uninitialized.
2460 __ ldrh(IP, field_cid_operand);
2461 __ CompareImmediate(IP, kIllegalCid);
2462 __ b(fail, NE);
2463
2464 if (value_cid == kDynamicCid) {
2465 __ strh(value_cid_reg, field_cid_operand);
2466 __ strh(value_cid_reg, field_nullability_operand);
2467 } else {
2468 __ LoadImmediate(IP, value_cid);
2469 __ strh(IP, field_cid_operand);
2470 __ strh(IP, field_nullability_operand);
2471 }
2472
2473 __ b(&ok);
2474 }
2475
2476 if (deopt == NULL) {
2477 ASSERT(!compiler->is_optimizing());
2478 __ Bind(fail);
2479
2480 __ ldrh(IP,
2481 compiler::FieldAddress(
2482 field_reg, compiler::target::Field::guarded_cid_offset()));
2483 __ CompareImmediate(IP, kDynamicCid);
2484 __ b(&ok, EQ);
2485
2486 __ Push(field_reg);
2487 __ Push(value_reg);
2488 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2489 __ Drop(2); // Drop the field and the value.
2490 } else {
2491 __ b(fail);
2492 }
2493 } else {
2494 ASSERT(compiler->is_optimizing());
2495 ASSERT(deopt != NULL);
2496
2497 // Field guard class has been initialized and is known.
2498 if (value_cid == kDynamicCid) {
2499 // Field's guarded class id is fixed but value's class id is not known.
2500 __ tst(value_reg, compiler::Operand(kSmiTagMask));
2501
2502 if (field_cid != kSmiCid) {
2503 __ b(fail, EQ);
2504 __ LoadClassId(value_cid_reg, value_reg);
2505 __ CompareImmediate(value_cid_reg, field_cid);
2506 }
2507
2508 if (field().is_nullable() && (field_cid != kNullCid)) {
2509 __ b(&ok, EQ);
2510 if (field_cid != kSmiCid) {
2511 __ CompareImmediate(value_cid_reg, kNullCid);
2512 } else {
2513 __ CompareObject(value_reg, Object::null_object());
2514 }
2515 }
2516 __ b(fail, NE);
2517 } else if (value_cid == field_cid) {
2518 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2519 // may sometimes produce this situation after the last Canonicalize pass.
2520 } else {
2521 // Both the value's and the field's class ids are known.
2522 ASSERT(value_cid != nullability);
2523 __ b(fail);
2524 }
2525 }
2526 __ Bind(&ok);
2527}
2528
2529LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2530 bool opt) const {
2531 const intptr_t kNumInputs = 1;
2532 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2533 const intptr_t kNumTemps = 3;
2534 LocationSummary* summary = new (zone)
2535 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2536 summary->set_in(0, Location::RequiresRegister());
2537 // We need temporaries for field object, length offset and expected length.
2538 summary->set_temp(0, Location::RequiresRegister());
2539 summary->set_temp(1, Location::RequiresRegister());
2540 summary->set_temp(2, Location::RequiresRegister());
2541 return summary;
2542 } else {
2543 // TODO(vegorov): can use TMP when length is small enough to fit into
2544 // immediate.
2545 const intptr_t kNumTemps = 1;
2546 LocationSummary* summary = new (zone)
2547 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2548 summary->set_in(0, Location::RequiresRegister());
2549 summary->set_temp(0, Location::RequiresRegister());
2550 return summary;
2551 }
2552 UNREACHABLE();
2553}
2554
2555void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2556 if (field().guarded_list_length() == Field::kNoFixedLength) {
2557 return; // Nothing to emit.
2558 }
2559
2560 compiler::Label* deopt =
2561 compiler->is_optimizing()
2562 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2563 : NULL;
2564
2565 const Register value_reg = locs()->in(0).reg();
2566
2567 if (!compiler->is_optimizing() ||
2568 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2569 const Register field_reg = locs()->temp(0).reg();
2570 const Register offset_reg = locs()->temp(1).reg();
2571 const Register length_reg = locs()->temp(2).reg();
2572
2573 compiler::Label ok;
2574
2575 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2576
2577 __ ldrsb(offset_reg,
2578 compiler::FieldAddress(
2579 field_reg, compiler::target::Field::
2580 guarded_list_length_in_object_offset_offset()));
2581 __ ldr(
2582 length_reg,
2583 compiler::FieldAddress(
2584 field_reg, compiler::target::Field::guarded_list_length_offset()));
2585
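// A negative in-object length offset means the length cannot be checked
// inline here; skip straight to the success path.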
2586 __ tst(offset_reg, compiler::Operand(offset_reg));
2587 __ b(&ok, MI);
2588
2589 // Load the length from the value. GuardFieldClass already verified that
2590 // the value's class matches the guarded class id of the field.
2591 // offset_reg contains the offset already corrected by -kHeapObjectTag,
2592 // which is why we use Address instead of FieldAddress.
2593 __ ldr(IP, compiler::Address(value_reg, offset_reg));
2594 __ cmp(length_reg, compiler::Operand(IP));
2595
2596 if (deopt == NULL) {
2597 __ b(&ok, EQ);
2598
2599 __ Push(field_reg);
2600 __ Push(value_reg);
2601 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2602 __ Drop(2); // Drop the field and the value.
2603 } else {
2604 __ b(deopt, NE);
2605 }
2606
2607 __ Bind(&ok);
2608 } else {
2609 ASSERT(compiler->is_optimizing());
2610 ASSERT(field().guarded_list_length() >= 0);
2611 ASSERT(field().guarded_list_length_in_object_offset() !=
2612 Field::kUnknownLengthOffset);
2613
2614 const Register length_reg = locs()->temp(0).reg();
2615
2616 __ ldr(length_reg,
2617 compiler::FieldAddress(
2618 value_reg, field().guarded_list_length_in_object_offset()));
2619 __ CompareImmediate(
2620 length_reg, compiler::target::ToRawSmi(field().guarded_list_length()));
2621 __ b(deopt, NE);
2622 }
2623}
2624
2625DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
2626DEFINE_UNIMPLEMENTED_INSTRUCTION(CheckConditionInstr)
2627
2628class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
2629 public:
2630 BoxAllocationSlowPath(Instruction* instruction,
2631 const Class& cls,
2632 Register result)
2633 : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
2634
2635 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2636 if (compiler::Assembler::EmittingComments()) {
2637 __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
2638 String::Handle(cls_.ScrubbedName()).ToCString());
2639 }
2640 __ Bind(entry_label());
2641 const Code& stub = Code::ZoneHandle(
2642 compiler->zone(), StubCode::GetAllocationStubForClass(cls_));
2643
2644 LocationSummary* locs = instruction()->locs();
2645
2646 locs->live_registers()->Remove(Location::RegisterLocation(result_));
2647
2648 compiler->SaveLiveRegisters(locs);
2649 compiler->GenerateStubCall(TokenPosition::kNoSource, // No token position.
2650 stub, PcDescriptorsLayout::kOther, locs);
2651 __ MoveRegister(result_, R0);
2652 compiler->RestoreLiveRegisters(locs);
2653 __ b(exit_label());
2654 }
2655
2656 static void Allocate(FlowGraphCompiler* compiler,
2657 Instruction* instruction,
2658 const Class& cls,
2659 Register result,
2660 Register temp) {
2661 if (compiler->intrinsic_mode()) {
2662 __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp);
2663 } else {
2664 BoxAllocationSlowPath* slow_path =
2665 new BoxAllocationSlowPath(instruction, cls, result);
2666 compiler->AddSlowPathCode(slow_path);
2667
2668 __ TryAllocate(cls, slow_path->entry_label(), result, temp);
2669 __ Bind(slow_path->exit_label());
2670 }
2671 }
2672
2673 private:
2674 const Class& cls_;
2675 const Register result_;
2676};
2677
2678LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2679 bool opt) const {
2680 const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
2681 const intptr_t kNumInputs = 2;
2682 const intptr_t kNumTemps = might_box ? 2 : 0;
2683 LocationSummary* summary = new (zone) LocationSummary(
2684 zone, kNumInputs, kNumTemps,
2685 might_box ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
2686 summary->set_in(0, Location::RequiresRegister());
2687 summary->set_in(1, Location::RequiresRegister());
2688
2689 if (might_box) {
2690 summary->set_temp(0, Location::RequiresRegister());
2691 summary->set_temp(1, Location::RequiresRegister());
2692 }
2693
2694 if (representation() == kUnboxedInt64) {
2695 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
2696 Location::RequiresRegister()));
2697 } else {
2698 ASSERT(representation() == kTagged);
2699 summary->set_out(0, Location::RequiresRegister());
2700 }
2701
2702 return summary;
2703}
2704
2705void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2706 // The string register points to the backing store for external strings.
2707 const Register str = locs()->in(0).reg();
2708 const Location index = locs()->in(1);
2709
2710 compiler::Address element_address = __ ElementAddressForRegIndex(
2711 true, IsExternal(), class_id(), index_scale(), /*index_unboxed=*/false,
2712 str, index.reg());
2713 // Warning: element_address may use register IP as base.
2714
2715 if (representation() == kUnboxedInt64) {
2716 ASSERT(compiler->is_optimizing());
2717 ASSERT(locs()->out(0).IsPairLocation());
2718 PairLocation* result_pair = locs()->out(0).AsPairLocation();
2719 Register result1 = result_pair->At(0).reg();
2720 Register result2 = result_pair->At(1).reg();
2721 switch (class_id()) {
2722 case kOneByteStringCid:
2723 case kExternalOneByteStringCid:
2724 ASSERT(element_count() == 4);
2725 __ ldr(result1, element_address);
2726 __ eor(result2, result2, compiler::Operand(result2));
2727 break;
2728 case kTwoByteStringCid:
2729 case kExternalTwoByteStringCid:
2730 ASSERT(element_count() == 2);
2731 __ ldr(result1, element_address);
2732 __ eor(result2, result2, compiler::Operand(result2));
2733 break;
2734 default:
2735 UNREACHABLE();
2736 }
2737 } else {
2738 ASSERT(representation() == kTagged);
2739 Register result = locs()->out(0).reg();
2740 switch (class_id()) {
2741 case kOneByteStringCid:
2742 case kExternalOneByteStringCid:
2743 switch (element_count()) {
2744 case 1:
2745 __ ldrb(result, element_address);
2746 break;
2747 case 2:
2748 __ ldrh(result, element_address);
2749 break;
2750 case 4:
2751 __ ldr(result, element_address);
2752 break;
2753 default:
2754 UNREACHABLE();
2755 }
2756 break;
2757 case kTwoByteStringCid:
2758 case kExternalTwoByteStringCid:
2759 switch (element_count()) {
2760 case 1:
2761 __ ldrh(result, element_address);
2762 break;
2763 case 2:
2764 __ ldr(result, element_address);
2765 break;
2766 default:
2767 UNREACHABLE();
2768 }
2769 break;
2770 default:
2771 UNREACHABLE();
2772 break;
2773 }
2774 if (can_pack_into_smi()) {
2775 __ SmiTag(result);
2776 } else {
2777 // If the value cannot fit in a smi then allocate a mint box for it.
2778 Register value = locs()->temp(0).reg();
2779 Register temp = locs()->temp(1).reg();
2780 // Value register needs to be manually preserved on allocation slow-path.
2781 locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32);
2782
2783 ASSERT(result != value);
2784 __ MoveRegister(value, result);
2785 __ SmiTag(result);
2786
2787 compiler::Label done;
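// The loaded code units are unsigned, so the value fits in a Smi exactly
// when its top two bits are clear.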
2788 __ TestImmediate(value, 0xC0000000);
2789 __ b(&done, EQ);
2790 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
2791 result, temp);
2792 __ eor(temp, temp, compiler::Operand(temp));
2793 __ StoreToOffset(kWord, value, result,
2794 compiler::target::Mint::value_offset() - kHeapObjectTag);
2795 __ StoreToOffset(kWord, temp, result,
2796 compiler::target::Mint::value_offset() - kHeapObjectTag +
2797 compiler::target::kWordSize);
2798 __ Bind(&done);
2799 }
2800 }
2801}
2802
2803LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
2804 bool opt) const {
2805 const intptr_t kNumInputs = 2;
2806 const intptr_t kNumTemps =
2807 ((IsUnboxedStore() && opt) ? (FLAG_precompiled_mode ? 0 : 2)
2808 : (IsPotentialUnboxedStore() ? 3 : 0));
2809 LocationSummary* summary = new (zone)
2810 LocationSummary(zone, kNumInputs, kNumTemps,
2811 (!FLAG_precompiled_mode &&
2812 ((IsUnboxedStore() && opt && is_initialization()) ||
2813 IsPotentialUnboxedStore()))
2814 ? LocationSummary::kCallOnSlowPath
2815 : LocationSummary::kNoCall);
2816
2817 summary->set_in(0, Location::RequiresRegister());
2818 if (IsUnboxedStore() && opt) {
2819 if (slot().field().is_non_nullable_integer()) {
2820 ASSERT(FLAG_precompiled_mode);
2821 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
2822 Location::RequiresRegister()));
2823 } else {
2824 summary->set_in(1, Location::RequiresFpuRegister());
2825 }
2826 if (!FLAG_precompiled_mode) {
2827 summary->set_temp(0, Location::RequiresRegister());
2828 summary->set_temp(1, Location::RequiresRegister());
2829 }
2830 } else if (IsPotentialUnboxedStore()) {
2831 summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister()
2832 : Location::RequiresRegister());
2833 summary->set_temp(0, Location::RequiresRegister());
2834 summary->set_temp(1, Location::RequiresRegister());
2835 summary->set_temp(2, opt ? Location::RequiresFpuRegister()
2836 : Location::FpuRegisterLocation(Q1));
2837 } else {
2838 summary->set_in(1, ShouldEmitStoreBarrier()
2839 ? Location::RegisterLocation(kWriteBarrierValueReg)
2840 : LocationRegisterOrConstant(value()));
2841 }
2842 return summary;
2843}
2844
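// Loads the mutable box currently stored in the field at [offset]. If the
// field is still null, allocates a new box of class [cls] and stores it into
// the field (with a write barrier) before returning it in [box_reg].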
2845static void EnsureMutableBox(FlowGraphCompiler* compiler,
2846 StoreInstanceFieldInstr* instruction,
2847 Register box_reg,
2848 const Class& cls,
2849 Register instance_reg,
2850 intptr_t offset,
2851 Register temp) {
2852 compiler::Label done;
2853 __ ldr(box_reg, compiler::FieldAddress(instance_reg, offset));
2854 __ CompareObject(box_reg, Object::null_object());
2855 __ b(&done, NE);
2856
2857 BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
2858
2859 __ MoveRegister(temp, box_reg);
2860 __ StoreIntoObjectOffset(instance_reg, offset, temp,
2861 compiler::Assembler::kValueIsNotSmi);
2862 __ Bind(&done);
2863}
2864
2865void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2866 ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
2867 ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
2868 ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
2869
2870 compiler::Label skip_store;
2871
2872 const Register instance_reg = locs()->in(0).reg();
2873 const intptr_t offset_in_bytes = OffsetInBytes();
2874 ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
2875
2876 if (IsUnboxedStore() && compiler->is_optimizing()) {
2877 if (slot().field().is_non_nullable_integer()) {
2878 const PairLocation* value_pair = locs()->in(1).AsPairLocation();
2879 const Register value_lo = value_pair->At(0).reg();
2880 const Register value_hi = value_pair->At(1).reg();
2881 __ Comment("UnboxedIntegerStoreInstanceFieldInstr");
2882 __ StoreToOffset(kWord, value_lo, instance_reg,
2883 offset_in_bytes - kHeapObjectTag);
2884 __ StoreToOffset(
2885 kWord, value_hi, instance_reg,
2886 offset_in_bytes - kHeapObjectTag + compiler::target::kWordSize);
2887 return;
2888 }
2889
2890 const intptr_t cid = slot().field().UnboxedFieldCid();
2891 const DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg());
2892
2893 if (FLAG_precompiled_mode) {
2894 switch (cid) {
2895 case kDoubleCid:
2896 __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
2897 __ StoreDToOffset(value, instance_reg,
2898 offset_in_bytes - kHeapObjectTag);
2899 return;
2900 case kFloat32x4Cid:
2901 __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
2902 __ StoreMultipleDToOffset(value, 2, instance_reg,
2903 offset_in_bytes - kHeapObjectTag);
2904 return;
2905 case kFloat64x2Cid:
2906 __ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
2907 __ StoreMultipleDToOffset(value, 2, instance_reg,
2908 offset_in_bytes - kHeapObjectTag);
2909 return;
2910 default:
2911 UNREACHABLE();
2912 }
2913 }
2914
2915 const Register temp = locs()->temp(0).reg();
2916 const Register temp2 = locs()->temp(1).reg();
2917
2918 if (is_initialization()) {
2919 const Class* cls = NULL;
2920 switch (cid) {
2921 case kDoubleCid:
2922 cls = &compiler->double_class();
2923 break;
2924 case kFloat32x4Cid:
2925 cls = &compiler->float32x4_class();
2926 break;
2927 case kFloat64x2Cid:
2928 cls = &compiler->float64x2_class();
2929 break;
2930 default:
2931 UNREACHABLE();
2932 }
2933
2934 BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
2935 __ MoveRegister(temp2, temp);
2936 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, temp2,
2937 compiler::Assembler::kValueIsNotSmi);
2938 } else {
2939 __ ldr(temp, compiler::FieldAddress(instance_reg, offset_in_bytes));
2940 }
2941 switch (cid) {
2942 case kDoubleCid:
2943 __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
2944 __ StoreDToOffset(
2945 value, temp,
2946 compiler::target::Double::value_offset() - kHeapObjectTag);
2947 break;
2948 case kFloat32x4Cid:
2949 __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
2950 __ StoreMultipleDToOffset(
2951 value, 2, temp,
2952 compiler::target::Float32x4::value_offset() - kHeapObjectTag);
2953 break;
2954 case kFloat64x2Cid:
2955 __ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
2956 __ StoreMultipleDToOffset(
2957 value, 2, temp,
2958 compiler::target::Float64x2::value_offset() - kHeapObjectTag);
2959 break;
2960 default:
2961 UNREACHABLE();
2962 }
2963
2964 return;
2965 }
2966
2967 if (IsPotentialUnboxedStore()) {
2968 const Register value_reg = locs()->in(1).reg();
2969 const Register temp = locs()->temp(0).reg();
2970 const Register temp2 = locs()->temp(1).reg();
2971 const DRegister fpu_temp = EvenDRegisterOf(locs()->temp(2).fpu_reg());
2972
2973 if (ShouldEmitStoreBarrier()) {
2974 // Value input is a writable register and should be manually preserved
2975 // across allocation slow-path.
2976 locs()->live_registers()->Add(locs()->in(1), kTagged);
2977 }
2978
2979 compiler::Label store_pointer;
2980 compiler::Label store_double;
2981 compiler::Label store_float32x4;
2982 compiler::Label store_float64x2;
2983
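// Inspect the field's guard state at run time to decide whether the value
// should be stored unboxed into a mutable box or as a regular tagged pointer.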
2984 __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
2985
2986 __ ldrh(temp2, compiler::FieldAddress(
2987 temp, compiler::target::Field::is_nullable_offset()));
2988 __ CompareImmediate(temp2, kNullCid);
2989 __ b(&store_pointer, EQ);
2990
2991 __ ldrb(temp2, compiler::FieldAddress(
2992 temp, compiler::target::Field::kind_bits_offset()));
2993 __ tst(temp2, compiler::Operand(1 << Field::kUnboxingCandidateBit));
2994 __ b(&store_pointer, EQ);
2995
2996 __ ldrh(temp2, compiler::FieldAddress(
2997 temp, compiler::target::Field::guarded_cid_offset()));
2998 __ CompareImmediate(temp2, kDoubleCid);
2999 __ b(&store_double, EQ);
3000
3001 __ ldrh(temp2, compiler::FieldAddress(
3002 temp, compiler::target::Field::guarded_cid_offset()));
3003 __ CompareImmediate(temp2, kFloat32x4Cid);
3004 __ b(&store_float32x4, EQ);
3005
3006 __ ldrh(temp2, compiler::FieldAddress(
3007 temp, compiler::target::Field::guarded_cid_offset()));
3008 __ CompareImmediate(temp2, kFloat64x2Cid);
3009 __ b(&store_float64x2, EQ);
3010
3011 // Fall through.
3012 __ b(&store_pointer);
3013
3014 if (!compiler->is_optimizing()) {
3015 locs()->live_registers()->Add(locs()->in(0));
3016 locs()->live_registers()->Add(locs()->in(1));
3017 }
3018
3019 {
3020 __ Bind(&store_double);
3021 EnsureMutableBox(compiler, this, temp, compiler->double_class(),
3022 instance_reg, offset_in_bytes, temp2);
3023 __ CopyDoubleField(temp, value_reg, TMP, temp2, fpu_temp);
3024 __ b(&skip_store);
3025 }
3026
3027 {
3028 __ Bind(&store_float32x4);
3029 EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
3030 instance_reg, offset_in_bytes, temp2);
3031 __ CopyFloat32x4Field(temp, value_reg, TMP, temp2, fpu_temp);
3032 __ b(&skip_store);
3033 }
3034
3035 {
3036 __ Bind(&store_float64x2);
3037 EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
3038 instance_reg, offset_in_bytes, temp2);
3039 __ CopyFloat64x2Field(temp, value_reg, TMP, temp2, fpu_temp);
3040 __ b(&skip_store);
3041 }
3042
3043 __ Bind(&store_pointer);
3044 }
3045
3046 if (ShouldEmitStoreBarrier()) {
3047 const Register value_reg = locs()->in(1).reg();
3048 // In intrinsic mode, there is no stack frame and the function will return
3049 // by executing 'ret LR' directly. Therefore we cannot overwrite LR. (see
3050 // ReturnInstr::EmitNativeCode).
3051 ASSERT(!locs()->live_registers()->Contains(Location::RegisterLocation(LR)));
3052 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
3053 CanValueBeSmi(),
3054 /*lr_reserved=*/!compiler->intrinsic_mode());
3055 } else {
3056 if (locs()->in(1).IsConstant()) {
3057 __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
3058 locs()->in(1).constant());
3059 } else {
3060 const Register value_reg = locs()->in(1).reg();
3061 __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
3062 value_reg);
3063 }
3064 }
3065 __ Bind(&skip_store);
3066}
3067
3068LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
3069 bool opt) const {
3070 const intptr_t kNumInputs = 1;
3071 const intptr_t kNumTemps = 1;
3072 LocationSummary* locs = new (zone)
3073 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3074 locs->set_in(0, Location::RequiresRegister());
3075 locs->set_temp(0, Location::RequiresRegister());
3076 return locs;
3077}
3078
3079void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3080 const Register value = locs()->in(0).reg();
3081 const Register temp = locs()->temp(0).reg();
3082
3083 compiler->used_static_fields().Add(&field());
3084
3085 __ LoadFromOffset(kWord, temp, THR,
3086 compiler::target::Thread::field_table_values_offset());
3087 // Note: static field ids won't be changed by hot-reload.
3088 __ StoreToOffset(kWord, value, temp,
3089 compiler::target::FieldTable::OffsetOf(field()));
3090}
3091
3092LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
3093 bool opt) const {
3094 const intptr_t kNumInputs = 3;
3095 const intptr_t kNumTemps = 0;
3096 LocationSummary* summary = new (zone)
3097 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3098 summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg));
3099 summary->set_in(1, Location::RegisterLocation(
3100 TypeTestABI::kInstantiatorTypeArgumentsReg));
3101 summary->set_in(
3102 2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
3103 summary->set_out(0, Location::RegisterLocation(R0));
3104 return summary;
3105}
3106
3107void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3108 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
3109 ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
3110 ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
3111
3112 compiler->GenerateInstanceOf(token_pos(), deopt_id(), type(), locs());
3113 ASSERT(locs()->out(0).reg() == R0);
3114}
3115
3116LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
3117 bool opt) const {
3118 const intptr_t kNumInputs = 2;
3119 const intptr_t kNumTemps = 0;
3120 LocationSummary* locs = new (zone)
3121 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3122 locs->set_in(kElementTypePos, Location::RegisterLocation(R1));
3123 locs->set_in(kLengthPos, Location::RegisterLocation(R2));
3124 locs->set_out(0, Location::RegisterLocation(R0));
3125 return locs;
3126}
3127
3128// Inlines array allocation for known constant values.
3129static void InlineArrayAllocation(FlowGraphCompiler* compiler,
3130 intptr_t num_elements,
3131 compiler::Label* slow_path,
3132 compiler::Label* done) {
3133 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
3134 const Register kLengthReg = R2;
3135 const Register kElemTypeReg = R1;
3136 const intptr_t instance_size = Array::InstanceSize(num_elements);
3137
3138 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
3139 R0, // instance
3140 R3, // end address
3141 R8, R6);
3142 // R0: new object start as a tagged pointer.
3143 // R3: new object end address.
3144
3145 // Store the type argument field.
3146 __ StoreIntoObjectNoBarrier(
3147 R0,
3148 compiler::FieldAddress(R0,
3149 compiler::target::Array::type_arguments_offset()),
3150 kElemTypeReg);
3151
3152 // Set the length field.
3153 __ StoreIntoObjectNoBarrier(
3154 R0, compiler::FieldAddress(R0, compiler::target::Array::length_offset()),
3155 kLengthReg);
3156
3157 // Initialize all array elements to raw_null.
3158 // R0: new object start as a tagged pointer.
3159 // R3: new object end address.
3160 // R6: iterator which initially points to the start of the variable
3161 // data area to be initialized.
3162 // R8: null
3163 if (num_elements > 0) {
3164 const intptr_t array_size = instance_size - sizeof(ArrayLayout);
3165 __ LoadObject(R8, Object::null_object());
3166 if (num_elements >= 2) {
3167 __ mov(R9, compiler::Operand(R8));
3168 } else {
3169#if defined(DEBUG)
3170 // Clobber R9 with an invalid pointer.
3171 __ LoadImmediate(R9, 0x1);
3172#endif // DEBUG
3173 }
3174 __ AddImmediate(R6, R0, sizeof(ArrayLayout) - kHeapObjectTag);
3175 if (array_size < (kInlineArraySize * compiler::target::kWordSize)) {
3176 __ InitializeFieldsNoBarrierUnrolled(
3177 R0, R6, 0, num_elements * compiler::target::kWordSize, R8, R9);
3178 } else {
3179 __ InitializeFieldsNoBarrier(R0, R6, R3, R8, R9);
3180 }
3181 }
3182 __ b(done);
3183}
3184
3185void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3186 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
3187 if (type_usage_info != nullptr) {
3188 const Class& list_class = Class::Handle(
3189 compiler->thread()->isolate()->class_table()->At(kArrayCid));
3190 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
3191 element_type()->definition());
3192 }
3193
3194 const Register kLengthReg = R2;
3195 const Register kElemTypeReg = R1;
3196 const Register kResultReg = R0;
3197
3198 ASSERT(locs()->in(kElementTypePos).reg() == kElemTypeReg);
3199 ASSERT(locs()->in(kLengthPos).reg() == kLengthReg);
3200
3201 compiler::Label slow_path, done;
3202 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
3203 num_elements()->BindsToConstant() &&
3204 compiler::target::IsSmi(num_elements()->BoundConstant())) {
3205 const intptr_t length =
3206 compiler::target::SmiValue(num_elements()->BoundConstant());
3207 if (Array::IsValidLength(length)) {
3208 InlineArrayAllocation(compiler, length, &slow_path, &done);
3209 }
3210 }
3211
3212 __ Bind(&slow_path);
3213 auto object_store = compiler->isolate()->object_store();
3214 const auto& allocate_array_stub =
3215 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
3216 compiler->GenerateStubCall(token_pos(), allocate_array_stub,
3217 PcDescriptorsLayout::kOther, locs(), deopt_id());
3218 __ Bind(&done);
3219 ASSERT(locs()->out(0).reg() == kResultReg);
3220}
3221
3222LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
3223 bool opt) const {
3224 const intptr_t kNumInputs = 1;
3225 const intptr_t kNumTemps = (IsUnboxedLoad() && opt)
3226 ? (FLAG_precompiled_mode ? 0 : 1)
3227 : (IsPotentialUnboxedLoad() ? 3 : 0);
3228
3229 const auto contains_call =
3230 (IsUnboxedLoad() && opt)
3231 ? LocationSummary::kNoCall
3232 : (IsPotentialUnboxedLoad()
3233 ? LocationSummary::kCallOnSlowPath
3234 : (calls_initializer() ? LocationSummary::kCall
3235 : LocationSummary::kNoCall));
3236
3237 LocationSummary* locs =
3238 new (zone) LocationSummary(zone, kNumInputs, kNumTemps, contains_call);
3239
3240 locs->set_in(0, calls_initializer() ? Location::RegisterLocation(
3241 InitInstanceFieldABI::kInstanceReg)
3242 : Location::RequiresRegister());
3243
3244 if (IsUnboxedLoad() && opt) {
3245 ASSERT(!calls_initializer());
3246 if (!FLAG_precompiled_mode) {
3247 locs->set_temp(0, Location::RequiresRegister());
3248 }
3249 if (slot().field().is_non_nullable_integer()) {
3250 ASSERT(FLAG_precompiled_mode);
3251 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
3252 Location::RequiresRegister()));
3253 } else {
3254 locs->set_out(0, Location::RequiresFpuRegister());
3255 }
3256 } else if (IsPotentialUnboxedLoad()) {
3257 ASSERT(!calls_initializer());
3258 locs->set_temp(0, opt ? Location::RequiresFpuRegister()
3259 : Location::FpuRegisterLocation(Q1));
3260 locs->set_temp(1, Location::RequiresRegister());
3261 locs->set_temp(2, Location::RequiresRegister());
3262 locs->set_out(0, Location::RequiresRegister());
3263 } else if (calls_initializer()) {
3264 locs->set_out(0,
3265 Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
3266 } else {
3267 locs->set_out(0, Location::RequiresRegister());
3268 }
3269 return locs;
3270}
3271
3272void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3273 ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
3274 ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
3275 ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
3276
3277 const Register instance_reg = locs()->in(0).reg();
3278 if (IsUnboxedLoad() && compiler->is_optimizing()) {
3279 ASSERT(!calls_initializer());
3280 if (slot().field().is_non_nullable_integer()) {
3281 const PairLocation* out_pair = locs()->out(0).AsPairLocation();
3282 const Register out_lo = out_pair->At(0).reg();
3283 const Register out_hi = out_pair->At(1).reg();
3284 __ Comment("UnboxedIntegerLoadFieldInstr");
3285 __ LoadFromOffset(kWord, out_lo, instance_reg,
3286 OffsetInBytes() - kHeapObjectTag);
3287 __ LoadFromOffset(
3288 kWord, out_hi, instance_reg,
3289 OffsetInBytes() - kHeapObjectTag + compiler::target::kWordSize);
3290 return;
3291 }
3292
3293 const intptr_t cid = slot().field().UnboxedFieldCid();
3294 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
3295
3296 if (FLAG_precompiled_mode) {
3297 switch (cid) {
3298 case kDoubleCid:
3299 __ Comment("UnboxedDoubleLoadFieldInstr");
3300 __ LoadDFromOffset(result, instance_reg,
3301 OffsetInBytes() - kHeapObjectTag);
3302 return;
3303 case kFloat32x4Cid:
3304 __ Comment("UnboxedFloat32x4LoadFieldInstr");
3305 __ LoadMultipleDFromOffset(result, 2, instance_reg,
3306 OffsetInBytes() - kHeapObjectTag);
3307 return;
3308 case kFloat64x2Cid:
3309 __ Comment("UnboxedFloat64x2LoadFieldInstr");
3310 __ LoadMultipleDFromOffset(result, 2, instance_reg,
3311 OffsetInBytes() - kHeapObjectTag);
3312 return;
3313 default:
3314 UNREACHABLE();
3315 }
3316 }
3317
3318 const Register temp = locs()->temp(0).reg();
3319 __ LoadFieldFromOffset(kWord, temp, instance_reg, OffsetInBytes());
3320 switch (cid) {
3321 case kDoubleCid:
3322 __ Comment("UnboxedDoubleLoadFieldInstr");
3323 __ LoadDFromOffset(
3324 result, temp,
3325 compiler::target::Double::value_offset() - kHeapObjectTag);
3326 break;
3327 case kFloat32x4Cid:
3328 __ Comment("UnboxedFloat32x4LoadFieldInstr");
3329 __ LoadMultipleDFromOffset(
3330 result, 2, temp,
3331 compiler::target::Float32x4::value_offset() - kHeapObjectTag);
3332 break;
3333 case kFloat64x2Cid:
3334 __ Comment("UnboxedFloat64x2LoadFieldInstr");
3335 __ LoadMultipleDFromOffset(
3336 result, 2, temp,
3337 compiler::target::Float64x2::value_offset() - kHeapObjectTag);
3338 break;
3339 default:
3340 UNREACHABLE();
3341 }
3342 return;
3343 }
3344
3345 compiler::Label done;
3346 const Register result_reg = locs()->out(0).reg();
3347 if (IsPotentialUnboxedLoad()) {
3348 ASSERT(!calls_initializer());
3349 const DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg());
3350 const Register temp = locs()->temp(1).reg();
3351 const Register temp2 = locs()->temp(2).reg();
3352
3353 compiler::Label load_pointer;
3354 compiler::Label load_double;
3355 compiler::Label load_float32x4;
3356 compiler::Label load_float64x2;
3357
3358 __ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
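    // Load the original Field so its guarded class id and nullability can be
    // inspected at run time to pick the representation to load.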
3359
3360 compiler::FieldAddress field_cid_operand(
3361 result_reg, compiler::target::Field::guarded_cid_offset());
3362 compiler::FieldAddress field_nullability_operand(
3363 result_reg, compiler::target::Field::is_nullable_offset());
3364
3365 __ ldrh(temp, field_nullability_operand);
3366 __ CompareImmediate(temp, kNullCid);
3367 __ b(&load_pointer, EQ);
3368
3369 __ ldrh(temp, field_cid_operand);
3370 __ CompareImmediate(temp, kDoubleCid);
3371 __ b(&load_double, EQ);
3372
3373 __ ldrh(temp, field_cid_operand);
3374 __ CompareImmediate(temp, kFloat32x4Cid);
3375 __ b(&load_float32x4, EQ);
3376
3377 __ ldrh(temp, field_cid_operand);
3378 __ CompareImmediate(temp, kFloat64x2Cid);
3379 __ b(&load_float64x2, EQ);
3380
3381    // No unboxed cid matched: load the field as a tagged pointer.
3382 __ b(&load_pointer);
3383
3384 if (!compiler->is_optimizing()) {
3385 locs()->live_registers()->Add(locs()->in(0));
3386 }
3387
3388 {
3389 __ Bind(&load_double);
3390 BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
3391 result_reg, temp);
3392 __ ldr(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
3393 __ CopyDoubleField(result_reg, temp, TMP, temp2, value);
3394 __ b(&done);
3395 }
3396
3397 {
3398 __ Bind(&load_float32x4);
3399 BoxAllocationSlowPath::Allocate(
3400 compiler, this, compiler->float32x4_class(), result_reg, temp);
3401 __ ldr(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
3402 __ CopyFloat32x4Field(result_reg, temp, TMP, temp2, value);
3403 __ b(&done);
3404 }
3405
3406 {
3407 __ Bind(&load_float64x2);
3408 BoxAllocationSlowPath::Allocate(
3409 compiler, this, compiler->float64x2_class(), result_reg, temp);
3410 __ ldr(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
3411 __ CopyFloat64x2Field(result_reg, temp, TMP, temp2, value);
3412 __ b(&done);
3413 }
3414
3415 __ Bind(&load_pointer);
3416 }
3417
3418 __ LoadFieldFromOffset(kWord, result_reg, instance_reg, OffsetInBytes());
3419
3420 if (calls_initializer()) {
3421 EmitNativeCodeForInitializerCall(compiler);
3422 }
3423
3424 __ Bind(&done);
3425}
3426
3427LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone,
3428 bool opt) const {
3429 const intptr_t kNumInputs = 2;
3430 const intptr_t kNumTemps = 0;
3431 LocationSummary* locs = new (zone)
3432 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3433 locs->set_in(0, Location::RegisterLocation(
3434 InstantiationABI::kInstantiatorTypeArgumentsReg));
3435 locs->set_in(1, Location::RegisterLocation(
3436 InstantiationABI::kFunctionTypeArgumentsReg));
3437 locs->set_out(0,
3438 Location::RegisterLocation(InstantiationABI::kResultTypeReg));
3439 return locs;
3440}
3441
3442void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3443 const Register instantiator_type_args_reg = locs()->in(0).reg();
3444 const Register function_type_args_reg = locs()->in(1).reg();
3445 const Register result_reg = locs()->out(0).reg();
3446
3447 // 'instantiator_type_args_reg' is a TypeArguments object (or null).
3448 // 'function_type_args_reg' is a TypeArguments object (or null).
3449 // A runtime call to instantiate the type is required.
3450 __ PushObject(Object::null_object()); // Make room for the result.
3451 __ PushObject(type());
3452 static_assert(InstantiationABI::kFunctionTypeArgumentsReg <
3453 InstantiationABI::kInstantiatorTypeArgumentsReg,
3454 "Should be ordered to push arguments with one instruction");
3455 __ PushList((1 << instantiator_type_args_reg) |
3456 (1 << function_type_args_reg));
3457 compiler->GenerateRuntimeCall(token_pos(), deopt_id(),
3458 kInstantiateTypeRuntimeEntry, 3, locs());
3459  __ Drop(3);  // Drop the two type vectors and the uninstantiated type.
3460 __ Pop(result_reg); // Pop instantiated type.
3461}
3462
3463LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary(
3464 Zone* zone,
3465 bool opt) const {
3466 const intptr_t kNumInputs = 2;
3467 const intptr_t kNumTemps = 0;
3468 LocationSummary* locs = new (zone)
3469 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3470 locs->set_in(0, Location::RegisterLocation(
3471 InstantiationABI::kInstantiatorTypeArgumentsReg));
3472 locs->set_in(1, Location::RegisterLocation(
3473 InstantiationABI::kFunctionTypeArgumentsReg));
3474 locs->set_out(
3475 0, Location::RegisterLocation(InstantiationABI::kResultTypeArgumentsReg));
3476 return locs;
3477}
3478
3479void InstantiateTypeArgumentsInstr::EmitNativeCode(
3480 FlowGraphCompiler* compiler) {
3481 const Register instantiator_type_args_reg = locs()->in(0).reg();
3482 const Register function_type_args_reg = locs()->in(1).reg();
3483 const Register result_reg = locs()->out(0).reg();
3484
3485 // 'instantiator_type_args_reg' is a TypeArguments object (or null).
3486 // 'function_type_args_reg' is a TypeArguments object (or null).
3487
3488 // If both the instantiator and function type arguments are null and if the
3489 // type argument vector instantiated from null becomes a vector of dynamic,
3490 // then use null as the type arguments.
3491 compiler::Label type_arguments_instantiated;
3492 const bool can_function_type_args_be_null =
3493 function_type_arguments()->CanBe(Object::null_object());
3494 const intptr_t len = type_arguments().Length();
3495 if (type_arguments().IsRawWhenInstantiatedFromRaw(len) &&
3496 can_function_type_args_be_null) {
3497 ASSERT(result_reg != instantiator_type_args_reg &&
3498 result_reg != function_type_args_reg);
3499 __ LoadObject(result_reg, Object::null_object());
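    // Compare the instantiator vector against null; unless the function type
    // arguments are a known constant they are compared as well, predicated on
    // EQ, so the branch below is only taken when both vectors are null.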
3500 __ cmp(instantiator_type_args_reg, compiler::Operand(result_reg));
3501 if (!function_type_arguments()->BindsToConstant()) {
3502 __ cmp(function_type_args_reg, compiler::Operand(result_reg), EQ);
3503 }
3504 __ b(&type_arguments_instantiated, EQ);
3505 }
3506 // Lookup cache in stub before calling runtime.
3507 __ LoadObject(InstantiationABI::kUninstantiatedTypeArgumentsReg,
3508 type_arguments());
3509 compiler->GenerateStubCall(token_pos(), GetStub(),
3510 PcDescriptorsLayout::kOther, locs());
3511 __ Bind(&type_arguments_instantiated);
3512}
3513
3514LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
3515 Zone* zone,
3516 bool opt) const {
3517 ASSERT(opt);
3518 const intptr_t kNumInputs = 0;
3519 const intptr_t kNumTemps = 3;
3520 LocationSummary* locs = new (zone) LocationSummary(
3521 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3522 locs->set_temp(0, Location::RegisterLocation(R1));
3523 locs->set_temp(1, Location::RegisterLocation(R2));
3524 locs->set_temp(2, Location::RegisterLocation(R3));
3525 locs->set_out(0, Location::RegisterLocation(R0));
3526 return locs;
3527}
3528
3529class AllocateContextSlowPath
3530 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
3531 public:
3532 explicit AllocateContextSlowPath(
3533 AllocateUninitializedContextInstr* instruction)
3534 : TemplateSlowPathCode(instruction) {}
3535
3536 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3537 __ Comment("AllocateContextSlowPath");
3538 __ Bind(entry_label());
3539
3540 LocationSummary* locs = instruction()->locs();
3541 locs->live_registers()->Remove(locs->out(0));
3542
3543 compiler->SaveLiveRegisters(locs);
3544
3545 auto object_store = compiler->isolate()->object_store();
3546 const auto& allocate_context_stub = Code::ZoneHandle(
3547 compiler->zone(), object_store->allocate_context_stub());
3548 __ LoadImmediate(R1, instruction()->num_context_variables());
3549 compiler->GenerateStubCall(instruction()->token_pos(),
3550 allocate_context_stub,
3551 PcDescriptorsLayout::kOther, locs);
3552 ASSERT(instruction()->locs()->out(0).reg() == R0);
3553 compiler->RestoreLiveRegisters(instruction()->locs());
3554 __ b(exit_label());
3555 }
3556};
3557
3558void AllocateUninitializedContextInstr::EmitNativeCode(
3559 FlowGraphCompiler* compiler) {
3560 Register temp0 = locs()->temp(0).reg();
3561 Register temp1 = locs()->temp(1).reg();
3562 Register temp2 = locs()->temp(2).reg();
3563 Register result = locs()->out(0).reg();
3564  // Try to allocate the object.
3565 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
3566 compiler->AddSlowPathCode(slow_path);
3567 intptr_t instance_size = Context::InstanceSize(num_context_variables());
3568
3569 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
3570 result, // instance
3571 temp0, temp1, temp2);
3572
3573  // Set up the number of context variables field.
3574 __ LoadImmediate(temp0, num_context_variables());
3575 __ str(temp0, compiler::FieldAddress(
3576 result, compiler::target::Context::num_variables_offset()));
3577
3578 __ Bind(slow_path->exit_label());
3579}
3580
3581LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
3582 bool opt) const {
3583 const intptr_t kNumInputs = 0;
3584 const intptr_t kNumTemps = 1;
3585 LocationSummary* locs = new (zone)
3586 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3587 locs->set_temp(0, Location::RegisterLocation(R1));
3588 locs->set_out(0, Location::RegisterLocation(R0));
3589 return locs;
3590}
3591
3592void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3593 ASSERT(locs()->temp(0).reg() == R1);
3594 ASSERT(locs()->out(0).reg() == R0);
3595
3596 auto object_store = compiler->isolate()->object_store();
3597 const auto& allocate_context_stub =
3598 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
3599 __ LoadImmediate(R1, num_context_variables());
3600 compiler->GenerateStubCall(token_pos(), allocate_context_stub,
3601 PcDescriptorsLayout::kOther, locs());
3602}
3603
3604LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
3605 bool opt) const {
3606 const intptr_t kNumInputs = 1;
3607 const intptr_t kNumTemps = 0;
3608 LocationSummary* locs = new (zone)
3609 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3610 locs->set_in(0, Location::RegisterLocation(R4));
3611 locs->set_out(0, Location::RegisterLocation(R0));
3612 return locs;
3613}
3614
3615void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3616 ASSERT(locs()->in(0).reg() == R4);
3617 ASSERT(locs()->out(0).reg() == R0);
3618
3619 auto object_store = compiler->isolate()->object_store();
3620 const auto& clone_context_stub =
3621 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
3622 compiler->GenerateStubCall(token_pos(), clone_context_stub,
3623 /*kind=*/PcDescriptorsLayout::kOther, locs());
3624}
3625
3626LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
3627 bool opt) const {
3628 UNREACHABLE();
3629 return NULL;
3630}
3631
3632void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3633 __ Bind(compiler->GetJumpLabel(this));
3634 compiler->AddExceptionHandler(
3635 catch_try_index(), try_index(), compiler->assembler()->CodeSize(),
3636 is_generated(), catch_handler_types_, needs_stacktrace());
3637 if (!FLAG_precompiled_mode) {
3638 // On lazy deoptimization we patch the optimized code here to enter the
3639 // deoptimization stub.
3640 const intptr_t deopt_id = DeoptId::ToDeoptAfter(GetDeoptId());
3641 if (compiler->is_optimizing()) {
3642 compiler->AddDeoptIndexAtCall(deopt_id);
3643 } else {
3644 compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
3645 TokenPosition::kNoSource);
3646 }
3647 }
3648 if (HasParallelMove()) {
3649 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
3650 }
3651
3652 // Restore SP from FP as we are coming from a throw and the code for
3653 // popping arguments has not been run.
3654 const intptr_t fp_sp_dist =
3655 (compiler::target::frame_layout.first_local_from_fp + 1 -
3656 compiler->StackSize()) *
3657 compiler::target::kWordSize;
3658 ASSERT(fp_sp_dist <= 0);
3659 __ AddImmediate(SP, FP, fp_sp_dist);
3660
3661 if (!compiler->is_optimizing()) {
3662 if (raw_exception_var_ != nullptr) {
3663 __ StoreToOffset(
3664 kWord, kExceptionObjectReg, FP,
3665 compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
3666 }
3667 if (raw_stacktrace_var_ != nullptr) {
3668 __ StoreToOffset(
3669 kWord, kStackTraceObjectReg, FP,
3670 compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
3671 }
3672 }
3673}
3674
3675LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
3676 bool opt) const {
3677 const intptr_t kNumInputs = 0;
3678 const intptr_t kNumTemps = 2;
3679 const bool using_shared_stub = UseSharedSlowPathStub(opt);
3680 ASSERT((kReservedCpuRegisters & (1 << LR)) != 0);
3681 LocationSummary* summary = new (zone)
3682 LocationSummary(zone, kNumInputs, kNumTemps,
3683 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
3684 : LocationSummary::kCallOnSlowPath);
3685 summary->set_temp(0, Location::RequiresRegister());
3686 summary->set_temp(1, Location::RequiresRegister());
3687 return summary;
3688}
3689
3690class CheckStackOverflowSlowPath
3691 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
3692 public:
3693 static constexpr intptr_t kNumSlowPathArgs = 0;
3694
3695 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
3696 : TemplateSlowPathCode(instruction) {}
3697
3698 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3699 if (compiler->isolate()->use_osr() && osr_entry_label()->IsLinked()) {
3700 const Register value = instruction()->locs()->temp(0).reg();
3701 __ Comment("CheckStackOverflowSlowPathOsr");
3702 __ Bind(osr_entry_label());
3703 __ LoadImmediate(value, Thread::kOsrRequest);
3704 __ str(value,
3705 compiler::Address(
3706 THR, compiler::target::Thread::stack_overflow_flags_offset()));
3707 }
3708 __ Comment("CheckStackOverflowSlowPath");
3709 __ Bind(entry_label());
3710 const bool using_shared_stub =
3711 instruction()->locs()->call_on_shared_slow_path();
3712 if (!using_shared_stub) {
3713 compiler->SaveLiveRegisters(instruction()->locs());
3714 }
3715 // pending_deoptimization_env_ is needed to generate a runtime call that
3716 // may throw an exception.
3717 ASSERT(compiler->pending_deoptimization_env_ == NULL);
3718 Environment* env =
3719 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
3720 compiler->pending_deoptimization_env_ = env;
3721
3722 if (using_shared_stub) {
3723 const uword entry_point_offset = compiler::target::Thread::
3724 stack_overflow_shared_stub_entry_point_offset(
3725 instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
3726 __ ldr(LR, compiler::Address(THR, entry_point_offset));
3727 __ blx(LR);
3728 compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
3729 compiler->RecordCatchEntryMoves();
3730 compiler->AddDescriptor(
3731 PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(),
3732 instruction()->deopt_id(), instruction()->token_pos(),
3733 compiler->CurrentTryIndex());
3734 } else {
3735 compiler->GenerateRuntimeCall(
3736 instruction()->token_pos(), instruction()->deopt_id(),
3737 kStackOverflowRuntimeEntry, kNumSlowPathArgs, instruction()->locs());
3738 }
3739
3740 if (compiler->isolate()->use_osr() && !compiler->is_optimizing() &&
3741 instruction()->in_loop()) {
3742 // In unoptimized code, record loop stack checks as possible OSR entries.
3743 compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
3744 instruction()->deopt_id(),
3745 TokenPosition::kNoSource);
3746 }
3747 compiler->pending_deoptimization_env_ = NULL;
3748 if (!using_shared_stub) {
3749 compiler->RestoreLiveRegisters(instruction()->locs());
3750 }
3751 __ b(exit_label());
3752 }
3753
3754 compiler::Label* osr_entry_label() {
3755 ASSERT(Isolate::Current()->use_osr());
3756 return &osr_entry_label_;
3757 }
3758
3759 private:
3760 compiler::Label osr_entry_label_;
3761};
3762
3763void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
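  // Compare the stack pointer against the thread's stack limit; the LS
  // branches below take the slow path when SP is at or below the limit.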
3764 __ ldr(IP, compiler::Address(THR,
3765 compiler::target::Thread::stack_limit_offset()));
3766 __ cmp(SP, compiler::Operand(IP));
3767
3768 auto object_store = compiler->isolate()->object_store();
3769 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
3770 const auto& stub = Code::ZoneHandle(
3771 compiler->zone(),
3772 live_fpu_regs
3773 ? object_store->stack_overflow_stub_with_fpu_regs_stub()
3774 : object_store->stack_overflow_stub_without_fpu_regs_stub());
3775 const bool using_shared_stub = locs()->call_on_shared_slow_path();
3776 if (using_shared_stub && compiler->CanPcRelativeCall(stub)) {
3777 __ GenerateUnRelocatedPcRelativeCall(LS);
3778 compiler->AddPcRelativeCallStubTarget(stub);
3779
3780 // We use the "extended" environment which has the locations updated to
3781 // reflect live registers being saved in the shared spilling stubs (see
3782 // the stub above).
3783 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
3784 compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
3785 PcDescriptorsLayout::kOther, locs(),
3786 extended_env);
3787 return;
3788 }
3789
3790 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3791 compiler->AddSlowPathCode(slow_path);
3792 __ b(slow_path->entry_label(), LS);
3793 if (compiler->CanOSRFunction() && in_loop()) {
3794 const Register function = locs()->temp(0).reg();
3795 const Register count = locs()->temp(1).reg();
3796 // In unoptimized code check the usage counter to trigger OSR at loop
3797 // stack checks. Use progressively higher thresholds for more deeply
3798 // nested loops to attempt to hit outer loops with OSR when possible.
3799 __ LoadObject(function, compiler->parsed_function().function());
3800 intptr_t threshold =
3801 FLAG_optimization_counter_threshold * (loop_depth() + 1);
3802 __ ldr(count,
3803 compiler::FieldAddress(
3804 function, compiler::target::Function::usage_counter_offset()));
3805 __ add(count, count, compiler::Operand(1));
3806 __ str(count,
3807 compiler::FieldAddress(
3808 function, compiler::target::Function::usage_counter_offset()));
3809 __ CompareImmediate(count, threshold);
3810 __ b(slow_path->osr_entry_label(), GE);
3811 }
3812 if (compiler->ForceSlowPathForStackOverflow()) {
3813 __ b(slow_path->entry_label());
3814 }
3815 __ Bind(slow_path->exit_label());
3816}
3817
3818static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3819 BinarySmiOpInstr* shift_left) {
3820 const LocationSummary& locs = *shift_left->locs();
3821 const Register left = locs.in(0).reg();
3822 const Register result = locs.out(0).reg();
3823 compiler::Label* deopt =
3824 shift_left->CanDeoptimize()
3825 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3826 ICData::kDeoptBinarySmiOp)
3827 : NULL;
3828 if (locs.in(1).IsConstant()) {
3829 const Object& constant = locs.in(1).constant();
3830 ASSERT(compiler::target::IsSmi(constant));
3831 // Immediate shift operation takes 5 bits for the count.
3832 const intptr_t kCountLimit = 0x1F;
3833 const intptr_t value = compiler::target::SmiValue(constant);
3834 ASSERT((0 < value) && (value < kCountLimit));
3835 if (shift_left->can_overflow()) {
3836 // Check for overflow (preserve left).
3837 __ Lsl(IP, left, compiler::Operand(value));
3838 __ cmp(left, compiler::Operand(IP, ASR, value));
3839 __ b(deopt, NE); // Overflow.
3840 }
3841    // Shift for the result now that we know there is no overflow.
3842 __ Lsl(result, left, compiler::Operand(value));
3843 return;
3844 }
3845
3846 // Right (locs.in(1)) is not constant.
3847 const Register right = locs.in(1).reg();
3848 Range* right_range = shift_left->right_range();
3849 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3850 // TODO(srdjan): Implement code below for is_truncating().
3851 // If left is constant, we know the maximal allowed size for right.
3852 const Object& obj = shift_left->left()->BoundConstant();
3853 if (compiler::target::IsSmi(obj)) {
3854 const intptr_t left_int = compiler::target::SmiValue(obj);
3855 if (left_int == 0) {
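        // Shifting the constant 0 produces 0 for any non-negative count;
        // negative counts deoptimize.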
3856 __ cmp(right, compiler::Operand(0));
3857 __ b(deopt, MI);
3858 __ mov(result, compiler::Operand(0));
3859 return;
3860 }
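      // Shift counts up to max_right - 1 keep left << right within Smi range;
      // anything larger deoptimizes.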
3861 const intptr_t max_right =
3862 compiler::target::kSmiBits - Utils::HighestBit(left_int);
3863 const bool right_needs_check =
3864 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3865 if (right_needs_check) {
3866 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(max_right)));
3867 __ b(deopt, CS);
3868 }
3869 __ SmiUntag(IP, right);
3870 __ Lsl(result, left, IP);
3871 }
3872 return;
3873 }
3874
3875 const bool right_needs_check =
3876 !RangeUtils::IsWithin(right_range, 0, (compiler::target::kSmiBits - 1));
3877 if (!shift_left->can_overflow()) {
3878 if (right_needs_check) {
3879 if (!RangeUtils::IsPositive(right_range)) {
3880 ASSERT(shift_left->CanDeoptimize());
3881 __ cmp(right, compiler::Operand(0));
3882 __ b(deopt, MI);
3883 }
3884
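      // The compare predicates the rest of the sequence: counts >= kSmiBits
      // produce 0 (CS), smaller counts are untagged and used for the shift
      // (CC).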
3885 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(
3886 compiler::target::kSmiBits)));
3887 __ mov(result, compiler::Operand(0), CS);
3888 __ SmiUntag(IP, right, CC); // SmiUntag right into IP if CC.
3889 __ Lsl(result, left, IP, CC);
3890 } else {
3891 __ SmiUntag(IP, right);
3892 __ Lsl(result, left, IP);
3893 }
3894 } else {
3895 if (right_needs_check) {
3896 ASSERT(shift_left->CanDeoptimize());
3897 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(
3898 compiler::target::kSmiBits)));
3899 __ b(deopt, CS);
3900 }
3901 // Left is not a constant.
3902    // Check whether the count is too large to handle inline.
3903    __ SmiUntag(IP, right);
3904    // Overflow test (preserve left, right, and IP).
3905 const Register temp = locs.temp(0).reg();
3906 __ Lsl(temp, left, IP);
3907 __ cmp(left, compiler::Operand(temp, ASR, IP));
3908 __ b(deopt, NE); // Overflow.
3909    // Shift for the result now that we know there is no overflow.
3910 __ Lsl(result, left, IP);
3911 }
3912}
3913
3914class CheckedSmiSlowPath : public TemplateSlowPathCode<CheckedSmiOpInstr> {
3915 public:
3916 CheckedSmiSlowPath(CheckedSmiOpInstr* instruction, intptr_t try_index)
3917 : TemplateSlowPathCode(instruction), try_index_(try_index) {}
3918
3919 static constexpr intptr_t kNumSlowPathArgs = 2;
3920
3921 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3922 if (compiler::Assembler::EmittingComments()) {
3923 __ Comment("slow path smi operation");
3924 }
3925 __ Bind(entry_label());
3926 LocationSummary* locs = instruction()->locs();
3927 Register result = locs->out(0).reg();
3928 locs->live_registers()->Remove(Location::RegisterLocation(result));
3929
3930 compiler->SaveLiveRegisters(locs);
3931 if (instruction()->env() != NULL) {
3932 Environment* env =
3933 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
3934 compiler->pending_deoptimization_env_ = env;
3935 }
3936 __ Push(locs->in(0).reg());
3937 __ Push(locs->in(1).reg());
3938 const auto& selector = String::Handle(instruction()->call()->Selector());
3939 const auto& arguments_descriptor =
3940 Array::Handle(ArgumentsDescriptor::NewBoxed(
3941 /*type_args_len=*/0, /*num_arguments=*/2));
3942 compiler->EmitMegamorphicInstanceCall(
3943 selector, arguments_descriptor, instruction()->call()->deopt_id(),
3944 instruction()->token_pos(), locs, try_index_, kNumSlowPathArgs);
3945 __ mov(result, compiler::Operand(R0));
3946 compiler->RestoreLiveRegisters(locs);
3947 __ b(exit_label());
3948 compiler->pending_deoptimization_env_ = NULL;
3949 }
3950
3951 private:
3952 intptr_t try_index_;
3953};
3954
3955LocationSummary* CheckedSmiOpInstr::MakeLocationSummary(Zone* zone,
3956 bool opt) const {
3957 const intptr_t kNumInputs = 2;
3958 const intptr_t kNumTemps = 0;
3959 LocationSummary* summary = new (zone) LocationSummary(
3960 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3961 summary->set_in(0, Location::RequiresRegister());
3962 summary->set_in(1, Location::RequiresRegister());
3963 summary->set_out(0, Location::RequiresRegister());
3964 return summary;
3965}
3966
3967void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3968 CheckedSmiSlowPath* slow_path =
3969 new CheckedSmiSlowPath(this, compiler->CurrentTryIndex());
3970 compiler->AddSlowPathCode(slow_path);
3971 // Test operands if necessary.
3972 Register left = locs()->in(0).reg();
3973 Register right = locs()->in(1).reg();
3974 Register result = locs()->out(0).reg();
3975 intptr_t left_cid = this->left()->Type()->ToCid();
3976 intptr_t right_cid = this->right()->Type()->ToCid();
3977 bool combined_smi_check = false;
3978 if (this->left()->definition() == this->right()->definition()) {
3979 __ tst(left, compiler::Operand(kSmiTagMask));
3980 } else if (left_cid == kSmiCid) {
3981 __ tst(right, compiler::Operand(kSmiTagMask));
3982 } else if (right_cid == kSmiCid) {
3983 __ tst(left, compiler::Operand(kSmiTagMask));
3984 } else {
3985 combined_smi_check = true;
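    // Neither operand is known to be a Smi: OR the values together so a
    // single tag test covers both.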
3986 __ orr(result, left, compiler::Operand(right));
3987 __ tst(result, compiler::Operand(kSmiTagMask));
3988 }
3989 __ b(slow_path->entry_label(), NE);
3990 switch (op_kind()) {
3991 case Token::kADD:
3992 __ adds(result, left, compiler::Operand(right));
3993 __ b(slow_path->entry_label(), VS);
3994 break;
3995 case Token::kSUB:
3996 __ subs(result, left, compiler::Operand(right));
3997 __ b(slow_path->entry_label(), VS);
3998 break;
3999 case Token::kMUL:
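      // Untag one operand so the product stays tagged, then check that the
      // high word of the 64-bit product is the sign extension of the low
      // word, i.e. that the multiplication did not overflow.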
4000 __ SmiUntag(IP, left);
4001 __ smull(result, IP, IP, right);
4002 // IP: result bits 32..63.
4003 __ cmp(IP, compiler::Operand(result, ASR, 31));
4004 __ b(slow_path->entry_label(), NE);
4005 break;
4006 case Token::kBIT_OR:
4007 // Operation may be part of combined smi check.
4008 if (!combined_smi_check) {
4009 __ orr(result, left, compiler::Operand(right));
4010 }
4011 break;
4012 case Token::kBIT_AND:
4013 __ and_(result, left, compiler::Operand(right));
4014 break;
4015 case Token::kBIT_XOR:
4016 __ eor(result, left, compiler::Operand(right));
4017 break;
4018 case Token::kSHL:
4019 ASSERT(result != left);
4020 ASSERT(result != right);
4021 __ CompareImmediate(
4022 right, compiler::target::ToRawSmi(compiler::target::kSmiBits));
4023 __ b(slow_path->entry_label(), HI);
4024
4025 __ SmiUntag(TMP, right);
4026 // Check for overflow by shifting left and shifting back arithmetically.
4027 // If the result is different from the original, there was overflow.
4028 __ Lsl(result, left, TMP);
4029 __ cmp(left, compiler::Operand(result, ASR, TMP));
4030 __ b(slow_path->entry_label(), NE);
4031 break;
4032 case Token::kSHR:
4033 ASSERT(result != left);
4034 ASSERT(result != right);
4035 __ CompareImmediate(
4036 right, compiler::target::ToRawSmi(compiler::target::kSmiBits));
4037 __ b(slow_path->entry_label(), HI);
4038
4039 __ SmiUntag(result, right);
4040 __ SmiUntag(TMP, left);
4041 __ Asr(result, TMP, result);
4042 __ SmiTag(result);
4043 break;
4044 default:
4045 UNREACHABLE();
4046 }
4047 __ Bind(slow_path->exit_label());
4048}
4049
4050class CheckedSmiComparisonSlowPath
4051 : public TemplateSlowPathCode<CheckedSmiComparisonInstr> {
4052 public:
4053 static constexpr intptr_t kNumSlowPathArgs = 2;
4054
4055 CheckedSmiComparisonSlowPath(CheckedSmiComparisonInstr* instruction,
4056 Environment* env,
4057 intptr_t try_index,
4058 BranchLabels labels,
4059 bool merged)
4060 : TemplateSlowPathCode(instruction),
4061 try_index_(try_index),
4062 labels_(labels),
4063 merged_(merged),
4064 env_(env) {
4065 // The environment must either come from the comparison or the environment
4066 // was cleared from the comparison (and moved to a branch).
4067 ASSERT(env == instruction->env() ||
4068 (merged && instruction->env() == nullptr));
4069 }
4070
4071 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
4072 if (compiler::Assembler::EmittingComments()) {
4073 __ Comment("slow path smi operation");
4074 }
4075 __ Bind(entry_label());
4076 LocationSummary* locs = instruction()->locs();
4077 Register result = merged_ ? locs->temp(0).reg() : locs->out(0).reg();
4078 locs->live_registers()->Remove(Location::RegisterLocation(result));
4079
4080 compiler->SaveLiveRegisters(locs);
4081 if (env_ != nullptr) {
4082 compiler->pending_deoptimization_env_ =
4083 compiler->SlowPathEnvironmentFor(env_, locs, kNumSlowPathArgs);
4084 }
4085 __ Push(locs->in(0).reg());
4086 __ Push(locs->in(1).reg());
4087 const auto& selector = String::Handle(instruction()->call()->Selector());
4088 const auto& arguments_descriptor =
4089 Array::Handle(ArgumentsDescriptor::NewBoxed(
4090 /*type_args_len=*/0, /*num_arguments=*/2));
4091 compiler->EmitMegamorphicInstanceCall(
4092 selector, arguments_descriptor, instruction()->call()->deopt_id(),
4093 instruction()->token_pos(), locs, try_index_, kNumSlowPathArgs);
4094 __ mov(result, compiler::Operand(R0));
4095 compiler->RestoreLiveRegisters(locs);
4096 compiler->pending_deoptimization_env_ = nullptr;
4097 if (merged_) {
4098 __ CompareObject(result, Bool::True());
4099 __ b(instruction()->is_negated() ? labels_.false_label
4100 : labels_.true_label,
4101 EQ);
4102 __ b(instruction()->is_negated() ? labels_.true_label
4103 : labels_.false_label);
4104 } else {
4105 if (instruction()->is_negated()) {
4106 // Need to negate the result of slow path call.
4107 __ CompareObject(result, Bool::True());
4108 __ LoadObject(result, Bool::True(), NE);
4109 __ LoadObject(result, Bool::False(), EQ);
4110 }
4111 __ b(exit_label());
4112 }
4113 }
4114
4115 private:
4116 intptr_t try_index_;
4117 BranchLabels labels_;
4118 bool merged_;
4119 Environment* env_;
4120};
4121
4122LocationSummary* CheckedSmiComparisonInstr::MakeLocationSummary(
4123 Zone* zone,
4124 bool opt) const {
4125 const intptr_t kNumInputs = 2;
4126 const intptr_t kNumTemps = 1;
4127 LocationSummary* summary = new (zone) LocationSummary(
4128 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4129 summary->set_in(0, Location::RequiresRegister());
4130 summary->set_in(1, Location::RequiresRegister());
4131 summary->set_temp(0, Location::RequiresRegister());
4132 summary->set_out(0, Location::RequiresRegister());
4133 return summary;
4134}
4135
4136Condition CheckedSmiComparisonInstr::EmitComparisonCode(
4137 FlowGraphCompiler* compiler,
4138 BranchLabels labels) {
4139 return EmitSmiComparisonOp(compiler, locs(), kind());
4140}
4141
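// Checks that both operands are Smis (using a combined tag test when neither
// operand is known to be one) and branches to the slow path otherwise.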
4142#define EMIT_SMI_CHECK \
4143 Register left = locs()->in(0).reg(); \
4144 Register right = locs()->in(1).reg(); \
4145 Register temp = locs()->temp(0).reg(); \
4146 intptr_t left_cid = this->left()->Type()->ToCid(); \
4147 intptr_t right_cid = this->right()->Type()->ToCid(); \
4148 if (this->left()->definition() == this->right()->definition()) { \
4149 __ tst(left, compiler::Operand(kSmiTagMask)); \
4150 } else if (left_cid == kSmiCid) { \
4151 __ tst(right, compiler::Operand(kSmiTagMask)); \
4152 } else if (right_cid == kSmiCid) { \
4153 __ tst(left, compiler::Operand(kSmiTagMask)); \
4154 } else { \
4155 __ orr(temp, left, compiler::Operand(right)); \
4156 __ tst(temp, compiler::Operand(kSmiTagMask)); \
4157 } \
4158 __ b(slow_path->entry_label(), NE)
4159
4160void CheckedSmiComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
4161 BranchInstr* branch) {
4162 BranchLabels labels = compiler->CreateBranchLabels(branch);
4163 CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath(
4164 this, branch->env(), compiler->CurrentTryIndex(), labels,
4165 /* merged = */ true);
4166 compiler->AddSlowPathCode(slow_path);
4167 EMIT_SMI_CHECK;
4168 Condition true_condition = EmitComparisonCode(compiler, labels);
4169 ASSERT(true_condition != kInvalidCondition);
4170 EmitBranchOnCondition(compiler, true_condition, labels);
4171 __ Bind(slow_path->exit_label());
4172}
4173
4174void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4175 BranchLabels labels = {NULL, NULL, NULL};
4176 CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath(
4177 this, env(), compiler->CurrentTryIndex(), labels,
4178 /* merged = */ false);
4179 compiler->AddSlowPathCode(slow_path);
4180 EMIT_SMI_CHECK;
4181 Condition true_condition = EmitComparisonCode(compiler, labels);
4182 ASSERT(true_condition != kInvalidCondition);
4183 Register result = locs()->out(0).reg();
4184 __ LoadObject(result, Bool::True(), true_condition);
4185 __ LoadObject(result, Bool::False(), InvertCondition(true_condition));
4186 __ Bind(slow_path->exit_label());
4187}
4188#undef EMIT_SMI_CHECK
4189
4190LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
4191 bool opt) const {
4192 const intptr_t kNumInputs = 2;
4193 // Calculate number of temporaries.
4194 intptr_t num_temps = 0;
4195 if (op_kind() == Token::kTRUNCDIV) {
4196 if (RightIsPowerOfTwoConstant()) {
4197 num_temps = 1;
4198 } else {
4199 num_temps = 2;
4200 }
4201 } else if (op_kind() == Token::kMOD) {
4202 num_temps = 2;
4203 } else if (((op_kind() == Token::kSHL) && can_overflow()) ||
4204 (op_kind() == Token::kSHR)) {
4205 num_temps = 1;
4206 }
4207 LocationSummary* summary = new (zone)
4208 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
4209 if (op_kind() == Token::kTRUNCDIV) {
4210 summary->set_in(0, Location::RequiresRegister());
4211 if (RightIsPowerOfTwoConstant()) {
4212 ConstantInstr* right_constant = right()->definition()->AsConstant();
4213 summary->set_in(1, Location::Constant(right_constant));
4214 summary->set_temp(0, Location::RequiresRegister());
4215 } else {
4216 summary->set_in(1, Location::RequiresRegister());
4217 summary->set_temp(0, Location::RequiresRegister());
4218 // Request register that overlaps with S0..S31.
4219 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
4220 }
4221 summary->set_out(0, Location::RequiresRegister());
4222 return summary;
4223 }
4224 if (op_kind() == Token::kMOD) {
4225 summary->set_in(0, Location::RequiresRegister());
4226 summary->set_in(1, Location::RequiresRegister());
4227 summary->set_temp(0, Location::RequiresRegister());
4228 // Request register that overlaps with S0..S31.
4229 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
4230 summary->set_out(0, Location::RequiresRegister());
4231 return summary;
4232 }
4233 summary->set_in(0, Location::RequiresRegister());
4234 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
4235 if (((op_kind() == Token::kSHL) && can_overflow()) ||
4236 (op_kind() == Token::kSHR)) {
4237 summary->set_temp(0, Location::RequiresRegister());
4238 }
4239  // We make use of 3-operand instructions by not requiring the result register
4240  // to be identical to the first input register, as is required on Intel.
4241 summary->set_out(0, Location::RequiresRegister());
4242 return summary;
4243}
4244
4245void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4246 if (op_kind() == Token::kSHL) {
4247 EmitSmiShiftLeft(compiler, this);
4248 return;
4249 }
4250
4251 const Register left = locs()->in(0).reg();
4252 const Register result = locs()->out(0).reg();
4253 compiler::Label* deopt = NULL;
4254 if (CanDeoptimize()) {
4255 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
4256 }
4257
4258 if (locs()->in(1).IsConstant()) {
4259 const Object& constant = locs()->in(1).constant();
4260 ASSERT(compiler::target::IsSmi(constant));
4261 const int32_t imm = compiler::target::ToRawSmi(constant);
4262 switch (op_kind()) {
4263 case Token::kADD: {
4264 if (deopt == NULL) {
4265 __ AddImmediate(result, left, imm);
4266 } else {
4267 __ AddImmediateSetFlags(result, left, imm);
4268 __ b(deopt, VS);
4269 }
4270 break;
4271 }
4272 case Token::kSUB: {
4273 if (deopt == NULL) {
4274 __ AddImmediate(result, left, -imm);
4275 } else {
4276 // Negating imm and using AddImmediateSetFlags would not detect the
4277 // overflow when imm == kMinInt32.
4278 __ SubImmediateSetFlags(result, left, imm);
4279 __ b(deopt, VS);
4280 }
4281 break;
4282 }
4283 case Token::kMUL: {
4284 // Keep left value tagged and untag right value.
4285 const intptr_t value = compiler::target::SmiValue(constant);
4286 if (deopt == NULL) {
4287 __ LoadImmediate(IP, value);
4288 __ mul(result, left, IP);
4289 } else {
4290 __ LoadImmediate(IP, value);
4291 __ smull(result, IP, left, IP);
4292 // IP: result bits 32..63.
4293 __ cmp(IP, compiler::Operand(result, ASR, 31));
4294 __ b(deopt, NE);
4295 }
4296 break;
4297 }
4298 case Token::kTRUNCDIV: {
4299 const intptr_t value = compiler::target::SmiValue(constant);
4300 ASSERT(value != kIntptrMin);
4301 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
4302 const intptr_t shift_count =
4303 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
4304 ASSERT(kSmiTagSize == 1);
4305 __ mov(IP, compiler::Operand(left, ASR, 31));
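        // IP is 0 for a non-negative left and all ones for a negative one, so
        // the add below biases negative values by (2^shift_count - 1), making
        // the arithmetic shift round toward zero.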
4306 ASSERT(shift_count > 1); // 1, -1 case handled above.
4307 const Register temp = locs()->temp(0).reg();
4308 __ add(temp, left, compiler::Operand(IP, LSR, 32 - shift_count));
4309 ASSERT(shift_count > 0);
4310 __ mov(result, compiler::Operand(temp, ASR, shift_count));
4311 if (value < 0) {
4312 __ rsb(result, result, compiler::Operand(0));
4313 }
4314 __ SmiTag(result);
4315 break;
4316 }
4317 case Token::kBIT_AND: {
4318 // No overflow check.
4319 compiler::Operand o;
4320 if (compiler::Operand::CanHold(imm, &o)) {
4321 __ and_(result, left, o);
4322 } else if (compiler::Operand::CanHold(~imm, &o)) {
4323 __ bic(result, left, o);
4324 } else {
4325 __ LoadImmediate(IP, imm);
4326 __ and_(result, left, compiler::Operand(IP));
4327 }
4328 break;
4329 }
4330 case Token::kBIT_OR: {
4331 // No overflow check.
4332 compiler::Operand o;
4333 if (compiler::Operand::CanHold(imm, &o)) {
4334 __ orr(result, left, o);
4335 } else {
4336 __ LoadImmediate(IP, imm);
4337 __ orr(result, left, compiler::Operand(IP));
4338 }
4339 break;
4340 }
4341 case Token::kBIT_XOR: {
4342 // No overflow check.
4343 compiler::Operand o;
4344 if (compiler::Operand::CanHold(imm, &o)) {
4345 __ eor(result, left, o);
4346 } else {
4347 __ LoadImmediate(IP, imm);
4348 __ eor(result, left, compiler::Operand(IP));
4349 }
4350 break;
4351 }
4352 case Token::kSHR: {
4353        // Arithmetic shifts by 31 or more bits all give the same result.
4354 const intptr_t kCountLimit = 0x1F;
4355 intptr_t value = compiler::target::SmiValue(constant);
4356 __ Asr(result, left,
4357 compiler::Operand(
4358 Utils::Minimum(value + kSmiTagSize, kCountLimit)));
4359 __ SmiTag(result);
4360 break;
4361 }
4362
4363 default:
4364 UNREACHABLE();
4365 break;
4366 }
4367 return;
4368 }
4369
4370 const Register right = locs()->in(1).reg();
4371 switch (op_kind()) {
4372 case Token::kADD: {
4373 if (deopt == NULL) {
4374 __ add(result, left, compiler::Operand(right));
4375 } else {
4376 __ adds(result, left, compiler::Operand(right));
4377 __ b(deopt, VS);
4378 }
4379 break;
4380 }
4381 case Token::kSUB: {
4382 if (deopt == NULL) {
4383 __ sub(result, left, compiler::Operand(right));
4384 } else {
4385 __ subs(result, left, compiler::Operand(right));
4386 __ b(deopt, VS);
4387 }
4388 break;
4389 }
4390 case Token::kMUL: {
4391 __ SmiUntag(IP, left);
4392 if (deopt == NULL) {
4393 __ mul(result, IP, right);
4394 } else {
4395 __ smull(result, IP, IP, right);
4396 // IP: result bits 32..63.
4397 __ cmp(IP, compiler::Operand(result, ASR, 31));
4398 __ b(deopt, NE);
4399 }
4400 break;
4401 }
4402 case Token::kBIT_AND: {
4403 // No overflow check.
4404 __ and_(result, left, compiler::Operand(right));
4405 break;
4406 }
4407 case Token::kBIT_OR: {
4408 // No overflow check.
4409 __ orr(result, left, compiler::Operand(right));
4410 break;
4411 }
4412 case Token::kBIT_XOR: {
4413 // No overflow check.
4414 __ eor(result, left, compiler::Operand(right));
4415 break;
4416 }
4417 case Token::kTRUNCDIV: {
4418 ASSERT(TargetCPUFeatures::can_divide());
4419 if (RangeUtils::CanBeZero(right_range())) {
4420 // Handle divide by zero in runtime.
4421 __ cmp(right, compiler::Operand(0));
4422 __ b(deopt, EQ);
4423 }
4424 const Register temp = locs()->temp(0).reg();
4425 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
4426 __ SmiUntag(temp, left);
4427 __ SmiUntag(IP, right);
4428 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
4429
4430 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
4431 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
4432 // case we cannot tag the result.
4433 __ CompareImmediate(result, 0x40000000);
4434 __ b(deopt, EQ);
4435 }
4436 __ SmiTag(result);
4437 break;
4438 }
4439 case Token::kMOD: {
4440 ASSERT(TargetCPUFeatures::can_divide());
4441 if (RangeUtils::CanBeZero(right_range())) {
4442 // Handle divide by zero in runtime.
4443 __ cmp(right, compiler::Operand(0));
4444 __ b(deopt, EQ);
4445 }
4446 const Register temp = locs()->temp(0).reg();
4447 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
4448 __ SmiUntag(temp, left);
4449 __ SmiUntag(IP, right);
4450 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
4451 __ SmiUntag(IP, right);
4452 __ mls(result, IP, result, temp); // result <- left - right * result
4453 __ SmiTag(result);
4454 // res = left % right;
4455 // if (res < 0) {
4456 // if (right < 0) {
4457 // res = res - right;
4458 // } else {
4459 // res = res + right;
4460 // }
4461 // }
4462 compiler::Label done;
4463 __ cmp(result, compiler::Operand(0));
4464 __ b(&done, GE);
4465 // Result is negative, adjust it.
4466 __ cmp(right, compiler::Operand(0));
4467 __ sub(result, result, compiler::Operand(right), LT);
4468 __ add(result, result, compiler::Operand(right), GE);
4469 __ Bind(&done);
4470 break;
4471 }
4472 case Token::kSHR: {
4473 if (CanDeoptimize()) {
4474 __ CompareImmediate(right, 0);
4475 __ b(deopt, LT);
4476 }
4477 __ SmiUntag(IP, right);
4478      // Shift counts above 31 give the same result as a shift by 31.
4479 const intptr_t kCountLimit = 0x1F;
4480 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
4481 __ CompareImmediate(IP, kCountLimit);
4482 __ LoadImmediate(IP, kCountLimit, GT);
4483 }
4484 const Register temp = locs()->temp(0).reg();
4485 __ SmiUntag(temp, left);
4486 __ Asr(result, temp, IP);
4487 __ SmiTag(result);
4488 break;
4489 }
4490 case Token::kDIV: {
4491 // Dispatches to 'Double./'.
4492 // TODO(srdjan): Implement as conversion to double and double division.
4493 UNREACHABLE();
4494 break;
4495 }
4496 case Token::kOR:
4497 case Token::kAND: {
4498 // Flow graph builder has dissected this operation to guarantee correct
4499 // behavior (short-circuit evaluation).
4500 UNREACHABLE();
4501 break;
4502 }
4503 default:
4504 UNREACHABLE();
4505 break;
4506 }
4507}
4508
4509static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
4510 BinaryInt32OpInstr* shift_left) {
4511 const LocationSummary& locs = *shift_left->locs();
4512 const Register left = locs.in(0).reg();
4513 const Register result = locs.out(0).reg();
4514 compiler::Label* deopt =
4515 shift_left->CanDeoptimize()
4516 ? compiler->AddDeoptStub(shift_left->deopt_id(),
4517 ICData::kDeoptBinarySmiOp)
4518 : NULL;
4519 ASSERT(locs.in(1).IsConstant());
4520 const Object& constant = locs.in(1).constant();
4521 ASSERT(compiler::target::IsSmi(constant));
4522 // Immediate shift operation takes 5 bits for the count.
4523 const intptr_t kCountLimit = 0x1F;
4524 const intptr_t value = compiler::target::SmiValue(constant);
4525 ASSERT((0 < value) && (value < kCountLimit));
4526 if (shift_left->can_overflow()) {
4527 // Check for overflow (preserve left).
4528 __ Lsl(IP, left, compiler::Operand(value));
4529 __ cmp(left, compiler::Operand(IP, ASR, value));
4530 __ b(deopt, NE); // Overflow.
4531 }
4532  // Shift for the result now that we know there is no overflow.
4533 __ Lsl(result, left, compiler::Operand(value));
4534}
4535
4536LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
4537 bool opt) const {
4538 const intptr_t kNumInputs = 2;
4539 // Calculate number of temporaries.
4540 intptr_t num_temps = 0;
4541 if (((op_kind() == Token::kSHL) && can_overflow()) ||
4542 (op_kind() == Token::kSHR)) {
4543 num_temps = 1;
4544 }
4545 LocationSummary* summary = new (zone)
4546 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
4547 summary->set_in(0, Location::RequiresRegister());
4548 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
4549 if (((op_kind() == Token::kSHL) && can_overflow()) ||
4550 (op_kind() == Token::kSHR)) {
4551 summary->set_temp(0, Location::RequiresRegister());
4552 }
4553  // We make use of 3-operand instructions by not requiring the result register
4554  // to be identical to the first input register, as is required on Intel.
4555 summary->set_out(0, Location::RequiresRegister());
4556 return summary;
4557}
4558
4559void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4560 if (op_kind() == Token::kSHL) {
4561 EmitInt32ShiftLeft(compiler, this);
4562 return;
4563 }
4564
4565 const Register left = locs()->in(0).reg();
4566 const Register result = locs()->out(0).reg();
4567 compiler::Label* deopt = NULL;
4568 if (CanDeoptimize()) {
4569 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
4570 }
4571
4572 if (locs()->in(1).IsConstant()) {
4573 const Object& constant = locs()->in(1).constant();
4574 ASSERT(compiler::target::IsSmi(constant));
4575 const intptr_t value = compiler::target::SmiValue(constant);
4576 switch (op_kind()) {
4577 case Token::kADD: {
4578 if (deopt == NULL) {
4579 __ AddImmediate(result, left, value);
4580 } else {
4581 __ AddImmediateSetFlags(result, left, value);
4582 __ b(deopt, VS);
4583 }
4584 break;
4585 }
4586 case Token::kSUB: {
4587 if (deopt == NULL) {
4588 __ AddImmediate(result, left, -value);
4589 } else {
4590 // Negating value and using AddImmediateSetFlags would not detect the
4591 // overflow when value == kMinInt32.
4592 __ SubImmediateSetFlags(result, left, value);
4593 __ b(deopt, VS);
4594 }
4595 break;
4596 }
4597 case Token::kMUL: {
4598 if (deopt == NULL) {
4599 __ LoadImmediate(IP, value);
4600 __ mul(result, left, IP);
4601 } else {
4602 __ LoadImmediate(IP, value);
4603 __ smull(result, IP, left, IP);
4604 // IP: result bits 32..63.
4605 __ cmp(IP, compiler::Operand(result, ASR, 31));
4606 __ b(deopt, NE);
4607 }
4608 break;
4609 }
4610 case Token::kBIT_AND: {
4611 // No overflow check.
4612 compiler::Operand o;
4613 if (compiler::Operand::CanHold(value, &o)) {
4614 __ and_(result, left, o);
4615 } else if (compiler::Operand::CanHold(~value, &o)) {
4616 __ bic(result, left, o);
4617 } else {
4618 __ LoadImmediate(IP, value);
4619 __ and_(result, left, compiler::Operand(IP));
4620 }
4621 break;
4622 }
4623 case Token::kBIT_OR: {
4624 // No overflow check.
4625 compiler::Operand o;
4626 if (compiler::Operand::CanHold(value, &o)) {
4627 __ orr(result, left, o);
4628 } else {
4629 __ LoadImmediate(IP, value);
4630 __ orr(result, left, compiler::Operand(IP));
4631 }
4632 break;
4633 }
4634 case Token::kBIT_XOR: {
4635 // No overflow check.
4636 compiler::Operand o;
4637 if (compiler::Operand::CanHold(value, &o)) {
4638 __ eor(result, left, o);
4639 } else {
4640 __ LoadImmediate(IP, value);
4641 __ eor(result, left, compiler::Operand(IP));
4642 }
4643 break;
4644 }
4645 case Token::kSHR: {
4646        // Arithmetic shifts by 31 or more bits all give the same result.
4647 const intptr_t kCountLimit = 0x1F;
4648 __ Asr(result, left,
4649 compiler::Operand(Utils::Minimum(value, kCountLimit)));
4650 break;
4651 }
4652
4653 default:
4654 UNREACHABLE();
4655 break;
4656 }
4657 return;
4658 }
4659
4660 const Register right = locs()->in(1).reg();
4661 switch (op_kind()) {
4662 case Token::kADD: {
4663 if (deopt == NULL) {
4664 __ add(result, left, compiler::Operand(right));
4665 } else {
4666 __ adds(result, left, compiler::Operand(right));
4667 __ b(deopt, VS);
4668 }
4669 break;
4670 }
4671 case Token::kSUB: {
4672 if (deopt == NULL) {
4673 __ sub(result, left, compiler::Operand(right));
4674 } else {
4675 __ subs(result, left, compiler::Operand(right));
4676 __ b(deopt, VS);
4677 }
4678 break;
4679 }
4680 case Token::kMUL: {
4681 if (deopt == NULL) {
4682 __ mul(result, left, right);
4683 } else {
4684 __ smull(result, IP, left, right);
4685 // IP: result bits 32..63.
4686 __ cmp(IP, compiler::Operand(result, ASR, 31));
4687 __ b(deopt, NE);
4688 }
4689 break;
4690 }
4691 case Token::kBIT_AND: {
4692 // No overflow check.
4693 __ and_(result, left, compiler::Operand(right));
4694 break;
4695 }
4696 case Token::kBIT_OR: {
4697 // No overflow check.
4698 __ orr(result, left, compiler::Operand(right));
4699 break;
4700 }
4701 case Token::kBIT_XOR: {
4702 // No overflow check.
4703 __ eor(result, left, compiler::Operand(right));
4704 break;
4705 }
4706 default:
4707 UNREACHABLE();
4708 break;
4709 }
4710}
4711
4712LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
4713 bool opt) const {
4714 intptr_t left_cid = left()->Type()->ToCid();
4715 intptr_t right_cid = right()->Type()->ToCid();
4716 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
4717 const intptr_t kNumInputs = 2;
4718 const intptr_t kNumTemps = 0;
4719 LocationSummary* summary = new (zone)
4720 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4721 summary->set_in(0, Location::RequiresRegister());
4722 summary->set_in(1, Location::RequiresRegister());
4723 return summary;
4724}
4725
4726void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4727 compiler::Label* deopt =
4728 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp,
4729 licm_hoisted_ ? ICData::kHoisted : 0);
4730 intptr_t left_cid = left()->Type()->ToCid();
4731 intptr_t right_cid = right()->Type()->ToCid();
4732 const Register left = locs()->in(0).reg();
4733 const Register right = locs()->in(1).reg();
4734 if (this->left()->definition() == this->right()->definition()) {
4735 __ tst(left, compiler::Operand(kSmiTagMask));
4736 } else if (left_cid == kSmiCid) {
4737 __ tst(right, compiler::Operand(kSmiTagMask));
4738 } else if (right_cid == kSmiCid) {
4739 __ tst(left, compiler::Operand(kSmiTagMask));
4740 } else {
4741 __ orr(IP, left, compiler::Operand(right));
4742 __ tst(IP, compiler::Operand(kSmiTagMask));
4743 }
4744 __ b(deopt, EQ);
4745}
4746
4747LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4748 const intptr_t kNumInputs = 1;
4749 const intptr_t kNumTemps = 1;
4750 LocationSummary* summary = new (zone) LocationSummary(
4751 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4752 summary->set_in(0, Location::RequiresFpuRegister());
4753 summary->set_temp(0, Location::RequiresRegister());
4754 summary->set_out(0, Location::RequiresRegister());
4755 return summary;
4756}
4757
4758void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4759 const Register out_reg = locs()->out(0).reg();
4760 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
4761
4762 BoxAllocationSlowPath::Allocate(compiler, this,
4763 compiler->BoxClassFor(from_representation()),
4764 out_reg, locs()->temp(0).reg());
4765
4766 switch (from_representation()) {
4767 case kUnboxedDouble:
4768 __ StoreDToOffset(value, out_reg, ValueOffset() - kHeapObjectTag);
4769 break;
4770 case kUnboxedFloat:
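      // Floats are boxed as Doubles, so widen the single-precision value
      // before storing it into the box.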
4771 __ vcvtds(DTMP, EvenSRegisterOf(value));
4772 __ StoreDToOffset(EvenDRegisterOf(FpuTMP), out_reg,
4773 ValueOffset() - kHeapObjectTag);
4774 break;
4775 case kUnboxedFloat32x4:
4776 case kUnboxedFloat64x2:
4777 case kUnboxedInt32x4:
4778 __ StoreMultipleDToOffset(value, 2, out_reg,
4779 ValueOffset() - kHeapObjectTag);
4780 break;
4781 default:
4782 UNREACHABLE();
4783 break;
4784 }
4785}
4786
4787LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4788 const bool needs_temp = CanDeoptimize();
4789 const intptr_t kNumInputs = 1;
4790 const intptr_t kNumTemps = needs_temp ? 1 : 0;
4791 LocationSummary* summary = new (zone)
4792 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4793 summary->set_in(0, Location::RequiresRegister());
4794 if (needs_temp) {
4795 summary->set_temp(0, Location::RequiresRegister());
4796 }
4797 if (representation() == kUnboxedInt64) {
4798 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
4799 Location::RequiresRegister()));
4800 } else if (representation() == kUnboxedInt32) {
4801 summary->set_out(0, Location::RequiresRegister());
4802 } else if (representation() == kUnboxedFloat) {
4803 // Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
4804 // TODO(30953): Support register range constraints in the regalloc.
4805 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4806 } else {
4807 summary->set_out(0, Location::RequiresFpuRegister());
4808 }
4809 return summary;
4810}
4811
4812void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
4813 const Register box = locs()->in(0).reg();
4814
4815 switch (representation()) {
4816 case kUnboxedInt64: {
4817 PairLocation* result = locs()->out(0).AsPairLocation();
4818 ASSERT(result->At(0).reg() != box);
4819 __ LoadFieldFromOffset(kWord, result->At(0).reg(), box, ValueOffset());
4820 __ LoadFieldFromOffset(kWord, result->At(1).reg(), box,
4821 ValueOffset() + compiler::target::kWordSize);
4822 break;
4823 }
4824
4825 case kUnboxedDouble: {
4826 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4827 __ LoadDFromOffset(result, box, ValueOffset() - kHeapObjectTag);
4828 break;
4829 }
4830
4831 case kUnboxedFloat: {
4832 // Should only be <= Q7, because >= Q8 cannot be addressed as S register.
4833 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4834 __ LoadDFromOffset(result, box, ValueOffset() - kHeapObjectTag);
4835 __ vcvtsd(EvenSRegisterOf(result), result);
4836 break;
4837 }
4838
4839 case kUnboxedFloat32x4:
4840 case kUnboxedFloat64x2:
4841 case kUnboxedInt32x4: {
4842 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4843 __ LoadMultipleDFromOffset(result, 2, box,
4844 ValueOffset() - kHeapObjectTag);
4845 break;
4846 }
4847
4848 default:
4849 UNREACHABLE();
4850 break;
4851 }
4852}
4853
4854void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
4855 const Register box = locs()->in(0).reg();
4856
4857 switch (representation()) {
4858 case kUnboxedInt64: {
4859 PairLocation* result = locs()->out(0).AsPairLocation();
4860 __ SmiUntag(result->At(0).reg(), box);
4861 __ SignFill(result->At(1).reg(), result->At(0).reg());
4862 break;
4863 }
4864
4865 case kUnboxedDouble: {
4866 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4867 __ SmiUntag(IP, box);
4868 __ vmovdr(DTMP, 0, IP);
4869 __ vcvtdi(result, STMP);
4870 break;
4871 }
4872
4873 default:
4874 UNREACHABLE();
4875 break;
4876 }
4877}
4878
4879void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
4880 const Register value = locs()->in(0).reg();
4881 const Register result = locs()->out(0).reg();
4882 compiler::Label done;
4883 __ SmiUntag(result, value, &done);
4884 __ LoadFieldFromOffset(kWord, result, value,
4885 compiler::target::Mint::value_offset());
4886 __ Bind(&done);
4887}
4888
4889void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
4890 const Register box = locs()->in(0).reg();
4891 PairLocation* result = locs()->out(0).AsPairLocation();
4892 ASSERT(result->At(0).reg() != box);
4893 ASSERT(result->At(1).reg() != box);
4894 compiler::Label done;
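  // Speculatively fill the high word from the tagged value: for a Smi the
  // sign of the tagged value matches the sign of the untagged value. If the
  // value turns out to be a Mint, both words are reloaded below.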
4895 __ SignFill(result->At(1).reg(), box);
4896 __ SmiUntag(result->At(0).reg(), box, &done);
4897 EmitLoadFromBox(compiler);
4898 __ Bind(&done);
4899}
4900
4901LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
4902 bool opt) const {
4903 ASSERT((from_representation() == kUnboxedInt32) ||
4904 (from_representation() == kUnboxedUint32));
4905 const intptr_t kNumInputs = 1;
4906 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4907 LocationSummary* summary = new (zone)
4908 LocationSummary(zone, kNumInputs, kNumTemps,
4909 ValueFitsSmi() ? LocationSummary::kNoCall
4910 : LocationSummary::kCallOnSlowPath);
4911 summary->set_in(0, Location::RequiresRegister());
4912 if (!ValueFitsSmi()) {
4913 summary->set_temp(0, Location::RequiresRegister());
4914 }
4915 summary->set_out(0, Location::RequiresRegister());
4916 return summary;
4917}
4918
4919void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4920 Register value = locs()->in(0).reg();
4921 Register out = locs()->out(0).reg();
4922 ASSERT(value != out);
4923
4924 __ SmiTag(out, value);
4925 if (!ValueFitsSmi()) {
4926 Register temp = locs()->temp(0).reg();
4927 compiler::Label done;
4928 if (from_representation() == kUnboxedInt32) {
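      // The value fits into a Smi if shifting the tagged result back
      // reproduces it.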
4929 __ cmp(value, compiler::Operand(out, ASR, 1));
4930 } else {
4931 ASSERT(from_representation() == kUnboxedUint32);
      // Note: it is better to test the upper bits than to compare with
      // kSmiMax, since kSmiMax does not fit into an immediate operand.
4934 __ TestImmediate(value, 0xC0000000);
4935 }
4936 __ b(&done, EQ);
4937 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
4938 temp);
4939 if (from_representation() == kUnboxedInt32) {
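      // Sign-extend the value into the high word of the Mint.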
4940 __ Asr(temp, value,
4941 compiler::Operand(compiler::target::kBitsPerWord - 1));
4942 } else {
4943 ASSERT(from_representation() == kUnboxedUint32);
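      // The high word of the Mint is zero for an unsigned 32-bit value.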
4944 __ eor(temp, temp, compiler::Operand(temp));
4945 }
4946 __ StoreToOffset(kWord, value, out,
4947 compiler::target::Mint::value_offset() - kHeapObjectTag);
4948 __ StoreToOffset(kWord, temp, out,
4949 compiler::target::Mint::value_offset() - kHeapObjectTag +
4950 compiler::target::kWordSize);
4951 __ Bind(&done);
4952 }
4953}
4954
4955LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
4956 bool opt) const {
4957 const intptr_t kNumInputs = 1;
4958 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
  // The shared slow path is used in BoxInt64Instr::EmitNativeCode in
  // FLAG_use_bare_instructions mode and only after the VM isolate stubs were
  // replaced with isolate-specific stubs.
4962 auto object_store = Isolate::Current()->object_store();
4963 const bool stubs_in_vm_isolate =
4964 object_store->allocate_mint_with_fpu_regs_stub()
4965 ->ptr()
4966 ->InVMIsolateHeap() ||
4967 object_store->allocate_mint_without_fpu_regs_stub()
4968 ->ptr()
4969 ->InVMIsolateHeap();
4970 const bool shared_slow_path_call = SlowPathSharingSupported(opt) &&
4971 FLAG_use_bare_instructions &&
4972 !stubs_in_vm_isolate;
4973 LocationSummary* summary = new (zone) LocationSummary(
4974 zone, kNumInputs, kNumTemps,
4975 ValueFitsSmi()
4976 ? LocationSummary::kNoCall
4977 : ((shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
4978 : LocationSummary::kCallOnSlowPath)));
4979 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
4980 Location::RequiresRegister()));
4981 if (ValueFitsSmi()) {
4982 summary->set_out(0, Location::RequiresRegister());
4983 } else if (shared_slow_path_call) {
4984 summary->set_out(0,
4985 Location::RegisterLocation(AllocateMintABI::kResultReg));
4986 summary->set_temp(0, Location::RegisterLocation(AllocateMintABI::kTempReg));
4987 } else {
4988 summary->set_out(0, Location::RequiresRegister());
4989 summary->set_temp(0, Location::RequiresRegister());
4990 }
4991 return summary;
4992}
4993
4994void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4995 if (ValueFitsSmi()) {
4996 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4997 Register value_lo = value_pair->At(0).reg();
4998 Register out_reg = locs()->out(0).reg();
4999 __ SmiTag(out_reg, value_lo);
5000 return;
5001 }
5002
5003 PairLocation* value_pair = locs()->in(0).AsPairLocation();
5004 Register value_lo = value_pair->At(0).reg();
5005 Register value_hi = value_pair->At(1).reg();
5006 Register tmp = locs()->temp(0).reg();
5007 Register out_reg = locs()->out(0).reg();
5008
5009 compiler::Label done;
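  // The value fits into a Smi if untagging reproduces the low word and the
  // high word equals the sign extension of the tagged result.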
5010 __ SmiTag(out_reg, value_lo);
5011 __ cmp(value_lo, compiler::Operand(out_reg, ASR, kSmiTagSize));
5012 __ cmp(value_hi, compiler::Operand(out_reg, ASR, 31), EQ);
5013 __ b(&done, EQ);
5014
5015 if (compiler->intrinsic_mode()) {
5016 __ TryAllocate(compiler->mint_class(),
5017 compiler->intrinsic_slow_path_label(), out_reg, tmp);
5018 } else if (locs()->call_on_shared_slow_path()) {
5019 auto object_store = compiler->isolate()->object_store();
5020 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
5021 const auto& stub = Code::ZoneHandle(
5022 compiler->zone(),
5023 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
5024 : object_store->allocate_mint_without_fpu_regs_stub());
5025
5026 ASSERT(!locs()->live_registers()->ContainsRegister(
5027 AllocateMintABI::kResultReg));
5028 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
5029 compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
5030 locs(), DeoptId::kNone, extended_env);
5031 } else {
5032 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
5033 out_reg, tmp);
5034 }
5035
5036 __ StoreToOffset(kWord, value_lo, out_reg,
5037 compiler::target::Mint::value_offset() - kHeapObjectTag);
5038 __ StoreToOffset(kWord, value_hi, out_reg,
5039 compiler::target::Mint::value_offset() - kHeapObjectTag +
5040 compiler::target::kWordSize);
5041 __ Bind(&done);
5042}
5043
5044static void LoadInt32FromMint(FlowGraphCompiler* compiler,
5045 Register mint,
5046 Register result,
5047 Register temp,
5048 compiler::Label* deopt) {
5049 __ LoadFieldFromOffset(kWord, result, mint,
5050 compiler::target::Mint::value_offset());
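  // Deoptimize if the high word is not the sign extension of the low word,
  // i.e. the value does not fit into 32 bits.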
5051 if (deopt != NULL) {
5052 __ LoadFieldFromOffset(
5053 kWord, temp, mint,
5054 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
5055 __ cmp(temp,
5056 compiler::Operand(result, ASR, compiler::target::kBitsPerWord - 1));
5057 __ b(deopt, NE);
5058 }
5059}
5060
5061LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
5062 bool opt) const {
5063 ASSERT((representation() == kUnboxedInt32) ||
5064 (representation() == kUnboxedUint32));
5065 ASSERT((representation() != kUnboxedUint32) || is_truncating());
5066 const intptr_t kNumInputs = 1;
5067 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
5068 LocationSummary* summary = new (zone)
5069 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5070 summary->set_in(0, Location::RequiresRegister());
5071 if (kNumTemps > 0) {
5072 summary->set_temp(0, Location::RequiresRegister());
5073 }
5074 summary->set_out(0, Location::RequiresRegister());
5075 return summary;
5076}
5077
5078void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
5079 const intptr_t value_cid = value()->Type()->ToCid();
5080 const Register value = locs()->in(0).reg();
5081 const Register out = locs()->out(0).reg();
5082 const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
5083 compiler::Label* deopt =
5084 CanDeoptimize()
5085 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
5086 : NULL;
5087 compiler::Label* out_of_range = !is_truncating() ? deopt : NULL;
5088 ASSERT(value != out);
5089
5090 if (value_cid == kSmiCid) {
5091 __ SmiUntag(out, value);
5092 } else if (value_cid == kMintCid) {
5093 LoadInt32FromMint(compiler, value, out, temp, out_of_range);
5094 } else if (!CanDeoptimize()) {
5095 compiler::Label done;
5096 __ SmiUntag(out, value, &done);
5097 LoadInt32FromMint(compiler, value, out, kNoRegister, NULL);
5098 __ Bind(&done);
5099 } else {
5100 compiler::Label done;
5101 __ SmiUntag(out, value, &done);
5102 __ CompareClassId(value, kMintCid, temp);
5103 __ b(deopt, NE);
5104 LoadInt32FromMint(compiler, value, out, temp, out_of_range);
5105 __ Bind(&done);
5106 }
5107}
5108
5109LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
5110 bool opt) const {
5111 const intptr_t kNumInputs = 2;
5112 const intptr_t kNumTemps = 0;
5113 LocationSummary* summary = new (zone)
5114 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5115 summary->set_in(0, Location::RequiresFpuRegister());
5116 summary->set_in(1, Location::RequiresFpuRegister());
5117 summary->set_out(0, Location::RequiresFpuRegister());
5118 return summary;
5119}
5120
5121void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5122 const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
5123 const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
5124 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5125 switch (op_kind()) {
5126 case Token::kADD:
5127 __ vaddd(result, left, right);
5128 break;
5129 case Token::kSUB:
5130 __ vsubd(result, left, right);
5131 break;
5132 case Token::kMUL:
5133 __ vmuld(result, left, right);
5134 break;
5135 case Token::kDIV:
5136 __ vdivd(result, left, right);
5137 break;
5138 default:
5139 UNREACHABLE();
5140 }
5141}
5142
5143LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
5144 bool opt) const {
5145 const intptr_t kNumInputs = 1;
5146 const intptr_t kNumTemps =
5147 (op_kind() == MethodRecognizer::kDouble_getIsInfinite) ? 1 : 0;
5148 LocationSummary* summary = new (zone)
5149 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5150 summary->set_in(0, Location::RequiresFpuRegister());
5151 if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) {
5152 summary->set_temp(0, Location::RequiresRegister());
5153 }
5154 summary->set_out(0, Location::RequiresRegister());
5155 return summary;
5156}
5157
5158Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
5159 BranchLabels labels) {
5160 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5161 const bool is_negated = kind() != Token::kEQ;
5162 if (op_kind() == MethodRecognizer::kDouble_getIsNaN) {
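    // Only a NaN compares unordered with itself, which sets the V flag.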
5163 __ vcmpd(value, value);
5164 __ vmstat();
5165 return is_negated ? VC : VS;
5166 } else {
5167 ASSERT(op_kind() == MethodRecognizer::kDouble_getIsInfinite);
5168 const Register temp = locs()->temp(0).reg();
5169 compiler::Label done;
    // TMP <- value[0:31], temp <- value[32:63]
5171 __ vmovrrd(TMP, temp, value);
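    // An infinity has an all-zero low word; a nonzero low word rules it out.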
5172 __ cmp(TMP, compiler::Operand(0));
5173 __ b(is_negated ? labels.true_label : labels.false_label, NE);
5174
5175 // Mask off the sign bit.
5176 __ AndImmediate(temp, temp, 0x7FFFFFFF);
5177 // Compare with +infinity.
5178 __ CompareImmediate(temp, 0x7FF00000);
5179 return is_negated ? NE : EQ;
5180 }
5181}
5182
5183// SIMD
5184
5185#define DEFINE_EMIT(Name, Args) \
5186 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
5187 PP_APPLY(PP_UNPACK, Args))
5188
5189DEFINE_EMIT(Simd32x4BinaryOp,
5190 (QRegister result, QRegister left, QRegister right)) {
5191 switch (instr->kind()) {
5192 case SimdOpInstr::kFloat32x4Add:
5193 __ vaddqs(result, left, right);
5194 break;
5195 case SimdOpInstr::kFloat32x4Sub:
5196 __ vsubqs(result, left, right);
5197 break;
5198 case SimdOpInstr::kFloat32x4Mul:
5199 __ vmulqs(result, left, right);
5200 break;
5201 case SimdOpInstr::kFloat32x4Div:
5202 __ Vdivqs(result, left, right);
5203 break;
5204 case SimdOpInstr::kFloat32x4Equal:
5205 __ vceqqs(result, left, right);
5206 break;
5207 case SimdOpInstr::kFloat32x4NotEqual:
5208 __ vceqqs(result, left, right);
5209 // Invert the result.
5210 __ vmvnq(result, result);
5211 break;
5212 case SimdOpInstr::kFloat32x4GreaterThan:
5213 __ vcgtqs(result, left, right);
5214 break;
5215 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
5216 __ vcgeqs(result, left, right);
5217 break;
5218 case SimdOpInstr::kFloat32x4LessThan:
5219 __ vcgtqs(result, right, left);
5220 break;
5221 case SimdOpInstr::kFloat32x4LessThanOrEqual:
5222 __ vcgeqs(result, right, left);
5223 break;
5224 case SimdOpInstr::kFloat32x4Min:
5225 __ vminqs(result, left, right);
5226 break;
5227 case SimdOpInstr::kFloat32x4Max:
5228 __ vmaxqs(result, left, right);
5229 break;
5230 case SimdOpInstr::kFloat32x4Scale:
5231 __ vcvtsd(STMP, EvenDRegisterOf(left));
5232 __ vdup(kWord, result, DTMP, 0);
5233 __ vmulqs(result, result, right);
5234 break;
5235 case SimdOpInstr::kInt32x4BitAnd:
5236 __ vandq(result, left, right);
5237 break;
5238 case SimdOpInstr::kInt32x4BitOr:
5239 __ vorrq(result, left, right);
5240 break;
5241 case SimdOpInstr::kInt32x4BitXor:
5242 __ veorq(result, left, right);
5243 break;
5244 case SimdOpInstr::kInt32x4Add:
5245 __ vaddqi(kWord, result, left, right);
5246 break;
5247 case SimdOpInstr::kInt32x4Sub:
5248 __ vsubqi(kWord, result, left, right);
5249 break;
5250 default:
5251 UNREACHABLE();
5252 }
5253}
5254
5255DEFINE_EMIT(Float64x2BinaryOp,
5256 (QRegisterView result, QRegisterView left, QRegisterView right)) {
5257 switch (instr->kind()) {
5258 case SimdOpInstr::kFloat64x2Add:
5259 __ vaddd(result.d(0), left.d(0), right.d(0));
5260 __ vaddd(result.d(1), left.d(1), right.d(1));
5261 break;
5262 case SimdOpInstr::kFloat64x2Sub:
5263 __ vsubd(result.d(0), left.d(0), right.d(0));
5264 __ vsubd(result.d(1), left.d(1), right.d(1));
5265 break;
5266 case SimdOpInstr::kFloat64x2Mul:
5267 __ vmuld(result.d(0), left.d(0), right.d(0));
5268 __ vmuld(result.d(1), left.d(1), right.d(1));
5269 break;
5270 case SimdOpInstr::kFloat64x2Div:
5271 __ vdivd(result.d(0), left.d(0), right.d(0));
5272 __ vdivd(result.d(1), left.d(1), right.d(1));
5273 break;
5274 default:
5275 UNREACHABLE();
5276 }
5277}
5278
5279// Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
5280// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5281DEFINE_EMIT(Simd32x4Shuffle,
5282 (FixedQRegisterView<Q6> result, FixedQRegisterView<Q5> value)) {
  // For the broadcast masks handled below a single vdup instruction
  // suffices. Arbitrary shuffles fall back to a per-lane vmovs sequence.
5285
5286 switch (instr->kind()) {
5287 case SimdOpInstr::kFloat32x4ShuffleX:
5288 __ vcvtds(result.d(0), value.s(0));
5289 break;
5290 case SimdOpInstr::kFloat32x4ShuffleY:
5291 __ vcvtds(result.d(0), value.s(1));
5292 break;
5293 case SimdOpInstr::kFloat32x4ShuffleZ:
5294 __ vcvtds(result.d(0), value.s(2));
5295 break;
5296 case SimdOpInstr::kFloat32x4ShuffleW:
5297 __ vcvtds(result.d(0), value.s(3));
5298 break;
5299 case SimdOpInstr::kInt32x4Shuffle:
5300 case SimdOpInstr::kFloat32x4Shuffle: {
5301 if (instr->mask() == 0x00) {
5302 __ vdup(kWord, result, value.d(0), 0);
5303 } else if (instr->mask() == 0x55) {
5304 __ vdup(kWord, result, value.d(0), 1);
5305 } else if (instr->mask() == 0xAA) {
5306 __ vdup(kWord, result, value.d(1), 0);
5307 } else if (instr->mask() == 0xFF) {
5308 __ vdup(kWord, result, value.d(1), 1);
5309 } else {
5310 // TODO(zra): Investigate better instruction sequences for other
5311 // shuffle masks.
5312 QRegisterView temp(QTMP);
5313
5314 __ vmovq(temp, value);
5315 for (intptr_t i = 0; i < 4; i++) {
5316 __ vmovs(result.s(i), temp.s((instr->mask() >> (2 * i)) & 0x3));
5317 }
5318 }
5319 break;
5320 }
5321 default:
5322 UNREACHABLE();
5323 }
5324}
5325
5326// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5327DEFINE_EMIT(Simd32x4ShuffleMix,
5328 (FixedQRegisterView<Q6> result,
5329 FixedQRegisterView<Q4> left,
5330 FixedQRegisterView<Q5> right)) {
5331 // TODO(zra): Investigate better instruction sequences for shuffle masks.
5332 __ vmovs(result.s(0), left.s((instr->mask() >> 0) & 0x3));
5333 __ vmovs(result.s(1), left.s((instr->mask() >> 2) & 0x3));
5334 __ vmovs(result.s(2), right.s((instr->mask() >> 4) & 0x3));
5335 __ vmovs(result.s(3), right.s((instr->mask() >> 6) & 0x3));
5336}
5337
5338// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5339DEFINE_EMIT(Simd32x4GetSignMask,
5340 (Register out, FixedQRegisterView<Q5> value, Temp<Register> temp)) {
5341 // X lane.
5342 __ vmovrs(out, value.s(0));
5343 __ Lsr(out, out, compiler::Operand(31));
5344 // Y lane.
5345 __ vmovrs(temp, value.s(1));
5346 __ Lsr(temp, temp, compiler::Operand(31));
5347 __ orr(out, out, compiler::Operand(temp, LSL, 1));
5348 // Z lane.
5349 __ vmovrs(temp, value.s(2));
5350 __ Lsr(temp, temp, compiler::Operand(31));
5351 __ orr(out, out, compiler::Operand(temp, LSL, 2));
5352 // W lane.
5353 __ vmovrs(temp, value.s(3));
5354 __ Lsr(temp, temp, compiler::Operand(31));
5355 __ orr(out, out, compiler::Operand(temp, LSL, 3));
5356}
5357
5358// Low (< 7) Q registers are needed for the vcvtsd instruction.
5359// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5360DEFINE_EMIT(Float32x4FromDoubles,
5361 (FixedQRegisterView<Q6> out,
5362 QRegisterView q0,
5363 QRegisterView q1,
5364 QRegisterView q2,
5365 QRegisterView q3)) {
5366 __ vcvtsd(out.s(0), q0.d(0));
5367 __ vcvtsd(out.s(1), q1.d(0));
5368 __ vcvtsd(out.s(2), q2.d(0));
5369 __ vcvtsd(out.s(3), q3.d(0));
5370}
5371
5372DEFINE_EMIT(Float32x4Zero, (QRegister out)) {
5373 __ veorq(out, out, out);
5374}
5375
5376DEFINE_EMIT(Float32x4Splat, (QRegister result, QRegisterView value)) {
5377 // Convert to Float32.
5378 __ vcvtsd(STMP, value.d(0));
5379
5380 // Splat across all lanes.
5381 __ vdup(kWord, result, DTMP, 0);
5382}
5383
5384DEFINE_EMIT(Float32x4Sqrt,
5385 (QRegister result, QRegister left, Temp<QRegister> temp)) {
5386 __ Vsqrtqs(result, left, temp);
5387}
5388
5389DEFINE_EMIT(Float32x4Unary, (QRegister result, QRegister left)) {
5390 switch (instr->kind()) {
5391 case SimdOpInstr::kFloat32x4Negate:
5392 __ vnegqs(result, left);
5393 break;
5394 case SimdOpInstr::kFloat32x4Abs:
5395 __ vabsqs(result, left);
5396 break;
5397 case SimdOpInstr::kFloat32x4Reciprocal:
5398 __ Vreciprocalqs(result, left);
5399 break;
5400 case SimdOpInstr::kFloat32x4ReciprocalSqrt:
5401 __ VreciprocalSqrtqs(result, left);
5402 break;
5403 default:
5404 UNREACHABLE();
5405 }
5406}
5407
5408DEFINE_EMIT(Simd32x4ToSimd32x4Convertion, (SameAsFirstInput, QRegister left)) {
  // TODO(dartbug.com/30949) these operations are essentially no-ops and
  // should not generate any code. They should be removed from the graph
  // before code generation.
5412}
5413
5414DEFINE_EMIT(
5415 Float32x4Clamp,
5416 (QRegister result, QRegister left, QRegister lower, QRegister upper)) {
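  // result = max(min(left, upper), lower)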
5417 __ vminqs(result, left, upper);
5418 __ vmaxqs(result, result, lower);
5419}
5420
5421// Low (< 7) Q registers are needed for the vmovs instruction.
5422// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5423DEFINE_EMIT(Float32x4With,
5424 (FixedQRegisterView<Q6> result,
5425 QRegisterView replacement,
5426 QRegister value)) {
5427 __ vcvtsd(STMP, replacement.d(0));
5428 __ vmovq(result, value);
5429 switch (instr->kind()) {
5430 case SimdOpInstr::kFloat32x4WithX:
5431 __ vmovs(result.s(0), STMP);
5432 break;
5433 case SimdOpInstr::kFloat32x4WithY:
5434 __ vmovs(result.s(1), STMP);
5435 break;
5436 case SimdOpInstr::kFloat32x4WithZ:
5437 __ vmovs(result.s(2), STMP);
5438 break;
5439 case SimdOpInstr::kFloat32x4WithW:
5440 __ vmovs(result.s(3), STMP);
5441 break;
5442 default:
5443 UNREACHABLE();
5444 }
5445}
5446
5447DEFINE_EMIT(Simd64x2Shuffle, (QRegisterView result, QRegisterView value)) {
5448 switch (instr->kind()) {
5449 case SimdOpInstr::kFloat64x2GetX:
5450 __ vmovd(result.d(0), value.d(0));
5451 break;
5452 case SimdOpInstr::kFloat64x2GetY:
5453 __ vmovd(result.d(0), value.d(1));
5454 break;
5455 default:
5456 UNREACHABLE();
5457 }
5458}
5459
5460DEFINE_EMIT(Float64x2Zero, (QRegister q)) {
5461 __ veorq(q, q, q);
5462}
5463
5464DEFINE_EMIT(Float64x2Splat, (QRegisterView result, QRegisterView value)) {
5465 // Splat across all lanes.
5466 __ vmovd(result.d(0), value.d(0));
5467 __ vmovd(result.d(1), value.d(0));
5468}
5469
5470DEFINE_EMIT(Float64x2FromDoubles,
5471 (QRegisterView r, QRegisterView q0, QRegisterView q1)) {
5472 __ vmovd(r.d(0), q0.d(0));
5473 __ vmovd(r.d(1), q1.d(0));
5474}
5475
5476// Low (< 7) Q registers are needed for the vcvtsd instruction.
5477// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5478DEFINE_EMIT(Float64x2ToFloat32x4, (FixedQRegisterView<Q6> r, QRegisterView q)) {
5479 __ veorq(r, r, r);
5480 // Set X lane.
5481 __ vcvtsd(r.s(0), q.d(0));
5482 // Set Y lane.
5483 __ vcvtsd(r.s(1), q.d(1));
5484}
5485
5486// Low (< 7) Q registers are needed for the vcvtsd instruction.
5487// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5488DEFINE_EMIT(Float32x4ToFloat64x2, (FixedQRegisterView<Q6> r, QRegisterView q)) {
5489 // Set X.
5490 __ vcvtds(r.d(0), q.s(0));
5491 // Set Y.
5492 __ vcvtds(r.d(1), q.s(1));
5493}
5494
5495// Grabbing the S components means we need a low (< 7) Q.
5496// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5497DEFINE_EMIT(Float64x2GetSignMask,
5498 (Register out, FixedQRegisterView<Q6> value)) {
5499 // Upper 32-bits of X lane.
5500 __ vmovrs(out, value.s(1));
5501 __ Lsr(out, out, compiler::Operand(31));
5502 // Upper 32-bits of Y lane.
5503 __ vmovrs(TMP, value.s(3));
5504 __ Lsr(TMP, TMP, compiler::Operand(31));
5505 __ orr(out, out, compiler::Operand(TMP, LSL, 1));
5506}
5507
5508DEFINE_EMIT(Float64x2Unary, (QRegisterView result, QRegisterView value)) {
5509 switch (instr->kind()) {
5510 case SimdOpInstr::kFloat64x2Negate:
5511 __ vnegd(result.d(0), value.d(0));
5512 __ vnegd(result.d(1), value.d(1));
5513 break;
5514 case SimdOpInstr::kFloat64x2Abs:
5515 __ vabsd(result.d(0), value.d(0));
5516 __ vabsd(result.d(1), value.d(1));
5517 break;
5518 case SimdOpInstr::kFloat64x2Sqrt:
5519 __ vsqrtd(result.d(0), value.d(0));
5520 __ vsqrtd(result.d(1), value.d(1));
5521 break;
5522 default:
5523 UNREACHABLE();
5524 }
5525}
5526
5527DEFINE_EMIT(Float64x2Binary,
5528 (SameAsFirstInput, QRegisterView left, QRegisterView right)) {
5529 switch (instr->kind()) {
5530 case SimdOpInstr::kFloat64x2Scale:
5531 __ vmuld(left.d(0), left.d(0), right.d(0));
5532 __ vmuld(left.d(1), left.d(1), right.d(0));
5533 break;
5534 case SimdOpInstr::kFloat64x2WithX:
5535 __ vmovd(left.d(0), right.d(0));
5536 break;
5537 case SimdOpInstr::kFloat64x2WithY:
5538 __ vmovd(left.d(1), right.d(0));
5539 break;
5540 case SimdOpInstr::kFloat64x2Min: {
5541 // X lane.
5542 __ vcmpd(left.d(0), right.d(0));
5543 __ vmstat();
5544 __ vmovd(left.d(0), right.d(0), GE);
5545 // Y lane.
5546 __ vcmpd(left.d(1), right.d(1));
5547 __ vmstat();
5548 __ vmovd(left.d(1), right.d(1), GE);
5549 break;
5550 }
5551 case SimdOpInstr::kFloat64x2Max: {
5552 // X lane.
5553 __ vcmpd(left.d(0), right.d(0));
5554 __ vmstat();
5555 __ vmovd(left.d(0), right.d(0), LE);
5556 // Y lane.
5557 __ vcmpd(left.d(1), right.d(1));
5558 __ vmstat();
5559 __ vmovd(left.d(1), right.d(1), LE);
5560 break;
5561 }
5562 default:
5563 UNREACHABLE();
5564 }
5565}
5566
5567DEFINE_EMIT(Int32x4FromInts,
5568 (QRegisterView result,
5569 Register v0,
5570 Register v1,
5571 Register v2,
5572 Register v3)) {
5573 __ veorq(result, result, result);
5574 __ vmovdrr(result.d(0), v0, v1);
5575 __ vmovdrr(result.d(1), v2, v3);
5576}
5577
5578DEFINE_EMIT(Int32x4FromBools,
5579 (QRegisterView result,
5580 Register v0,
5581 Register v1,
5582 Register v2,
5583 Register v3,
5584 Temp<Register> temp)) {
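  // All lanes start out false (zero); a lane is set to all ones below when
  // its flag is the true object.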
5585 __ veorq(result, result, result);
5586 __ LoadImmediate(temp, 0xffffffff);
5587
5588 __ LoadObject(IP, Bool::True());
5589 __ cmp(v0, compiler::Operand(IP));
5590 __ vmovdr(result.d(0), 0, temp, EQ);
5591
5592 __ cmp(v1, compiler::Operand(IP));
5593 __ vmovdr(result.d(0), 1, temp, EQ);
5594
5595 __ cmp(v2, compiler::Operand(IP));
5596 __ vmovdr(result.d(1), 0, temp, EQ);
5597
5598 __ cmp(v3, compiler::Operand(IP));
5599 __ vmovdr(result.d(1), 1, temp, EQ);
5600}
5601
5602// Low (< 7) Q registers are needed for the vmovrs instruction.
5603// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5604DEFINE_EMIT(Int32x4GetFlag, (Register result, FixedQRegisterView<Q6> value)) {
5605 switch (instr->kind()) {
5606 case SimdOpInstr::kInt32x4GetFlagX:
5607 __ vmovrs(result, value.s(0));
5608 break;
5609 case SimdOpInstr::kInt32x4GetFlagY:
5610 __ vmovrs(result, value.s(1));
5611 break;
5612 case SimdOpInstr::kInt32x4GetFlagZ:
5613 __ vmovrs(result, value.s(2));
5614 break;
5615 case SimdOpInstr::kInt32x4GetFlagW:
5616 __ vmovrs(result, value.s(3));
5617 break;
5618 default:
5619 UNREACHABLE();
5620 }
5621
5622 __ tst(result, compiler::Operand(result));
5623 __ LoadObject(result, Bool::True(), NE);
5624 __ LoadObject(result, Bool::False(), EQ);
5625}
5626
5627DEFINE_EMIT(Int32x4Select,
5628 (QRegister out,
5629 QRegister mask,
5630 QRegister trueValue,
5631 QRegister falseValue,
5632 Temp<QRegister> temp)) {
5633 // Copy mask.
5634 __ vmovq(temp, mask);
5635 // Invert it.
5636 __ vmvnq(temp, temp);
5637 // mask = mask & trueValue.
5638 __ vandq(mask, mask, trueValue);
5639 // temp = temp & falseValue.
5640 __ vandq(temp, temp, falseValue);
5641 // out = mask | temp.
5642 __ vorrq(out, mask, temp);
5643}
5644
5645DEFINE_EMIT(Int32x4WithFlag,
5646 (QRegisterView result, QRegister mask, Register flag)) {
5647 __ vmovq(result, mask);
5648 __ CompareObject(flag, Bool::True());
5649 __ LoadImmediate(TMP, 0xffffffff, EQ);
5650 __ LoadImmediate(TMP, 0, NE);
5651 switch (instr->kind()) {
5652 case SimdOpInstr::kInt32x4WithFlagX:
5653 __ vmovdr(result.d(0), 0, TMP);
5654 break;
5655 case SimdOpInstr::kInt32x4WithFlagY:
5656 __ vmovdr(result.d(0), 1, TMP);
5657 break;
5658 case SimdOpInstr::kInt32x4WithFlagZ:
5659 __ vmovdr(result.d(1), 0, TMP);
5660 break;
5661 case SimdOpInstr::kInt32x4WithFlagW:
5662 __ vmovdr(result.d(1), 1, TMP);
5663 break;
5664 default:
5665 UNREACHABLE();
5666 }
5667}
5668
5669// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
5670// format:
5671//
5672// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
5673// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
5674//
5675#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \
5676 CASE(Float32x4Add) \
5677 CASE(Float32x4Sub) \
5678 CASE(Float32x4Mul) \
5679 CASE(Float32x4Div) \
5680 CASE(Float32x4Equal) \
5681 CASE(Float32x4NotEqual) \
5682 CASE(Float32x4GreaterThan) \
5683 CASE(Float32x4GreaterThanOrEqual) \
5684 CASE(Float32x4LessThan) \
5685 CASE(Float32x4LessThanOrEqual) \
5686 CASE(Float32x4Min) \
5687 CASE(Float32x4Max) \
5688 CASE(Float32x4Scale) \
5689 CASE(Int32x4BitAnd) \
5690 CASE(Int32x4BitOr) \
5691 CASE(Int32x4BitXor) \
5692 CASE(Int32x4Add) \
5693 CASE(Int32x4Sub) \
5694 ____(Simd32x4BinaryOp) \
5695 CASE(Float64x2Add) \
5696 CASE(Float64x2Sub) \
5697 CASE(Float64x2Mul) \
5698 CASE(Float64x2Div) \
5699 ____(Float64x2BinaryOp) \
5700 CASE(Float32x4ShuffleX) \
5701 CASE(Float32x4ShuffleY) \
5702 CASE(Float32x4ShuffleZ) \
5703 CASE(Float32x4ShuffleW) \
5704 CASE(Int32x4Shuffle) \
5705 CASE(Float32x4Shuffle) \
5706 ____(Simd32x4Shuffle) \
5707 CASE(Float32x4ShuffleMix) \
5708 CASE(Int32x4ShuffleMix) \
5709 ____(Simd32x4ShuffleMix) \
5710 CASE(Float32x4GetSignMask) \
5711 CASE(Int32x4GetSignMask) \
5712 ____(Simd32x4GetSignMask) \
5713 SIMPLE(Float32x4FromDoubles) \
5714 SIMPLE(Float32x4Zero) \
5715 SIMPLE(Float32x4Splat) \
5716 SIMPLE(Float32x4Sqrt) \
5717 CASE(Float32x4Negate) \
5718 CASE(Float32x4Abs) \
5719 CASE(Float32x4Reciprocal) \
5720 CASE(Float32x4ReciprocalSqrt) \
5721 ____(Float32x4Unary) \
5722 CASE(Float32x4ToInt32x4) \
5723 CASE(Int32x4ToFloat32x4) \
5724 ____(Simd32x4ToSimd32x4Convertion) \
5725 SIMPLE(Float32x4Clamp) \
5726 CASE(Float32x4WithX) \
5727 CASE(Float32x4WithY) \
5728 CASE(Float32x4WithZ) \
5729 CASE(Float32x4WithW) \
5730 ____(Float32x4With) \
5731 CASE(Float64x2GetX) \
5732 CASE(Float64x2GetY) \
5733 ____(Simd64x2Shuffle) \
5734 SIMPLE(Float64x2Zero) \
5735 SIMPLE(Float64x2Splat) \
5736 SIMPLE(Float64x2FromDoubles) \
5737 SIMPLE(Float64x2ToFloat32x4) \
5738 SIMPLE(Float32x4ToFloat64x2) \
5739 SIMPLE(Float64x2GetSignMask) \
5740 CASE(Float64x2Negate) \
5741 CASE(Float64x2Abs) \
5742 CASE(Float64x2Sqrt) \
5743 ____(Float64x2Unary) \
5744 CASE(Float64x2Scale) \
5745 CASE(Float64x2WithX) \
5746 CASE(Float64x2WithY) \
5747 CASE(Float64x2Min) \
5748 CASE(Float64x2Max) \
5749 ____(Float64x2Binary) \
5750 SIMPLE(Int32x4FromInts) \
5751 SIMPLE(Int32x4FromBools) \
5752 CASE(Int32x4GetFlagX) \
5753 CASE(Int32x4GetFlagY) \
5754 CASE(Int32x4GetFlagZ) \
5755 CASE(Int32x4GetFlagW) \
5756 ____(Int32x4GetFlag) \
5757 SIMPLE(Int32x4Select) \
5758 CASE(Int32x4WithFlagX) \
5759 CASE(Int32x4WithFlagY) \
5760 CASE(Int32x4WithFlagZ) \
5761 CASE(Int32x4WithFlagW) \
5762 ____(Int32x4WithFlag)
5763
5764LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5765 switch (kind()) {
5766#define CASE(Name) case k##Name:
5767#define EMIT(Name) \
5768 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
5769#define SIMPLE(Name) CASE(Name) EMIT(Name)
5770 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
5771#undef CASE
5772#undef EMIT
5773#undef SIMPLE
5774 case kIllegalSimdOp:
5775 UNREACHABLE();
5776 break;
5777 }
5778 UNREACHABLE();
5779 return NULL;
5780}
5781
5782void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5783 switch (kind()) {
5784#define CASE(Name) case k##Name:
5785#define EMIT(Name) \
5786 InvokeEmitter(compiler, this, &Emit##Name); \
5787 break;
5788#define SIMPLE(Name) CASE(Name) EMIT(Name)
5789 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
5790#undef CASE
5791#undef EMIT
5792#undef SIMPLE
5793 case kIllegalSimdOp:
5794 UNREACHABLE();
5795 break;
5796 }
5797}
5798
5799#undef DEFINE_EMIT
5800
5801LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone,
5802 bool opt) const {
5803 ASSERT((kind() == MathUnaryInstr::kSqrt) ||
5804 (kind() == MathUnaryInstr::kDoubleSquare));
5805 const intptr_t kNumInputs = 1;
5806 const intptr_t kNumTemps = 0;
5807 LocationSummary* summary = new (zone)
5808 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5809 summary->set_in(0, Location::RequiresFpuRegister());
5810 summary->set_out(0, Location::RequiresFpuRegister());
5811 return summary;
5812}
5813
5814void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5815 if (kind() == MathUnaryInstr::kSqrt) {
5816 const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
5817 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5818 __ vsqrtd(result, val);
5819 } else if (kind() == MathUnaryInstr::kDoubleSquare) {
5820 const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
5821 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5822 __ vmuld(result, val, val);
5823 } else {
5824 UNREACHABLE();
5825 }
5826}
5827
5828LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
5829 Zone* zone,
5830 bool opt) const {
5831 const intptr_t kNumTemps = 0;
5832 LocationSummary* summary = new (zone)
5833 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
5834 summary->set_in(0, Location::RegisterLocation(R0));
5835 summary->set_in(1, Location::RegisterLocation(R1));
5836 summary->set_in(2, Location::RegisterLocation(R2));
5837 summary->set_in(3, Location::RegisterLocation(R3));
5838 summary->set_out(0, Location::RegisterLocation(R0));
5839 return summary;
5840}
5841
5842void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5843 // Call the function.
5844 __ CallRuntime(TargetFunction(), TargetFunction().argument_count());
5845}
5846
5847LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
5848 bool opt) const {
5849 if (result_cid() == kDoubleCid) {
5850 const intptr_t kNumInputs = 2;
5851 const intptr_t kNumTemps = 1;
5852 LocationSummary* summary = new (zone)
5853 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5854 summary->set_in(0, Location::RequiresFpuRegister());
5855 summary->set_in(1, Location::RequiresFpuRegister());
5856 // Reuse the left register so that code can be made shorter.
5857 summary->set_out(0, Location::SameAsFirstInput());
5858 summary->set_temp(0, Location::RequiresRegister());
5859 return summary;
5860 }
5861 ASSERT(result_cid() == kSmiCid);
5862 const intptr_t kNumInputs = 2;
5863 const intptr_t kNumTemps = 0;
5864 LocationSummary* summary = new (zone)
5865 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5866 summary->set_in(0, Location::RequiresRegister());
5867 summary->set_in(1, Location::RequiresRegister());
5868 // Reuse the left register so that code can be made shorter.
5869 summary->set_out(0, Location::SameAsFirstInput());
5870 return summary;
5871}
5872
5873void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5874 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
5875 (op_kind() == MethodRecognizer::kMathMax));
5876 const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin);
5877 if (result_cid() == kDoubleCid) {
5878 compiler::Label done, returns_nan, are_equal;
5879 const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
5880 const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
5881 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5882 const Register temp = locs()->temp(0).reg();
5883 __ vcmpd(left, right);
5884 __ vmstat();
5885 __ b(&returns_nan, VS);
5886 __ b(&are_equal, EQ);
5887 const Condition neg_double_condition =
5888 is_min ? TokenKindToDoubleCondition(Token::kGTE)
5889 : TokenKindToDoubleCondition(Token::kLTE);
5890 ASSERT(left == result);
5891 __ vmovd(result, right, neg_double_condition);
5892 __ b(&done);
5893
5894 __ Bind(&returns_nan);
5895 __ LoadDImmediate(result, NAN, temp);
5896 __ b(&done);
5897
5898 __ Bind(&are_equal);
    // Check for negative zero: -0.0 compares equal to 0.0, but min must
    // return -0.0 and max must return 0.0.
    // Check for a negative left value (get the sign bit):
    // - min -> left is negative ? left : right.
    // - max -> left is negative ? right : left.
5904 // Check the sign bit.
5905 __ vmovrrd(IP, temp, left); // Sign bit is in bit 31 of temp.
5906 __ cmp(temp, compiler::Operand(0));
5907 if (is_min) {
5908 ASSERT(left == result);
5909 __ vmovd(result, right, GE);
5910 } else {
5911 __ vmovd(result, right, LT);
5912 ASSERT(left == result);
5913 }
5914 __ Bind(&done);
5915 return;
5916 }
5917
5918 ASSERT(result_cid() == kSmiCid);
5919 const Register left = locs()->in(0).reg();
5920 const Register right = locs()->in(1).reg();
5921 const Register result = locs()->out(0).reg();
5922 __ cmp(left, compiler::Operand(right));
5923 ASSERT(result == left);
5924 if (is_min) {
5925 __ mov(result, compiler::Operand(right), GT);
5926 } else {
5927 __ mov(result, compiler::Operand(right), LT);
5928 }
5929}
5930
5931LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
5932 bool opt) const {
5933 const intptr_t kNumInputs = 1;
5934 const intptr_t kNumTemps = 0;
5935 LocationSummary* summary = new (zone)
5936 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5937 summary->set_in(0, Location::RequiresRegister());
  // We make use of 3-operand instructions by not requiring the result
  // register to be identical to the first input register, as is required
  // on Intel.
5940 summary->set_out(0, Location::RequiresRegister());
5941 return summary;
5942}
5943
5944void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5945 const Register value = locs()->in(0).reg();
5946 const Register result = locs()->out(0).reg();
5947 switch (op_kind()) {
5948 case Token::kNEGATE: {
5949 compiler::Label* deopt =
5950 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
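      // rsbs computes 0 - value and sets the flags; negating the minimum Smi
      // overflows (V set).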
5951 __ rsbs(result, value, compiler::Operand(0));
5952 __ b(deopt, VS);
5953 break;
5954 }
5955 case Token::kBIT_NOT:
5956 __ mvn(result, compiler::Operand(value));
5957 // Remove inverted smi-tag.
5958 __ bic(result, result, compiler::Operand(kSmiTagMask));
5959 break;
5960 default:
5961 UNREACHABLE();
5962 }
5963}
5964
5965LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
5966 bool opt) const {
5967 const intptr_t kNumInputs = 1;
5968 const intptr_t kNumTemps = 0;
5969 LocationSummary* summary = new (zone)
5970 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5971 summary->set_in(0, Location::RequiresFpuRegister());
5972 summary->set_out(0, Location::RequiresFpuRegister());
5973 return summary;
5974}
5975
5976void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5977 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5978 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5979 __ vnegd(result, value);
5980}
5981
5982LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
5983 bool opt) const {
5984 const intptr_t kNumInputs = 1;
5985 const intptr_t kNumTemps = 0;
5986 LocationSummary* result = new (zone)
5987 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5988 result->set_in(0, Location::RequiresRegister());
5989 result->set_out(0, Location::RequiresFpuRegister());
5990 return result;
5991}
5992
5993void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5994 const Register value = locs()->in(0).reg();
5995 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
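  // Move the integer into the low half of DTMP (aliased by STMP) and convert
  // it to a double.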
5996 __ vmovdr(DTMP, 0, value);
5997 __ vcvtdi(result, STMP);
5998}
5999
6000LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
6001 bool opt) const {
6002 const intptr_t kNumInputs = 1;
6003 const intptr_t kNumTemps = 0;
6004 LocationSummary* result = new (zone)
6005 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6006 result->set_in(0, Location::RequiresRegister());
6007 result->set_out(0, Location::RequiresFpuRegister());
6008 return result;
6009}
6010
6011void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6012 const Register value = locs()->in(0).reg();
6013 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
6014 __ SmiUntag(IP, value);
6015 __ vmovdr(DTMP, 0, IP);
6016 __ vcvtdi(result, STMP);
6017}
6018
6019LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
6020 bool opt) const {
6021 UNIMPLEMENTED();
6022 return NULL;
6023}
6024
6025void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6026 UNIMPLEMENTED();
6027}
6028
6029LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
6030 bool opt) const {
6031 const intptr_t kNumInputs = 1;
6032 const intptr_t kNumTemps = 0;
6033 LocationSummary* result = new (zone)
6034 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6035 result->set_in(0, Location::RegisterLocation(R1));
6036 result->set_out(0, Location::RegisterLocation(R0));
6037 return result;
6038}
6039
6040void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6041 const Register result = locs()->out(0).reg();
6042 const Register value_obj = locs()->in(0).reg();
6043 ASSERT(result == R0);
6044 ASSERT(result != value_obj);
6045 __ LoadDFromOffset(DTMP, value_obj,
6046 compiler::target::Double::value_offset() - kHeapObjectTag);
6047
6048 compiler::Label done, do_call;
6049 // First check for NaN. Checking for minint after the conversion doesn't work
6050 // on ARM because vcvtid gives 0 for NaN.
6051 __ vcmpd(DTMP, DTMP);
6052 __ vmstat();
6053 __ b(&do_call, VS);
6054
6055 __ vcvtid(STMP, DTMP);
6056 __ vmovrs(result, STMP);
6057 // Overflow is signaled with minint.
6058
6059 // Check for overflow and that it fits into Smi.
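  // PL holds iff the result lies in the Smi range [-0x40000000, 0x3FFFFFFF];
  // the minint overflow marker falls outside of it.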
6060 __ CompareImmediate(result, 0xC0000000);
6061 __ SmiTag(result, PL);
6062 __ b(&done, PL);
6063
6064 __ Bind(&do_call);
6065 __ Push(value_obj);
6066 ASSERT(instance_call()->HasICData());
6067 const ICData& ic_data = *instance_call()->ic_data();
6068 ASSERT(ic_data.NumberOfChecksIs(1));
6069 const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0));
6070 const int kTypeArgsLen = 0;
6071 const int kNumberOfArguments = 1;
6072 constexpr int kSizeOfArguments = 1;
6073 const Array& kNoArgumentNames = Object::null_array();
6074 ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments,
6075 kNoArgumentNames);
6076 compiler->GenerateStaticCall(deopt_id(), instance_call()->token_pos(), target,
6077 args_info, locs(), ICData::Handle(),
6078 ICData::kStatic);
6079 __ Bind(&done);
6080}
6081
6082LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
6083 bool opt) const {
6084 const intptr_t kNumInputs = 1;
6085 const intptr_t kNumTemps = 0;
6086 LocationSummary* result = new (zone)
6087 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6088 result->set_in(0, Location::RequiresFpuRegister());
6089 result->set_out(0, Location::RequiresRegister());
6090 return result;
6091}
6092
6093void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6094 compiler::Label* deopt =
6095 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
6096 const Register result = locs()->out(0).reg();
6097 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
6098 // First check for NaN. Checking for minint after the conversion doesn't work
6099 // on ARM because vcvtid gives 0 for NaN.
6100 __ vcmpd(value, value);
6101 __ vmstat();
6102 __ b(deopt, VS);
6103
6104 __ vcvtid(STMP, value);
6105 __ vmovrs(result, STMP);
6106 // Check for overflow and that it fits into Smi.
6107 __ CompareImmediate(result, 0xC0000000);
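  // MI means the result is outside the Smi range (this includes the minint
  // overflow marker).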
6108 __ b(deopt, MI);
6109 __ SmiTag(result);
6110}
6111
6112LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
6113 bool opt) const {
6114 UNIMPLEMENTED();
6115 return NULL;
6116}
6117
6118void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6119 UNIMPLEMENTED();
6120}
6121
6122LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
6123 bool opt) const {
6124 const intptr_t kNumInputs = 1;
6125 const intptr_t kNumTemps = 0;
6126 LocationSummary* result = new (zone)
6127 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6128 // Low (< Q7) Q registers are needed for the conversion instructions.
6129 result->set_in(0, Location::RequiresFpuRegister());
6130 result->set_out(0, Location::FpuRegisterLocation(Q6));
6131 return result;
6132}
6133
6134void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6135 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
6136 const SRegister result =
6137 EvenSRegisterOf(EvenDRegisterOf(locs()->out(0).fpu_reg()));
6138 __ vcvtsd(result, value);
6139}
6140
6141LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
6142 bool opt) const {
6143 const intptr_t kNumInputs = 1;
6144 const intptr_t kNumTemps = 0;
6145 LocationSummary* result = new (zone)
6146 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6147 // Low (< Q7) Q registers are needed for the conversion instructions.
6148 result->set_in(0, Location::FpuRegisterLocation(Q6));
6149 result->set_out(0, Location::RequiresFpuRegister());
6150 return result;
6151}
6152
6153void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6154 const SRegister value =
6155 EvenSRegisterOf(EvenDRegisterOf(locs()->in(0).fpu_reg()));
6156 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
6157 __ vcvtds(result, value);
6158}
6159
6160LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
6161 bool opt) const {
6162 ASSERT((InputCount() == 1) || (InputCount() == 2));
6163 const intptr_t kNumTemps =
6164 (TargetCPUFeatures::hardfp_supported())
6165 ? ((recognized_kind() == MethodRecognizer::kMathDoublePow) ? 1 : 0)
6166 : 4;
6167 LocationSummary* result = new (zone)
6168 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
6169 result->set_in(0, Location::FpuRegisterLocation(Q0));
6170 if (InputCount() == 2) {
6171 result->set_in(1, Location::FpuRegisterLocation(Q1));
6172 }
6173 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
6174 result->set_temp(0, Location::RegisterLocation(R2));
6175 if (!TargetCPUFeatures::hardfp_supported()) {
6176 result->set_temp(1, Location::RegisterLocation(R0));
6177 result->set_temp(2, Location::RegisterLocation(R1));
6178 result->set_temp(3, Location::RegisterLocation(R3));
6179 }
6180 } else if (!TargetCPUFeatures::hardfp_supported()) {
6181 result->set_temp(0, Location::RegisterLocation(R0));
6182 result->set_temp(1, Location::RegisterLocation(R1));
6183 result->set_temp(2, Location::RegisterLocation(R2));
6184 result->set_temp(3, Location::RegisterLocation(R3));
6185 }
6186 result->set_out(0, Location::FpuRegisterLocation(Q0));
6187 return result;
6188}
6189
6190// Pseudo code:
6191// if (exponent == 0.0) return 1.0;
6192// // Speed up simple cases.
6193// if (exponent == 1.0) return base;
6194// if (exponent == 2.0) return base * base;
6195// if (exponent == 3.0) return base * base * base;
6196// if (base == 1.0) return 1.0;
6197// if (base.isNaN || exponent.isNaN) {
6198// return double.NAN;
6199// }
6200// if (base != -Infinity && exponent == 0.5) {
6201// if (base == 0.0) return 0.0;
6202// return sqrt(value);
6203// }
6204// TODO(srdjan): Move into a stub?
6205static void InvokeDoublePow(FlowGraphCompiler* compiler,
6206 InvokeMathCFunctionInstr* instr) {
6207 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
6208 const intptr_t kInputCount = 2;
6209 ASSERT(instr->InputCount() == kInputCount);
6210 LocationSummary* locs = instr->locs();
6211
6212 const DRegister base = EvenDRegisterOf(locs->in(0).fpu_reg());
6213 const DRegister exp = EvenDRegisterOf(locs->in(1).fpu_reg());
6214 const DRegister result = EvenDRegisterOf(locs->out(0).fpu_reg());
6215 const Register temp = locs->temp(0).reg();
6216 const DRegister saved_base = OddDRegisterOf(locs->in(0).fpu_reg());
6217 ASSERT((base == result) && (result != saved_base));
6218
6219 compiler::Label skip_call, try_sqrt, check_base, return_nan;
6220 __ vmovd(saved_base, base);
6221 __ LoadDImmediate(result, 1.0, temp);
6222 // exponent == 0.0 -> return 1.0;
6223 __ vcmpdz(exp);
6224 __ vmstat();
6225 __ b(&check_base, VS); // NaN -> check base.
6226 __ b(&skip_call, EQ); // exp is 0.0, result is 1.0.
6227
6228 // exponent == 1.0 ?
6229 __ vcmpd(exp, result);
6230 __ vmstat();
6231 compiler::Label return_base;
6232 __ b(&return_base, EQ);
6233
6234 // exponent == 2.0 ?
6235 __ LoadDImmediate(DTMP, 2.0, temp);
6236 __ vcmpd(exp, DTMP);
6237 __ vmstat();
6238 compiler::Label return_base_times_2;
6239 __ b(&return_base_times_2, EQ);
6240
6241 // exponent == 3.0 ?
6242 __ LoadDImmediate(DTMP, 3.0, temp);
6243 __ vcmpd(exp, DTMP);
6244 __ vmstat();
6245 __ b(&check_base, NE);
6246
6247 // base_times_3.
6248 __ vmuld(result, saved_base, saved_base);
6249 __ vmuld(result, result, saved_base);
6250 __ b(&skip_call);
6251
6252 __ Bind(&return_base);
6253 __ vmovd(result, saved_base);
6254 __ b(&skip_call);
6255
6256 __ Bind(&return_base_times_2);
6257 __ vmuld(result, saved_base, saved_base);
6258 __ b(&skip_call);
6259
6260 __ Bind(&check_base);
6261 // Note: 'exp' could be NaN.
6262 // base == 1.0 -> return 1.0;
6263 __ vcmpd(saved_base, result);
6264 __ vmstat();
6265 __ b(&return_nan, VS);
6266 __ b(&skip_call, EQ); // base is 1.0, result is 1.0.
6267
6268 __ vcmpd(saved_base, exp);
6269 __ vmstat();
  __ b(&try_sqrt, VC);  // Neither 'exp' nor 'base' is NaN.
6271
6272 __ Bind(&return_nan);
6273 __ LoadDImmediate(result, NAN, temp);
6274 __ b(&skip_call);
6275
6276 compiler::Label do_pow, return_zero;
6277 __ Bind(&try_sqrt);
6278
6279 // Before calling pow, check if we could use sqrt instead of pow.
6280 __ LoadDImmediate(result, kNegInfinity, temp);
6281
6282 // base == -Infinity -> call pow;
6283 __ vcmpd(saved_base, result);
6284 __ vmstat();
6285 __ b(&do_pow, EQ);
6286
6287 // exponent == 0.5 ?
6288 __ LoadDImmediate(result, 0.5, temp);
6289 __ vcmpd(exp, result);
6290 __ vmstat();
6291 __ b(&do_pow, NE);
6292
6293 // base == 0 -> return 0;
6294 __ vcmpdz(saved_base);
6295 __ vmstat();
6296 __ b(&return_zero, EQ);
6297
6298 __ vsqrtd(result, saved_base);
6299 __ b(&skip_call);
6300
6301 __ Bind(&return_zero);
6302 __ LoadDImmediate(result, 0.0, temp);
6303 __ b(&skip_call);
6304
6305 __ Bind(&do_pow);
6306 __ vmovd(base, saved_base); // Restore base.
6307
6308 // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
6309 __ vmovd(D1, D2);
6310 if (TargetCPUFeatures::hardfp_supported()) {
6311 __ CallRuntime(instr->TargetFunction(), kInputCount);
6312 } else {
6313 // If the ABI is not "hardfp", then we have to move the double arguments
6314 // to the integer registers, and take the results from the integer
6315 // registers.
6316 __ vmovrrd(R0, R1, D0);
6317 __ vmovrrd(R2, R3, D1);
6318 __ CallRuntime(instr->TargetFunction(), kInputCount);
6319 __ vmovdrr(D0, R0, R1);
6320 __ vmovdrr(D1, R2, R3);
6321 }
6322 __ Bind(&skip_call);
6323}
6324
6325void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6326 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
6327 InvokeDoublePow(compiler, this);
6328 return;
6329 }
6330
6331 if (InputCount() == 2) {
6332 // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
6333 __ vmovd(D1, D2);
6334 }
6335 if (TargetCPUFeatures::hardfp_supported()) {
6336 __ CallRuntime(TargetFunction(), InputCount());
6337 } else {
6338 // If the ABI is not "hardfp", then we have to move the double arguments
6339 // to the integer registers, and take the results from the integer
6340 // registers.
6341 __ vmovrrd(R0, R1, D0);
6342 __ vmovrrd(R2, R3, D1);
6343 __ CallRuntime(TargetFunction(), InputCount());
6344 __ vmovdrr(D0, R0, R1);
6345 __ vmovdrr(D1, R2, R3);
6346 }
6347}
6348
6349LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
6350 bool opt) const {
6351 // Only use this instruction in optimized code.
6352 ASSERT(opt);
6353 const intptr_t kNumInputs = 1;
6354 LocationSummary* summary =
6355 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
6356 if (representation() == kUnboxedDouble) {
6357 if (index() == 0) {
6358 summary->set_in(
6359 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any()));
6360 } else {
6361 ASSERT(index() == 1);
6362 summary->set_in(
6363 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister()));
6364 }
6365 summary->set_out(0, Location::RequiresFpuRegister());
6366 } else {
6367 ASSERT(representation() == kTagged);
6368 if (index() == 0) {
6369 summary->set_in(
6370 0, Location::Pair(Location::RequiresRegister(), Location::Any()));
6371 } else {
6372 ASSERT(index() == 1);
6373 summary->set_in(
6374 0, Location::Pair(Location::Any(), Location::RequiresRegister()));
6375 }
6376 summary->set_out(0, Location::RequiresRegister());
6377 }
6378 return summary;
6379}
6380
6381void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6382 ASSERT(locs()->in(0).IsPairLocation());
6383 PairLocation* pair = locs()->in(0).AsPairLocation();
6384 Location in_loc = pair->At(index());
6385 if (representation() == kUnboxedDouble) {
6386 const QRegister out = locs()->out(0).fpu_reg();
6387 const QRegister in = in_loc.fpu_reg();
6388 __ vmovq(out, in);
6389 } else {
6390 ASSERT(representation() == kTagged);
6391 const Register out = locs()->out(0).reg();
6392 const Register in = in_loc.reg();
6393 __ mov(out, compiler::Operand(in));
6394 }
6395}
6396
6397LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
6398 bool opt) const {
6399 const intptr_t kNumInputs = 2;
6400 const intptr_t kNumTemps = 2;
6401 LocationSummary* summary = new (zone)
6402 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6403 summary->set_in(0, Location::RequiresRegister());
6404 summary->set_in(1, Location::RequiresRegister());
6405 summary->set_temp(0, Location::RequiresRegister());
6406 // Request register that overlaps with S0..S31.
6407 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
6408 // Output is a pair of registers.
6409 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6410 Location::RequiresRegister()));
6411 return summary;
6412}
6413
6414void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6415 ASSERT(CanDeoptimize());
6416 compiler::Label* deopt =
6417 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
6418
6419 ASSERT(TargetCPUFeatures::can_divide());
6420 const Register left = locs()->in(0).reg();
6421 const Register right = locs()->in(1).reg();
6422 ASSERT(locs()->out(0).IsPairLocation());
6423 PairLocation* pair = locs()->out(0).AsPairLocation();
6424 const Register result_div = pair->At(0).reg();
6425 const Register result_mod = pair->At(1).reg();
6426 if (RangeUtils::CanBeZero(divisor_range())) {
6427 // Handle divide by zero in runtime.
6428 __ cmp(right, compiler::Operand(0));
6429 __ b(deopt, EQ);
6430 }
6431 const Register temp = locs()->temp(0).reg();
6432 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
6433 __ SmiUntag(temp, left);
6434 __ SmiUntag(IP, right);
6435 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP);
6436
  // Check the corner case of dividing MIN_SMI by -1, in which case we
  // cannot tag the result.
6439 __ CompareImmediate(result_div, 0x40000000);
6440 __ b(deopt, EQ);
6441 __ SmiUntag(IP, right);
6442 // result_mod <- left - right * result_div.
6443 __ mls(result_mod, IP, result_div, temp);
6444 __ SmiTag(result_div);
6445 __ SmiTag(result_mod);
6446 // Correct MOD result:
6447 // res = left % right;
6448 // if (res < 0) {
6449 // if (right < 0) {
6450 // res = res - right;
6451 // } else {
6452 // res = res + right;
6453 // }
6454 // }
6455 compiler::Label done;
6456 __ cmp(result_mod, compiler::Operand(0));
6457 __ b(&done, GE);
6458 // Result is negative, adjust it.
6459 __ cmp(right, compiler::Operand(0));
6460 __ sub(result_mod, result_mod, compiler::Operand(right), LT);
6461 __ add(result_mod, result_mod, compiler::Operand(right), GE);
6462 __ Bind(&done);
6463}
6464
6465LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6466 comparison()->InitializeLocationSummary(zone, opt);
6467 // Branches don't produce a result.
6468 comparison()->locs()->set_out(0, Location::NoLocation());
6469 return comparison()->locs();
6470}
6471
6472void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6473 comparison()->EmitBranchCode(compiler, this);
6474}
6475
6476LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
6477 bool opt) const {
6478 const intptr_t kNumInputs = 1;
6479 const bool need_mask_temp = IsBitTest();
6480 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
6481 LocationSummary* summary = new (zone)
6482 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6483 summary->set_in(0, Location::RequiresRegister());
6484 if (!IsNullCheck()) {
6485 summary->set_temp(0, Location::RequiresRegister());
6486 if (need_mask_temp) {
6487 summary->set_temp(1, Location::RequiresRegister());
6488 }
6489 }
6490 return summary;
6491}
6492
6493void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
6494 compiler::Label* deopt) {
6495 __ CompareObject(locs()->in(0).reg(), Object::null_object());
6496 ASSERT(IsDeoptIfNull() || IsDeoptIfNotNull());
6497 Condition cond = IsDeoptIfNull() ? EQ : NE;
6498 __ b(deopt, cond);
6499}
6500
6501void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
6502 intptr_t min,
6503 intptr_t max,
6504 intptr_t mask,
6505 compiler::Label* deopt) {
6506 Register biased_cid = locs()->temp(0).reg();
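  // Bias the class id so that the whole [min, max] range can be checked
  // with a single unsigned comparison against (max - min).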
6507 __ AddImmediate(biased_cid, -min);
6508 __ CompareImmediate(biased_cid, max - min);
6509 __ b(deopt, HI);
6510
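  // Set the bit corresponding to the biased class id and test it against
  // the mask; deoptimize if the bit is not set.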
6511 Register bit_reg = locs()->temp(1).reg();
6512 __ LoadImmediate(bit_reg, 1);
6513 __ Lsl(bit_reg, bit_reg, biased_cid);
6514 __ TestImmediate(bit_reg, mask);
6515 __ b(deopt, EQ);
6516}
6517
6518int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
6519 int bias,
6520 intptr_t cid_start,
6521 intptr_t cid_end,
6522 bool is_last,
6523 compiler::Label* is_ok,
6524 compiler::Label* deopt,
6525 bool use_near_jump) {
6526 Register biased_cid = locs()->temp(0).reg();
6527 Condition no_match, match;
6528 if (cid_start == cid_end) {
6529 __ CompareImmediate(biased_cid, cid_start - bias);
6530 no_match = NE;
6531 match = EQ;
6532 } else {
    // For class ID ranges, use a subtract followed by an unsigned
    // comparison to check both ends of the range at once.
6535 __ AddImmediate(biased_cid, bias - cid_start);
6536 bias = cid_start;
6537 __ CompareImmediate(biased_cid, cid_end - cid_start);
6538 no_match = HI; // Unsigned higher.
6539 match = LS; // Unsigned lower or same.
6540 }
6541 if (is_last) {
6542 __ b(deopt, no_match);
6543 } else {
6544 __ b(is_ok, match);
6545 }
6546 return bias;
6547}
6548
6549LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
6550 bool opt) const {
6551 const intptr_t kNumInputs = 1;
6552 const intptr_t kNumTemps = 0;
6553 LocationSummary* summary = new (zone)
6554 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6555 summary->set_in(0, Location::RequiresRegister());
6556 return summary;
6557}
6558
6559void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6560 const Register value = locs()->in(0).reg();
6561 compiler::Label* deopt = compiler->AddDeoptStub(
6562 deopt_id(), ICData::kDeoptCheckSmi, licm_hoisted_ ? ICData::kHoisted : 0);
6563 __ BranchIfNotSmi(value, deopt);
6564}
6565
6566void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6567 Register value_reg = locs()->in(0).reg();
  // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
  // in order to be able to allocate it in a register.
6570 __ CompareObject(value_reg, Object::null_object());
6571
6572 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
6573 Code& stub = Code::ZoneHandle(
6574 compiler->zone(),
6575 NullErrorSlowPath::GetStub(compiler, exception_type(), live_fpu_regs));
6576 const bool using_shared_stub = locs()->call_on_shared_slow_path();
6577
6578 if (using_shared_stub && compiler->CanPcRelativeCall(stub)) {
6579 __ GenerateUnRelocatedPcRelativeCall(EQUAL);
6580 compiler->AddPcRelativeCallStubTarget(stub);
6581
6582 // We use the "extended" environment which has the locations updated to
6583 // reflect live registers being saved in the shared spilling stubs (see
6584 // the stub above).
6585 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
6586 compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
6587 PcDescriptorsLayout::kOther, locs(),
6588 extended_env);
6589 CheckNullInstr::AddMetadataForRuntimeCall(this, compiler);
6590 return;
6591 }
6592
6593 ThrowErrorSlowPathCode* slow_path =
6594 new NullErrorSlowPath(this, compiler->CurrentTryIndex());
6595 compiler->AddSlowPathCode(slow_path);
6596
6597 __ BranchIf(EQUAL, slow_path->entry_label());
6598}
6599
6600LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
6601 bool opt) const {
6602 const intptr_t kNumInputs = 1;
6603 const intptr_t kNumTemps = 0;
6604 LocationSummary* summary = new (zone)
6605 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6606 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
6607 : Location::WritableRegister());
6608 return summary;
6609}
6610
6611void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6612 Register value = locs()->in(0).reg();
6613 compiler::Label* deopt =
6614 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
6615 if (cids_.IsSingleCid()) {
6616 __ CompareImmediate(value, compiler::target::ToRawSmi(cids_.cid_start));
6617 __ b(deopt, NE);
6618 } else {
6619 __ AddImmediate(value, -compiler::target::ToRawSmi(cids_.cid_start));
6620 __ CompareImmediate(value, compiler::target::ToRawSmi(cids_.Extent()));
6621 __ b(deopt, HI); // Unsigned higher.
6622 }
6623}
6624
6625LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
6626 bool opt) const {
6627 const intptr_t kNumInputs = 2;
6628 const intptr_t kNumTemps = 0;
6629 LocationSummary* locs = new (zone)
6630 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6631 locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
6632 locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
6633 return locs;
6634}
6635
6636void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6637 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
6638 flags |= licm_hoisted_ ? ICData::kHoisted : 0;
6639 compiler::Label* deopt =
6640 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
6641
6642 Location length_loc = locs()->in(kLengthPos);
6643 Location index_loc = locs()->in(kIndexPos);
6644
6645 if (length_loc.IsConstant() && index_loc.IsConstant()) {
6646#ifdef DEBUG
6647 const int32_t length = compiler::target::SmiValue(length_loc.constant());
6648 const int32_t index = compiler::target::SmiValue(index_loc.constant());
6649 ASSERT((length <= index) || (index < 0));
6650#endif
    // Unconditionally deoptimize for constant bounds checks because they
    // only occur when the index is out-of-bounds.
6653 __ b(deopt);
6654 return;
6655 }
6656
6657 const intptr_t index_cid = index()->Type()->ToCid();
6658 if (index_loc.IsConstant()) {
6659 const Register length = length_loc.reg();
6660 __ CompareImmediate(length,
6661 compiler::target::ToRawSmi(index_loc.constant()));
6662 __ b(deopt, LS);
6663 } else if (length_loc.IsConstant()) {
6664 const Register index = index_loc.reg();
6665 if (index_cid != kSmiCid) {
6666 __ BranchIfNotSmi(index, deopt);
6667 }
6668 if (compiler::target::SmiValue(length_loc.constant()) ==
6669 compiler::target::kSmiMax) {
6670 __ tst(index, compiler::Operand(index));
6671 __ b(deopt, MI);
6672 } else {
6673 __ CompareImmediate(index,
6674 compiler::target::ToRawSmi(length_loc.constant()));
6675 __ b(deopt, CS);
6676 }
6677 } else {
6678 const Register length = length_loc.reg();
6679 const Register index = index_loc.reg();
6680 if (index_cid != kSmiCid) {
6681 __ BranchIfNotSmi(index, deopt);
6682 }
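    // Unsigned comparison: a negative index has a Smi representation that
    // compares as a large unsigned value, so CS also catches index < 0.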
6683 __ cmp(index, compiler::Operand(length));
6684 __ b(deopt, CS);
6685 }
6686}
6687
6688LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6689 bool opt) const {
6690 const intptr_t kNumInputs = 2;
6691 const intptr_t kNumTemps = (op_kind() == Token::kMUL) ? 1 : 0;
6692 LocationSummary* summary = new (zone)
6693 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6694 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6695 Location::RequiresRegister()));
6696 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6697 Location::RequiresRegister()));
6698 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6699 Location::RequiresRegister()));
6700 if (op_kind() == Token::kMUL) {
6701 summary->set_temp(0, Location::RequiresRegister());
6702 }
6703 return summary;
6704}
6705
6706void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6707 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6708 Register left_lo = left_pair->At(0).reg();
6709 Register left_hi = left_pair->At(1).reg();
6710 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6711 Register right_lo = right_pair->At(0).reg();
6712 Register right_hi = right_pair->At(1).reg();
6713 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6714 Register out_lo = out_pair->At(0).reg();
6715 Register out_hi = out_pair->At(1).reg();
6716 ASSERT(!can_overflow());
6717 ASSERT(!CanDeoptimize());
6718
6719 switch (op_kind()) {
6720 case Token::kBIT_AND: {
6721 __ and_(out_lo, left_lo, compiler::Operand(right_lo));
6722 __ and_(out_hi, left_hi, compiler::Operand(right_hi));
6723 break;
6724 }
6725 case Token::kBIT_OR: {
6726 __ orr(out_lo, left_lo, compiler::Operand(right_lo));
6727 __ orr(out_hi, left_hi, compiler::Operand(right_hi));
6728 break;
6729 }
6730 case Token::kBIT_XOR: {
6731 __ eor(out_lo, left_lo, compiler::Operand(right_lo));
6732 __ eor(out_hi, left_hi, compiler::Operand(right_hi));
6733 break;
6734 }
6735 case Token::kADD: {
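      // adds sets the carry from the low words; adcs folds it into the
      // high words.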
6736 __ adds(out_lo, left_lo, compiler::Operand(right_lo));
6737 __ adcs(out_hi, left_hi, compiler::Operand(right_hi));
6738 break;
6739 }
6740 case Token::kSUB: {
6741 __ subs(out_lo, left_lo, compiler::Operand(right_lo));
6742 __ sbcs(out_hi, left_hi, compiler::Operand(right_hi));
6743 break;
6744 }
6745 case Token::kMUL: {
6746 // Compute 64-bit a * b as:
6747 // a_l * b_l + (a_h * b_l + a_l * b_h) << 32
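      //   temp        = low32(a_l * b_h)
      //   out_hi      = low32(a_h * b_l) + temp
      //   temp:out_lo = a_l * b_l (full unsigned 64-bit product)
      //   out_hi     += temp (the high half of a_l * b_l)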
6748 Register temp = locs()->temp(0).reg();
6749 __ mul(temp, left_lo, right_hi);
6750 __ mla(out_hi, left_hi, right_lo, temp);
6751 __ umull(out_lo, temp, left_lo, right_lo);
6752 __ add(out_hi, out_hi, compiler::Operand(temp));
6753 break;
6754 }
6755 default:
6756 UNREACHABLE();
6757 }
6758}
6759
6760static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
6761 Token::Kind op_kind,
6762 Register out_lo,
6763 Register out_hi,
6764 Register left_lo,
6765 Register left_hi,
6766 const Object& right) {
6767 const int64_t shift = Integer::Cast(right).AsInt64Value();
6768 ASSERT(shift >= 0);
6769
6770 switch (op_kind) {
6771 case Token::kSHR: {
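      // Arithmetic right shift of the {hi:lo} pair. For shift < 32 the low
      // word combines bits from both halves; for shift >= 32 the low word
      // comes from the high word and the high word is filled with the sign.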
6772 if (shift < 32) {
6773 __ Lsl(out_lo, left_hi, compiler::Operand(32 - shift));
6774 __ orr(out_lo, out_lo, compiler::Operand(left_lo, LSR, shift));
6775 __ Asr(out_hi, left_hi, compiler::Operand(shift));
6776 } else {
6777 if (shift == 32) {
6778 __ mov(out_lo, compiler::Operand(left_hi));
6779 } else if (shift < 64) {
6780 __ Asr(out_lo, left_hi, compiler::Operand(shift - 32));
6781 } else {
6782 __ Asr(out_lo, left_hi, compiler::Operand(31));
6783 }
6784 __ Asr(out_hi, left_hi, compiler::Operand(31));
6785 }
6786 break;
6787 }
6788 case Token::kSHL: {
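      // Logical left shift of the {hi:lo} pair: the mirror image of kSHR,
      // with the low word zeroed for shift >= 32.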
6789 ASSERT(shift < 64);
6790 if (shift < 32) {
6791 __ Lsr(out_hi, left_lo, compiler::Operand(32 - shift));
6792 __ orr(out_hi, out_hi, compiler::Operand(left_hi, LSL, shift));
6793 __ Lsl(out_lo, left_lo, compiler::Operand(shift));
6794 } else {
6795 if (shift == 32) {
6796 __ mov(out_hi, compiler::Operand(left_lo));
6797 } else {
6798 __ Lsl(out_hi, left_lo, compiler::Operand(shift - 32));
6799 }
6800 __ mov(out_lo, compiler::Operand(0));
6801 }
6802 break;
6803 }
6804 default:
6805 UNREACHABLE();
6806 }
6807}
6808
6809static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
6810 Token::Kind op_kind,
6811 Register out_lo,
6812 Register out_hi,
6813 Register left_lo,
6814 Register left_hi,
6815 Register right) {
6816 switch (op_kind) {
6817 case Token::kSHR: {
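      // IP = 32 - right (flags set); if that is negative (right > 32), IP
      // becomes right - 32. The MI/PL conditional moves below then pick the
      // "large shift" path (low word taken from the shifted high word) or
      // the "small shift" path (low word combined from both halves).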
6818 __ rsbs(IP, right, compiler::Operand(32));
6819 __ sub(IP, right, compiler::Operand(32), MI);
6820 __ mov(out_lo, compiler::Operand(left_hi, ASR, IP), MI);
6821 __ mov(out_lo, compiler::Operand(left_lo, LSR, right), PL);
6822 __ orr(out_lo, out_lo, compiler::Operand(left_hi, LSL, IP), PL);
6823 __ mov(out_hi, compiler::Operand(left_hi, ASR, right));
6824 break;
6825 }
6826 case Token::kSHL: {
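      // Mirror image of the kSHR case above, shifting left instead of right.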
6827 __ rsbs(IP, right, compiler::Operand(32));
6828 __ sub(IP, right, compiler::Operand(32), MI);
6829 __ mov(out_hi, compiler::Operand(left_lo, LSL, IP), MI);
6830 __ mov(out_hi, compiler::Operand(left_hi, LSL, right), PL);
6831 __ orr(out_hi, out_hi, compiler::Operand(left_lo, LSR, IP), PL);
6832 __ mov(out_lo, compiler::Operand(left_lo, LSL, right));
6833 break;
6834 }
6835 default:
6836 UNREACHABLE();
6837 }
6838}
6839
6840static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
6841 Token::Kind op_kind,
6842 Register out,
6843 Register left,
6844 const Object& right) {
6845 const int64_t shift = Integer::Cast(right).AsInt64Value();
6846 ASSERT(shift >= 0);
6847 if (shift >= 32) {
6848 __ LoadImmediate(out, 0);
6849 } else {
6850 switch (op_kind) {
6851 case Token::kSHR:
6852 __ Lsr(out, left, compiler::Operand(shift));
6853 break;
6854 case Token::kSHL:
6855 __ Lsl(out, left, compiler::Operand(shift));
6856 break;
6857 default:
6858 UNREACHABLE();
6859 }
6860 }
6861}
6862
6863static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
6864 Token::Kind op_kind,
6865 Register out,
6866 Register left,
6867 Register right) {
6868 switch (op_kind) {
6869 case Token::kSHR:
6870 __ Lsr(out, left, right);
6871 break;
6872 case Token::kSHL:
6873 __ Lsl(out, left, right);
6874 break;
6875 default:
6876 UNREACHABLE();
6877 }
6878}
6879
6880class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
6881 public:
6882 static const intptr_t kNumberOfArguments = 0;
6883
6884 ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction, intptr_t try_index)
6885 : ThrowErrorSlowPathCode(instruction,
6886 kArgumentErrorUnboxedInt64RuntimeEntry,
6887 kNumberOfArguments,
6888 try_index) {}
6889
6890 const char* name() override { return "int64 shift"; }
6891
6892 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6893 PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
6894 Register left_hi = left_pair->At(1).reg();
6895 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6896 Register right_lo = right_pair->At(0).reg();
6897 Register right_hi = right_pair->At(1).reg();
6898 PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
6899 Register out_lo = out_pair->At(0).reg();
6900 Register out_hi = out_pair->At(1).reg();
6901
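    // A shift count that is too large but non-negative still has a defined
    // result (sign bits for kSHR, zero for kSHL); only a negative count
    // throws, which is handled by falling through to the runtime call.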
6902 __ CompareImmediate(right_hi, 0);
6903
6904 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6905 case Token::kSHR:
6906 __ Asr(out_hi, left_hi,
6907 compiler::Operand(compiler::target::kBitsPerWord - 1), GE);
6908 __ mov(out_lo, compiler::Operand(out_hi), GE);
6909 break;
6910 case Token::kSHL: {
6911 __ LoadImmediate(out_lo, 0, GE);
6912 __ LoadImmediate(out_hi, 0, GE);
6913 break;
6914 }
6915 default:
6916 UNREACHABLE();
6917 }
6918
6919 __ b(exit_label(), GE);
6920
6921 // Can't pass unboxed int64 value directly to runtime call, as all
6922 // arguments are expected to be tagged (boxed).
6923 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6924 // TODO(dartbug.com/33549): Clean this up when unboxed values
6925 // could be passed as arguments.
6926 __ StoreToOffset(
6927 kWord, right_lo, THR,
6928 compiler::target::Thread::unboxed_int64_runtime_arg_offset());
6929 __ StoreToOffset(
6930 kWord, right_hi, THR,
6931 compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
6932 compiler::target::kWordSize);
6933 }
6934};
6935
6936LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
6937 bool opt) const {
6938 const intptr_t kNumInputs = 2;
6939 const intptr_t kNumTemps = 0;
6940 LocationSummary* summary = new (zone) LocationSummary(
6941 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6942 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6943 Location::RequiresRegister()));
6944 if (RangeUtils::IsPositive(shift_range()) &&
6945 right()->definition()->IsConstant()) {
6946 ConstantInstr* constant = right()->definition()->AsConstant();
6947 summary->set_in(1, Location::Constant(constant));
6948 } else {
6949 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6950 Location::RequiresRegister()));
6951 }
6952 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6953 Location::RequiresRegister()));
6954 return summary;
6955}
6956
6957void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6958 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6959 Register left_lo = left_pair->At(0).reg();
6960 Register left_hi = left_pair->At(1).reg();
6961 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6962 Register out_lo = out_pair->At(0).reg();
6963 Register out_hi = out_pair->At(1).reg();
6964 ASSERT(!can_overflow());
6965
6966 if (locs()->in(1).IsConstant()) {
6967 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6968 left_hi, locs()->in(1).constant());
6969 } else {
6970 // Code for a variable shift amount (or constant that throws).
6971 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6972 Register right_lo = right_pair->At(0).reg();
6973 Register right_hi = right_pair->At(1).reg();
6974
6975 // Jump to a slow path if shift is larger than 63 or less than 0.
6976 ShiftInt64OpSlowPath* slow_path = NULL;
6977 if (!IsShiftCountInRange()) {
6978 slow_path =
6979 new (Z) ShiftInt64OpSlowPath(this, compiler->CurrentTryIndex());
6980 compiler->AddSlowPathCode(slow_path);
6981 __ CompareImmediate(right_hi, 0);
6982 __ b(slow_path->entry_label(), NE);
6983 __ CompareImmediate(right_lo, kShiftCountLimit);
6984 __ b(slow_path->entry_label(), HI);
6985 }
6986
6987 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6988 left_hi, right_lo);
6989
6990 if (slow_path != NULL) {
6991 __ Bind(slow_path->exit_label());
6992 }
6993 }
6994}
6995
6996LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
6997 Zone* zone,
6998 bool opt) const {
6999 const intptr_t kNumInputs = 2;
7000 const intptr_t kNumTemps = 0;
7001 LocationSummary* summary = new (zone)
7002 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7003 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7004 Location::RequiresRegister()));
7005 summary->set_in(1, LocationWritableRegisterOrSmiConstant(right()));
7006 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7007 Location::RequiresRegister()));
7008 return summary;
7009}
7010
7011void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7012 PairLocation* left_pair = locs()->in(0).AsPairLocation();
7013 Register left_lo = left_pair->At(0).reg();
7014 Register left_hi = left_pair->At(1).reg();
7015 PairLocation* out_pair = locs()->out(0).AsPairLocation();
7016 Register out_lo = out_pair->At(0).reg();
7017 Register out_hi = out_pair->At(1).reg();
7018 ASSERT(!can_overflow());
7019
7020 if (locs()->in(1).IsConstant()) {
7021 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
7022 left_hi, locs()->in(1).constant());
7023 } else {
7024 // Code for a variable shift amount.
7025 Register shift = locs()->in(1).reg();
7026 __ SmiUntag(shift);
7027
7028 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
7029 if (!IsShiftCountInRange()) {
7030 ASSERT(CanDeoptimize());
7031 compiler::Label* deopt =
7032 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
7033
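      // After untagging, a negative shift count is a large unsigned value,
      // so a single unsigned comparison covers both bounds.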
7034 __ CompareImmediate(shift, kShiftCountLimit);
7035 __ b(deopt, HI);
7036 }
7037
7038 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
7039 left_hi, shift);
7040 }
7041}
7042
7043class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
7044 public:
7045 static const intptr_t kNumberOfArguments = 0;
7046
7047 ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction, intptr_t try_index)
7048 : ThrowErrorSlowPathCode(instruction,
7049 kArgumentErrorUnboxedInt64RuntimeEntry,
7050 kNumberOfArguments,
7051 try_index) {}
7052
7053 const char* name() override { return "uint32 shift"; }
7054
7055 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
7056 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
7057 Register right_lo = right_pair->At(0).reg();
7058 Register right_hi = right_pair->At(1).reg();
7059 Register out = instruction()->locs()->out(0).reg();
7060
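    // A non-negative shift count that is out of range simply produces zero;
    // a negative count throws via the runtime call below.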
7061 __ CompareImmediate(right_hi, 0);
7062 __ LoadImmediate(out, 0, GE);
7063 __ b(exit_label(), GE);
7064
7065 // Can't pass unboxed int64 value directly to runtime call, as all
7066 // arguments are expected to be tagged (boxed).
7067 // The unboxed int64 argument is passed through a dedicated slot in Thread.
7068 // TODO(dartbug.com/33549): Clean this up when unboxed values
7069 // could be passed as arguments.
7070 __ StoreToOffset(
7071 kWord, right_lo, THR,
7072 compiler::target::Thread::unboxed_int64_runtime_arg_offset());
7073 __ StoreToOffset(
7074 kWord, right_hi, THR,
7075 compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
7076 compiler::target::kWordSize);
7077 }
7078};
7079
7080LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
7081 bool opt) const {
7082 const intptr_t kNumInputs = 2;
7083 const intptr_t kNumTemps = 0;
7084 LocationSummary* summary = new (zone) LocationSummary(
7085 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
7086 summary->set_in(0, Location::RequiresRegister());
7087 if (RangeUtils::IsPositive(shift_range()) &&
7088 right()->definition()->IsConstant()) {
7089 ConstantInstr* constant = right()->definition()->AsConstant();
7090 summary->set_in(1, Location::Constant(constant));
7091 } else {
7092 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
7093 Location::RequiresRegister()));
7094 }
7095 summary->set_out(0, Location::RequiresRegister());
7096 return summary;
7097}
7098
7099void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7100 Register left = locs()->in(0).reg();
7101 Register out = locs()->out(0).reg();
7102
7103 ASSERT(left != out);
7104
7105 if (locs()->in(1).IsConstant()) {
7106 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
7107 locs()->in(1).constant());
7108 } else {
7109 // Code for a variable shift amount (or constant that throws).
7110 PairLocation* right_pair = locs()->in(1).AsPairLocation();
7111 Register right_lo = right_pair->At(0).reg();
7112 Register right_hi = right_pair->At(1).reg();
7113
7114 // Jump to a slow path if shift count is > 31 or negative.
7115 ShiftUint32OpSlowPath* slow_path = NULL;
7116 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
7117 slow_path =
7118 new (Z) ShiftUint32OpSlowPath(this, compiler->CurrentTryIndex());
7119 compiler->AddSlowPathCode(slow_path);
7120
7121 __ CompareImmediate(right_hi, 0);
7122 __ b(slow_path->entry_label(), NE);
7123 __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
7124 __ b(slow_path->entry_label(), HI);
7125 }
7126
7127 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
7128
7129 if (slow_path != NULL) {
7130 __ Bind(slow_path->exit_label());
7131 }
7132 }
7133}
7134
7135LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
7136 Zone* zone,
7137 bool opt) const {
7138 const intptr_t kNumInputs = 2;
7139 const intptr_t kNumTemps = 1;
7140 LocationSummary* summary = new (zone)
7141 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7142 summary->set_in(0, Location::RequiresRegister());
7143 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
7144 summary->set_temp(0, Location::RequiresRegister());
7145 summary->set_out(0, Location::RequiresRegister());
7146 return summary;
7147}
7148
7149void SpeculativeShiftUint32OpInstr::EmitNativeCode(
7150 FlowGraphCompiler* compiler) {
7151 Register left = locs()->in(0).reg();
7152 Register out = locs()->out(0).reg();
7153 Register temp = locs()->temp(0).reg();
7154 ASSERT(left != out);
7155
7156 if (locs()->in(1).IsConstant()) {
7157 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
7158 locs()->in(1).constant());
7159 } else {
7160 Register right = locs()->in(1).reg();
7161 const bool shift_count_in_range =
7162 IsShiftCountInRange(kUint32ShiftCountLimit);
7163
7164 __ SmiUntag(temp, right);
7165 right = temp;
7166
7167 // Deopt if shift count is negative.
7168 if (!shift_count_in_range) {
7169 ASSERT(CanDeoptimize());
7170 compiler::Label* deopt =
7171 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
7172
7173 __ CompareImmediate(right, 0);
7174 __ b(deopt, LT);
7175 }
7176
7177 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
7178
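    // Shift counts above the limit are not an error here; the result is
    // simply zero.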
7179 if (!shift_count_in_range) {
7180 __ CompareImmediate(right, kUint32ShiftCountLimit);
7181 __ LoadImmediate(out, 0, HI);
7182 }
7183 }
7184}
7185
7186LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
7187 bool opt) const {
7188 const intptr_t kNumInputs = 1;
7189 const intptr_t kNumTemps = 0;
7190 LocationSummary* summary = new (zone)
7191 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7192 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7193 Location::RequiresRegister()));
7194 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7195 Location::RequiresRegister()));
7196 return summary;
7197}
7198
7199void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7200 PairLocation* left_pair = locs()->in(0).AsPairLocation();
7201 Register left_lo = left_pair->At(0).reg();
7202 Register left_hi = left_pair->At(1).reg();
7203
7204 PairLocation* out_pair = locs()->out(0).AsPairLocation();
7205 Register out_lo = out_pair->At(0).reg();
7206 Register out_hi = out_pair->At(1).reg();
7207
7208 switch (op_kind()) {
7209 case Token::kBIT_NOT:
7210 __ mvn(out_lo, compiler::Operand(left_lo));
7211 __ mvn(out_hi, compiler::Operand(left_hi));
7212 break;
7213 case Token::kNEGATE:
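      // 64-bit negation: negate the low word, then subtract the high word
      // and the borrow from zero.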
7214 __ rsbs(out_lo, left_lo, compiler::Operand(0));
7215 __ sbc(out_hi, out_hi, compiler::Operand(out_hi));
7216 __ sub(out_hi, out_hi, compiler::Operand(left_hi));
7217 break;
7218 default:
7219 UNREACHABLE();
7220 }
7221}
7222
7223LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
7224 bool opt) const {
7225 const intptr_t kNumInputs = 2;
7226 const intptr_t kNumTemps = 0;
7227 LocationSummary* summary = new (zone)
7228 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7229 summary->set_in(0, Location::RequiresRegister());
7230 summary->set_in(1, Location::RequiresRegister());
7231 summary->set_out(0, Location::RequiresRegister());
7232 return summary;
7233}
7234
7235void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7236 Register left = locs()->in(0).reg();
7237 Register right = locs()->in(1).reg();
7238 Register out = locs()->out(0).reg();
7239 ASSERT(out != left);
7240 switch (op_kind()) {
7241 case Token::kBIT_AND:
7242 __ and_(out, left, compiler::Operand(right));
7243 break;
7244 case Token::kBIT_OR:
7245 __ orr(out, left, compiler::Operand(right));
7246 break;
7247 case Token::kBIT_XOR:
7248 __ eor(out, left, compiler::Operand(right));
7249 break;
7250 case Token::kADD:
7251 __ add(out, left, compiler::Operand(right));
7252 break;
7253 case Token::kSUB:
7254 __ sub(out, left, compiler::Operand(right));
7255 break;
7256 case Token::kMUL:
7257 __ mul(out, left, right);
7258 break;
7259 default:
7260 UNREACHABLE();
7261 }
7262}
7263
7264LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
7265 bool opt) const {
7266 const intptr_t kNumInputs = 1;
7267 const intptr_t kNumTemps = 0;
7268 LocationSummary* summary = new (zone)
7269 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7270 summary->set_in(0, Location::RequiresRegister());
7271 summary->set_out(0, Location::RequiresRegister());
7272 return summary;
7273}
7274
7275void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7276 Register left = locs()->in(0).reg();
7277 Register out = locs()->out(0).reg();
7278 ASSERT(left != out);
7279
7280 ASSERT(op_kind() == Token::kBIT_NOT);
7281
7282 __ mvn(out, compiler::Operand(left));
7283}
7284
7285LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
7286 bool opt) const {
7287 const intptr_t kNumInputs = 1;
7288 const intptr_t kNumTemps = 0;
7289 LocationSummary* summary = new (zone)
7290 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7291 if (from() == kUntagged || to() == kUntagged) {
7292 ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
7293 (from() == kUntagged && to() == kUnboxedUint32) ||
7294 (from() == kUnboxedInt32 && to() == kUntagged) ||
7295 (from() == kUnboxedUint32 && to() == kUntagged));
7296 ASSERT(!CanDeoptimize());
7297 summary->set_in(0, Location::RequiresRegister());
7298 summary->set_out(0, Location::SameAsFirstInput());
7299 } else if (from() == kUnboxedInt64) {
7300 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7301 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7302 Location::RequiresRegister()));
7303 summary->set_out(0, Location::RequiresRegister());
7304 } else if (to() == kUnboxedInt64) {
7305 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
7306 summary->set_in(0, Location::RequiresRegister());
7307 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7308 Location::RequiresRegister()));
7309 } else {
7310 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7311 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
7312 summary->set_in(0, Location::RequiresRegister());
7313 summary->set_out(0, Location::SameAsFirstInput());
7314 }
7315 return summary;
7316}
7317
7318void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7319 const bool is_nop_conversion =
7320 (from() == kUntagged && to() == kUnboxedInt32) ||
7321 (from() == kUntagged && to() == kUnboxedUint32) ||
7322 (from() == kUnboxedInt32 && to() == kUntagged) ||
7323 (from() == kUnboxedUint32 && to() == kUntagged);
7324 if (is_nop_conversion) {
7325 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
7326 return;
7327 }
7328
7329 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
7330 const Register out = locs()->out(0).reg();
7331 // Representations are bitwise equivalent.
7332 ASSERT(out == locs()->in(0).reg());
7333 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
7334 const Register out = locs()->out(0).reg();
7335 // Representations are bitwise equivalent.
7336 ASSERT(out == locs()->in(0).reg());
7337 if (CanDeoptimize()) {
7338 compiler::Label* deopt =
7339 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
7340 __ tst(out, compiler::Operand(out));
7341 __ b(deopt, MI);
7342 }
7343 } else if (from() == kUnboxedInt64) {
7344 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7345 PairLocation* in_pair = locs()->in(0).AsPairLocation();
7346 Register in_lo = in_pair->At(0).reg();
7347 Register in_hi = in_pair->At(1).reg();
7348 Register out = locs()->out(0).reg();
7349 // Copy low word.
7350 __ mov(out, compiler::Operand(in_lo));
7351 if (CanDeoptimize()) {
7352 compiler::Label* deopt =
7353 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
7354 ASSERT(to() == kUnboxedInt32);
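      // The value fits in 32 bits iff the high word equals the sign
      // extension of the low word.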
7355 __ cmp(in_hi,
7356 compiler::Operand(in_lo, ASR, compiler::target::kBitsPerWord - 1));
7357 __ b(deopt, NE);
7358 }
7359 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
7360 ASSERT(to() == kUnboxedInt64);
7361 Register in = locs()->in(0).reg();
7362 PairLocation* out_pair = locs()->out(0).AsPairLocation();
7363 Register out_lo = out_pair->At(0).reg();
7364 Register out_hi = out_pair->At(1).reg();
7365 // Copy low word.
7366 __ mov(out_lo, compiler::Operand(in));
7367 if (from() == kUnboxedUint32) {
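      // Zero-extend: clear the high word.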
7368 __ eor(out_hi, out_hi, compiler::Operand(out_hi));
7369 } else {
7370 ASSERT(from() == kUnboxedInt32);
7371 __ mov(out_hi,
7372 compiler::Operand(in, ASR, compiler::target::kBitsPerWord - 1));
7373 }
7374 } else {
7375 UNREACHABLE();
7376 }
7377}
7378
7379LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7380 LocationSummary* summary =
7381 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
7382 /*num_temps=*/0, LocationSummary::kNoCall);
7383 switch (from()) {
7384 case kUnboxedInt32:
7385 summary->set_in(0, Location::RequiresRegister());
7386 break;
7387 case kUnboxedInt64:
7388 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7389 Location::RequiresRegister()));
7390 break;
7391 case kUnboxedFloat:
7392 case kUnboxedDouble:
7393 // Choose an FPU register with corresponding D and S registers.
7394 summary->set_in(0, Location::FpuRegisterLocation(Q0));
7395 break;
7396 default:
7397 UNREACHABLE();
7398 }
7399
7400 switch (to()) {
7401 case kUnboxedInt32:
7402 summary->set_out(0, Location::RequiresRegister());
7403 break;
7404 case kUnboxedInt64:
7405 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7406 Location::RequiresRegister()));
7407 break;
7408 case kUnboxedFloat:
7409 case kUnboxedDouble:
7410 // Choose an FPU register with corresponding D and S registers.
7411 summary->set_out(0, Location::FpuRegisterLocation(Q0));
7412 break;
7413 default:
7414 UNREACHABLE();
7415 }
7416 return summary;
7417}
7418
7419void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7420 switch (from()) {
7421 case kUnboxedInt32: {
7422 ASSERT(to() == kUnboxedFloat);
7423 const Register from_reg = locs()->in(0).reg();
7424 const FpuRegister to_reg = locs()->out(0).fpu_reg();
7425 __ vmovsr(EvenSRegisterOf(EvenDRegisterOf(to_reg)), from_reg);
7426 break;
7427 }
7428 case kUnboxedFloat: {
7429 ASSERT(to() == kUnboxedInt32);
7430 const FpuRegister from_reg = locs()->in(0).fpu_reg();
7431 const Register to_reg = locs()->out(0).reg();
7432 __ vmovrs(to_reg, EvenSRegisterOf(EvenDRegisterOf(from_reg)));
7433 break;
7434 }
7435 case kUnboxedInt64: {
7436 ASSERT(to() == kUnboxedDouble);
7437 const Register from_lo = locs()->in(0).AsPairLocation()->At(0).reg();
7438 const Register from_hi = locs()->in(0).AsPairLocation()->At(1).reg();
7439 const FpuRegister to_reg = locs()->out(0).fpu_reg();
7440 __ vmovsr(EvenSRegisterOf(EvenDRegisterOf(to_reg)), from_lo);
7441 __ vmovsr(OddSRegisterOf(EvenDRegisterOf(to_reg)), from_hi);
7442 break;
7443 }
7444 case kUnboxedDouble: {
7445 ASSERT(to() == kUnboxedInt64);
7446 const FpuRegister from_reg = locs()->in(0).fpu_reg();
7447 const Register to_lo = locs()->out(0).AsPairLocation()->At(0).reg();
7448 const Register to_hi = locs()->out(0).AsPairLocation()->At(1).reg();
7449 __ vmovrs(to_lo, EvenSRegisterOf(EvenDRegisterOf(from_reg)));
7450 __ vmovrs(to_hi, OddSRegisterOf(EvenDRegisterOf(from_reg)));
7451 break;
7452 }
7453 default:
7454 UNREACHABLE();
7455 }
7456}
7457
7458LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7459 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7460}
7461
7462void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7463 __ Stop(message());
7464}
7465
7466void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7467 BlockEntryInstr* entry = normal_entry();
7468 if (entry != nullptr) {
7469 if (!compiler->CanFallThroughTo(entry)) {
7470 FATAL("Checked function entry must have no offset");
7471 }
7472 } else {
7473 entry = osr_entry();
7474 if (!compiler->CanFallThroughTo(entry)) {
7475 __ b(compiler->GetJumpLabel(entry));
7476 }
7477 }
7478}
7479
7480LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7481 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7482}
7483
7484void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7485 if (!compiler->is_optimizing()) {
7486 if (FLAG_reorder_basic_blocks) {
7487 compiler->EmitEdgeCounter(block()->preorder_number());
7488 }
7489 // Add a deoptimization descriptor for deoptimizing instructions that
7490 // may be inserted before this instruction.
7491 compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
7492 TokenPosition::kNoSource);
7493 }
7494 if (HasParallelMove()) {
7495 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
7496 }
7497
7498 // We can fall through if the successor is the next block in the list.
7499 // Otherwise, we need a jump.
7500 if (!compiler->CanFallThroughTo(successor())) {
7501 __ b(compiler->GetJumpLabel(successor()));
7502 }
7503}
7504
7505LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
7506 bool opt) const {
7507 const intptr_t kNumInputs = 1;
7508 const intptr_t kNumTemps = 1;
7509
7510 LocationSummary* summary = new (zone)
7511 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7512
7513 summary->set_in(0, Location::RequiresRegister());
7514 summary->set_temp(0, Location::RequiresRegister());
7515
7516 return summary;
7517}
7518
7519void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7520 Register target_address_reg = locs()->temp_slot(0)->reg();
7521
7522 // Offset is relative to entry pc.
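  // Reading PC yields the address of the current instruction plus
  // Instr::kPCReadOffset, so subtracting (CodeSize() + kPCReadOffset)
  // recovers the entry address.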
7523 const intptr_t entry_to_pc_offset = __ CodeSize() + Instr::kPCReadOffset;
7524 __ mov(target_address_reg, compiler::Operand(PC));
7525 __ AddImmediate(target_address_reg, -entry_to_pc_offset);
7526 // Add the offset.
7527 Register offset_reg = locs()->in(0).reg();
7528 compiler::Operand offset_opr =
7529 (offset()->definition()->representation() == kTagged)
7530 ? compiler::Operand(offset_reg, ASR, kSmiTagSize)
7531 : compiler::Operand(offset_reg);
7532 __ add(target_address_reg, target_address_reg, offset_opr);
7533
7534 // Jump to the absolute address.
7535 __ bx(target_address_reg);
7536}
7537
7538LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
7539 bool opt) const {
7540 const intptr_t kNumInputs = 2;
7541 const intptr_t kNumTemps = 0;
7542 if (needs_number_check()) {
7543 LocationSummary* locs = new (zone)
7544 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7545 locs->set_in(0, Location::RegisterLocation(R0));
7546 locs->set_in(1, Location::RegisterLocation(R1));
7547 locs->set_out(0, Location::RegisterLocation(R0));
7548 return locs;
7549 }
7550 LocationSummary* locs = new (zone)
7551 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7552
  // If a constant has more than one use, make sure it is loaded into a
  // register so that repeated immediate loads can be avoided.
7555 ConstantInstr* constant = left()->definition()->AsConstant();
7556 if ((constant != NULL) && !left()->IsSingleUse()) {
7557 locs->set_in(0, Location::RequiresRegister());
7558 } else {
7559 locs->set_in(0, LocationRegisterOrConstant(left()));
7560 }
7561
7562 constant = right()->definition()->AsConstant();
7563 if ((constant != NULL) && !right()->IsSingleUse()) {
7564 locs->set_in(1, Location::RequiresRegister());
7565 } else {
    // Only one of the inputs can be a constant: require a register here if
    // the first input is already a constant.
7568 locs->set_in(1, locs->in(0).IsConstant()
7569 ? Location::RequiresRegister()
7570 : LocationRegisterOrConstant(right()));
7571 }
7572 locs->set_out(0, Location::RequiresRegister());
7573 return locs;
7574}
7575
7576Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
7577 FlowGraphCompiler* compiler,
7578 BranchLabels labels,
7579 Register reg,
7580 const Object& obj) {
7581 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
7582 token_pos(), deopt_id());
7583}
7584
7585void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  // On ARM, EmitComparisonCode may not use the true and false labels at all.
7587 compiler::Label is_true, is_false, done;
7588 BranchLabels labels = {&is_true, &is_false, &is_false};
7589 Condition true_condition = EmitComparisonCode(compiler, labels);
7590
7591 const Register result = this->locs()->out(0).reg();
7592 if (is_false.IsLinked() || is_true.IsLinked()) {
7593 if (true_condition != kInvalidCondition) {
7594 EmitBranchOnCondition(compiler, true_condition, labels);
7595 }
7596 __ Bind(&is_false);
7597 __ LoadObject(result, Bool::False());
7598 __ b(&done);
7599 __ Bind(&is_true);
7600 __ LoadObject(result, Bool::True());
7601 __ Bind(&done);
7602 } else {
7603 // If EmitComparisonCode did not use the labels and just returned
7604 // a condition we can avoid the branch and use conditional loads.
7605 ASSERT(true_condition != kInvalidCondition);
7606 __ LoadObject(result, Bool::True(), true_condition);
7607 __ LoadObject(result, Bool::False(), InvertCondition(true_condition));
7608 }
7609}
7610
7611void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
7612 BranchInstr* branch) {
7613 BranchLabels labels = compiler->CreateBranchLabels(branch);
7614 Condition true_condition = EmitComparisonCode(compiler, labels);
7615 if (true_condition != kInvalidCondition) {
7616 EmitBranchOnCondition(compiler, true_condition, labels);
7617 }
7618}
7619
7620LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
7621 bool opt) const {
7622 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7623 LocationSummary::kNoCall);
7624}
7625
7626void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7627 const Register input = locs()->in(0).reg();
7628 const Register result = locs()->out(0).reg();
7629
7630 if (value()->Type()->ToCid() == kBoolCid) {
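    // Bool::True() and Bool::False() are allocated so that their tagged
    // pointers differ only in the bit selected by kBoolValueMask; flipping
    // that bit turns one into the other.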
7631 __ eor(
7632 result, input,
7633 compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
7634 } else {
7635 __ LoadObject(result, Bool::True());
7636 __ cmp(result, compiler::Operand(input));
7637 __ LoadObject(result, Bool::False(), EQ);
7638 }
7639}
7640
7641LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
7642 bool opt) const {
7643 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
7644 const intptr_t kNumTemps = 0;
7645 LocationSummary* locs = new (zone)
7646 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7647 if (type_arguments() != nullptr) {
7648 locs->set_in(0,
7649 Location::RegisterLocation(kAllocationStubTypeArgumentsReg));
7650 }
7651 locs->set_out(0, Location::RegisterLocation(R0));
7652 return locs;
7653}
7654
7655void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7656 if (type_arguments() != nullptr) {
7657 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
7658 if (type_usage_info != nullptr) {
7659 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
7660 type_arguments()->definition());
7661 }
7662 }
7663 const Code& stub = Code::ZoneHandle(
7664 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
7665 compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
7666 locs());
7667}
7668
7669void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7670#ifdef PRODUCT
7671 UNREACHABLE();
7672#else
7673 ASSERT(!compiler->is_optimizing());
7674 __ BranchLinkPatchable(StubCode::DebugStepCheck());
7675 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, token_pos());
7676 compiler->RecordSafepoint(locs());
7677#endif
7678}
7679
7680} // namespace dart
7681
7682#endif // defined(TARGET_ARCH_ARM)
7683