1 | // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. |
6 | #if defined(TARGET_ARCH_ARM64) |
7 | |
8 | #include "vm/compiler/backend/il.h" |
9 | |
10 | #include "vm/compiler/backend/flow_graph.h" |
11 | #include "vm/compiler/backend/flow_graph_compiler.h" |
12 | #include "vm/compiler/backend/locations.h" |
13 | #include "vm/compiler/backend/locations_helpers.h" |
14 | #include "vm/compiler/backend/range_analysis.h" |
15 | #include "vm/compiler/ffi/native_calling_convention.h" |
16 | #include "vm/compiler/jit/compiler.h" |
17 | #include "vm/dart_entry.h" |
18 | #include "vm/instructions.h" |
19 | #include "vm/object_store.h" |
20 | #include "vm/parser.h" |
21 | #include "vm/simulator.h" |
22 | #include "vm/stack_frame.h" |
23 | #include "vm/stub_code.h" |
24 | #include "vm/symbols.h" |
25 | #include "vm/type_testing_stubs.h" |
26 | |
27 | #define __ compiler->assembler()-> |
28 | #define Z (compiler->zone()) |
29 | |
30 | namespace dart { |
31 | |
32 | // Generic summary for call instructions that have all arguments pushed |
33 | // on the stack and return the result in a fixed register R0 (or V0 if |
34 | // the return type is double). |
35 | LocationSummary* Instruction::MakeCallSummary(Zone* zone, |
36 | const Instruction* instr, |
37 | LocationSummary* locs) { |
38 | ASSERT(locs == nullptr || locs->always_calls()); |
39 | LocationSummary* result = |
40 | ((locs == nullptr) |
41 | ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall)) |
42 | : locs); |
43 | const auto representation = instr->representation(); |
44 | switch (representation) { |
45 | case kTagged: |
46 | case kUnboxedInt64: |
47 | result->set_out( |
48 | 0, Location::RegisterLocation(CallingConventions::kReturnReg)); |
49 | break; |
50 | case kUnboxedDouble: |
51 | result->set_out( |
52 | 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg)); |
53 | break; |
54 | default: |
55 | UNREACHABLE(); |
56 | break; |
57 | } |
58 | return result; |
59 | } |
60 | |
61 | LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone, |
62 | bool opt) const { |
63 | const intptr_t kNumInputs = 1; |
64 | const intptr_t kNumTemps = ((representation() == kUnboxedDouble) ? 1 : 0); |
65 | LocationSummary* locs = new (zone) |
66 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
67 | |
68 | locs->set_in(0, Location::RequiresRegister()); |
69 | switch (representation()) { |
70 | case kTagged: |
71 | case kUnboxedInt64: |
72 | locs->set_out(0, Location::RequiresRegister()); |
73 | break; |
74 | case kUnboxedDouble: |
75 | locs->set_temp(0, Location::RequiresRegister()); |
76 | locs->set_out(0, Location::RequiresFpuRegister()); |
77 | break; |
78 | default: |
79 | UNREACHABLE(); |
80 | break; |
81 | } |
82 | return locs; |
83 | } |
84 | |
85 | void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
86 | ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi. |
87 | ASSERT(kSmiTag == 0); |
88 | ASSERT(kSmiTagSize == 1); |
89 | |
90 | const Register index = locs()->in(0).reg(); |
91 | |
92 | switch (representation()) { |
93 | case kTagged: |
94 | case kUnboxedInt64: { |
95 | const auto out = locs()->out(0).reg(); |
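      // The index is a Smi (value << 1), so LSL 2 scales it to a byte offset
      // of index * kWordSize.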
96 | __ add(out, base_reg(), compiler::Operand(index, LSL, 2)); |
97 | __ ldr(out, compiler::Address(out, offset())); |
98 | break; |
99 | } |
100 | case kUnboxedDouble: { |
101 | const auto tmp = locs()->temp(0).reg(); |
102 | const auto out = locs()->out(0).fpu_reg(); |
103 | __ add(tmp, base_reg(), compiler::Operand(index, LSL, 2)); |
104 | __ LoadDFromOffset(out, tmp, offset()); |
105 | break; |
106 | } |
107 | default: |
108 | UNREACHABLE(); |
109 | break; |
110 | } |
111 | } |
112 | |
113 | DEFINE_BACKEND(StoreIndexedUnsafe, |
114 | (NoLocation, Register index, Register value)) { |
115 | ASSERT(instr->RequiredInputRepresentation( |
116 | StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi. |
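  // The index is a Smi (value << 1); LSL 2 scales it to a byte offset of
  // index * kWordSize.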
117 | __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 2)); |
118 | __ str(value, compiler::Address(TMP, instr->offset())); |
119 | |
120 | ASSERT(kSmiTag == 0); |
121 | ASSERT(kSmiTagSize == 1); |
122 | } |
123 | |
124 | DEFINE_BACKEND(TailCall, |
125 | (NoLocation, |
126 | Fixed<Register, ARGS_DESC_REG>, |
127 | Temp<Register> temp)) { |
128 | compiler->EmitTailCallToStub(instr->code()); |
129 | |
130 | // Even though the TailCallInstr will be the last instruction in a basic |
131 | // block, the flow graph compiler will emit native code for other blocks after |
132 | // the one containing this instruction and needs to be able to use the pool. |
  // (The `LeaveDartFrame` performed by EmitTailCallToStub disables use of the
  // pool.)
134 | __ set_constant_pool_allowed(true); |
135 | } |
136 | |
137 | LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone, |
138 | bool opt) const { |
139 | const intptr_t kNumInputs = 5; |
140 | const intptr_t kNumTemps = 1; |
141 | LocationSummary* locs = new (zone) |
142 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
143 | locs->set_in(kSrcPos, Location::WritableRegister()); |
144 | locs->set_in(kDestPos, Location::WritableRegister()); |
145 | locs->set_in(kSrcStartPos, Location::RequiresRegister()); |
146 | locs->set_in(kDestStartPos, Location::RequiresRegister()); |
147 | locs->set_in(kLengthPos, Location::WritableRegister()); |
148 | locs->set_temp(0, element_size_ == 16 |
149 | ? Location::Pair(Location::RequiresRegister(), |
150 | Location::RequiresRegister()) |
151 | : Location::RequiresRegister()); |
152 | return locs; |
153 | } |
154 | |
155 | void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
156 | const Register src_reg = locs()->in(kSrcPos).reg(); |
157 | const Register dest_reg = locs()->in(kDestPos).reg(); |
158 | const Register src_start_reg = locs()->in(kSrcStartPos).reg(); |
159 | const Register dest_start_reg = locs()->in(kDestStartPos).reg(); |
160 | const Register length_reg = locs()->in(kLengthPos).reg(); |
161 | |
162 | Register temp_reg, temp_reg2; |
163 | if (locs()->temp(0).IsPairLocation()) { |
164 | PairLocation* pair = locs()->temp(0).AsPairLocation(); |
165 | temp_reg = pair->At(0).reg(); |
166 | temp_reg2 = pair->At(1).reg(); |
167 | } else { |
168 | temp_reg = locs()->temp(0).reg(); |
169 | temp_reg2 = kNoRegister; |
170 | } |
171 | |
172 | EmitComputeStartPointer(compiler, src_cid_, src_start(), src_reg, |
173 | src_start_reg); |
174 | EmitComputeStartPointer(compiler, dest_cid_, dest_start(), dest_reg, |
175 | dest_start_reg); |
176 | |
177 | compiler::Label loop, done; |
178 | |
179 | compiler::Address src_address = |
180 | compiler::Address(src_reg, element_size_, compiler::Address::PostIndex); |
181 | compiler::Address dest_address = |
182 | compiler::Address(dest_reg, element_size_, compiler::Address::PostIndex); |
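  // Both addresses use post-indexed addressing, so every load/store below
  // advances its pointer by one element.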
183 | |
184 | // Untag length and skip copy if length is zero. |
185 | __ adds(length_reg, ZR, compiler::Operand(length_reg, ASR, 1)); |
186 | __ b(&done, ZERO); |
187 | |
188 | __ Bind(&loop); |
189 | switch (element_size_) { |
190 | case 1: |
191 | __ ldr(temp_reg, src_address, kUnsignedByte); |
192 | __ str(temp_reg, dest_address, kUnsignedByte); |
193 | break; |
194 | case 2: |
195 | __ ldr(temp_reg, src_address, kUnsignedHalfword); |
196 | __ str(temp_reg, dest_address, kUnsignedHalfword); |
197 | break; |
198 | case 4: |
199 | __ ldr(temp_reg, src_address, kUnsignedWord); |
200 | __ str(temp_reg, dest_address, kUnsignedWord); |
201 | break; |
202 | case 8: |
203 | __ ldr(temp_reg, src_address, kDoubleWord); |
204 | __ str(temp_reg, dest_address, kDoubleWord); |
205 | break; |
206 | case 16: |
207 | __ ldp(temp_reg, temp_reg2, src_address, kDoubleWord); |
208 | __ stp(temp_reg, temp_reg2, dest_address, kDoubleWord); |
209 | break; |
210 | } |
211 | __ subs(length_reg, length_reg, compiler::Operand(1)); |
212 | __ b(&loop, NOT_ZERO); |
213 | __ Bind(&done); |
214 | } |
215 | |
216 | void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler, |
217 | classid_t array_cid, |
218 | Value* start, |
219 | Register array_reg, |
220 | Register start_reg) { |
221 | if (IsTypedDataBaseClassId(array_cid)) { |
222 | __ ldr( |
223 | array_reg, |
224 | compiler::FieldAddress( |
225 | array_reg, compiler::target::TypedDataBase::data_field_offset())); |
226 | } else { |
227 | switch (array_cid) { |
228 | case kOneByteStringCid: |
229 | __ add( |
230 | array_reg, array_reg, |
231 | compiler::Operand(compiler::target::OneByteString::data_offset() - |
232 | kHeapObjectTag)); |
233 | break; |
234 | case kTwoByteStringCid: |
235 | __ add( |
236 | array_reg, array_reg, |
            compiler::Operand(compiler::target::TwoByteString::data_offset() -
                              kHeapObjectTag));
239 | break; |
240 | case kExternalOneByteStringCid: |
241 | __ ldr(array_reg, |
242 | compiler::FieldAddress(array_reg, |
243 | compiler::target::ExternalOneByteString:: |
244 | external_data_offset())); |
245 | break; |
246 | case kExternalTwoByteStringCid: |
247 | __ ldr(array_reg, |
248 | compiler::FieldAddress(array_reg, |
249 | compiler::target::ExternalTwoByteString:: |
250 | external_data_offset())); |
251 | break; |
252 | default: |
253 | UNREACHABLE(); |
254 | break; |
255 | } |
256 | } |
257 | intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) - 1; |
258 | if (shift < 0) { |
259 | __ add(array_reg, array_reg, compiler::Operand(start_reg, ASR, -shift)); |
260 | } else { |
261 | __ add(array_reg, array_reg, compiler::Operand(start_reg, LSL, shift)); |
262 | } |
263 | } |
264 | |
265 | LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone, |
266 | bool opt) const { |
267 | const intptr_t kNumInputs = 1; |
268 | const intptr_t kNumTemps = 0; |
269 | LocationSummary* locs = new (zone) |
270 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
271 | if (representation() == kUnboxedDouble) { |
272 | locs->set_in(0, Location::RequiresFpuRegister()); |
273 | } else if (representation() == kUnboxedInt64) { |
274 | locs->set_in(0, Location::RequiresRegister()); |
275 | } else { |
276 | locs->set_in(0, LocationAnyOrConstant(value())); |
277 | } |
278 | return locs; |
279 | } |
280 | |
281 | // Buffers registers in order to use STP to push |
282 | // two registers at once. |
283 | class ArgumentsPusher : public ValueObject { |
284 | public: |
285 | ArgumentsPusher() {} |
286 | |
287 | // Flush all buffered registers. |
288 | void Flush(FlowGraphCompiler* compiler) { |
289 | if (pending_register_ != kNoRegister) { |
290 | __ Push(pending_register_); |
291 | pending_register_ = kNoRegister; |
292 | } |
293 | } |
294 | |
295 | // Buffer given register. May push buffered registers if needed. |
296 | void PushRegister(FlowGraphCompiler* compiler, Register reg) { |
297 | if (pending_register_ != kNoRegister) { |
298 | __ PushPair(reg, pending_register_); |
299 | pending_register_ = kNoRegister; |
300 | return; |
301 | } |
302 | pending_register_ = reg; |
303 | } |
304 | |
305 | // Returns free temp register to hold argument value. |
306 | Register GetFreeTempRegister() { |
307 | // While pushing arguments only Push, PushPair, LoadObject and |
308 | // LoadFromOffset are used. They do not clobber TMP or LR. |
    static_assert(((1 << LR) & kDartAvailableCpuRegs) == 0,
                  "LR should not be allocatable");
    static_assert(((1 << TMP) & kDartAvailableCpuRegs) == 0,
                  "TMP should not be allocatable");
313 | return (pending_register_ == TMP) ? LR : TMP; |
314 | } |
315 | |
316 | private: |
317 | Register pending_register_ = kNoRegister; |
318 | }; |
319 | |
320 | void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
321 | // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode |
322 | // where arguments are pushed by their definitions. |
323 | if (compiler->is_optimizing()) { |
324 | if (previous()->IsPushArgument()) { |
325 | // Already generated. |
326 | return; |
327 | } |
328 | ArgumentsPusher pusher; |
329 | for (PushArgumentInstr* push_arg = this; push_arg != nullptr; |
330 | push_arg = push_arg->next()->AsPushArgument()) { |
331 | const Location value = push_arg->locs()->in(0); |
332 | Register reg = kNoRegister; |
333 | if (value.IsRegister()) { |
334 | reg = value.reg(); |
335 | } else if (value.IsConstant()) { |
336 | if (compiler::IsSameObject(compiler::NullObject(), value.constant())) { |
337 | reg = NULL_REG; |
338 | } else { |
339 | reg = pusher.GetFreeTempRegister(); |
340 | __ LoadObject(reg, value.constant()); |
341 | } |
342 | } else if (value.IsFpuRegister()) { |
343 | pusher.Flush(compiler); |
344 | __ PushDouble(value.fpu_reg()); |
345 | continue; |
346 | } else { |
347 | ASSERT(value.IsStackSlot()); |
348 | const intptr_t value_offset = value.ToStackSlotOffset(); |
349 | reg = pusher.GetFreeTempRegister(); |
350 | __ LoadFromOffset(reg, value.base_reg(), value_offset); |
351 | } |
352 | pusher.PushRegister(compiler, reg); |
353 | } |
354 | pusher.Flush(compiler); |
355 | } |
356 | } |
357 | |
358 | LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
359 | const intptr_t kNumInputs = 1; |
360 | const intptr_t kNumTemps = 0; |
361 | LocationSummary* locs = new (zone) |
362 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
363 | switch (representation()) { |
364 | case kTagged: |
365 | case kUnboxedInt64: |
366 | locs->set_in(0, |
367 | Location::RegisterLocation(CallingConventions::kReturnReg)); |
368 | break; |
369 | case kUnboxedDouble: |
370 | locs->set_in( |
371 | 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg)); |
372 | break; |
373 | default: |
374 | UNREACHABLE(); |
375 | break; |
376 | } |
377 | return locs; |
378 | } |
379 | |
380 | // Attempt optimized compilation at return instruction instead of at the entry. |
381 | // The entry needs to be patchable, no inlined objects are allowed in the area |
382 | // that will be overwritten by the patch instructions: a branch macro sequence. |
383 | void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
384 | if (locs()->in(0).IsRegister()) { |
385 | const Register result = locs()->in(0).reg(); |
386 | ASSERT(result == CallingConventions::kReturnReg); |
387 | } else { |
388 | ASSERT(locs()->in(0).IsFpuRegister()); |
389 | const FpuRegister result = locs()->in(0).fpu_reg(); |
390 | ASSERT(result == CallingConventions::kReturnFpuReg); |
391 | } |
392 | |
393 | if (compiler->intrinsic_mode()) { |
394 | // Intrinsics don't have a frame. |
395 | __ ret(); |
396 | return; |
397 | } |
398 | |
399 | #if defined(DEBUG) |
400 | compiler::Label stack_ok; |
  __ Comment("Stack Check");
402 | const intptr_t fp_sp_dist = |
403 | (compiler::target::frame_layout.first_local_from_fp + 1 - |
404 | compiler->StackSize()) * |
405 | kWordSize; |
406 | ASSERT(fp_sp_dist <= 0); |
407 | __ sub(R2, SP, compiler::Operand(FP)); |
408 | __ CompareImmediate(R2, fp_sp_dist); |
409 | __ b(&stack_ok, EQ); |
410 | __ brk(0); |
411 | __ Bind(&stack_ok); |
412 | #endif |
413 | ASSERT(__ constant_pool_allowed()); |
414 | if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) { |
415 | compiler->EmitYieldPositionMetadata(token_pos(), yield_index()); |
416 | } |
417 | __ LeaveDartFrame(); // Disallows constant pool use. |
418 | __ ret(); |
419 | // This ReturnInstr may be emitted out of order by the optimizer. The next |
420 | // block may be a target expecting a properly set constant pool pointer. |
421 | __ set_constant_pool_allowed(true); |
422 | } |
423 | |
424 | // Detect pattern when one value is zero and another is a power of 2. |
425 | static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) { |
426 | return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) || |
427 | (Utils::IsPowerOfTwo(v2) && (v1 == 0)); |
428 | } |
429 | |
430 | LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone, |
431 | bool opt) const { |
432 | comparison()->InitializeLocationSummary(zone, opt); |
433 | return comparison()->locs(); |
434 | } |
435 | |
436 | void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
437 | const Register result = locs()->out(0).reg(); |
438 | |
439 | Location left = locs()->in(0); |
440 | Location right = locs()->in(1); |
441 | ASSERT(!left.IsConstant() || !right.IsConstant()); |
442 | |
443 | // Emit comparison code. This must not overwrite the result register. |
444 | // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using |
445 | // the labels or returning an invalid condition. |
446 | BranchLabels labels = {NULL, NULL, NULL}; |
447 | Condition true_condition = comparison()->EmitComparisonCode(compiler, labels); |
448 | ASSERT(true_condition != kInvalidCondition); |
449 | |
450 | const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_); |
451 | |
452 | intptr_t true_value = if_true_; |
453 | intptr_t false_value = if_false_; |
454 | |
455 | if (is_power_of_two_kind) { |
456 | if (true_value == 0) { |
457 | // We need to have zero in result on true_condition. |
458 | true_condition = InvertCondition(true_condition); |
459 | } |
460 | } else { |
461 | if (true_value == 0) { |
462 | // Swap values so that false_value is zero. |
463 | intptr_t temp = true_value; |
464 | true_value = false_value; |
465 | false_value = temp; |
466 | } else { |
467 | true_condition = InvertCondition(true_condition); |
468 | } |
469 | } |
470 | |
471 | __ cset(result, true_condition); |
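  // The arithmetic below maps the 0/1 produced by cset onto the Smi-tagged
  // true/false values without branching.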
472 | |
473 | if (is_power_of_two_kind) { |
474 | const intptr_t shift = |
475 | Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value)); |
476 | __ LslImmediate(result, result, shift + kSmiTagSize); |
477 | } else { |
478 | __ sub(result, result, compiler::Operand(1)); |
479 | const int64_t val = Smi::RawValue(true_value) - Smi::RawValue(false_value); |
480 | __ AndImmediate(result, result, val); |
481 | if (false_value != 0) { |
482 | __ AddImmediate(result, Smi::RawValue(false_value)); |
483 | } |
484 | } |
485 | } |
486 | |
487 | LocationSummary* DispatchTableCallInstr::MakeLocationSummary(Zone* zone, |
488 | bool opt) const { |
489 | const intptr_t kNumInputs = 1; |
490 | const intptr_t kNumTemps = 0; |
491 | LocationSummary* summary = new (zone) |
492 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
493 | summary->set_in(0, Location::RegisterLocation(R0)); // ClassId |
494 | return MakeCallSummary(zone, this, summary); |
495 | } |
496 | |
497 | LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone, |
498 | bool opt) const { |
499 | const intptr_t kNumInputs = 1; |
500 | const intptr_t kNumTemps = 0; |
501 | LocationSummary* summary = new (zone) |
502 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
503 | summary->set_in(0, Location::RegisterLocation(R0)); // Function. |
504 | return MakeCallSummary(zone, this, summary); |
505 | } |
506 | |
507 | void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
508 | // Load arguments descriptor in R4. |
509 | const intptr_t argument_count = ArgumentCount(); // Includes type args. |
510 | const Array& arguments_descriptor = |
511 | Array::ZoneHandle(Z, GetArgumentsDescriptor()); |
512 | __ LoadObject(R4, arguments_descriptor); |
513 | |
514 | // R4: Arguments descriptor. |
515 | // R0: Function. |
516 | ASSERT(locs()->in(0).reg() == R0); |
517 | if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) { |
518 | __ LoadFieldFromOffset(CODE_REG, R0, |
519 | compiler::target::Function::code_offset()); |
520 | } |
521 | __ LoadFieldFromOffset( |
522 | R2, R0, compiler::target::Function::entry_point_offset(entry_kind())); |
523 | |
524 | // R2: instructions. |
525 | if (!FLAG_precompiled_mode) { |
526 | // R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value). |
527 | __ LoadImmediate(R5, 0); |
528 | } |
529 | __ blr(R2); |
530 | compiler->EmitCallsiteMetadata(token_pos(), deopt_id(), |
531 | PcDescriptorsLayout::kOther, locs()); |
532 | __ Drop(argument_count); |
533 | } |
534 | |
535 | LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone, |
536 | bool opt) const { |
537 | return LocationSummary::Make(zone, 0, Location::RequiresRegister(), |
538 | LocationSummary::kNoCall); |
539 | } |
540 | |
541 | void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
542 | const Register result = locs()->out(0).reg(); |
543 | __ LoadFromOffset(result, FP, |
544 | compiler::target::FrameOffsetInBytesForVariable(&local())); |
545 | } |
546 | |
547 | LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone, |
548 | bool opt) const { |
549 | return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(), |
550 | LocationSummary::kNoCall); |
551 | } |
552 | |
553 | void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
554 | const Register value = locs()->in(0).reg(); |
555 | const Register result = locs()->out(0).reg(); |
556 | ASSERT(result == value); // Assert that register assignment is correct. |
557 | __ StoreToOffset(value, FP, |
558 | compiler::target::FrameOffsetInBytesForVariable(&local())); |
559 | } |
560 | |
561 | LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone, |
562 | bool opt) const { |
563 | return LocationSummary::Make(zone, 0, Location::RequiresRegister(), |
564 | LocationSummary::kNoCall); |
565 | } |
566 | |
567 | void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
568 | // The register allocator drops constant definitions that have no uses. |
569 | if (!locs()->out(0).IsInvalid()) { |
570 | const Register result = locs()->out(0).reg(); |
571 | __ LoadObject(result, value()); |
572 | } |
573 | } |
574 | |
575 | void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler, |
576 | const Location& destination, |
577 | Register tmp) { |
578 | if (destination.IsRegister()) { |
579 | if (representation() == kUnboxedInt32 || |
580 | representation() == kUnboxedInt64) { |
581 | const int64_t value = Integer::Cast(value_).AsInt64Value(); |
582 | __ LoadImmediate(destination.reg(), value); |
583 | } else { |
584 | ASSERT(representation() == kTagged); |
585 | __ LoadObject(destination.reg(), value_); |
586 | } |
587 | } else if (destination.IsFpuRegister()) { |
588 | const VRegister dst = destination.fpu_reg(); |
589 | if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0)) { |
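      // All-zero bits represent +0.0, so clearing the register avoids a
      // constant load.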
590 | __ veor(dst, dst, dst); |
591 | } else { |
592 | __ LoadDImmediate(dst, Double::Cast(value_).value()); |
593 | } |
594 | } else if (destination.IsDoubleStackSlot()) { |
595 | if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0)) { |
596 | __ veor(VTMP, VTMP, VTMP); |
597 | } else { |
598 | __ LoadDImmediate(VTMP, Double::Cast(value_).value()); |
599 | } |
600 | const intptr_t dest_offset = destination.ToStackSlotOffset(); |
601 | __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset); |
602 | } else { |
603 | ASSERT(destination.IsStackSlot()); |
604 | ASSERT(tmp != kNoRegister); |
605 | const intptr_t dest_offset = destination.ToStackSlotOffset(); |
606 | if (representation() == kUnboxedInt32 || |
607 | representation() == kUnboxedInt64) { |
608 | const int64_t value = Integer::Cast(value_).AsInt64Value(); |
609 | __ LoadImmediate(tmp, value); |
610 | } else { |
611 | ASSERT(representation() == kTagged); |
612 | __ LoadObject(tmp, value_); |
613 | } |
614 | __ StoreToOffset(tmp, destination.base_reg(), dest_offset); |
615 | } |
616 | } |
617 | |
618 | LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone, |
619 | bool opt) const { |
620 | const intptr_t kNumInputs = 0; |
621 | const intptr_t kNumTemps = IsUnboxedSignedIntegerConstant() ? 0 : 1; |
622 | LocationSummary* locs = new (zone) |
623 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
624 | switch (representation()) { |
625 | case kUnboxedDouble: |
626 | locs->set_out(0, Location::RequiresFpuRegister()); |
627 | locs->set_temp(0, Location::RequiresRegister()); |
628 | break; |
629 | case kUnboxedInt32: |
630 | case kUnboxedInt64: |
631 | locs->set_out(0, Location::RequiresRegister()); |
632 | break; |
633 | default: |
634 | UNREACHABLE(); |
635 | break; |
636 | } |
637 | return locs; |
638 | } |
639 | |
640 | void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
641 | if (!locs()->out(0).IsInvalid()) { |
642 | const Register scratch = |
643 | IsUnboxedSignedIntegerConstant() ? kNoRegister : locs()->temp(0).reg(); |
644 | EmitMoveToLocation(compiler, locs()->out(0), scratch); |
645 | } |
646 | } |
647 | |
648 | LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone, |
649 | bool opt) const { |
650 | auto const dst_type_loc = |
651 | LocationFixedRegisterOrConstant(dst_type(), TypeTestABI::kDstTypeReg); |
652 | |
  // When using a type testing stub, we want to prevent spilling of the
  // function/instantiator type argument vectors, since the stub preserves
  // them. So we make this a `kNoCall` summary, even though most other
  // registers can be modified by the stub. To tell the register allocator
  // about it, we reserve all the other registers as temporary registers.
658 | // TODO(http://dartbug.com/32788): Simplify this. |
659 | const bool using_stub = dst_type_loc.IsConstant() && |
660 | FlowGraphCompiler::ShouldUseTypeTestingStubFor( |
661 | opt, AbstractType::Cast(dst_type_loc.constant())); |
662 | |
663 | const intptr_t kNonChangeableInputRegs = |
664 | (1 << TypeTestABI::kInstanceReg) | |
665 | ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) | |
666 | (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) | |
667 | (1 << TypeTestABI::kFunctionTypeArgumentsReg); |
668 | |
669 | const intptr_t kNumInputs = 4; |
670 | |
671 | // We invoke a stub that can potentially clobber any CPU register |
672 | // but can only clobber FPU registers on the slow path when |
  // entering runtime. The ARM64 ABI only guarantees that the lower
  // 64 bits of the V registers are preserved, so we block all
  // of them except for FpuTMP.
676 | const intptr_t kCpuRegistersToPreserve = |
677 | kDartAvailableCpuRegs & ~kNonChangeableInputRegs; |
678 | const intptr_t kFpuRegistersToPreserve = |
679 | Utils::SignedNBitMask(kNumberOfFpuRegisters) & ~(1l << FpuTMP); |
680 | |
681 | const intptr_t kNumTemps = |
682 | using_stub ? (Utils::CountOneBits64(kCpuRegistersToPreserve) + |
683 | Utils::CountOneBits64(kFpuRegistersToPreserve)) |
684 | : 0; |
685 | |
686 | LocationSummary* summary = new (zone) LocationSummary( |
687 | zone, kNumInputs, kNumTemps, |
688 | using_stub ? LocationSummary::kCallCalleeSafe : LocationSummary::kCall); |
689 | summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg)); |
690 | summary->set_in(1, dst_type_loc); |
691 | summary->set_in(2, Location::RegisterLocation( |
692 | TypeTestABI::kInstantiatorTypeArgumentsReg)); |
693 | summary->set_in( |
694 | 3, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg)); |
695 | summary->set_out(0, Location::SameAsFirstInput()); |
696 | |
697 | if (using_stub) { |
698 | // Let's reserve all registers except for the input ones. |
699 | intptr_t next_temp = 0; |
700 | for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) { |
701 | const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0; |
702 | if (should_preserve) { |
703 | summary->set_temp(next_temp++, |
704 | Location::RegisterLocation(static_cast<Register>(i))); |
705 | } |
706 | } |
707 | |
708 | for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) { |
709 | const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0; |
710 | if (should_preserve) { |
711 | summary->set_temp(next_temp++, Location::FpuRegisterLocation( |
712 | static_cast<FpuRegister>(i))); |
713 | } |
714 | } |
715 | } |
716 | |
717 | return summary; |
718 | } |
719 | |
720 | static Condition TokenKindToSmiCondition(Token::Kind kind) { |
721 | switch (kind) { |
722 | case Token::kEQ: |
723 | return EQ; |
724 | case Token::kNE: |
725 | return NE; |
726 | case Token::kLT: |
727 | return LT; |
728 | case Token::kGT: |
729 | return GT; |
730 | case Token::kLTE: |
731 | return LE; |
732 | case Token::kGTE: |
733 | return GE; |
734 | default: |
735 | UNREACHABLE(); |
736 | return VS; |
737 | } |
738 | } |
739 | |
740 | static Condition FlipCondition(Condition condition) { |
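  // Flips the condition for swapped operands (a OP b becomes b OP' a); note
  // that this is not the same as inverting the condition.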
741 | switch (condition) { |
742 | case EQ: |
743 | return EQ; |
744 | case NE: |
745 | return NE; |
746 | case LT: |
747 | return GT; |
748 | case LE: |
749 | return GE; |
750 | case GT: |
751 | return LT; |
752 | case GE: |
753 | return LE; |
754 | case CC: |
755 | return HI; |
756 | case LS: |
757 | return CS; |
758 | case HI: |
759 | return CC; |
760 | case CS: |
761 | return LS; |
762 | default: |
763 | UNREACHABLE(); |
764 | return EQ; |
765 | } |
766 | } |
767 | |
768 | static void EmitBranchOnCondition(FlowGraphCompiler* compiler, |
769 | Condition true_condition, |
770 | BranchLabels labels) { |
771 | if (labels.fall_through == labels.false_label) { |
772 | // If the next block is the false successor we will fall through to it. |
773 | __ b(labels.true_label, true_condition); |
774 | } else { |
775 | // If the next block is not the false successor we will branch to it. |
776 | Condition false_condition = InvertCondition(true_condition); |
777 | __ b(labels.false_label, false_condition); |
778 | |
779 | // Fall through or jump to the true successor. |
780 | if (labels.fall_through != labels.true_label) { |
781 | __ b(labels.true_label); |
782 | } |
783 | } |
784 | } |
785 | |
786 | static bool AreLabelsNull(BranchLabels labels) { |
787 | return (labels.true_label == nullptr && labels.false_label == nullptr && |
788 | labels.fall_through == nullptr); |
789 | } |
790 | |
791 | static bool CanUseCbzTbzForComparison(FlowGraphCompiler* compiler, |
792 | Register rn, |
793 | Condition cond, |
794 | BranchLabels labels) { |
795 | return !AreLabelsNull(labels) && __ CanGenerateXCbzTbz(rn, cond); |
796 | } |
797 | |
798 | static void EmitCbzTbz(Register reg, |
799 | FlowGraphCompiler* compiler, |
800 | Condition true_condition, |
801 | BranchLabels labels) { |
802 | ASSERT(CanUseCbzTbzForComparison(compiler, reg, true_condition, labels)); |
803 | if (labels.fall_through == labels.false_label) { |
804 | // If the next block is the false successor we will fall through to it. |
805 | __ GenerateXCbzTbz(reg, true_condition, labels.true_label); |
806 | } else { |
807 | // If the next block is not the false successor we will branch to it. |
808 | Condition false_condition = InvertCondition(true_condition); |
809 | __ GenerateXCbzTbz(reg, false_condition, labels.false_label); |
810 | |
811 | // Fall through or jump to the true successor. |
812 | if (labels.fall_through != labels.true_label) { |
813 | __ b(labels.true_label); |
814 | } |
815 | } |
816 | } |
817 | |
818 | // Similar to ComparisonInstr::EmitComparisonCode, may either: |
819 | // - emit comparison code and return a valid condition in which case the |
820 | // caller is expected to emit a branch to the true label based on that |
821 | // condition (or a branch to the false label on the opposite condition). |
822 | // - emit comparison code with a branch directly to the labels and return |
823 | // kInvalidCondition. |
824 | static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler, |
825 | LocationSummary* locs, |
826 | Token::Kind kind, |
827 | BranchLabels labels) { |
828 | Location left = locs->in(0); |
829 | Location right = locs->in(1); |
830 | ASSERT(!left.IsConstant() || !right.IsConstant()); |
831 | |
832 | Condition true_condition = TokenKindToSmiCondition(kind); |
833 | if (left.IsConstant() || right.IsConstant()) { |
834 | // Ensure constant is on the right. |
835 | ConstantInstr* right_constant = NULL; |
836 | if (left.IsConstant()) { |
837 | right_constant = left.constant_instruction(); |
838 | Location tmp = right; |
839 | right = left; |
840 | left = tmp; |
841 | true_condition = FlipCondition(true_condition); |
842 | } else { |
843 | right_constant = right.constant_instruction(); |
844 | } |
845 | |
846 | if (right_constant->IsUnboxedSignedIntegerConstant()) { |
847 | const int64_t constant = |
848 | right_constant->GetUnboxedSignedIntegerConstantValue(); |
849 | if (constant == 0 && CanUseCbzTbzForComparison(compiler, left.reg(), |
850 | true_condition, labels)) { |
851 | EmitCbzTbz(left.reg(), compiler, true_condition, labels); |
852 | return kInvalidCondition; |
853 | } |
854 | __ CompareImmediate( |
855 | left.reg(), right_constant->GetUnboxedSignedIntegerConstantValue()); |
856 | } else { |
857 | ASSERT(right_constant->representation() == kTagged); |
858 | __ CompareObject(left.reg(), right.constant()); |
859 | } |
860 | } else { |
861 | __ CompareRegisters(left.reg(), right.reg()); |
862 | } |
863 | return true_condition; |
864 | } |
865 | |
866 | LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone, |
867 | bool opt) const { |
868 | const intptr_t kNumInputs = 2; |
869 | if (operation_cid() == kDoubleCid) { |
870 | const intptr_t kNumTemps = 0; |
871 | LocationSummary* locs = new (zone) |
872 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
873 | locs->set_in(0, Location::RequiresFpuRegister()); |
874 | locs->set_in(1, Location::RequiresFpuRegister()); |
875 | locs->set_out(0, Location::RequiresRegister()); |
876 | return locs; |
877 | } |
878 | if (operation_cid() == kSmiCid || operation_cid() == kMintCid) { |
879 | const intptr_t kNumTemps = 0; |
880 | LocationSummary* locs = new (zone) |
881 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
882 | locs->set_in(0, LocationRegisterOrConstant(left())); |
883 | // Only one input can be a constant operand. The case of two constant |
884 | // operands should be handled by constant propagation. |
885 | // Only right can be a stack slot. |
886 | locs->set_in(1, locs->in(0).IsConstant() |
887 | ? Location::RequiresRegister() |
888 | : LocationRegisterOrConstant(right())); |
889 | locs->set_out(0, Location::RequiresRegister()); |
890 | return locs; |
891 | } |
892 | UNREACHABLE(); |
893 | return NULL; |
894 | } |
895 | |
896 | static Condition TokenKindToDoubleCondition(Token::Kind kind) { |
897 | switch (kind) { |
898 | case Token::kEQ: |
899 | return EQ; |
900 | case Token::kNE: |
901 | return NE; |
902 | case Token::kLT: |
903 | return LT; |
904 | case Token::kGT: |
905 | return GT; |
906 | case Token::kLTE: |
907 | return LE; |
908 | case Token::kGTE: |
909 | return GE; |
910 | default: |
911 | UNREACHABLE(); |
912 | return VS; |
913 | } |
914 | } |
915 | |
916 | static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler, |
917 | LocationSummary* locs, |
918 | BranchLabels labels, |
919 | Token::Kind kind) { |
920 | const VRegister left = locs->in(0).fpu_reg(); |
921 | const VRegister right = locs->in(1).fpu_reg(); |
922 | __ fcmpd(left, right); |
923 | Condition true_condition = TokenKindToDoubleCondition(kind); |
924 | if (true_condition != NE) { |
925 | // Special case for NaN comparison. Result is always false unless |
926 | // relational operator is !=. |
927 | __ b(labels.false_label, VS); |
928 | } |
929 | return true_condition; |
930 | } |
931 | |
932 | Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
933 | BranchLabels labels) { |
934 | if (operation_cid() == kSmiCid || operation_cid() == kMintCid) { |
935 | return EmitInt64ComparisonOp(compiler, locs(), kind(), labels); |
936 | } else { |
937 | ASSERT(operation_cid() == kDoubleCid); |
938 | return EmitDoubleComparisonOp(compiler, locs(), labels, kind()); |
939 | } |
940 | } |
941 | |
942 | LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
943 | const intptr_t kNumInputs = 2; |
944 | const intptr_t kNumTemps = 0; |
945 | LocationSummary* locs = new (zone) |
946 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
947 | locs->set_in(0, Location::RequiresRegister()); |
948 | // Only one input can be a constant operand. The case of two constant |
949 | // operands should be handled by constant propagation. |
950 | locs->set_in(1, LocationRegisterOrConstant(right())); |
951 | return locs; |
952 | } |
953 | |
954 | Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
955 | BranchLabels labels) { |
956 | const Register left = locs()->in(0).reg(); |
957 | Location right = locs()->in(1); |
958 | if (right.IsConstant()) { |
959 | ASSERT(right.constant().IsSmi()); |
960 | const int64_t imm = static_cast<int64_t>(right.constant().raw()); |
961 | __ TestImmediate(left, imm); |
962 | } else { |
963 | __ tst(left, compiler::Operand(right.reg())); |
964 | } |
965 | Condition true_condition = (kind() == Token::kNE) ? NE : EQ; |
966 | return true_condition; |
967 | } |
968 | |
969 | LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone, |
970 | bool opt) const { |
971 | const intptr_t kNumInputs = 1; |
972 | const intptr_t kNumTemps = 1; |
973 | LocationSummary* locs = new (zone) |
974 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
975 | locs->set_in(0, Location::RequiresRegister()); |
976 | locs->set_temp(0, Location::RequiresRegister()); |
977 | locs->set_out(0, Location::RequiresRegister()); |
978 | return locs; |
979 | } |
980 | |
981 | Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
982 | BranchLabels labels) { |
983 | ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); |
984 | const Register val_reg = locs()->in(0).reg(); |
985 | const Register cid_reg = locs()->temp(0).reg(); |
986 | |
987 | compiler::Label* deopt = |
988 | CanDeoptimize() |
989 | ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids, |
990 | licm_hoisted_ ? ICData::kHoisted : 0) |
991 | : NULL; |
992 | |
993 | const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; |
994 | const ZoneGrowableArray<intptr_t>& data = cid_results(); |
995 | ASSERT(data[0] == kSmiCid); |
996 | bool result = data[1] == true_result; |
997 | __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label); |
998 | __ LoadClassId(cid_reg, val_reg); |
999 | |
1000 | for (intptr_t i = 2; i < data.length(); i += 2) { |
1001 | const intptr_t test_cid = data[i]; |
1002 | ASSERT(test_cid != kSmiCid); |
1003 | result = data[i + 1] == true_result; |
1004 | __ CompareImmediate(cid_reg, test_cid); |
1005 | __ b(result ? labels.true_label : labels.false_label, EQ); |
1006 | } |
1007 | // No match found, deoptimize or default action. |
1008 | if (deopt == NULL) { |
1009 | // If the cid is not in the list, jump to the opposite label from the cids |
1010 | // that are in the list. These must be all the same (see asserts in the |
1011 | // constructor). |
1012 | compiler::Label* target = result ? labels.false_label : labels.true_label; |
1013 | if (target != labels.fall_through) { |
1014 | __ b(target); |
1015 | } |
1016 | } else { |
1017 | __ b(deopt); |
1018 | } |
  // Dummy result, as this method already did the jump; there's no need
  // for the caller to branch on a condition.
1021 | return kInvalidCondition; |
1022 | } |
1023 | |
1024 | LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone, |
1025 | bool opt) const { |
1026 | const intptr_t kNumInputs = 2; |
1027 | const intptr_t kNumTemps = 0; |
1028 | if (operation_cid() == kDoubleCid) { |
1029 | LocationSummary* summary = new (zone) |
1030 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1031 | summary->set_in(0, Location::RequiresFpuRegister()); |
1032 | summary->set_in(1, Location::RequiresFpuRegister()); |
1033 | summary->set_out(0, Location::RequiresRegister()); |
1034 | return summary; |
1035 | } |
1036 | if (operation_cid() == kSmiCid || operation_cid() == kMintCid) { |
1037 | LocationSummary* summary = new (zone) |
1038 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1039 | summary->set_in(0, LocationRegisterOrConstant(left())); |
1040 | // Only one input can be a constant operand. The case of two constant |
1041 | // operands should be handled by constant propagation. |
1042 | summary->set_in(1, summary->in(0).IsConstant() |
1043 | ? Location::RequiresRegister() |
1044 | : LocationRegisterOrConstant(right())); |
1045 | summary->set_out(0, Location::RequiresRegister()); |
1046 | return summary; |
1047 | } |
1048 | |
1049 | UNREACHABLE(); |
1050 | return NULL; |
1051 | } |
1052 | |
1053 | Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
1054 | BranchLabels labels) { |
1055 | if (operation_cid() == kSmiCid || operation_cid() == kMintCid) { |
1056 | return EmitInt64ComparisonOp(compiler, locs(), kind(), labels); |
1057 | } else { |
1058 | ASSERT(operation_cid() == kDoubleCid); |
1059 | return EmitDoubleComparisonOp(compiler, locs(), labels, kind()); |
1060 | } |
1061 | } |
1062 | |
1063 | void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1064 | SetupNative(); |
1065 | const Register result = locs()->out(0).reg(); |
1066 | |
1067 | // All arguments are already @SP due to preceding PushArgument()s. |
1068 | ASSERT(ArgumentCount() == |
1069 | function().NumParameters() + (function().IsGeneric() ? 1 : 0)); |
1070 | |
  // Push the result placeholder initialized to NULL.
1072 | __ PushObject(Object::null_object()); |
1073 | |
1074 | // Pass a pointer to the first argument in R2. |
1075 | __ AddImmediate(R2, SP, ArgumentCount() * kWordSize); |
1076 | |
1077 | // Compute the effective address. When running under the simulator, |
1078 | // this is a redirection address that forces the simulator to call |
1079 | // into the runtime system. |
1080 | uword entry; |
1081 | const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function()); |
1082 | const Code* stub; |
1083 | if (link_lazily()) { |
1084 | stub = &StubCode::CallBootstrapNative(); |
1085 | entry = NativeEntry::LinkNativeCallEntry(); |
1086 | } else { |
1087 | entry = reinterpret_cast<uword>(native_c_function()); |
1088 | if (is_bootstrap_native()) { |
1089 | stub = &StubCode::CallBootstrapNative(); |
1090 | } else if (is_auto_scope()) { |
1091 | stub = &StubCode::CallAutoScopeNative(); |
1092 | } else { |
1093 | stub = &StubCode::CallNoScopeNative(); |
1094 | } |
1095 | } |
1096 | __ LoadImmediate(R1, argc_tag); |
1097 | compiler::ExternalLabel label(entry); |
1098 | __ LoadNativeEntry(R5, &label, |
1099 | link_lazily() ? ObjectPool::Patchability::kPatchable |
1100 | : ObjectPool::Patchability::kNotPatchable); |
1101 | if (link_lazily()) { |
1102 | compiler->GeneratePatchableCall(token_pos(), *stub, |
1103 | PcDescriptorsLayout::kOther, locs()); |
1104 | } else { |
1105 | compiler->GenerateStubCall(token_pos(), *stub, PcDescriptorsLayout::kOther, |
1106 | locs()); |
1107 | } |
1108 | __ Pop(result); |
1109 | |
1110 | __ Drop(ArgumentCount()); // Drop the arguments. |
1111 | } |
1112 | |
1113 | void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1114 | const Register saved_fp = locs()->temp(0).reg(); |
1115 | const Register temp = locs()->temp(1).reg(); |
1116 | const Register branch = locs()->in(TargetAddressIndex()).reg(); |
1117 | |
1118 | // Save frame pointer because we're going to update it when we enter the exit |
1119 | // frame. |
1120 | __ mov(saved_fp, FPREG); |
1121 | |
1122 | // We need to create a dummy "exit frame". It will share the same pool pointer |
1123 | // but have a null code object. |
1124 | __ LoadObject(CODE_REG, Object::null_object()); |
1125 | __ set_constant_pool_allowed(false); |
1126 | __ EnterDartFrame(0, PP); |
1127 | |
1128 | // Make space for arguments and align the frame. |
1129 | __ ReserveAlignedFrameSpace(marshaller_.StackTopInBytes()); |
1130 | |
1131 | EmitParamMoves(compiler); |
1132 | |
1133 | // We need to copy a dummy return address up into the dummy stack frame so the |
1134 | // stack walker will know which safepoint to use. |
1135 | // |
1136 | // ADR loads relative to itself, so add kInstrSize to point to the next |
1137 | // instruction. |
1138 | __ adr(temp, compiler::Immediate(Instr::kInstrSize)); |
1139 | compiler->EmitCallsiteMetadata(token_pos(), deopt_id(), |
1140 | PcDescriptorsLayout::Kind::kOther, locs()); |
1141 | |
1142 | __ StoreToOffset(temp, FPREG, kSavedCallerPcSlotFromFp * kWordSize); |
1143 | |
1144 | if (CanExecuteGeneratedCodeInSafepoint()) { |
1145 | // Update information in the thread object and enter a safepoint. |
1146 | __ LoadImmediate(temp, compiler::target::Thread::exit_through_ffi()); |
1147 | __ TransitionGeneratedToNative(branch, FPREG, temp, |
1148 | /*enter_safepoint=*/true); |
1149 | |
1150 | // We are entering runtime code, so the C stack pointer must be restored |
1151 | // from the stack limit to the top of the stack. |
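    // R25 is callee-saved under the native ABI, so the old CSP survives the
    // call and is restored below.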
1152 | __ mov(R25, CSP); |
1153 | __ mov(CSP, SP); |
1154 | |
1155 | __ blr(branch); |
1156 | |
1157 | // Restore the Dart stack pointer. |
1158 | __ mov(SP, CSP); |
1159 | __ mov(CSP, R25); |
1160 | |
1161 | // Update information in the thread object and leave the safepoint. |
1162 | __ TransitionNativeToGenerated(temp, /*leave_safepoint=*/true); |
1163 | } else { |
    // We cannot trust that this code will be executable within a safepoint.
    // Therefore we delegate the responsibility of entering/exiting the
    // safepoint to a stub which is in the VM isolate's heap and will never
    // lose execute permission.
1168 | __ ldr(TMP, |
1169 | compiler::Address( |
1170 | THR, compiler::target::Thread:: |
1171 | call_native_through_safepoint_entry_point_offset())); |
1172 | |
1173 | // Calls R8 and clobbers R19 (along with volatile registers). |
1174 | ASSERT(branch == R8 && temp == R19); |
1175 | __ blr(TMP); |
1176 | } |
1177 | |
1178 | // Refresh pinned registers values (inc. write barrier mask and null object). |
1179 | __ RestorePinnedRegisters(); |
1180 | |
1181 | EmitReturnMoves(compiler); |
1182 | |
1183 | // Although PP is a callee-saved register, it may have been moved by the GC. |
1184 | __ LeaveDartFrame(compiler::kRestoreCallerPP); |
1185 | |
1186 | // Restore the global object pool after returning from runtime (old space is |
1187 | // moving, so the GOP could have been relocated). |
1188 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
1189 | __ SetupGlobalPoolAndDispatchTable(); |
1190 | } |
1191 | |
1192 | __ set_constant_pool_allowed(true); |
1193 | } |
1194 | |
1195 | void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1196 | EmitReturnMoves(compiler); |
1197 | |
1198 | __ LeaveDartFrame(); |
1199 | |
1200 | // The dummy return address is in LR, no need to pop it as on Intel. |
1201 | |
1202 | // These can be anything besides the return register (R0) and THR (R26). |
1203 | const Register vm_tag_reg = R1; |
1204 | const Register old_exit_frame_reg = R2; |
1205 | const Register old_exit_through_ffi_reg = R3; |
1206 | const Register tmp = R4; |
1207 | |
1208 | __ PopPair(old_exit_frame_reg, old_exit_through_ffi_reg); |
1209 | |
1210 | // Restore top_resource. |
1211 | __ PopPair(tmp, vm_tag_reg); |
1212 | __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset()); |
1213 | |
1214 | // Reset the exit frame info to old_exit_frame_reg *before* entering the |
1215 | // safepoint. |
1216 | // |
1217 | // If we were called by a trampoline, it will enter the safepoint on our |
1218 | // behalf. |
1219 | __ TransitionGeneratedToNative( |
1220 | vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg, |
1221 | /*enter_safepoint=*/!NativeCallbackTrampolines::Enabled()); |
1222 | |
1223 | __ PopNativeCalleeSavedRegisters(); |
1224 | |
1225 | #if defined(TARGET_OS_FUCHSIA) |
1226 | UNREACHABLE(); // Fuchsia does not allow dart:ffi. |
1227 | #elif defined(USING_SHADOW_CALL_STACK) |
1228 | #error Unimplemented |
1229 | #endif |
1230 | |
1231 | // Leave the entry frame. |
1232 | __ LeaveFrame(); |
1233 | |
1234 | // Leave the dummy frame holding the pushed arguments. |
1235 | __ LeaveFrame(); |
1236 | |
1237 | // Restore the actual stack pointer from SPREG. |
1238 | __ RestoreCSP(); |
1239 | |
1240 | __ Ret(); |
1241 | |
1242 | // For following blocks. |
1243 | __ set_constant_pool_allowed(true); |
1244 | } |
1245 | |
1246 | void NativeEntryInstr::SaveArgument( |
1247 | FlowGraphCompiler* compiler, |
1248 | const compiler::ffi::NativeLocation& nloc) const { |
1249 | if (nloc.IsStack()) return; |
1250 | |
1251 | if (nloc.IsRegisters()) { |
1252 | const auto& regs_loc = nloc.AsRegisters(); |
1253 | ASSERT(regs_loc.num_regs() == 1); |
1254 | __ Push(regs_loc.reg_at(0)); |
1255 | } else if (nloc.IsFpuRegisters()) { |
1256 | __ PushDouble(nloc.AsFpuRegisters().fpu_reg()); |
1257 | } else { |
1258 | UNREACHABLE(); |
1259 | } |
1260 | } |
1261 | |
1262 | void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1263 | // Constant pool cannot be used until we enter the actual Dart frame. |
1264 | __ set_constant_pool_allowed(false); |
1265 | |
1266 | __ Bind(compiler->GetJumpLabel(this)); |
1267 | |
1268 | // We don't use the regular stack pointer in ARM64, so we have to copy the |
1269 | // native stack pointer into the Dart stack pointer. This will also kick CSP |
1270 | // forward a bit, enough for the spills and leaf call below, until we can set |
1271 | // it properly after setting up THR. |
1272 | __ SetupDartSP(); |
1273 | |
1274 | // Create a dummy frame holding the pushed arguments. This simplifies |
1275 | // NativeReturnInstr::EmitNativeCode. |
1276 | __ EnterFrame(0); |
1277 | |
1278 | // Save the argument registers, in reverse order. |
1279 | for (intptr_t i = marshaller_.num_args(); i-- > 0;) { |
1280 | SaveArgument(compiler, marshaller_.Location(i)); |
1281 | } |
1282 | |
1283 | // Enter the entry frame. |
1284 | __ EnterFrame(0); |
1285 | |
1286 | // Save a space for the code object. |
1287 | __ PushImmediate(0); |
1288 | |
1289 | #if defined(TARGET_OS_FUCHSIA) |
1290 | UNREACHABLE(); // Fuchsia does not allow dart:ffi. |
1291 | #elif defined(USING_SHADOW_CALL_STACK) |
1292 | #error Unimplemented |
1293 | #endif |
1294 | |
1295 | __ PushNativeCalleeSavedRegisters(); |
1296 | |
1297 | // Load the thread object. If we were called by a trampoline, the thread is |
1298 | // already loaded. |
1299 | if (FLAG_precompiled_mode) { |
1300 | compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, R1, |
1301 | R0); |
1302 | } else if (!NativeCallbackTrampolines::Enabled()) { |
1303 | // In JIT mode, we can just paste the address of the runtime entry into the |
1304 | // generated code directly. This is not a problem since we don't save |
1305 | // callbacks into JIT snapshots. |
1306 | __ LoadImmediate( |
1307 | R1, reinterpret_cast<int64_t>(DLRT_GetThreadForNativeCallback)); |
1308 | } |
1309 | |
1310 | if (!NativeCallbackTrampolines::Enabled()) { |
1311 | // Create another frame to align the frame before continuing in "native" |
1312 | // code. |
1313 | __ EnterFrame(0); |
1314 | __ ReserveAlignedFrameSpace(0); |
1315 | |
1316 | __ LoadImmediate(R0, callback_id_); |
1317 | __ blr(R1); |
1318 | __ mov(THR, R0); |
1319 | |
1320 | __ LeaveFrame(); |
1321 | } |
1322 | |
1323 | // Now that we have THR, we can set CSP. |
1324 | __ SetupCSPFromThread(THR); |
1325 | |
1326 | // Refresh pinned registers values (inc. write barrier mask and null object). |
1327 | __ RestorePinnedRegisters(); |
1328 | |
1329 | // Save the current VMTag on the stack. |
1330 | __ LoadFromOffset(TMP, THR, compiler::target::Thread::vm_tag_offset()); |
1331 | // Save the top resource. |
1332 | __ LoadFromOffset(R0, THR, compiler::target::Thread::top_resource_offset()); |
1333 | __ PushPair(R0, TMP); |
1334 | |
1335 | __ StoreToOffset(ZR, THR, compiler::target::Thread::top_resource_offset()); |
1336 | |
1337 | __ LoadFromOffset(R0, THR, |
1338 | compiler::target::Thread::exit_through_ffi_offset()); |
1339 | __ Push(R0); |
1340 | |
1341 | // Save the top exit frame info. We don't set it to 0 yet: |
1342 | // TransitionNativeToGenerated will handle that. |
1343 | __ LoadFromOffset(R0, THR, |
1344 | compiler::target::Thread::top_exit_frame_info_offset()); |
1345 | __ Push(R0); |
1346 | |
1347 | // In debug mode, verify that we've pushed the top exit frame info at the |
1348 | // correct offset from FP. |
1349 | __ EmitEntryFrameVerification(); |
1350 | |
1351 | // Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller) |
1352 | // will leave the safepoint for us. |
1353 | __ TransitionNativeToGenerated(R0, /*exit_safepoint=*/false); |
1354 | |
1355 | // Now that the safepoint has ended, we can touch Dart objects without |
1356 | // handles. |
1357 | |
1358 | // Load the code object. |
1359 | __ LoadFromOffset(R0, THR, compiler::target::Thread::callback_code_offset()); |
1360 | __ LoadFieldFromOffset(R0, R0, |
1361 | compiler::target::GrowableObjectArray::data_offset()); |
1362 | __ LoadFieldFromOffset(CODE_REG, R0, |
1363 | compiler::target::Array::data_offset() + |
1364 | callback_id_ * compiler::target::kWordSize); |
1365 | |
1366 | // Put the code object in the reserved slot. |
1367 | __ StoreToOffset(CODE_REG, FPREG, |
1368 | kPcMarkerSlotFromFp * compiler::target::kWordSize); |
1369 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
1370 | __ SetupGlobalPoolAndDispatchTable(); |
1371 | } else { |
1372 | // We now load the pool pointer (PP) with a GC safe value as we are about to |
1373 | // invoke dart code. We don't need a real object pool here. |
1374 | // Smi zero does not work because ARM64 assumes PP to be untagged. |
1375 | __ LoadObject(PP, compiler::NullObject()); |
1376 | } |
1377 | |
1378 | // Load a GC-safe value for the arguments descriptor (unused but tagged). |
1379 | __ mov(ARGS_DESC_REG, ZR); |
1380 | |
1381 | // Load a dummy return address which suggests that we are inside of |
1382 | // InvokeDartCodeStub. This is how the stack walker detects an entry frame. |
1383 | __ LoadFromOffset(LR, THR, |
1384 | compiler::target::Thread::invoke_dart_code_stub_offset()); |
1385 | __ LoadFieldFromOffset(LR, LR, compiler::target::Code::entry_point_offset()); |
1386 | |
1387 | FunctionEntryInstr::EmitNativeCode(compiler); |
1388 | } |
1389 | |
1390 | LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary( |
1391 | Zone* zone, |
1392 | bool opt) const { |
1393 | const intptr_t kNumInputs = 1; |
1394 | // TODO(fschneider): Allow immediate operands for the char code. |
1395 | return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), |
1396 | LocationSummary::kNoCall); |
1397 | } |
1398 | |
1399 | void OneByteStringFromCharCodeInstr::EmitNativeCode( |
1400 | FlowGraphCompiler* compiler) { |
1401 | ASSERT(compiler->is_optimizing()); |
1402 | const Register char_code = locs()->in(0).reg(); |
1403 | const Register result = locs()->out(0).reg(); |
1404 | |
1405 | __ ldr(result, |
1406 | compiler::Address(THR, Thread::predefined_symbols_address_offset())); |
1407 | __ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize); |
1408 | __ SmiUntag(TMP, char_code); // Untag to use scaled address mode. |
1409 | __ ldr(result, |
1410 | compiler::Address(result, TMP, UXTX, compiler::Address::Scaled)); |
1411 | } |
1412 | |
1413 | LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone, |
1414 | bool opt) const { |
1415 | const intptr_t kNumInputs = 1; |
1416 | return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), |
1417 | LocationSummary::kNoCall); |
1418 | } |
1419 | |
1420 | void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1421 | ASSERT(cid_ == kOneByteStringCid); |
1422 | const Register str = locs()->in(0).reg(); |
1423 | const Register result = locs()->out(0).reg(); |
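  // Yield the char code of the first byte if the string has length 1 and -1
  // otherwise; the result is Smi-tagged below.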
1424 | __ LoadFieldFromOffset(result, str, String::length_offset()); |
1425 | __ ldr(TMP, compiler::FieldAddress(str, OneByteString::data_offset()), |
1426 | kUnsignedByte); |
1427 | __ CompareImmediate(result, Smi::RawValue(1)); |
1428 | __ LoadImmediate(result, -1); |
1429 | __ csel(result, TMP, result, EQ); |
1430 | __ SmiTag(result); |
1431 | } |
1432 | |
1433 | LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone, |
1434 | bool opt) const { |
1435 | const intptr_t kNumInputs = 1; |
1436 | const intptr_t kNumTemps = 0; |
1437 | LocationSummary* summary = new (zone) |
1438 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
1439 | summary->set_in(0, Location::RegisterLocation(R0)); |
1440 | summary->set_out(0, Location::RegisterLocation(R0)); |
1441 | return summary; |
1442 | } |
1443 | |
1444 | void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1445 | const Register array = locs()->in(0).reg(); |
1446 | __ Push(array); |
1447 | const int kTypeArgsLen = 0; |
1448 | const int kNumberOfArguments = 1; |
1449 | constexpr int kSizeOfArguments = 1; |
1450 | const Array& kNoArgumentNames = Object::null_array(); |
1451 | ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments, |
1452 | kNoArgumentNames); |
1453 | compiler->GenerateStaticCall(deopt_id(), token_pos(), CallFunction(), |
1454 | args_info, locs(), ICData::Handle(), |
1455 | ICData::kStatic); |
1456 | ASSERT(locs()->out(0).reg() == R0); |
1457 | } |
1458 | |
1459 | LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone, |
1460 | bool opt) const { |
1461 | const intptr_t kNumInputs = 5; |
1462 | const intptr_t kNumTemps = 0; |
1463 | LocationSummary* summary = new (zone) |
1464 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1465 | summary->set_in(0, Location::Any()); // decoder |
1466 | summary->set_in(1, Location::WritableRegister()); // bytes |
1467 | summary->set_in(2, Location::WritableRegister()); // start |
1468 | summary->set_in(3, Location::WritableRegister()); // end |
1469 | summary->set_in(4, Location::WritableRegister()); // table |
1470 | summary->set_out(0, Location::RequiresRegister()); |
1471 | return summary; |
1472 | } |
1473 | |
1474 | void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1475 | const Register bytes_reg = locs()->in(1).reg(); |
1476 | const Register start_reg = locs()->in(2).reg(); |
1477 | const Register end_reg = locs()->in(3).reg(); |
1478 | const Register table_reg = locs()->in(4).reg(); |
1479 | const Register size_reg = locs()->out(0).reg(); |
1480 | |
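// The 'bytes', 'start' and 'end' inputs were requested as writable registers
// (see MakeLocationSummary above), so they can be reused as scratch registers
// once their original values have been consumed.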
1481 | const Register bytes_ptr_reg = start_reg; |
1482 | const Register bytes_end_reg = end_reg; |
1483 | const Register flags_reg = bytes_reg; |
1484 | const Register temp_reg = TMP; |
1485 | const Register decoder_temp_reg = start_reg; |
1486 | const Register flags_temp_reg = end_reg; |
1487 | |
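// As used below, each scan-table entry carries a size contribution in its low
// two bits and flag bits in the bits covered by kFlagsMask.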
1488 | static const intptr_t kSizeMask = 0x03; |
1489 | static const intptr_t kFlagsMask = 0x3C; |
1490 | |
1491 | compiler::Label loop, loop_in; |
1492 | |
1493 | // Address of input bytes. |
1494 | __ LoadFieldFromOffset(bytes_reg, bytes_reg, |
1495 | compiler::target::TypedDataBase::data_field_offset()); |
1496 | |
1497 | // Table. |
1498 | __ AddImmediate( |
1499 | table_reg, table_reg, |
1500 | compiler::target::OneByteString::data_offset() - kHeapObjectTag); |
1501 | |
1502 | // Pointers to start and end. |
1503 | __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg)); |
1504 | __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg)); |
1505 | |
1506 | // Initialize size and flags. |
1507 | __ mov(size_reg, ZR); |
1508 | __ mov(flags_reg, ZR); |
1509 | |
1510 | __ b(&loop_in); |
1511 | __ Bind(&loop); |
1512 | |
1513 | // Read byte and increment pointer. |
1514 | __ ldr(temp_reg, |
1515 | compiler::Address(bytes_ptr_reg, 1, compiler::Address::PostIndex), |
1516 | kUnsignedByte); |
1517 | |
1518 | // Update size and flags based on byte value. |
1519 | __ ldr(temp_reg, compiler::Address(table_reg, temp_reg), kUnsignedByte); |
1520 | __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg)); |
1521 | __ andi(temp_reg, temp_reg, compiler::Immediate(kSizeMask)); |
1522 | __ add(size_reg, size_reg, compiler::Operand(temp_reg)); |
1523 | |
1524 | // Stop if end is reached. |
1525 | __ Bind(&loop_in); |
1526 | __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg)); |
1527 | __ b(&loop, UNSIGNED_LESS); |
1528 | |
1529 | // Write flags to field. |
1530 | __ AndImmediate(flags_reg, flags_reg, kFlagsMask); |
1531 | if (!IsScanFlagsUnboxed()) { |
1532 | __ SmiTag(flags_reg); |
1533 | } |
1534 | Register decoder_reg; |
1535 | const Location decoder_location = locs()->in(0); |
1536 | if (decoder_location.IsStackSlot()) { |
1537 | __ ldr(decoder_temp_reg, LocationToStackSlotAddress(decoder_location)); |
1538 | decoder_reg = decoder_temp_reg; |
1539 | } else { |
1540 | decoder_reg = decoder_location.reg(); |
1541 | } |
1542 | const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes(); |
1543 | __ LoadFieldFromOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset); |
1544 | __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg)); |
1545 | __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset); |
1546 | } |
1547 | |
1548 | LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone, |
1549 | bool opt) const { |
1550 | const intptr_t kNumInputs = 1; |
1551 | return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), |
1552 | LocationSummary::kNoCall); |
1553 | } |
1554 | |
1555 | void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1556 | const Register obj = locs()->in(0).reg(); |
1557 | const Register result = locs()->out(0).reg(); |
1558 | if (object()->definition()->representation() == kUntagged) { |
1559 | __ LoadFromOffset(result, obj, offset()); |
1560 | } else { |
1561 | ASSERT(object()->definition()->representation() == kTagged); |
1562 | __ LoadFieldFromOffset(result, obj, offset()); |
1563 | } |
1564 | } |
1565 | |
1566 | DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) { |
1567 | __ StoreToOffset(value, obj, instr->offset_from_tagged()); |
1568 | } |
1569 | |
1570 | Representation LoadIndexedInstr::representation() const { |
1571 | switch (class_id_) { |
1572 | case kArrayCid: |
1573 | case kImmutableArrayCid: |
1574 | return kTagged; |
1575 | case kOneByteStringCid: |
1576 | case kTwoByteStringCid: |
1577 | case kTypedDataInt8ArrayCid: |
1578 | case kTypedDataInt16ArrayCid: |
1579 | case kTypedDataUint8ArrayCid: |
1580 | case kTypedDataUint8ClampedArrayCid: |
1581 | case kTypedDataUint16ArrayCid: |
1582 | case kExternalOneByteStringCid: |
1583 | case kExternalTwoByteStringCid: |
1584 | case kExternalTypedDataUint8ArrayCid: |
1585 | case kExternalTypedDataUint8ClampedArrayCid: |
1586 | return kUnboxedIntPtr; |
1587 | case kTypedDataInt32ArrayCid: |
1588 | return kUnboxedInt32; |
1589 | case kTypedDataUint32ArrayCid: |
1590 | return kUnboxedUint32; |
1591 | case kTypedDataInt64ArrayCid: |
1592 | case kTypedDataUint64ArrayCid: |
1593 | return kUnboxedInt64; |
1594 | case kTypedDataFloat32ArrayCid: |
1595 | case kTypedDataFloat64ArrayCid: |
1596 | return kUnboxedDouble; |
1597 | case kTypedDataInt32x4ArrayCid: |
1598 | return kUnboxedInt32x4; |
1599 | case kTypedDataFloat32x4ArrayCid: |
1600 | return kUnboxedFloat32x4; |
1601 | case kTypedDataFloat64x2ArrayCid: |
1602 | return kUnboxedFloat64x2; |
1603 | default: |
1604 | UNIMPLEMENTED(); |
1605 | return kTagged; |
1606 | } |
1607 | } |
1608 | |
1609 | static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) { |
1610 | ConstantInstr* constant = value->definition()->AsConstant(); |
1611 | if ((constant == NULL) || !constant->value().IsSmi()) { |
1612 | return false; |
1613 | } |
1614 | const int64_t index = Smi::Cast(constant->value()).AsInt64Value(); |
1615 | const intptr_t scale = Instance::ElementSizeFor(cid); |
1616 | const int64_t offset = |
1617 | index * scale + |
1618 | (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
1619 | if (!Utils::IsInt(32, offset)) { |
1620 | return false; |
1621 | } |
1622 | return compiler::Address::CanHoldOffset( |
1623 | static_cast<int32_t>(offset), compiler::Address::Offset, |
1624 | compiler::Address::OperandSizeFor(cid)); |
1625 | } |
1626 | |
1627 | LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone, |
1628 | bool opt) const { |
1629 | const intptr_t kNumInputs = 2; |
1630 | const intptr_t kNumTemps = 0; |
1631 | LocationSummary* locs = new (zone) |
1632 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1633 | locs->set_in(0, Location::RequiresRegister()); |
1634 | if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { |
1635 | locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); |
1636 | } else { |
1637 | locs->set_in(1, Location::RequiresRegister()); |
1638 | } |
1639 | if ((representation() == kUnboxedDouble) || |
1640 | (representation() == kUnboxedFloat32x4) || |
1641 | (representation() == kUnboxedInt32x4) || |
1642 | (representation() == kUnboxedFloat64x2)) { |
1643 | locs->set_out(0, Location::RequiresFpuRegister()); |
1644 | } else { |
1645 | locs->set_out(0, Location::RequiresRegister()); |
1646 | } |
1647 | return locs; |
1648 | } |
1649 | |
1650 | void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1651 | // The array register points to the backing store for external arrays. |
1652 | const Register array = locs()->in(0).reg(); |
1653 | const Location index = locs()->in(1); |
1654 | |
1655 | compiler::Address element_address(TMP); // Bad address. |
1656 | element_address = index.IsRegister() |
1657 | ? __ ElementAddressForRegIndex( |
1658 | IsExternal(), class_id(), index_scale(), |
1659 | index_unboxed_, array, index.reg(), TMP) |
1660 | : __ ElementAddressForIntIndex( |
1661 | IsExternal(), class_id(), index_scale(), array, |
1662 | Smi::Cast(index.constant()).Value()); |
1663 | if ((representation() == kUnboxedDouble) || |
1664 | (representation() == kUnboxedFloat32x4) || |
1665 | (representation() == kUnboxedInt32x4) || |
1666 | (representation() == kUnboxedFloat64x2)) { |
1667 | const VRegister result = locs()->out(0).fpu_reg(); |
1668 | switch (class_id()) { |
1669 | case kTypedDataFloat32ArrayCid: |
1670 | // Load single precision float. |
1671 | __ fldrs(result, element_address); |
1672 | break; |
1673 | case kTypedDataFloat64ArrayCid: |
1674 | // Load double precision float. |
1675 | __ fldrd(result, element_address); |
1676 | break; |
1677 | case kTypedDataFloat64x2ArrayCid: |
1678 | case kTypedDataInt32x4ArrayCid: |
1679 | case kTypedDataFloat32x4ArrayCid: |
1680 | __ fldrq(result, element_address); |
1681 | break; |
1682 | default: |
1683 | UNREACHABLE(); |
1684 | } |
1685 | return; |
1686 | } |
1687 | |
1688 | const Register result = locs()->out(0).reg(); |
1689 | switch (class_id()) { |
1690 | case kTypedDataInt32ArrayCid: |
1691 | ASSERT(representation() == kUnboxedInt32); |
1692 | __ ldr(result, element_address, kWord); |
1693 | break; |
1694 | case kTypedDataUint32ArrayCid: |
1695 | ASSERT(representation() == kUnboxedUint32); |
1696 | __ ldr(result, element_address, kUnsignedWord); |
1697 | break; |
1698 | case kTypedDataInt64ArrayCid: |
1699 | case kTypedDataUint64ArrayCid: |
1700 | ASSERT(representation() == kUnboxedInt64); |
1701 | __ ldr(result, element_address, kDoubleWord); |
1702 | break; |
1703 | case kTypedDataInt8ArrayCid: |
1704 | ASSERT(representation() == kUnboxedIntPtr); |
1705 | ASSERT(index_scale() == 1); |
1706 | __ ldr(result, element_address, kByte); |
1707 | break; |
1708 | case kTypedDataUint8ArrayCid: |
1709 | case kTypedDataUint8ClampedArrayCid: |
1710 | case kExternalTypedDataUint8ArrayCid: |
1711 | case kExternalTypedDataUint8ClampedArrayCid: |
1712 | case kOneByteStringCid: |
1713 | case kExternalOneByteStringCid: |
1714 | ASSERT(representation() == kUnboxedIntPtr); |
1715 | ASSERT(index_scale() == 1); |
1716 | __ ldr(result, element_address, kUnsignedByte); |
1717 | break; |
1718 | case kTypedDataInt16ArrayCid: |
1719 | ASSERT(representation() == kUnboxedIntPtr); |
1720 | __ ldr(result, element_address, kHalfword); |
1721 | break; |
1722 | case kTypedDataUint16ArrayCid: |
1723 | case kTwoByteStringCid: |
1724 | case kExternalTwoByteStringCid: |
1725 | ASSERT(representation() == kUnboxedIntPtr); |
1726 | __ ldr(result, element_address, kUnsignedHalfword); |
1727 | break; |
1728 | default: |
1729 | ASSERT(representation() == kTagged); |
1730 | ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid)); |
1731 | __ ldr(result, element_address); |
1732 | break; |
1733 | } |
1734 | } |
1735 | |
1736 | LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone, |
1737 | bool opt) const { |
1738 | const intptr_t kNumInputs = 2; |
1739 | const intptr_t kNumTemps = 0; |
1740 | LocationSummary* summary = new (zone) |
1741 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1742 | summary->set_in(0, Location::RequiresRegister()); |
1743 | summary->set_in(1, Location::RequiresRegister()); |
1744 | summary->set_out(0, Location::RequiresRegister()); |
1745 | return summary; |
1746 | } |
1747 | |
1748 | void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1749 | // The string register points to the backing store for external strings. |
1750 | const Register str = locs()->in(0).reg(); |
1751 | const Location index = locs()->in(1); |
1752 | OperandSize sz = OperandSize::kByte; |
1753 | |
1754 | Register result = locs()->out(0).reg(); |
1755 | switch (class_id()) { |
1756 | case kOneByteStringCid: |
1757 | case kExternalOneByteStringCid: |
1758 | switch (element_count()) { |
1759 | case 1: |
1760 | sz = kUnsignedByte; |
1761 | break; |
1762 | case 2: |
1763 | sz = kUnsignedHalfword; |
1764 | break; |
1765 | case 4: |
1766 | sz = kUnsignedWord; |
1767 | break; |
1768 | default: |
1769 | UNREACHABLE(); |
1770 | } |
1771 | break; |
1772 | case kTwoByteStringCid: |
1773 | case kExternalTwoByteStringCid: |
1774 | switch (element_count()) { |
1775 | case 1: |
1776 | sz = kUnsignedHalfword; |
1777 | break; |
1778 | case 2: |
1779 | sz = kUnsignedWord; |
1780 | break; |
1781 | default: |
1782 | UNREACHABLE(); |
1783 | } |
1784 | break; |
1785 | default: |
1786 | UNREACHABLE(); |
1787 | break; |
1788 | } |
1789 | // Warning: element_address may use register TMP as base. |
1790 | compiler::Address element_address = __ ElementAddressForRegIndexWithSize( |
1791 | IsExternal(), class_id(), sz, index_scale(), /*index_unboxed=*/false, str, |
1792 | index.reg(), TMP); |
1793 | __ ldr(result, element_address, sz); |
1794 | |
1795 | __ SmiTag(result); |
1796 | } |
1797 | |
1798 | Representation StoreIndexedInstr::RequiredInputRepresentation( |
1799 | intptr_t idx) const { |
1800 | // Array can be a Dart object or a pointer to external data. |
1801 | if (idx == 0) return kNoRepresentation; // Flexible input representation. |
1802 | if (idx == 1) { |
1803 | if (index_unboxed_) { |
1804 | return kNoRepresentation; // Index can be any unboxed representation. |
1805 | } else { |
1806 | return kTagged; // Index is a smi. |
1807 | } |
1808 | } |
1809 | ASSERT(idx == 2); |
1810 | switch (class_id_) { |
1811 | case kArrayCid: |
1812 | return kTagged; |
1813 | case kOneByteStringCid: |
1814 | case kTwoByteStringCid: |
1815 | case kTypedDataInt8ArrayCid: |
1816 | case kTypedDataInt16ArrayCid: |
1817 | case kTypedDataUint8ArrayCid: |
1818 | case kTypedDataUint8ClampedArrayCid: |
1819 | case kTypedDataUint16ArrayCid: |
1820 | case kExternalTypedDataUint8ArrayCid: |
1821 | case kExternalTypedDataUint8ClampedArrayCid: |
1822 | return kUnboxedIntPtr; |
1823 | case kTypedDataInt32ArrayCid: |
1824 | return kUnboxedInt32; |
1825 | case kTypedDataUint32ArrayCid: |
1826 | return kUnboxedUint32; |
1827 | case kTypedDataInt64ArrayCid: |
1828 | case kTypedDataUint64ArrayCid: |
1829 | return kUnboxedInt64; |
1830 | case kTypedDataFloat32ArrayCid: |
1831 | case kTypedDataFloat64ArrayCid: |
1832 | return kUnboxedDouble; |
1833 | case kTypedDataFloat32x4ArrayCid: |
1834 | return kUnboxedFloat32x4; |
1835 | case kTypedDataInt32x4ArrayCid: |
1836 | return kUnboxedInt32x4; |
1837 | case kTypedDataFloat64x2ArrayCid: |
1838 | return kUnboxedFloat64x2; |
1839 | default: |
1840 | UNREACHABLE(); |
1841 | return kTagged; |
1842 | } |
1843 | } |
1844 | |
1845 | LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone, |
1846 | bool opt) const { |
1847 | const intptr_t kNumInputs = 3; |
1848 | const intptr_t kNumTemps = 1; |
1849 | LocationSummary* locs = new (zone) |
1850 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1851 | locs->set_in(0, Location::RequiresRegister()); |
1852 | if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { |
1853 | locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); |
1854 | } else { |
1855 | locs->set_in(1, Location::RequiresRegister()); |
1856 | } |
1857 | locs->set_temp(0, Location::RequiresRegister()); |
1858 | |
1859 | switch (class_id()) { |
1860 | case kArrayCid: |
1861 | locs->set_in(2, ShouldEmitStoreBarrier() |
1862 | ? Location::RegisterLocation(kWriteBarrierValueReg) |
1863 | : LocationRegisterOrConstant(value())); |
1864 | if (ShouldEmitStoreBarrier()) { |
1865 | locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg)); |
1866 | locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg)); |
1867 | } |
1868 | break; |
1869 | case kExternalTypedDataUint8ArrayCid: |
1870 | case kExternalTypedDataUint8ClampedArrayCid: |
1871 | case kTypedDataInt8ArrayCid: |
1872 | case kTypedDataUint8ArrayCid: |
1873 | case kTypedDataUint8ClampedArrayCid: |
1874 | case kOneByteStringCid: |
1875 | case kTwoByteStringCid: |
1876 | case kTypedDataInt16ArrayCid: |
1877 | case kTypedDataUint16ArrayCid: |
1878 | case kTypedDataInt32ArrayCid: |
1879 | case kTypedDataUint32ArrayCid: |
1880 | case kTypedDataInt64ArrayCid: |
1881 | case kTypedDataUint64ArrayCid: |
1882 | locs->set_in(2, Location::RequiresRegister()); |
1883 | break; |
1884 | case kTypedDataFloat32ArrayCid: |
1885 | case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants. |
1886 | locs->set_in(2, Location::RequiresFpuRegister()); |
1887 | break; |
1888 | case kTypedDataInt32x4ArrayCid: |
1889 | case kTypedDataFloat32x4ArrayCid: |
1890 | case kTypedDataFloat64x2ArrayCid: |
1891 | locs->set_in(2, Location::RequiresFpuRegister()); |
1892 | break; |
1893 | default: |
1894 | UNREACHABLE(); |
1895 | return NULL; |
1896 | } |
1897 | return locs; |
1898 | } |
1899 | |
1900 | void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1901 | // The array register points to the backing store for external arrays. |
1902 | const Register array = locs()->in(0).reg(); |
1903 | const Location index = locs()->in(1); |
1904 | const Register temp = locs()->temp(0).reg(); |
1905 | compiler::Address element_address(TMP); // Bad address. |
1906 | |
1907 | // Deal with a special case separately. |
1908 | if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) { |
1909 | if (index.IsRegister()) { |
1910 | __ ComputeElementAddressForRegIndex(temp, IsExternal(), class_id(), |
1911 | index_scale(), index_unboxed_, array, |
1912 | index.reg()); |
1913 | } else { |
1914 | __ ComputeElementAddressForIntIndex(temp, IsExternal(), class_id(), |
1915 | index_scale(), array, |
1916 | Smi::Cast(index.constant()).Value()); |
1917 | } |
1918 | const Register value = locs()->in(2).reg(); |
1919 | __ StoreIntoArray(array, temp, value, CanValueBeSmi(), |
1920 | /*lr_reserved=*/!compiler->intrinsic_mode()); |
1921 | return; |
1922 | } |
1923 | |
1924 | element_address = index.IsRegister() |
1925 | ? __ ElementAddressForRegIndex( |
1926 | IsExternal(), class_id(), index_scale(), |
1927 | index_unboxed_, array, index.reg(), temp) |
1928 | : __ ElementAddressForIntIndex( |
1929 | IsExternal(), class_id(), index_scale(), array, |
1930 | Smi::Cast(index.constant()).Value()); |
1931 | |
1932 | switch (class_id()) { |
1933 | case kArrayCid: |
1934 | ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above. |
1935 | if (locs()->in(2).IsConstant()) { |
1936 | const Object& constant = locs()->in(2).constant(); |
1937 | __ StoreIntoObjectNoBarrier(array, element_address, constant); |
1938 | } else { |
1939 | const Register value = locs()->in(2).reg(); |
1940 | __ StoreIntoObjectNoBarrier(array, element_address, value); |
1941 | } |
1942 | break; |
1943 | case kTypedDataInt8ArrayCid: |
1944 | case kTypedDataUint8ArrayCid: |
1945 | case kExternalTypedDataUint8ArrayCid: |
1946 | case kOneByteStringCid: { |
1947 | ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr); |
1948 | if (locs()->in(2).IsConstant()) { |
1949 | const Smi& constant = Smi::Cast(locs()->in(2).constant()); |
1950 | __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value())); |
1951 | __ str(TMP, element_address, kUnsignedByte); |
1952 | } else { |
1953 | const Register value = locs()->in(2).reg(); |
1954 | __ str(value, element_address, kUnsignedByte); |
1955 | } |
1956 | break; |
1957 | } |
1958 | case kTypedDataUint8ClampedArrayCid: |
1959 | case kExternalTypedDataUint8ClampedArrayCid: { |
1960 | ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr); |
1961 | if (locs()->in(2).IsConstant()) { |
1962 | const Smi& constant = Smi::Cast(locs()->in(2).constant()); |
1963 | intptr_t value = constant.Value(); |
1964 | // Clamp to 0x0 or 0xFF respectively. |
1965 | if (value > 0xFF) { |
1966 | value = 0xFF; |
1967 | } else if (value < 0) { |
1968 | value = 0; |
1969 | } |
1970 | __ LoadImmediate(TMP, static_cast<int8_t>(value)); |
1971 | __ str(TMP, element_address, kUnsignedByte); |
1972 | } else { |
1973 | const Register value = locs()->in(2).reg(); |
1974 | // Clamp to 0x00 or 0xFF respectively. |
1975 | __ CompareImmediate(value, 0xFF); |
1976 | __ csetm(TMP, GT); // TMP = value > 0xFF ? -1 : 0. |
1977 | __ csel(TMP, value, TMP, LS); // TMP = value in range ? value : TMP. |
1978 | __ str(TMP, element_address, kUnsignedByte); |
1979 | } |
1980 | break; |
1981 | } |
1982 | case kTwoByteStringCid: |
1983 | case kTypedDataInt16ArrayCid: |
1984 | case kTypedDataUint16ArrayCid: { |
1985 | ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr); |
1986 | const Register value = locs()->in(2).reg(); |
1987 | __ str(value, element_address, kUnsignedHalfword); |
1988 | break; |
1989 | } |
1990 | case kTypedDataInt32ArrayCid: |
1991 | case kTypedDataUint32ArrayCid: { |
1992 | const Register value = locs()->in(2).reg(); |
1993 | __ str(value, element_address, kUnsignedWord); |
1994 | break; |
1995 | } |
1996 | case kTypedDataInt64ArrayCid: |
1997 | case kTypedDataUint64ArrayCid: { |
1998 | const Register value = locs()->in(2).reg(); |
1999 | __ str(value, element_address, kDoubleWord); |
2000 | break; |
2001 | } |
2002 | case kTypedDataFloat32ArrayCid: { |
2003 | const VRegister value_reg = locs()->in(2).fpu_reg(); |
2004 | __ fstrs(value_reg, element_address); |
2005 | break; |
2006 | } |
2007 | case kTypedDataFloat64ArrayCid: { |
2008 | const VRegister value_reg = locs()->in(2).fpu_reg(); |
2009 | __ fstrd(value_reg, element_address); |
2010 | break; |
2011 | } |
2012 | case kTypedDataFloat64x2ArrayCid: |
2013 | case kTypedDataInt32x4ArrayCid: |
2014 | case kTypedDataFloat32x4ArrayCid: { |
2015 | const VRegister value_reg = locs()->in(2).fpu_reg(); |
2016 | __ fstrq(value_reg, element_address); |
2017 | break; |
2018 | } |
2019 | default: |
2020 | UNREACHABLE(); |
2021 | } |
2022 | } |
2023 | |
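// Loads the class id of 'value_reg' into 'value_cid_reg'. If 'value_is_smi'
// is NULL, Smi values fall through with 'value_cid_reg' preset to kSmiCid;
// otherwise Smi values branch to 'value_is_smi'.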
2024 | static void LoadValueCid(FlowGraphCompiler* compiler, |
2025 | Register value_cid_reg, |
2026 | Register value_reg, |
2027 | compiler::Label* value_is_smi = NULL) { |
2028 | compiler::Label done; |
2029 | if (value_is_smi == NULL) { |
2030 | __ LoadImmediate(value_cid_reg, kSmiCid); |
2031 | } |
2032 | __ BranchIfSmi(value_reg, value_is_smi == NULL ? &done : value_is_smi); |
2033 | __ LoadClassId(value_cid_reg, value_reg); |
2034 | __ Bind(&done); |
2035 | } |
2036 | |
2037 | DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr) |
2038 | DEFINE_UNIMPLEMENTED_INSTRUCTION(CheckConditionInstr) |
2039 | |
2040 | LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone, |
2041 | bool opt) const { |
2042 | const intptr_t kNumInputs = 1; |
2043 | |
2044 | const intptr_t value_cid = value()->Type()->ToCid(); |
2045 | const intptr_t field_cid = field().guarded_cid(); |
2046 | |
2047 | const bool emit_full_guard = !opt || (field_cid == kIllegalCid); |
2048 | |
2049 | const bool needs_value_cid_temp_reg = |
2050 | emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid)); |
2051 | |
2052 | const bool needs_field_temp_reg = emit_full_guard; |
2053 | |
2054 | intptr_t num_temps = 0; |
2055 | if (needs_value_cid_temp_reg) { |
2056 | num_temps++; |
2057 | } |
2058 | if (needs_field_temp_reg) { |
2059 | num_temps++; |
2060 | } |
2061 | |
2062 | LocationSummary* summary = new (zone) |
2063 | LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall); |
2064 | summary->set_in(0, Location::RequiresRegister()); |
2065 | |
2066 | for (intptr_t i = 0; i < num_temps; i++) { |
2067 | summary->set_temp(i, Location::RequiresRegister()); |
2068 | } |
2069 | |
2070 | return summary; |
2071 | } |
2072 | |
2073 | void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2074 | ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16); |
2075 | ASSERT(sizeof(FieldLayout::guarded_cid_) == 2); |
2076 | ASSERT(sizeof(FieldLayout::is_nullable_) == 2); |
2077 | |
2078 | const intptr_t value_cid = value()->Type()->ToCid(); |
2079 | const intptr_t field_cid = field().guarded_cid(); |
2080 | const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid; |
2081 | |
2082 | if (field_cid == kDynamicCid) { |
2083 | return; // Nothing to emit. |
2084 | } |
2085 | |
2086 | const bool emit_full_guard = |
2087 | !compiler->is_optimizing() || (field_cid == kIllegalCid); |
2088 | |
2089 | const bool needs_value_cid_temp_reg = |
2090 | emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid)); |
2091 | |
2092 | const bool needs_field_temp_reg = emit_full_guard; |
2093 | |
2094 | const Register value_reg = locs()->in(0).reg(); |
2095 | |
2096 | const Register value_cid_reg = |
2097 | needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister; |
2098 | |
2099 | const Register field_reg = needs_field_temp_reg |
2100 | ? locs()->temp(locs()->temp_count() - 1).reg() |
2101 | : kNoRegister; |
2102 | |
2103 | compiler::Label ok, fail_label; |
2104 | |
2105 | compiler::Label* deopt = |
2106 | compiler->is_optimizing() |
2107 | ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) |
2108 | : NULL; |
2109 | |
2110 | compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label; |
2111 | |
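// With a full guard, the Field object is loaded and its tracked guarded_cid /
// is_nullable state is compared (and possibly initialized) at run time.
// Otherwise the guarded cid is known statically and the value is checked
// against it directly, deoptimizing on mismatch.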
2112 | if (emit_full_guard) { |
2113 | __ LoadObject(field_reg, Field::ZoneHandle((field().Original()))); |
2114 | |
2115 | compiler::FieldAddress field_cid_operand( |
2116 | field_reg, Field::guarded_cid_offset(), kUnsignedHalfword); |
2117 | compiler::FieldAddress field_nullability_operand( |
2118 | field_reg, Field::is_nullable_offset(), kUnsignedHalfword); |
2119 | |
2120 | if (value_cid == kDynamicCid) { |
2121 | LoadValueCid(compiler, value_cid_reg, value_reg); |
2122 | compiler::Label skip_length_check; |
2123 | __ ldr(TMP, field_cid_operand, kUnsignedHalfword); |
2124 | __ CompareRegisters(value_cid_reg, TMP); |
2125 | __ b(&ok, EQ); |
2126 | __ ldr(TMP, field_nullability_operand, kUnsignedHalfword); |
2127 | __ CompareRegisters(value_cid_reg, TMP); |
2128 | } else if (value_cid == kNullCid) { |
2129 | __ ldr(value_cid_reg, field_nullability_operand, kUnsignedHalfword); |
2130 | __ CompareImmediate(value_cid_reg, value_cid); |
2131 | } else { |
2132 | compiler::Label skip_length_check; |
2133 | __ ldr(value_cid_reg, field_cid_operand, kUnsignedHalfword); |
2134 | __ CompareImmediate(value_cid_reg, value_cid); |
2135 | } |
2136 | __ b(&ok, EQ); |
2137 | |
// Check if the tracked state of the guarded field can be initialized
// inline. If the field needs a length check, we fall through to the
// runtime, which is responsible for computing the offset of the length
// field based on the class id.
// The length guard will be emitted separately when needed via the
// GuardFieldLength instruction after GuardFieldClass.
2144 | if (!field().needs_length_check()) { |
2145 | // Uninitialized field can be handled inline. Check if the |
// field is still uninitialized.
2147 | __ ldr(TMP, field_cid_operand, kUnsignedHalfword); |
2148 | __ CompareImmediate(TMP, kIllegalCid); |
2149 | __ b(fail, NE); |
2150 | |
2151 | if (value_cid == kDynamicCid) { |
2152 | __ str(value_cid_reg, field_cid_operand, kUnsignedHalfword); |
2153 | __ str(value_cid_reg, field_nullability_operand, kUnsignedHalfword); |
2154 | } else { |
2155 | __ LoadImmediate(TMP, value_cid); |
2156 | __ str(TMP, field_cid_operand, kUnsignedHalfword); |
2157 | __ str(TMP, field_nullability_operand, kUnsignedHalfword); |
2158 | } |
2159 | |
2160 | __ b(&ok); |
2161 | } |
2162 | |
2163 | if (deopt == NULL) { |
2164 | ASSERT(!compiler->is_optimizing()); |
2165 | __ Bind(fail); |
2166 | |
2167 | __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(), |
2168 | kUnsignedHalfword); |
2169 | __ CompareImmediate(TMP, kDynamicCid); |
2170 | __ b(&ok, EQ); |
2171 | |
2172 | __ PushPair(value_reg, field_reg); |
2173 | __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); |
2174 | __ Drop(2); // Drop the field and the value. |
2175 | } else { |
2176 | __ b(fail); |
2177 | } |
2178 | } else { |
2179 | ASSERT(compiler->is_optimizing()); |
2180 | ASSERT(deopt != NULL); |
2181 | |
2182 | // Field guard class has been initialized and is known. |
2183 | if (value_cid == kDynamicCid) { |
2184 | // Value's class id is not known. |
2185 | __ tsti(value_reg, compiler::Immediate(kSmiTagMask)); |
2186 | |
2187 | if (field_cid != kSmiCid) { |
2188 | __ b(fail, EQ); |
2189 | __ LoadClassId(value_cid_reg, value_reg); |
2190 | __ CompareImmediate(value_cid_reg, field_cid); |
2191 | } |
2192 | |
2193 | if (field().is_nullable() && (field_cid != kNullCid)) { |
2194 | __ b(&ok, EQ); |
2195 | __ CompareObject(value_reg, Object::null_object()); |
2196 | } |
2197 | |
2198 | __ b(fail, NE); |
2199 | } else if (value_cid == field_cid) { |
// This would normally be caught by Canonicalize, but RemoveRedefinitions
// may sometimes produce this situation after the last Canonicalize pass.
2202 | } else { |
2203 | // Both value's and field's class id is known. |
2204 | ASSERT(value_cid != nullability); |
2205 | __ b(fail); |
2206 | } |
2207 | } |
2208 | __ Bind(&ok); |
2209 | } |
2210 | |
2211 | LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone, |
2212 | bool opt) const { |
2213 | const intptr_t kNumInputs = 1; |
2214 | if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) { |
2215 | const intptr_t kNumTemps = 3; |
2216 | LocationSummary* summary = new (zone) |
2217 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
2218 | summary->set_in(0, Location::RequiresRegister()); |
2219 | // We need temporaries for field object, length offset and expected length. |
2220 | summary->set_temp(0, Location::RequiresRegister()); |
2221 | summary->set_temp(1, Location::RequiresRegister()); |
2222 | summary->set_temp(2, Location::RequiresRegister()); |
2223 | return summary; |
2224 | } else { |
2225 | LocationSummary* summary = new (zone) |
2226 | LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); |
2227 | summary->set_in(0, Location::RequiresRegister()); |
2228 | return summary; |
2229 | } |
2230 | UNREACHABLE(); |
2231 | } |
2232 | |
2233 | void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2234 | if (field().guarded_list_length() == Field::kNoFixedLength) { |
2235 | return; // Nothing to emit. |
2236 | } |
2237 | |
2238 | compiler::Label* deopt = |
2239 | compiler->is_optimizing() |
2240 | ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) |
2241 | : NULL; |
2242 | |
2243 | const Register value_reg = locs()->in(0).reg(); |
2244 | |
2245 | if (!compiler->is_optimizing() || |
2246 | (field().guarded_list_length() == Field::kUnknownFixedLength)) { |
2247 | const Register field_reg = locs()->temp(0).reg(); |
2248 | const Register offset_reg = locs()->temp(1).reg(); |
2249 | const Register length_reg = locs()->temp(2).reg(); |
2250 | |
2251 | compiler::Label ok; |
2252 | |
2253 | __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); |
2254 | |
2255 | __ ldr(offset_reg, |
2256 | compiler::FieldAddress( |
2257 | field_reg, Field::guarded_list_length_in_object_offset_offset()), |
2258 | kByte); |
2259 | __ ldr(length_reg, compiler::FieldAddress( |
2260 | field_reg, Field::guarded_list_length_offset())); |
2261 | |
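// A negative in-object length offset means there is no length to check for
// this class; the guard trivially passes in that case.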
2262 | __ tst(offset_reg, compiler::Operand(offset_reg)); |
2263 | __ b(&ok, MI); |
2264 | |
// Load the length from the value. GuardFieldClass already verified that
// the value's class matches the guarded class id of the field.
// offset_reg contains the offset already corrected by -kHeapObjectTag,
// which is why we use Address instead of FieldAddress.
2269 | __ ldr(TMP, compiler::Address(value_reg, offset_reg)); |
2270 | __ CompareRegisters(length_reg, TMP); |
2271 | |
2272 | if (deopt == NULL) { |
2273 | __ b(&ok, EQ); |
2274 | |
2275 | __ PushPair(value_reg, field_reg); |
2276 | __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); |
2277 | __ Drop(2); // Drop the field and the value. |
2278 | } else { |
2279 | __ b(deopt, NE); |
2280 | } |
2281 | |
2282 | __ Bind(&ok); |
2283 | } else { |
2284 | ASSERT(compiler->is_optimizing()); |
2285 | ASSERT(field().guarded_list_length() >= 0); |
2286 | ASSERT(field().guarded_list_length_in_object_offset() != |
2287 | Field::kUnknownLengthOffset); |
2288 | |
2289 | __ ldr(TMP, compiler::FieldAddress( |
2290 | value_reg, field().guarded_list_length_in_object_offset())); |
2291 | __ CompareImmediate(TMP, Smi::RawValue(field().guarded_list_length())); |
2292 | __ b(deopt, NE); |
2293 | } |
2294 | } |
2295 | |
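// Slow path that allocates a box object of class 'cls' by calling its
// allocation stub, moving the result into 'result_' and preserving live
// registers around the call.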
2296 | class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> { |
2297 | public: |
2298 | BoxAllocationSlowPath(Instruction* instruction, |
2299 | const Class& cls, |
2300 | Register result) |
2301 | : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {} |
2302 | |
2303 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
2304 | if (compiler::Assembler::EmittingComments()) { |
__ Comment("%s slow path allocation of %s", instruction()->DebugName(),
2306 | String::Handle(cls_.ScrubbedName()).ToCString()); |
2307 | } |
2308 | __ Bind(entry_label()); |
2309 | const Code& stub = Code::ZoneHandle( |
2310 | compiler->zone(), StubCode::GetAllocationStubForClass(cls_)); |
2311 | |
2312 | LocationSummary* locs = instruction()->locs(); |
2313 | |
2314 | locs->live_registers()->Remove(Location::RegisterLocation(result_)); |
2315 | |
2316 | compiler->SaveLiveRegisters(locs); |
2317 | compiler->GenerateStubCall(TokenPosition::kNoSource, // No token position. |
2318 | stub, PcDescriptorsLayout::kOther, locs); |
2319 | __ MoveRegister(result_, R0); |
2320 | compiler->RestoreLiveRegisters(locs); |
2321 | __ b(exit_label()); |
2322 | } |
2323 | |
2324 | static void Allocate(FlowGraphCompiler* compiler, |
2325 | Instruction* instruction, |
2326 | const Class& cls, |
2327 | Register result, |
2328 | Register temp) { |
2329 | if (compiler->intrinsic_mode()) { |
2330 | __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp); |
2331 | } else { |
2332 | BoxAllocationSlowPath* slow_path = |
2333 | new BoxAllocationSlowPath(instruction, cls, result); |
2334 | compiler->AddSlowPathCode(slow_path); |
2335 | |
2336 | __ TryAllocate(cls, slow_path->entry_label(), result, temp); |
2337 | __ Bind(slow_path->exit_label()); |
2338 | } |
2339 | } |
2340 | |
2341 | private: |
2342 | const Class& cls_; |
2343 | const Register result_; |
2344 | }; |
2345 | |
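// Loads the box stored in the field at 'offset' of 'instance_reg' into
// 'box_reg'. If the field is still null, allocates a fresh box of 'cls' and
// stores it into the field with a write barrier.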
2346 | static void EnsureMutableBox(FlowGraphCompiler* compiler, |
2347 | StoreInstanceFieldInstr* instruction, |
2348 | Register box_reg, |
2349 | const Class& cls, |
2350 | Register instance_reg, |
2351 | intptr_t offset, |
2352 | Register temp) { |
2353 | compiler::Label done; |
2354 | __ LoadFieldFromOffset(box_reg, instance_reg, offset); |
2355 | __ CompareObject(box_reg, Object::null_object()); |
2356 | __ b(&done, NE); |
2357 | BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp); |
2358 | __ MoveRegister(temp, box_reg); |
2359 | __ StoreIntoObjectOffset(instance_reg, offset, temp, |
2360 | compiler::Assembler::kValueIsNotSmi, |
2361 | /*lr_reserved=*/!compiler->intrinsic_mode()); |
2362 | __ Bind(&done); |
2363 | } |
2364 | |
2365 | LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone, |
2366 | bool opt) const { |
2367 | const intptr_t kNumInputs = 2; |
2368 | const intptr_t kNumTemps = (IsUnboxedStore() && opt) |
2369 | ? (FLAG_precompiled_mode ? 0 : 2) |
2370 | : (IsPotentialUnboxedStore() ? 2 : 0); |
2371 | LocationSummary* summary = new (zone) |
2372 | LocationSummary(zone, kNumInputs, kNumTemps, |
2373 | (!FLAG_precompiled_mode && |
2374 | ((IsUnboxedStore() && opt && is_initialization()) || |
2375 | IsPotentialUnboxedStore())) |
2376 | ? LocationSummary::kCallOnSlowPath |
2377 | : LocationSummary::kNoCall); |
2378 | |
2379 | summary->set_in(0, Location::RequiresRegister()); |
2380 | if (IsUnboxedStore() && opt) { |
2381 | if (slot().field().is_non_nullable_integer()) { |
2382 | ASSERT(FLAG_precompiled_mode); |
2383 | summary->set_in(1, Location::RequiresRegister()); |
2384 | } else { |
2385 | summary->set_in(1, Location::RequiresFpuRegister()); |
2386 | } |
2387 | if (!FLAG_precompiled_mode) { |
2388 | summary->set_temp(0, Location::RequiresRegister()); |
2389 | summary->set_temp(1, Location::RequiresRegister()); |
2390 | } |
2391 | } else if (IsPotentialUnboxedStore()) { |
2392 | summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister() |
2393 | : Location::RequiresRegister()); |
2394 | summary->set_temp(0, Location::RequiresRegister()); |
2395 | summary->set_temp(1, Location::RequiresRegister()); |
2396 | } else { |
2397 | summary->set_in(1, ShouldEmitStoreBarrier() |
2398 | ? Location::RegisterLocation(kWriteBarrierValueReg) |
2399 | : LocationRegisterOrConstant(value())); |
2400 | } |
2401 | return summary; |
2402 | } |
2403 | |
2404 | void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2405 | ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16); |
2406 | ASSERT(sizeof(FieldLayout::guarded_cid_) == 2); |
2407 | ASSERT(sizeof(FieldLayout::is_nullable_) == 2); |
2408 | |
2409 | compiler::Label skip_store; |
2410 | |
2411 | const Register instance_reg = locs()->in(0).reg(); |
2412 | const intptr_t offset_in_bytes = OffsetInBytes(); |
2413 | ASSERT(offset_in_bytes > 0); // Field is finalized and points after header. |
2414 | |
2415 | if (IsUnboxedStore() && compiler->is_optimizing()) { |
2416 | if (slot().field().is_non_nullable_integer()) { |
2417 | const Register value = locs()->in(1).reg(); |
__ Comment("UnboxedIntegerStoreInstanceFieldInstr");
2419 | __ StoreFieldToOffset(value, instance_reg, offset_in_bytes); |
2420 | return; |
2421 | } |
2422 | |
2423 | const VRegister value = locs()->in(1).fpu_reg(); |
2424 | const intptr_t cid = slot().field().UnboxedFieldCid(); |
2425 | |
2426 | if (FLAG_precompiled_mode) { |
2427 | switch (cid) { |
2428 | case kDoubleCid: |
__ Comment("UnboxedDoubleStoreInstanceFieldInstr");
2430 | __ StoreDFieldToOffset(value, instance_reg, offset_in_bytes); |
2431 | return; |
2432 | case kFloat32x4Cid: |
__ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
2434 | __ StoreQFieldToOffset(value, instance_reg, offset_in_bytes); |
2435 | return; |
2436 | case kFloat64x2Cid: |
__ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
2438 | __ StoreQFieldToOffset(value, instance_reg, offset_in_bytes); |
2439 | return; |
2440 | default: |
2441 | UNREACHABLE(); |
2442 | } |
2443 | } |
2444 | |
2445 | const Register temp = locs()->temp(0).reg(); |
2446 | const Register temp2 = locs()->temp(1).reg(); |
2447 | |
2448 | if (is_initialization()) { |
2449 | const Class* cls = NULL; |
2450 | switch (cid) { |
2451 | case kDoubleCid: |
2452 | cls = &compiler->double_class(); |
2453 | break; |
2454 | case kFloat32x4Cid: |
2455 | cls = &compiler->float32x4_class(); |
2456 | break; |
2457 | case kFloat64x2Cid: |
2458 | cls = &compiler->float64x2_class(); |
2459 | break; |
2460 | default: |
2461 | UNREACHABLE(); |
2462 | } |
2463 | |
2464 | BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2); |
2465 | __ MoveRegister(temp2, temp); |
2466 | __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, temp2, |
2467 | compiler::Assembler::kValueIsNotSmi, |
2468 | /*lr_reserved=*/!compiler->intrinsic_mode()); |
2469 | } else { |
2470 | __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes); |
2471 | } |
2472 | switch (cid) { |
2473 | case kDoubleCid: |
__ Comment("UnboxedDoubleStoreInstanceFieldInstr");
2475 | __ StoreDFieldToOffset(value, temp, Double::value_offset()); |
2476 | break; |
2477 | case kFloat32x4Cid: |
__ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
2479 | __ StoreQFieldToOffset(value, temp, Float32x4::value_offset()); |
2480 | break; |
2481 | case kFloat64x2Cid: |
__ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
2483 | __ StoreQFieldToOffset(value, temp, Float64x2::value_offset()); |
2484 | break; |
2485 | default: |
2486 | UNREACHABLE(); |
2487 | } |
2488 | |
2489 | return; |
2490 | } |
2491 | |
2492 | if (IsPotentialUnboxedStore()) { |
2493 | const Register value_reg = locs()->in(1).reg(); |
2494 | const Register temp = locs()->temp(0).reg(); |
2495 | const Register temp2 = locs()->temp(1).reg(); |
2496 | |
2497 | if (ShouldEmitStoreBarrier()) { |
// The value input is a writable register and should be manually preserved
// across the allocation slow path.
2500 | locs()->live_registers()->Add(locs()->in(1), kTagged); |
2501 | } |
2502 | |
2503 | compiler::Label store_pointer; |
2504 | compiler::Label store_double; |
2505 | compiler::Label store_float32x4; |
2506 | compiler::Label store_float64x2; |
2507 | |
2508 | __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original())); |
2509 | |
2510 | __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(), |
2511 | kUnsignedHalfword); |
2512 | __ CompareImmediate(temp2, kNullCid); |
2513 | __ b(&store_pointer, EQ); |
2514 | |
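// Fields that are not unboxing candidates are stored as regular pointers.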
2515 | __ LoadFromOffset(temp2, temp, Field::kind_bits_offset() - kHeapObjectTag, |
2516 | kUnsignedByte); |
2517 | __ tsti(temp2, compiler::Immediate(1 << Field::kUnboxingCandidateBit)); |
2518 | __ b(&store_pointer, EQ); |
2519 | |
2520 | __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(), |
2521 | kUnsignedHalfword); |
2522 | __ CompareImmediate(temp2, kDoubleCid); |
2523 | __ b(&store_double, EQ); |
2524 | |
2525 | __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(), |
2526 | kUnsignedHalfword); |
2527 | __ CompareImmediate(temp2, kFloat32x4Cid); |
2528 | __ b(&store_float32x4, EQ); |
2529 | |
2530 | __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(), |
2531 | kUnsignedHalfword); |
2532 | __ CompareImmediate(temp2, kFloat64x2Cid); |
2533 | __ b(&store_float64x2, EQ); |
2534 | |
2535 | // Fall through. |
2536 | __ b(&store_pointer); |
2537 | |
2538 | if (!compiler->is_optimizing()) { |
2539 | locs()->live_registers()->Add(locs()->in(0)); |
2540 | locs()->live_registers()->Add(locs()->in(1)); |
2541 | } |
2542 | |
2543 | { |
2544 | __ Bind(&store_double); |
2545 | EnsureMutableBox(compiler, this, temp, compiler->double_class(), |
2546 | instance_reg, offset_in_bytes, temp2); |
2547 | __ LoadDFieldFromOffset(VTMP, value_reg, Double::value_offset()); |
2548 | __ StoreDFieldToOffset(VTMP, temp, Double::value_offset()); |
2549 | __ b(&skip_store); |
2550 | } |
2551 | |
2552 | { |
2553 | __ Bind(&store_float32x4); |
2554 | EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(), |
2555 | instance_reg, offset_in_bytes, temp2); |
2556 | __ LoadQFieldFromOffset(VTMP, value_reg, Float32x4::value_offset()); |
2557 | __ StoreQFieldToOffset(VTMP, temp, Float32x4::value_offset()); |
2558 | __ b(&skip_store); |
2559 | } |
2560 | |
2561 | { |
2562 | __ Bind(&store_float64x2); |
2563 | EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(), |
2564 | instance_reg, offset_in_bytes, temp2); |
2565 | __ LoadQFieldFromOffset(VTMP, value_reg, Float64x2::value_offset()); |
2566 | __ StoreQFieldToOffset(VTMP, temp, Float64x2::value_offset()); |
2567 | __ b(&skip_store); |
2568 | } |
2569 | |
2570 | __ Bind(&store_pointer); |
2571 | } |
2572 | |
2573 | if (ShouldEmitStoreBarrier()) { |
2574 | const Register value_reg = locs()->in(1).reg(); |
2575 | // In intrinsic mode, there is no stack frame and the function will return |
2576 | // by executing 'ret LR' directly. Therefore we cannot overwrite LR. (see |
2577 | // ReturnInstr::EmitNativeCode). |
2578 | ASSERT((kDartAvailableCpuRegs & (1 << LR)) == 0); |
2579 | __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg, |
2580 | CanValueBeSmi(), |
2581 | /*lr_reserved=*/!compiler->intrinsic_mode()); |
2582 | } else { |
2583 | if (locs()->in(1).IsConstant()) { |
2584 | __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, |
2585 | locs()->in(1).constant()); |
2586 | } else { |
2587 | const Register value_reg = locs()->in(1).reg(); |
2588 | __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, |
2589 | value_reg); |
2590 | } |
2591 | } |
2592 | __ Bind(&skip_store); |
2593 | } |
2594 | |
2595 | LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone, |
2596 | bool opt) const { |
2597 | const intptr_t kNumInputs = 1; |
2598 | const intptr_t kNumTemps = 1; |
2599 | LocationSummary* locs = new (zone) |
2600 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
2601 | locs->set_in(0, Location::RequiresRegister()); |
2602 | locs->set_temp(0, Location::RequiresRegister()); |
2603 | return locs; |
2604 | } |
2605 | |
2606 | void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2607 | const Register value = locs()->in(0).reg(); |
2608 | const Register temp = locs()->temp(0).reg(); |
2609 | |
2610 | compiler->used_static_fields().Add(&field()); |
2611 | |
2612 | __ LoadFromOffset(temp, THR, |
2613 | compiler::target::Thread::field_table_values_offset()); |
// Note: static field ids won't be changed by hot-reload.
2615 | __ StoreToOffset(value, temp, |
2616 | compiler::target::FieldTable::OffsetOf(field())); |
2617 | } |
2618 | |
2619 | LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone, |
2620 | bool opt) const { |
2621 | const intptr_t kNumInputs = 3; |
2622 | const intptr_t kNumTemps = 0; |
2623 | LocationSummary* summary = new (zone) |
2624 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2625 | summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg)); |
2626 | summary->set_in(1, Location::RegisterLocation( |
2627 | TypeTestABI::kInstantiatorTypeArgumentsReg)); |
2628 | summary->set_in( |
2629 | 2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg)); |
2630 | summary->set_out(0, Location::RegisterLocation(R0)); |
2631 | return summary; |
2632 | } |
2633 | |
2634 | void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2635 | ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg); |
2636 | ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg); |
2637 | ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg); |
2638 | |
2639 | compiler->GenerateInstanceOf(token_pos(), deopt_id(), type(), locs()); |
2640 | ASSERT(locs()->out(0).reg() == R0); |
2641 | } |
2642 | |
2643 | LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone, |
2644 | bool opt) const { |
2645 | const intptr_t kNumInputs = 2; |
2646 | const intptr_t kNumTemps = 0; |
2647 | LocationSummary* locs = new (zone) |
2648 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2649 | locs->set_in(kElementTypePos, Location::RegisterLocation(R1)); |
2650 | locs->set_in(kLengthPos, Location::RegisterLocation(R2)); |
2651 | locs->set_out(0, Location::RegisterLocation(R0)); |
2652 | return locs; |
2653 | } |
2654 | |
2655 | // Inlines array allocation for known constant values. |
2656 | static void InlineArrayAllocation(FlowGraphCompiler* compiler, |
2657 | intptr_t num_elements, |
2658 | compiler::Label* slow_path, |
2659 | compiler::Label* done) { |
2660 | const int kInlineArraySize = 12; // Same as kInlineInstanceSize. |
2661 | const Register kLengthReg = R2; |
2662 | const Register kElemTypeReg = R1; |
2663 | const intptr_t instance_size = Array::InstanceSize(num_elements); |
2664 | |
2665 | __ TryAllocateArray(kArrayCid, instance_size, slow_path, |
2666 | R0, // instance |
2667 | R3, // end address |
2668 | R6, R8); |
2669 | // R0: new object start as a tagged pointer. |
2670 | // R3: new object end address. |
2671 | |
2672 | // Store the type argument field. |
2673 | __ StoreIntoObjectNoBarrier( |
2674 | R0, compiler::FieldAddress(R0, Array::type_arguments_offset()), |
2675 | kElemTypeReg); |
2676 | |
2677 | // Set the length field. |
2678 | __ StoreIntoObjectNoBarrier( |
2679 | R0, compiler::FieldAddress(R0, Array::length_offset()), kLengthReg); |
2680 | |
2681 | // TODO(zra): Use stp once added. |
2682 | // Initialize all array elements to raw_null. |
2683 | // R0: new object start as a tagged pointer. |
2684 | // R3: new object end address. |
2685 | // R8: iterator which initially points to the start of the variable |
2686 | // data area to be initialized. |
2687 | // R6: null |
2688 | if (num_elements > 0) { |
2689 | const intptr_t array_size = instance_size - sizeof(ArrayLayout); |
2690 | __ LoadObject(R6, Object::null_object()); |
2691 | __ AddImmediate(R8, R0, sizeof(ArrayLayout) - kHeapObjectTag); |
2692 | if (array_size < (kInlineArraySize * kWordSize)) { |
2693 | intptr_t current_offset = 0; |
2694 | while (current_offset < array_size) { |
2695 | __ str(R6, compiler::Address(R8, current_offset)); |
2696 | current_offset += kWordSize; |
2697 | } |
2698 | } else { |
2699 | compiler::Label end_loop, init_loop; |
2700 | __ Bind(&init_loop); |
2701 | __ CompareRegisters(R8, R3); |
2702 | __ b(&end_loop, CS); |
2703 | __ str(R6, compiler::Address(R8)); |
2704 | __ AddImmediate(R8, kWordSize); |
2705 | __ b(&init_loop); |
2706 | __ Bind(&end_loop); |
2707 | } |
2708 | } |
2709 | __ b(done); |
2710 | } |
2711 | |
2712 | void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2713 | TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info(); |
2714 | if (type_usage_info != nullptr) { |
2715 | const Class& list_class = Class::Handle( |
2716 | compiler->thread()->isolate()->class_table()->At(kArrayCid)); |
2717 | RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class, |
2718 | element_type()->definition()); |
2719 | } |
2720 | |
2721 | const Register kLengthReg = R2; |
2722 | const Register kElemTypeReg = R1; |
2723 | const Register kResultReg = R0; |
2724 | |
2725 | ASSERT(locs()->in(kElementTypePos).reg() == kElemTypeReg); |
2726 | ASSERT(locs()->in(kLengthPos).reg() == kLengthReg); |
2727 | |
2728 | compiler::Label slow_path, done; |
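// Try to allocate the array inline when the length is a known, valid
// constant (JIT only); otherwise fall through to the allocation stub.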
2729 | if (compiler->is_optimizing() && !FLAG_precompiled_mode && |
2730 | num_elements()->BindsToConstant() && |
2731 | num_elements()->BoundConstant().IsSmi()) { |
2732 | const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value(); |
2733 | if (Array::IsValidLength(length)) { |
2734 | InlineArrayAllocation(compiler, length, &slow_path, &done); |
2735 | } |
2736 | } |
2737 | |
2738 | __ Bind(&slow_path); |
2739 | auto object_store = compiler->isolate()->object_store(); |
2740 | const auto& allocate_array_stub = |
2741 | Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub()); |
2742 | compiler->GenerateStubCall(token_pos(), allocate_array_stub, |
2743 | PcDescriptorsLayout::kOther, locs(), deopt_id()); |
2744 | ASSERT(locs()->out(0).reg() == kResultReg); |
2745 | __ Bind(&done); |
2746 | } |
2747 | |
2748 | LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone, |
2749 | bool opt) const { |
2750 | const intptr_t kNumInputs = 1; |
2751 | const intptr_t kNumTemps = (IsUnboxedLoad() && opt) |
2752 | ? (FLAG_precompiled_mode ? 0 : 1) |
2753 | : (IsPotentialUnboxedLoad() ? 1 : 0); |
2754 | const auto contains_call = |
2755 | (IsUnboxedLoad() && opt) |
2756 | ? LocationSummary::kNoCall |
2757 | : (IsPotentialUnboxedLoad() |
2758 | ? LocationSummary::kCallOnSlowPath |
2759 | : (calls_initializer() ? LocationSummary::kCall |
2760 | : LocationSummary::kNoCall)); |
2761 | |
2762 | LocationSummary* locs = |
2763 | new (zone) LocationSummary(zone, kNumInputs, kNumTemps, contains_call); |
2764 | |
2765 | locs->set_in(0, calls_initializer() ? Location::RegisterLocation( |
2766 | InitInstanceFieldABI::kInstanceReg) |
2767 | : Location::RequiresRegister()); |
2768 | |
2769 | if (IsUnboxedLoad() && opt) { |
2770 | ASSERT(!calls_initializer()); |
2771 | if (!FLAG_precompiled_mode) { |
2772 | locs->set_temp(0, Location::RequiresRegister()); |
2773 | } |
2774 | if (slot().field().is_non_nullable_integer()) { |
2775 | ASSERT(FLAG_precompiled_mode); |
2776 | locs->set_out(0, Location::RequiresRegister()); |
2777 | } else { |
2778 | locs->set_out(0, Location::RequiresFpuRegister()); |
2779 | } |
2780 | } else if (IsPotentialUnboxedLoad()) { |
2781 | ASSERT(!calls_initializer()); |
2782 | locs->set_temp(0, Location::RequiresRegister()); |
2783 | locs->set_out(0, Location::RequiresRegister()); |
2784 | } else if (calls_initializer()) { |
2785 | locs->set_out(0, |
2786 | Location::RegisterLocation(InitInstanceFieldABI::kResultReg)); |
2787 | } else { |
2788 | locs->set_out(0, Location::RequiresRegister()); |
2789 | } |
2790 | return locs; |
2791 | } |
2792 | |
2793 | void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2794 | ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16); |
2795 | ASSERT(sizeof(FieldLayout::guarded_cid_) == 2); |
2796 | ASSERT(sizeof(FieldLayout::is_nullable_) == 2); |
2797 | |
2798 | const Register instance_reg = locs()->in(0).reg(); |
2799 | if (IsUnboxedLoad() && compiler->is_optimizing()) { |
2800 | ASSERT(!calls_initializer()); |
2801 | if (slot().field().is_non_nullable_integer()) { |
2802 | const Register result = locs()->out(0).reg(); |
__ Comment("UnboxedIntegerLoadFieldInstr");
2804 | __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes()); |
2805 | return; |
2806 | } |
2807 | |
2808 | const VRegister result = locs()->out(0).fpu_reg(); |
2809 | const intptr_t cid = slot().field().UnboxedFieldCid(); |
2810 | |
2811 | if (FLAG_precompiled_mode) { |
2812 | switch (cid) { |
2813 | case kDoubleCid: |
__ Comment("UnboxedDoubleLoadFieldInstr");
2815 | __ LoadDFieldFromOffset(result, instance_reg, OffsetInBytes()); |
2816 | return; |
2817 | case kFloat32x4Cid: |
__ Comment("UnboxedFloat32x4LoadFieldInstr");
2819 | __ LoadQFieldFromOffset(result, instance_reg, OffsetInBytes()); |
2820 | return; |
2821 | case kFloat64x2Cid: |
__ Comment("UnboxedFloat64x2LoadFieldInstr");
2823 | __ LoadQFieldFromOffset(result, instance_reg, OffsetInBytes()); |
2824 | return; |
2825 | default: |
2826 | UNREACHABLE(); |
2827 | } |
2828 | } |
2829 | |
2830 | const Register temp = locs()->temp(0).reg(); |
2831 | |
2832 | __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes()); |
2833 | switch (cid) { |
2834 | case kDoubleCid: |
__ Comment("UnboxedDoubleLoadFieldInstr");
2836 | __ LoadDFieldFromOffset(result, temp, Double::value_offset()); |
2837 | break; |
2838 | case kFloat32x4Cid: |
2839 | __ LoadQFieldFromOffset(result, temp, Float32x4::value_offset()); |
2840 | break; |
2841 | case kFloat64x2Cid: |
2842 | __ LoadQFieldFromOffset(result, temp, Float64x2::value_offset()); |
2843 | break; |
2844 | default: |
2845 | UNREACHABLE(); |
2846 | } |
2847 | return; |
2848 | } |
2849 | |
2850 | compiler::Label done; |
2851 | const Register result_reg = locs()->out(0).reg(); |
2852 | if (IsPotentialUnboxedLoad()) { |
2853 | ASSERT(!calls_initializer()); |
2854 | const Register temp = locs()->temp(0).reg(); |
2855 | |
2856 | compiler::Label load_pointer; |
2857 | compiler::Label load_double; |
2858 | compiler::Label load_float32x4; |
2859 | compiler::Label load_float64x2; |
2860 | |
2861 | __ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original())); |
2862 | |
2863 | compiler::FieldAddress field_cid_operand( |
2864 | result_reg, Field::guarded_cid_offset(), kUnsignedHalfword); |
2865 | compiler::FieldAddress field_nullability_operand( |
2866 | result_reg, Field::is_nullable_offset(), kUnsignedHalfword); |
2867 | |
2868 | __ ldr(temp, field_nullability_operand, kUnsignedHalfword); |
2869 | __ CompareImmediate(temp, kNullCid); |
2870 | __ b(&load_pointer, EQ); |
2871 | |
2872 | __ ldr(temp, field_cid_operand, kUnsignedHalfword); |
2873 | __ CompareImmediate(temp, kDoubleCid); |
2874 | __ b(&load_double, EQ); |
2875 | |
2876 | __ ldr(temp, field_cid_operand, kUnsignedHalfword); |
2877 | __ CompareImmediate(temp, kFloat32x4Cid); |
2878 | __ b(&load_float32x4, EQ); |
2879 | |
2880 | __ ldr(temp, field_cid_operand, kUnsignedHalfword); |
2881 | __ CompareImmediate(temp, kFloat64x2Cid); |
2882 | __ b(&load_float64x2, EQ); |
2883 | |
2884 | // Fall through. |
2885 | __ b(&load_pointer); |
2886 | |
2887 | if (!compiler->is_optimizing()) { |
2888 | locs()->live_registers()->Add(locs()->in(0)); |
2889 | } |
2890 | |
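    // Each typed case below allocates a box of the matching class, copies
    // the unboxed payload out of the field via VTMP and jumps to 'done';
    // the generic case loads the field as a tagged pointer instead.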
2891 | { |
2892 | __ Bind(&load_double); |
2893 | BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), |
2894 | result_reg, temp); |
2895 | __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes()); |
2896 | __ LoadDFieldFromOffset(VTMP, temp, Double::value_offset()); |
2897 | __ StoreDFieldToOffset(VTMP, result_reg, Double::value_offset()); |
2898 | __ b(&done); |
2899 | } |
2900 | |
2901 | { |
2902 | __ Bind(&load_float32x4); |
2903 | BoxAllocationSlowPath::Allocate( |
2904 | compiler, this, compiler->float32x4_class(), result_reg, temp); |
2905 | __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes()); |
2906 | __ LoadQFieldFromOffset(VTMP, temp, Float32x4::value_offset()); |
2907 | __ StoreQFieldToOffset(VTMP, result_reg, Float32x4::value_offset()); |
2908 | __ b(&done); |
2909 | } |
2910 | |
2911 | { |
2912 | __ Bind(&load_float64x2); |
2913 | BoxAllocationSlowPath::Allocate( |
2914 | compiler, this, compiler->float64x2_class(), result_reg, temp); |
2915 | __ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes()); |
2916 | __ LoadQFieldFromOffset(VTMP, temp, Float64x2::value_offset()); |
2917 | __ StoreQFieldToOffset(VTMP, result_reg, Float64x2::value_offset()); |
2918 | __ b(&done); |
2919 | } |
2920 | |
2921 | __ Bind(&load_pointer); |
2922 | } |
2923 | |
2924 | __ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes()); |
2925 | |
2926 | if (calls_initializer()) { |
2927 | EmitNativeCodeForInitializerCall(compiler); |
2928 | } |
2929 | |
2930 | __ Bind(&done); |
2931 | } |
2932 | |
2933 | LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone, |
2934 | bool opt) const { |
2935 | const intptr_t kNumInputs = 2; |
2936 | const intptr_t kNumTemps = 0; |
2937 | LocationSummary* locs = new (zone) |
2938 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2939 | locs->set_in(0, Location::RegisterLocation( |
2940 | InstantiationABI::kInstantiatorTypeArgumentsReg)); |
2941 | locs->set_in(1, Location::RegisterLocation( |
2942 | InstantiationABI::kFunctionTypeArgumentsReg)); |
2943 | locs->set_out(0, |
2944 | Location::RegisterLocation(InstantiationABI::kResultTypeReg)); |
2945 | return locs; |
2946 | } |
2947 | |
2948 | void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2949 | const Register instantiator_type_args_reg = locs()->in(0).reg(); |
2950 | const Register function_type_args_reg = locs()->in(1).reg(); |
2951 | const Register result_reg = locs()->out(0).reg(); |
2952 | |
2953 | // 'instantiator_type_args_reg' is a TypeArguments object (or null). |
2954 | // 'function_type_args_reg' is a TypeArguments object (or null). |
2955 | // A runtime call to instantiate the type is required. |
2956 | __ LoadObject(TMP, type()); |
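  // The null pushed alongside the type reserves the stack slot into which
  // the runtime writes the instantiated type; it is popped into result_reg
  // after the arguments are dropped.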
2957 | __ PushPair(TMP, NULL_REG); |
2958 | __ PushPair(function_type_args_reg, instantiator_type_args_reg); |
2959 | compiler->GenerateRuntimeCall(token_pos(), deopt_id(), |
2960 | kInstantiateTypeRuntimeEntry, 3, locs()); |
2961 | __ Drop(3); // Drop 2 type vectors, and uninstantiated type. |
2962 | __ Pop(result_reg); // Pop instantiated type. |
2963 | } |
2964 | |
2965 | LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary( |
2966 | Zone* zone, |
2967 | bool opt) const { |
2968 | const intptr_t kNumInputs = 2; |
2969 | const intptr_t kNumTemps = 0; |
2970 | LocationSummary* locs = new (zone) |
2971 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2972 | locs->set_in(0, Location::RegisterLocation( |
2973 | InstantiationABI::kInstantiatorTypeArgumentsReg)); |
2974 | locs->set_in(1, Location::RegisterLocation( |
2975 | InstantiationABI::kFunctionTypeArgumentsReg)); |
2976 | locs->set_out( |
2977 | 0, Location::RegisterLocation(InstantiationABI::kResultTypeArgumentsReg)); |
2978 | return locs; |
2979 | } |
2980 | |
2981 | void InstantiateTypeArgumentsInstr::EmitNativeCode( |
2982 | FlowGraphCompiler* compiler) { |
2983 | const Register instantiator_type_args_reg = locs()->in(0).reg(); |
2984 | const Register function_type_args_reg = locs()->in(1).reg(); |
2985 | const Register result_reg = locs()->out(0).reg(); |
2986 | |
2987 | // 'instantiator_type_args_reg' is a TypeArguments object (or null). |
2988 | // 'function_type_args_reg' is a TypeArguments object (or null). |
2989 | |
2990 | // If both the instantiator and function type arguments are null and if the |
2991 | // type argument vector instantiated from null becomes a vector of dynamic, |
2992 | // then use null as the type arguments. |
2993 | compiler::Label type_arguments_instantiated; |
2994 | const intptr_t len = type_arguments().Length(); |
2995 | const bool can_function_type_args_be_null = |
2996 | function_type_arguments()->CanBe(Object::null_object()); |
2997 | if (type_arguments().IsRawWhenInstantiatedFromRaw(len) && |
2998 | can_function_type_args_be_null) { |
2999 | compiler::Label non_null_type_args; |
3000 | ASSERT(result_reg != instantiator_type_args_reg && |
3001 | result_reg != function_type_args_reg); |
3002 | __ LoadObject(result_reg, Object::null_object()); |
3003 | __ CompareRegisters(instantiator_type_args_reg, result_reg); |
3004 | if (!function_type_arguments()->BindsToConstant()) { |
3005 | __ b(&non_null_type_args, NE); |
3006 | __ CompareRegisters(function_type_args_reg, result_reg); |
3007 | } |
3008 | __ b(&type_arguments_instantiated, EQ); |
3009 | __ Bind(&non_null_type_args); |
3010 | } |
3011 | // Lookup cache in stub before calling runtime. |
3012 | __ LoadObject(InstantiationABI::kUninstantiatedTypeArgumentsReg, |
3013 | type_arguments()); |
3014 | compiler->GenerateStubCall(token_pos(), GetStub(), |
3015 | PcDescriptorsLayout::kOther, locs()); |
3016 | __ Bind(&type_arguments_instantiated); |
3017 | } |
3018 | |
3019 | LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary( |
3020 | Zone* zone, |
3021 | bool opt) const { |
3022 | ASSERT(opt); |
3023 | const intptr_t kNumInputs = 0; |
3024 | const intptr_t kNumTemps = 3; |
3025 | LocationSummary* locs = new (zone) LocationSummary( |
3026 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
3027 | locs->set_temp(0, Location::RegisterLocation(R1)); |
3028 | locs->set_temp(1, Location::RegisterLocation(R2)); |
3029 | locs->set_temp(2, Location::RegisterLocation(R3)); |
3030 | locs->set_out(0, Location::RegisterLocation(R0)); |
3031 | return locs; |
3032 | } |
3033 | |
3034 | class AllocateContextSlowPath |
3035 | : public TemplateSlowPathCode<AllocateUninitializedContextInstr> { |
3036 | public: |
3037 | explicit AllocateContextSlowPath( |
3038 | AllocateUninitializedContextInstr* instruction) |
3039 | : TemplateSlowPathCode(instruction) {} |
3040 | |
3041 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
    __ Comment("AllocateContextSlowPath");
3043 | __ Bind(entry_label()); |
3044 | |
3045 | LocationSummary* locs = instruction()->locs(); |
3046 | locs->live_registers()->Remove(locs->out(0)); |
3047 | |
3048 | compiler->SaveLiveRegisters(locs); |
3049 | |
3050 | auto object_store = compiler->isolate()->object_store(); |
3051 | const auto& allocate_context_stub = Code::ZoneHandle( |
3052 | compiler->zone(), object_store->allocate_context_stub()); |
3053 | |
3054 | __ LoadImmediate(R1, instruction()->num_context_variables()); |
3055 | compiler->GenerateStubCall(instruction()->token_pos(), |
3056 | allocate_context_stub, |
3057 | PcDescriptorsLayout::kOther, locs); |
3058 | ASSERT(instruction()->locs()->out(0).reg() == R0); |
3059 | compiler->RestoreLiveRegisters(instruction()->locs()); |
3060 | __ b(exit_label()); |
3061 | } |
3062 | }; |
3063 | |
3064 | void AllocateUninitializedContextInstr::EmitNativeCode( |
3065 | FlowGraphCompiler* compiler) { |
3066 | Register temp0 = locs()->temp(0).reg(); |
3067 | Register temp1 = locs()->temp(1).reg(); |
3068 | Register temp2 = locs()->temp(2).reg(); |
3069 | Register result = locs()->out(0).reg(); |
  // Try to allocate the object.
3071 | AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this); |
3072 | compiler->AddSlowPathCode(slow_path); |
3073 | intptr_t instance_size = Context::InstanceSize(num_context_variables()); |
3074 | |
3075 | __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(), |
3076 | result, // instance |
3077 | temp0, temp1, temp2); |
3078 | |
  // Set up the number of context variables field.
3080 | __ LoadImmediate(temp0, num_context_variables()); |
3081 | __ str(temp0, |
3082 | compiler::FieldAddress(result, Context::num_variables_offset())); |
3083 | |
3084 | __ Bind(slow_path->exit_label()); |
3085 | } |
3086 | |
3087 | LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone, |
3088 | bool opt) const { |
3089 | const intptr_t kNumInputs = 0; |
3090 | const intptr_t kNumTemps = 1; |
3091 | LocationSummary* locs = new (zone) |
3092 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
3093 | locs->set_temp(0, Location::RegisterLocation(R1)); |
3094 | locs->set_out(0, Location::RegisterLocation(R0)); |
3095 | return locs; |
3096 | } |
3097 | |
3098 | void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3099 | ASSERT(locs()->temp(0).reg() == R1); |
3100 | ASSERT(locs()->out(0).reg() == R0); |
3101 | |
3102 | auto object_store = compiler->isolate()->object_store(); |
3103 | const auto& allocate_context_stub = |
3104 | Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub()); |
3105 | __ LoadImmediate(R1, num_context_variables()); |
3106 | compiler->GenerateStubCall(token_pos(), allocate_context_stub, |
3107 | PcDescriptorsLayout::kOther, locs()); |
3108 | } |
3109 | |
3110 | LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone, |
3111 | bool opt) const { |
3112 | const intptr_t kNumInputs = 1; |
3113 | const intptr_t kNumTemps = 0; |
3114 | LocationSummary* locs = new (zone) |
3115 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
3116 | locs->set_in(0, Location::RegisterLocation(R5)); |
3117 | locs->set_out(0, Location::RegisterLocation(R0)); |
3118 | return locs; |
3119 | } |
3120 | |
3121 | void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3122 | ASSERT(locs()->in(0).reg() == R5); |
3123 | ASSERT(locs()->out(0).reg() == R0); |
3124 | |
3125 | auto object_store = compiler->isolate()->object_store(); |
3126 | const auto& clone_context_stub = |
3127 | Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub()); |
3128 | compiler->GenerateStubCall(token_pos(), clone_context_stub, |
3129 | /*kind=*/PcDescriptorsLayout::kOther, locs()); |
3130 | } |
3131 | |
3132 | LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone, |
3133 | bool opt) const { |
3134 | UNREACHABLE(); |
3135 | return NULL; |
3136 | } |
3137 | |
3138 | void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3139 | __ Bind(compiler->GetJumpLabel(this)); |
3140 | compiler->AddExceptionHandler( |
3141 | catch_try_index(), try_index(), compiler->assembler()->CodeSize(), |
3142 | is_generated(), catch_handler_types_, needs_stacktrace()); |
3143 | if (!FLAG_precompiled_mode) { |
3144 | // On lazy deoptimization we patch the optimized code here to enter the |
3145 | // deoptimization stub. |
3146 | const intptr_t deopt_id = DeoptId::ToDeoptAfter(GetDeoptId()); |
3147 | if (compiler->is_optimizing()) { |
3148 | compiler->AddDeoptIndexAtCall(deopt_id); |
3149 | } else { |
3150 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id, |
3151 | TokenPosition::kNoSource); |
3152 | } |
3153 | } |
3154 | if (HasParallelMove()) { |
3155 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
3156 | } |
3157 | |
3158 | // Restore SP from FP as we are coming from a throw and the code for |
3159 | // popping arguments has not been run. |
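  // Compute the (non-positive) FP-relative offset of SP for this frame and
  // reset SP to it.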
3160 | const intptr_t fp_sp_dist = |
3161 | (compiler::target::frame_layout.first_local_from_fp + 1 - |
3162 | compiler->StackSize()) * |
3163 | kWordSize; |
3164 | ASSERT(fp_sp_dist <= 0); |
3165 | __ AddImmediate(SP, FP, fp_sp_dist); |
3166 | |
3167 | if (!compiler->is_optimizing()) { |
3168 | if (raw_exception_var_ != nullptr) { |
3169 | __ StoreToOffset( |
3170 | kExceptionObjectReg, FP, |
3171 | compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_)); |
3172 | } |
3173 | if (raw_stacktrace_var_ != nullptr) { |
3174 | __ StoreToOffset( |
3175 | kStackTraceObjectReg, FP, |
3176 | compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_)); |
3177 | } |
3178 | } |
3179 | } |
3180 | |
3181 | LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone, |
3182 | bool opt) const { |
3183 | const intptr_t kNumInputs = 0; |
3184 | const intptr_t kNumTemps = 1; |
3185 | const bool using_shared_stub = UseSharedSlowPathStub(opt); |
3186 | ASSERT((kReservedCpuRegisters & (1 << LR)) != 0); |
3187 | LocationSummary* summary = new (zone) |
3188 | LocationSummary(zone, kNumInputs, kNumTemps, |
3189 | using_shared_stub ? LocationSummary::kCallOnSharedSlowPath |
3190 | : LocationSummary::kCallOnSlowPath); |
3191 | summary->set_temp(0, Location::RequiresRegister()); |
3192 | return summary; |
3193 | } |
3194 | |
3195 | class CheckStackOverflowSlowPath |
3196 | : public TemplateSlowPathCode<CheckStackOverflowInstr> { |
3197 | public: |
3198 | static constexpr intptr_t kNumSlowPathArgs = 0; |
3199 | |
3200 | explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) |
3201 | : TemplateSlowPathCode(instruction) {} |
3202 | |
3203 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
3204 | auto locs = instruction()->locs(); |
3205 | if (compiler->isolate()->use_osr() && osr_entry_label()->IsLinked()) { |
3206 | const Register value = locs->temp(0).reg(); |
      __ Comment("CheckStackOverflowSlowPathOsr");
3208 | __ Bind(osr_entry_label()); |
3209 | __ LoadImmediate(value, Thread::kOsrRequest); |
3210 | __ str(value, |
3211 | compiler::Address(THR, Thread::stack_overflow_flags_offset())); |
3212 | } |
    __ Comment("CheckStackOverflowSlowPath");
3214 | __ Bind(entry_label()); |
3215 | const bool using_shared_stub = locs->call_on_shared_slow_path(); |
3216 | if (!using_shared_stub) { |
3217 | compiler->SaveLiveRegisters(locs); |
3218 | } |
3219 | // pending_deoptimization_env_ is needed to generate a runtime call that |
3220 | // may throw an exception. |
3221 | ASSERT(compiler->pending_deoptimization_env_ == NULL); |
3222 | Environment* env = |
3223 | compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs); |
3224 | compiler->pending_deoptimization_env_ = env; |
3225 | |
3226 | if (using_shared_stub) { |
3227 | auto object_store = compiler->isolate()->object_store(); |
3228 | const bool live_fpu_regs = locs->live_registers()->FpuRegisterCount() > 0; |
3229 | const auto& stub = Code::ZoneHandle( |
3230 | compiler->zone(), |
3231 | live_fpu_regs |
3232 | ? object_store->stack_overflow_stub_with_fpu_regs_stub() |
3233 | : object_store->stack_overflow_stub_without_fpu_regs_stub()); |
3234 | |
3235 | if (using_shared_stub && compiler->CanPcRelativeCall(stub)) { |
3236 | __ GenerateUnRelocatedPcRelativeCall(); |
3237 | compiler->AddPcRelativeCallStubTarget(stub); |
3238 | } else { |
3239 | const uword entry_point_offset = |
3240 | Thread::stack_overflow_shared_stub_entry_point_offset( |
3241 | locs->live_registers()->FpuRegisterCount() > 0); |
3242 | __ ldr(LR, compiler::Address(THR, entry_point_offset)); |
3243 | __ blr(LR); |
3244 | } |
3245 | compiler->RecordSafepoint(locs, kNumSlowPathArgs); |
3246 | compiler->RecordCatchEntryMoves(); |
3247 | compiler->AddDescriptor( |
3248 | PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(), |
3249 | instruction()->deopt_id(), instruction()->token_pos(), |
3250 | compiler->CurrentTryIndex()); |
3251 | } else { |
3252 | compiler->GenerateRuntimeCall( |
3253 | instruction()->token_pos(), instruction()->deopt_id(), |
3254 | kStackOverflowRuntimeEntry, kNumSlowPathArgs, locs); |
3255 | } |
3256 | |
3257 | if (compiler->isolate()->use_osr() && !compiler->is_optimizing() && |
3258 | instruction()->in_loop()) { |
3259 | // In unoptimized code, record loop stack checks as possible OSR entries. |
3260 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry, |
3261 | instruction()->deopt_id(), |
3262 | TokenPosition::kNoSource); |
3263 | } |
3264 | compiler->pending_deoptimization_env_ = NULL; |
3265 | if (!using_shared_stub) { |
3266 | compiler->RestoreLiveRegisters(locs); |
3267 | } |
3268 | __ b(exit_label()); |
3269 | } |
3270 | |
3271 | compiler::Label* osr_entry_label() { |
3272 | ASSERT(Isolate::Current()->use_osr()); |
3273 | return &osr_entry_label_; |
3274 | } |
3275 | |
3276 | private: |
3277 | compiler::Label osr_entry_label_; |
3278 | }; |
3279 | |
3280 | void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3281 | CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); |
3282 | compiler->AddSlowPathCode(slow_path); |
3283 | |
3284 | __ ldr(TMP, compiler::Address( |
3285 | THR, compiler::target::Thread::stack_limit_offset())); |
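  // The stack grows downwards, so the slow path is taken when SP is at or
  // below the stack limit (unsigned comparison, hence LS).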
3286 | __ CompareRegisters(SP, TMP); |
3287 | __ b(slow_path->entry_label(), LS); |
3288 | if (compiler->CanOSRFunction() && in_loop()) { |
3289 | const Register function = locs()->temp(0).reg(); |
3290 | // In unoptimized code check the usage counter to trigger OSR at loop |
3291 | // stack checks. Use progressively higher thresholds for more deeply |
3292 | // nested loops to attempt to hit outer loops with OSR when possible. |
3293 | __ LoadObject(function, compiler->parsed_function().function()); |
3294 | intptr_t threshold = |
3295 | FLAG_optimization_counter_threshold * (loop_depth() + 1); |
3296 | __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(), |
3297 | kWord); |
3298 | __ add(TMP, TMP, compiler::Operand(1)); |
3299 | __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(), |
3300 | kWord); |
3301 | __ CompareImmediate(TMP, threshold); |
3302 | __ b(slow_path->osr_entry_label(), GE); |
3303 | } |
3304 | if (compiler->ForceSlowPathForStackOverflow()) { |
3305 | __ b(slow_path->entry_label()); |
3306 | } |
3307 | __ Bind(slow_path->exit_label()); |
3308 | } |
3309 | |
3310 | static void EmitSmiShiftLeft(FlowGraphCompiler* compiler, |
3311 | BinarySmiOpInstr* shift_left) { |
3312 | const LocationSummary& locs = *shift_left->locs(); |
3313 | const Register left = locs.in(0).reg(); |
3314 | const Register result = locs.out(0).reg(); |
3315 | compiler::Label* deopt = |
3316 | shift_left->CanDeoptimize() |
3317 | ? compiler->AddDeoptStub(shift_left->deopt_id(), |
3318 | ICData::kDeoptBinarySmiOp) |
3319 | : NULL; |
3320 | if (locs.in(1).IsConstant()) { |
3321 | const Object& constant = locs.in(1).constant(); |
3322 | ASSERT(constant.IsSmi()); |
3323 | // Immediate shift operation takes 6 bits for the count. |
3324 | const intptr_t kCountLimit = 0x3F; |
3325 | const intptr_t value = Smi::Cast(constant).Value(); |
3326 | ASSERT((0 < value) && (value < kCountLimit)); |
3327 | if (shift_left->can_overflow()) { |
3328 | // Check for overflow (preserve left). |
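      // (left << value) >> value must reproduce left; if it does not, the
      // shift lost significant bits and we deoptimize.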
3329 | __ LslImmediate(TMP, left, value); |
3330 | __ cmp(left, compiler::Operand(TMP, ASR, value)); |
3331 | __ b(deopt, NE); // Overflow. |
3332 | } |
    // Shift for the result now that we know there is no overflow.
3334 | __ LslImmediate(result, left, value); |
3335 | return; |
3336 | } |
3337 | |
3338 | // Right (locs.in(1)) is not constant. |
3339 | const Register right = locs.in(1).reg(); |
3340 | Range* right_range = shift_left->right_range(); |
3341 | if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) { |
3342 | // TODO(srdjan): Implement code below for is_truncating(). |
3343 | // If left is constant, we know the maximal allowed size for right. |
3344 | const Object& obj = shift_left->left()->BoundConstant(); |
3345 | if (obj.IsSmi()) { |
3346 | const intptr_t left_int = Smi::Cast(obj).Value(); |
3347 | if (left_int == 0) { |
3348 | __ CompareRegisters(right, ZR); |
3349 | __ b(deopt, MI); |
3350 | __ mov(result, ZR); |
3351 | return; |
3352 | } |
3353 | const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int); |
3354 | const bool right_needs_check = |
3355 | !RangeUtils::IsWithin(right_range, 0, max_right - 1); |
3356 | if (right_needs_check) { |
3357 | __ CompareImmediate(right, static_cast<int64_t>(Smi::New(max_right))); |
3358 | __ b(deopt, CS); |
3359 | } |
3360 | __ SmiUntag(TMP, right); |
3361 | __ lslv(result, left, TMP); |
3362 | } |
3363 | return; |
3364 | } |
3365 | |
3366 | const bool right_needs_check = |
3367 | !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1)); |
3368 | if (!shift_left->can_overflow()) { |
3369 | if (right_needs_check) { |
3370 | if (!RangeUtils::IsPositive(right_range)) { |
3371 | ASSERT(shift_left->CanDeoptimize()); |
3372 | __ CompareRegisters(right, ZR); |
3373 | __ b(deopt, MI); |
3374 | } |
3375 | |
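      // A shift count >= Smi::kBits yields 0; otherwise the shifted value is
      // used. Both cases are selected branchlessly via csel on the flags of
      // the comparison below.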
3376 | __ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits))); |
3377 | __ csel(result, ZR, result, CS); |
3378 | __ SmiUntag(TMP, right); |
3379 | __ lslv(TMP, left, TMP); |
3380 | __ csel(result, TMP, result, CC); |
3381 | } else { |
3382 | __ SmiUntag(TMP, right); |
3383 | __ lslv(result, left, TMP); |
3384 | } |
3385 | } else { |
3386 | if (right_needs_check) { |
3387 | ASSERT(shift_left->CanDeoptimize()); |
3388 | __ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits))); |
3389 | __ b(deopt, CS); |
3390 | } |
    // Left is not a constant.
    // Check whether the shift count is too large to handle inline.
3393 | __ SmiUntag(TMP, right); |
3394 | // Overflow test (preserve left, right, and TMP); |
3395 | const Register temp = locs.temp(0).reg(); |
3396 | __ lslv(temp, left, TMP); |
3397 | __ asrv(TMP2, temp, TMP); |
3398 | __ CompareRegisters(left, TMP2); |
3399 | __ b(deopt, NE); // Overflow. |
    // Shift for the result now that we know there is no overflow.
3401 | __ lslv(result, left, TMP); |
3402 | } |
3403 | } |
3404 | |
3405 | class CheckedSmiSlowPath : public TemplateSlowPathCode<CheckedSmiOpInstr> { |
3406 | public: |
3407 | static constexpr intptr_t kNumSlowPathArgs = 2; |
3408 | |
3409 | CheckedSmiSlowPath(CheckedSmiOpInstr* instruction, intptr_t try_index) |
3410 | : TemplateSlowPathCode(instruction), try_index_(try_index) {} |
3411 | |
3412 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
3413 | if (compiler::Assembler::EmittingComments()) { |
      __ Comment("slow path smi operation");
3415 | } |
3416 | __ Bind(entry_label()); |
3417 | LocationSummary* locs = instruction()->locs(); |
3418 | Register result = locs->out(0).reg(); |
3419 | locs->live_registers()->Remove(Location::RegisterLocation(result)); |
3420 | |
3421 | compiler->SaveLiveRegisters(locs); |
3422 | if (instruction()->env() != NULL) { |
3423 | Environment* env = |
3424 | compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs); |
3425 | compiler->pending_deoptimization_env_ = env; |
3426 | } |
3427 | __ PushPair(locs->in(1).reg(), locs->in(0).reg()); |
3428 | const auto& selector = String::Handle(instruction()->call()->Selector()); |
3429 | const auto& arguments_descriptor = |
3430 | Array::Handle(ArgumentsDescriptor::NewBoxed( |
3431 | /*type_args_len=*/0, /*num_arguments=*/2)); |
3432 | compiler->EmitMegamorphicInstanceCall( |
3433 | selector, arguments_descriptor, instruction()->call()->deopt_id(), |
3434 | instruction()->token_pos(), locs, try_index_, kNumSlowPathArgs); |
3435 | __ mov(result, R0); |
3436 | compiler->RestoreLiveRegisters(locs); |
3437 | __ b(exit_label()); |
3438 | compiler->pending_deoptimization_env_ = NULL; |
3439 | } |
3440 | |
3441 | private: |
3442 | intptr_t try_index_; |
3443 | }; |
3444 | |
3445 | LocationSummary* CheckedSmiOpInstr::MakeLocationSummary(Zone* zone, |
3446 | bool opt) const { |
3447 | const intptr_t kNumInputs = 2; |
3448 | const intptr_t kNumTemps = 0; |
3449 | LocationSummary* summary = new (zone) LocationSummary( |
3450 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
3451 | summary->set_in(0, Location::RequiresRegister()); |
3452 | summary->set_in(1, Location::RequiresRegister()); |
3453 | summary->set_out(0, Location::RequiresRegister()); |
3454 | return summary; |
3455 | } |
3456 | |
3457 | void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3458 | CheckedSmiSlowPath* slow_path = |
3459 | new CheckedSmiSlowPath(this, compiler->CurrentTryIndex()); |
3460 | compiler->AddSlowPathCode(slow_path); |
3461 | // Test operands if necessary. |
3462 | Register left = locs()->in(0).reg(); |
3463 | Register right = locs()->in(1).reg(); |
3464 | Register result = locs()->out(0).reg(); |
3465 | intptr_t left_cid = this->left()->Type()->ToCid(); |
3466 | intptr_t right_cid = this->right()->Type()->ToCid(); |
3467 | bool combined_smi_check = false; |
3468 | if (this->left()->definition() == this->right()->definition()) { |
3469 | __ BranchIfNotSmi(left, slow_path->entry_label()); |
3470 | } else if (left_cid == kSmiCid) { |
3471 | __ BranchIfNotSmi(right, slow_path->entry_label()); |
3472 | } else if (right_cid == kSmiCid) { |
3473 | __ BranchIfNotSmi(left, slow_path->entry_label()); |
3474 | } else { |
3475 | combined_smi_check = true; |
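    // ORing the operands sets the tag bit iff at least one of them is not a
    // Smi, so a single branch covers both operands.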
3476 | __ orr(result, left, compiler::Operand(right)); |
3477 | __ BranchIfNotSmi(result, slow_path->entry_label()); |
3478 | } |
3479 | |
3480 | switch (op_kind()) { |
3481 | case Token::kADD: |
3482 | __ adds(result, left, compiler::Operand(right)); |
3483 | __ b(slow_path->entry_label(), VS); |
3484 | break; |
3485 | case Token::kSUB: |
3486 | __ subs(result, left, compiler::Operand(right)); |
3487 | __ b(slow_path->entry_label(), VS); |
3488 | break; |
3489 | case Token::kMUL: |
3490 | __ SmiUntag(TMP, left); |
3491 | __ mul(result, TMP, right); |
3492 | __ smulh(TMP, TMP, right); |
3493 | // TMP: result bits 64..127. |
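      // The product fits in 64 bits iff the high half equals the sign
      // extension of the low half; otherwise fall back to the slow path.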
3494 | __ cmp(TMP, compiler::Operand(result, ASR, 63)); |
3495 | __ b(slow_path->entry_label(), NE); |
3496 | break; |
3497 | case Token::kBIT_OR: |
3498 | // Operation may be part of combined smi check. |
3499 | if (!combined_smi_check) { |
3500 | __ orr(result, left, compiler::Operand(right)); |
3501 | } |
3502 | break; |
3503 | case Token::kBIT_AND: |
3504 | __ and_(result, left, compiler::Operand(right)); |
3505 | break; |
3506 | case Token::kBIT_XOR: |
3507 | __ eor(result, left, compiler::Operand(right)); |
3508 | break; |
3509 | case Token::kSHL: |
3510 | ASSERT(result != left); |
3511 | ASSERT(result != right); |
3512 | __ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits))); |
3513 | __ b(slow_path->entry_label(), CS); |
3514 | |
3515 | __ SmiUntag(TMP, right); |
3516 | __ lslv(result, left, TMP); |
3517 | __ asrv(TMP2, result, TMP); |
3518 | __ CompareRegisters(left, TMP2); |
3519 | __ b(slow_path->entry_label(), NE); // Overflow. |
3520 | break; |
3521 | case Token::kSHR: |
3522 | ASSERT(result != left); |
3523 | ASSERT(result != right); |
3524 | __ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits))); |
3525 | __ b(slow_path->entry_label(), CS); |
3526 | |
3527 | __ SmiUntag(result, right); |
3528 | __ SmiUntag(TMP, left); |
3529 | __ asrv(result, TMP, result); |
3530 | __ SmiTag(result); |
3531 | break; |
3532 | default: |
3533 | UNIMPLEMENTED(); |
3534 | } |
3535 | __ Bind(slow_path->exit_label()); |
3536 | } |
3537 | |
3538 | class CheckedSmiComparisonSlowPath |
3539 | : public TemplateSlowPathCode<CheckedSmiComparisonInstr> { |
3540 | public: |
3541 | static constexpr intptr_t kNumSlowPathArgs = 2; |
3542 | |
3543 | CheckedSmiComparisonSlowPath(CheckedSmiComparisonInstr* instruction, |
3544 | Environment* env, |
3545 | intptr_t try_index, |
3546 | BranchLabels labels, |
3547 | bool merged) |
3548 | : TemplateSlowPathCode(instruction), |
3549 | try_index_(try_index), |
3550 | labels_(labels), |
3551 | merged_(merged), |
3552 | env_(env) { |
3553 | // The environment must either come from the comparison or the environment |
3554 | // was cleared from the comparison (and moved to a branch). |
3555 | ASSERT(env == instruction->env() || |
3556 | (merged && instruction->env() == nullptr)); |
3557 | } |
3558 | |
3559 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
3560 | if (compiler::Assembler::EmittingComments()) { |
      __ Comment("slow path smi operation");
3562 | } |
3563 | __ Bind(entry_label()); |
3564 | LocationSummary* locs = instruction()->locs(); |
3565 | Register result = merged_ ? locs->temp(0).reg() : locs->out(0).reg(); |
3566 | locs->live_registers()->Remove(Location::RegisterLocation(result)); |
3567 | |
3568 | compiler->SaveLiveRegisters(locs); |
3569 | if (env_ != nullptr) { |
3570 | compiler->pending_deoptimization_env_ = |
3571 | compiler->SlowPathEnvironmentFor(env_, locs, kNumSlowPathArgs); |
3572 | } |
3573 | __ PushPair(locs->in(1).reg(), locs->in(0).reg()); |
3574 | const auto& selector = String::Handle(instruction()->call()->Selector()); |
3575 | const auto& arguments_descriptor = |
3576 | Array::Handle(ArgumentsDescriptor::NewBoxed( |
3577 | /*type_args_len=*/0, /*num_arguments=*/2)); |
3578 | compiler->EmitMegamorphicInstanceCall( |
3579 | selector, arguments_descriptor, instruction()->call()->deopt_id(), |
3580 | instruction()->token_pos(), locs, try_index_, kNumSlowPathArgs); |
3581 | __ mov(result, R0); |
3582 | compiler->RestoreLiveRegisters(locs); |
3583 | compiler->pending_deoptimization_env_ = nullptr; |
3584 | if (merged_) { |
3585 | __ CompareObject(result, Bool::True()); |
3586 | __ b(instruction()->is_negated() ? labels_.false_label |
3587 | : labels_.true_label, |
3588 | EQ); |
3589 | __ b(instruction()->is_negated() ? labels_.true_label |
3590 | : labels_.false_label); |
3591 | ASSERT(exit_label()->IsUnused()); |
3592 | } else { |
3593 | ASSERT(!instruction()->is_negated()); |
3594 | __ b(exit_label()); |
3595 | } |
3596 | } |
3597 | |
3598 | private: |
3599 | intptr_t try_index_; |
3600 | BranchLabels labels_; |
3601 | bool merged_; |
3602 | Environment* env_; |
3603 | }; |
3604 | |
3605 | LocationSummary* CheckedSmiComparisonInstr::MakeLocationSummary( |
3606 | Zone* zone, |
3607 | bool opt) const { |
3608 | const intptr_t kNumInputs = 2; |
3609 | const intptr_t kNumTemps = 1; |
3610 | LocationSummary* summary = new (zone) LocationSummary( |
3611 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
3612 | summary->set_in(0, Location::RequiresRegister()); |
3613 | summary->set_in(1, Location::RequiresRegister()); |
3614 | summary->set_temp(0, Location::RequiresRegister()); |
3615 | summary->set_out(0, Location::RequiresRegister()); |
3616 | return summary; |
3617 | } |
3618 | |
3619 | Condition CheckedSmiComparisonInstr::EmitComparisonCode( |
3620 | FlowGraphCompiler* compiler, |
3621 | BranchLabels labels) { |
3622 | return EmitInt64ComparisonOp(compiler, locs(), kind(), labels); |
3623 | } |
3624 | |
3625 | #define EMIT_SMI_CHECK \ |
3626 | Register left = locs()->in(0).reg(); \ |
3627 | Register right = locs()->in(1).reg(); \ |
3628 | Register temp = locs()->temp(0).reg(); \ |
3629 | intptr_t left_cid = this->left()->Type()->ToCid(); \ |
3630 | intptr_t right_cid = this->right()->Type()->ToCid(); \ |
3631 | if (this->left()->definition() == this->right()->definition()) { \ |
3632 | __ BranchIfNotSmi(left, slow_path->entry_label()); \ |
3633 | } else if (left_cid == kSmiCid) { \ |
3634 | __ BranchIfNotSmi(right, slow_path->entry_label()); \ |
3635 | } else if (right_cid == kSmiCid) { \ |
3636 | __ BranchIfNotSmi(left, slow_path->entry_label()); \ |
3637 | } else { \ |
3638 | __ orr(temp, left, compiler::Operand(right)); \ |
3639 | __ BranchIfNotSmi(temp, slow_path->entry_label()); \ |
3640 | } |
3641 | |
3642 | void CheckedSmiComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
3643 | BranchInstr* branch) { |
3644 | BranchLabels labels = compiler->CreateBranchLabels(branch); |
3645 | CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( |
3646 | this, branch->env(), compiler->CurrentTryIndex(), labels, |
3647 | /* merged = */ true); |
3648 | compiler->AddSlowPathCode(slow_path); |
3649 | EMIT_SMI_CHECK; |
3650 | Condition true_condition = EmitComparisonCode(compiler, labels); |
3651 | if (true_condition != kInvalidCondition) { |
3652 | EmitBranchOnCondition(compiler, true_condition, labels); |
3653 | } |
3654 | // No need to bind slow_path->exit_label() as slow path exits through |
3655 | // true/false branch labels. |
3656 | } |
3657 | |
3658 | void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
  // Zone-allocate labels to pass them to the slow path, which outlives the
  // local scope.
3660 | compiler::Label* true_label = new (Z) compiler::Label(); |
3661 | compiler::Label* false_label = new (Z) compiler::Label(); |
3662 | compiler::Label done; |
3663 | BranchLabels labels = {true_label, false_label, false_label}; |
  // In case of a negated comparison the result of the slow path call should
  // be negated. For this purpose a 'merged' slow path is generated: it tests
  // the result of the call and jumps directly to the true or false label.
3667 | CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( |
3668 | this, env(), compiler->CurrentTryIndex(), labels, |
3669 | /* merged = */ is_negated()); |
3670 | compiler->AddSlowPathCode(slow_path); |
3671 | EMIT_SMI_CHECK; |
3672 | Condition true_condition = EmitComparisonCode(compiler, labels); |
3673 | if (true_condition != kInvalidCondition) { |
3674 | EmitBranchOnCondition(compiler, true_condition, labels); |
3675 | } |
3676 | Register result = locs()->out(0).reg(); |
3677 | __ Bind(false_label); |
3678 | __ LoadObject(result, Bool::False()); |
3679 | __ b(&done); |
3680 | __ Bind(true_label); |
3681 | __ LoadObject(result, Bool::True()); |
3682 | __ Bind(&done); |
  // In case of a negated comparison the slow path exits through the
  // true/false labels.
3684 | if (!is_negated()) { |
3685 | __ Bind(slow_path->exit_label()); |
3686 | } |
3687 | } |
3688 | |
3689 | LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone, |
3690 | bool opt) const { |
3691 | const intptr_t kNumInputs = 2; |
3692 | const intptr_t kNumTemps = (((op_kind() == Token::kSHL) && can_overflow()) || |
3693 | (op_kind() == Token::kSHR)) |
3694 | ? 1 |
3695 | : 0; |
3696 | LocationSummary* summary = new (zone) |
3697 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3698 | if (op_kind() == Token::kTRUNCDIV) { |
3699 | summary->set_in(0, Location::RequiresRegister()); |
3700 | if (RightIsPowerOfTwoConstant()) { |
3701 | ConstantInstr* right_constant = right()->definition()->AsConstant(); |
3702 | summary->set_in(1, Location::Constant(right_constant)); |
3703 | } else { |
3704 | summary->set_in(1, Location::RequiresRegister()); |
3705 | } |
3706 | summary->set_out(0, Location::RequiresRegister()); |
3707 | return summary; |
3708 | } |
3709 | if (op_kind() == Token::kMOD) { |
3710 | summary->set_in(0, Location::RequiresRegister()); |
3711 | summary->set_in(1, Location::RequiresRegister()); |
3712 | summary->set_out(0, Location::RequiresRegister()); |
3713 | return summary; |
3714 | } |
3715 | summary->set_in(0, Location::RequiresRegister()); |
3716 | summary->set_in(1, LocationRegisterOrSmiConstant(right())); |
3717 | if (((op_kind() == Token::kSHL) && can_overflow()) || |
3718 | (op_kind() == Token::kSHR)) { |
3719 | summary->set_temp(0, Location::RequiresRegister()); |
3720 | } |
  // We make use of 3-operand instructions by not requiring the result
  // register to be identical to the first input register, as on Intel.
3723 | summary->set_out(0, Location::RequiresRegister()); |
3724 | return summary; |
3725 | } |
3726 | |
3727 | void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3728 | if (op_kind() == Token::kSHL) { |
3729 | EmitSmiShiftLeft(compiler, this); |
3730 | return; |
3731 | } |
3732 | |
3733 | const Register left = locs()->in(0).reg(); |
3734 | const Register result = locs()->out(0).reg(); |
3735 | compiler::Label* deopt = NULL; |
3736 | if (CanDeoptimize()) { |
3737 | deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); |
3738 | } |
3739 | |
3740 | if (locs()->in(1).IsConstant()) { |
3741 | const Object& constant = locs()->in(1).constant(); |
3742 | ASSERT(constant.IsSmi()); |
3743 | const int64_t imm = static_cast<int64_t>(constant.raw()); |
3744 | switch (op_kind()) { |
3745 | case Token::kADD: { |
3746 | if (deopt == NULL) { |
3747 | __ AddImmediate(result, left, imm); |
3748 | } else { |
3749 | __ AddImmediateSetFlags(result, left, imm); |
3750 | __ b(deopt, VS); |
3751 | } |
3752 | break; |
3753 | } |
3754 | case Token::kSUB: { |
3755 | if (deopt == NULL) { |
3756 | __ AddImmediate(result, left, -imm); |
3757 | } else { |
3758 | // Negating imm and using AddImmediateSetFlags would not detect the |
3759 | // overflow when imm == kMinInt64. |
3760 | __ SubImmediateSetFlags(result, left, imm); |
3761 | __ b(deopt, VS); |
3762 | } |
3763 | break; |
3764 | } |
3765 | case Token::kMUL: { |
3766 | // Keep left value tagged and untag right value. |
3767 | const intptr_t value = Smi::Cast(constant).Value(); |
3768 | __ LoadImmediate(TMP, value); |
3769 | __ mul(result, left, TMP); |
3770 | if (deopt != NULL) { |
3771 | __ smulh(TMP, left, TMP); |
3772 | // TMP: result bits 64..127. |
3773 | __ cmp(TMP, compiler::Operand(result, ASR, 63)); |
3774 | __ b(deopt, NE); |
3775 | } |
3776 | break; |
3777 | } |
3778 | case Token::kTRUNCDIV: { |
3779 | const intptr_t value = Smi::Cast(constant).Value(); |
3780 | ASSERT(value != kIntptrMin); |
3781 | ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value))); |
3782 | const intptr_t shift_count = |
3783 | Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize; |
3784 | ASSERT(kSmiTagSize == 1); |
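        // For a negative dividend add 2^shift_count - 1 before the
        // arithmetic shift so the division truncates towards zero rather
        // than towards negative infinity: TMP is all ones for negative
        // values and zero otherwise, and LSR by (64 - shift_count) extracts
        // exactly the low shift_count bits.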
3785 | __ AsrImmediate(TMP, left, 63); |
3786 | ASSERT(shift_count > 1); // 1, -1 case handled above. |
3787 | const Register temp = TMP2; |
3788 | __ add(temp, left, compiler::Operand(TMP, LSR, 64 - shift_count)); |
3789 | ASSERT(shift_count > 0); |
3790 | __ AsrImmediate(result, temp, shift_count); |
3791 | if (value < 0) { |
3792 | __ sub(result, ZR, compiler::Operand(result)); |
3793 | } |
3794 | __ SmiTag(result); |
3795 | break; |
3796 | } |
3797 | case Token::kBIT_AND: |
3798 | // No overflow check. |
3799 | __ AndImmediate(result, left, imm); |
3800 | break; |
3801 | case Token::kBIT_OR: |
3802 | // No overflow check. |
3803 | __ OrImmediate(result, left, imm); |
3804 | break; |
3805 | case Token::kBIT_XOR: |
3806 | // No overflow check. |
3807 | __ XorImmediate(result, left, imm); |
3808 | break; |
3809 | case Token::kSHR: { |
3810 | // Asr operation masks the count to 6 bits. |
3811 | const intptr_t kCountLimit = 0x3F; |
3812 | intptr_t value = Smi::Cast(constant).Value(); |
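        // 'left' is still a tagged Smi, so shifting right by one extra bit
        // untags it on the fly; SmiTag below re-tags the result.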
3813 | __ AsrImmediate(result, left, |
3814 | Utils::Minimum(value + kSmiTagSize, kCountLimit)); |
3815 | __ SmiTag(result); |
3816 | break; |
3817 | } |
3818 | default: |
3819 | UNREACHABLE(); |
3820 | break; |
3821 | } |
3822 | return; |
3823 | } |
3824 | |
3825 | const Register right = locs()->in(1).reg(); |
3826 | switch (op_kind()) { |
3827 | case Token::kADD: { |
3828 | if (deopt == NULL) { |
3829 | __ add(result, left, compiler::Operand(right)); |
3830 | } else { |
3831 | __ adds(result, left, compiler::Operand(right)); |
3832 | __ b(deopt, VS); |
3833 | } |
3834 | break; |
3835 | } |
3836 | case Token::kSUB: { |
3837 | if (deopt == NULL) { |
3838 | __ sub(result, left, compiler::Operand(right)); |
3839 | } else { |
3840 | __ subs(result, left, compiler::Operand(right)); |
3841 | __ b(deopt, VS); |
3842 | } |
3843 | break; |
3844 | } |
3845 | case Token::kMUL: { |
3846 | __ SmiUntag(TMP, left); |
3847 | if (deopt == NULL) { |
3848 | __ mul(result, TMP, right); |
3849 | } else { |
3850 | __ mul(result, TMP, right); |
3851 | __ smulh(TMP, TMP, right); |
3852 | // TMP: result bits 64..127. |
3853 | __ cmp(TMP, compiler::Operand(result, ASR, 63)); |
3854 | __ b(deopt, NE); |
3855 | } |
3856 | break; |
3857 | } |
3858 | case Token::kBIT_AND: { |
3859 | // No overflow check. |
3860 | __ and_(result, left, compiler::Operand(right)); |
3861 | break; |
3862 | } |
3863 | case Token::kBIT_OR: { |
3864 | // No overflow check. |
3865 | __ orr(result, left, compiler::Operand(right)); |
3866 | break; |
3867 | } |
3868 | case Token::kBIT_XOR: { |
3869 | // No overflow check. |
3870 | __ eor(result, left, compiler::Operand(right)); |
3871 | break; |
3872 | } |
3873 | case Token::kTRUNCDIV: { |
3874 | if (RangeUtils::CanBeZero(right_range())) { |
3875 | // Handle divide by zero in runtime. |
3876 | __ CompareRegisters(right, ZR); |
3877 | __ b(deopt, EQ); |
3878 | } |
3879 | const Register temp = TMP2; |
3880 | __ SmiUntag(temp, left); |
3881 | __ SmiUntag(TMP, right); |
3882 | |
3883 | __ sdiv(result, temp, TMP); |
3884 | if (RangeUtils::Overlaps(right_range(), -1, -1)) { |
3885 | // Check the corner case of dividing the 'MIN_SMI' with -1, in which |
3886 | // case we cannot tag the result. |
3887 | __ CompareImmediate(result, 0x4000000000000000LL); |
3888 | __ b(deopt, EQ); |
3889 | } |
3890 | __ SmiTag(result); |
3891 | break; |
3892 | } |
3893 | case Token::kMOD: { |
3894 | if (RangeUtils::CanBeZero(right_range())) { |
3895 | // Handle divide by zero in runtime. |
3896 | __ CompareRegisters(right, ZR); |
3897 | __ b(deopt, EQ); |
3898 | } |
3899 | const Register temp = TMP2; |
3900 | __ SmiUntag(temp, left); |
3901 | __ SmiUntag(TMP, right); |
3902 | |
3903 | __ sdiv(result, temp, TMP); |
3904 | |
3905 | __ SmiUntag(TMP, right); |
3906 | __ msub(result, TMP, result, temp); // result <- left - right * result |
3907 | __ SmiTag(result); |
3908 | // res = left % right; |
3909 | // if (res < 0) { |
3910 | // if (right < 0) { |
3911 | // res = res - right; |
3912 | // } else { |
3913 | // res = res + right; |
3914 | // } |
3915 | // } |
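      // E.g. left = -7, right = 3: sdiv truncates to -2, msub computes
      // -7 - 3 * -2 = -1, which is negative, so it is adjusted to
      // -1 + 3 = 2, matching Dart's -7 % 3.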
3916 | compiler::Label done; |
3917 | __ CompareRegisters(result, ZR); |
3918 | __ b(&done, GE); |
3919 | // Result is negative, adjust it. |
3920 | __ CompareRegisters(right, ZR); |
3921 | __ sub(TMP, result, compiler::Operand(right)); |
3922 | __ add(result, result, compiler::Operand(right)); |
3923 | __ csel(result, TMP, result, LT); |
3924 | __ Bind(&done); |
3925 | break; |
3926 | } |
3927 | case Token::kSHR: { |
3928 | if (CanDeoptimize()) { |
3929 | __ CompareRegisters(right, ZR); |
3930 | __ b(deopt, LT); |
3931 | } |
3932 | __ SmiUntag(TMP, right); |
      // The asrv operation masks the shift count to 6 bits.
3934 | const intptr_t kCountLimit = 0x3F; |
3935 | if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) { |
3936 | __ LoadImmediate(TMP2, kCountLimit); |
3937 | __ CompareRegisters(TMP, TMP2); |
3938 | __ csel(TMP, TMP2, TMP, GT); |
3939 | } |
3940 | const Register temp = locs()->temp(0).reg(); |
3941 | __ SmiUntag(temp, left); |
3942 | __ asrv(result, temp, TMP); |
3943 | __ SmiTag(result); |
3944 | break; |
3945 | } |
3946 | case Token::kDIV: { |
3947 | // Dispatches to 'Double./'. |
3948 | // TODO(srdjan): Implement as conversion to double and double division. |
3949 | UNREACHABLE(); |
3950 | break; |
3951 | } |
3952 | case Token::kOR: |
3953 | case Token::kAND: { |
3954 | // Flow graph builder has dissected this operation to guarantee correct |
3955 | // behavior (short-circuit evaluation). |
3956 | UNREACHABLE(); |
3957 | break; |
3958 | } |
3959 | default: |
3960 | UNREACHABLE(); |
3961 | break; |
3962 | } |
3963 | } |
3964 | |
3965 | LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone, |
3966 | bool opt) const { |
3967 | intptr_t left_cid = left()->Type()->ToCid(); |
3968 | intptr_t right_cid = right()->Type()->ToCid(); |
3969 | ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid)); |
3970 | const intptr_t kNumInputs = 2; |
3971 | const intptr_t kNumTemps = 0; |
3972 | LocationSummary* summary = new (zone) |
3973 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3974 | summary->set_in(0, Location::RequiresRegister()); |
3975 | summary->set_in(1, Location::RequiresRegister()); |
3976 | return summary; |
3977 | } |
3978 | |
3979 | void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3980 | compiler::Label* deopt = |
3981 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp, |
3982 | licm_hoisted_ ? ICData::kHoisted : 0); |
3983 | intptr_t left_cid = left()->Type()->ToCid(); |
3984 | intptr_t right_cid = right()->Type()->ToCid(); |
3985 | const Register left = locs()->in(0).reg(); |
3986 | const Register right = locs()->in(1).reg(); |
3987 | if (this->left()->definition() == this->right()->definition()) { |
3988 | __ BranchIfSmi(left, deopt); |
3989 | } else if (left_cid == kSmiCid) { |
3990 | __ BranchIfSmi(right, deopt); |
3991 | } else if (right_cid == kSmiCid) { |
3992 | __ BranchIfSmi(left, deopt); |
3993 | } else { |
3994 | __ orr(TMP, left, compiler::Operand(right)); |
3995 | __ BranchIfSmi(TMP, deopt); |
3996 | } |
3997 | } |
3998 | |
3999 | LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
4000 | const intptr_t kNumInputs = 1; |
4001 | const intptr_t kNumTemps = 1; |
4002 | LocationSummary* summary = new (zone) LocationSummary( |
4003 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
4004 | summary->set_in(0, Location::RequiresFpuRegister()); |
4005 | summary->set_temp(0, Location::RequiresRegister()); |
4006 | summary->set_out(0, Location::RequiresRegister()); |
4007 | return summary; |
4008 | } |
4009 | |
4010 | void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4011 | const Register out_reg = locs()->out(0).reg(); |
4012 | const Register temp_reg = locs()->temp(0).reg(); |
4013 | const VRegister value = locs()->in(0).fpu_reg(); |
4014 | |
4015 | BoxAllocationSlowPath::Allocate(compiler, this, |
4016 | compiler->BoxClassFor(from_representation()), |
4017 | out_reg, temp_reg); |
4018 | |
4019 | switch (from_representation()) { |
4020 | case kUnboxedDouble: |
4021 | __ StoreDFieldToOffset(value, out_reg, ValueOffset()); |
4022 | break; |
4023 | case kUnboxedFloat: |
4024 | __ fcvtds(FpuTMP, value); |
4025 | __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset()); |
4026 | break; |
4027 | case kUnboxedFloat32x4: |
4028 | case kUnboxedFloat64x2: |
4029 | case kUnboxedInt32x4: |
4030 | __ StoreQFieldToOffset(value, out_reg, ValueOffset()); |
4031 | break; |
4032 | default: |
4033 | UNREACHABLE(); |
4034 | break; |
4035 | } |
4036 | } |
4037 | |
4038 | LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
4039 | const intptr_t kNumInputs = 1; |
4040 | const intptr_t kNumTemps = 0; |
4041 | const bool is_floating_point = |
4042 | representation() != kUnboxedInt64 && representation() != kUnboxedInt32; |
4043 | LocationSummary* summary = new (zone) |
4044 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4045 | summary->set_in(0, Location::RequiresRegister()); |
4046 | summary->set_out(0, is_floating_point ? Location::RequiresFpuRegister() |
4047 | : Location::RequiresRegister()); |
4048 | return summary; |
4049 | } |
4050 | |
4051 | void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) { |
4052 | const Register box = locs()->in(0).reg(); |
4053 | |
4054 | switch (representation()) { |
4055 | case kUnboxedInt64: { |
4056 | const Register result = locs()->out(0).reg(); |
4057 | __ ldr(result, compiler::FieldAddress(box, ValueOffset())); |
4058 | break; |
4059 | } |
4060 | |
4061 | case kUnboxedDouble: { |
4062 | const VRegister result = locs()->out(0).fpu_reg(); |
4063 | __ LoadDFieldFromOffset(result, box, ValueOffset()); |
4064 | break; |
4065 | } |
4066 | |
4067 | case kUnboxedFloat: { |
4068 | const VRegister result = locs()->out(0).fpu_reg(); |
4069 | __ LoadDFieldFromOffset(result, box, ValueOffset()); |
4070 | __ fcvtsd(result, result); |
4071 | break; |
4072 | } |
4073 | |
4074 | case kUnboxedFloat32x4: |
4075 | case kUnboxedFloat64x2: |
4076 | case kUnboxedInt32x4: { |
4077 | const VRegister result = locs()->out(0).fpu_reg(); |
4078 | __ LoadQFieldFromOffset(result, box, ValueOffset()); |
4079 | break; |
4080 | } |
4081 | |
4082 | default: |
4083 | UNREACHABLE(); |
4084 | break; |
4085 | } |
4086 | } |
4087 | |
4088 | void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) { |
4089 | const Register box = locs()->in(0).reg(); |
4090 | |
4091 | switch (representation()) { |
4092 | case kUnboxedInt64: { |
4093 | const Register result = locs()->out(0).reg(); |
4094 | __ SmiUntag(result, box); |
4095 | break; |
4096 | } |
4097 | |
4098 | case kUnboxedDouble: { |
4099 | const VRegister result = locs()->out(0).fpu_reg(); |
4100 | __ SmiUntag(TMP, box); |
4101 | __ scvtfdx(result, TMP); |
4102 | break; |
4103 | } |
4104 | |
4105 | default: |
4106 | UNREACHABLE(); |
4107 | break; |
4108 | } |
4109 | } |
4110 | |
4111 | void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) { |
4112 | const Register value = locs()->in(0).reg(); |
4113 | const Register result = locs()->out(0).reg(); |
4114 | ASSERT(value != result); |
4115 | compiler::Label done; |
4116 | __ SmiUntag(result, value); |
4117 | __ BranchIfSmi(value, &done); |
4118 | __ ldr(result, compiler::FieldAddress(value, Mint::value_offset()), kWord); |
4120 | __ Bind(&done); |
4121 | } |
4122 | |
4123 | void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) { |
4124 | const Register value = locs()->in(0).reg(); |
4125 | const Register result = locs()->out(0).reg(); |
4126 | ASSERT(value != result); |
4127 | compiler::Label done; |
4128 | __ SmiUntag(result, value); |
4129 | __ BranchIfSmi(value, &done); |
4130 | __ LoadFieldFromOffset(result, value, Mint::value_offset()); |
4131 | __ Bind(&done); |
4132 | } |
4133 | |
4134 | LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone, |
4135 | bool opt) const { |
4136 | ASSERT((from_representation() == kUnboxedInt32) || |
4137 | (from_representation() == kUnboxedUint32)); |
4138 | const intptr_t kNumInputs = 1; |
4139 | const intptr_t kNumTemps = 0; |
4140 | LocationSummary* summary = new (zone) |
4141 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4142 | summary->set_in(0, Location::RequiresRegister()); |
4143 | summary->set_out(0, Location::RequiresRegister()); |
4144 | return summary; |
4145 | } |
4146 | |
4147 | void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4148 | Register value = locs()->in(0).reg(); |
4149 | Register out = locs()->out(0).reg(); |
4150 | ASSERT(value != out); |
4151 | |
4152 | ASSERT(kSmiTagSize == 1); |
4153 | // TODO(vegorov) implement and use UBFM/SBFM for this. |
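  // Shifting left by 32 and back right by 31 sign- or zero-extends the
  // 32-bit value while leaving it shifted left by one, i.e. a tagged Smi.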
4154 | __ LslImmediate(out, value, 32); |
4155 | if (from_representation() == kUnboxedInt32) { |
4156 | __ AsrImmediate(out, out, 32 - kSmiTagSize); |
4157 | } else { |
4158 | ASSERT(from_representation() == kUnboxedUint32); |
4159 | __ LsrImmediate(out, out, 32 - kSmiTagSize); |
4160 | } |
4161 | } |
4162 | |
4163 | LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone, |
4164 | bool opt) const { |
4165 | const intptr_t kNumInputs = 1; |
4166 | const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1; |
  // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
  // FLAG_use_bare_instructions mode and only after VM isolate stubs were
  // replaced with isolate-specific stubs.
4170 | auto object_store = Isolate::Current()->object_store(); |
4171 | const bool stubs_in_vm_isolate = |
4172 | object_store->allocate_mint_with_fpu_regs_stub() |
4173 | ->ptr() |
4174 | ->InVMIsolateHeap() || |
4175 | object_store->allocate_mint_without_fpu_regs_stub() |
4176 | ->ptr() |
4177 | ->InVMIsolateHeap(); |
4178 | const bool shared_slow_path_call = SlowPathSharingSupported(opt) && |
4179 | FLAG_use_bare_instructions && |
4180 | !stubs_in_vm_isolate; |
4181 | LocationSummary* summary = new (zone) LocationSummary( |
4182 | zone, kNumInputs, kNumTemps, |
4183 | ValueFitsSmi() |
4184 | ? LocationSummary::kNoCall |
4185 | : shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath |
4186 | : LocationSummary::kCallOnSlowPath); |
4187 | summary->set_in(0, Location::RequiresRegister()); |
4188 | if (ValueFitsSmi()) { |
4189 | summary->set_out(0, Location::RequiresRegister()); |
4190 | } else if (shared_slow_path_call) { |
4191 | summary->set_out(0, |
4192 | Location::RegisterLocation(AllocateMintABI::kResultReg)); |
4193 | summary->set_temp(0, Location::RegisterLocation(AllocateMintABI::kTempReg)); |
4194 | } else { |
4195 | summary->set_out(0, Location::RequiresRegister()); |
4196 | summary->set_temp(0, Location::RequiresRegister()); |
4197 | } |
4198 | return summary; |
4199 | } |
4200 | |
4201 | void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4202 | Register in = locs()->in(0).reg(); |
4203 | Register out = locs()->out(0).reg(); |
4204 | if (ValueFitsSmi()) { |
4205 | __ SmiTag(out, in); |
4206 | return; |
4207 | } |
4208 | ASSERT(kSmiTag == 0); |
4209 | __ adds(out, in, compiler::Operand(in)); // SmiTag |
4210 | compiler::Label done; |
4211 | // If the value doesn't fit in a smi, the tagging changes the sign, |
4212 | // which causes the overflow flag to be set. |
4213 | __ b(&done, NO_OVERFLOW); |
4214 | |
4215 | Register temp = locs()->temp(0).reg(); |
4216 | if (compiler->intrinsic_mode()) { |
4217 | __ TryAllocate(compiler->mint_class(), |
4218 | compiler->intrinsic_slow_path_label(), out, temp); |
4219 | } else if (locs()->call_on_shared_slow_path()) { |
4220 | auto object_store = compiler->isolate()->object_store(); |
4221 | const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0; |
4222 | const auto& stub = Code::ZoneHandle( |
4223 | compiler->zone(), |
4224 | live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub() |
4225 | : object_store->allocate_mint_without_fpu_regs_stub()); |
4226 | |
4227 | ASSERT(!locs()->live_registers()->ContainsRegister( |
4228 | AllocateMintABI::kResultReg)); |
4229 | auto extended_env = compiler->SlowPathEnvironmentFor(this, 0); |
4230 | compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther, |
4231 | locs(), DeoptId::kNone, extended_env); |
4232 | } else { |
4233 | BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out, |
4234 | temp); |
4235 | } |
4236 | |
4237 | __ StoreToOffset(in, out, Mint::value_offset() - kHeapObjectTag); |
4238 | __ Bind(&done); |
4239 | } |
4240 | |
4241 | LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone, |
4242 | bool opt) const { |
4243 | const intptr_t kNumInputs = 1; |
4244 | const intptr_t kNumTemps = 0; |
4245 | LocationSummary* summary = new (zone) |
4246 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4247 | summary->set_in(0, Location::RequiresRegister()); |
4248 | summary->set_out(0, Location::RequiresRegister()); |
4249 | return summary; |
4250 | } |
4251 | |
4252 | void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4253 | const intptr_t value_cid = value()->Type()->ToCid(); |
4254 | const Register out = locs()->out(0).reg(); |
4255 | const Register value = locs()->in(0).reg(); |
4256 | compiler::Label* deopt = |
4257 | CanDeoptimize() |
4258 | ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger) |
4259 | : NULL; |
4260 | |
4261 | if (value_cid == kSmiCid) { |
4262 | __ SmiUntag(out, value); |
4263 | } else if (value_cid == kMintCid) { |
4264 | __ LoadFieldFromOffset(out, value, Mint::value_offset()); |
4265 | } else if (!CanDeoptimize()) { |
4266 | // Type information is not conclusive, but range analysis found |
4267 | // the value to be in int64 range. Therefore it must be a smi |
4268 | // or mint value. |
4269 | ASSERT(is_truncating()); |
4270 | compiler::Label done; |
4271 | __ SmiUntag(out, value); |
4272 | __ BranchIfSmi(value, &done); |
4273 | __ LoadFieldFromOffset(out, value, Mint::value_offset()); |
4274 | __ Bind(&done); |
4275 | } else { |
4276 | compiler::Label done; |
4277 | __ SmiUntag(out, value); |
4278 | __ BranchIfSmi(value, &done); |
4279 | __ CompareClassId(value, kMintCid); |
4280 | __ b(deopt, NE); |
4281 | __ LoadFieldFromOffset(out, value, Mint::value_offset()); |
4282 | __ Bind(&done); |
4283 | } |
4284 | |
  // TODO(vegorov): as it is implemented right now, truncating unboxing would
  // leave "garbage" in the higher word.
4287 | if (!is_truncating() && (deopt != NULL)) { |
4288 | ASSERT(representation() == kUnboxedInt32); |
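    // Deoptimize if the value does not fit into 32 bits, i.e. if
    // sign-extending its low 32 bits does not reproduce the original value.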
4289 | __ cmp(out, compiler::Operand(out, SXTW, 0)); |
4290 | __ b(deopt, NE); |
4291 | } |
4292 | } |
4293 | |
4294 | LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone, |
4295 | bool opt) const { |
4296 | const intptr_t kNumInputs = 2; |
4297 | const intptr_t kNumTemps = 0; |
4298 | LocationSummary* summary = new (zone) |
4299 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4300 | summary->set_in(0, Location::RequiresFpuRegister()); |
4301 | summary->set_in(1, Location::RequiresFpuRegister()); |
4302 | summary->set_out(0, Location::RequiresFpuRegister()); |
4303 | return summary; |
4304 | } |
4305 | |
4306 | void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4307 | const VRegister left = locs()->in(0).fpu_reg(); |
4308 | const VRegister right = locs()->in(1).fpu_reg(); |
4309 | const VRegister result = locs()->out(0).fpu_reg(); |
4310 | switch (op_kind()) { |
4311 | case Token::kADD: |
4312 | __ faddd(result, left, right); |
4313 | break; |
4314 | case Token::kSUB: |
4315 | __ fsubd(result, left, right); |
4316 | break; |
4317 | case Token::kMUL: |
4318 | __ fmuld(result, left, right); |
4319 | break; |
4320 | case Token::kDIV: |
4321 | __ fdivd(result, left, right); |
4322 | break; |
4323 | default: |
4324 | UNREACHABLE(); |
4325 | } |
4326 | } |
4327 | |
4328 | LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone, |
4329 | bool opt) const { |
4330 | const intptr_t kNumInputs = 1; |
4331 | const intptr_t kNumTemps = |
4332 | op_kind() == MethodRecognizer::kDouble_getIsInfinite ? 1 : 0; |
4333 | LocationSummary* summary = new (zone) |
4334 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4335 | summary->set_in(0, Location::RequiresFpuRegister()); |
4336 | if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) { |
4337 | summary->set_temp(0, Location::RequiresRegister()); |
4338 | } |
4339 | summary->set_out(0, Location::RequiresRegister()); |
4340 | return summary; |
4341 | } |
4342 | |
4343 | Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
4344 | BranchLabels labels) { |
4345 | ASSERT(compiler->is_optimizing()); |
4346 | const VRegister value = locs()->in(0).fpu_reg(); |
4347 | const bool is_negated = kind() != Token::kEQ; |
4348 | if (op_kind() == MethodRecognizer::kDouble_getIsNaN) { |
4349 | __ fcmpd(value, value); |
4350 | return is_negated ? VC : VS; |
4351 | } else { |
4352 | ASSERT(op_kind() == MethodRecognizer::kDouble_getIsInfinite); |
4353 | const Register temp = locs()->temp(0).reg(); |
4354 | __ vmovrd(temp, value, 0); |
4355 | // Mask off the sign. |
4356 | __ AndImmediate(temp, temp, 0x7FFFFFFFFFFFFFFFLL); |
4357 | // Compare with +infinity. |
4358 | __ CompareImmediate(temp, 0x7FF0000000000000LL); |
4359 | return is_negated ? NE : EQ; |
4360 | } |
4361 | } |
4362 | |
4363 | // SIMD |
4364 | |
4365 | #define DEFINE_EMIT(Name, Args) \ |
4366 | static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \ |
4367 | PP_APPLY(PP_UNPACK, Args)) |
4368 | |
4369 | #define SIMD_OP_FLOAT_ARITH(V, Name, op) \ |
4370 | V(Float32x4##Name, op##s) \ |
4371 | V(Float64x2##Name, op##d) |
4372 | |
4373 | #define SIMD_OP_SIMPLE_BINARY(V) \ |
4374 | SIMD_OP_FLOAT_ARITH(V, Add, vadd) \ |
4375 | SIMD_OP_FLOAT_ARITH(V, Sub, vsub) \ |
4376 | SIMD_OP_FLOAT_ARITH(V, Mul, vmul) \ |
4377 | SIMD_OP_FLOAT_ARITH(V, Div, vdiv) \ |
4378 | SIMD_OP_FLOAT_ARITH(V, Min, vmin) \ |
4379 | SIMD_OP_FLOAT_ARITH(V, Max, vmax) \ |
4380 | V(Int32x4Add, vaddw) \ |
4381 | V(Int32x4Sub, vsubw) \ |
4382 | V(Int32x4BitAnd, vand) \ |
4383 | V(Int32x4BitOr, vorr) \ |
4384 | V(Int32x4BitXor, veor) \ |
4385 | V(Float32x4Equal, vceqs) \ |
4386 | V(Float32x4GreaterThan, vcgts) \ |
4387 | V(Float32x4GreaterThanOrEqual, vcges) |
4388 | |
4389 | DEFINE_EMIT(SimdBinaryOp, (VRegister result, VRegister left, VRegister right)) { |
4390 | switch (instr->kind()) { |
4391 | #define EMIT(Name, op) \ |
4392 | case SimdOpInstr::k##Name: \ |
4393 | __ op(result, left, right); \ |
4394 | break; |
4395 | SIMD_OP_SIMPLE_BINARY(EMIT) |
4396 | #undef EMIT |
4397 | case SimdOpInstr::kFloat32x4ShuffleMix: |
4398 | case SimdOpInstr::kInt32x4ShuffleMix: { |
4399 | const intptr_t mask = instr->mask(); |
4400 | __ vinss(result, 0, left, (mask >> 0) & 0x3); |
4401 | __ vinss(result, 1, left, (mask >> 2) & 0x3); |
4402 | __ vinss(result, 2, right, (mask >> 4) & 0x3); |
4403 | __ vinss(result, 3, right, (mask >> 6) & 0x3); |
4404 | break; |
4405 | } |
4406 | case SimdOpInstr::kFloat32x4NotEqual: |
4407 | __ vceqs(result, left, right); |
4408 | // Invert the result. |
4409 | __ vnot(result, result); |
4410 | break; |
4411 | case SimdOpInstr::kFloat32x4LessThan: |
4412 | __ vcgts(result, right, left); |
4413 | break; |
4414 | case SimdOpInstr::kFloat32x4LessThanOrEqual: |
4415 | __ vcges(result, right, left); |
4416 | break; |
4417 | case SimdOpInstr::kFloat32x4Scale: |
4418 | __ fcvtsd(VTMP, left); |
4419 | __ vdups(result, VTMP, 0); |
4420 | __ vmuls(result, result, right); |
4421 | break; |
4422 | case SimdOpInstr::kFloat64x2FromDoubles: |
4423 | __ vinsd(result, 0, left, 0); |
4424 | __ vinsd(result, 1, right, 0); |
4425 | break; |
4426 | case SimdOpInstr::kFloat64x2Scale: |
4427 | __ vdupd(VTMP, right, 0); |
4428 | __ vmuld(result, left, VTMP); |
4429 | break; |
4430 | default: |
4431 | UNREACHABLE(); |
4432 | } |
4433 | } |
4434 | |
4435 | #define SIMD_OP_SIMPLE_UNARY(V) \ |
4436 | SIMD_OP_FLOAT_ARITH(V, Sqrt, vsqrt) \ |
4437 | SIMD_OP_FLOAT_ARITH(V, Negate, vneg) \ |
4438 | SIMD_OP_FLOAT_ARITH(V, Abs, vabs) \ |
4439 | V(Float32x4Reciprocal, VRecps) \ |
4440 | V(Float32x4ReciprocalSqrt, VRSqrts) |
4441 | |
4442 | DEFINE_EMIT(SimdUnaryOp, (VRegister result, VRegister value)) { |
4443 | switch (instr->kind()) { |
4444 | #define EMIT(Name, op) \ |
4445 | case SimdOpInstr::k##Name: \ |
4446 | __ op(result, value); \ |
4447 | break; |
4448 | SIMD_OP_SIMPLE_UNARY(EMIT) |
4449 | #undef EMIT |
4450 | case SimdOpInstr::kFloat32x4ShuffleX: |
4451 | __ vinss(result, 0, value, 0); |
4452 | __ fcvtds(result, result); |
4453 | break; |
4454 | case SimdOpInstr::kFloat32x4ShuffleY: |
4455 | __ vinss(result, 0, value, 1); |
4456 | __ fcvtds(result, result); |
4457 | break; |
4458 | case SimdOpInstr::kFloat32x4ShuffleZ: |
4459 | __ vinss(result, 0, value, 2); |
4460 | __ fcvtds(result, result); |
4461 | break; |
4462 | case SimdOpInstr::kFloat32x4ShuffleW: |
4463 | __ vinss(result, 0, value, 3); |
4464 | __ fcvtds(result, result); |
4465 | break; |
4466 | case SimdOpInstr::kInt32x4Shuffle: |
4467 | case SimdOpInstr::kFloat32x4Shuffle: { |
4468 | const intptr_t mask = instr->mask(); |
4469 | if (mask == 0x00) { |
4470 | __ vdups(result, value, 0); |
4471 | } else if (mask == 0x55) { |
4472 | __ vdups(result, value, 1); |
4473 | } else if (mask == 0xAA) { |
4474 | __ vdups(result, value, 2); |
4475 | } else if (mask == 0xFF) { |
4476 | __ vdups(result, value, 3); |
4477 | } else { |
4478 | for (intptr_t i = 0; i < 4; i++) { |
4479 | __ vinss(result, i, value, (mask >> (2 * i)) & 0x3); |
4480 | } |
4481 | } |
4482 | break; |
4483 | } |
4484 | case SimdOpInstr::kFloat32x4Splat: |
4485 | // Convert to Float32. |
4486 | __ fcvtsd(VTMP, value); |
4487 | // Splat across all lanes. |
4488 | __ vdups(result, VTMP, 0); |
4489 | break; |
4490 | case SimdOpInstr::kFloat64x2GetX: |
4491 | __ vinsd(result, 0, value, 0); |
4492 | break; |
4493 | case SimdOpInstr::kFloat64x2GetY: |
4494 | __ vinsd(result, 0, value, 1); |
4495 | break; |
4496 | case SimdOpInstr::kFloat64x2Splat: |
4497 | __ vdupd(result, value, 0); |
4498 | break; |
4499 | case SimdOpInstr::kFloat64x2ToFloat32x4: |
4500 | // Zero register. |
4501 | __ veor(result, result, result); |
4502 | // Set X lane. |
4503 | __ vinsd(VTMP, 0, value, 0); |
4504 | __ fcvtsd(VTMP, VTMP); |
4505 | __ vinss(result, 0, VTMP, 0); |
4506 | // Set Y lane. |
4507 | __ vinsd(VTMP, 0, value, 1); |
4508 | __ fcvtsd(VTMP, VTMP); |
4509 | __ vinss(result, 1, VTMP, 0); |
4510 | break; |
4511 | case SimdOpInstr::kFloat32x4ToFloat64x2: |
4512 | // Set X. |
4513 | __ vinss(VTMP, 0, value, 0); |
4514 | __ fcvtds(VTMP, VTMP); |
4515 | __ vinsd(result, 0, VTMP, 0); |
4516 | // Set Y. |
4517 | __ vinss(VTMP, 0, value, 1); |
4518 | __ fcvtds(VTMP, VTMP); |
4519 | __ vinsd(result, 1, VTMP, 0); |
4520 | break; |
4521 | default: |
4522 | UNREACHABLE(); |
4523 | } |
4524 | } |
4525 | |
4526 | DEFINE_EMIT(Simd32x4GetSignMask, |
4527 | (Register out, VRegister value, Temp<Register> temp)) { |
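  // Collect the sign bit (bit 31) of each single-precision lane into
  // bits 0..3 of 'out'.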
4528 | // X lane. |
4529 | __ vmovrs(out, value, 0); |
4530 | __ LsrImmediate(out, out, 31); |
4531 | // Y lane. |
4532 | __ vmovrs(temp, value, 1); |
4533 | __ LsrImmediate(temp, temp, 31); |
4534 | __ orr(out, out, compiler::Operand(temp, LSL, 1)); |
4535 | // Z lane. |
4536 | __ vmovrs(temp, value, 2); |
4537 | __ LsrImmediate(temp, temp, 31); |
4538 | __ orr(out, out, compiler::Operand(temp, LSL, 2)); |
4539 | // W lane. |
4540 | __ vmovrs(temp, value, 3); |
4541 | __ LsrImmediate(temp, temp, 31); |
4542 | __ orr(out, out, compiler::Operand(temp, LSL, 3)); |
4543 | } |
4544 | |
4545 | DEFINE_EMIT( |
4546 | Float32x4FromDoubles, |
4547 | (VRegister r, VRegister v0, VRegister v1, VRegister v2, VRegister v3)) { |
4548 | __ fcvtsd(VTMP, v0); |
4549 | __ vinss(r, 0, VTMP, 0); |
4550 | __ fcvtsd(VTMP, v1); |
4551 | __ vinss(r, 1, VTMP, 0); |
4552 | __ fcvtsd(VTMP, v2); |
4553 | __ vinss(r, 2, VTMP, 0); |
4554 | __ fcvtsd(VTMP, v3); |
4555 | __ vinss(r, 3, VTMP, 0); |
4556 | } |
4557 | |
4558 | DEFINE_EMIT( |
4559 | Float32x4Clamp, |
4560 | (VRegister result, VRegister value, VRegister lower, VRegister upper)) { |
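  // Clamp each lane of 'value' to [lower, upper]: take the minimum with the
  // upper bound, then the maximum with the lower bound.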
4561 | __ vmins(result, value, upper); |
4562 | __ vmaxs(result, result, lower); |
4563 | } |
4564 | |
4565 | DEFINE_EMIT(Float32x4With, |
4566 | (VRegister result, VRegister replacement, VRegister value)) { |
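  // Narrow the replacement double to a single, copy the original vector, and
  // overwrite the selected lane below.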
4567 | __ fcvtsd(VTMP, replacement); |
4568 | __ vmov(result, value); |
4569 | switch (instr->kind()) { |
4570 | case SimdOpInstr::kFloat32x4WithX: |
4571 | __ vinss(result, 0, VTMP, 0); |
4572 | break; |
4573 | case SimdOpInstr::kFloat32x4WithY: |
4574 | __ vinss(result, 1, VTMP, 0); |
4575 | break; |
4576 | case SimdOpInstr::kFloat32x4WithZ: |
4577 | __ vinss(result, 2, VTMP, 0); |
4578 | break; |
4579 | case SimdOpInstr::kFloat32x4WithW: |
4580 | __ vinss(result, 3, VTMP, 0); |
4581 | break; |
4582 | default: |
4583 | UNREACHABLE(); |
4584 | } |
4585 | } |
4586 | |
4587 | DEFINE_EMIT(Simd32x4ToSimd32x4, (SameAsFirstInput, VRegister value)) { |
  // TODO(dartbug.com/30949) these operations are essentially no-ops and should
4589 | // not generate any code. They should be removed from the graph before |
4590 | // code generation. |
4591 | } |
4592 | |
4593 | DEFINE_EMIT(SimdZero, (VRegister v)) { |
4594 | __ veor(v, v, v); |
4595 | } |
4596 | |
4597 | DEFINE_EMIT(Float64x2GetSignMask, (Register out, VRegister value)) { |
4598 | // Bits of X lane. |
4599 | __ vmovrd(out, value, 0); |
4600 | __ LsrImmediate(out, out, 63); |
4601 | // Bits of Y lane. |
4602 | __ vmovrd(TMP, value, 1); |
4603 | __ LsrImmediate(TMP, TMP, 63); |
4604 | __ orr(out, out, compiler::Operand(TMP, LSL, 1)); |
4605 | } |
4606 | |
4607 | DEFINE_EMIT(Float64x2With, |
4608 | (SameAsFirstInput, VRegister left, VRegister right)) { |
4609 | switch (instr->kind()) { |
4610 | case SimdOpInstr::kFloat64x2WithX: |
4611 | __ vinsd(left, 0, right, 0); |
4612 | break; |
4613 | case SimdOpInstr::kFloat64x2WithY: |
4614 | __ vinsd(left, 1, right, 0); |
4615 | break; |
4616 | default: |
4617 | UNREACHABLE(); |
4618 | } |
4619 | } |
4620 | |
4621 | DEFINE_EMIT( |
4622 | Int32x4FromInts, |
4623 | (VRegister result, Register v0, Register v1, Register v2, Register v3)) { |
4624 | __ veor(result, result, result); |
4625 | __ vinsw(result, 0, v0); |
4626 | __ vinsw(result, 1, v1); |
4627 | __ vinsw(result, 2, v2); |
4628 | __ vinsw(result, 3, v3); |
4629 | } |
4630 | |
4631 | DEFINE_EMIT(Int32x4FromBools, |
4632 | (VRegister result, |
4633 | Register v0, |
4634 | Register v1, |
4635 | Register v2, |
4636 | Register v3, |
4637 | Temp<Register> temp)) { |
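  // Set each lane to all ones if the corresponding argument is the true
  // object, and to zero otherwise.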
4638 | __ veor(result, result, result); |
4639 | __ LoadImmediate(temp, 0xffffffff); |
4640 | __ LoadObject(TMP2, Bool::True()); |
4641 | |
4642 | const Register vs[] = {v0, v1, v2, v3}; |
4643 | for (intptr_t i = 0; i < 4; i++) { |
4644 | __ CompareRegisters(vs[i], TMP2); |
4645 | __ csel(TMP, temp, ZR, EQ); |
4646 | __ vinsw(result, i, TMP); |
4647 | } |
4648 | } |
4649 | |
4650 | DEFINE_EMIT(Int32x4GetFlag, (Register result, VRegister value)) { |
4651 | switch (instr->kind()) { |
4652 | case SimdOpInstr::kInt32x4GetFlagX: |
4653 | __ vmovrs(result, value, 0); |
4654 | break; |
4655 | case SimdOpInstr::kInt32x4GetFlagY: |
4656 | __ vmovrs(result, value, 1); |
4657 | break; |
4658 | case SimdOpInstr::kInt32x4GetFlagZ: |
4659 | __ vmovrs(result, value, 2); |
4660 | break; |
4661 | case SimdOpInstr::kInt32x4GetFlagW: |
4662 | __ vmovrs(result, value, 3); |
4663 | break; |
4664 | default: |
4665 | UNREACHABLE(); |
4666 | } |
4667 | |
4668 | __ tst(result, compiler::Operand(result)); |
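  // A zero lane (EQ) maps to false, any non-zero lane maps to true.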
4669 | __ LoadObject(result, Bool::True()); |
4670 | __ LoadObject(TMP, Bool::False()); |
4671 | __ csel(result, TMP, result, EQ); |
4672 | } |
4673 | |
4674 | DEFINE_EMIT(Int32x4Select, |
4675 | (VRegister out, |
4676 | VRegister mask, |
4677 | VRegister trueValue, |
4678 | VRegister falseValue, |
4679 | Temp<VRegister> temp)) { |
4680 | // Copy mask. |
4681 | __ vmov(temp, mask); |
4682 | // Invert it. |
4683 | __ vnot(temp, temp); |
4684 | // mask = mask & trueValue. |
4685 | __ vand(mask, mask, trueValue); |
4686 | // temp = temp & falseValue. |
4687 | __ vand(temp, temp, falseValue); |
4688 | // out = mask | temp. |
4689 | __ vorr(out, mask, temp); |
4690 | } |
4691 | |
4692 | DEFINE_EMIT(Int32x4WithFlag, |
4693 | (SameAsFirstInput, VRegister mask, Register flag)) { |
4694 | const VRegister result = mask; |
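  // TMP = (flag == true) ? 0xffffffff : 0; it is then written into the
  // selected lane below.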
4695 | __ CompareObject(flag, Bool::True()); |
4696 | __ LoadImmediate(TMP, 0xffffffff); |
4697 | __ csel(TMP, TMP, ZR, EQ); |
4698 | switch (instr->kind()) { |
4699 | case SimdOpInstr::kInt32x4WithFlagX: |
4700 | __ vinsw(result, 0, TMP); |
4701 | break; |
4702 | case SimdOpInstr::kInt32x4WithFlagY: |
4703 | __ vinsw(result, 1, TMP); |
4704 | break; |
4705 | case SimdOpInstr::kInt32x4WithFlagZ: |
4706 | __ vinsw(result, 2, TMP); |
4707 | break; |
4708 | case SimdOpInstr::kInt32x4WithFlagW: |
4709 | __ vinsw(result, 3, TMP); |
4710 | break; |
4711 | default: |
4712 | UNREACHABLE(); |
4713 | } |
4714 | } |
4715 | |
4716 | // Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following |
4717 | // format: |
4718 | // |
4719 | // CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB. |
4720 | // SIMPLE(OpA) - Emitter with name OpA is used to emit OpA. |
4721 | // |
4722 | #define SIMD_OP_VARIANTS(CASE, ____) \ |
4723 | SIMD_OP_SIMPLE_BINARY(CASE) \ |
4724 | CASE(Float32x4ShuffleMix) \ |
4725 | CASE(Int32x4ShuffleMix) \ |
4726 | CASE(Float32x4NotEqual) \ |
4727 | CASE(Float32x4LessThan) \ |
4728 | CASE(Float32x4LessThanOrEqual) \ |
4729 | CASE(Float32x4Scale) \ |
4730 | CASE(Float64x2FromDoubles) \ |
4731 | CASE(Float64x2Scale) \ |
4732 | ____(SimdBinaryOp) \ |
4733 | SIMD_OP_SIMPLE_UNARY(CASE) \ |
4734 | CASE(Float32x4ShuffleX) \ |
4735 | CASE(Float32x4ShuffleY) \ |
4736 | CASE(Float32x4ShuffleZ) \ |
4737 | CASE(Float32x4ShuffleW) \ |
4738 | CASE(Int32x4Shuffle) \ |
4739 | CASE(Float32x4Shuffle) \ |
4740 | CASE(Float32x4Splat) \ |
4741 | CASE(Float64x2GetX) \ |
4742 | CASE(Float64x2GetY) \ |
4743 | CASE(Float64x2Splat) \ |
4744 | CASE(Float64x2ToFloat32x4) \ |
4745 | CASE(Float32x4ToFloat64x2) \ |
4746 | ____(SimdUnaryOp) \ |
4747 | CASE(Float32x4GetSignMask) \ |
4748 | CASE(Int32x4GetSignMask) \ |
4749 | ____(Simd32x4GetSignMask) \ |
4750 | CASE(Float32x4FromDoubles) \ |
4751 | ____(Float32x4FromDoubles) \ |
4752 | CASE(Float32x4Zero) \ |
4753 | CASE(Float64x2Zero) \ |
4754 | ____(SimdZero) \ |
4755 | CASE(Float32x4Clamp) \ |
4756 | ____(Float32x4Clamp) \ |
4757 | CASE(Float32x4WithX) \ |
4758 | CASE(Float32x4WithY) \ |
4759 | CASE(Float32x4WithZ) \ |
4760 | CASE(Float32x4WithW) \ |
4761 | ____(Float32x4With) \ |
4762 | CASE(Float32x4ToInt32x4) \ |
4763 | CASE(Int32x4ToFloat32x4) \ |
4764 | ____(Simd32x4ToSimd32x4) \ |
4765 | CASE(Float64x2GetSignMask) \ |
4766 | ____(Float64x2GetSignMask) \ |
4767 | CASE(Float64x2WithX) \ |
4768 | CASE(Float64x2WithY) \ |
4769 | ____(Float64x2With) \ |
4770 | CASE(Int32x4FromInts) \ |
4771 | ____(Int32x4FromInts) \ |
4772 | CASE(Int32x4FromBools) \ |
4773 | ____(Int32x4FromBools) \ |
4774 | CASE(Int32x4GetFlagX) \ |
4775 | CASE(Int32x4GetFlagY) \ |
4776 | CASE(Int32x4GetFlagZ) \ |
4777 | CASE(Int32x4GetFlagW) \ |
4778 | ____(Int32x4GetFlag) \ |
4779 | CASE(Int32x4Select) \ |
4780 | ____(Int32x4Select) \ |
4781 | CASE(Int32x4WithFlagX) \ |
4782 | CASE(Int32x4WithFlagY) \ |
4783 | CASE(Int32x4WithFlagZ) \ |
4784 | CASE(Int32x4WithFlagW) \ |
4785 | ____(Int32x4WithFlag) |
4786 | |
4787 | LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
4788 | switch (kind()) { |
4789 | #define CASE(Name, ...) case k##Name: |
4790 | #define EMIT(Name) \ |
4791 | return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name); |
4792 | SIMD_OP_VARIANTS(CASE, EMIT) |
4793 | #undef CASE |
4794 | #undef EMIT |
4795 | case kIllegalSimdOp: |
4796 | UNREACHABLE(); |
4797 | break; |
4798 | } |
4799 | UNREACHABLE(); |
4800 | return NULL; |
4801 | } |
4802 | |
4803 | void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4804 | switch (kind()) { |
4805 | #define CASE(Name, ...) case k##Name: |
4806 | #define EMIT(Name) \ |
4807 | InvokeEmitter(compiler, this, &Emit##Name); \ |
4808 | break; |
4809 | SIMD_OP_VARIANTS(CASE, EMIT) |
4810 | #undef CASE |
4811 | #undef EMIT |
4812 | case kIllegalSimdOp: |
4813 | UNREACHABLE(); |
4814 | break; |
4815 | } |
4816 | } |
4817 | |
4818 | #undef DEFINE_EMIT |
4819 | |
4820 | LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone, |
4821 | bool opt) const { |
4822 | ASSERT((kind() == MathUnaryInstr::kSqrt) || |
4823 | (kind() == MathUnaryInstr::kDoubleSquare)); |
4824 | const intptr_t kNumInputs = 1; |
4825 | const intptr_t kNumTemps = 0; |
4826 | LocationSummary* summary = new (zone) |
4827 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4828 | summary->set_in(0, Location::RequiresFpuRegister()); |
4829 | summary->set_out(0, Location::RequiresFpuRegister()); |
4830 | return summary; |
4831 | } |
4832 | |
4833 | void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4834 | if (kind() == MathUnaryInstr::kSqrt) { |
4835 | const VRegister val = locs()->in(0).fpu_reg(); |
4836 | const VRegister result = locs()->out(0).fpu_reg(); |
4837 | __ fsqrtd(result, val); |
4838 | } else if (kind() == MathUnaryInstr::kDoubleSquare) { |
4839 | const VRegister val = locs()->in(0).fpu_reg(); |
4840 | const VRegister result = locs()->out(0).fpu_reg(); |
4841 | __ fmuld(result, val, val); |
4842 | } else { |
4843 | UNREACHABLE(); |
4844 | } |
4845 | } |
4846 | |
4847 | LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary( |
4848 | Zone* zone, |
4849 | bool opt) const { |
4850 | const intptr_t kNumTemps = 0; |
4851 | LocationSummary* summary = new (zone) |
4852 | LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); |
4853 | summary->set_in(0, Location::RegisterLocation(R0)); |
4854 | summary->set_in(1, Location::RegisterLocation(R1)); |
4855 | summary->set_in(2, Location::RegisterLocation(R2)); |
4856 | summary->set_in(3, Location::RegisterLocation(R3)); |
4857 | summary->set_out(0, Location::RegisterLocation(R0)); |
4858 | return summary; |
4859 | } |
4860 | |
4861 | void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4862 | // Call the function. |
4863 | __ CallRuntime(TargetFunction(), TargetFunction().argument_count()); |
4864 | } |
4865 | |
4866 | LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone, |
4867 | bool opt) const { |
4868 | if (result_cid() == kDoubleCid) { |
4869 | const intptr_t kNumInputs = 2; |
4870 | const intptr_t kNumTemps = 0; |
4871 | LocationSummary* summary = new (zone) |
4872 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4873 | summary->set_in(0, Location::RequiresFpuRegister()); |
4874 | summary->set_in(1, Location::RequiresFpuRegister()); |
4875 | // Reuse the left register so that code can be made shorter. |
4876 | summary->set_out(0, Location::SameAsFirstInput()); |
4877 | return summary; |
4878 | } |
4879 | ASSERT(result_cid() == kSmiCid); |
4880 | const intptr_t kNumInputs = 2; |
4881 | const intptr_t kNumTemps = 0; |
4882 | LocationSummary* summary = new (zone) |
4883 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4884 | summary->set_in(0, Location::RequiresRegister()); |
4885 | summary->set_in(1, Location::RequiresRegister()); |
4886 | // Reuse the left register so that code can be made shorter. |
4887 | summary->set_out(0, Location::SameAsFirstInput()); |
4888 | return summary; |
4889 | } |
4890 | |
4891 | void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4892 | ASSERT((op_kind() == MethodRecognizer::kMathMin) || |
4893 | (op_kind() == MethodRecognizer::kMathMax)); |
4894 | const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin); |
4895 | if (result_cid() == kDoubleCid) { |
4896 | compiler::Label done, returns_nan, are_equal; |
4897 | const VRegister left = locs()->in(0).fpu_reg(); |
4898 | const VRegister right = locs()->in(1).fpu_reg(); |
4899 | const VRegister result = locs()->out(0).fpu_reg(); |
4900 | __ fcmpd(left, right); |
4901 | __ b(&returns_nan, VS); |
4902 | __ b(&are_equal, EQ); |
4903 | const Condition double_condition = |
4904 | is_min ? TokenKindToDoubleCondition(Token::kLTE) |
4905 | : TokenKindToDoubleCondition(Token::kGTE); |
4906 | ASSERT(left == result); |
4907 | __ b(&done, double_condition); |
4908 | __ fmovdd(result, right); |
4909 | __ b(&done); |
4910 | |
4911 | __ Bind(&returns_nan); |
4912 | __ LoadDImmediate(result, NAN); |
4913 | __ b(&done); |
4914 | |
4915 | __ Bind(&are_equal); |
    // Check for negative zero: -0.0 is equal to 0.0, but min and max must
    // return -0.0 and 0.0 respectively.
    // Check the sign bit of the left value:
    //  - min -> left is negative ? left : right.
    //  - max -> left is negative ? right : left.
4922 | __ fmovrd(TMP, left); // Sign bit is in bit 63 of TMP. |
4923 | __ CompareImmediate(TMP, 0); |
4924 | if (is_min) { |
4925 | ASSERT(left == result); |
4926 | __ b(&done, LT); |
4927 | __ fmovdd(result, right); |
4928 | } else { |
4929 | __ b(&done, GE); |
4930 | __ fmovdd(result, right); |
4931 | ASSERT(left == result); |
4932 | } |
4933 | __ Bind(&done); |
4934 | return; |
4935 | } |
4936 | |
4937 | ASSERT(result_cid() == kSmiCid); |
4938 | const Register left = locs()->in(0).reg(); |
4939 | const Register right = locs()->in(1).reg(); |
4940 | const Register result = locs()->out(0).reg(); |
4941 | __ CompareRegisters(left, right); |
4942 | ASSERT(result == left); |
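  // min: result = (left > right) ? right : left.
  // max: result = (left < right) ? right : left.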
4943 | if (is_min) { |
4944 | __ csel(result, right, left, GT); |
4945 | } else { |
4946 | __ csel(result, right, left, LT); |
4947 | } |
4948 | } |
4949 | |
4950 | LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone, |
4951 | bool opt) const { |
4952 | const intptr_t kNumInputs = 1; |
4953 | const intptr_t kNumTemps = 0; |
4954 | LocationSummary* summary = new (zone) |
4955 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4956 | summary->set_in(0, Location::RequiresRegister()); |
  // We make use of 3-operand instructions, so the result register does not
  // need to be the same as the first input register (as it must be on Intel).
4959 | summary->set_out(0, Location::RequiresRegister()); |
4960 | return summary; |
4961 | } |
4962 | |
4963 | void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4964 | const Register value = locs()->in(0).reg(); |
4965 | const Register result = locs()->out(0).reg(); |
4966 | switch (op_kind()) { |
4967 | case Token::kNEGATE: { |
4968 | compiler::Label* deopt = |
4969 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); |
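      // Negate by subtracting from zero; overflow (VS) only occurs when
      // negating the most negative Smi, which has no Smi negation.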
4970 | __ subs(result, ZR, compiler::Operand(value)); |
4971 | __ b(deopt, VS); |
4972 | break; |
4973 | } |
4974 | case Token::kBIT_NOT: |
4975 | __ mvn(result, value); |
4976 | // Remove inverted smi-tag. |
4977 | __ andi(result, result, compiler::Immediate(~kSmiTagMask)); |
4978 | break; |
4979 | default: |
4980 | UNREACHABLE(); |
4981 | } |
4982 | } |
4983 | |
4984 | LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone, |
4985 | bool opt) const { |
4986 | const intptr_t kNumInputs = 1; |
4987 | const intptr_t kNumTemps = 0; |
4988 | LocationSummary* summary = new (zone) |
4989 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4990 | summary->set_in(0, Location::RequiresFpuRegister()); |
4991 | summary->set_out(0, Location::RequiresFpuRegister()); |
4992 | return summary; |
4993 | } |
4994 | |
4995 | void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4996 | const VRegister result = locs()->out(0).fpu_reg(); |
4997 | const VRegister value = locs()->in(0).fpu_reg(); |
4998 | __ fnegd(result, value); |
4999 | } |
5000 | |
5001 | LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone, |
5002 | bool opt) const { |
5003 | const intptr_t kNumInputs = 1; |
5004 | const intptr_t kNumTemps = 0; |
5005 | LocationSummary* result = new (zone) |
5006 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5007 | result->set_in(0, Location::RequiresRegister()); |
5008 | result->set_out(0, Location::RequiresFpuRegister()); |
5009 | return result; |
5010 | } |
5011 | |
5012 | void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5013 | const Register value = locs()->in(0).reg(); |
5014 | const VRegister result = locs()->out(0).fpu_reg(); |
5015 | __ scvtfdw(result, value); |
5016 | } |
5017 | |
5018 | LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone, |
5019 | bool opt) const { |
5020 | const intptr_t kNumInputs = 1; |
5021 | const intptr_t kNumTemps = 0; |
5022 | LocationSummary* result = new (zone) |
5023 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5024 | result->set_in(0, Location::RequiresRegister()); |
5025 | result->set_out(0, Location::RequiresFpuRegister()); |
5026 | return result; |
5027 | } |
5028 | |
5029 | void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5030 | const Register value = locs()->in(0).reg(); |
5031 | const VRegister result = locs()->out(0).fpu_reg(); |
5032 | __ SmiUntag(TMP, value); |
5033 | __ scvtfdx(result, TMP); |
5034 | } |
5035 | |
5036 | LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone, |
5037 | bool opt) const { |
5038 | const intptr_t kNumInputs = 1; |
5039 | const intptr_t kNumTemps = 0; |
5040 | LocationSummary* result = new (zone) |
5041 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5042 | result->set_in(0, Location::RequiresRegister()); |
5043 | result->set_out(0, Location::RequiresFpuRegister()); |
5044 | return result; |
5045 | } |
5046 | |
5047 | void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5048 | const Register value = locs()->in(0).reg(); |
5049 | const VRegister result = locs()->out(0).fpu_reg(); |
5050 | __ scvtfdx(result, value); |
5051 | } |
5052 | |
5053 | LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone, |
5054 | bool opt) const { |
5055 | const intptr_t kNumInputs = 1; |
5056 | const intptr_t kNumTemps = 0; |
5057 | LocationSummary* result = new (zone) |
5058 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
5059 | result->set_in(0, Location::RegisterLocation(R1)); |
5060 | result->set_out(0, Location::RegisterLocation(R0)); |
5061 | return result; |
5062 | } |
5063 | |
5064 | void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5065 | const Register result = locs()->out(0).reg(); |
5066 | const Register value_obj = locs()->in(0).reg(); |
5067 | ASSERT(result == R0); |
5068 | ASSERT(result != value_obj); |
5069 | __ LoadDFieldFromOffset(VTMP, value_obj, Double::value_offset()); |
5070 | |
5071 | compiler::Label do_call, done; |
5072 | // First check for NaN. Checking for minint after the conversion doesn't work |
5073 | // on ARM64 because fcvtzds gives 0 for NaN. |
5074 | __ fcmpd(VTMP, VTMP); |
5075 | __ b(&do_call, VS); |
5076 | |
5077 | __ fcvtzds(result, VTMP); |
5078 | // Overflow is signaled with minint. |
5079 | |
5080 | // Check for overflow and that it fits into Smi. |
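  // The N flag (MI) is set exactly when the converted value lies outside the
  // Smi range [-2^62, 2^62), including the minint marker fcvtzds produces on
  // overflow.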
5081 | __ CompareImmediate(result, 0xC000000000000000); |
5082 | __ b(&do_call, MI); |
5083 | __ SmiTag(result); |
5084 | __ b(&done); |
5085 | __ Bind(&do_call); |
5086 | __ Push(value_obj); |
5087 | ASSERT(instance_call()->HasICData()); |
5088 | const ICData& ic_data = *instance_call()->ic_data(); |
5089 | ASSERT(ic_data.NumberOfChecksIs(1)); |
5090 | const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0)); |
5091 | const int kTypeArgsLen = 0; |
5092 | const int kNumberOfArguments = 1; |
5093 | constexpr int kSizeOfArguments = 1; |
5094 | const Array& kNoArgumentNames = Object::null_array(); |
5095 | ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments, |
5096 | kNoArgumentNames); |
5097 | compiler->GenerateStaticCall(deopt_id(), instance_call()->token_pos(), target, |
5098 | args_info, locs(), ICData::Handle(), |
5099 | ICData::kStatic); |
5100 | __ Bind(&done); |
5101 | } |
5102 | |
5103 | LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone, |
5104 | bool opt) const { |
5105 | const intptr_t kNumInputs = 1; |
5106 | const intptr_t kNumTemps = 0; |
5107 | LocationSummary* result = new (zone) |
5108 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5109 | result->set_in(0, Location::RequiresFpuRegister()); |
5110 | result->set_out(0, Location::RequiresRegister()); |
5111 | return result; |
5112 | } |
5113 | |
5114 | void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5115 | compiler::Label* deopt = |
5116 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi); |
5117 | const Register result = locs()->out(0).reg(); |
5118 | const VRegister value = locs()->in(0).fpu_reg(); |
5119 | // First check for NaN. Checking for minint after the conversion doesn't work |
5120 | // on ARM64 because fcvtzds gives 0 for NaN. |
5121 | // TODO(zra): Check spec that this is true. |
5122 | __ fcmpd(value, value); |
5123 | __ b(deopt, VS); |
5124 | |
5125 | __ fcvtzds(result, value); |
5126 | // Check for overflow and that it fits into Smi. |
5127 | __ CompareImmediate(result, 0xC000000000000000); |
5128 | __ b(deopt, MI); |
5129 | __ SmiTag(result); |
5130 | } |
5131 | |
5132 | LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone, |
5133 | bool opt) const { |
5134 | UNIMPLEMENTED(); |
5135 | return NULL; |
5136 | } |
5137 | |
5138 | void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5139 | UNIMPLEMENTED(); |
5140 | } |
5141 | |
5142 | LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone, |
5143 | bool opt) const { |
5144 | const intptr_t kNumInputs = 1; |
5145 | const intptr_t kNumTemps = 0; |
5146 | LocationSummary* result = new (zone) |
5147 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5148 | result->set_in(0, Location::RequiresFpuRegister()); |
5149 | result->set_out(0, Location::RequiresFpuRegister()); |
5150 | return result; |
5151 | } |
5152 | |
5153 | void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5154 | const VRegister value = locs()->in(0).fpu_reg(); |
5155 | const VRegister result = locs()->out(0).fpu_reg(); |
5156 | __ fcvtsd(result, value); |
5157 | } |
5158 | |
5159 | LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone, |
5160 | bool opt) const { |
5161 | const intptr_t kNumInputs = 1; |
5162 | const intptr_t kNumTemps = 0; |
5163 | LocationSummary* result = new (zone) |
5164 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5165 | result->set_in(0, Location::RequiresFpuRegister()); |
5166 | result->set_out(0, Location::RequiresFpuRegister()); |
5167 | return result; |
5168 | } |
5169 | |
5170 | void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5171 | const VRegister value = locs()->in(0).fpu_reg(); |
5172 | const VRegister result = locs()->out(0).fpu_reg(); |
5173 | __ fcvtds(result, value); |
5174 | } |
5175 | |
5176 | LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone, |
5177 | bool opt) const { |
5178 | ASSERT((InputCount() == 1) || (InputCount() == 2)); |
5179 | const intptr_t kNumTemps = |
5180 | (recognized_kind() == MethodRecognizer::kMathDoublePow) ? 1 : 0; |
5181 | LocationSummary* result = new (zone) |
5182 | LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); |
5183 | result->set_in(0, Location::FpuRegisterLocation(V0)); |
5184 | if (InputCount() == 2) { |
5185 | result->set_in(1, Location::FpuRegisterLocation(V1)); |
5186 | } |
5187 | if (recognized_kind() == MethodRecognizer::kMathDoublePow) { |
5188 | result->set_temp(0, Location::FpuRegisterLocation(V30)); |
5189 | } |
5190 | result->set_out(0, Location::FpuRegisterLocation(V0)); |
5191 | return result; |
5192 | } |
5193 | |
5194 | // Pseudo code: |
5195 | // if (exponent == 0.0) return 1.0; |
5196 | // // Speed up simple cases. |
5197 | // if (exponent == 1.0) return base; |
5198 | // if (exponent == 2.0) return base * base; |
5199 | // if (exponent == 3.0) return base * base * base; |
5200 | // if (base == 1.0) return 1.0; |
5201 | // if (base.isNaN || exponent.isNaN) { |
5202 | // return double.NAN; |
5203 | // } |
5204 | // if (base != -Infinity && exponent == 0.5) { |
5205 | // if (base == 0.0) return 0.0; |
5206 | // return sqrt(value); |
5207 | // } |
5208 | // TODO(srdjan): Move into a stub? |
5209 | static void InvokeDoublePow(FlowGraphCompiler* compiler, |
5210 | InvokeMathCFunctionInstr* instr) { |
5211 | ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow); |
5212 | const intptr_t kInputCount = 2; |
5213 | ASSERT(instr->InputCount() == kInputCount); |
5214 | LocationSummary* locs = instr->locs(); |
5215 | |
5216 | const VRegister base = locs->in(0).fpu_reg(); |
5217 | const VRegister exp = locs->in(1).fpu_reg(); |
5218 | const VRegister result = locs->out(0).fpu_reg(); |
5219 | const VRegister saved_base = locs->temp(0).fpu_reg(); |
5220 | ASSERT((base == result) && (result != saved_base)); |
5221 | |
5222 | compiler::Label skip_call, try_sqrt, check_base, return_nan, do_pow; |
5223 | __ fmovdd(saved_base, base); |
5224 | __ LoadDImmediate(result, 1.0); |
5225 | // exponent == 0.0 -> return 1.0; |
5226 | __ fcmpdz(exp); |
5227 | __ b(&check_base, VS); // NaN -> check base. |
5228 | __ b(&skip_call, EQ); // exp is 0.0, result is 1.0. |
5229 | |
5230 | // exponent == 1.0 ? |
5231 | __ fcmpd(exp, result); |
5232 | compiler::Label return_base; |
5233 | __ b(&return_base, EQ); |
5234 | |
5235 | // exponent == 2.0 ? |
5236 | __ LoadDImmediate(VTMP, 2.0); |
5237 | __ fcmpd(exp, VTMP); |
5238 | compiler::Label return_base_times_2; |
5239 | __ b(&return_base_times_2, EQ); |
5240 | |
5241 | // exponent == 3.0 ? |
5242 | __ LoadDImmediate(VTMP, 3.0); |
5243 | __ fcmpd(exp, VTMP); |
5244 | __ b(&check_base, NE); |
5245 | |
5246 | // base_times_3. |
5247 | __ fmuld(result, saved_base, saved_base); |
5248 | __ fmuld(result, result, saved_base); |
5249 | __ b(&skip_call); |
5250 | |
5251 | __ Bind(&return_base); |
5252 | __ fmovdd(result, saved_base); |
5253 | __ b(&skip_call); |
5254 | |
5255 | __ Bind(&return_base_times_2); |
5256 | __ fmuld(result, saved_base, saved_base); |
5257 | __ b(&skip_call); |
5258 | |
5259 | __ Bind(&check_base); |
5260 | // Note: 'exp' could be NaN. |
5261 | // base == 1.0 -> return 1.0; |
5262 | __ fcmpd(saved_base, result); |
5263 | __ b(&return_nan, VS); |
5264 | __ b(&skip_call, EQ); // base is 1.0, result is 1.0. |
5265 | |
5266 | __ fcmpd(saved_base, exp); |
  __ b(&try_sqrt, VC);  // Neither 'exp' nor 'base' is NaN.
5268 | |
5269 | __ Bind(&return_nan); |
5270 | __ LoadDImmediate(result, NAN); |
5271 | __ b(&skip_call); |
5272 | |
5273 | compiler::Label return_zero; |
5274 | __ Bind(&try_sqrt); |
5275 | |
5276 | // Before calling pow, check if we could use sqrt instead of pow. |
5277 | __ LoadDImmediate(result, kNegInfinity); |
5278 | |
5279 | // base == -Infinity -> call pow; |
5280 | __ fcmpd(saved_base, result); |
5281 | __ b(&do_pow, EQ); |
5282 | |
5283 | // exponent == 0.5 ? |
5284 | __ LoadDImmediate(result, 0.5); |
5285 | __ fcmpd(exp, result); |
5286 | __ b(&do_pow, NE); |
5287 | |
5288 | // base == 0 -> return 0; |
5289 | __ fcmpdz(saved_base); |
5290 | __ b(&return_zero, EQ); |
5291 | |
5292 | __ fsqrtd(result, saved_base); |
5293 | __ b(&skip_call); |
5294 | |
5295 | __ Bind(&return_zero); |
5296 | __ LoadDImmediate(result, 0.0); |
5297 | __ b(&skip_call); |
5298 | |
5299 | __ Bind(&do_pow); |
5300 | __ fmovdd(base, saved_base); // Restore base. |
5301 | |
5302 | __ CallRuntime(instr->TargetFunction(), kInputCount); |
5303 | __ Bind(&skip_call); |
5304 | } |
5305 | |
5306 | void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5307 | if (recognized_kind() == MethodRecognizer::kMathDoublePow) { |
5308 | InvokeDoublePow(compiler, this); |
5309 | return; |
5310 | } |
5311 | __ CallRuntime(TargetFunction(), InputCount()); |
5312 | } |
5313 | |
5314 | LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone, |
5315 | bool opt) const { |
5316 | // Only use this instruction in optimized code. |
5317 | ASSERT(opt); |
5318 | const intptr_t kNumInputs = 1; |
5319 | LocationSummary* summary = |
5320 | new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); |
5321 | if (representation() == kUnboxedDouble) { |
5322 | if (index() == 0) { |
5323 | summary->set_in( |
5324 | 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any())); |
5325 | } else { |
5326 | ASSERT(index() == 1); |
5327 | summary->set_in( |
5328 | 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister())); |
5329 | } |
5330 | summary->set_out(0, Location::RequiresFpuRegister()); |
5331 | } else { |
5332 | ASSERT(representation() == kTagged); |
5333 | if (index() == 0) { |
5334 | summary->set_in( |
5335 | 0, Location::Pair(Location::RequiresRegister(), Location::Any())); |
5336 | } else { |
5337 | ASSERT(index() == 1); |
5338 | summary->set_in( |
5339 | 0, Location::Pair(Location::Any(), Location::RequiresRegister())); |
5340 | } |
5341 | summary->set_out(0, Location::RequiresRegister()); |
5342 | } |
5343 | return summary; |
5344 | } |
5345 | |
5346 | void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5347 | ASSERT(locs()->in(0).IsPairLocation()); |
5348 | PairLocation* pair = locs()->in(0).AsPairLocation(); |
5349 | Location in_loc = pair->At(index()); |
5350 | if (representation() == kUnboxedDouble) { |
5351 | const VRegister out = locs()->out(0).fpu_reg(); |
5352 | const VRegister in = in_loc.fpu_reg(); |
5353 | __ fmovdd(out, in); |
5354 | } else { |
5355 | ASSERT(representation() == kTagged); |
5356 | const Register out = locs()->out(0).reg(); |
5357 | const Register in = in_loc.reg(); |
5358 | __ mov(out, in); |
5359 | } |
5360 | } |
5361 | |
5362 | LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone, |
5363 | bool opt) const { |
5364 | const intptr_t kNumInputs = 2; |
5365 | const intptr_t kNumTemps = 0; |
5366 | LocationSummary* summary = new (zone) |
5367 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5368 | summary->set_in(0, Location::RequiresRegister()); |
5369 | summary->set_in(1, Location::RequiresRegister()); |
5370 | // Output is a pair of registers. |
5371 | summary->set_out(0, Location::Pair(Location::RequiresRegister(), |
5372 | Location::RequiresRegister())); |
5373 | return summary; |
5374 | } |
5375 | |
5376 | void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5377 | ASSERT(CanDeoptimize()); |
5378 | compiler::Label* deopt = |
5379 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); |
5380 | const Register left = locs()->in(0).reg(); |
5381 | const Register right = locs()->in(1).reg(); |
5382 | ASSERT(locs()->out(0).IsPairLocation()); |
5383 | const PairLocation* pair = locs()->out(0).AsPairLocation(); |
5384 | const Register result_div = pair->At(0).reg(); |
5385 | const Register result_mod = pair->At(1).reg(); |
5386 | if (RangeUtils::CanBeZero(divisor_range())) { |
5387 | // Handle divide by zero in runtime. |
5388 | __ CompareRegisters(right, ZR); |
5389 | __ b(deopt, EQ); |
5390 | } |
5391 | |
5392 | __ SmiUntag(result_mod, left); |
5393 | __ SmiUntag(TMP, right); |
5394 | |
5395 | __ sdiv(result_div, result_mod, TMP); |
5396 | |
  // Check the corner case of dividing MIN_SMI by -1, in which case
  // we cannot tag the result.
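  // (Untagged, MIN_SMI / -1 == 2^62 == 0x4000000000000000, one past the
  // largest Smi.)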
5399 | __ CompareImmediate(result_div, 0x4000000000000000); |
5400 | __ b(deopt, EQ); |
5401 | // result_mod <- left - right * result_div. |
5402 | __ msub(result_mod, TMP, result_div, result_mod); |
5403 | __ SmiTag(result_div); |
5404 | __ SmiTag(result_mod); |
5405 | // Correct MOD result: |
5406 | // res = left % right; |
5407 | // if (res < 0) { |
5408 | // if (right < 0) { |
5409 | // res = res - right; |
5410 | // } else { |
5411 | // res = res + right; |
5412 | // } |
5413 | // } |
5414 | compiler::Label done; |
5415 | __ CompareRegisters(result_mod, ZR); |
5416 | __ b(&done, GE); |
5417 | // Result is negative, adjust it. |
5418 | __ CompareRegisters(right, ZR); |
5419 | __ sub(TMP2, result_mod, compiler::Operand(right)); |
5420 | __ add(TMP, result_mod, compiler::Operand(right)); |
5421 | __ csel(result_mod, TMP, TMP2, GE); |
5422 | __ Bind(&done); |
5423 | } |
5424 | |
5425 | LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
5426 | comparison()->InitializeLocationSummary(zone, opt); |
5427 | // Branches don't produce a result. |
5428 | comparison()->locs()->set_out(0, Location::NoLocation()); |
5429 | return comparison()->locs(); |
5430 | } |
5431 | |
5432 | void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5433 | comparison()->EmitBranchCode(compiler, this); |
5434 | } |
5435 | |
5436 | LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone, |
5437 | bool opt) const { |
5438 | const intptr_t kNumInputs = 1; |
5439 | const bool need_mask_temp = IsBitTest(); |
5440 | const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0; |
5441 | LocationSummary* summary = new (zone) |
5442 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5443 | summary->set_in(0, Location::RequiresRegister()); |
5444 | if (!IsNullCheck()) { |
5445 | summary->set_temp(0, Location::RequiresRegister()); |
5446 | if (need_mask_temp) { |
5447 | summary->set_temp(1, Location::RequiresRegister()); |
5448 | } |
5449 | } |
5450 | return summary; |
5451 | } |
5452 | |
5453 | void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler, |
5454 | compiler::Label* deopt) { |
5455 | __ CompareObject(locs()->in(0).reg(), Object::null_object()); |
5456 | ASSERT(IsDeoptIfNull() || IsDeoptIfNotNull()); |
5457 | Condition cond = IsDeoptIfNull() ? EQ : NE; |
5458 | __ b(deopt, cond); |
5459 | } |
5460 | |
5461 | void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler, |
5462 | intptr_t min, |
5463 | intptr_t max, |
5464 | intptr_t mask, |
5465 | compiler::Label* deopt) { |
5466 | Register biased_cid = locs()->temp(0).reg(); |
5467 | __ AddImmediate(biased_cid, -min); |
5468 | __ CompareImmediate(biased_cid, max - min); |
5469 | __ b(deopt, HI); |
5470 | |
5471 | Register bit_reg = locs()->temp(1).reg(); |
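  // Shift a single bit into the position of the biased cid and test it
  // against the mask; EQ (bit clear) means the class is not in the set.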
5472 | __ LoadImmediate(bit_reg, 1); |
5473 | __ lslv(bit_reg, bit_reg, biased_cid); |
5474 | __ TestImmediate(bit_reg, mask); |
5475 | __ b(deopt, EQ); |
5476 | } |
5477 | |
5478 | int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler, |
5479 | int bias, |
5480 | intptr_t cid_start, |
5481 | intptr_t cid_end, |
5482 | bool is_last, |
5483 | compiler::Label* is_ok, |
5484 | compiler::Label* deopt, |
5485 | bool use_near_jump) { |
5486 | Register biased_cid = locs()->temp(0).reg(); |
5487 | Condition no_match, match; |
5488 | if (cid_start == cid_end) { |
5489 | __ CompareImmediate(biased_cid, cid_start - bias); |
5490 | no_match = NE; |
5491 | match = EQ; |
5492 | } else { |
    // For class ID ranges, use a subtract followed by an unsigned
    // comparison to check both ends of the range with one comparison.
5495 | __ AddImmediate(biased_cid, bias - cid_start); |
5496 | bias = cid_start; |
5497 | __ CompareImmediate(biased_cid, cid_end - cid_start); |
5498 | no_match = HI; // Unsigned higher. |
5499 | match = LS; // Unsigned lower or same. |
5500 | } |
5501 | if (is_last) { |
5502 | __ b(deopt, no_match); |
5503 | } else { |
5504 | __ b(is_ok, match); |
5505 | } |
5506 | return bias; |
5507 | } |
5508 | |
5509 | LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone, |
5510 | bool opt) const { |
5511 | const intptr_t kNumInputs = 1; |
5512 | const intptr_t kNumTemps = 0; |
5513 | LocationSummary* summary = new (zone) |
5514 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5515 | summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister() |
5516 | : Location::WritableRegister()); |
5517 | return summary; |
5518 | } |
5519 | |
5520 | void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5521 | Register value = locs()->in(0).reg(); |
5522 | compiler::Label* deopt = |
5523 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass); |
5524 | if (cids_.IsSingleCid()) { |
5525 | __ CompareImmediate(value, Smi::RawValue(cids_.cid_start)); |
5526 | __ b(deopt, NE); |
5527 | } else { |
5528 | __ AddImmediate(value, -Smi::RawValue(cids_.cid_start)); |
5529 | __ CompareImmediate(value, Smi::RawValue(cids_.cid_end - cids_.cid_start)); |
5530 | __ b(deopt, HI); // Unsigned higher. |
5531 | } |
5532 | } |
5533 | |
5534 | LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone, |
5535 | bool opt) const { |
5536 | const intptr_t kNumInputs = 1; |
5537 | const intptr_t kNumTemps = 0; |
5538 | LocationSummary* summary = new (zone) |
5539 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5540 | summary->set_in(0, Location::RequiresRegister()); |
5541 | return summary; |
5542 | } |
5543 | |
5544 | void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5545 | const Register value = locs()->in(0).reg(); |
5546 | compiler::Label* deopt = compiler->AddDeoptStub( |
5547 | deopt_id(), ICData::kDeoptCheckSmi, licm_hoisted_ ? ICData::kHoisted : 0); |
5548 | __ BranchIfNotSmi(value, deopt); |
5549 | } |
5550 | |
5551 | void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5552 | ThrowErrorSlowPathCode* slow_path = |
5553 | new NullErrorSlowPath(this, compiler->CurrentTryIndex()); |
5554 | compiler->AddSlowPathCode(slow_path); |
5555 | |
5556 | Register value_reg = locs()->in(0).reg(); |
  // TODO(dartbug.com/30480): Consider passing the `null` literal as an
  // argument so that it can be allocated in a register.
5559 | __ CompareObject(value_reg, Object::null_object()); |
5560 | __ BranchIf(EQUAL, slow_path->entry_label()); |
5561 | } |
5562 | |
5563 | LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone, |
5564 | bool opt) const { |
5565 | const intptr_t kNumInputs = 2; |
5566 | const intptr_t kNumTemps = 0; |
5567 | LocationSummary* locs = new (zone) |
5568 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5569 | locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length())); |
5570 | locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index())); |
5571 | return locs; |
5572 | } |
5573 | |
5574 | void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5575 | uint32_t flags = generalized_ ? ICData::kGeneralized : 0; |
5576 | flags |= licm_hoisted_ ? ICData::kHoisted : 0; |
5577 | compiler::Label* deopt = |
5578 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags); |
5579 | |
5580 | Location length_loc = locs()->in(kLengthPos); |
5581 | Location index_loc = locs()->in(kIndexPos); |
5582 | |
5583 | const intptr_t index_cid = index()->Type()->ToCid(); |
5584 | if (length_loc.IsConstant() && index_loc.IsConstant()) { |
5585 | // TODO(srdjan): remove this code once failures are fixed. |
5586 | if ((Smi::Cast(length_loc.constant()).Value() > |
5587 | Smi::Cast(index_loc.constant()).Value()) && |
5588 | (Smi::Cast(index_loc.constant()).Value() >= 0)) { |
5589 | // This CheckArrayBoundInstr should have been eliminated. |
5590 | return; |
5591 | } |
5592 | ASSERT((Smi::Cast(length_loc.constant()).Value() <= |
5593 | Smi::Cast(index_loc.constant()).Value()) || |
5594 | (Smi::Cast(index_loc.constant()).Value() < 0)); |
    // Unconditionally deoptimize for constant bounds checks because they
    // only occur when the index is out-of-bounds.
5597 | __ b(deopt); |
5598 | return; |
5599 | } |
5600 | |
5601 | if (index_loc.IsConstant()) { |
5602 | const Register length = length_loc.reg(); |
5603 | const Smi& index = Smi::Cast(index_loc.constant()); |
5604 | __ CompareImmediate(length, static_cast<int64_t>(index.raw())); |
5605 | __ b(deopt, LS); |
5606 | } else if (length_loc.IsConstant()) { |
5607 | const Smi& length = Smi::Cast(length_loc.constant()); |
5608 | const Register index = index_loc.reg(); |
5609 | if (index_cid != kSmiCid) { |
5610 | __ BranchIfNotSmi(index, deopt); |
5611 | } |
5612 | if (length.Value() == Smi::kMaxValue) { |
5613 | __ tst(index, compiler::Operand(index)); |
5614 | __ b(deopt, MI); |
5615 | } else { |
5616 | __ CompareImmediate(index, static_cast<int64_t>(length.raw())); |
5617 | __ b(deopt, CS); |
5618 | } |
5619 | } else { |
5620 | const Register length = length_loc.reg(); |
5621 | const Register index = index_loc.reg(); |
5622 | if (index_cid != kSmiCid) { |
5623 | __ BranchIfNotSmi(index, deopt); |
5624 | } |
5625 | __ CompareRegisters(index, length); |
5626 | __ b(deopt, CS); |
5627 | } |
5628 | } |
5629 | |
5630 | class Int64DivideSlowPath : public ThrowErrorSlowPathCode { |
5631 | public: |
5632 | static const intptr_t kNumberOfArguments = 0; |
5633 | |
5634 | Int64DivideSlowPath(BinaryInt64OpInstr* instruction, |
5635 | Register divisor, |
5636 | Range* divisor_range, |
5637 | Register tmp, |
5638 | Register out, |
5639 | intptr_t try_index) |
5640 | : ThrowErrorSlowPathCode(instruction, |
5641 | kIntegerDivisionByZeroExceptionRuntimeEntry, |
5642 | kNumberOfArguments, |
5643 | try_index), |
5644 | is_mod_(instruction->op_kind() == Token::kMOD), |
5645 | divisor_(divisor), |
5646 | divisor_range_(divisor_range), |
5647 | tmp_(tmp), |
5648 | out_(out), |
5649 | adjust_sign_label_() {} |
5650 | |
5651 | void EmitNativeCode(FlowGraphCompiler* compiler) override { |
5652 | // Handle modulo/division by zero, if needed. Use superclass code. |
5653 | if (has_divide_by_zero()) { |
5654 | ThrowErrorSlowPathCode::EmitNativeCode(compiler); |
5655 | } else { |
      __ Bind(entry_label());  // Not used, but keeps the destructor happy.
5657 | if (compiler::Assembler::EmittingComments()) { |
        __ Comment("slow path %s operation (no throw)", name());
5659 | } |
5660 | } |
5661 | // Adjust modulo for negative sign, optimized for known ranges. |
5662 | // if (divisor < 0) |
5663 | // out -= divisor; |
5664 | // else |
5665 | // out += divisor; |
5666 | if (has_adjust_sign()) { |
5667 | __ Bind(adjust_sign_label()); |
5668 | if (RangeUtils::Overlaps(divisor_range_, -1, 1)) { |
5669 | // General case. |
5670 | __ CompareRegisters(divisor_, ZR); |
5671 | __ sub(tmp_, out_, compiler::Operand(divisor_)); |
5672 | __ add(out_, out_, compiler::Operand(divisor_)); |
5673 | __ csel(out_, tmp_, out_, LT); |
5674 | } else if (divisor_range_->IsPositive()) { |
5675 | // Always positive. |
5676 | __ add(out_, out_, compiler::Operand(divisor_)); |
5677 | } else { |
5678 | // Always negative. |
5679 | __ sub(out_, out_, compiler::Operand(divisor_)); |
5680 | } |
5681 | __ b(exit_label()); |
5682 | } |
5683 | } |
5684 | |
  const char* name() override { return "int64 divide"; }
5686 | |
5687 | bool has_divide_by_zero() { return RangeUtils::CanBeZero(divisor_range_); } |
5688 | |
5689 | bool has_adjust_sign() { return is_mod_; } |
5690 | |
5691 | bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); } |
5692 | |
5693 | compiler::Label* adjust_sign_label() { |
5694 | ASSERT(has_adjust_sign()); |
5695 | return &adjust_sign_label_; |
5696 | } |
5697 | |
5698 | private: |
5699 | bool is_mod_; |
5700 | Register divisor_; |
5701 | Range* divisor_range_; |
5702 | Register tmp_; |
5703 | Register out_; |
5704 | compiler::Label adjust_sign_label_; |
5705 | }; |
5706 | |
5707 | static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler, |
5708 | BinaryInt64OpInstr* instruction, |
5709 | Token::Kind op_kind, |
5710 | Register left, |
5711 | Register right, |
5712 | Register tmp, |
5713 | Register out) { |
5714 | ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV); |
5715 | |
  // Special case 64-bit div/mod by compile-time constant. Note that various
  // special constants (such as powers of two) should have been optimized
  // earlier in the pipeline. Div or mod by zero falls through to the general
  // code path, which raises the exception.
5720 | if (FLAG_optimization_level <= 2) { |
5721 | // We only consider magic operations under O3. |
5722 | } else if (auto c = instruction->right()->definition()->AsConstant()) { |
5723 | if (c->value().IsInteger()) { |
5724 | const int64_t divisor = Integer::Cast(c->value()).AsInt64Value(); |
5725 | if (divisor <= -2 || divisor >= 2) { |
5726 | // For x DIV c or x MOD c: use magic operations. |
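        // Multiply by a precomputed "magic" constant, take the high 64 bits
        // of the product, then fix it up with an add/subtract of the
        // numerator, an arithmetic shift, and a sign correction (the standard
        // signed magic-number division scheme).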
5727 | compiler::Label pos; |
5728 | int64_t magic = 0; |
5729 | int64_t shift = 0; |
5730 | Utils::CalculateMagicAndShiftForDivRem(divisor, &magic, &shift); |
5731 | // Compute tmp = high(magic * numerator). |
5732 | __ LoadImmediate(TMP2, magic); |
5733 | __ smulh(TMP2, TMP2, left); |
5734 | // Compute tmp +/-= numerator. |
5735 | if (divisor > 0 && magic < 0) { |
5736 | __ add(TMP2, TMP2, compiler::Operand(left)); |
5737 | } else if (divisor < 0 && magic > 0) { |
5738 | __ sub(TMP2, TMP2, compiler::Operand(left)); |
5739 | } |
5740 | // Shift if needed. |
5741 | if (shift != 0) { |
5742 | __ add(TMP2, ZR, compiler::Operand(TMP2, ASR, shift)); |
5743 | } |
5744 | // Finalize DIV or MOD. |
5745 | if (op_kind == Token::kTRUNCDIV) { |
5746 | __ sub(out, TMP2, compiler::Operand(TMP2, ASR, 63)); |
5747 | } else { |
5748 | __ sub(TMP2, TMP2, compiler::Operand(TMP2, ASR, 63)); |
5749 | __ LoadImmediate(TMP, divisor); |
5750 | __ msub(out, TMP2, TMP, left); |
5751 | // Compensate for Dart's Euclidean view of MOD. |
5752 | __ CompareRegisters(out, ZR); |
5753 | if (divisor > 0) { |
5754 | __ add(TMP2, out, compiler::Operand(TMP)); |
5755 | } else { |
5756 | __ sub(TMP2, out, compiler::Operand(TMP)); |
5757 | } |
5758 | __ csel(out, TMP2, out, LT); |
5759 | } |
5760 | return; |
5761 | } |
5762 | } |
5763 | } |
5764 | |
5765 | // Prepare a slow path. |
5766 | Range* right_range = instruction->right()->definition()->range(); |
5767 | Int64DivideSlowPath* slow_path = new (Z) Int64DivideSlowPath( |
5768 | instruction, right, right_range, tmp, out, compiler->CurrentTryIndex()); |
5769 | |
5770 | // Handle modulo/division by zero exception on slow path. |
5771 | if (slow_path->has_divide_by_zero()) { |
5772 | __ CompareRegisters(right, ZR); |
5773 | __ b(slow_path->entry_label(), EQ); |
5774 | } |
5775 | |
5776 | // Perform actual operation |
5777 | // out = left % right |
5778 | // or |
5779 | // out = left / right. |
5780 | if (op_kind == Token::kMOD) { |
5781 | __ sdiv(tmp, left, right); |
5782 | __ msub(out, tmp, right, left); |
5783 | // For the % operator, the sdiv instruction does not |
5784 | // quite do what we want. Adjust for sign on slow path. |
5785 | __ CompareRegisters(out, ZR); |
5786 | __ b(slow_path->adjust_sign_label(), LT); |
5787 | } else { |
5788 | __ sdiv(out, left, right); |
5789 | } |
5790 | |
5791 | if (slow_path->is_needed()) { |
5792 | __ Bind(slow_path->exit_label()); |
5793 | compiler->AddSlowPathCode(slow_path); |
5794 | } |
5795 | } |
5796 | |
5797 | LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone, |
5798 | bool opt) const { |
5799 | switch (op_kind()) { |
5800 | case Token::kMOD: |
5801 | case Token::kTRUNCDIV: { |
5802 | const intptr_t kNumInputs = 2; |
5803 | const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0; |
5804 | LocationSummary* summary = new (zone) LocationSummary( |
5805 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
5806 | summary->set_in(0, Location::RequiresRegister()); |
5807 | summary->set_in(1, Location::RequiresRegister()); |
5808 | summary->set_out(0, Location::RequiresRegister()); |
5809 | if (kNumTemps == 1) { |
5810 | summary->set_temp(0, Location::RequiresRegister()); |
5811 | } |
5812 | return summary; |
5813 | } |
5814 | default: { |
5815 | const intptr_t kNumInputs = 2; |
5816 | const intptr_t kNumTemps = 0; |
5817 | LocationSummary* summary = new (zone) LocationSummary( |
5818 | zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5819 | summary->set_in(0, Location::RequiresRegister()); |
5820 | summary->set_in(1, LocationRegisterOrConstant(right())); |
5821 | summary->set_out(0, Location::RequiresRegister()); |
5822 | return summary; |
5823 | } |
5824 | } |
5825 | } |
5826 | |
5827 | void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5828 | ASSERT(!can_overflow()); |
5829 | ASSERT(!CanDeoptimize()); |
5830 | |
5831 | const Register left = locs()->in(0).reg(); |
5832 | const Location right = locs()->in(1); |
5833 | const Register out = locs()->out(0).reg(); |
5834 | |
5835 | if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) { |
5836 | Register tmp = |
5837 | (op_kind() == Token::kMOD) ? locs()->temp(0).reg() : kNoRegister; |
5838 | EmitInt64ModTruncDiv(compiler, this, op_kind(), left, right.reg(), tmp, |
5839 | out); |
5840 | return; |
5841 | } else if (op_kind() == Token::kMUL) { |
5842 | Register r = TMP; |
5843 | if (right.IsConstant()) { |
5844 | ConstantInstr* constant_instr = right.constant_instruction(); |
5845 | const int64_t value = |
5846 | constant_instr->GetUnboxedSignedIntegerConstantValue(); |
5847 | __ LoadImmediate(r, value); |
5848 | } else { |
5849 | r = right.reg(); |
5850 | } |
5851 | __ mul(out, left, r); |
5852 | return; |
5853 | } |
5854 | |
5855 | if (right.IsConstant()) { |
5856 | ConstantInstr* constant_instr = right.constant_instruction(); |
5857 | const int64_t value = |
5858 | constant_instr->GetUnboxedSignedIntegerConstantValue(); |
5859 | switch (op_kind()) { |
5860 | case Token::kADD: |
5861 | __ AddImmediate(out, left, value); |
5862 | break; |
5863 | case Token::kSUB: |
5864 | __ AddImmediate(out, left, -value); |
5865 | break; |
5866 | case Token::kBIT_AND: |
5867 | __ AndImmediate(out, left, value); |
5868 | break; |
5869 | case Token::kBIT_OR: |
5870 | __ OrImmediate(out, left, value); |
5871 | break; |
5872 | case Token::kBIT_XOR: |
5873 | __ XorImmediate(out, left, value); |
5874 | break; |
5875 | default: |
5876 | UNREACHABLE(); |
5877 | } |
5878 | } else { |
5879 | compiler::Operand r = compiler::Operand(right.reg()); |
5880 | switch (op_kind()) { |
5881 | case Token::kADD: |
5882 | __ add(out, left, r); |
5883 | break; |
5884 | case Token::kSUB: |
5885 | __ sub(out, left, r); |
5886 | break; |
5887 | case Token::kBIT_AND: |
5888 | __ and_(out, left, r); |
5889 | break; |
5890 | case Token::kBIT_OR: |
5891 | __ orr(out, left, r); |
5892 | break; |
5893 | case Token::kBIT_XOR: |
5894 | __ eor(out, left, r); |
5895 | break; |
5896 | default: |
5897 | UNREACHABLE(); |
5898 | } |
5899 | } |
5900 | } |
5901 | |
5902 | static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler, |
5903 | Token::Kind op_kind, |
5904 | Register out, |
5905 | Register left, |
5906 | const Object& right) { |
5907 | const int64_t shift = Integer::Cast(right).AsInt64Value(); |
5908 | ASSERT(shift >= 0); |
5909 | switch (op_kind) { |
5910 | case Token::kSHR: { |
5911 | __ AsrImmediate(out, left, |
5912 | Utils::Minimum<int64_t>(shift, kBitsPerWord - 1)); |
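// Clamping the count is safe: an arithmetic right shift by 63 already
// fills the result with copies of the sign bit, so any larger count
// produces the same value (and the immediate form only encodes 0..63).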
5913 | break; |
5914 | } |
5915 | case Token::kSHL: { |
5916 | ASSERT(shift < 64); |
5917 | __ LslImmediate(out, left, shift); |
5918 | break; |
5919 | } |
5920 | default: |
5921 | UNREACHABLE(); |
5922 | } |
5923 | } |
5924 | |
5925 | static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler, |
5926 | Token::Kind op_kind, |
5927 | Register out, |
5928 | Register left, |
5929 | Register right) { |
5930 | switch (op_kind) { |
5931 | case Token::kSHR: { |
5932 | __ asrv(out, left, right); |
5933 | break; |
5934 | } |
5935 | case Token::kSHL: { |
5936 | __ lslv(out, left, right); |
5937 | break; |
5938 | } |
5939 | default: |
5940 | UNREACHABLE(); |
5941 | } |
5942 | } |
5943 | |
5944 | static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler, |
5945 | Token::Kind op_kind, |
5946 | Register out, |
5947 | Register left, |
5948 | const Object& right) { |
5949 | const int64_t shift = Integer::Cast(right).AsInt64Value(); |
5950 | ASSERT(shift >= 0); |
5951 | if (shift >= 32) { |
5952 | __ LoadImmediate(out, 0); |
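// A logical 32-bit shift by 32 or more always yields zero, so materialize
// the zero directly instead of emitting an out-of-range shift.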
5953 | } else { |
5954 | switch (op_kind) { |
5955 | case Token::kSHR: |
5956 | __ LsrImmediate(out, left, shift, kWord); |
5957 | break; |
5958 | case Token::kSHL: |
5959 | __ LslImmediate(out, left, shift, kWord); |
5960 | break; |
5961 | default: |
5962 | UNREACHABLE(); |
5963 | } |
5964 | } |
5965 | } |
5966 | |
5967 | static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler, |
5968 | Token::Kind op_kind, |
5969 | Register out, |
5970 | Register left, |
5971 | Register right) { |
5972 | switch (op_kind) { |
5973 | case Token::kSHR: |
5974 | __ lsrvw(out, left, right); |
5975 | break; |
5976 | case Token::kSHL: |
5977 | __ lslvw(out, left, right); |
5978 | break; |
5979 | default: |
5980 | UNREACHABLE(); |
5981 | } |
5982 | } |
5983 | |
5984 | class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode { |
5985 | public: |
5986 | static const intptr_t kNumberOfArguments = 0; |
5987 | |
5988 | ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction, intptr_t try_index) |
5989 | : ThrowErrorSlowPathCode(instruction, |
5990 | kArgumentErrorUnboxedInt64RuntimeEntry, |
5991 | kNumberOfArguments, |
5992 | try_index) {} |
5993 | |
5994 | const char* name() override { return "int64 shift"; }
5995 | |
5996 | void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override { |
5997 | const Register left = instruction()->locs()->in(0).reg(); |
5998 | const Register right = instruction()->locs()->in(1).reg(); |
5999 | const Register out = instruction()->locs()->out(0).reg(); |
6000 | ASSERT((out != left) && (out != right)); |
6001 | |
6002 | compiler::Label throw_error; |
6003 | __ tbnz(&throw_error, right, kBitsPerWord - 1); |
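// On entry the count is either negative (bit 63 set: throw below) or >= 64;
// in the latter case the result is still well defined: an arithmetic right
// shift leaves only copies of the sign bit and a left shift produces zero.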
6004 | |
6005 | switch (instruction()->AsShiftInt64Op()->op_kind()) { |
6006 | case Token::kSHR: |
6007 | __ AsrImmediate(out, left, kBitsPerWord - 1); |
6008 | break; |
6009 | case Token::kSHL: |
6010 | __ mov(out, ZR); |
6011 | break; |
6012 | default: |
6013 | UNREACHABLE(); |
6014 | } |
6015 | __ b(exit_label()); |
6016 | |
6017 | __ Bind(&throw_error); |
6018 | |
6019 | // The unboxed int64 value can't be passed directly to the runtime call, as
6020 | // all arguments are expected to be tagged (boxed).
6021 | // It is therefore passed through a dedicated slot in Thread.
6022 | // TODO(dartbug.com/33549): Clean this up when unboxed values
6023 | // can be passed as arguments.
6024 | __ str(right, |
6025 | compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset())); |
6026 | } |
6027 | }; |
6028 | |
6029 | LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone, |
6030 | bool opt) const { |
6031 | const intptr_t kNumInputs = 2; |
6032 | const intptr_t kNumTemps = 0; |
6033 | LocationSummary* summary = new (zone) LocationSummary( |
6034 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
6035 | summary->set_in(0, Location::RequiresRegister()); |
6036 | summary->set_in(1, RangeUtils::IsPositive(shift_range()) |
6037 | ? LocationRegisterOrConstant(right()) |
6038 | : Location::RequiresRegister()); |
6039 | summary->set_out(0, Location::RequiresRegister()); |
6040 | return summary; |
6041 | } |
6042 | |
6043 | void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6044 | const Register left = locs()->in(0).reg(); |
6045 | const Register out = locs()->out(0).reg(); |
6046 | ASSERT(!can_overflow()); |
6047 | |
6048 | if (locs()->in(1).IsConstant()) { |
6049 | EmitShiftInt64ByConstant(compiler, op_kind(), out, left, |
6050 | locs()->in(1).constant()); |
6051 | } else { |
6052 | // Code for a variable shift amount (or constant that throws). |
6053 | Register shift = locs()->in(1).reg(); |
6054 | |
6055 | // Jump to a slow path if shift is larger than 63 or less than 0. |
6056 | ShiftInt64OpSlowPath* slow_path = NULL; |
6057 | if (!IsShiftCountInRange()) { |
6058 | slow_path = |
6059 | new (Z) ShiftInt64OpSlowPath(this, compiler->CurrentTryIndex()); |
6060 | compiler->AddSlowPathCode(slow_path); |
6061 | __ CompareImmediate(shift, kShiftCountLimit); |
6062 | __ b(slow_path->entry_label(), HI); |
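// The unsigned comparison also sends negative counts (which look like very
// large unsigned values) to the slow path.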
6063 | } |
6064 | |
6065 | EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift); |
6066 | |
6067 | if (slow_path != NULL) { |
6068 | __ Bind(slow_path->exit_label()); |
6069 | } |
6070 | } |
6071 | } |
6072 | |
6073 | LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary( |
6074 | Zone* zone, |
6075 | bool opt) const { |
6076 | const intptr_t kNumInputs = 2; |
6077 | const intptr_t kNumTemps = 0; |
6078 | LocationSummary* summary = new (zone) |
6079 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6080 | summary->set_in(0, Location::RequiresRegister()); |
6081 | summary->set_in(1, LocationRegisterOrSmiConstant(right())); |
6082 | summary->set_out(0, Location::RequiresRegister()); |
6083 | return summary; |
6084 | } |
6085 | |
6086 | void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6087 | const Register left = locs()->in(0).reg(); |
6088 | const Register out = locs()->out(0).reg(); |
6089 | ASSERT(!can_overflow()); |
6090 | |
6091 | if (locs()->in(1).IsConstant()) { |
6092 | EmitShiftInt64ByConstant(compiler, op_kind(), out, left, |
6093 | locs()->in(1).constant()); |
6094 | } else { |
6095 | // Code for a variable shift amount. |
6096 | Register shift = locs()->in(1).reg(); |
6097 | |
6098 | // Untag shift count. |
6099 | __ SmiUntag(TMP, shift); |
6100 | shift = TMP; |
6101 | |
6102 | // Deopt if shift is larger than 63 or less than 0 (or not a smi). |
6103 | if (!IsShiftCountInRange()) { |
6104 | ASSERT(CanDeoptimize()); |
6105 | compiler::Label* deopt = |
6106 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op); |
6107 | |
6108 | __ CompareImmediate(shift, kShiftCountLimit); |
6109 | __ b(deopt, HI); |
6110 | } |
6111 | |
6112 | EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift); |
6113 | } |
6114 | } |
6115 | |
6116 | class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode { |
6117 | public: |
6118 | static const intptr_t kNumberOfArguments = 0; |
6119 | |
6120 | ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction, intptr_t try_index) |
6121 | : ThrowErrorSlowPathCode(instruction, |
6122 | kArgumentErrorUnboxedInt64RuntimeEntry, |
6123 | kNumberOfArguments, |
6124 | try_index) {} |
6125 | |
6126 | const char* name() override { return "uint32 shift"; }
6127 | |
6128 | void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override { |
6129 | const Register right = instruction()->locs()->in(1).reg(); |
6130 | |
6131 | // The unboxed int64 value can't be passed directly to the runtime call, as
6132 | // all arguments are expected to be tagged (boxed).
6133 | // It is therefore passed through a dedicated slot in Thread.
6134 | // TODO(dartbug.com/33549): Clean this up when unboxed values
6135 | // can be passed as arguments.
6136 | __ str(right, |
6137 | compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset())); |
6138 | } |
6139 | }; |
6140 | |
6141 | LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone, |
6142 | bool opt) const { |
6143 | const intptr_t kNumInputs = 2; |
6144 | const intptr_t kNumTemps = 0; |
6145 | LocationSummary* summary = new (zone) LocationSummary( |
6146 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
6147 | summary->set_in(0, Location::RequiresRegister()); |
6148 | summary->set_in(1, RangeUtils::IsPositive(shift_range()) |
6149 | ? LocationRegisterOrConstant(right()) |
6150 | : Location::RequiresRegister()); |
6151 | summary->set_out(0, Location::RequiresRegister()); |
6152 | return summary; |
6153 | } |
6154 | |
6155 | void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6156 | Register left = locs()->in(0).reg(); |
6157 | Register out = locs()->out(0).reg(); |
6158 | |
6159 | if (locs()->in(1).IsConstant()) { |
6160 | EmitShiftUint32ByConstant(compiler, op_kind(), out, left, |
6161 | locs()->in(1).constant()); |
6162 | } else { |
6163 | // Code for a variable shift amount (or constant that throws). |
6164 | const Register right = locs()->in(1).reg(); |
6165 | const bool shift_count_in_range = |
6166 | IsShiftCountInRange(kUint32ShiftCountLimit); |
6167 | |
6168 | // Jump to a slow path if shift count is negative. |
6169 | if (!shift_count_in_range) { |
6170 | ShiftUint32OpSlowPath* slow_path = |
6171 | new (Z) ShiftUint32OpSlowPath(this, compiler->CurrentTryIndex()); |
6172 | compiler->AddSlowPathCode(slow_path); |
6173 | |
6174 | __ tbnz(slow_path->entry_label(), right, kBitsPerWord - 1); |
6175 | } |
6176 | |
6177 | EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right); |
6178 | |
6179 | if (!shift_count_in_range) { |
6180 | // If shift value is > 31, return zero. |
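// (The 32-bit register shift above uses only the low five bits of the
// count, so without this fix-up a count of, say, 33 would act like 1.)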
6181 | __ CompareImmediate(right, 31); |
6182 | __ csel(out, out, ZR, LE); |
6183 | } |
6184 | } |
6185 | } |
6186 | |
6187 | LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary( |
6188 | Zone* zone, |
6189 | bool opt) const { |
6190 | const intptr_t kNumInputs = 2; |
6191 | const intptr_t kNumTemps = 0; |
6192 | LocationSummary* summary = new (zone) |
6193 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6194 | summary->set_in(0, Location::RequiresRegister()); |
6195 | summary->set_in(1, LocationRegisterOrSmiConstant(right())); |
6196 | summary->set_out(0, Location::RequiresRegister()); |
6197 | return summary; |
6198 | } |
6199 | |
6200 | void SpeculativeShiftUint32OpInstr::EmitNativeCode( |
6201 | FlowGraphCompiler* compiler) { |
6202 | Register left = locs()->in(0).reg(); |
6203 | Register out = locs()->out(0).reg(); |
6204 | |
6205 | if (locs()->in(1).IsConstant()) { |
6206 | EmitShiftUint32ByConstant(compiler, op_kind(), out, left, |
6207 | locs()->in(1).constant()); |
6208 | } else { |
6209 | Register right = locs()->in(1).reg(); |
6210 | const bool shift_count_in_range = |
6211 | IsShiftCountInRange(kUint32ShiftCountLimit); |
6212 | |
6213 | __ SmiUntag(TMP, right); |
6214 | right = TMP; |
6215 | |
6216 | // Check whether the shift count might be negative.
6217 | if (!shift_count_in_range) { |
6218 | // Deoptimize if shift count is negative. |
6219 | ASSERT(CanDeoptimize()); |
6220 | compiler::Label* deopt = |
6221 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op); |
6222 | |
6223 | __ tbnz(deopt, right, kBitsPerWord - 1); |
6224 | } |
6225 | |
6226 | EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right); |
6227 | |
6228 | if (!shift_count_in_range) { |
6229 | // If shift value is > 31, return zero. |
6230 | __ CompareImmediate(right, 31); |
6231 | __ csel(out, out, ZR, LE); |
6232 | } |
6233 | } |
6234 | } |
6235 | |
6236 | LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone, |
6237 | bool opt) const { |
6238 | const intptr_t kNumInputs = 1; |
6239 | const intptr_t kNumTemps = 0; |
6240 | LocationSummary* summary = new (zone) |
6241 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6242 | summary->set_in(0, Location::RequiresRegister()); |
6243 | summary->set_out(0, Location::RequiresRegister()); |
6244 | return summary; |
6245 | } |
6246 | |
6247 | void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6248 | const Register left = locs()->in(0).reg(); |
6249 | const Register out = locs()->out(0).reg(); |
6250 | switch (op_kind()) { |
6251 | case Token::kBIT_NOT: |
6252 | __ mvn(out, left); |
6253 | break; |
6254 | case Token::kNEGATE: |
6255 | __ sub(out, ZR, compiler::Operand(left)); |
6256 | break; |
6257 | default: |
6258 | UNREACHABLE(); |
6259 | } |
6260 | } |
6261 | |
6262 | LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone, |
6263 | bool opt) const { |
6264 | const intptr_t kNumInputs = 2; |
6265 | const intptr_t kNumTemps = 0; |
6266 | LocationSummary* summary = new (zone) |
6267 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6268 | summary->set_in(0, Location::RequiresRegister()); |
6269 | summary->set_in(1, Location::RequiresRegister()); |
6270 | summary->set_out(0, Location::RequiresRegister()); |
6271 | return summary; |
6272 | } |
6273 | |
6274 | void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6275 | Register left = locs()->in(0).reg(); |
6276 | Register right = locs()->in(1).reg(); |
6277 | compiler::Operand r = compiler::Operand(right); |
6278 | Register out = locs()->out(0).reg(); |
6279 | switch (op_kind()) { |
6280 | case Token::kBIT_AND: |
6281 | __ and_(out, left, r); |
6282 | break; |
6283 | case Token::kBIT_OR: |
6284 | __ orr(out, left, r); |
6285 | break; |
6286 | case Token::kBIT_XOR: |
6287 | __ eor(out, left, r); |
6288 | break; |
6289 | case Token::kADD: |
6290 | __ addw(out, left, r); |
6291 | break; |
6292 | case Token::kSUB: |
6293 | __ subw(out, left, r); |
6294 | break; |
6295 | case Token::kMUL: |
6296 | __ mulw(out, left, right); |
6297 | break; |
6298 | default: |
6299 | UNREACHABLE(); |
6300 | } |
6301 | } |
6302 | |
6303 | LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone, |
6304 | bool opt) const { |
6305 | const intptr_t kNumInputs = 1; |
6306 | const intptr_t kNumTemps = 0; |
6307 | LocationSummary* summary = new (zone) |
6308 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6309 | summary->set_in(0, Location::RequiresRegister()); |
6310 | summary->set_out(0, Location::RequiresRegister()); |
6311 | return summary; |
6312 | } |
6313 | |
6314 | void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6315 | Register left = locs()->in(0).reg(); |
6316 | Register out = locs()->out(0).reg(); |
6317 | |
6318 | ASSERT(op_kind() == Token::kBIT_NOT); |
6319 | __ mvnw(out, left); |
6320 | } |
6321 | |
6322 | DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr) |
6323 | |
6324 | LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone, |
6325 | bool opt) const { |
6326 | const intptr_t kNumInputs = 1; |
6327 | const intptr_t kNumTemps = 0; |
6328 | LocationSummary* summary = new (zone) |
6329 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6330 | if (from() == kUntagged || to() == kUntagged) { |
6331 | ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) || |
6332 | (from() == kUnboxedIntPtr && to() == kUntagged)); |
6333 | ASSERT(!CanDeoptimize()); |
6334 | } else if (from() == kUnboxedInt64) { |
6335 | ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32); |
6336 | } else if (to() == kUnboxedInt64) { |
6337 | ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32); |
6338 | } else { |
6339 | ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32); |
6340 | ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32); |
6341 | } |
6342 | summary->set_in(0, Location::RequiresRegister()); |
6343 | if (CanDeoptimize()) { |
6344 | summary->set_out(0, Location::RequiresRegister()); |
6345 | } else { |
6346 | summary->set_out(0, Location::SameAsFirstInput()); |
6347 | } |
6348 | return summary; |
6349 | } |
6350 | |
6351 | void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6352 | ASSERT(from() != to()); // We don't convert from a representation to itself. |
6353 | |
6354 | const bool is_nop_conversion = |
6355 | (from() == kUntagged && to() == kUnboxedIntPtr) || |
6356 | (from() == kUnboxedIntPtr && to() == kUntagged); |
6357 | if (is_nop_conversion) { |
6358 | ASSERT(locs()->in(0).reg() == locs()->out(0).reg()); |
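// Untagged and unboxed word-sized values share the same machine
// representation, so no code is needed; the location summary pinned the
// output to the input register (SameAsFirstInput).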
6359 | return; |
6360 | } |
6361 | |
6362 | const Register value = locs()->in(0).reg(); |
6363 | const Register out = locs()->out(0).reg(); |
6364 | compiler::Label* deopt = |
6365 | !CanDeoptimize() |
6366 | ? NULL |
6367 | : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); |
6368 | if (from() == kUnboxedInt32 && to() == kUnboxedUint32) { |
6369 | if (CanDeoptimize()) { |
6370 | __ tbnz(deopt, value, |
6371 | 31);  // If the sign bit is set, the value won't fit in a uint32.
6372 | } |
6373 | if (out != value) { |
6374 | __ mov(out, value); // For positive values the bits are the same. |
6375 | } |
6376 | } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) { |
6377 | if (CanDeoptimize()) { |
6378 | __ tbnz(deopt, value, |
6379 | 31);  // If the high bit is set, the value won't fit in an int32.
6380 | } |
6381 | if (out != value) { |
6382 | __ mov(out, value);  // For 31-bit values the bits are the same.
6383 | } |
6384 | } else if (from() == kUnboxedInt64) { |
6385 | if (to() == kUnboxedInt32) { |
6386 | if (is_truncating() || out != value) { |
6387 | __ sxtw(out, value); // Signed extension 64->32. |
6388 | } |
6389 | } else { |
6390 | ASSERT(to() == kUnboxedUint32); |
6391 | if (is_truncating() || out != value) { |
6392 | __ uxtw(out, value); // Unsigned extension 64->32. |
6393 | } |
6394 | } |
6395 | if (CanDeoptimize()) { |
6396 | ASSERT(to() == kUnboxedInt32); |
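// The sign extension above reproduces the original value only if it fits
// in an int32; otherwise the registers differ and we deoptimize.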
6397 | __ cmp(out, compiler::Operand(value)); |
6398 | __ b(deopt, NE); // Value cannot be held in Int32, deopt. |
6399 | } |
6400 | } else if (to() == kUnboxedInt64) { |
6401 | if (from() == kUnboxedUint32) { |
6402 | __ uxtw(out, value); |
6403 | } else { |
6404 | ASSERT(from() == kUnboxedInt32); |
6405 | __ sxtw(out, value); // Signed extension 32->64. |
6406 | } |
6407 | } else { |
6408 | UNREACHABLE(); |
6409 | } |
6410 | } |
6411 | |
6412 | LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
6413 | return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); |
6414 | } |
6415 | |
6416 | void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6417 | __ Stop(message()); |
6418 | } |
6419 | |
6420 | void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6421 | BlockEntryInstr* entry = normal_entry(); |
6422 | if (entry != nullptr) { |
6423 | if (!compiler->CanFallThroughTo(entry)) { |
6424 | FATAL("Checked function entry must have no offset");
6425 | } |
6426 | } else { |
6427 | entry = osr_entry(); |
6428 | if (!compiler->CanFallThroughTo(entry)) { |
6429 | __ b(compiler->GetJumpLabel(entry)); |
6430 | } |
6431 | } |
6432 | } |
6433 | |
6434 | LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
6435 | return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); |
6436 | } |
6437 | |
6438 | void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6439 | if (!compiler->is_optimizing()) { |
6440 | if (FLAG_reorder_basic_blocks) { |
6441 | compiler->EmitEdgeCounter(block()->preorder_number()); |
6442 | } |
6443 | // Add a deoptimization descriptor for deoptimizing instructions that |
6444 | // may be inserted before this instruction. |
6445 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(), |
6446 | TokenPosition::kNoSource); |
6447 | } |
6448 | if (HasParallelMove()) { |
6449 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
6450 | } |
6451 | |
6452 | // We can fall through if the successor is the next block in the list. |
6453 | // Otherwise, we need a jump. |
6454 | if (!compiler->CanFallThroughTo(successor())) { |
6455 | __ b(compiler->GetJumpLabel(successor())); |
6456 | } |
6457 | } |
6458 | |
6459 | LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone, |
6460 | bool opt) const { |
6461 | const intptr_t kNumInputs = 1; |
6462 | const intptr_t kNumTemps = 1; |
6463 | |
6464 | LocationSummary* summary = new (zone) |
6465 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6466 | |
6467 | summary->set_in(0, Location::RequiresRegister()); |
6468 | summary->set_temp(0, Location::RequiresRegister()); |
6469 | |
6470 | return summary; |
6471 | } |
6472 | |
6473 | void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6474 | Register target_address_reg = locs()->temp_slot(0)->reg(); |
6475 | |
6476 | // Load code entry point. |
6477 | const intptr_t entry_offset = __ CodeSize(); |
6478 | if (Utils::IsInt(21, -entry_offset)) { |
6479 | __ adr(target_address_reg, compiler::Immediate(-entry_offset)); |
6480 | } else { |
6481 | __ adr(target_address_reg, compiler::Immediate(0)); |
6482 | __ AddImmediate(target_address_reg, -entry_offset); |
6483 | } |
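// target_address_reg now holds the address of the first instruction of the
// current code object: adr is PC-relative and entry_offset is the distance
// of this instruction from that start.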
6484 | |
6485 | // Add the offset. |
6486 | Register offset_reg = locs()->in(0).reg(); |
6487 | compiler::Operand offset_opr = |
6488 | (offset()->definition()->representation() == kTagged) |
6489 | ? compiler::Operand(offset_reg, ASR, kSmiTagSize) |
6490 | : compiler::Operand(offset_reg); |
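// A tagged offset is a Smi, so shift out its one-bit tag while adding.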
6491 | __ add(target_address_reg, target_address_reg, offset_opr); |
6492 | |
6493 | // Jump to the absolute address. |
6494 | __ br(target_address_reg); |
6495 | } |
6496 | |
6497 | LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone, |
6498 | bool opt) const { |
6499 | const intptr_t kNumInputs = 2; |
6500 | const intptr_t kNumTemps = 0; |
6501 | if (needs_number_check()) { |
6502 | LocationSummary* locs = new (zone) |
6503 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
6504 | locs->set_in(0, Location::RegisterLocation(R0)); |
6505 | locs->set_in(1, Location::RegisterLocation(R1)); |
6506 | locs->set_out(0, Location::RegisterLocation(R0)); |
6507 | return locs; |
6508 | } |
6509 | LocationSummary* locs = new (zone) |
6510 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6511 | locs->set_in(0, LocationRegisterOrConstant(left())); |
6512 | // Only one of the inputs can be a constant. Choose register if the first one |
6513 | // is a constant. |
6514 | locs->set_in(1, locs->in(0).IsConstant() |
6515 | ? Location::RequiresRegister() |
6516 | : LocationRegisterOrConstant(right())); |
6517 | locs->set_out(0, Location::RequiresRegister()); |
6518 | return locs; |
6519 | } |
6520 | |
6521 | Condition StrictCompareInstr::EmitComparisonCodeRegConstant( |
6522 | FlowGraphCompiler* compiler, |
6523 | BranchLabels labels, |
6524 | Register reg, |
6525 | const Object& obj) { |
6526 | Condition orig_cond = (kind() == Token::kEQ_STRICT) ? EQ : NE; |
6527 | if (!needs_number_check() && compiler::target::IsSmi(obj) && |
6528 | compiler::target::ToRawSmi(obj) == 0 && |
6529 | CanUseCbzTbzForComparison(compiler, reg, orig_cond, labels)) { |
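// A strict compare against the Smi zero is just a zero test on the tagged
// value (the Smi tag is 0), so cbz/tbz style branches can be used directly.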
6530 | EmitCbzTbz(reg, compiler, orig_cond, labels); |
6531 | return kInvalidCondition; |
6532 | } else { |
6533 | return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(), |
6534 | token_pos(), deopt_id()); |
6535 | } |
6536 | } |
6537 | |
6538 | void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6539 | compiler::Label is_true, is_false; |
6540 | BranchLabels labels = {&is_true, &is_false, &is_false}; |
6541 | Condition true_condition = EmitComparisonCode(compiler, labels); |
6542 | const Register result = this->locs()->out(0).reg(); |
6543 | |
6544 | // TODO(dartbug.com/29908): Use csel here for better branch prediction? |
6545 | if (true_condition != kInvalidCondition) { |
6546 | EmitBranchOnCondition(compiler, true_condition, labels); |
6547 | } |
6548 | compiler::Label done; |
6549 | __ Bind(&is_false); |
6550 | __ LoadObject(result, Bool::False()); |
6551 | __ b(&done); |
6552 | __ Bind(&is_true); |
6553 | __ LoadObject(result, Bool::True()); |
6554 | __ Bind(&done); |
6555 | } |
6556 | |
6557 | void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
6558 | BranchInstr* branch) { |
6559 | BranchLabels labels = compiler->CreateBranchLabels(branch); |
6560 | Condition true_condition = EmitComparisonCode(compiler, labels); |
6561 | if (true_condition != kInvalidCondition) { |
6562 | EmitBranchOnCondition(compiler, true_condition, labels); |
6563 | } |
6564 | } |
6565 | |
6566 | LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone, |
6567 | bool opt) const { |
6568 | return LocationSummary::Make(zone, 1, Location::RequiresRegister(), |
6569 | LocationSummary::kNoCall); |
6570 | } |
6571 | |
6572 | void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6573 | const Register input = locs()->in(0).reg(); |
6574 | const Register result = locs()->out(0).reg(); |
6575 | |
6576 | if (value()->Type()->ToCid() == kBoolCid) { |
6577 | __ eori( |
6578 | result, input, |
6579 | compiler::Immediate(compiler::target::ObjectAlignment::kBoolValueMask)); |
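// True and False are allocated such that their tagged pointers differ only
// in the kBoolValueMask bit, so a single XOR flips one into the other.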
6580 | } else { |
6581 | __ LoadObject(result, Bool::True()); |
6582 | __ LoadObject(TMP, Bool::False()); |
6583 | __ CompareRegisters(result, input); |
6584 | __ csel(result, TMP, result, EQ); |
6585 | } |
6586 | } |
6587 | |
6588 | LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone, |
6589 | bool opt) const { |
6590 | const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0; |
6591 | const intptr_t kNumTemps = 0; |
6592 | LocationSummary* locs = new (zone) |
6593 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
6594 | if (type_arguments() != nullptr) { |
6595 | locs->set_in(0, |
6596 | Location::RegisterLocation(kAllocationStubTypeArgumentsReg)); |
6597 | } |
6598 | locs->set_out(0, Location::RegisterLocation(R0)); |
6599 | return locs; |
6600 | } |
6601 | |
6602 | void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6603 | if (type_arguments() != nullptr) { |
6604 | TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info(); |
6605 | if (type_usage_info != nullptr) { |
6606 | RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_, |
6607 | type_arguments()->definition()); |
6608 | } |
6609 | } |
6610 | const Code& stub = Code::ZoneHandle( |
6611 | compiler->zone(), StubCode::GetAllocationStubForClass(cls())); |
6612 | compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther, |
6613 | locs()); |
6614 | } |
6615 | |
6616 | void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6617 | #ifdef PRODUCT |
6618 | UNREACHABLE(); |
6619 | #else |
6620 | ASSERT(!compiler->is_optimizing()); |
6621 | __ BranchLinkPatchable(StubCode::DebugStepCheck()); |
6622 | compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, token_pos()); |
6623 | compiler->RecordSafepoint(locs()); |
6624 | #endif |
6625 | } |
6626 | |
6627 | } // namespace dart |
6628 | |
6629 | #endif // defined(TARGET_ARCH_ARM64) |
6630 | |