1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "platform/globals.h" |
6 | #include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32. |
7 | #if defined(TARGET_ARCH_IA32) |
8 | |
9 | #include "vm/compiler/backend/il.h" |
10 | |
11 | #include "vm/compiler/backend/flow_graph.h" |
12 | #include "vm/compiler/backend/flow_graph_compiler.h" |
13 | #include "vm/compiler/backend/locations.h" |
14 | #include "vm/compiler/backend/locations_helpers.h" |
15 | #include "vm/compiler/backend/range_analysis.h" |
16 | #include "vm/compiler/ffi/native_calling_convention.h" |
17 | #include "vm/compiler/frontend/flow_graph_builder.h" |
18 | #include "vm/compiler/jit/compiler.h" |
19 | #include "vm/dart_entry.h" |
20 | #include "vm/instructions.h" |
21 | #include "vm/object_store.h" |
22 | #include "vm/parser.h" |
23 | #include "vm/stack_frame.h" |
24 | #include "vm/stub_code.h" |
25 | #include "vm/symbols.h" |
26 | |
27 | #define __ compiler->assembler()-> |
28 | #define Z (compiler->zone()) |
29 | |
30 | namespace dart { |
31 | |
// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in the fixed register EAX.
34 | LocationSummary* Instruction::MakeCallSummary(Zone* zone, |
35 | const Instruction* instr, |
36 | LocationSummary* locs) { |
37 | // This is unused on ia32. |
38 | ASSERT(locs == nullptr); |
39 | const intptr_t kNumInputs = 0; |
40 | const intptr_t kNumTemps = 0; |
41 | LocationSummary* result = new (zone) |
42 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
43 | result->set_out(0, Location::RegisterLocation(EAX)); |
44 | return result; |
45 | } |
46 | |
47 | DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) { |
48 | ASSERT(instr->RequiredInputRepresentation(0) == kTagged); // It is a Smi. |
49 | ASSERT(instr->representation() == kTagged); |
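  // The index is a tagged Smi, i.e. the element index shifted left by one
  // (see the kSmiTag/kSmiTagSize asserts below), so scaling it with TIMES_2
  // yields untagged index * 4 == untagged index * kWordSize.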
50 | __ movl(out, compiler::Address(instr->base_reg(), index, TIMES_2, |
51 | instr->offset())); |
52 | |
53 | ASSERT(kSmiTag == 0); |
54 | ASSERT(kSmiTagSize == 1); |
55 | } |
56 | |
57 | DEFINE_BACKEND(StoreIndexedUnsafe, |
58 | (NoLocation, Register index, Register value)) { |
59 | ASSERT(instr->RequiredInputRepresentation( |
60 | StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi. |
61 | __ movl(compiler::Address(instr->base_reg(), index, TIMES_2, instr->offset()), |
62 | value); |
63 | |
64 | ASSERT(kSmiTag == 0); |
65 | ASSERT(kSmiTagSize == 1); |
66 | } |
67 | |
68 | DEFINE_BACKEND(TailCall, |
69 | (NoLocation, |
70 | Fixed<Register, ARGS_DESC_REG>, |
71 | Temp<Register> temp)) { |
72 | __ LoadObject(CODE_REG, instr->code()); |
73 | __ LeaveFrame(); // The arguments are still on the stack. |
74 | __ movl(temp, compiler::FieldAddress(CODE_REG, Code::entry_point_offset())); |
75 | __ jmp(temp); |
76 | } |
77 | |
78 | LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone, |
79 | bool opt) const { |
80 | const intptr_t kNumInputs = 5; |
81 | const intptr_t kNumTemps = 0; |
82 | LocationSummary* locs = new (zone) |
83 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
84 | locs->set_in(kSrcPos, Location::RequiresRegister()); |
85 | locs->set_in(kDestPos, Location::RegisterLocation(EDI)); |
86 | locs->set_in(kSrcStartPos, Location::WritableRegister()); |
87 | locs->set_in(kDestStartPos, Location::WritableRegister()); |
88 | locs->set_in(kLengthPos, Location::RegisterLocation(ECX)); |
89 | return locs; |
90 | } |
91 | |
92 | void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
93 | const Register src_reg = locs()->in(kSrcPos).reg(); |
94 | const Register src_start_reg = locs()->in(kSrcStartPos).reg(); |
95 | const Register dest_start_reg = locs()->in(kDestStartPos).reg(); |
96 | |
97 | // Save ESI which is THR. |
98 | __ pushl(ESI); |
99 | __ movl(ESI, src_reg); |
100 | |
101 | EmitComputeStartPointer(compiler, src_cid_, src_start(), ESI, src_start_reg); |
102 | EmitComputeStartPointer(compiler, dest_cid_, dest_start(), EDI, |
103 | dest_start_reg); |
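  // ECX holds the length as a tagged Smi, i.e. 2 * count. Adjust it so the
  // rep_movs below copies exactly count elements:
  //   sizes 1/2/4: untag to count; one movsb/movsw/movsl per element.
  //   size 8:      keep tagged (2 * count); two movsl per element.
  //   size 16:     shift left once (4 * count); four movsl per element.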
104 | if (element_size_ <= 4) { |
105 | __ SmiUntag(ECX); |
106 | } else if (element_size_ == 16) { |
107 | __ shll(ECX, compiler::Immediate(1)); |
108 | } |
109 | switch (element_size_) { |
110 | case 1: |
111 | __ rep_movsb(); |
112 | break; |
113 | case 2: |
114 | __ rep_movsw(); |
115 | break; |
116 | case 4: |
117 | case 8: |
118 | case 16: |
119 | __ rep_movsl(); |
120 | break; |
121 | } |
122 | |
123 | // Restore THR. |
124 | __ popl(ESI); |
125 | } |
126 | |
127 | void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler, |
128 | classid_t array_cid, |
129 | Value* start, |
130 | Register array_reg, |
131 | Register start_reg) { |
132 | intptr_t offset; |
133 | if (IsTypedDataBaseClassId(array_cid)) { |
134 | __ movl( |
135 | array_reg, |
136 | compiler::FieldAddress( |
137 | array_reg, compiler::target::TypedDataBase::data_field_offset())); |
138 | offset = 0; |
139 | } else { |
140 | switch (array_cid) { |
141 | case kOneByteStringCid: |
142 | offset = |
143 | compiler::target::OneByteString::data_offset() - kHeapObjectTag; |
144 | break; |
145 | case kTwoByteStringCid: |
146 | offset = |
147 | compiler::target::TwoByteString::data_offset() - kHeapObjectTag; |
148 | break; |
149 | case kExternalOneByteStringCid: |
150 | __ movl(array_reg, |
151 | compiler::FieldAddress(array_reg, |
152 | compiler::target::ExternalOneByteString:: |
153 | external_data_offset())); |
154 | offset = 0; |
155 | break; |
156 | case kExternalTwoByteStringCid: |
157 | __ movl(array_reg, |
158 | compiler::FieldAddress(array_reg, |
159 | compiler::target::ExternalTwoByteString:: |
160 | external_data_offset())); |
161 | offset = 0; |
162 | break; |
163 | default: |
164 | UNREACHABLE(); |
165 | break; |
166 | } |
167 | } |
168 | ScaleFactor scale; |
169 | switch (element_size_) { |
170 | case 1: |
171 | __ SmiUntag(start_reg); |
172 | scale = TIMES_1; |
173 | break; |
174 | case 2: |
175 | scale = TIMES_1; |
176 | break; |
177 | case 4: |
178 | scale = TIMES_2; |
179 | break; |
180 | case 8: |
181 | scale = TIMES_4; |
182 | break; |
183 | case 16: |
184 | scale = TIMES_8; |
185 | break; |
186 | default: |
187 | UNREACHABLE(); |
188 | break; |
189 | } |
190 | __ leal(array_reg, compiler::Address(array_reg, start_reg, scale, offset)); |
191 | } |
192 | |
193 | LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone, |
194 | bool opt) const { |
195 | const intptr_t kNumInputs = 1; |
196 | const intptr_t kNumTemps = 0; |
197 | LocationSummary* locs = new (zone) |
198 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
199 | ASSERT(representation() == kTagged); |
200 | locs->set_in(0, LocationAnyOrConstant(value())); |
201 | return locs; |
202 | } |
203 | |
204 | void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
205 | // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode |
206 | // where arguments are pushed by their definitions. |
207 | if (compiler->is_optimizing()) { |
208 | Location value = locs()->in(0); |
209 | if (value.IsRegister()) { |
210 | __ pushl(value.reg()); |
211 | } else if (value.IsConstant()) { |
212 | __ PushObject(value.constant()); |
213 | } else { |
214 | ASSERT(value.IsStackSlot()); |
215 | __ pushl(LocationToStackSlotAddress(value)); |
216 | } |
217 | } |
218 | } |
219 | |
220 | LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
221 | const intptr_t kNumInputs = 1; |
222 | const intptr_t kNumTemps = 0; |
223 | LocationSummary* locs = new (zone) |
224 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
225 | ASSERT(representation() == kTagged); |
226 | locs->set_in(0, Location::RegisterLocation(EAX)); |
227 | return locs; |
228 | } |
229 | |
// Attempt optimized compilation at the return instruction instead of at the
// entry. The entry needs to be patchable, so no inlined objects are allowed
// in the area that will be overwritten by the patch instruction (a jump).
233 | void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
234 | Register result = locs()->in(0).reg(); |
235 | ASSERT(result == EAX); |
236 | |
237 | if (compiler->intrinsic_mode()) { |
238 | // Intrinsics don't have a frame. |
239 | __ ret(); |
240 | return; |
241 | } |
242 | |
243 | #if defined(DEBUG) |
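  // Check that SP is at the expected distance below FP on function exit;
  // trap with int3 on a mismatch.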
  __ Comment("Stack Check");
245 | compiler::Label done; |
246 | const intptr_t fp_sp_dist = |
247 | (compiler::target::frame_layout.first_local_from_fp + 1 - |
248 | compiler->StackSize()) * |
249 | kWordSize; |
250 | ASSERT(fp_sp_dist <= 0); |
251 | __ movl(EDI, ESP); |
252 | __ subl(EDI, EBP); |
253 | __ cmpl(EDI, compiler::Immediate(fp_sp_dist)); |
254 | __ j(EQUAL, &done, compiler::Assembler::kNearJump); |
255 | __ int3(); |
256 | __ Bind(&done); |
257 | #endif |
258 | if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) { |
259 | compiler->EmitYieldPositionMetadata(token_pos(), yield_index()); |
260 | } |
261 | __ LeaveFrame(); |
262 | __ ret(); |
263 | } |
264 | |
265 | void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
266 | EmitReturnMoves(compiler); |
267 | |
268 | bool return_in_st0 = false; |
269 | if (marshaller_.Location(compiler::ffi::kResultIndex) |
270 | .payload_type() |
271 | .IsFloat()) { |
272 | ASSERT(locs()->in(0).IsFpuRegister() && locs()->in(0).fpu_reg() == XMM0); |
273 | return_in_st0 = true; |
274 | } |
275 | |
276 | // Leave Dart frame. |
277 | __ LeaveFrame(); |
278 | |
  // EDI is the only sane choice for a temporary register here:
  //
  //   EDX is used for large return values.
  //   ESI == THR.
  //   EBX or ECX would also work, but would make the code below confusing.
284 | const Register tmp = EDI; |
285 | |
286 | // Pop dummy return address. |
287 | __ popl(tmp); |
288 | |
289 | // Anything besides the return register(s!). Callee-saved registers will be |
290 | // restored later. |
291 | const Register vm_tag_reg = EBX; |
292 | const Register old_exit_frame_reg = ECX; |
293 | const Register old_exit_through_ffi_reg = tmp; |
294 | |
295 | __ popl(old_exit_frame_reg); |
  // Pop old_exit_through_ffi into vm_tag_reg for now; its final home (tmp)
  // is still needed to restore top_resource below.
  __ popl(vm_tag_reg);
297 | |
298 | // Restore top_resource. |
299 | __ popl(tmp); |
300 | __ movl( |
301 | compiler::Address(THR, compiler::target::Thread::top_resource_offset()), |
302 | tmp); |
303 | |
304 | __ movl(old_exit_through_ffi_reg, vm_tag_reg); |
305 | __ popl(vm_tag_reg); |
306 | |
307 | // This will reset the exit frame info to old_exit_frame_reg *before* entering |
308 | // the safepoint. |
309 | // |
310 | // If we were called by a trampoline, it will enter the safepoint on our |
311 | // behalf. |
312 | __ TransitionGeneratedToNative( |
313 | vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg, |
314 | /*enter_safepoint=*/!NativeCallbackTrampolines::Enabled()); |
315 | |
316 | // Move XMM0 into ST0 if needed. |
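  // There is no direct XMM to x87 move, so bounce the value through the
  // unused stack slot just below SP.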
317 | if (return_in_st0) { |
318 | if (marshaller_.Location(compiler::ffi::kResultIndex) |
319 | .payload_type() |
320 | .SizeInBytes() == 8) { |
321 | __ movsd(compiler::Address(SPREG, -8), XMM0); |
322 | __ fldl(compiler::Address(SPREG, -8)); |
323 | } else { |
324 | __ movss(compiler::Address(SPREG, -4), XMM0); |
325 | __ flds(compiler::Address(SPREG, -4)); |
326 | } |
327 | } |
328 | |
329 | // Restore C++ ABI callee-saved registers. |
330 | __ popl(EDI); |
331 | __ popl(ESI); |
332 | __ popl(EBX); |
333 | |
334 | #if defined(TARGET_OS_FUCHSIA) |
335 | UNREACHABLE(); // Fuchsia does not allow dart:ffi. |
336 | #elif defined(USING_SHADOW_CALL_STACK) |
337 | #error Unimplemented |
338 | #endif |
339 | |
340 | // Leave the entry frame. |
341 | __ LeaveFrame(); |
342 | |
343 | __ ret(); |
344 | } |
345 | |
346 | LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone, |
347 | bool opt) const { |
348 | const intptr_t kNumInputs = 0; |
349 | const intptr_t stack_index = |
350 | compiler::target::frame_layout.FrameSlotForVariable(&local()); |
351 | return LocationSummary::Make(zone, kNumInputs, |
352 | Location::StackSlot(stack_index, FPREG), |
353 | LocationSummary::kNoCall); |
354 | } |
355 | |
356 | void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
357 | ASSERT(!compiler->is_optimizing()); |
358 | // Nothing to do. |
359 | } |
360 | |
361 | LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone, |
362 | bool opt) const { |
363 | const intptr_t kNumInputs = 1; |
364 | return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(), |
365 | LocationSummary::kNoCall); |
366 | } |
367 | |
368 | void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
369 | Register value = locs()->in(0).reg(); |
370 | Register result = locs()->out(0).reg(); |
371 | ASSERT(result == value); // Assert that register assignment is correct. |
372 | __ movl(compiler::Address( |
373 | EBP, compiler::target::FrameOffsetInBytesForVariable(&local())), |
374 | value); |
375 | } |
376 | |
377 | LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone, |
378 | bool opt) const { |
379 | const intptr_t kNumInputs = 0; |
380 | return LocationSummary::Make(zone, kNumInputs, |
381 | compiler::Assembler::IsSafe(value()) |
382 | ? Location::Constant(this) |
383 | : Location::RequiresRegister(), |
384 | LocationSummary::kNoCall); |
385 | } |
386 | |
387 | void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
388 | // The register allocator drops constant definitions that have no uses. |
389 | Location out = locs()->out(0); |
390 | ASSERT(out.IsRegister() || out.IsConstant() || out.IsInvalid()); |
391 | if (out.IsRegister()) { |
392 | Register result = out.reg(); |
393 | __ LoadObjectSafely(result, value()); |
394 | } |
395 | } |
396 | |
397 | void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler, |
398 | const Location& destination, |
399 | Register tmp) { |
400 | if (destination.IsRegister()) { |
401 | if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) { |
402 | __ xorl(destination.reg(), destination.reg()); |
403 | } else if (value_.IsSmi() && (representation() == kUnboxedInt32)) { |
404 | __ movl(destination.reg(), |
405 | compiler::Immediate(Smi::Cast(value_).Value())); |
406 | } else { |
407 | ASSERT(representation() == kTagged); |
408 | __ LoadObjectSafely(destination.reg(), value_); |
409 | } |
410 | } else if (destination.IsFpuRegister()) { |
411 | const double value_as_double = Double::Cast(value_).value(); |
412 | uword addr = FindDoubleConstant(value_as_double); |
413 | if (addr == 0) { |
414 | __ pushl(EAX); |
415 | __ LoadObject(EAX, value_); |
416 | __ movsd(destination.fpu_reg(), |
417 | compiler::FieldAddress(EAX, Double::value_offset())); |
418 | __ popl(EAX); |
419 | } else if (Utils::DoublesBitEqual(value_as_double, 0.0)) { |
420 | __ xorps(destination.fpu_reg(), destination.fpu_reg()); |
421 | } else { |
422 | __ movsd(destination.fpu_reg(), compiler::Address::Absolute(addr)); |
423 | } |
424 | } else if (destination.IsDoubleStackSlot()) { |
425 | const double value_as_double = Double::Cast(value_).value(); |
426 | uword addr = FindDoubleConstant(value_as_double); |
427 | if (addr == 0) { |
428 | __ pushl(EAX); |
429 | __ LoadObject(EAX, value_); |
430 | __ movsd(FpuTMP, compiler::FieldAddress(EAX, Double::value_offset())); |
431 | __ popl(EAX); |
432 | } else if (Utils::DoublesBitEqual(value_as_double, 0.0)) { |
433 | __ xorps(FpuTMP, FpuTMP); |
434 | } else { |
435 | __ movsd(FpuTMP, compiler::Address::Absolute(addr)); |
436 | } |
437 | __ movsd(LocationToStackSlotAddress(destination), FpuTMP); |
438 | } else { |
439 | ASSERT(destination.IsStackSlot()); |
440 | if (value_.IsSmi() && representation() == kUnboxedInt32) { |
441 | __ movl(LocationToStackSlotAddress(destination), |
442 | compiler::Immediate(Smi::Cast(value_).Value())); |
443 | } else { |
444 | if (compiler::Assembler::IsSafeSmi(value_) || value_.IsNull()) { |
445 | __ movl(LocationToStackSlotAddress(destination), |
446 | compiler::Immediate(static_cast<int32_t>(value_.raw()))); |
447 | } else { |
448 | __ pushl(EAX); |
449 | __ LoadObjectSafely(EAX, value_); |
450 | __ movl(LocationToStackSlotAddress(destination), EAX); |
451 | __ popl(EAX); |
452 | } |
453 | } |
454 | } |
455 | } |
456 | |
457 | LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone, |
458 | bool opt) const { |
459 | const intptr_t kNumInputs = 0; |
460 | const intptr_t kNumTemps = |
461 | (constant_address() == 0) && (representation() != kUnboxedInt32) ? 1 : 0; |
462 | LocationSummary* locs = new (zone) |
463 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
464 | if (representation() == kUnboxedDouble) { |
465 | locs->set_out(0, Location::RequiresFpuRegister()); |
466 | } else { |
467 | ASSERT(representation() == kUnboxedInt32); |
468 | locs->set_out(0, Location::RequiresRegister()); |
469 | } |
470 | if (kNumTemps == 1) { |
471 | locs->set_temp(0, Location::RequiresRegister()); |
472 | } |
473 | return locs; |
474 | } |
475 | |
476 | void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
477 | // The register allocator drops constant definitions that have no uses. |
478 | if (!locs()->out(0).IsInvalid()) { |
479 | EmitMoveToLocation(compiler, locs()->out(0)); |
480 | } |
481 | } |
482 | |
483 | LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone, |
484 | bool opt) const { |
485 | const intptr_t kNumInputs = 4; |
486 | const intptr_t kNumTemps = 0; |
487 | LocationSummary* summary = new (zone) |
488 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
489 | summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg)); |
490 | summary->set_in( |
491 | 1, LocationFixedRegisterOrConstant(dst_type(), TypeTestABI::kDstTypeReg)); |
492 | summary->set_in(2, Location::RegisterLocation( |
493 | TypeTestABI::kInstantiatorTypeArgumentsReg)); |
494 | summary->set_in( |
495 | 3, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg)); |
496 | summary->set_out(0, Location::SameAsFirstInput()); |
497 | return summary; |
498 | } |
499 | |
500 | static Condition TokenKindToSmiCondition(Token::Kind kind) { |
501 | switch (kind) { |
502 | case Token::kEQ: |
503 | return EQUAL; |
504 | case Token::kNE: |
505 | return NOT_EQUAL; |
506 | case Token::kLT: |
507 | return LESS; |
508 | case Token::kGT: |
509 | return GREATER; |
510 | case Token::kLTE: |
511 | return LESS_EQUAL; |
512 | case Token::kGTE: |
513 | return GREATER_EQUAL; |
514 | default: |
515 | UNREACHABLE(); |
516 | return OVERFLOW; |
517 | } |
518 | } |
519 | |
520 | LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone, |
521 | bool opt) const { |
522 | const intptr_t kNumInputs = 2; |
523 | if (operation_cid() == kMintCid) { |
524 | const intptr_t kNumTemps = 0; |
525 | LocationSummary* locs = new (zone) |
526 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
527 | locs->set_in(0, Location::Pair(Location::RequiresRegister(), |
528 | Location::RequiresRegister())); |
529 | locs->set_in(1, Location::Pair(Location::RequiresRegister(), |
530 | Location::RequiresRegister())); |
531 | locs->set_out(0, Location::RequiresRegister()); |
532 | return locs; |
533 | } |
534 | if (operation_cid() == kDoubleCid) { |
535 | const intptr_t kNumTemps = 0; |
536 | LocationSummary* locs = new (zone) |
537 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
538 | locs->set_in(0, Location::RequiresFpuRegister()); |
539 | locs->set_in(1, Location::RequiresFpuRegister()); |
540 | locs->set_out(0, Location::RequiresRegister()); |
541 | return locs; |
542 | } |
543 | if (operation_cid() == kSmiCid) { |
544 | const intptr_t kNumTemps = 0; |
545 | LocationSummary* locs = new (zone) |
546 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
547 | locs->set_in(0, LocationRegisterOrConstant(left())); |
548 | // Only one input can be a constant operand. The case of two constant |
549 | // operands should be handled by constant propagation. |
550 | // Only right can be a stack slot. |
551 | locs->set_in(1, locs->in(0).IsConstant() |
552 | ? Location::RequiresRegister() |
553 | : LocationRegisterOrConstant(right())); |
554 | locs->set_out(0, Location::RequiresRegister()); |
555 | return locs; |
556 | } |
557 | UNREACHABLE(); |
558 | return NULL; |
559 | } |
560 | |
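// Loads the class id of value_reg into value_cid_reg. With value_is_smi ==
// NULL, a Smi value simply yields kSmiCid; otherwise a Smi value jumps to
// value_is_smi instead.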
561 | static void LoadValueCid(FlowGraphCompiler* compiler, |
562 | Register value_cid_reg, |
563 | Register value_reg, |
564 | compiler::Label* value_is_smi = NULL) { |
565 | compiler::Label done; |
566 | if (value_is_smi == NULL) { |
567 | __ movl(value_cid_reg, compiler::Immediate(kSmiCid)); |
568 | } |
569 | __ testl(value_reg, compiler::Immediate(kSmiTagMask)); |
570 | if (value_is_smi == NULL) { |
571 | __ j(ZERO, &done, compiler::Assembler::kNearJump); |
572 | } else { |
573 | __ j(ZERO, value_is_smi); |
574 | } |
575 | __ LoadClassId(value_cid_reg, value_reg); |
576 | __ Bind(&done); |
577 | } |
578 | |
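// Returns the condition that holds when the operands of a comparison are
// swapped, e.g. (a < b) == (b > a). Note that this is not the same as
// inverting the condition.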
579 | static Condition FlipCondition(Condition condition) { |
580 | switch (condition) { |
581 | case EQUAL: |
582 | return EQUAL; |
583 | case NOT_EQUAL: |
584 | return NOT_EQUAL; |
585 | case LESS: |
586 | return GREATER; |
587 | case LESS_EQUAL: |
588 | return GREATER_EQUAL; |
589 | case GREATER: |
590 | return LESS; |
591 | case GREATER_EQUAL: |
592 | return LESS_EQUAL; |
593 | case BELOW: |
594 | return ABOVE; |
595 | case BELOW_EQUAL: |
596 | return ABOVE_EQUAL; |
597 | case ABOVE: |
598 | return BELOW; |
599 | case ABOVE_EQUAL: |
600 | return BELOW_EQUAL; |
601 | default: |
602 | UNIMPLEMENTED(); |
603 | return EQUAL; |
604 | } |
605 | } |
606 | |
607 | static void EmitBranchOnCondition(FlowGraphCompiler* compiler, |
608 | Condition true_condition, |
609 | BranchLabels labels) { |
610 | if (labels.fall_through == labels.false_label) { |
611 | // If the next block is the false successor, fall through to it. |
612 | __ j(true_condition, labels.true_label); |
613 | } else { |
614 | // If the next block is not the false successor, branch to it. |
615 | Condition false_condition = InvertCondition(true_condition); |
616 | __ j(false_condition, labels.false_label); |
617 | |
618 | // Fall through or jump to the true successor. |
619 | if (labels.fall_through != labels.true_label) { |
620 | __ jmp(labels.true_label); |
621 | } |
622 | } |
623 | } |
624 | |
625 | static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler, |
626 | const LocationSummary& locs, |
627 | Token::Kind kind, |
628 | BranchLabels labels) { |
629 | Location left = locs.in(0); |
630 | Location right = locs.in(1); |
631 | ASSERT(!left.IsConstant() || !right.IsConstant()); |
632 | |
633 | Condition true_condition = TokenKindToSmiCondition(kind); |
634 | |
635 | if (left.IsConstant()) { |
636 | __ CompareObject(right.reg(), left.constant()); |
637 | true_condition = FlipCondition(true_condition); |
638 | } else if (right.IsConstant()) { |
639 | __ CompareObject(left.reg(), right.constant()); |
640 | } else if (right.IsStackSlot()) { |
641 | __ cmpl(left.reg(), LocationToStackSlotAddress(right)); |
642 | } else { |
643 | __ cmpl(left.reg(), right.reg()); |
644 | } |
645 | return true_condition; |
646 | } |
647 | |
648 | static Condition TokenKindToMintCondition(Token::Kind kind) { |
649 | switch (kind) { |
650 | case Token::kEQ: |
651 | return EQUAL; |
652 | case Token::kNE: |
653 | return NOT_EQUAL; |
654 | case Token::kLT: |
655 | return LESS; |
656 | case Token::kGT: |
657 | return GREATER; |
658 | case Token::kLTE: |
659 | return LESS_EQUAL; |
660 | case Token::kGTE: |
661 | return GREATER_EQUAL; |
662 | default: |
663 | UNREACHABLE(); |
664 | return OVERFLOW; |
665 | } |
666 | } |
667 | |
668 | static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler, |
669 | const LocationSummary& locs, |
670 | Token::Kind kind, |
671 | BranchLabels labels) { |
672 | ASSERT(Token::IsEqualityOperator(kind)); |
673 | PairLocation* left_pair = locs.in(0).AsPairLocation(); |
674 | Register left1 = left_pair->At(0).reg(); |
675 | Register left2 = left_pair->At(1).reg(); |
676 | PairLocation* right_pair = locs.in(1).AsPairLocation(); |
677 | Register right1 = right_pair->At(0).reg(); |
678 | Register right2 = right_pair->At(1).reg(); |
679 | compiler::Label done; |
680 | // Compare lower. |
681 | __ cmpl(left1, right1); |
682 | __ j(NOT_EQUAL, &done); |
683 | // Lower is equal, compare upper. |
684 | __ cmpl(left2, right2); |
685 | __ Bind(&done); |
686 | Condition true_condition = TokenKindToMintCondition(kind); |
687 | return true_condition; |
688 | } |
689 | |
690 | static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler, |
691 | const LocationSummary& locs, |
692 | Token::Kind kind, |
693 | BranchLabels labels) { |
694 | PairLocation* left_pair = locs.in(0).AsPairLocation(); |
695 | Register left1 = left_pair->At(0).reg(); |
696 | Register left2 = left_pair->At(1).reg(); |
697 | PairLocation* right_pair = locs.in(1).AsPairLocation(); |
698 | Register right1 = right_pair->At(0).reg(); |
699 | Register right2 = right_pair->At(1).reg(); |
700 | |
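  // A 64-bit signed comparison is split across the register pair: the upper
  // halves are compared as signed 32-bit values (they carry the sign), the
  // lower halves as unsigned 32-bit magnitudes.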
701 | Condition hi_cond = OVERFLOW, lo_cond = OVERFLOW; |
702 | switch (kind) { |
703 | case Token::kLT: |
704 | hi_cond = LESS; |
705 | lo_cond = BELOW; |
706 | break; |
707 | case Token::kGT: |
708 | hi_cond = GREATER; |
709 | lo_cond = ABOVE; |
710 | break; |
711 | case Token::kLTE: |
712 | hi_cond = LESS; |
713 | lo_cond = BELOW_EQUAL; |
714 | break; |
715 | case Token::kGTE: |
716 | hi_cond = GREATER; |
717 | lo_cond = ABOVE_EQUAL; |
718 | break; |
719 | default: |
720 | break; |
721 | } |
722 | ASSERT(hi_cond != OVERFLOW && lo_cond != OVERFLOW); |
723 | // Compare upper halves first. |
724 | __ cmpl(left2, right2); |
725 | __ j(hi_cond, labels.true_label); |
726 | __ j(FlipCondition(hi_cond), labels.false_label); |
727 | |
728 | // If upper is equal, compare lower half. |
729 | __ cmpl(left1, right1); |
730 | return lo_cond; |
731 | } |
732 | |
733 | static Condition TokenKindToDoubleCondition(Token::Kind kind) { |
734 | switch (kind) { |
735 | case Token::kEQ: |
736 | return EQUAL; |
737 | case Token::kNE: |
738 | return NOT_EQUAL; |
739 | case Token::kLT: |
740 | return BELOW; |
741 | case Token::kGT: |
742 | return ABOVE; |
743 | case Token::kLTE: |
744 | return BELOW_EQUAL; |
745 | case Token::kGTE: |
746 | return ABOVE_EQUAL; |
747 | default: |
748 | UNREACHABLE(); |
749 | return OVERFLOW; |
750 | } |
751 | } |
752 | |
753 | static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler, |
754 | const LocationSummary& locs, |
755 | Token::Kind kind, |
756 | BranchLabels labels) { |
757 | XmmRegister left = locs.in(0).fpu_reg(); |
758 | XmmRegister right = locs.in(1).fpu_reg(); |
759 | |
760 | __ comisd(left, right); |
761 | |
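  // comisd sets the parity flag when either operand is NaN (unordered); a
  // comparison involving NaN is false, except for !=, which is true.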
762 | Condition true_condition = TokenKindToDoubleCondition(kind); |
763 | compiler::Label* nan_result = |
764 | (true_condition == NOT_EQUAL) ? labels.true_label : labels.false_label; |
765 | __ j(PARITY_EVEN, nan_result); |
766 | return true_condition; |
767 | } |
768 | |
769 | Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
770 | BranchLabels labels) { |
771 | if (operation_cid() == kSmiCid) { |
772 | return EmitSmiComparisonOp(compiler, *locs(), kind(), labels); |
773 | } else if (operation_cid() == kMintCid) { |
774 | return EmitUnboxedMintEqualityOp(compiler, *locs(), kind(), labels); |
775 | } else { |
776 | ASSERT(operation_cid() == kDoubleCid); |
777 | return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); |
778 | } |
779 | } |
780 | |
781 | void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
782 | compiler::Label is_true, is_false; |
783 | BranchLabels labels = {&is_true, &is_false, &is_false}; |
784 | Condition true_condition = EmitComparisonCode(compiler, labels); |
785 | if (true_condition != kInvalidCondition) { |
786 | EmitBranchOnCondition(compiler, true_condition, labels); |
787 | } |
788 | |
789 | Register result = locs()->out(0).reg(); |
790 | compiler::Label done; |
791 | __ Bind(&is_false); |
792 | __ LoadObject(result, Bool::False()); |
793 | __ jmp(&done, compiler::Assembler::kNearJump); |
794 | __ Bind(&is_true); |
795 | __ LoadObject(result, Bool::True()); |
796 | __ Bind(&done); |
797 | } |
798 | |
799 | void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
800 | BranchInstr* branch) { |
801 | BranchLabels labels = compiler->CreateBranchLabels(branch); |
802 | Condition true_condition = EmitComparisonCode(compiler, labels); |
803 | if (true_condition != kInvalidCondition) { |
804 | EmitBranchOnCondition(compiler, true_condition, labels); |
805 | } |
806 | } |
807 | |
808 | LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
809 | const intptr_t kNumInputs = 2; |
810 | const intptr_t kNumTemps = 0; |
811 | LocationSummary* locs = new (zone) |
812 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
813 | locs->set_in(0, Location::RequiresRegister()); |
814 | // Only one input can be a constant operand. The case of two constant |
815 | // operands should be handled by constant propagation. |
816 | locs->set_in(1, LocationRegisterOrConstant(right())); |
817 | return locs; |
818 | } |
819 | |
820 | Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
821 | BranchLabels labels) { |
822 | Register left = locs()->in(0).reg(); |
823 | Location right = locs()->in(1); |
824 | if (right.IsConstant()) { |
825 | ASSERT(right.constant().IsSmi()); |
826 | const int32_t imm = static_cast<int32_t>(right.constant().raw()); |
827 | __ testl(left, compiler::Immediate(imm)); |
828 | } else { |
829 | __ testl(left, right.reg()); |
830 | } |
831 | Condition true_condition = (kind() == Token::kNE) ? NOT_ZERO : ZERO; |
832 | return true_condition; |
833 | } |
834 | |
835 | LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone, |
836 | bool opt) const { |
837 | const intptr_t kNumInputs = 1; |
838 | const intptr_t kNumTemps = 1; |
839 | LocationSummary* locs = new (zone) |
840 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
841 | locs->set_in(0, Location::RequiresRegister()); |
842 | locs->set_temp(0, Location::RequiresRegister()); |
843 | locs->set_out(0, Location::RequiresRegister()); |
844 | return locs; |
845 | } |
846 | |
847 | Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
848 | BranchLabels labels) { |
849 | ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); |
850 | Register val_reg = locs()->in(0).reg(); |
851 | Register cid_reg = locs()->temp(0).reg(); |
852 | |
853 | compiler::Label* deopt = |
854 | CanDeoptimize() |
855 | ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids, |
856 | licm_hoisted_ ? ICData::kHoisted : 0) |
857 | : NULL; |
858 | |
859 | const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; |
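  // cid_results() is a flattened array of (cid, result) pairs, with the Smi
  // entry always first so the Smi tag test below can handle it before the
  // class id is loaded.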
860 | const ZoneGrowableArray<intptr_t>& data = cid_results(); |
861 | ASSERT(data[0] == kSmiCid); |
862 | bool result = data[1] == true_result; |
863 | __ testl(val_reg, compiler::Immediate(kSmiTagMask)); |
864 | __ j(ZERO, result ? labels.true_label : labels.false_label); |
865 | __ LoadClassId(cid_reg, val_reg); |
866 | for (intptr_t i = 2; i < data.length(); i += 2) { |
867 | const intptr_t test_cid = data[i]; |
868 | ASSERT(test_cid != kSmiCid); |
869 | result = data[i + 1] == true_result; |
870 | __ cmpl(cid_reg, compiler::Immediate(test_cid)); |
871 | __ j(EQUAL, result ? labels.true_label : labels.false_label); |
872 | } |
  // No match found: deoptimize or take the default action.
874 | if (deopt == NULL) { |
875 | // If the cid is not in the list, jump to the opposite label from the cids |
876 | // that are in the list. These must be all the same (see asserts in the |
877 | // constructor). |
878 | compiler::Label* target = result ? labels.false_label : labels.true_label; |
879 | if (target != labels.fall_through) { |
880 | __ jmp(target); |
881 | } |
882 | } else { |
883 | __ jmp(deopt); |
884 | } |
  // Dummy result, as this method already did the jump; there's no need for
  // the caller to branch on a condition.
887 | return kInvalidCondition; |
888 | } |
889 | |
890 | LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone, |
891 | bool opt) const { |
892 | const intptr_t kNumInputs = 2; |
893 | const intptr_t kNumTemps = 0; |
894 | if (operation_cid() == kMintCid) { |
895 | const intptr_t kNumTemps = 0; |
896 | LocationSummary* locs = new (zone) |
897 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
898 | locs->set_in(0, Location::Pair(Location::RequiresRegister(), |
899 | Location::RequiresRegister())); |
900 | locs->set_in(1, Location::Pair(Location::RequiresRegister(), |
901 | Location::RequiresRegister())); |
902 | locs->set_out(0, Location::RequiresRegister()); |
903 | return locs; |
904 | } |
905 | if (operation_cid() == kDoubleCid) { |
906 | LocationSummary* summary = new (zone) |
907 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
908 | summary->set_in(0, Location::RequiresFpuRegister()); |
909 | summary->set_in(1, Location::RequiresFpuRegister()); |
910 | summary->set_out(0, Location::RequiresRegister()); |
911 | return summary; |
912 | } |
913 | ASSERT(operation_cid() == kSmiCid); |
914 | LocationSummary* summary = new (zone) |
915 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
916 | summary->set_in(0, LocationRegisterOrConstant(left())); |
917 | // Only one input can be a constant operand. The case of two constant |
918 | // operands should be handled by constant propagation. |
919 | summary->set_in(1, summary->in(0).IsConstant() |
920 | ? Location::RequiresRegister() |
921 | : LocationRegisterOrConstant(right())); |
922 | summary->set_out(0, Location::RequiresRegister()); |
923 | return summary; |
924 | } |
925 | |
926 | Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
927 | BranchLabels labels) { |
928 | if (operation_cid() == kSmiCid) { |
929 | return EmitSmiComparisonOp(compiler, *locs(), kind(), labels); |
930 | } else if (operation_cid() == kMintCid) { |
931 | return EmitUnboxedMintComparisonOp(compiler, *locs(), kind(), labels); |
932 | } else { |
933 | ASSERT(operation_cid() == kDoubleCid); |
934 | return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); |
935 | } |
936 | } |
937 | |
938 | void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
939 | SetupNative(); |
940 | Register result = locs()->out(0).reg(); |
941 | const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function()); |
942 | |
943 | // All arguments are already @ESP due to preceding PushArgument()s. |
944 | ASSERT(ArgumentCount() == |
945 | function().NumParameters() + (function().IsGeneric() ? 1 : 0)); |
946 | |
  // Push the result placeholder initialized to NULL.
948 | __ PushObject(Object::null_object()); |
949 | |
950 | // Pass a pointer to the first argument in EAX. |
951 | __ leal(EAX, compiler::Address(ESP, ArgumentCount() * kWordSize)); |
952 | |
953 | __ movl(EDX, compiler::Immediate(argc_tag)); |
954 | |
955 | const Code* stub; |
956 | |
957 | // There is no lazy-linking support on ia32. |
958 | ASSERT(!link_lazily()); |
959 | if (is_bootstrap_native()) { |
960 | stub = &StubCode::CallBootstrapNative(); |
961 | } else if (is_auto_scope()) { |
962 | stub = &StubCode::CallAutoScopeNative(); |
963 | } else { |
964 | stub = &StubCode::CallNoScopeNative(); |
965 | } |
966 | const compiler::ExternalLabel label( |
967 | reinterpret_cast<uword>(native_c_function())); |
968 | __ movl(ECX, compiler::Immediate(label.address())); |
969 | compiler->GenerateStubCall(token_pos(), *stub, PcDescriptorsLayout::kOther, |
970 | locs()); |
971 | |
972 | __ popl(result); |
973 | |
974 | __ Drop(ArgumentCount()); // Drop the arguments. |
975 | } |
976 | |
977 | void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
978 | const Register saved_fp = locs()->temp(0).reg(); |
979 | const Register temp = locs()->temp(1).reg(); |
980 | const Register branch = locs()->in(TargetAddressIndex()).reg(); |
981 | |
982 | // Save frame pointer because we're going to update it when we enter the exit |
983 | // frame. |
984 | __ movl(saved_fp, FPREG); |
985 | |
986 | // Make a space to put the return address. |
987 | __ pushl(compiler::Immediate(0)); |
988 | |
989 | // We need to create a dummy "exit frame". It will have a null code object. |
990 | __ LoadObject(CODE_REG, Object::null_object()); |
991 | __ EnterDartFrame(marshaller_.StackTopInBytes()); |
992 | |
993 | // Align frame before entering C++ world. |
994 | if (OS::ActivationFrameAlignment() > 1) { |
995 | __ andl(SPREG, compiler::Immediate(~(OS::ActivationFrameAlignment() - 1))); |
996 | } |
997 | |
998 | EmitParamMoves(compiler); |
999 | |
  // We need to copy a return address up into the dummy stack frame so the
  // stack walker will know which safepoint to use. Unlike X64, there's no
  // PC-relative 'leaq' available, so we have to do a trick with 'call'.
1003 | compiler::Label get_pc; |
1004 | __ call(&get_pc); |
1005 | compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, deopt_id(), |
1006 | PcDescriptorsLayout::Kind::kOther, locs()); |
1007 | __ Bind(&get_pc); |
1008 | __ popl(temp); |
1009 | __ movl(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), temp); |
1010 | |
1011 | ASSERT(!CanExecuteGeneratedCodeInSafepoint()); |
1012 | // We cannot trust that this code will be executable within a safepoint. |
1013 | // Therefore we delegate the responsibility of entering/exiting the |
  // safepoint to a stub that lives in the VM isolate's heap and will never
  // lose execute permission.
1016 | __ movl(temp, |
1017 | compiler::Address( |
1018 | THR, compiler::target::Thread:: |
1019 | call_native_through_safepoint_entry_point_offset())); |
1020 | |
1021 | // Calls EAX within a safepoint and clobbers EBX. |
1022 | ASSERT(temp == EBX && branch == EAX); |
1023 | __ call(temp); |
1024 | |
1025 | // The x86 calling convention requires floating point values to be returned on |
1026 | // the "floating-point stack" (aka. register ST0). We don't use the |
1027 | // floating-point stack in Dart, so we need to move the return value back into |
1028 | // an XMM register. |
1029 | if (representation() == kUnboxedDouble) { |
1030 | __ fstpl(compiler::Address(SPREG, -kDoubleSize)); |
1031 | __ movsd(XMM0, compiler::Address(SPREG, -kDoubleSize)); |
1032 | } else if (representation() == kUnboxedFloat) { |
1033 | __ fstps(compiler::Address(SPREG, -kFloatSize)); |
1034 | __ movss(XMM0, compiler::Address(SPREG, -kFloatSize)); |
1035 | } |
1036 | |
1037 | EmitReturnMoves(compiler); |
1038 | |
1039 | // Leave dummy exit frame. |
1040 | __ LeaveFrame(); |
1041 | |
1042 | // Instead of returning to the "fake" return address, we just pop it. |
1043 | __ popl(temp); |
1044 | } |
1045 | |
1046 | void NativeEntryInstr::SaveArgument( |
1047 | FlowGraphCompiler* compiler, |
1048 | const compiler::ffi::NativeLocation& nloc) const { |
1049 | // IA32 has no arguments passed in registers. |
1050 | UNREACHABLE(); |
1051 | } |
1052 | |
1053 | void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1054 | __ Bind(compiler->GetJumpLabel(this)); |
1055 | |
1056 | // Enter the entry frame. |
1057 | __ EnterFrame(0); |
1058 | |
1059 | // Save a space for the code object. |
1060 | __ xorl(EAX, EAX); |
1061 | __ pushl(EAX); |
1062 | |
1063 | #if defined(TARGET_OS_FUCHSIA) |
1064 | UNREACHABLE(); // Fuchsia does not allow dart:ffi. |
1065 | #elif defined(USING_SHADOW_CALL_STACK) |
1066 | #error Unimplemented |
1067 | #endif |
1068 | |
1069 | // Save ABI callee-saved registers. |
1070 | __ pushl(EBX); |
1071 | __ pushl(ESI); |
1072 | __ pushl(EDI); |
1073 | |
1074 | // Load the thread object. |
1075 | // |
  // Create another frame to align the stack before continuing in "native"
  // code.
1077 | // If we were called by a trampoline, it has already loaded the thread. |
1078 | ASSERT(!FLAG_precompiled_mode); // No relocation for AOT linking. |
1079 | if (!NativeCallbackTrampolines::Enabled()) { |
1080 | __ EnterFrame(0); |
1081 | __ ReserveAlignedFrameSpace(compiler::target::kWordSize); |
1082 | |
1083 | __ movl(compiler::Address(SPREG, 0), compiler::Immediate(callback_id_)); |
1084 | __ movl(EAX, compiler::Immediate(reinterpret_cast<intptr_t>( |
1085 | DLRT_GetThreadForNativeCallback))); |
1086 | __ call(EAX); |
1087 | __ movl(THR, EAX); |
1088 | |
1089 | __ LeaveFrame(); |
1090 | } |
1091 | |
1092 | // Save the current VMTag on the stack. |
1093 | __ movl(ECX, compiler::Assembler::VMTagAddress()); |
1094 | __ pushl(ECX); |
1095 | |
1096 | // Save top resource. |
1097 | __ pushl( |
1098 | compiler::Address(THR, compiler::target::Thread::top_resource_offset())); |
1099 | __ movl( |
1100 | compiler::Address(THR, compiler::target::Thread::top_resource_offset()), |
1101 | compiler::Immediate(0)); |
1102 | |
1103 | __ pushl(compiler::Address( |
1104 | THR, compiler::target::Thread::exit_through_ffi_offset())); |
1105 | |
1106 | // Save top exit frame info. Stack walker expects it to be here. |
1107 | __ pushl(compiler::Address( |
1108 | THR, compiler::target::Thread::top_exit_frame_info_offset())); |
1109 | |
1110 | // In debug mode, verify that we've pushed the top exit frame info at the |
1111 | // correct offset from FP. |
1112 | __ EmitEntryFrameVerification(); |
1113 | |
1114 | // Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller) |
1115 | // will leave the safepoint for us. |
1116 | __ TransitionNativeToGenerated(EAX, /*exit_safepoint=*/false); |
1117 | |
1118 | // Now that the safepoint has ended, we can hold Dart objects with bare hands. |
1119 | |
1120 | // Load the code object. |
1121 | __ movl(EAX, compiler::Address( |
1122 | THR, compiler::target::Thread::callback_code_offset())); |
1123 | __ movl(EAX, compiler::FieldAddress( |
1124 | EAX, compiler::target::GrowableObjectArray::data_offset())); |
1125 | __ movl(CODE_REG, compiler::FieldAddress( |
1126 | EAX, compiler::target::Array::data_offset() + |
1127 | callback_id_ * compiler::target::kWordSize)); |
1128 | |
1129 | // Put the code object in the reserved slot. |
1130 | __ movl(compiler::Address(FPREG, |
1131 | kPcMarkerSlotFromFp * compiler::target::kWordSize), |
1132 | CODE_REG); |
1133 | |
1134 | // Load a GC-safe value for the arguments descriptor (unused but tagged). |
1135 | __ xorl(ARGS_DESC_REG, ARGS_DESC_REG); |
1136 | |
1137 | // Push a dummy return address which suggests that we are inside of |
1138 | // InvokeDartCodeStub. This is how the stack walker detects an entry frame. |
1139 | __ movl(EAX, |
1140 | compiler::Address( |
1141 | THR, compiler::target::Thread::invoke_dart_code_stub_offset())); |
1142 | __ pushl(compiler::FieldAddress( |
1143 | EAX, compiler::target::Code::entry_point_offset())); |
1144 | |
1145 | // Continue with Dart frame setup. |
1146 | FunctionEntryInstr::EmitNativeCode(compiler); |
1147 | } |
1148 | |
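// Returns true if the index is a safe compile-time Smi constant whose scaled
// displacement fits in a 32-bit immediate and can thus be folded into the
// addressing mode.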
1149 | static bool CanBeImmediateIndex(Value* value, intptr_t cid) { |
1150 | ConstantInstr* constant = value->definition()->AsConstant(); |
1151 | if ((constant == NULL) || |
1152 | !compiler::Assembler::IsSafeSmi(constant->value())) { |
1153 | return false; |
1154 | } |
1155 | const int64_t index = Smi::Cast(constant->value()).AsInt64Value(); |
1156 | const intptr_t scale = Instance::ElementSizeFor(cid); |
1157 | const intptr_t offset = Instance::DataOffsetFor(cid); |
1158 | const int64_t displacement = index * scale + offset; |
1159 | return Utils::IsInt(32, displacement); |
1160 | } |
1161 | |
1162 | LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary( |
1163 | Zone* zone, |
1164 | bool opt) const { |
1165 | const intptr_t kNumInputs = 1; |
1166 | // TODO(fschneider): Allow immediate operands for the char code. |
1167 | return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), |
1168 | LocationSummary::kNoCall); |
1169 | } |
1170 | |
1171 | void OneByteStringFromCharCodeInstr::EmitNativeCode( |
1172 | FlowGraphCompiler* compiler) { |
1173 | Register char_code = locs()->in(0).reg(); |
1174 | Register result = locs()->out(0).reg(); |
1175 | __ movl(result, compiler::Immediate( |
1176 | reinterpret_cast<uword>(Symbols::PredefinedAddress()))); |
1177 | __ movl(result, |
1178 | compiler::Address(result, char_code, |
1179 | TIMES_HALF_WORD_SIZE, // Char code is a smi. |
1180 | Symbols::kNullCharCodeSymbolOffset * kWordSize)); |
1181 | } |
1182 | |
1183 | LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone, |
1184 | bool opt) const { |
1185 | const intptr_t kNumInputs = 1; |
1186 | return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), |
1187 | LocationSummary::kNoCall); |
1188 | } |
1189 | |
1190 | void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1191 | ASSERT(cid_ == kOneByteStringCid); |
1192 | Register str = locs()->in(0).reg(); |
1193 | Register result = locs()->out(0).reg(); |
1194 | compiler::Label is_one, done; |
1195 | __ movl(result, compiler::FieldAddress(str, String::length_offset())); |
1196 | __ cmpl(result, compiler::Immediate(Smi::RawValue(1))); |
1197 | __ j(EQUAL, &is_one, compiler::Assembler::kNearJump); |
1198 | __ movl(result, compiler::Immediate(Smi::RawValue(-1))); |
1199 | __ jmp(&done); |
1200 | __ Bind(&is_one); |
1201 | __ movzxb(result, compiler::FieldAddress(str, OneByteString::data_offset())); |
1202 | __ SmiTag(result); |
1203 | __ Bind(&done); |
1204 | } |
1205 | |
1206 | LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone, |
1207 | bool opt) const { |
1208 | const intptr_t kNumInputs = 1; |
1209 | const intptr_t kNumTemps = 0; |
1210 | LocationSummary* summary = new (zone) |
1211 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
1212 | summary->set_in(0, Location::RegisterLocation(EAX)); |
1213 | summary->set_out(0, Location::RegisterLocation(EAX)); |
1214 | return summary; |
1215 | } |
1216 | |
1217 | void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1218 | Register array = locs()->in(0).reg(); |
1219 | __ pushl(array); |
1220 | const int kTypeArgsLen = 0; |
1221 | const int kNumberOfArguments = 1; |
1222 | constexpr int kSizeOfArguments = 1; |
1223 | const Array& kNoArgumentNames = Object::null_array(); |
1224 | ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments, |
1225 | kNoArgumentNames); |
1226 | compiler->GenerateStaticCall(deopt_id(), token_pos(), CallFunction(), |
1227 | args_info, locs(), ICData::Handle(), |
1228 | ICData::kStatic); |
1229 | ASSERT(locs()->out(0).reg() == EAX); |
1230 | } |
1231 | |
1232 | LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone, |
1233 | bool opt) const { |
1234 | const intptr_t kNumInputs = 5; |
1235 | const intptr_t kNumTemps = 0; |
1236 | LocationSummary* summary = new (zone) |
1237 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1238 | summary->set_in(0, Location::Any()); // decoder |
1239 | summary->set_in(1, Location::WritableRegister()); // bytes |
1240 | summary->set_in(2, Location::WritableRegister()); // start |
1241 | summary->set_in(3, Location::WritableRegister()); // end |
1242 | summary->set_in(4, Location::RequiresRegister()); // table |
1243 | summary->set_out(0, Location::RequiresRegister()); |
1244 | return summary; |
1245 | } |
1246 | |
1247 | void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1248 | const Register bytes_reg = locs()->in(1).reg(); |
1249 | const Register start_reg = locs()->in(2).reg(); |
1250 | const Register end_reg = locs()->in(3).reg(); |
1251 | const Register table_reg = locs()->in(4).reg(); |
1252 | const Register size_reg = locs()->out(0).reg(); |
1253 | |
1254 | const Register bytes_ptr_reg = start_reg; |
1255 | const Register flags_reg = end_reg; |
1256 | const Register temp_reg = bytes_reg; |
1257 | const XmmRegister vector_reg = FpuTMP; |
1258 | |
1259 | static const intptr_t kBytesEndTempOffset = 1 * compiler::target::kWordSize; |
1260 | static const intptr_t kBytesEndMinus16TempOffset = |
1261 | 0 * compiler::target::kWordSize; |
1262 | |
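  // Each scan-table entry encodes the byte's contribution to the decoded
  // length in its low bits (kSizeMask) and the decoder flags in the bits
  // above (kFlagsMask).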
1263 | static const intptr_t kSizeMask = 0x03; |
1264 | static const intptr_t kFlagsMask = 0x3C; |
1265 | |
1266 | compiler::Label scan_ascii, ascii_loop, ascii_loop_in, nonascii_loop; |
1267 | compiler::Label rest, rest_loop, rest_loop_in, done; |
1268 | |
1269 | // Address of input bytes. |
1270 | __ movl(bytes_reg, |
1271 | compiler::FieldAddress( |
1272 | bytes_reg, compiler::target::TypedDataBase::data_field_offset())); |
1273 | |
1274 | // Pointers to start, end and end-16. |
1275 | __ leal(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0)); |
1276 | __ leal(temp_reg, compiler::Address(bytes_reg, end_reg, TIMES_1, 0)); |
1277 | __ pushl(temp_reg); |
1278 | __ leal(temp_reg, compiler::Address(temp_reg, -16)); |
1279 | __ pushl(temp_reg); |
1280 | |
1281 | // Initialize size and flags. |
1282 | __ xorl(size_reg, size_reg); |
1283 | __ xorl(flags_reg, flags_reg); |
1284 | |
1285 | __ jmp(&scan_ascii, compiler::Assembler::kNearJump); |
1286 | |
1287 | // Loop scanning through ASCII bytes one 16-byte vector at a time. |
1288 | // While scanning, the size register contains the size as it was at the start |
1289 | // of the current block of ASCII bytes, minus the address of the start of the |
1290 | // block. After the block, the end address of the block is added to update the |
1291 | // size to include the bytes in the block. |
1292 | __ Bind(&ascii_loop); |
1293 | __ addl(bytes_ptr_reg, compiler::Immediate(16)); |
1294 | __ Bind(&ascii_loop_in); |
1295 | |
1296 | // Exit vectorized loop when there are less than 16 bytes left. |
1297 | __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndMinus16TempOffset)); |
1298 | __ j(UNSIGNED_GREATER, &rest, compiler::Assembler::kNearJump); |
1299 | |
1300 | // Find next non-ASCII byte within the next 16 bytes. |
1301 | // Note: In principle, we should use MOVDQU here, since the loaded value is |
1302 | // used as input to an integer instruction. In practice, according to Agner |
1303 | // Fog, there is no penalty for using the wrong kind of load. |
1304 | __ movups(vector_reg, compiler::Address(bytes_ptr_reg, 0)); |
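  // pmovmskb collects the top bit of each of the 16 bytes, which is set
  // exactly for non-ASCII bytes; bsfl finds the index of the first such byte
  // and sets ZF if there is none.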
1305 | __ pmovmskb(temp_reg, vector_reg); |
1306 | __ bsfl(temp_reg, temp_reg); |
1307 | __ j(EQUAL, &ascii_loop, compiler::Assembler::kNearJump); |
1308 | |
1309 | // Point to non-ASCII byte and update size. |
1310 | __ addl(bytes_ptr_reg, temp_reg); |
1311 | __ addl(size_reg, bytes_ptr_reg); |
1312 | |
1313 | // Read first non-ASCII byte. |
1314 | __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0)); |
1315 | |
1316 | // Loop over block of non-ASCII bytes. |
1317 | __ Bind(&nonascii_loop); |
1318 | __ addl(bytes_ptr_reg, compiler::Immediate(1)); |
1319 | |
1320 | // Update size and flags based on byte value. |
1321 | __ movzxb(temp_reg, compiler::FieldAddress( |
1322 | table_reg, temp_reg, TIMES_1, |
1323 | compiler::target::OneByteString::data_offset())); |
1324 | __ orl(flags_reg, temp_reg); |
1325 | __ andl(temp_reg, compiler::Immediate(kSizeMask)); |
1326 | __ addl(size_reg, temp_reg); |
1327 | |
1328 | // Stop if end is reached. |
1329 | __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndTempOffset)); |
1330 | __ j(UNSIGNED_GREATER_EQUAL, &done, compiler::Assembler::kNearJump); |
1331 | |
1332 | // Go to ASCII scan if next byte is ASCII, otherwise loop. |
1333 | __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0)); |
1334 | __ testl(temp_reg, compiler::Immediate(0x80)); |
1335 | __ j(NOT_EQUAL, &nonascii_loop, compiler::Assembler::kNearJump); |
1336 | |
1337 | // Enter the ASCII scanning loop. |
1338 | __ Bind(&scan_ascii); |
1339 | __ subl(size_reg, bytes_ptr_reg); |
1340 | __ jmp(&ascii_loop_in); |
1341 | |
1342 | // Less than 16 bytes left. Process the remaining bytes individually. |
1343 | __ Bind(&rest); |
1344 | |
1345 | // Update size after ASCII scanning loop. |
1346 | __ addl(size_reg, bytes_ptr_reg); |
1347 | __ jmp(&rest_loop_in, compiler::Assembler::kNearJump); |
1348 | |
1349 | __ Bind(&rest_loop); |
1350 | |
1351 | // Read byte and increment pointer. |
1352 | __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0)); |
1353 | __ addl(bytes_ptr_reg, compiler::Immediate(1)); |
1354 | |
1355 | // Update size and flags based on byte value. |
1356 | __ movzxb(temp_reg, compiler::FieldAddress( |
1357 | table_reg, temp_reg, TIMES_1, |
1358 | compiler::target::OneByteString::data_offset())); |
1359 | __ orl(flags_reg, temp_reg); |
1360 | __ andl(temp_reg, compiler::Immediate(kSizeMask)); |
1361 | __ addl(size_reg, temp_reg); |
1362 | |
1363 | // Stop if end is reached. |
1364 | __ Bind(&rest_loop_in); |
1365 | __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndTempOffset)); |
1366 | __ j(UNSIGNED_LESS, &rest_loop, compiler::Assembler::kNearJump); |
1367 | __ Bind(&done); |
1368 | |
1369 | // Pop temporaries. |
1370 | __ addl(ESP, compiler::Immediate(2 * compiler::target::kWordSize)); |
1371 | |
1372 | // Write flags to field. |
1373 | __ andl(flags_reg, compiler::Immediate(kFlagsMask)); |
1374 | if (!IsScanFlagsUnboxed()) { |
1375 | __ SmiTag(flags_reg); |
1376 | } |
1377 | Register decoder_reg; |
1378 | const Location decoder_location = locs()->in(0); |
1379 | if (decoder_location.IsStackSlot()) { |
1380 | __ movl(temp_reg, LocationToStackSlotAddress(decoder_location)); |
1381 | decoder_reg = temp_reg; |
1382 | } else { |
1383 | decoder_reg = decoder_location.reg(); |
1384 | } |
1385 | const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes(); |
1386 | __ orl(compiler::FieldAddress(decoder_reg, scan_flags_field_offset), |
1387 | flags_reg); |
1388 | } |
1389 | |
1390 | LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone, |
1391 | bool opt) const { |
1392 | const intptr_t kNumInputs = 1; |
1393 | return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(), |
1394 | LocationSummary::kNoCall); |
1395 | } |
1396 | |
1397 | void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1398 | Register obj = locs()->in(0).reg(); |
1399 | Register result = locs()->out(0).reg(); |
1400 | if (object()->definition()->representation() == kUntagged) { |
1401 | __ movl(result, compiler::Address(obj, offset())); |
1402 | } else { |
1403 | ASSERT(object()->definition()->representation() == kTagged); |
1404 | __ movl(result, compiler::FieldAddress(obj, offset())); |
1405 | } |
1406 | } |
1407 | |
1408 | DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) { |
1409 | __ movl(compiler::Address(obj, instr->offset_from_tagged()), value); |
1410 | } |
1411 | |
1412 | Representation LoadIndexedInstr::representation() const { |
1413 | switch (class_id_) { |
1414 | case kArrayCid: |
1415 | case kImmutableArrayCid: |
1416 | return kTagged; |
1417 | case kOneByteStringCid: |
1418 | case kTwoByteStringCid: |
1419 | case kTypedDataInt8ArrayCid: |
1420 | case kTypedDataInt16ArrayCid: |
1421 | case kTypedDataUint8ArrayCid: |
1422 | case kTypedDataUint8ClampedArrayCid: |
1423 | case kTypedDataUint16ArrayCid: |
1424 | case kExternalOneByteStringCid: |
1425 | case kExternalTwoByteStringCid: |
1426 | case kExternalTypedDataUint8ArrayCid: |
1427 | case kExternalTypedDataUint8ClampedArrayCid: |
1428 | return kUnboxedIntPtr; |
1429 | case kTypedDataInt32ArrayCid: |
1430 | return kUnboxedInt32; |
1431 | case kTypedDataUint32ArrayCid: |
1432 | return kUnboxedUint32; |
1433 | case kTypedDataInt64ArrayCid: |
1434 | case kTypedDataUint64ArrayCid: |
1435 | return kUnboxedInt64; |
1436 | case kTypedDataFloat32ArrayCid: |
1437 | case kTypedDataFloat64ArrayCid: |
1438 | return kUnboxedDouble; |
1439 | case kTypedDataFloat32x4ArrayCid: |
1440 | return kUnboxedFloat32x4; |
1441 | case kTypedDataInt32x4ArrayCid: |
1442 | return kUnboxedInt32x4; |
1443 | case kTypedDataFloat64x2ArrayCid: |
1444 | return kUnboxedFloat64x2; |
1445 | default: |
1446 | UNIMPLEMENTED(); |
1447 | return kTagged; |
1448 | } |
1449 | } |
1450 | |
1451 | LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone, |
1452 | bool opt) const { |
1453 | const intptr_t kNumInputs = 2; |
1454 | const intptr_t kNumTemps = 0; |
1455 | LocationSummary* locs = new (zone) |
1456 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1457 | locs->set_in(0, Location::RequiresRegister()); |
1458 | if (CanBeImmediateIndex(index(), class_id())) { |
1459 | // CanBeImmediateIndex must return false for unsafe smis. |
1460 | locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); |
1461 | } else { |
1462 | // The index is either untagged (element size == 1) or a smi (for all |
1463 | // element sizes > 1). |
1464 | locs->set_in(1, (index_scale() == 1) ? Location::WritableRegister() |
1465 | : Location::RequiresRegister()); |
1466 | } |
1467 | if ((representation() == kUnboxedDouble) || |
1468 | (representation() == kUnboxedFloat32x4) || |
1469 | (representation() == kUnboxedInt32x4) || |
1470 | (representation() == kUnboxedFloat64x2)) { |
1471 | locs->set_out(0, Location::RequiresFpuRegister()); |
1472 | } else if (representation() == kUnboxedInt64) { |
1473 | ASSERT(class_id() == kTypedDataInt64ArrayCid || |
1474 | class_id() == kTypedDataUint64ArrayCid); |
1475 | locs->set_out(0, Location::Pair(Location::RequiresRegister(), |
1476 | Location::RequiresRegister())); |
1477 | } else { |
1478 | locs->set_out(0, Location::RequiresRegister()); |
1479 | } |
1480 | return locs; |
1481 | } |
1482 | |
1483 | void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1484 | // The array register points to the backing store for external arrays. |
1485 | const Register array = locs()->in(0).reg(); |
1486 | const Location index = locs()->in(1); |
1487 | |
1488 | compiler::Address element_address = |
1489 | index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex( |
1490 | IsExternal(), class_id(), index_scale(), |
1491 | index_unboxed_, array, index.reg()) |
1492 | : compiler::Assembler::ElementAddressForIntIndex( |
1493 | IsExternal(), class_id(), index_scale(), array, |
1494 | Smi::Cast(index.constant()).Value()); |
1495 | |
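  // When the element size is 1 the index register was requested writable so
  // that a tagged Smi index can be untagged in place before the address is
  // used.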
1496 | if (index_scale() == 1 && !index_unboxed_) { |
1497 | if (index.IsRegister()) { |
1498 | __ SmiUntag(index.reg()); |
1499 | } else { |
1500 | ASSERT(index.IsConstant()); |
1501 | } |
1502 | } |
1503 | |
1504 | if ((representation() == kUnboxedDouble) || |
1505 | (representation() == kUnboxedFloat32x4) || |
1506 | (representation() == kUnboxedInt32x4) || |
1507 | (representation() == kUnboxedFloat64x2)) { |
1508 | XmmRegister result = locs()->out(0).fpu_reg(); |
1509 | switch (class_id()) { |
1510 | case kTypedDataFloat32ArrayCid: |
1511 | __ movss(result, element_address); |
1512 | break; |
1513 | case kTypedDataFloat64ArrayCid: |
1514 | __ movsd(result, element_address); |
1515 | break; |
1516 | case kTypedDataInt32x4ArrayCid: |
1517 | case kTypedDataFloat32x4ArrayCid: |
1518 | case kTypedDataFloat64x2ArrayCid: |
1519 | __ movups(result, element_address); |
1520 | break; |
1521 | default: |
1522 | UNREACHABLE(); |
1523 | } |
1524 | return; |
1525 | } |
1526 | |
1527 | switch (class_id()) { |
1528 | case kTypedDataInt32ArrayCid: { |
1529 | const Register result = locs()->out(0).reg(); |
1530 | ASSERT(representation() == kUnboxedInt32); |
1531 | __ movl(result, element_address); |
1532 | break; |
1533 | } |
1534 | case kTypedDataUint32ArrayCid: { |
1535 | const Register result = locs()->out(0).reg(); |
1536 | ASSERT(representation() == kUnboxedUint32); |
1537 | __ movl(result, element_address); |
1538 | break; |
1539 | } |
1540 | case kTypedDataInt64ArrayCid: |
1541 | case kTypedDataUint64ArrayCid: { |
1542 | ASSERT(representation() == kUnboxedInt64); |
1543 | ASSERT(locs()->out(0).IsPairLocation()); |
1544 | PairLocation* result_pair = locs()->out(0).AsPairLocation(); |
1545 | const Register result_lo = result_pair->At(0).reg(); |
1546 | const Register result_hi = result_pair->At(1).reg(); |
1547 | ASSERT(class_id() == kTypedDataInt64ArrayCid || |
1548 | class_id() == kTypedDataUint64ArrayCid); |
1549 | __ movl(result_lo, element_address); |
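      // The high 32 bits live one word further on; recompute the element
      // address with an extra word offset, since an unboxed 64-bit value
      // occupies a register pair on ia32.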
1550 | element_address = |
1551 | index.IsRegister() |
1552 | ? compiler::Assembler::ElementAddressForRegIndex( |
1553 | IsExternal(), class_id(), index_scale(), index_unboxed_, |
1554 | array, index.reg(), kWordSize) |
1555 | : compiler::Assembler::ElementAddressForIntIndex( |
1556 | IsExternal(), class_id(), index_scale(), array, |
1557 | Smi::Cast(index.constant()).Value(), kWordSize); |
1558 | __ movl(result_hi, element_address); |
1559 | break; |
1560 | } |
1561 | case kTypedDataInt8ArrayCid: { |
1562 | const Register result = locs()->out(0).reg(); |
1563 | ASSERT(representation() == kUnboxedIntPtr); |
1564 | ASSERT(index_scale() == 1); |
1565 | __ movsxb(result, element_address); |
1566 | break; |
1567 | } |
1568 | case kTypedDataUint8ArrayCid: |
1569 | case kTypedDataUint8ClampedArrayCid: |
1570 | case kExternalTypedDataUint8ArrayCid: |
1571 | case kExternalTypedDataUint8ClampedArrayCid: |
1572 | case kOneByteStringCid: |
1573 | case kExternalOneByteStringCid: { |
1574 | const Register result = locs()->out(0).reg(); |
1575 | ASSERT(representation() == kUnboxedIntPtr); |
1576 | ASSERT(index_scale() == 1); |
1577 | __ movzxb(result, element_address); |
1578 | break; |
1579 | } |
1580 | case kTypedDataInt16ArrayCid: { |
1581 | const Register result = locs()->out(0).reg(); |
1582 | ASSERT(representation() == kUnboxedIntPtr); |
1583 | __ movsxw(result, element_address); |
1584 | break; |
1585 | } |
1586 | case kTypedDataUint16ArrayCid: |
1587 | case kTwoByteStringCid: |
1588 | case kExternalTwoByteStringCid: { |
1589 | const Register result = locs()->out(0).reg(); |
1590 | ASSERT(representation() == kUnboxedIntPtr); |
1591 | __ movzxw(result, element_address); |
1592 | break; |
1593 | } |
1594 | default: { |
1595 | const Register result = locs()->out(0).reg(); |
1596 | ASSERT(representation() == kTagged); |
1597 | ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid)); |
1598 | __ movl(result, element_address); |
1599 | break; |
1600 | } |
1601 | } |
1602 | } |
1603 | |
1604 | Representation StoreIndexedInstr::RequiredInputRepresentation( |
1605 | intptr_t idx) const { |
1606 | // Array can be a Dart object or a pointer to external data. |
1607 | if (idx == 0) return kNoRepresentation; // Flexible input representation. |
1608 | if (idx == 1) { |
1609 | if (index_unboxed_) { |
1610 | // TODO(dartbug.com/39432): kUnboxedInt32 || kUnboxedUint32. |
1611 | return kNoRepresentation; |
1612 | } else { |
1613 | return kTagged; // Index is a smi. |
1614 | } |
1615 | } |
1616 | ASSERT(idx == 2); |
1617 | switch (class_id_) { |
1618 | case kArrayCid: |
1619 | return kTagged; |
1620 | case kOneByteStringCid: |
1621 | case kTwoByteStringCid: |
1622 | case kTypedDataInt8ArrayCid: |
1623 | case kTypedDataInt16ArrayCid: |
1624 | case kTypedDataUint8ArrayCid: |
1625 | case kTypedDataUint8ClampedArrayCid: |
1626 | case kTypedDataUint16ArrayCid: |
1627 | case kExternalTypedDataUint8ArrayCid: |
1628 | case kExternalTypedDataUint8ClampedArrayCid: |
1629 | return kUnboxedIntPtr; |
1630 | case kTypedDataInt32ArrayCid: |
1631 | return kUnboxedInt32; |
1632 | case kTypedDataUint32ArrayCid: |
1633 | return kUnboxedUint32; |
1634 | case kTypedDataInt64ArrayCid: |
1635 | case kTypedDataUint64ArrayCid: |
1636 | return kUnboxedInt64; |
1637 | case kTypedDataFloat32ArrayCid: |
1638 | case kTypedDataFloat64ArrayCid: |
1639 | return kUnboxedDouble; |
1640 | case kTypedDataFloat32x4ArrayCid: |
1641 | return kUnboxedFloat32x4; |
1642 | case kTypedDataInt32x4ArrayCid: |
1643 | return kUnboxedInt32x4; |
1644 | case kTypedDataFloat64x2ArrayCid: |
1645 | return kUnboxedFloat64x2; |
1646 | default: |
1647 | UNIMPLEMENTED(); |
1648 | return kTagged; |
1649 | } |
1650 | } |
1651 | |
1652 | LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone, |
1653 | bool opt) const { |
1654 | const intptr_t kNumInputs = 3; |
1655 | const intptr_t kNumTemps = |
1656 | class_id() == kArrayCid && ShouldEmitStoreBarrier() ? 1 : 0; |
1657 | LocationSummary* locs = new (zone) |
1658 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
1659 | locs->set_in(0, Location::RequiresRegister()); |
1660 | if (CanBeImmediateIndex(index(), class_id())) { |
1661 | // CanBeImmediateIndex must return false for unsafe smis. |
1662 | locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); |
1663 | } else { |
1664 | // The index is either untagged (element size == 1) or a smi (for all |
1665 | // element sizes > 1). |
1666 | locs->set_in(1, (index_scale() == 1) ? Location::WritableRegister() |
1667 | : Location::RequiresRegister()); |
1668 | } |
1669 | switch (class_id()) { |
1670 | case kArrayCid: |
1671 | locs->set_in(2, ShouldEmitStoreBarrier() |
1672 | ? Location::WritableRegister() |
1673 | : LocationRegisterOrConstant(value())); |
1674 | if (ShouldEmitStoreBarrier()) { |
1675 | locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg)); |
1676 | locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg)); |
1677 | } |
1678 | break; |
1679 | case kExternalTypedDataUint8ArrayCid: |
1680 | case kExternalTypedDataUint8ClampedArrayCid: |
1681 | case kTypedDataInt8ArrayCid: |
1682 | case kTypedDataUint8ArrayCid: |
1683 | case kTypedDataUint8ClampedArrayCid: |
1684 | case kOneByteStringCid: |
1685 | case kTwoByteStringCid: |
1686 | // TODO(fschneider): Add location constraint for byte registers (EAX, |
1687 | // EBX, ECX, EDX) instead of using a fixed register. |
1688 | locs->set_in(2, LocationFixedRegisterOrSmiConstant(value(), EAX)); |
1689 | break; |
1690 | case kTypedDataInt16ArrayCid: |
1691 | case kTypedDataUint16ArrayCid: |
1692 | // Writable register because the value must be untagged before storing. |
1693 | locs->set_in(2, Location::WritableRegister()); |
1694 | break; |
1695 | case kTypedDataInt32ArrayCid: |
1696 | case kTypedDataUint32ArrayCid: |
1697 | locs->set_in(2, Location::RequiresRegister()); |
1698 | break; |
1699 | case kTypedDataInt64ArrayCid: |
1700 | case kTypedDataUint64ArrayCid: |
1701 | locs->set_in(2, Location::Pair(Location::RequiresRegister(), |
1702 | Location::RequiresRegister())); |
1703 | break; |
1704 | case kTypedDataFloat32ArrayCid: |
1705 | case kTypedDataFloat64ArrayCid: |
1706 | // TODO(srdjan): Support Float64 constants. |
1707 | locs->set_in(2, Location::RequiresFpuRegister()); |
1708 | break; |
1709 | case kTypedDataInt32x4ArrayCid: |
1710 | case kTypedDataFloat32x4ArrayCid: |
1711 | case kTypedDataFloat64x2ArrayCid: |
1712 | locs->set_in(2, Location::RequiresFpuRegister()); |
1713 | break; |
1714 | default: |
1715 | UNREACHABLE(); |
1716 | return NULL; |
1717 | } |
1718 | return locs; |
1719 | } |
1720 | |
1721 | void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
1722 | // The array register points to the backing store for external arrays. |
1723 | const Register array = locs()->in(0).reg(); |
1724 | const Location index = locs()->in(1); |
1725 | |
1726 | compiler::Address element_address = |
1727 | index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex( |
1728 | IsExternal(), class_id(), index_scale(), |
1729 | index_unboxed_, array, index.reg()) |
1730 | : compiler::Assembler::ElementAddressForIntIndex( |
1731 | IsExternal(), class_id(), index_scale(), array, |
1732 | Smi::Cast(index.constant()).Value()); |
1733 | |
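  // As in LoadIndexed, a tagged Smi index is untagged in place when the
  // element size is 1.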
1734 | if ((index_scale() == 1) && index.IsRegister() && !index_unboxed_) { |
1735 | __ SmiUntag(index.reg()); |
1736 | } |
1737 | switch (class_id()) { |
1738 | case kArrayCid: |
1739 | if (ShouldEmitStoreBarrier()) { |
1740 | Register value = locs()->in(2).reg(); |
1741 | Register slot = locs()->temp(0).reg(); |
1742 | __ leal(slot, element_address); |
1743 | __ StoreIntoArray(array, slot, value, CanValueBeSmi()); |
1744 | } else if (locs()->in(2).IsConstant()) { |
1745 | const Object& constant = locs()->in(2).constant(); |
1746 | __ StoreIntoObjectNoBarrier(array, element_address, constant); |
1747 | } else { |
1748 | Register value = locs()->in(2).reg(); |
1749 | __ StoreIntoObjectNoBarrier(array, element_address, value); |
1750 | } |
1751 | break; |
1752 | case kTypedDataInt8ArrayCid: |
1753 | case kTypedDataUint8ArrayCid: |
1754 | case kExternalTypedDataUint8ArrayCid: |
1755 | case kOneByteStringCid: |
1756 | ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr); |
1757 | if (locs()->in(2).IsConstant()) { |
1758 | const Smi& constant = Smi::Cast(locs()->in(2).constant()); |
1759 | __ movb(element_address, |
1760 | compiler::Immediate(static_cast<int8_t>(constant.Value()))); |
1761 | } else { |
1762 | ASSERT(locs()->in(2).reg() == EAX); |
1763 | __ movb(element_address, AL); |
1764 | } |
1765 | break; |
1766 | case kTypedDataUint8ClampedArrayCid: |
1767 | case kExternalTypedDataUint8ClampedArrayCid: { |
1768 | ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr); |
1769 | if (locs()->in(2).IsConstant()) { |
1770 | const Smi& constant = Smi::Cast(locs()->in(2).constant()); |
1771 | intptr_t value = constant.Value(); |
        // Clamp the value to the range [0, 0xFF].
1773 | if (value > 0xFF) { |
1774 | value = 0xFF; |
1775 | } else if (value < 0) { |
1776 | value = 0; |
1777 | } |
1778 | __ movb(element_address, |
1779 | compiler::Immediate(static_cast<int8_t>(value))); |
1780 | } else { |
1781 | ASSERT(locs()->in(2).reg() == EAX); |
1782 | compiler::Label store_value, store_0xff; |
1783 | __ cmpl(EAX, compiler::Immediate(0xFF)); |
1784 | __ j(BELOW_EQUAL, &store_value, compiler::Assembler::kNearJump); |
        // Clamp the value to the range [0, 0xFF].
1786 | __ j(GREATER, &store_0xff); |
1787 | __ xorl(EAX, EAX); |
1788 | __ jmp(&store_value, compiler::Assembler::kNearJump); |
1789 | __ Bind(&store_0xff); |
1790 | __ movl(EAX, compiler::Immediate(0xFF)); |
1791 | __ Bind(&store_value); |
1792 | __ movb(element_address, AL); |
1793 | } |
1794 | break; |
1795 | } |
1796 | case kTwoByteStringCid: |
1797 | case kTypedDataInt16ArrayCid: |
1798 | case kTypedDataUint16ArrayCid: { |
1799 | ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr); |
1800 | const Register value = locs()->in(2).reg(); |
1801 | __ movw(element_address, value); |
1802 | break; |
1803 | } |
1804 | case kTypedDataInt32ArrayCid: |
1805 | case kTypedDataUint32ArrayCid: |
1806 | __ movl(element_address, locs()->in(2).reg()); |
1807 | break; |
1808 | case kTypedDataInt64ArrayCid: |
1809 | case kTypedDataUint64ArrayCid: { |
1810 | ASSERT(locs()->in(2).IsPairLocation()); |
1811 | PairLocation* value_pair = locs()->in(2).AsPairLocation(); |
1812 | const Register value_lo = value_pair->At(0).reg(); |
1813 | const Register value_hi = value_pair->At(1).reg(); |
1814 | __ movl(element_address, value_lo); |
1815 | element_address = |
1816 | index.IsRegister() |
1817 | ? compiler::Assembler::ElementAddressForRegIndex( |
1818 | IsExternal(), class_id(), index_scale(), index_unboxed_, |
1819 | array, index.reg(), kWordSize) |
1820 | : compiler::Assembler::ElementAddressForIntIndex( |
1821 | IsExternal(), class_id(), index_scale(), array, |
1822 | Smi::Cast(index.constant()).Value(), kWordSize); |
1823 | __ movl(element_address, value_hi); |
1824 | break; |
1825 | } |
1826 | case kTypedDataFloat32ArrayCid: |
1827 | __ movss(element_address, locs()->in(2).fpu_reg()); |
1828 | break; |
1829 | case kTypedDataFloat64ArrayCid: |
1830 | __ movsd(element_address, locs()->in(2).fpu_reg()); |
1831 | break; |
1832 | case kTypedDataInt32x4ArrayCid: |
1833 | case kTypedDataFloat32x4ArrayCid: |
1834 | case kTypedDataFloat64x2ArrayCid: |
1835 | __ movups(element_address, locs()->in(2).fpu_reg()); |
1836 | break; |
1837 | default: |
1838 | UNREACHABLE(); |
1839 | } |
1840 | } |
1841 | |
1842 | DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr) |
1843 | DEFINE_UNIMPLEMENTED_INSTRUCTION(CheckConditionInstr) |
1844 | |
1845 | LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone, |
1846 | bool opt) const { |
1847 | const intptr_t kNumInputs = 1; |
1848 | |
1849 | const intptr_t value_cid = value()->Type()->ToCid(); |
1850 | const intptr_t field_cid = field().guarded_cid(); |
1851 | |
1852 | const bool emit_full_guard = !opt || (field_cid == kIllegalCid); |
1853 | const bool needs_value_cid_temp_reg = |
1854 | (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); |
1855 | const bool needs_field_temp_reg = emit_full_guard; |
1856 | |
1857 | intptr_t num_temps = 0; |
1858 | if (needs_value_cid_temp_reg) { |
1859 | num_temps++; |
1860 | } |
1861 | if (needs_field_temp_reg) { |
1862 | num_temps++; |
1863 | } |
1864 | |
1865 | LocationSummary* summary = new (zone) |
1866 | LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall); |
1867 | summary->set_in(0, Location::RequiresRegister()); |
1868 | |
1869 | for (intptr_t i = 0; i < num_temps; i++) { |
1870 | summary->set_temp(i, Location::RequiresRegister()); |
1871 | } |
1872 | |
1873 | return summary; |
1874 | } |
1875 | |
1876 | void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
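  // The 16-bit cmpw/movw accesses below rely on guarded_cid_ and
  // is_nullable_ being 16-bit fields; the asserts guard that layout.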
1877 | ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16); |
1878 | ASSERT(sizeof(FieldLayout::guarded_cid_) == 2); |
1879 | ASSERT(sizeof(FieldLayout::is_nullable_) == 2); |
1880 | |
1881 | const intptr_t value_cid = value()->Type()->ToCid(); |
1882 | const intptr_t field_cid = field().guarded_cid(); |
1883 | const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid; |
1884 | |
1885 | if (field_cid == kDynamicCid) { |
1886 | return; // Nothing to emit. |
1887 | } |
1888 | |
1889 | const bool emit_full_guard = |
1890 | !compiler->is_optimizing() || (field_cid == kIllegalCid); |
1891 | |
1892 | const bool needs_value_cid_temp_reg = |
1893 | (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); |
1894 | |
1895 | const bool needs_field_temp_reg = emit_full_guard; |
1896 | |
1897 | const Register value_reg = locs()->in(0).reg(); |
1898 | |
1899 | const Register value_cid_reg = |
1900 | needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister; |
1901 | |
1902 | const Register field_reg = needs_field_temp_reg |
1903 | ? locs()->temp(locs()->temp_count() - 1).reg() |
1904 | : kNoRegister; |
1905 | |
1906 | compiler::Label ok, fail_label; |
1907 | |
1908 | compiler::Label* deopt = nullptr; |
1909 | if (compiler->is_optimizing()) { |
1910 | deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField); |
1911 | } |
1912 | |
1913 | compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label; |
1914 | |
1915 | if (emit_full_guard) { |
1916 | __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); |
1917 | |
1918 | compiler::FieldAddress field_cid_operand(field_reg, |
1919 | Field::guarded_cid_offset()); |
1920 | compiler::FieldAddress field_nullability_operand( |
1921 | field_reg, Field::is_nullable_offset()); |
1922 | |
1923 | if (value_cid == kDynamicCid) { |
1924 | LoadValueCid(compiler, value_cid_reg, value_reg); |
1925 | __ cmpw(value_cid_reg, field_cid_operand); |
1926 | __ j(EQUAL, &ok); |
1927 | __ cmpw(value_cid_reg, field_nullability_operand); |
1928 | } else if (value_cid == kNullCid) { |
1929 | // Value in graph known to be null. |
1930 | // Compare with null. |
1931 | __ cmpw(field_nullability_operand, compiler::Immediate(value_cid)); |
1932 | } else { |
1933 | // Value in graph known to be non-null. |
1934 | // Compare class id with guard field class id. |
1935 | __ cmpw(field_cid_operand, compiler::Immediate(value_cid)); |
1936 | } |
1937 | __ j(EQUAL, &ok); |
1938 | |
    // Check if the tracked state of the guarded field can be initialized
    // inline. If the field needs a length check, we fall through to the
    // runtime, which is responsible for computing the offset of the length
    // field based on the class id.
    // A length guard will be emitted separately, when needed, via the
    // GuardFieldLength instruction after GuardFieldClass.
1945 | if (!field().needs_length_check()) { |
      // An uninitialized field can be handled inline. Check if the field is
      // still uninitialized.
1948 | __ cmpw(field_cid_operand, compiler::Immediate(kIllegalCid)); |
      // Jump to the failure path when the guarded field has been initialized
      // and the field and value class ids do not match.
1951 | __ j(NOT_EQUAL, fail); |
1952 | |
1953 | if (value_cid == kDynamicCid) { |
        // The value's class id is not known.
1955 | __ movw(field_cid_operand, value_cid_reg); |
1956 | __ movw(field_nullability_operand, value_cid_reg); |
1957 | } else { |
1958 | ASSERT(field_reg != kNoRegister); |
1959 | __ movw(field_cid_operand, compiler::Immediate(value_cid)); |
1960 | __ movw(field_nullability_operand, compiler::Immediate(value_cid)); |
1961 | } |
1962 | |
1963 | __ jmp(&ok); |
1964 | } |
1965 | |
1966 | if (deopt == NULL) { |
1967 | ASSERT(!compiler->is_optimizing()); |
1968 | __ Bind(fail); |
1969 | |
1970 | __ cmpw(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()), |
1971 | compiler::Immediate(kDynamicCid)); |
1972 | __ j(EQUAL, &ok); |
1973 | |
1974 | __ pushl(field_reg); |
1975 | __ pushl(value_reg); |
1976 | __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); |
1977 | __ Drop(2); // Drop the field and the value. |
1978 | } else { |
1979 | __ jmp(fail); |
1980 | } |
1981 | } else { |
1982 | ASSERT(compiler->is_optimizing()); |
1983 | ASSERT(deopt != NULL); |
1984 | ASSERT(fail == deopt); |
1985 | |
1986 | // Field guard class has been initialized and is known. |
1987 | if (value_cid == kDynamicCid) { |
1988 | // Value's class id is not known. |
1989 | __ testl(value_reg, compiler::Immediate(kSmiTagMask)); |
1990 | |
1991 | if (field_cid != kSmiCid) { |
1992 | __ j(ZERO, fail); |
1993 | __ LoadClassId(value_cid_reg, value_reg); |
1994 | __ cmpl(value_cid_reg, compiler::Immediate(field_cid)); |
1995 | } |
1996 | |
1997 | if (field().is_nullable() && (field_cid != kNullCid)) { |
1998 | __ j(EQUAL, &ok); |
1999 | if (field_cid != kSmiCid) { |
2000 | __ cmpl(value_cid_reg, compiler::Immediate(kNullCid)); |
2001 | } else { |
2002 | const compiler::Immediate& raw_null = |
2003 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
2004 | __ cmpl(value_reg, raw_null); |
2005 | } |
2006 | } |
2007 | __ j(NOT_EQUAL, fail); |
2008 | } else if (value_cid == field_cid) { |
      // This would normally be caught by Canonicalize, but RemoveRedefinitions
      // may sometimes produce this situation after the last Canonicalize pass.
2011 | } else { |
      // Both the value's and the field's class ids are known.
2013 | ASSERT(value_cid != nullability); |
2014 | __ jmp(fail); |
2015 | } |
2016 | } |
2017 | __ Bind(&ok); |
2018 | } |
2019 | |
2020 | LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone, |
2021 | bool opt) const { |
2022 | const intptr_t kNumInputs = 1; |
2023 | if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) { |
2024 | const intptr_t kNumTemps = 3; |
2025 | LocationSummary* summary = new (zone) |
2026 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
2027 | summary->set_in(0, Location::RequiresRegister()); |
    // We need temporaries for the field object, the length offset, and the
    // expected length.
2029 | summary->set_temp(0, Location::RequiresRegister()); |
2030 | summary->set_temp(1, Location::RequiresRegister()); |
2031 | summary->set_temp(2, Location::RequiresRegister()); |
2032 | return summary; |
2033 | } else { |
2034 | LocationSummary* summary = new (zone) |
2035 | LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); |
2036 | summary->set_in(0, Location::RequiresRegister()); |
2037 | return summary; |
2038 | } |
2039 | UNREACHABLE(); |
2040 | } |
2041 | |
2042 | void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2043 | if (field().guarded_list_length() == Field::kNoFixedLength) { |
2044 | return; // Nothing to emit. |
2045 | } |
2046 | |
2047 | compiler::Label* deopt = |
2048 | compiler->is_optimizing() |
2049 | ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) |
2050 | : NULL; |
2051 | |
2052 | const Register value_reg = locs()->in(0).reg(); |
2053 | |
2054 | if (!compiler->is_optimizing() || |
2055 | (field().guarded_list_length() == Field::kUnknownFixedLength)) { |
2056 | const Register field_reg = locs()->temp(0).reg(); |
2057 | const Register offset_reg = locs()->temp(1).reg(); |
2058 | const Register length_reg = locs()->temp(2).reg(); |
2059 | |
2060 | compiler::Label ok; |
2061 | |
2062 | __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); |
2063 | |
2064 | __ movsxb( |
2065 | offset_reg, |
2066 | compiler::FieldAddress( |
2067 | field_reg, Field::guarded_list_length_in_object_offset_offset())); |
2068 | __ movl(length_reg, compiler::FieldAddress( |
2069 | field_reg, Field::guarded_list_length_offset())); |
2070 | |
2071 | __ cmpl(offset_reg, compiler::Immediate(0)); |
2072 | __ j(NEGATIVE, &ok); |
2073 | |
    // Load the length from the value. GuardFieldClass already verified that
    // the value's class matches the guarded class id of the field.
    // offset_reg contains the offset already corrected by -kHeapObjectTag,
    // which is why we use Address instead of FieldAddress here.
2078 | __ cmpl(length_reg, compiler::Address(value_reg, offset_reg, TIMES_1, 0)); |
2079 | |
2080 | if (deopt == NULL) { |
2081 | __ j(EQUAL, &ok); |
2082 | |
2083 | __ pushl(field_reg); |
2084 | __ pushl(value_reg); |
2085 | __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); |
2086 | __ Drop(2); // Drop the field and the value. |
2087 | } else { |
2088 | __ j(NOT_EQUAL, deopt); |
2089 | } |
2090 | |
2091 | __ Bind(&ok); |
2092 | } else { |
2093 | ASSERT(compiler->is_optimizing()); |
2094 | ASSERT(field().guarded_list_length() >= 0); |
2095 | ASSERT(field().guarded_list_length_in_object_offset() != |
2096 | Field::kUnknownLengthOffset); |
2097 | |
2098 | __ cmpl(compiler::FieldAddress( |
2099 | value_reg, field().guarded_list_length_in_object_offset()), |
2100 | compiler::Immediate(Smi::RawValue(field().guarded_list_length()))); |
2101 | __ j(NOT_EQUAL, deopt); |
2102 | } |
2103 | } |
2104 | |
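// Slow path for box allocation: when inline TryAllocate fails, this calls the
// class's allocation stub with live registers preserved and moves the result
// from EAX into the requested register.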
2105 | class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> { |
2106 | public: |
2107 | BoxAllocationSlowPath(Instruction* instruction, |
2108 | const Class& cls, |
2109 | Register result) |
2110 | : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {} |
2111 | |
2112 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
2113 | if (compiler::Assembler::EmittingComments()) { |
2114 | __ Comment("%s slow path allocation of %s" , instruction()->DebugName(), |
2115 | String::Handle(cls_.ScrubbedName()).ToCString()); |
2116 | } |
2117 | __ Bind(entry_label()); |
2118 | const Code& stub = Code::ZoneHandle( |
2119 | compiler->zone(), StubCode::GetAllocationStubForClass(cls_)); |
2120 | |
2121 | LocationSummary* locs = instruction()->locs(); |
2122 | |
2123 | locs->live_registers()->Remove(Location::RegisterLocation(result_)); |
2124 | |
2125 | compiler->SaveLiveRegisters(locs); |
2126 | compiler->GenerateStubCall(TokenPosition::kNoSource, stub, |
2127 | PcDescriptorsLayout::kOther, locs); |
2128 | __ MoveRegister(result_, EAX); |
2129 | compiler->RestoreLiveRegisters(locs); |
2130 | __ jmp(exit_label()); |
2131 | } |
2132 | |
2133 | static void Allocate(FlowGraphCompiler* compiler, |
2134 | Instruction* instruction, |
2135 | const Class& cls, |
2136 | Register result, |
2137 | Register temp) { |
2138 | if (compiler->intrinsic_mode()) { |
2139 | __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), |
2140 | compiler::Assembler::kFarJump, result, temp); |
2141 | } else { |
2142 | BoxAllocationSlowPath* slow_path = |
2143 | new BoxAllocationSlowPath(instruction, cls, result); |
2144 | compiler->AddSlowPathCode(slow_path); |
2145 | |
2146 | __ TryAllocate(cls, slow_path->entry_label(), |
2147 | compiler::Assembler::kFarJump, result, temp); |
2148 | __ Bind(slow_path->exit_label()); |
2149 | } |
2150 | } |
2151 | |
2152 | private: |
2153 | const Class& cls_; |
2154 | const Register result_; |
2155 | }; |
2156 | |
2157 | LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone, |
2158 | bool opt) const { |
2159 | const intptr_t kNumInputs = 2; |
2160 | const intptr_t kNumTemps = |
2161 | (IsUnboxedStore() && opt) ? 2 : ((IsPotentialUnboxedStore()) ? 3 : 0); |
2162 | LocationSummary* summary = new (zone) |
2163 | LocationSummary(zone, kNumInputs, kNumTemps, |
2164 | ((IsUnboxedStore() && opt && is_initialization()) || |
2165 | IsPotentialUnboxedStore()) |
2166 | ? LocationSummary::kCallOnSlowPath |
2167 | : LocationSummary::kNoCall); |
2168 | |
2169 | summary->set_in(0, Location::RequiresRegister()); |
2170 | if (IsUnboxedStore() && opt) { |
2171 | summary->set_in(1, Location::RequiresFpuRegister()); |
2172 | summary->set_temp(0, Location::RequiresRegister()); |
2173 | summary->set_temp(1, Location::RequiresRegister()); |
2174 | } else if (IsPotentialUnboxedStore()) { |
2175 | summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister() |
2176 | : Location::RequiresRegister()); |
2177 | summary->set_temp(0, Location::RequiresRegister()); |
2178 | summary->set_temp(1, Location::RequiresRegister()); |
2179 | summary->set_temp(2, opt ? Location::RequiresFpuRegister() |
2180 | : Location::FpuRegisterLocation(XMM1)); |
2181 | } else { |
2182 | summary->set_in(1, ShouldEmitStoreBarrier() |
2183 | ? Location::WritableRegister() |
2184 | : LocationRegisterOrConstant(value())); |
2185 | } |
2186 | return summary; |
2187 | } |
2188 | |
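// Loads the mutable box stored at [offset] in [instance_reg] into [box_reg].
// If the field is still null, a fresh box of [cls] is allocated and stored
// back into the field with a write barrier.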
2189 | static void EnsureMutableBox(FlowGraphCompiler* compiler, |
2190 | StoreInstanceFieldInstr* instruction, |
2191 | Register box_reg, |
2192 | const Class& cls, |
2193 | Register instance_reg, |
2194 | intptr_t offset, |
2195 | Register temp) { |
2196 | compiler::Label done; |
2197 | const compiler::Immediate& raw_null = |
2198 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
2199 | __ movl(box_reg, compiler::FieldAddress(instance_reg, offset)); |
2200 | __ cmpl(box_reg, raw_null); |
2201 | __ j(NOT_EQUAL, &done); |
2202 | BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp); |
2203 | __ movl(temp, box_reg); |
2204 | __ StoreIntoObject(instance_reg, compiler::FieldAddress(instance_reg, offset), |
2205 | temp, compiler::Assembler::kValueIsNotSmi); |
2206 | |
2207 | __ Bind(&done); |
2208 | } |
2209 | |
2210 | void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2211 | ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16); |
2212 | ASSERT(sizeof(FieldLayout::guarded_cid_) == 2); |
2213 | ASSERT(sizeof(FieldLayout::is_nullable_) == 2); |
2214 | |
2215 | compiler::Label skip_store; |
2216 | |
2217 | const Register instance_reg = locs()->in(0).reg(); |
2218 | const intptr_t offset_in_bytes = OffsetInBytes(); |
2219 | ASSERT(offset_in_bytes > 0); // Field is finalized and points after header. |
2220 | |
2221 | if (IsUnboxedStore() && compiler->is_optimizing()) { |
2222 | XmmRegister value = locs()->in(1).fpu_reg(); |
2223 | Register temp = locs()->temp(0).reg(); |
2224 | Register temp2 = locs()->temp(1).reg(); |
2225 | const intptr_t cid = slot().field().UnboxedFieldCid(); |
2226 | |
2227 | if (is_initialization()) { |
2228 | const Class* cls = NULL; |
2229 | switch (cid) { |
2230 | case kDoubleCid: |
2231 | cls = &compiler->double_class(); |
2232 | break; |
2233 | case kFloat32x4Cid: |
2234 | cls = &compiler->float32x4_class(); |
2235 | break; |
2236 | case kFloat64x2Cid: |
2237 | cls = &compiler->float64x2_class(); |
2238 | break; |
2239 | default: |
2240 | UNREACHABLE(); |
2241 | } |
2242 | |
2243 | BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2); |
2244 | __ movl(temp2, temp); |
2245 | __ StoreIntoObject(instance_reg, |
2246 | compiler::FieldAddress(instance_reg, offset_in_bytes), |
2247 | temp2, compiler::Assembler::kValueIsNotSmi); |
2248 | } else { |
2249 | __ movl(temp, compiler::FieldAddress(instance_reg, offset_in_bytes)); |
2250 | } |
2251 | switch (cid) { |
2252 | case kDoubleCid: |
2253 | __ Comment("UnboxedDoubleStoreInstanceFieldInstr" ); |
2254 | __ movsd(compiler::FieldAddress(temp, Double::value_offset()), value); |
2255 | break; |
2256 | case kFloat32x4Cid: |
2257 | __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr" ); |
2258 | __ movups(compiler::FieldAddress(temp, Float32x4::value_offset()), |
2259 | value); |
2260 | break; |
2261 | case kFloat64x2Cid: |
2262 | __ Comment("UnboxedFloat64x2StoreInstanceFieldInstr" ); |
2263 | __ movups(compiler::FieldAddress(temp, Float64x2::value_offset()), |
2264 | value); |
2265 | break; |
2266 | default: |
2267 | UNREACHABLE(); |
2268 | } |
2269 | return; |
2270 | } |
2271 | |
2272 | if (IsPotentialUnboxedStore()) { |
2273 | __ Comment("PotentialUnboxedStore" ); |
2274 | Register value_reg = locs()->in(1).reg(); |
2275 | Register temp = locs()->temp(0).reg(); |
2276 | Register temp2 = locs()->temp(1).reg(); |
2277 | FpuRegister fpu_temp = locs()->temp(2).fpu_reg(); |
2278 | |
2279 | if (ShouldEmitStoreBarrier()) { |
      // The value input is a writable register and should be manually
      // preserved across the allocation slow path. Add it to the
      // live_registers set, which determines which registers to preserve.
2283 | locs()->live_registers()->Add(locs()->in(1), kTagged); |
2284 | } |
2285 | |
2286 | compiler::Label store_pointer; |
2287 | compiler::Label store_double; |
2288 | compiler::Label store_float32x4; |
2289 | compiler::Label store_float64x2; |
2290 | |
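    // Decide between a tagged pointer store and an unboxed store by checking
    // the field's nullability, its unboxing-candidate bit, and its guarded
    // class id.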
2291 | __ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original())); |
2292 | |
2293 | __ cmpw(compiler::FieldAddress(temp, Field::is_nullable_offset()), |
2294 | compiler::Immediate(kNullCid)); |
2295 | __ j(EQUAL, &store_pointer); |
2296 | |
2297 | __ movzxb(temp2, compiler::FieldAddress(temp, Field::kind_bits_offset())); |
2298 | __ testl(temp2, compiler::Immediate(1 << Field::kUnboxingCandidateBit)); |
2299 | __ j(ZERO, &store_pointer); |
2300 | |
2301 | __ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()), |
2302 | compiler::Immediate(kDoubleCid)); |
2303 | __ j(EQUAL, &store_double); |
2304 | |
2305 | __ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()), |
2306 | compiler::Immediate(kFloat32x4Cid)); |
2307 | __ j(EQUAL, &store_float32x4); |
2308 | |
2309 | __ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()), |
2310 | compiler::Immediate(kFloat64x2Cid)); |
2311 | __ j(EQUAL, &store_float64x2); |
2312 | |
2313 | // Fall through. |
2314 | __ jmp(&store_pointer); |
2315 | |
2316 | if (!compiler->is_optimizing()) { |
2317 | locs()->live_registers()->Add(locs()->in(0)); |
2318 | locs()->live_registers()->Add(locs()->in(1)); |
2319 | } |
2320 | |
2321 | { |
2322 | __ Bind(&store_double); |
2323 | EnsureMutableBox(compiler, this, temp, compiler->double_class(), |
2324 | instance_reg, offset_in_bytes, temp2); |
2325 | __ movsd(fpu_temp, |
2326 | compiler::FieldAddress(value_reg, Double::value_offset())); |
2327 | __ movsd(compiler::FieldAddress(temp, Double::value_offset()), fpu_temp); |
2328 | __ jmp(&skip_store); |
2329 | } |
2330 | |
2331 | { |
2332 | __ Bind(&store_float32x4); |
2333 | EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(), |
2334 | instance_reg, offset_in_bytes, temp2); |
2335 | __ movups(fpu_temp, |
2336 | compiler::FieldAddress(value_reg, Float32x4::value_offset())); |
2337 | __ movups(compiler::FieldAddress(temp, Float32x4::value_offset()), |
2338 | fpu_temp); |
2339 | __ jmp(&skip_store); |
2340 | } |
2341 | |
2342 | { |
2343 | __ Bind(&store_float64x2); |
2344 | EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(), |
2345 | instance_reg, offset_in_bytes, temp2); |
2346 | __ movups(fpu_temp, |
2347 | compiler::FieldAddress(value_reg, Float64x2::value_offset())); |
2348 | __ movups(compiler::FieldAddress(temp, Float64x2::value_offset()), |
2349 | fpu_temp); |
2350 | __ jmp(&skip_store); |
2351 | } |
2352 | |
2353 | __ Bind(&store_pointer); |
2354 | } |
2355 | |
2356 | if (ShouldEmitStoreBarrier()) { |
2357 | Register value_reg = locs()->in(1).reg(); |
2358 | __ StoreIntoObject(instance_reg, |
2359 | compiler::FieldAddress(instance_reg, offset_in_bytes), |
2360 | value_reg, CanValueBeSmi()); |
2361 | } else { |
2362 | if (locs()->in(1).IsConstant()) { |
2363 | __ StoreIntoObjectNoBarrier( |
2364 | instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes), |
2365 | locs()->in(1).constant()); |
2366 | } else { |
2367 | Register value_reg = locs()->in(1).reg(); |
2368 | __ StoreIntoObjectNoBarrier( |
2369 | instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes), |
2370 | value_reg); |
2371 | } |
2372 | } |
2373 | __ Bind(&skip_store); |
2374 | } |
2375 | |
2376 | LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone, |
2377 | bool opt) const { |
2378 | LocationSummary* locs = |
2379 | new (zone) LocationSummary(zone, 1, 1, LocationSummary::kNoCall); |
2380 | locs->set_in(0, value()->NeedsWriteBarrier() ? Location::WritableRegister() |
2381 | : Location::RequiresRegister()); |
2382 | locs->set_temp(0, Location::RequiresRegister()); |
2383 | return locs; |
2384 | } |
2385 | |
2386 | void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2387 | Register value = locs()->in(0).reg(); |
2388 | Register temp = locs()->temp(0).reg(); |
2389 | |
2390 | compiler->used_static_fields().Add(&field()); |
2391 | |
2392 | __ movl(temp, |
2393 | compiler::Address( |
2394 | THR, compiler::target::Thread::field_table_values_offset())); |
  // Note: static field ids won't be changed by hot-reload.
2396 | __ movl( |
2397 | compiler::Address(temp, compiler::target::FieldTable::OffsetOf(field())), |
2398 | value); |
2399 | } |
2400 | |
2401 | LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone, |
2402 | bool opt) const { |
2403 | const intptr_t kNumInputs = 3; |
2404 | const intptr_t kNumTemps = 0; |
2405 | LocationSummary* summary = new (zone) |
2406 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2407 | |
2408 | summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg)); |
2409 | summary->set_in(1, Location::RegisterLocation( |
2410 | TypeTestABI::kInstantiatorTypeArgumentsReg)); |
2411 | summary->set_in( |
2412 | 2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg)); |
2413 | summary->set_out(0, Location::RegisterLocation(EAX)); |
2414 | return summary; |
2415 | } |
2416 | |
2417 | void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2418 | ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg); |
2419 | ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg); |
2420 | ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg); |
2421 | |
2422 | compiler->GenerateInstanceOf(token_pos(), deopt_id(), type(), locs()); |
2423 | ASSERT(locs()->out(0).reg() == EAX); |
2424 | } |
2425 | |
2426 | // TODO(srdjan): In case of constant inputs make CreateArray kNoCall and |
2427 | // use slow path stub. |
2428 | LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone, |
2429 | bool opt) const { |
2430 | const intptr_t kNumInputs = 2; |
2431 | const intptr_t kNumTemps = 0; |
2432 | LocationSummary* locs = new (zone) |
2433 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2434 | locs->set_in(0, Location::RegisterLocation(ECX)); |
2435 | locs->set_in(1, Location::RegisterLocation(EDX)); |
2436 | locs->set_out(0, Location::RegisterLocation(EAX)); |
2437 | return locs; |
2438 | } |
2439 | |
2440 | // Inlines array allocation for known constant values. |
2441 | static void InlineArrayAllocation(FlowGraphCompiler* compiler, |
2442 | intptr_t num_elements, |
2443 | compiler::Label* slow_path, |
2444 | compiler::Label* done) { |
2445 | const int kInlineArraySize = 12; // Same as kInlineInstanceSize. |
2446 | const Register kLengthReg = EDX; |
2447 | const Register kElemTypeReg = ECX; |
2448 | const intptr_t instance_size = Array::InstanceSize(num_elements); |
2449 | |
2450 | // Instance in EAX. |
2451 | // Object end address in EBX. |
2452 | __ TryAllocateArray(kArrayCid, instance_size, slow_path, |
2453 | compiler::Assembler::kFarJump, |
2454 | EAX, // instance |
2455 | EBX, // end address |
2456 | EDI); // temp |
2457 | |
2458 | // Store the type argument field. |
2459 | __ StoreIntoObjectNoBarrier( |
2460 | EAX, compiler::FieldAddress(EAX, Array::type_arguments_offset()), |
2461 | kElemTypeReg); |
2462 | |
2463 | // Set the length field. |
2464 | __ StoreIntoObjectNoBarrier( |
2465 | EAX, compiler::FieldAddress(EAX, Array::length_offset()), kLengthReg); |
2466 | |
2467 | // Initialize all array elements to raw_null. |
2468 | // EAX: new object start as a tagged pointer. |
2469 | // EBX: new object end address. |
2470 | // EDI: iterator which initially points to the start of the variable |
2471 | // data area to be initialized. |
2472 | if (num_elements > 0) { |
2473 | const intptr_t array_size = instance_size - sizeof(ArrayLayout); |
2474 | const compiler::Immediate& raw_null = |
2475 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
2476 | __ leal(EDI, compiler::FieldAddress(EAX, sizeof(ArrayLayout))); |
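    // For small arrays the null-initialization is unrolled; larger arrays use
    // a loop that advances EDI one word at a time up to the object end in
    // EBX.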
2477 | if (array_size < (kInlineArraySize * kWordSize)) { |
2478 | intptr_t current_offset = 0; |
2479 | __ movl(EBX, raw_null); |
2480 | while (current_offset < array_size) { |
2481 | __ StoreIntoObjectNoBarrier(EAX, compiler::Address(EDI, current_offset), |
2482 | EBX); |
2483 | current_offset += kWordSize; |
2484 | } |
2485 | } else { |
2486 | compiler::Label init_loop; |
2487 | __ Bind(&init_loop); |
2488 | __ StoreIntoObjectNoBarrier(EAX, compiler::Address(EDI, 0), |
2489 | Object::null_object()); |
2490 | __ addl(EDI, compiler::Immediate(kWordSize)); |
2491 | __ cmpl(EDI, EBX); |
2492 | __ j(BELOW, &init_loop, compiler::Assembler::kNearJump); |
2493 | } |
2494 | } |
2495 | __ jmp(done, compiler::Assembler::kNearJump); |
2496 | } |
2497 | |
2498 | void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2499 | // Allocate the array. EDX = length, ECX = element type. |
2500 | const Register kLengthReg = EDX; |
2501 | const Register kElemTypeReg = ECX; |
2502 | const Register kResultReg = EAX; |
2503 | ASSERT(locs()->in(0).reg() == kElemTypeReg); |
2504 | ASSERT(locs()->in(1).reg() == kLengthReg); |
2505 | |
2506 | compiler::Label slow_path, done; |
2507 | if (compiler->is_optimizing() && num_elements()->BindsToConstant() && |
2508 | num_elements()->BoundConstant().IsSmi()) { |
2509 | const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value(); |
2510 | if (Array::IsValidLength(length)) { |
2511 | InlineArrayAllocation(compiler, length, &slow_path, &done); |
2512 | } |
2513 | } |
2514 | |
2515 | __ Bind(&slow_path); |
2516 | auto object_store = compiler->isolate()->object_store(); |
2517 | const auto& allocate_array_stub = |
2518 | Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub()); |
2519 | compiler->GenerateStubCall(token_pos(), allocate_array_stub, |
2520 | PcDescriptorsLayout::kOther, locs(), deopt_id()); |
2521 | __ Bind(&done); |
2522 | ASSERT(locs()->out(0).reg() == kResultReg); |
2523 | } |
2524 | |
2525 | LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone, |
2526 | bool opt) const { |
2527 | const intptr_t kNumInputs = 1; |
2528 | const intptr_t kNumTemps = |
2529 | (IsUnboxedLoad() && opt) ? 1 : ((IsPotentialUnboxedLoad()) ? 2 : 0); |
2530 | const auto contains_call = |
2531 | (IsUnboxedLoad() && opt) |
2532 | ? LocationSummary::kNoCall |
2533 | : (IsPotentialUnboxedLoad() |
2534 | ? LocationSummary::kCallOnSlowPath |
2535 | : (calls_initializer() ? LocationSummary::kCall |
2536 | : LocationSummary::kNoCall)); |
2537 | |
2538 | LocationSummary* locs = |
2539 | new (zone) LocationSummary(zone, kNumInputs, kNumTemps, contains_call); |
2540 | |
2541 | locs->set_in(0, calls_initializer() ? Location::RegisterLocation( |
2542 | InitInstanceFieldABI::kInstanceReg) |
2543 | : Location::RequiresRegister()); |
2544 | |
2545 | if (IsUnboxedLoad() && opt) { |
2546 | ASSERT(!calls_initializer()); |
2547 | locs->set_temp(0, Location::RequiresRegister()); |
2548 | } else if (IsPotentialUnboxedLoad()) { |
2549 | ASSERT(!calls_initializer()); |
2550 | locs->set_temp(0, opt ? Location::RequiresFpuRegister() |
2551 | : Location::FpuRegisterLocation(XMM1)); |
2552 | locs->set_temp(1, Location::RequiresRegister()); |
2553 | } |
2554 | locs->set_out(0, calls_initializer() ? Location::RegisterLocation( |
2555 | InitInstanceFieldABI::kResultReg) |
2556 | : Location::RequiresRegister()); |
2557 | return locs; |
2558 | } |
2559 | |
2560 | void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2561 | ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16); |
2562 | ASSERT(sizeof(FieldLayout::guarded_cid_) == 2); |
2563 | ASSERT(sizeof(FieldLayout::is_nullable_) == 2); |
2564 | |
2565 | Register instance_reg = locs()->in(0).reg(); |
2566 | if (IsUnboxedLoad() && compiler->is_optimizing()) { |
2567 | ASSERT(!calls_initializer()); |
2568 | XmmRegister result = locs()->out(0).fpu_reg(); |
2569 | Register temp = locs()->temp(0).reg(); |
2570 | __ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes())); |
2571 | const intptr_t cid = slot().field().UnboxedFieldCid(); |
2572 | switch (cid) { |
2573 | case kDoubleCid: |
2574 | __ Comment("UnboxedDoubleLoadFieldInstr" ); |
2575 | __ movsd(result, compiler::FieldAddress(temp, Double::value_offset())); |
2576 | break; |
2577 | case kFloat32x4Cid: |
2578 | __ Comment("UnboxedFloat32x4LoadFieldInstr" ); |
2579 | __ movups(result, |
2580 | compiler::FieldAddress(temp, Float32x4::value_offset())); |
2581 | break; |
2582 | case kFloat64x2Cid: |
2583 | __ Comment("UnboxedFloat64x2LoadFieldInstr" ); |
2584 | __ movups(result, |
2585 | compiler::FieldAddress(temp, Float64x2::value_offset())); |
2586 | break; |
2587 | default: |
2588 | UNREACHABLE(); |
2589 | } |
2590 | return; |
2591 | } |
2592 | |
2593 | compiler::Label done; |
2594 | Register result = locs()->out(0).reg(); |
2595 | if (IsPotentialUnboxedLoad()) { |
2596 | ASSERT(!calls_initializer()); |
2597 | Register temp = locs()->temp(1).reg(); |
2598 | XmmRegister value = locs()->temp(0).fpu_reg(); |
2599 | |
2600 | compiler::Label load_pointer; |
2601 | compiler::Label load_double; |
2602 | compiler::Label load_float32x4; |
2603 | compiler::Label load_float64x2; |
2604 | |
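    // Dispatch on the field's guarded state: nullable fields take the plain
    // pointer load, while known double/float32x4/float64x2 cids load the
    // unboxed payload and box it into a freshly allocated object.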
2605 | __ LoadObject(result, Field::ZoneHandle(slot().field().Original())); |
2606 | |
2607 | compiler::FieldAddress field_cid_operand(result, |
2608 | Field::guarded_cid_offset()); |
2609 | compiler::FieldAddress field_nullability_operand( |
2610 | result, Field::is_nullable_offset()); |
2611 | |
2612 | __ cmpw(field_nullability_operand, compiler::Immediate(kNullCid)); |
2613 | __ j(EQUAL, &load_pointer); |
2614 | |
2615 | __ cmpw(field_cid_operand, compiler::Immediate(kDoubleCid)); |
2616 | __ j(EQUAL, &load_double); |
2617 | |
2618 | __ cmpw(field_cid_operand, compiler::Immediate(kFloat32x4Cid)); |
2619 | __ j(EQUAL, &load_float32x4); |
2620 | |
2621 | __ cmpw(field_cid_operand, compiler::Immediate(kFloat64x2Cid)); |
2622 | __ j(EQUAL, &load_float64x2); |
2623 | |
2624 | // Fall through. |
2625 | __ jmp(&load_pointer); |
2626 | |
2627 | if (!compiler->is_optimizing()) { |
2628 | locs()->live_registers()->Add(locs()->in(0)); |
2629 | } |
2630 | |
2631 | { |
2632 | __ Bind(&load_double); |
2633 | BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), |
2634 | result, temp); |
2635 | __ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes())); |
2636 | __ movsd(value, compiler::FieldAddress(temp, Double::value_offset())); |
2637 | __ movsd(compiler::FieldAddress(result, Double::value_offset()), value); |
2638 | __ jmp(&done); |
2639 | } |
2640 | |
2641 | { |
2642 | __ Bind(&load_float32x4); |
2643 | BoxAllocationSlowPath::Allocate( |
2644 | compiler, this, compiler->float32x4_class(), result, temp); |
2645 | __ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes())); |
2646 | __ movups(value, compiler::FieldAddress(temp, Float32x4::value_offset())); |
2647 | __ movups(compiler::FieldAddress(result, Float32x4::value_offset()), |
2648 | value); |
2649 | __ jmp(&done); |
2650 | } |
2651 | |
2652 | { |
2653 | __ Bind(&load_float64x2); |
2654 | BoxAllocationSlowPath::Allocate( |
2655 | compiler, this, compiler->float64x2_class(), result, temp); |
2656 | __ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes())); |
2657 | __ movups(value, compiler::FieldAddress(temp, Float64x2::value_offset())); |
2658 | __ movups(compiler::FieldAddress(result, Float64x2::value_offset()), |
2659 | value); |
2660 | __ jmp(&done); |
2661 | } |
2662 | |
2663 | __ Bind(&load_pointer); |
2664 | } |
2665 | |
2666 | __ movl(result, compiler::FieldAddress(instance_reg, OffsetInBytes())); |
2667 | |
2668 | if (calls_initializer()) { |
2669 | EmitNativeCodeForInitializerCall(compiler); |
2670 | } |
2671 | |
2672 | __ Bind(&done); |
2673 | } |
2674 | |
2675 | LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone, |
2676 | bool opt) const { |
2677 | const intptr_t kNumInputs = 2; |
2678 | const intptr_t kNumTemps = 0; |
2679 | LocationSummary* locs = new (zone) |
2680 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2681 | locs->set_in(0, Location::RegisterLocation( |
2682 | InstantiationABI::kInstantiatorTypeArgumentsReg)); |
2683 | locs->set_in(1, Location::RegisterLocation( |
2684 | InstantiationABI::kFunctionTypeArgumentsReg)); |
2685 | locs->set_out(0, |
2686 | Location::RegisterLocation(InstantiationABI::kResultTypeReg)); |
2687 | return locs; |
2688 | } |
2689 | |
2690 | void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2691 | Register instantiator_type_args_reg = locs()->in(0).reg(); |
2692 | Register function_type_args_reg = locs()->in(1).reg(); |
2693 | Register result_reg = locs()->out(0).reg(); |
2694 | |
2695 | // 'instantiator_type_args_reg' is a TypeArguments object (or null). |
2696 | // 'function_type_args_reg' is a TypeArguments object (or null). |
2697 | // A runtime call to instantiate the type is required. |
2698 | __ PushObject(Object::null_object()); // Make room for the result. |
2699 | __ PushObject(type()); |
2700 | __ pushl(instantiator_type_args_reg); // Push instantiator type arguments. |
2701 | __ pushl(function_type_args_reg); // Push function type arguments. |
2702 | compiler->GenerateRuntimeCall(token_pos(), deopt_id(), |
2703 | kInstantiateTypeRuntimeEntry, 3, locs()); |
  __ Drop(3); // Drop the two type vectors and the uninstantiated type.
2705 | __ popl(result_reg); // Pop instantiated type. |
2706 | } |
2707 | |
2708 | LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary( |
2709 | Zone* zone, |
2710 | bool opt) const { |
2711 | const intptr_t kNumInputs = 2; |
2712 | const intptr_t kNumTemps = 0; |
2713 | LocationSummary* locs = new (zone) |
2714 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2715 | locs->set_in(0, Location::RegisterLocation( |
2716 | InstantiationABI::kInstantiatorTypeArgumentsReg)); |
2717 | locs->set_in(1, Location::RegisterLocation( |
2718 | InstantiationABI::kFunctionTypeArgumentsReg)); |
2719 | locs->set_out( |
2720 | 0, Location::RegisterLocation(InstantiationABI::kResultTypeArgumentsReg)); |
2721 | return locs; |
2722 | } |
2723 | |
2724 | void InstantiateTypeArgumentsInstr::EmitNativeCode( |
2725 | FlowGraphCompiler* compiler) { |
2726 | Register instantiator_type_args_reg = locs()->in(0).reg(); |
2727 | Register function_type_args_reg = locs()->in(1).reg(); |
2728 | Register result_reg = locs()->out(0).reg(); |
2729 | |
2730 | // 'instantiator_type_args_reg' is a TypeArguments object (or null). |
2731 | // 'function_type_args_reg' is a TypeArguments object (or null). |
2732 | |
  // If both the instantiator and function type arguments are null, and the
  // type argument vector instantiated from null becomes a vector of dynamic,
  // then use null as the type arguments.
2736 | compiler::Label type_arguments_instantiated; |
2737 | const intptr_t len = type_arguments().Length(); |
2738 | const bool can_function_type_args_be_null = |
2739 | function_type_arguments()->CanBe(Object::null_object()); |
2740 | if (type_arguments().IsRawWhenInstantiatedFromRaw(len) && |
2741 | can_function_type_args_be_null) { |
2742 | compiler::Label non_null_type_args; |
2743 | ASSERT(result_reg != instantiator_type_args_reg && |
2744 | result_reg != function_type_args_reg); |
2745 | __ LoadObject(result_reg, Object::null_object()); |
2746 | __ cmpl(instantiator_type_args_reg, result_reg); |
2747 | if (!function_type_arguments()->BindsToConstant()) { |
2748 | __ j(NOT_EQUAL, &non_null_type_args, compiler::Assembler::kNearJump); |
2749 | __ cmpl(function_type_args_reg, result_reg); |
2750 | } |
2751 | __ j(EQUAL, &type_arguments_instantiated, compiler::Assembler::kNearJump); |
2752 | __ Bind(&non_null_type_args); |
2753 | } |
2754 | // Lookup cache in stub before calling runtime. |
2755 | __ LoadObject(InstantiationABI::kUninstantiatedTypeArgumentsReg, |
2756 | type_arguments()); |
2757 | compiler->GenerateStubCall(token_pos(), GetStub(), |
2758 | PcDescriptorsLayout::kOther, locs()); |
2759 | __ Bind(&type_arguments_instantiated); |
2760 | } |
2761 | |
2762 | LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary( |
2763 | Zone* zone, |
2764 | bool opt) const { |
2765 | ASSERT(opt); |
2766 | const intptr_t kNumInputs = 0; |
2767 | const intptr_t kNumTemps = 2; |
2768 | LocationSummary* locs = new (zone) LocationSummary( |
2769 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
2770 | locs->set_temp(0, Location::RegisterLocation(ECX)); |
2771 | locs->set_temp(1, Location::RegisterLocation(EDI)); |
2772 | locs->set_out(0, Location::RegisterLocation(EAX)); |
2773 | return locs; |
2774 | } |
2775 | |
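// Slow path for AllocateUninitializedContext: passes the number of context
// variables in EDX and calls the AllocateContext stub with live registers
// preserved.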
2776 | class AllocateContextSlowPath |
2777 | : public TemplateSlowPathCode<AllocateUninitializedContextInstr> { |
2778 | public: |
2779 | explicit AllocateContextSlowPath( |
2780 | AllocateUninitializedContextInstr* instruction) |
2781 | : TemplateSlowPathCode(instruction) {} |
2782 | |
2783 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
2784 | __ Comment("AllocateContextSlowPath" ); |
2785 | __ Bind(entry_label()); |
2786 | |
2787 | LocationSummary* locs = instruction()->locs(); |
2788 | ASSERT(!locs->live_registers()->Contains(locs->out(0))); |
2789 | |
2790 | compiler->SaveLiveRegisters(locs); |
2791 | |
2792 | __ movl(EDX, compiler::Immediate(instruction()->num_context_variables())); |
2793 | compiler->GenerateStubCall(instruction()->token_pos(), |
2794 | StubCode::AllocateContext(), |
2795 | PcDescriptorsLayout::kOther, locs); |
2796 | ASSERT(instruction()->locs()->out(0).reg() == EAX); |
2797 | compiler->RestoreLiveRegisters(instruction()->locs()); |
2798 | __ jmp(exit_label()); |
2799 | } |
2800 | }; |
2801 | |
2802 | void AllocateUninitializedContextInstr::EmitNativeCode( |
2803 | FlowGraphCompiler* compiler) { |
2804 | ASSERT(compiler->is_optimizing()); |
2805 | Register temp = locs()->temp(0).reg(); |
2806 | Register temp2 = locs()->temp(1).reg(); |
2807 | Register result = locs()->out(0).reg(); |
  // Try to allocate the object inline; the slow path takes over on failure.
2809 | AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this); |
2810 | compiler->AddSlowPathCode(slow_path); |
2811 | intptr_t instance_size = Context::InstanceSize(num_context_variables()); |
2812 | |
2813 | __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(), |
2814 | compiler::Assembler::kFarJump, |
2815 | result, // instance |
2816 | temp, // end address |
2817 | temp2); // temp |
2818 | |
  // Set up the number of context variables field.
2820 | __ movl(compiler::FieldAddress(result, Context::num_variables_offset()), |
2821 | compiler::Immediate(num_context_variables())); |
2822 | |
2823 | __ Bind(slow_path->exit_label()); |
2824 | } |
2825 | |
2826 | LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone, |
2827 | bool opt) const { |
2828 | const intptr_t kNumInputs = 0; |
2829 | const intptr_t kNumTemps = 1; |
2830 | LocationSummary* locs = new (zone) |
2831 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2832 | locs->set_temp(0, Location::RegisterLocation(EDX)); |
2833 | locs->set_out(0, Location::RegisterLocation(EAX)); |
2834 | return locs; |
2835 | } |
2836 | |
2837 | void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2838 | ASSERT(locs()->temp(0).reg() == EDX); |
2839 | ASSERT(locs()->out(0).reg() == EAX); |
2840 | |
2841 | __ movl(EDX, compiler::Immediate(num_context_variables())); |
2842 | compiler->GenerateStubCall(token_pos(), StubCode::AllocateContext(), |
2843 | PcDescriptorsLayout::kOther, locs()); |
2844 | } |
2845 | |
2846 | LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone, |
2847 | bool opt) const { |
2848 | const intptr_t kNumInputs = 1; |
2849 | const intptr_t kNumTemps = 0; |
2850 | LocationSummary* locs = new (zone) |
2851 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2852 | locs->set_in(0, Location::RegisterLocation(ECX)); |
2853 | locs->set_out(0, Location::RegisterLocation(EAX)); |
2854 | return locs; |
2855 | } |
2856 | |
2857 | void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2858 | ASSERT(locs()->in(0).reg() == ECX); |
2859 | ASSERT(locs()->out(0).reg() == EAX); |
2860 | |
2861 | compiler->GenerateStubCall(token_pos(), StubCode::CloneContext(), |
2862 | /*kind=*/PcDescriptorsLayout::kOther, locs()); |
2863 | } |
2864 | |
2865 | LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone, |
2866 | bool opt) const { |
2867 | UNREACHABLE(); |
2868 | return NULL; |
2869 | } |
2870 | |
2871 | void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2872 | __ Bind(compiler->GetJumpLabel(this)); |
2873 | compiler->AddExceptionHandler( |
2874 | catch_try_index(), try_index(), compiler->assembler()->CodeSize(), |
2875 | is_generated(), catch_handler_types_, needs_stacktrace()); |
2876 | if (!FLAG_precompiled_mode) { |
2877 | // On lazy deoptimization we patch the optimized code here to enter the |
2878 | // deoptimization stub. |
2879 | const intptr_t deopt_id = DeoptId::ToDeoptAfter(GetDeoptId()); |
2880 | if (compiler->is_optimizing()) { |
2881 | compiler->AddDeoptIndexAtCall(deopt_id); |
2882 | } else { |
2883 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id, |
2884 | TokenPosition::kNoSource); |
2885 | } |
2886 | } |
2887 | if (HasParallelMove()) { |
2888 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
2889 | } |
2890 | |
2891 | // Restore ESP from EBP as we are coming from a throw and the code for |
2892 | // popping arguments has not been run. |
2893 | const intptr_t fp_sp_dist = |
2894 | (compiler::target::frame_layout.first_local_from_fp + 1 - |
2895 | compiler->StackSize()) * |
2896 | kWordSize; |
2897 | ASSERT(fp_sp_dist <= 0); |
2898 | __ leal(ESP, compiler::Address(EBP, fp_sp_dist)); |
2899 | |
2900 | if (!compiler->is_optimizing()) { |
2901 | if (raw_exception_var_ != nullptr) { |
2902 | __ movl(compiler::Address(EBP, |
2903 | compiler::target::FrameOffsetInBytesForVariable( |
2904 | raw_exception_var_)), |
2905 | kExceptionObjectReg); |
2906 | } |
2907 | if (raw_stacktrace_var_ != nullptr) { |
2908 | __ movl(compiler::Address(EBP, |
2909 | compiler::target::FrameOffsetInBytesForVariable( |
2910 | raw_stacktrace_var_)), |
2911 | kStackTraceObjectReg); |
2912 | } |
2913 | } |
2914 | } |
2915 | |
2916 | LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone, |
2917 | bool opt) const { |
2918 | const intptr_t kNumInputs = 0; |
2919 | const intptr_t kNumTemps = opt ? 0 : 1; |
2920 | LocationSummary* summary = new (zone) LocationSummary( |
2921 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
2922 | if (!opt) { |
2923 | summary->set_temp(0, Location::RequiresRegister()); |
2924 | } |
2925 | return summary; |
2926 | } |
2927 | |
2928 | class CheckStackOverflowSlowPath |
2929 | : public TemplateSlowPathCode<CheckStackOverflowInstr> { |
2930 | public: |
2931 | explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) |
2932 | : TemplateSlowPathCode(instruction) {} |
2933 | |
2934 | virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
2935 | if (compiler->isolate()->use_osr() && osr_entry_label()->IsLinked()) { |
2936 | __ Comment("CheckStackOverflowSlowPathOsr" ); |
2937 | __ Bind(osr_entry_label()); |
2938 | __ movl(compiler::Address(THR, Thread::stack_overflow_flags_offset()), |
2939 | compiler::Immediate(Thread::kOsrRequest)); |
2940 | } |
2941 | __ Comment("CheckStackOverflowSlowPath" ); |
2942 | __ Bind(entry_label()); |
2943 | compiler->SaveLiveRegisters(instruction()->locs()); |
2944 | // pending_deoptimization_env_ is needed to generate a runtime call that |
2945 | // may throw an exception. |
2946 | ASSERT(compiler->pending_deoptimization_env_ == NULL); |
2947 | Environment* env = compiler->SlowPathEnvironmentFor( |
2948 | instruction(), /*num_slow_path_args=*/0); |
2949 | compiler->pending_deoptimization_env_ = env; |
2950 | compiler->GenerateRuntimeCall( |
2951 | instruction()->token_pos(), instruction()->deopt_id(), |
2952 | kStackOverflowRuntimeEntry, 0, instruction()->locs()); |
2953 | |
2954 | if (compiler->isolate()->use_osr() && !compiler->is_optimizing() && |
2955 | instruction()->in_loop()) { |
2956 | // In unoptimized code, record loop stack checks as possible OSR entries. |
2957 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry, |
2958 | instruction()->deopt_id(), |
2959 | TokenPosition::kNoSource); |
2960 | } |
2961 | compiler->pending_deoptimization_env_ = NULL; |
2962 | compiler->RestoreLiveRegisters(instruction()->locs()); |
2963 | __ jmp(exit_label()); |
2964 | } |
2965 | |
2966 | compiler::Label* osr_entry_label() { |
2967 | ASSERT(Isolate::Current()->use_osr()); |
2968 | return &osr_entry_label_; |
2969 | } |
2970 | |
2971 | private: |
2972 | compiler::Label osr_entry_label_; |
2973 | }; |
2974 | |
2975 | void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
2976 | CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); |
2977 | compiler->AddSlowPathCode(slow_path); |
2978 | |
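| // Fast path: a single compare of ESP against the stack limit cached in
| // the Thread object; everything else happens on the slow path.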
2979 | __ cmpl(ESP, compiler::Address(THR, Thread::stack_limit_offset())); |
2980 | __ j(BELOW_EQUAL, slow_path->entry_label()); |
2981 | if (compiler->CanOSRFunction() && in_loop()) { |
2982 | // In unoptimized code check the usage counter to trigger OSR at loop |
2983 | // stack checks. Use progressively higher thresholds for more deeply |
2984 | // nested loops to attempt to hit outer loops with OSR when possible. |
2985 | __ LoadObject(EDI, compiler->parsed_function().function()); |
2986 | intptr_t threshold = |
2987 | FLAG_optimization_counter_threshold * (loop_depth() + 1); |
2988 | __ incl(compiler::FieldAddress(EDI, Function::usage_counter_offset())); |
2989 | __ cmpl(compiler::FieldAddress(EDI, Function::usage_counter_offset()), |
2990 | compiler::Immediate(threshold)); |
2991 | __ j(GREATER_EQUAL, slow_path->osr_entry_label()); |
2992 | } |
2993 | if (compiler->ForceSlowPathForStackOverflow()) { |
2994 | // TODO(turnidge): Implement stack overflow count in assembly to |
2995 | // make --stacktrace-every and --deoptimize-every faster. |
2996 | __ jmp(slow_path->entry_label()); |
2997 | } |
2998 | __ Bind(slow_path->exit_label()); |
2999 | } |
3000 | |
3001 | static void EmitSmiShiftLeft(FlowGraphCompiler* compiler, |
3002 | BinarySmiOpInstr* shift_left) { |
3003 | const LocationSummary& locs = *shift_left->locs(); |
3004 | Register left = locs.in(0).reg(); |
3005 | Register result = locs.out(0).reg(); |
3006 | ASSERT(left == result); |
3007 | compiler::Label* deopt = |
3008 | shift_left->CanDeoptimize() |
3009 | ? compiler->AddDeoptStub(shift_left->deopt_id(), |
3010 | ICData::kDeoptBinarySmiOp) |
3011 | : NULL; |
3012 | if (locs.in(1).IsConstant()) { |
3013 | const Object& constant = locs.in(1).constant(); |
3014 | ASSERT(constant.IsSmi()); |
3015 | // The shll instruction masks the shift count to 5 bits.
3016 | const intptr_t kCountLimit = 0x1F; |
3017 | const intptr_t value = Smi::Cast(constant).Value(); |
3018 | ASSERT((0 < value) && (value < kCountLimit)); |
3019 | if (shift_left->can_overflow()) { |
3020 | if (value == 1) { |
3021 | // Use overflow flag. |
3022 | __ shll(left, compiler::Immediate(1)); |
3023 | __ j(OVERFLOW, deopt); |
3024 | return; |
3025 | } |
3026 | // Check for overflow. |
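| // Shift left, then arithmetic-shift right by the same amount: if the
| // round trip does not reproduce the original value, the shift overflowed.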
3027 | Register temp = locs.temp(0).reg(); |
3028 | __ movl(temp, left); |
3029 | __ shll(left, compiler::Immediate(value)); |
3030 | __ sarl(left, compiler::Immediate(value)); |
3031 | __ cmpl(left, temp); |
3032 | __ j(NOT_EQUAL, deopt); // Overflow. |
3033 | } |
3034 | // Shift for the result now that we know there is no overflow.
3035 | __ shll(left, compiler::Immediate(value)); |
3036 | return; |
3037 | } |
3038 | |
3039 | // Right (locs.in(1)) is not constant. |
3040 | Register right = locs.in(1).reg(); |
3041 | Range* right_range = shift_left->right_range(); |
3042 | if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) { |
3043 | // TODO(srdjan): Implement code below for can_overflow(). |
3044 | // If left is constant, we know the maximal allowed size for right. |
3045 | const Object& obj = shift_left->left()->BoundConstant(); |
3046 | if (obj.IsSmi()) { |
3047 | const intptr_t left_int = Smi::Cast(obj).Value(); |
3048 | if (left_int == 0) { |
3049 | __ cmpl(right, compiler::Immediate(0)); |
3050 | __ j(NEGATIVE, deopt); |
3051 | return; |
3052 | } |
3053 | const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int); |
3054 | const bool right_needs_check = |
3055 | !RangeUtils::IsWithin(right_range, 0, max_right - 1); |
3056 | if (right_needs_check) { |
3057 | __ cmpl(right, |
3058 | compiler::Immediate(static_cast<int32_t>(Smi::New(max_right)))); |
3059 | __ j(ABOVE_EQUAL, deopt); |
3060 | } |
3061 | __ SmiUntag(right); |
3062 | __ shll(left, right); |
3063 | } |
3064 | return; |
3065 | } |
3066 | |
3067 | const bool right_needs_check = |
3068 | !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1)); |
3069 | ASSERT(right == ECX); // Count must be in ECX |
3070 | if (!shift_left->can_overflow()) { |
3071 | if (right_needs_check) { |
3072 | if (!RangeUtils::IsPositive(right_range)) { |
3073 | ASSERT(shift_left->CanDeoptimize()); |
3074 | __ cmpl(right, compiler::Immediate(0)); |
3075 | __ j(NEGATIVE, deopt); |
3076 | } |
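| // A count >= kBits would overflow for any nonzero left, and
| // can_overflow() is false here, so the result must be zero.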
3077 | compiler::Label done, is_not_zero; |
3078 | __ cmpl(right, |
3079 | compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits)))); |
3080 | __ j(BELOW, &is_not_zero, compiler::Assembler::kNearJump); |
3081 | __ xorl(left, left); |
3082 | __ jmp(&done, compiler::Assembler::kNearJump); |
3083 | __ Bind(&is_not_zero); |
3084 | __ SmiUntag(right); |
3085 | __ shll(left, right); |
3086 | __ Bind(&done); |
3087 | } else { |
3088 | __ SmiUntag(right); |
3089 | __ shll(left, right); |
3090 | } |
3091 | } else { |
3092 | if (right_needs_check) { |
3093 | ASSERT(shift_left->CanDeoptimize()); |
3094 | __ cmpl(right, |
3095 | compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits)))); |
3096 | __ j(ABOVE_EQUAL, deopt); |
3097 | } |
3098 | // Left is not a constant. |
3099 | Register temp = locs.temp(0).reg(); |
3100 | // Check if the count is too large to handle inline.
3101 | __ movl(temp, left); |
3102 | __ SmiUntag(right); |
3103 | // Overflow test (preserves temp and right).
3104 | __ shll(left, right); |
3105 | __ sarl(left, right); |
3106 | __ cmpl(left, temp); |
3107 | __ j(NOT_EQUAL, deopt); // Overflow. |
3108 | // Shift for the result now that we know there is no overflow.
3109 | __ shll(left, right); |
3110 | } |
3111 | } |
3112 | |
3113 | LocationSummary* CheckedSmiOpInstr::MakeLocationSummary(Zone* zone, |
3114 | bool opt) const { |
3115 | // Only for precompiled code, not on ia32 currently. |
3116 | UNIMPLEMENTED(); |
3117 | return NULL; |
3118 | } |
3119 | |
3120 | void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3121 | // Only for precompiled code, not on ia32 currently. |
3122 | UNIMPLEMENTED(); |
3123 | } |
3124 | |
3125 | LocationSummary* CheckedSmiComparisonInstr::MakeLocationSummary( |
3126 | Zone* zone, |
3127 | bool opt) const { |
3128 | // Only for precompiled code, not on ia32 currently. |
3129 | UNIMPLEMENTED(); |
3130 | return NULL; |
3131 | } |
3132 | |
3133 | Condition CheckedSmiComparisonInstr::EmitComparisonCode( |
3134 | FlowGraphCompiler* compiler, |
3135 | BranchLabels labels) { |
3136 | // Only for precompiled code, not on ia32 currently. |
3137 | UNIMPLEMENTED(); |
3138 | return ZERO; |
3139 | } |
3140 | |
3141 | void CheckedSmiComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, |
3142 | BranchInstr* instr) { |
3143 | // Only for precompiled code, not on ia32 currently. |
3144 | UNIMPLEMENTED(); |
3145 | } |
3146 | |
3147 | void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3148 | // Only for precompiled code, not on ia32 currently. |
3149 | UNIMPLEMENTED(); |
3150 | } |
3151 | |
3152 | static bool IsSmiValue(const Object& constant, intptr_t value) { |
3153 | return constant.IsSmi() && (Smi::Cast(constant).Value() == value); |
3154 | } |
3155 | |
3156 | LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone, |
3157 | bool opt) const { |
3158 | const intptr_t kNumInputs = 2; |
3159 | if (op_kind() == Token::kTRUNCDIV) { |
3160 | const intptr_t kNumTemps = 1; |
3161 | LocationSummary* summary = new (zone) |
3162 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3163 | if (RightIsPowerOfTwoConstant()) { |
3164 | summary->set_in(0, Location::RequiresRegister()); |
3165 | ConstantInstr* right_constant = right()->definition()->AsConstant(); |
3166 | // The constant is a power of two (a single bit set), so it is safe to embed.
3167 | summary->set_in(1, Location::Constant(right_constant)); |
3168 | summary->set_temp(0, Location::RequiresRegister()); |
3169 | summary->set_out(0, Location::SameAsFirstInput()); |
3170 | } else { |
3171 | // Both inputs must be writable because they will be untagged. |
3172 | summary->set_in(0, Location::RegisterLocation(EAX)); |
3173 | summary->set_in(1, Location::WritableRegister()); |
3174 | summary->set_out(0, Location::SameAsFirstInput()); |
3175 | // Will be used for sign extension and division. |
3176 | summary->set_temp(0, Location::RegisterLocation(EDX)); |
3177 | } |
3178 | return summary; |
3179 | } else if (op_kind() == Token::kMOD) { |
3180 | const intptr_t kNumTemps = 1; |
3181 | LocationSummary* summary = new (zone) |
3182 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3183 | // Both inputs must be writable because they will be untagged. |
3184 | summary->set_in(0, Location::RegisterLocation(EDX)); |
3185 | summary->set_in(1, Location::WritableRegister()); |
3186 | summary->set_out(0, Location::SameAsFirstInput()); |
3187 | // Will be used for sign extension and division. |
3188 | summary->set_temp(0, Location::RegisterLocation(EAX)); |
3189 | return summary; |
3190 | } else if (op_kind() == Token::kSHR) { |
3191 | const intptr_t kNumTemps = 0; |
3192 | LocationSummary* summary = new (zone) |
3193 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3194 | summary->set_in(0, Location::RequiresRegister()); |
3195 | summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX)); |
3196 | summary->set_out(0, Location::SameAsFirstInput()); |
3197 | return summary; |
3198 | } else if (op_kind() == Token::kSHL) { |
3199 | ConstantInstr* right_constant = right()->definition()->AsConstant(); |
3200 | // Shift-by-1 overflow checking can use the overflow flag; otherwise we need a temp.
3201 | const bool shiftBy1 = |
3202 | (right_constant != NULL) && IsSmiValue(right_constant->value(), 1); |
3203 | const intptr_t kNumTemps = (can_overflow() && !shiftBy1) ? 1 : 0; |
3204 | LocationSummary* summary = new (zone) |
3205 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3206 | summary->set_in(0, Location::RequiresRegister()); |
3207 | summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX)); |
3208 | if (kNumTemps == 1) { |
3209 | summary->set_temp(0, Location::RequiresRegister()); |
3210 | } |
3211 | summary->set_out(0, Location::SameAsFirstInput()); |
3212 | return summary; |
3213 | } else { |
3214 | const intptr_t kNumTemps = 0; |
3215 | LocationSummary* summary = new (zone) |
3216 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3217 | summary->set_in(0, Location::RequiresRegister()); |
3218 | ConstantInstr* constant = right()->definition()->AsConstant(); |
3219 | if (constant != NULL) { |
3220 | summary->set_in(1, LocationRegisterOrSmiConstant(right())); |
3221 | } else { |
3222 | summary->set_in(1, Location::PrefersRegister()); |
3223 | } |
3224 | summary->set_out(0, Location::SameAsFirstInput()); |
3225 | return summary; |
3226 | } |
3227 | } |
3228 | |
3229 | template <typename OperandType> |
3230 | static void EmitIntegerArithmetic(FlowGraphCompiler* compiler, |
3231 | Token::Kind op_kind, |
3232 | Register left, |
3233 | const OperandType& right, |
3234 | compiler::Label* deopt) { |
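| // deopt is null when the operation is known not to overflow. The
| // bitwise ops clear OF, so the trailing overflow jump can only fire
| // for kADD, kSUB and kMUL.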
3235 | switch (op_kind) { |
3236 | case Token::kADD: |
3237 | __ addl(left, right); |
3238 | break; |
3239 | case Token::kSUB: |
3240 | __ subl(left, right); |
3241 | break; |
3242 | case Token::kBIT_AND: |
3243 | __ andl(left, right); |
3244 | break; |
3245 | case Token::kBIT_OR: |
3246 | __ orl(left, right); |
3247 | break; |
3248 | case Token::kBIT_XOR: |
3249 | __ xorl(left, right); |
3250 | break; |
3251 | case Token::kMUL: |
3252 | __ imull(left, right); |
3253 | break; |
3254 | default: |
3255 | UNREACHABLE(); |
3256 | } |
3257 | if (deopt != NULL) __ j(OVERFLOW, deopt); |
3258 | } |
3259 | |
3260 | void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3261 | if (op_kind() == Token::kSHL) { |
3262 | EmitSmiShiftLeft(compiler, this); |
3263 | return; |
3264 | } |
3265 | |
3266 | Register left = locs()->in(0).reg(); |
3267 | Register result = locs()->out(0).reg(); |
3268 | ASSERT(left == result); |
3269 | compiler::Label* deopt = NULL; |
3270 | if (CanDeoptimize()) { |
3271 | deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); |
3272 | } |
3273 | |
3274 | if (locs()->in(1).IsConstant()) { |
3275 | const Object& constant = locs()->in(1).constant(); |
3276 | ASSERT(constant.IsSmi()); |
3277 | const intptr_t value = Smi::Cast(constant).Value(); |
3278 | switch (op_kind()) { |
3279 | case Token::kADD: |
3280 | case Token::kSUB: |
3281 | case Token::kBIT_AND: |
3282 | case Token::kBIT_OR: |
3283 | case Token::kBIT_XOR: |
3284 | case Token::kMUL: { |
3285 | const intptr_t imm = |
3286 | (op_kind() == Token::kMUL) ? value : Smi::RawValue(value); |
3287 | EmitIntegerArithmetic(compiler, op_kind(), left, |
3288 | compiler::Immediate(imm), deopt); |
3289 | break; |
3290 | } |
3291 | |
3292 | case Token::kTRUNCDIV: { |
3293 | ASSERT(value != kIntptrMin); |
3294 | ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value))); |
3295 | const intptr_t shift_count = |
3296 | Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize; |
3297 | ASSERT(kSmiTagSize == 1); |
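| // An arithmetic shift alone rounds toward negative infinity, but Dart's
| // ~/ rounds toward zero. The sarl/shrl pair below derives a bias of
| // 2^shift_count - 1 from the sign bit and adds it only for negative
| // values, so the final shift rounds toward zero.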
3298 | Register temp = locs()->temp(0).reg(); |
3299 | __ movl(temp, left); |
3300 | __ sarl(temp, compiler::Immediate(31)); |
3301 | ASSERT(shift_count > 1); // 1, -1 case handled above. |
3302 | __ shrl(temp, compiler::Immediate(32 - shift_count)); |
3303 | __ addl(left, temp); |
3304 | ASSERT(shift_count > 0); |
3305 | __ sarl(left, compiler::Immediate(shift_count)); |
3306 | if (value < 0) { |
3307 | __ negl(left); |
3308 | } |
3309 | __ SmiTag(left); |
3310 | break; |
3311 | } |
3312 | |
3313 | case Token::kSHR: { |
3314 | // The sarl instruction masks the shift count to 5 bits.
3315 | const intptr_t kCountLimit = 0x1F; |
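| // Shifting the still-tagged value right by value + kSmiTagSize untags
| // and shifts in one instruction; SmiTag below re-tags the result.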
3316 | __ sarl(left, compiler::Immediate( |
3317 | Utils::Minimum(value + kSmiTagSize, kCountLimit))); |
3318 | __ SmiTag(left); |
3319 | break; |
3320 | } |
3321 | |
3322 | default: |
3323 | UNREACHABLE(); |
3324 | break; |
3325 | } |
3326 | return; |
3327 | } // if locs()->in(1).IsConstant() |
3328 | |
3329 | if (locs()->in(1).IsStackSlot()) { |
3330 | const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1)); |
3331 | if (op_kind() == Token::kMUL) { |
3332 | __ SmiUntag(left); |
3333 | } |
3334 | EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt); |
3335 | return; |
3336 | } |
3337 | |
3338 | // if locs()->in(1).IsRegister. |
3339 | Register right = locs()->in(1).reg(); |
3340 | switch (op_kind()) { |
3341 | case Token::kADD: |
3342 | case Token::kSUB: |
3343 | case Token::kBIT_AND: |
3344 | case Token::kBIT_OR: |
3345 | case Token::kBIT_XOR: |
3346 | case Token::kMUL: |
3347 | if (op_kind() == Token::kMUL) { |
3348 | __ SmiUntag(left); |
3349 | } |
3350 | EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt); |
3351 | break; |
3352 | |
3353 | case Token::kTRUNCDIV: { |
3354 | if (RangeUtils::CanBeZero(right_range())) { |
3355 | // Handle divide by zero in runtime. |
3356 | __ testl(right, right); |
3357 | __ j(ZERO, deopt); |
3358 | } |
3359 | ASSERT(left == EAX); |
3360 | ASSERT((right != EDX) && (right != EAX)); |
3361 | ASSERT(locs()->temp(0).reg() == EDX); |
3362 | ASSERT(result == EAX); |
3363 | __ SmiUntag(left); |
3364 | __ SmiUntag(right); |
3365 | __ cdq(); // Sign extend EAX -> EDX:EAX. |
3366 | __ idivl(right); // EAX: quotient, EDX: remainder. |
3367 | if (RangeUtils::Overlaps(right_range(), -1, -1)) { |
3368 | // Check the corner case of dividing MIN_SMI by -1, in which case we
3369 | // cannot tag the result.
3370 | __ cmpl(result, compiler::Immediate(0x40000000)); |
3371 | __ j(EQUAL, deopt); |
3372 | } |
3373 | __ SmiTag(result); |
3374 | break; |
3375 | } |
3376 | case Token::kMOD: { |
3377 | if (RangeUtils::CanBeZero(right_range())) { |
3378 | // Handle divide by zero in runtime. |
3379 | __ testl(right, right); |
3380 | __ j(ZERO, deopt); |
3381 | } |
3382 | ASSERT(left == EDX); |
3383 | ASSERT((right != EDX) && (right != EAX)); |
3384 | ASSERT(locs()->temp(0).reg() == EAX); |
3385 | ASSERT(result == EDX); |
3386 | __ SmiUntag(left); |
3387 | __ SmiUntag(right); |
3388 | __ movl(EAX, EDX); |
3389 | __ cdq(); // Sign extend EAX -> EDX:EAX. |
3390 | __ idivl(right); // EAX: quotient, EDX: remainder. |
3391 | // res = left % right; |
3392 | // if (res < 0) { |
3393 | // if (right < 0) { |
3394 | // res = res - right; |
3395 | // } else { |
3396 | // res = res + right; |
3397 | // } |
3398 | // } |
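| // e.g. -5 % 3: idivl leaves res = -2; right > 0, so res = -2 + 3 = 1,
| // matching Dart's non-negative remainder.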
3399 | compiler::Label done; |
3400 | __ cmpl(result, compiler::Immediate(0)); |
3401 | __ j(GREATER_EQUAL, &done, compiler::Assembler::kNearJump); |
3402 | // Result is negative, adjust it. |
3403 | if (RangeUtils::Overlaps(right_range(), -1, 1)) { |
3404 | // Right can be positive or negative.
3405 | compiler::Label subtract; |
3406 | __ cmpl(right, compiler::Immediate(0)); |
3407 | __ j(LESS, &subtract, compiler::Assembler::kNearJump); |
3408 | __ addl(result, right); |
3409 | __ jmp(&done, compiler::Assembler::kNearJump); |
3410 | __ Bind(&subtract); |
3411 | __ subl(result, right); |
3412 | } else if (right_range()->IsPositive()) { |
3413 | // Right is positive. |
3414 | __ addl(result, right); |
3415 | } else { |
3416 | // Right is negative. |
3417 | __ subl(result, right); |
3418 | } |
3419 | __ Bind(&done); |
3420 | __ SmiTag(result); |
3421 | break; |
3422 | } |
3423 | case Token::kSHR: { |
3424 | if (CanDeoptimize()) { |
3425 | __ cmpl(right, compiler::Immediate(0)); |
3426 | __ j(LESS, deopt); |
3427 | } |
3428 | __ SmiUntag(right); |
3429 | // The sarl instruction masks the shift count to 5 bits.
3430 | const intptr_t kCountLimit = 0x1F; |
3431 | if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) { |
3432 | __ cmpl(right, compiler::Immediate(kCountLimit)); |
3433 | compiler::Label count_ok; |
3434 | __ j(LESS, &count_ok, compiler::Assembler::kNearJump); |
3435 | __ movl(right, compiler::Immediate(kCountLimit)); |
3436 | __ Bind(&count_ok); |
3437 | } |
3438 | ASSERT(right == ECX); // Count must be in ECX |
3439 | __ SmiUntag(left); |
3440 | __ sarl(left, right); |
3441 | __ SmiTag(left); |
3442 | break; |
3443 | } |
3444 | case Token::kDIV: { |
3445 | // Dispatches to 'Double./'. |
3446 | // TODO(srdjan): Implement as conversion to double and double division. |
3447 | UNREACHABLE(); |
3448 | break; |
3449 | } |
3450 | case Token::kOR: |
3451 | case Token::kAND: { |
3452 | // Flow graph builder has dissected this operation to guarantee correct |
3453 | // behavior (short-circuit evaluation). |
3454 | UNREACHABLE(); |
3455 | break; |
3456 | } |
3457 | default: |
3458 | UNREACHABLE(); |
3459 | break; |
3460 | } |
3461 | } |
3462 | |
3463 | LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone, |
3464 | bool opt) const { |
3465 | const intptr_t kNumInputs = 2; |
3466 | if (op_kind() == Token::kTRUNCDIV) { |
3467 | UNREACHABLE(); |
3468 | return NULL; |
3469 | } else if (op_kind() == Token::kMOD) { |
3470 | UNREACHABLE(); |
3471 | return NULL; |
3472 | } else if (op_kind() == Token::kSHR) { |
3473 | const intptr_t kNumTemps = 0; |
3474 | LocationSummary* summary = new (zone) |
3475 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3476 | summary->set_in(0, Location::RequiresRegister()); |
3477 | summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX)); |
3478 | summary->set_out(0, Location::SameAsFirstInput()); |
3479 | return summary; |
3480 | } else if (op_kind() == Token::kSHL) { |
3481 | const intptr_t kNumTemps = can_overflow() ? 1 : 0; |
3482 | LocationSummary* summary = new (zone) |
3483 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3484 | summary->set_in(0, Location::RequiresRegister()); |
3485 | summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX)); |
3486 | if (can_overflow()) { |
3487 | summary->set_temp(0, Location::RequiresRegister()); |
3488 | } |
3489 | summary->set_out(0, Location::SameAsFirstInput()); |
3490 | return summary; |
3491 | } else { |
3492 | const intptr_t kNumTemps = 0; |
3493 | LocationSummary* summary = new (zone) |
3494 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3495 | summary->set_in(0, Location::RequiresRegister()); |
3496 | ConstantInstr* constant = right()->definition()->AsConstant(); |
3497 | if (constant != NULL) { |
3498 | summary->set_in(1, LocationRegisterOrSmiConstant(right())); |
3499 | } else { |
3500 | summary->set_in(1, Location::PrefersRegister()); |
3501 | } |
3502 | summary->set_out(0, Location::SameAsFirstInput()); |
3503 | return summary; |
3504 | } |
3505 | } |
3506 | |
3507 | static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler, |
3508 | BinaryInt32OpInstr* shift_left) { |
3509 | const LocationSummary& locs = *shift_left->locs(); |
3510 | Register left = locs.in(0).reg(); |
3511 | Register result = locs.out(0).reg(); |
3512 | ASSERT(left == result); |
3513 | compiler::Label* deopt = |
3514 | shift_left->CanDeoptimize() |
3515 | ? compiler->AddDeoptStub(shift_left->deopt_id(), |
3516 | ICData::kDeoptBinarySmiOp) |
3517 | : NULL; |
3518 | ASSERT(locs.in(1).IsConstant()); |
3519 | |
3520 | const Object& constant = locs.in(1).constant(); |
3521 | ASSERT(constant.IsSmi()); |
3522 | // The shll instruction masks the shift count to 5 bits.
3523 | const intptr_t kCountLimit = 0x1F; |
3524 | const intptr_t value = Smi::Cast(constant).Value(); |
3525 | ASSERT((0 < value) && (value < kCountLimit)); |
3526 | if (shift_left->can_overflow()) { |
3527 | // Check for overflow. |
3528 | Register temp = locs.temp(0).reg(); |
3529 | __ movl(temp, left); |
3530 | __ shll(left, compiler::Immediate(value)); |
3531 | __ sarl(left, compiler::Immediate(value)); |
3532 | __ cmpl(left, temp); |
3533 | __ j(NOT_EQUAL, deopt); // Overflow. |
3534 | } |
3535 | // Shift for the result now that we know there is no overflow.
3536 | __ shll(left, compiler::Immediate(value)); |
3537 | } |
3538 | |
3539 | void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3540 | if (op_kind() == Token::kSHL) { |
3541 | EmitInt32ShiftLeft(compiler, this); |
3542 | return; |
3543 | } |
3544 | |
3545 | Register left = locs()->in(0).reg(); |
3546 | Register result = locs()->out(0).reg(); |
3547 | ASSERT(left == result); |
3548 | compiler::Label* deopt = NULL; |
3549 | if (CanDeoptimize()) { |
3550 | deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); |
3551 | } |
3552 | |
3553 | if (locs()->in(1).IsConstant()) { |
3554 | const Object& constant = locs()->in(1).constant(); |
3555 | ASSERT(constant.IsSmi()); |
3556 | const intptr_t value = Smi::Cast(constant).Value(); |
3557 | switch (op_kind()) { |
3558 | case Token::kADD: |
3559 | case Token::kSUB: |
3560 | case Token::kMUL: |
3561 | case Token::kBIT_AND: |
3562 | case Token::kBIT_OR: |
3563 | case Token::kBIT_XOR: |
3564 | EmitIntegerArithmetic(compiler, op_kind(), left, |
3565 | compiler::Immediate(value), deopt); |
3566 | break; |
3567 | |
3568 | case Token::kTRUNCDIV: { |
3569 | UNREACHABLE(); |
3570 | break; |
3571 | } |
3572 | |
3573 | case Token::kSHR: { |
3574 | // The sarl instruction masks the shift count to 5 bits.
3575 | const intptr_t kCountLimit = 0x1F; |
3576 | __ sarl(left, compiler::Immediate(Utils::Minimum(value, kCountLimit))); |
3577 | break; |
3578 | } |
3579 | |
3580 | default: |
3581 | UNREACHABLE(); |
3582 | break; |
3583 | } |
3584 | return; |
3585 | } // if locs()->in(1).IsConstant() |
3586 | |
3587 | if (locs()->in(1).IsStackSlot()) { |
3588 | const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1)); |
3589 | EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt); |
3590 | return; |
3591 | } // if locs()->in(1).IsStackSlot. |
3592 | |
3593 | // if locs()->in(1).IsRegister. |
3594 | Register right = locs()->in(1).reg(); |
3595 | switch (op_kind()) { |
3596 | case Token::kADD: |
3597 | case Token::kSUB: |
3598 | case Token::kMUL: |
3599 | case Token::kBIT_AND: |
3600 | case Token::kBIT_OR: |
3601 | case Token::kBIT_XOR: |
3602 | EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt); |
3603 | break; |
3604 | |
3605 | default: |
3606 | UNREACHABLE(); |
3607 | break; |
3608 | } |
3609 | } |
3610 | |
3611 | LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone, |
3612 | bool opt) const { |
3613 | const intptr_t kNumInputs = 2; |
3614 | const intptr_t kNumTemps = (op_kind() == Token::kMUL) ? 1 : 0; |
3615 | LocationSummary* summary = new (zone) |
3616 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3617 | if (op_kind() == Token::kMUL) { |
3618 | summary->set_in(0, Location::RegisterLocation(EAX)); |
3619 | summary->set_temp(0, Location::RegisterLocation(EDX)); |
3620 | } else { |
3621 | summary->set_in(0, Location::RequiresRegister()); |
3622 | } |
3623 | summary->set_in(1, Location::RequiresRegister()); |
3624 | summary->set_out(0, Location::SameAsFirstInput()); |
3625 | return summary; |
3626 | } |
3627 | |
3628 | void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3629 | Register left = locs()->in(0).reg(); |
3630 | Register right = locs()->in(1).reg(); |
3631 | Register out = locs()->out(0).reg(); |
3632 | ASSERT(out == left); |
3633 | switch (op_kind()) { |
3634 | case Token::kBIT_AND: |
3635 | case Token::kBIT_OR: |
3636 | case Token::kBIT_XOR: |
3637 | case Token::kADD: |
3638 | case Token::kSUB: |
3639 | EmitIntegerArithmetic(compiler, op_kind(), left, right, NULL); |
3640 | return; |
3641 | |
3642 | case Token::kMUL: |
3643 | __ mull(right); // Result in EDX:EAX. |
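| // Truncating uint32 multiply: only the low 32 bits in EAX are kept;
| // EDX receives the high bits and is discarded.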
3644 | ASSERT(out == EAX); |
3645 | ASSERT(locs()->temp(0).reg() == EDX); |
3646 | break; |
3647 | default: |
3648 | UNREACHABLE(); |
3649 | } |
3650 | } |
3651 | |
3652 | LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone, |
3653 | bool opt) const { |
3654 | intptr_t left_cid = left()->Type()->ToCid(); |
3655 | intptr_t right_cid = right()->Type()->ToCid(); |
3656 | ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid)); |
3657 | const intptr_t kNumInputs = 2; |
3658 | const bool need_temp = (left()->definition() != right()->definition()) && |
3659 | (left_cid != kSmiCid) && (right_cid != kSmiCid); |
3660 | const intptr_t kNumTemps = need_temp ? 1 : 0; |
3661 | LocationSummary* summary = new (zone) |
3662 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3663 | summary->set_in(0, Location::RequiresRegister()); |
3664 | summary->set_in(1, Location::RequiresRegister()); |
3665 | if (need_temp) summary->set_temp(0, Location::RequiresRegister()); |
3666 | return summary; |
3667 | } |
3668 | |
3669 | void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3670 | compiler::Label* deopt = |
3671 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp, |
3672 | licm_hoisted_ ? ICData::kHoisted : 0); |
3673 | intptr_t left_cid = left()->Type()->ToCid(); |
3674 | intptr_t right_cid = right()->Type()->ToCid(); |
3675 | Register left = locs()->in(0).reg(); |
3676 | Register right = locs()->in(1).reg(); |
3677 | if (this->left()->definition() == this->right()->definition()) { |
3678 | __ testl(left, compiler::Immediate(kSmiTagMask)); |
3679 | } else if (left_cid == kSmiCid) { |
3680 | __ testl(right, compiler::Immediate(kSmiTagMask)); |
3681 | } else if (right_cid == kSmiCid) { |
3682 | __ testl(left, compiler::Immediate(kSmiTagMask)); |
3683 | } else { |
3684 | Register temp = locs()->temp(0).reg(); |
3685 | __ movl(temp, left); |
3686 | __ orl(temp, right); |
3687 | __ testl(temp, compiler::Immediate(kSmiTagMask)); |
3688 | } |
3689 | __ j(ZERO, deopt); |
3690 | } |
3691 | |
3692 | LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
3693 | const intptr_t kNumInputs = 1; |
3694 | const intptr_t kNumTemps = 1; |
3695 | LocationSummary* summary = new (zone) LocationSummary( |
3696 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
3697 | summary->set_in(0, Location::RequiresFpuRegister()); |
3698 | summary->set_temp(0, Location::RequiresRegister()); |
3699 | summary->set_out(0, Location::RequiresRegister()); |
3700 | return summary; |
3701 | } |
3702 | |
3703 | void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3704 | Register out_reg = locs()->out(0).reg(); |
3705 | XmmRegister value = locs()->in(0).fpu_reg(); |
3706 | |
3707 | BoxAllocationSlowPath::Allocate(compiler, this, |
3708 | compiler->BoxClassFor(from_representation()), |
3709 | out_reg, locs()->temp(0).reg()); |
3710 | |
3711 | switch (from_representation()) { |
3712 | case kUnboxedDouble: |
3713 | __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), value); |
3714 | break; |
3715 | case kUnboxedFloat: |
3716 | __ cvtss2sd(FpuTMP, value); |
3717 | __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), FpuTMP); |
3718 | break; |
3719 | case kUnboxedFloat32x4: |
3720 | case kUnboxedFloat64x2: |
3721 | case kUnboxedInt32x4: |
3722 | __ movups(compiler::FieldAddress(out_reg, ValueOffset()), value); |
3723 | break; |
3724 | default: |
3725 | UNREACHABLE(); |
3726 | break; |
3727 | } |
3728 | } |
3729 | |
3730 | LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
3731 | const bool needs_temp = |
3732 | CanDeoptimize() || |
3733 | (CanConvertSmi() && (value()->Type()->ToCid() == kSmiCid)); |
3734 | |
3735 | const intptr_t kNumInputs = 1; |
3736 | const intptr_t kNumTemps = needs_temp ? 1 : 0; |
3737 | LocationSummary* summary = new (zone) |
3738 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3739 | summary->set_in(0, Location::RequiresRegister()); |
3740 | if (needs_temp) { |
3741 | summary->set_temp(0, Location::RequiresRegister()); |
3742 | } |
3743 | if (representation() == kUnboxedInt64) { |
3744 | summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX), |
3745 | Location::RegisterLocation(EDX))); |
3746 | } else if (representation() == kUnboxedInt32) { |
3747 | summary->set_out(0, Location::SameAsFirstInput()); |
3748 | } else { |
3749 | summary->set_out(0, Location::RequiresFpuRegister()); |
3750 | } |
3751 | return summary; |
3752 | } |
3753 | |
3754 | void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) { |
3755 | const Register box = locs()->in(0).reg(); |
3756 | |
3757 | switch (representation()) { |
3758 | case kUnboxedInt64: { |
3759 | PairLocation* result = locs()->out(0).AsPairLocation(); |
3760 | ASSERT(result->At(0).reg() != box); |
3761 | __ movl(result->At(0).reg(), compiler::FieldAddress(box, ValueOffset())); |
3762 | __ movl(result->At(1).reg(), |
3763 | compiler::FieldAddress(box, ValueOffset() + kWordSize)); |
3764 | break; |
3765 | } |
3766 | |
3767 | case kUnboxedDouble: { |
3768 | const FpuRegister result = locs()->out(0).fpu_reg(); |
3769 | __ movsd(result, compiler::FieldAddress(box, ValueOffset())); |
3770 | break; |
3771 | } |
3772 | |
3773 | case kUnboxedFloat: { |
3774 | const FpuRegister result = locs()->out(0).fpu_reg(); |
3775 | __ movsd(result, compiler::FieldAddress(box, ValueOffset())); |
3776 | __ cvtsd2ss(result, result); |
3777 | break; |
3778 | } |
3779 | |
3780 | case kUnboxedFloat32x4: |
3781 | case kUnboxedFloat64x2: |
3782 | case kUnboxedInt32x4: { |
3783 | const FpuRegister result = locs()->out(0).fpu_reg(); |
3784 | __ movups(result, compiler::FieldAddress(box, ValueOffset())); |
3785 | break; |
3786 | } |
3787 | |
3788 | default: |
3789 | UNREACHABLE(); |
3790 | break; |
3791 | } |
3792 | } |
3793 | |
3794 | void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) { |
3795 | const Register box = locs()->in(0).reg(); |
3796 | |
3797 | switch (representation()) { |
3798 | case kUnboxedInt64: { |
3799 | PairLocation* result = locs()->out(0).AsPairLocation(); |
3800 | ASSERT(result->At(0).reg() == EAX); |
3801 | ASSERT(result->At(1).reg() == EDX); |
3802 | __ movl(EAX, box); |
3803 | __ SmiUntag(EAX); |
3804 | __ cdq(); |
3805 | break; |
3806 | } |
3807 | |
3808 | case kUnboxedDouble: { |
3809 | const Register temp = locs()->temp(0).reg(); |
3810 | const FpuRegister result = locs()->out(0).fpu_reg(); |
3811 | __ movl(temp, box); |
3812 | __ SmiUntag(temp); |
3813 | __ cvtsi2sd(result, temp); |
3814 | break; |
3815 | } |
3816 | |
3817 | default: |
3818 | UNREACHABLE(); |
3819 | break; |
3820 | } |
3821 | } |
3822 | |
3823 | void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) { |
3824 | const Register value = locs()->in(0).reg(); |
3825 | const Register result = locs()->out(0).reg(); |
3826 | ASSERT(value == result); |
3827 | compiler::Label done; |
3828 | __ SmiUntag(value); // Leaves CF after SmiUntag. |
3829 | __ j(NOT_CARRY, &done, compiler::Assembler::kNearJump); |
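| // SmiUntag above halved the tagged pointer; TIMES_2 addressing rebuilds
| // the untagged address (2 * (ptr >> 1) == ptr - kHeapObjectTag), so
| // adding value_offset() yields the field address.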
3830 | __ movl(result, compiler::Address(value, TIMES_2, Mint::value_offset()));
3831 | __ Bind(&done); |
3832 | } |
3833 | |
3834 | void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) { |
3835 | const Register box = locs()->in(0).reg(); |
3836 | PairLocation* result = locs()->out(0).AsPairLocation(); |
3837 | ASSERT(result->At(0).reg() != box); |
3838 | ASSERT(result->At(1).reg() != box); |
3839 | compiler::Label done; |
3840 | EmitSmiConversion(compiler); // Leaves CF after SmiUntag. |
3841 | __ j(NOT_CARRY, &done, compiler::Assembler::kNearJump); |
3842 | EmitLoadFromBox(compiler); |
3843 | __ Bind(&done); |
3844 | } |
3845 | |
3846 | LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone, |
3847 | bool opt) const { |
3848 | const intptr_t kNumInputs = 1; |
3849 | const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1; |
3850 | if (ValueFitsSmi()) { |
3851 | LocationSummary* summary = new (zone) |
3852 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3853 | // Same regs, can overwrite input. |
3854 | summary->set_in(0, Location::RequiresRegister()); |
3855 | summary->set_out(0, Location::SameAsFirstInput()); |
3856 | return summary; |
3857 | } else { |
3858 | LocationSummary* summary = new (zone) LocationSummary( |
3859 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
3860 | // Guaranteed different regs. In the signed case we are going to use the |
3861 | // input for sign extension of any Mint. |
3862 | const bool needs_writable_input = (from_representation() == kUnboxedInt32); |
3863 | summary->set_in(0, needs_writable_input ? Location::WritableRegister() |
3864 | : Location::RequiresRegister()); |
3865 | summary->set_temp(0, Location::RequiresRegister()); |
3866 | summary->set_out(0, Location::RequiresRegister()); |
3867 | return summary; |
3868 | } |
3869 | } |
3870 | |
3871 | void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3872 | const Register value = locs()->in(0).reg(); |
3873 | const Register out = locs()->out(0).reg(); |
3874 | |
3875 | if (ValueFitsSmi()) { |
3876 | ASSERT(value == out); |
3877 | ASSERT(kSmiTag == 0); |
3878 | __ shll(out, compiler::Immediate(kSmiTagSize)); |
3879 | return; |
3880 | } |
3881 | |
3882 | __ movl(out, value); |
3883 | __ shll(out, compiler::Immediate(kSmiTagSize)); |
3884 | compiler::Label done; |
3885 | if (from_representation() == kUnboxedInt32) { |
3886 | __ j(NO_OVERFLOW, &done); |
3887 | } else { |
3888 | ASSERT(value != out); // Value was not overwritten. |
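| // A uint32 value fits in a (positive) Smi iff bits 30 and 31 are clear.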
3889 | __ testl(value, compiler::Immediate(0xC0000000)); |
3890 | __ j(ZERO, &done); |
3891 | } |
3892 | |
3893 | // Allocate a Mint. |
3894 | if (from_representation() == kUnboxedInt32) { |
3895 | // Value input is a writable register and should be manually preserved |
3896 | // across allocation slow-path. Add it to live_registers set which |
3897 | // determines which registers to preserve. |
3898 | locs()->live_registers()->Add(locs()->in(0), kUnboxedInt32); |
3899 | } |
3900 | ASSERT(value != out); // We need the value after the allocation. |
3901 | BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out, |
3902 | locs()->temp(0).reg()); |
3903 | __ movl(compiler::FieldAddress(out, Mint::value_offset()), value); |
3904 | if (from_representation() == kUnboxedInt32) { |
3905 | // In the signed may-overflow case we asked for the input (value) to be |
3906 | // writable so we can use it as a temp to put the sign extension bits in. |
3907 | __ sarl(value, compiler::Immediate(31)); // Sign extend the Mint. |
3908 | __ movl(compiler::FieldAddress(out, Mint::value_offset() + kWordSize), |
3909 | value); |
3910 | } else { |
3911 | __ movl(compiler::FieldAddress(out, Mint::value_offset() + kWordSize), |
3912 | compiler::Immediate(0)); // Zero extend the Mint. |
3913 | } |
3914 | __ Bind(&done); |
3915 | } |
3916 | |
3917 | LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone, |
3918 | bool opt) const { |
3919 | const intptr_t kNumInputs = 1; |
3920 | const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1; |
3921 | LocationSummary* summary = new (zone) |
3922 | LocationSummary(zone, kNumInputs, kNumTemps, |
3923 | ValueFitsSmi() ? LocationSummary::kNoCall |
3924 | : LocationSummary::kCallOnSlowPath); |
3925 | summary->set_in(0, Location::Pair(Location::RequiresRegister(), |
3926 | Location::RequiresRegister())); |
3927 | if (!ValueFitsSmi()) { |
3928 | summary->set_temp(0, Location::RequiresRegister()); |
3929 | } |
3930 | summary->set_out(0, Location::RequiresRegister()); |
3931 | return summary; |
3932 | } |
3933 | |
3934 | void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3935 | if (ValueFitsSmi()) { |
3936 | PairLocation* value_pair = locs()->in(0).AsPairLocation(); |
3937 | Register value_lo = value_pair->At(0).reg(); |
3938 | Register out_reg = locs()->out(0).reg(); |
3939 | __ movl(out_reg, value_lo); |
3940 | __ SmiTag(out_reg); |
3941 | return; |
3942 | } |
3943 | |
3944 | PairLocation* value_pair = locs()->in(0).AsPairLocation(); |
3945 | Register value_lo = value_pair->At(0).reg(); |
3946 | Register value_hi = value_pair->At(1).reg(); |
3947 | Register out_reg = locs()->out(0).reg(); |
3948 | |
3949 | // Copy value_hi into out_reg as a temporary. |
3950 | // We modify value_lo but restore it before using it. |
3951 | __ movl(out_reg, value_hi); |
3952 | |
3953 | // Unboxed operations produce smis or mint-sized values. |
3954 | // Check if value fits into a smi. |
3955 | compiler::Label not_smi, done; |
3956 | |
3957 | // 1. Compute (x + -kMinSmi) which has to be in the range |
3958 | // 0 .. -kMinSmi+kMaxSmi for x to fit into a smi. |
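| // e.g. kMinSmi (-2^30) maps to 0 and kMaxSmi (2^30 - 1) maps to
| // 2^31 - 1; anything outside the Smi range ends up at 2^31 or above.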
3959 | __ addl(value_lo, compiler::Immediate(0x40000000)); |
3960 | __ adcl(out_reg, compiler::Immediate(0)); |
3961 | // 2. Unsigned compare to -kMinSmi+kMaxSmi. |
3962 | __ cmpl(value_lo, compiler::Immediate(0x80000000)); |
3963 | __ sbbl(out_reg, compiler::Immediate(0)); |
3964 | __ j(ABOVE_EQUAL, ¬_smi); |
3965 | // 3. Restore lower half if result is a smi. |
3966 | __ subl(value_lo, compiler::Immediate(0x40000000)); |
3967 | __ movl(out_reg, value_lo); |
3968 | __ SmiTag(out_reg); |
3969 | __ jmp(&done); |
3970 | __ Bind(¬_smi); |
3971 | // 3. Restore lower half of input before using it. |
3972 | __ subl(value_lo, compiler::Immediate(0x40000000)); |
3973 | |
3974 | BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), |
3975 | out_reg, locs()->temp(0).reg()); |
3976 | __ movl(compiler::FieldAddress(out_reg, Mint::value_offset()), value_lo); |
3977 | __ movl(compiler::FieldAddress(out_reg, Mint::value_offset() + kWordSize), |
3978 | value_hi); |
3979 | __ Bind(&done); |
3980 | } |
3981 | |
3982 | LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone, |
3983 | bool opt) const { |
3984 | const intptr_t value_cid = value()->Type()->ToCid(); |
3985 | const intptr_t kNumInputs = 1; |
3986 | intptr_t kNumTemps = 0; |
3987 | |
3988 | if (CanDeoptimize()) { |
3989 | if ((value_cid != kSmiCid) && (value_cid != kMintCid) && !is_truncating()) { |
3990 | kNumTemps = 2; |
3991 | } else { |
3992 | kNumTemps = 1; |
3993 | } |
3994 | } |
3995 | |
3996 | LocationSummary* summary = new (zone) |
3997 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
3998 | summary->set_in(0, Location::RequiresRegister()); |
3999 | for (int i = 0; i < kNumTemps; i++) { |
4000 | summary->set_temp(i, Location::RequiresRegister()); |
4001 | } |
4002 | summary->set_out(0, ((value_cid == kSmiCid) || (value_cid != kMintCid)) |
4003 | ? Location::SameAsFirstInput() |
4004 | : Location::RequiresRegister()); |
4005 | return summary; |
4006 | } |
4007 | |
4008 | static void LoadInt32FromMint(FlowGraphCompiler* compiler, |
4009 | Register result, |
4010 | const compiler::Address& lo, |
4011 | const compiler::Address& hi, |
4012 | Register temp, |
4013 | compiler::Label* deopt) { |
4014 | __ movl(result, lo); |
4015 | if (deopt != NULL) { |
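| // Deoptimize unless the high word is the sign extension of the low
| // word, i.e. unless the mint value fits in 32 bits.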
4016 | ASSERT(temp != result); |
4017 | __ movl(temp, result); |
4018 | __ sarl(temp, compiler::Immediate(31)); |
4019 | __ cmpl(temp, hi); |
4020 | __ j(NOT_EQUAL, deopt); |
4021 | } |
4022 | } |
4023 | |
4024 | void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4025 | const intptr_t value_cid = value()->Type()->ToCid(); |
4026 | Register value = locs()->in(0).reg(); |
4027 | const Register result = locs()->out(0).reg(); |
4028 | const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister; |
4029 | compiler::Label* deopt = nullptr; |
4030 | if (CanDeoptimize()) { |
4031 | deopt = compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger); |
4032 | } |
4033 | compiler::Label* out_of_range = !is_truncating() ? deopt : NULL; |
4034 | |
4035 | const intptr_t lo_offset = Mint::value_offset(); |
4036 | const intptr_t hi_offset = Mint::value_offset() + kWordSize; |
4037 | |
4038 | if (value_cid == kSmiCid) { |
4039 | ASSERT(value == result); |
4040 | __ SmiUntag(value); |
4041 | } else if (value_cid == kMintCid) { |
4042 | ASSERT((value != result) || (out_of_range == NULL)); |
4043 | LoadInt32FromMint( |
4044 | compiler, result, compiler::FieldAddress(value, lo_offset), |
4045 | compiler::FieldAddress(value, hi_offset), temp, out_of_range); |
4046 | } else if (!CanDeoptimize()) { |
4047 | ASSERT(value == result); |
4048 | compiler::Label done; |
4049 | __ SmiUntag(value); |
4050 | __ j(NOT_CARRY, &done); |
4051 | __ movl(value, compiler::Address(value, TIMES_2, lo_offset)); |
4052 | __ Bind(&done); |
4053 | } else { |
4054 | ASSERT(value == result); |
4055 | compiler::Label done; |
4056 | __ SmiUntagOrCheckClass(value, kMintCid, temp, &done); |
4057 | __ j(NOT_EQUAL, deopt); |
4058 | if (out_of_range != NULL) { |
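| // result aliases value here; copy the halved pointer to a second temp
| // so the high word can still be addressed after the low word write
| // clobbers result.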
4059 | Register value_temp = locs()->temp(1).reg(); |
4060 | __ movl(value_temp, value); |
4061 | value = value_temp; |
4062 | } |
4063 | LoadInt32FromMint( |
4064 | compiler, result, compiler::Address(value, TIMES_2, lo_offset), |
4065 | compiler::Address(value, TIMES_2, hi_offset), temp, out_of_range); |
4066 | __ Bind(&done); |
4067 | } |
4068 | } |
4069 | |
4070 | LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone, |
4071 | bool opt) const { |
4072 | const bool might_box = (representation() == kTagged) && !can_pack_into_smi(); |
4073 | const intptr_t kNumInputs = 2; |
4074 | const intptr_t kNumTemps = might_box ? 2 : 0; |
4075 | LocationSummary* summary = new (zone) LocationSummary( |
4076 | zone, kNumInputs, kNumTemps, |
4077 | might_box ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); |
4078 | summary->set_in(0, Location::RequiresRegister()); |
4079 | // The smi index is either untagged (element size == 1), or it is left smi |
4080 | // tagged (for all element sizes > 1). |
4081 | summary->set_in(1, (index_scale() == 1) ? Location::WritableRegister() |
4082 | : Location::RequiresRegister()); |
4083 | if (might_box) { |
4084 | summary->set_temp(0, Location::RequiresRegister()); |
4085 | summary->set_temp(1, Location::RequiresRegister()); |
4086 | } |
4087 | |
4088 | if (representation() == kUnboxedInt64) { |
4089 | summary->set_out(0, Location::Pair(Location::RequiresRegister(), |
4090 | Location::RequiresRegister())); |
4091 | } else { |
4092 | ASSERT(representation() == kTagged); |
4093 | summary->set_out(0, Location::RequiresRegister()); |
4094 | } |
4095 | |
4096 | return summary; |
4097 | } |
4098 | |
4099 | void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4100 | // The string register points to the backing store for external strings. |
4101 | const Register str = locs()->in(0).reg(); |
4102 | const Location index = locs()->in(1); |
4103 | |
4104 | compiler::Address element_address = |
4105 | compiler::Assembler::ElementAddressForRegIndex( |
4106 | IsExternal(), class_id(), index_scale(), /*index_unboxed=*/false, str, |
4107 | index.reg()); |
4108 | |
4109 | if (index_scale() == 1) {
4110 | __ SmiUntag(index.reg()); |
4111 | } |
4112 | |
4113 | if (representation() == kUnboxedInt64) { |
4114 | ASSERT(compiler->is_optimizing()); |
4115 | ASSERT(locs()->out(0).IsPairLocation()); |
4116 | PairLocation* result_pair = locs()->out(0).AsPairLocation(); |
4117 | Register result1 = result_pair->At(0).reg(); |
4118 | Register result2 = result_pair->At(1).reg(); |
4119 | |
4120 | switch (class_id()) { |
4121 | case kOneByteStringCid: |
4122 | case kExternalOneByteStringCid: |
4123 | ASSERT(element_count() == 4); |
4124 | __ movl(result1, element_address); |
4125 | __ xorl(result2, result2); |
4126 | break; |
4127 | case kTwoByteStringCid: |
4128 | case kExternalTwoByteStringCid: |
4129 | ASSERT(element_count() == 2); |
4130 | __ movl(result1, element_address); |
4131 | __ xorl(result2, result2); |
4132 | break; |
4133 | default: |
4134 | UNREACHABLE(); |
4135 | } |
4136 | } else { |
4137 | ASSERT(representation() == kTagged); |
4138 | Register result = locs()->out(0).reg(); |
4139 | switch (class_id()) { |
4140 | case kOneByteStringCid: |
4141 | case kExternalOneByteStringCid: |
4142 | switch (element_count()) { |
4143 | case 1: |
4144 | __ movzxb(result, element_address); |
4145 | break; |
4146 | case 2: |
4147 | __ movzxw(result, element_address); |
4148 | break; |
4149 | case 4: |
4150 | __ movl(result, element_address); |
4151 | break; |
4152 | default: |
4153 | UNREACHABLE(); |
4154 | } |
4155 | break; |
4156 | case kTwoByteStringCid: |
4157 | case kExternalTwoByteStringCid: |
4158 | switch (element_count()) { |
4159 | case 1: |
4160 | __ movzxw(result, element_address); |
4161 | break; |
4162 | case 2: |
4163 | __ movl(result, element_address); |
4164 | break; |
4165 | default: |
4166 | UNREACHABLE(); |
4167 | } |
4168 | break; |
4169 | default: |
4170 | UNREACHABLE(); |
4171 | break; |
4172 | } |
4173 | if (can_pack_into_smi()) { |
4174 | __ SmiTag(result); |
4175 | } else { |
4176 | // If the value cannot fit in a smi then allocate a mint box for it. |
4177 | Register temp = locs()->temp(0).reg(); |
4178 | Register temp2 = locs()->temp(1).reg(); |
4179 | // Temp register needs to be manually preserved on allocation slow-path. |
4180 | // Add it to live_registers set which determines which registers to |
4181 | // preserve. |
4182 | locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32); |
4183 | |
4184 | ASSERT(temp != result); |
4185 | __ MoveRegister(temp, result); |
4186 | __ SmiTag(result); |
4187 | |
4188 | compiler::Label done; |
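| // The loaded value is non-negative, so it fits in a Smi iff bits 30
| // and 31 are clear.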
4189 | __ testl(temp, compiler::Immediate(0xC0000000)); |
4190 | __ j(ZERO, &done); |
4191 | BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), |
4192 | result, temp2); |
4193 | __ movl(compiler::FieldAddress(result, Mint::value_offset()), temp); |
4194 | __ movl(compiler::FieldAddress(result, Mint::value_offset() + kWordSize), |
4195 | compiler::Immediate(0)); |
4196 | __ Bind(&done); |
4197 | } |
4198 | } |
4199 | } |
4200 | |
4201 | LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone, |
4202 | bool opt) const { |
4203 | const intptr_t kNumInputs = 2; |
4204 | const intptr_t kNumTemps = 0; |
4205 | LocationSummary* summary = new (zone) |
4206 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4207 | summary->set_in(0, Location::RequiresFpuRegister()); |
4208 | summary->set_in(1, Location::RequiresFpuRegister()); |
4209 | summary->set_out(0, Location::SameAsFirstInput()); |
4210 | return summary; |
4211 | } |
4212 | |
4213 | void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4214 | XmmRegister left = locs()->in(0).fpu_reg(); |
4215 | XmmRegister right = locs()->in(1).fpu_reg(); |
4216 | |
4217 | ASSERT(locs()->out(0).fpu_reg() == left); |
4218 | |
4219 | switch (op_kind()) { |
4220 | case Token::kADD: |
4221 | __ addsd(left, right); |
4222 | break; |
4223 | case Token::kSUB: |
4224 | __ subsd(left, right); |
4225 | break; |
4226 | case Token::kMUL: |
4227 | __ mulsd(left, right); |
4228 | break; |
4229 | case Token::kDIV: |
4230 | __ divsd(left, right); |
4231 | break; |
4232 | default: |
4233 | UNREACHABLE(); |
4234 | } |
4235 | } |
4236 | |
4237 | LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone, |
4238 | bool opt) const { |
4239 | const intptr_t kNumInputs = 1; |
4240 | const intptr_t kNumTemps = |
4241 | (op_kind() == MethodRecognizer::kDouble_getIsInfinite) ? 1 : 0; |
4242 | LocationSummary* summary = new (zone) |
4243 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4244 | summary->set_in(0, Location::RequiresFpuRegister()); |
4245 | if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) { |
4246 | summary->set_temp(0, Location::RequiresRegister()); |
4247 | } |
4248 | summary->set_out(0, Location::RequiresRegister()); |
4249 | return summary; |
4250 | } |
4251 | |
4252 | Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
4253 | BranchLabels labels) { |
4254 | ASSERT(compiler->is_optimizing()); |
4255 | const XmmRegister value = locs()->in(0).fpu_reg(); |
4256 | const bool is_negated = kind() != Token::kEQ; |
4257 | if (op_kind() == MethodRecognizer::kDouble_getIsNaN) { |
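| // comisd of a value against itself sets PF exactly when the value is
| // NaN (the comparison is unordered).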
4259 | __ comisd(value, value); |
4260 | return is_negated ? PARITY_ODD : PARITY_EVEN; |
4261 | } else { |
4262 | ASSERT(op_kind() == MethodRecognizer::kDouble_getIsInfinite); |
4263 | const Register temp = locs()->temp(0).reg(); |
4264 | compiler::Label check_upper; |
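| // +/-infinity has a zero low word and, with the sign bit masked off, a
| // high word of 0x7FF00000 (maximal exponent, zero mantissa).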
4265 | __ AddImmediate(ESP, compiler::Immediate(-kDoubleSize)); |
4266 | __ movsd(compiler::Address(ESP, 0), value); |
4267 | __ movl(temp, compiler::Address(ESP, 0)); |
4268 | // If the low word isn't zero, then it isn't infinity. |
4269 | __ cmpl(temp, compiler::Immediate(0)); |
4270 | __ j(EQUAL, &check_upper, compiler::Assembler::kNearJump); |
4271 | __ AddImmediate(ESP, compiler::Immediate(kDoubleSize)); |
4272 | __ jmp(is_negated ? labels.true_label : labels.false_label); |
4273 | __ Bind(&check_upper); |
4274 | // Check the high word. |
4275 | __ movl(temp, compiler::Address(ESP, kWordSize)); |
4276 | __ AddImmediate(ESP, compiler::Immediate(kDoubleSize)); |
4277 | // Mask off sign bit. |
4278 | __ andl(temp, compiler::Immediate(0x7FFFFFFF)); |
4279 | // Compare with +infinity. |
4280 | __ cmpl(temp, compiler::Immediate(0x7FF00000)); |
4281 | return is_negated ? NOT_EQUAL : EQUAL; |
4282 | } |
4283 | } |
4284 | |
4285 | // SIMD |
4286 | |
4287 | #define DEFINE_EMIT(Name, Args) \ |
4288 | static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \ |
4289 | PP_APPLY(PP_UNPACK, Args)) |
4290 | |
4291 | #define SIMD_OP_FLOAT_ARITH(V, Name, op) \ |
4292 | V(Float32x4##Name, op##ps) \ |
4293 | V(Float64x2##Name, op##pd) |
4294 | |
4295 | #define SIMD_OP_SIMPLE_BINARY(V) \ |
4296 | SIMD_OP_FLOAT_ARITH(V, Add, add) \ |
4297 | SIMD_OP_FLOAT_ARITH(V, Sub, sub) \ |
4298 | SIMD_OP_FLOAT_ARITH(V, Mul, mul) \ |
4299 | SIMD_OP_FLOAT_ARITH(V, Div, div) \ |
4300 | SIMD_OP_FLOAT_ARITH(V, Min, min) \ |
4301 | SIMD_OP_FLOAT_ARITH(V, Max, max) \ |
4302 | V(Int32x4Add, addpl) \ |
4303 | V(Int32x4Sub, subpl) \ |
4304 | V(Int32x4BitAnd, andps) \ |
4305 | V(Int32x4BitOr, orps) \ |
4306 | V(Int32x4BitXor, xorps) \ |
4307 | V(Float32x4Equal, cmppseq) \ |
4308 | V(Float32x4NotEqual, cmppsneq) \ |
4309 | V(Float32x4GreaterThan, cmppsnle) \ |
4310 | V(Float32x4GreaterThanOrEqual, cmppsnlt) \ |
4311 | V(Float32x4LessThan, cmppslt) \ |
4312 | V(Float32x4LessThanOrEqual, cmppsle) |
4313 | |
4314 | DEFINE_EMIT(SimdBinaryOp, |
4315 | (SameAsFirstInput, XmmRegister left, XmmRegister right)) { |
4316 | switch (instr->kind()) { |
4317 | #define EMIT(Name, op) \ |
4318 | case SimdOpInstr::k##Name: \ |
4319 | __ op(left, right); \ |
4320 | break; |
4321 | SIMD_OP_SIMPLE_BINARY(EMIT) |
4322 | #undef EMIT |
4323 | case SimdOpInstr::kFloat32x4Scale: |
4324 | __ cvtsd2ss(left, left); |
4325 | __ shufps(left, left, compiler::Immediate(0x00)); |
4326 | __ mulps(left, right); |
4327 | break; |
4328 | case SimdOpInstr::kFloat32x4ShuffleMix: |
4329 | case SimdOpInstr::kInt32x4ShuffleMix: |
4330 | __ shufps(left, right, compiler::Immediate(instr->mask())); |
4331 | break; |
4332 | case SimdOpInstr::kFloat64x2FromDoubles: |
4333 | // shufpd mask 0x0 results in: |
4334 | // Lower 64-bits of left = Lower 64-bits of left. |
4335 | // Upper 64-bits of left = Lower 64-bits of right. |
4336 | __ shufpd(left, right, compiler::Immediate(0x0)); |
4337 | break; |
4338 | case SimdOpInstr::kFloat64x2Scale: |
4339 | __ shufpd(right, right, compiler::Immediate(0x00)); |
4340 | __ mulpd(left, right); |
4341 | break; |
4342 | case SimdOpInstr::kFloat64x2WithX: |
4343 | case SimdOpInstr::kFloat64x2WithY: { |
4344 | // TODO(dartbug.com/30949) avoid transfer through memory |
4345 | COMPILE_ASSERT(SimdOpInstr::kFloat64x2WithY == |
4346 | (SimdOpInstr::kFloat64x2WithX + 1)); |
4347 | const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat64x2WithX; |
4348 | ASSERT(0 <= lane_index && lane_index < 2); |
4349 | __ SubImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4350 | __ movups(compiler::Address(ESP, 0), left); |
4351 | __ movsd(compiler::Address(ESP, lane_index * kDoubleSize), right); |
4352 | __ movups(left, compiler::Address(ESP, 0)); |
4353 | __ AddImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4354 | break; |
4355 | } |
4356 | case SimdOpInstr::kFloat32x4WithX: |
4357 | case SimdOpInstr::kFloat32x4WithY: |
4358 | case SimdOpInstr::kFloat32x4WithZ: |
4359 | case SimdOpInstr::kFloat32x4WithW: { |
      // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
      // insertps; with SSE2 these operations can be implemented via a
      // combination of shufps/movss/movlhps.
4363 | COMPILE_ASSERT( |
4364 | SimdOpInstr::kFloat32x4WithY == (SimdOpInstr::kFloat32x4WithX + 1) && |
4365 | SimdOpInstr::kFloat32x4WithZ == (SimdOpInstr::kFloat32x4WithX + 2) && |
4366 | SimdOpInstr::kFloat32x4WithW == (SimdOpInstr::kFloat32x4WithX + 3)); |
4367 | const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat32x4WithX; |
4368 | ASSERT(0 <= lane_index && lane_index < 4); |
4369 | __ cvtsd2ss(left, left); |
4370 | __ SubImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4371 | __ movups(compiler::Address(ESP, 0), right); |
4372 | __ movss(compiler::Address(ESP, lane_index * kFloatSize), left); |
4373 | __ movups(left, compiler::Address(ESP, 0)); |
4374 | __ AddImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4375 | break; |
4376 | } |
4377 | default: |
4378 | UNREACHABLE(); |
4379 | } |
4380 | } |
4381 | |
4382 | #define SIMD_OP_SIMPLE_UNARY(V) \ |
4383 | SIMD_OP_FLOAT_ARITH(V, Sqrt, sqrt) \ |
4384 | SIMD_OP_FLOAT_ARITH(V, Negate, negate) \ |
4385 | SIMD_OP_FLOAT_ARITH(V, Abs, abs) \ |
4386 | V(Float32x4Reciprocal, reciprocalps) \ |
4387 | V(Float32x4ReciprocalSqrt, rsqrtps) |
4388 | |
4389 | DEFINE_EMIT(SimdUnaryOp, (SameAsFirstInput, XmmRegister value)) { |
  // TODO(dartbug.com/30949) select better register constraints to avoid
  // a redundant move of the input into a different register, because all
  // instructions below support two-operand forms.
4393 | switch (instr->kind()) { |
4394 | #define EMIT(Name, op) \ |
4395 | case SimdOpInstr::k##Name: \ |
4396 | __ op(value); \ |
4397 | break; |
4398 | SIMD_OP_SIMPLE_UNARY(EMIT) |
4399 | #undef EMIT |
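    // With identical source and destination, each 2-bit field of the shufps
    // immediate picks the source lane for one destination lane, so 0x55
    // replicates lane 1 (Y), 0xAA lane 2 (Z) and 0xFF lane 3 (W).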
4400 | case SimdOpInstr::kFloat32x4ShuffleX: |
4401 | // Shuffle not necessary. |
4402 | __ cvtss2sd(value, value); |
4403 | break; |
4404 | case SimdOpInstr::kFloat32x4ShuffleY: |
4405 | __ shufps(value, value, compiler::Immediate(0x55)); |
4406 | __ cvtss2sd(value, value); |
4407 | break; |
4408 | case SimdOpInstr::kFloat32x4ShuffleZ: |
4409 | __ shufps(value, value, compiler::Immediate(0xAA)); |
4410 | __ cvtss2sd(value, value); |
4411 | break; |
4412 | case SimdOpInstr::kFloat32x4ShuffleW: |
4413 | __ shufps(value, value, compiler::Immediate(0xFF)); |
4414 | __ cvtss2sd(value, value); |
4415 | break; |
4416 | case SimdOpInstr::kFloat32x4Shuffle: |
4417 | case SimdOpInstr::kInt32x4Shuffle: |
4418 | __ shufps(value, value, compiler::Immediate(instr->mask())); |
4419 | break; |
4420 | case SimdOpInstr::kFloat32x4Splat: |
4421 | // Convert to Float32. |
4422 | __ cvtsd2ss(value, value); |
4423 | // Splat across all lanes. |
4424 | __ shufps(value, value, compiler::Immediate(0x00)); |
4425 | break; |
4426 | case SimdOpInstr::kFloat64x2ToFloat32x4: |
4427 | __ cvtpd2ps(value, value); |
4428 | break; |
4429 | case SimdOpInstr::kFloat32x4ToFloat64x2: |
4430 | __ cvtps2pd(value, value); |
4431 | break; |
4432 | case SimdOpInstr::kFloat32x4ToInt32x4: |
4433 | case SimdOpInstr::kInt32x4ToFloat32x4: |
      // TODO(dartbug.com/30949) these operations are essentially no-ops and
      // should not generate any code. They should be removed from the graph
      // before code generation.
4437 | break; |
4438 | case SimdOpInstr::kFloat64x2GetX: |
4439 | // NOP. |
4440 | break; |
4441 | case SimdOpInstr::kFloat64x2GetY: |
4442 | __ shufpd(value, value, compiler::Immediate(0x33)); |
4443 | break; |
4444 | case SimdOpInstr::kFloat64x2Splat: |
4445 | __ shufpd(value, value, compiler::Immediate(0x0)); |
4446 | break; |
4447 | default: |
4448 | UNREACHABLE(); |
4449 | } |
4450 | } |
4451 | |
4452 | DEFINE_EMIT(SimdGetSignMask, (Register out, XmmRegister value)) { |
4453 | switch (instr->kind()) { |
4454 | case SimdOpInstr::kFloat32x4GetSignMask: |
4455 | case SimdOpInstr::kInt32x4GetSignMask: |
4456 | __ movmskps(out, value); |
4457 | break; |
4458 | case SimdOpInstr::kFloat64x2GetSignMask: |
4459 | __ movmskpd(out, value); |
4460 | break; |
4461 | default: |
4462 | UNREACHABLE(); |
4463 | break; |
4464 | } |
4465 | } |
4466 | |
4467 | DEFINE_EMIT( |
4468 | Float32x4FromDoubles, |
4469 | (SameAsFirstInput, XmmRegister v0, XmmRegister, XmmRegister, XmmRegister)) { |
  // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
  // insertps; with SSE2 it can be emulated via unpcklps.
4472 | const XmmRegister out = v0; |
4473 | __ SubImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4474 | for (intptr_t i = 0; i < 4; i++) { |
4475 | __ cvtsd2ss(out, instr->locs()->in(i).fpu_reg()); |
4476 | __ movss(compiler::Address(ESP, i * kFloatSize), out); |
4477 | } |
4478 | __ movups(out, compiler::Address(ESP, 0)); |
4479 | __ AddImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4480 | } |
4481 | |
4482 | DEFINE_EMIT(Float32x4Zero, (XmmRegister out)) { |
4483 | __ xorps(out, out); |
4484 | } |
4485 | |
4486 | DEFINE_EMIT(Float64x2Zero, (XmmRegister value)) { |
4487 | __ xorpd(value, value); |
4488 | } |
4489 | |
4490 | DEFINE_EMIT(Float32x4Clamp, |
4491 | (SameAsFirstInput, |
4492 | XmmRegister left, |
4493 | XmmRegister lower, |
4494 | XmmRegister upper)) { |
4495 | __ minps(left, upper); |
4496 | __ maxps(left, lower); |
4497 | } |
4498 | |
4499 | DEFINE_EMIT(Int32x4FromInts, |
4500 | (XmmRegister result, Register, Register, Register, Register)) { |
4501 | // TODO(dartbug.com/30949) avoid transfer through memory. |
4502 | __ SubImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4503 | for (intptr_t i = 0; i < 4; i++) { |
4504 | __ movl(compiler::Address(ESP, i * kInt32Size), instr->locs()->in(i).reg()); |
4505 | } |
4506 | __ movups(result, compiler::Address(ESP, 0)); |
4507 | __ AddImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4508 | } |
4509 | |
4510 | DEFINE_EMIT(Int32x4FromBools, |
4511 | (XmmRegister result, Register, Register, Register, Register)) { |
4512 | // TODO(dartbug.com/30949) avoid transfer through memory and branches. |
4513 | __ SubImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4514 | for (intptr_t i = 0; i < 4; i++) { |
4515 | compiler::Label store_false, done; |
4516 | __ CompareObject(instr->locs()->in(i).reg(), Bool::True()); |
4517 | __ j(NOT_EQUAL, &store_false); |
4518 | __ movl(compiler::Address(ESP, kInt32Size * i), |
4519 | compiler::Immediate(0xFFFFFFFF)); |
4520 | __ jmp(&done); |
4521 | __ Bind(&store_false); |
4522 | __ movl(compiler::Address(ESP, kInt32Size * i), compiler::Immediate(0x0)); |
4523 | __ Bind(&done); |
4524 | } |
4525 | __ movups(result, compiler::Address(ESP, 0)); |
4526 | __ AddImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4527 | } |
4528 | |
4529 | // TODO(dartbug.com/30953) need register with a byte component for setcc. |
4530 | DEFINE_EMIT(Int32x4GetFlag, (Fixed<Register, EDX> result, XmmRegister value)) { |
4531 | COMPILE_ASSERT( |
4532 | SimdOpInstr::kInt32x4GetFlagY == (SimdOpInstr::kInt32x4GetFlagX + 1) && |
4533 | SimdOpInstr::kInt32x4GetFlagZ == (SimdOpInstr::kInt32x4GetFlagX + 2) && |
4534 | SimdOpInstr::kInt32x4GetFlagW == (SimdOpInstr::kInt32x4GetFlagX + 3)); |
4535 | const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4GetFlagX; |
4536 | ASSERT(0 <= lane_index && lane_index < 4); |
4537 | |
4538 | // TODO(dartbug.com/30949) avoid transfer through memory. |
4539 | __ SubImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4540 | __ movups(compiler::Address(ESP, 0), value); |
4541 | __ movl(EDX, compiler::Address(ESP, lane_index * kInt32Size)); |
4542 | __ AddImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4543 | |
4544 | // EDX = EDX != 0 ? 0 : 1 |
4545 | __ testl(EDX, EDX); |
4546 | __ setcc(ZERO, DL); |
4547 | __ movzxb(EDX, DL); |
4548 | |
4549 | ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE(); |
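  // EDX is now 0 (lane non-zero, flag is true) or 1 (lane zero, flag is
  // false). Bool::False() is cached one word after Bool::True() in the
  // Thread (asserted above), so indexing by EDX words yields the right
  // boolean object.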
4550 | __ movl(EDX, |
4551 | compiler::Address(THR, EDX, TIMES_4, Thread::bool_true_offset())); |
4552 | } |
4553 | |
4554 | // TODO(dartbug.com/30953) need register with a byte component for setcc. |
4555 | DEFINE_EMIT(Int32x4WithFlag, |
4556 | (SameAsFirstInput, |
4557 | XmmRegister mask, |
4558 | Register flag, |
4559 | Temp<Fixed<Register, EDX> > temp)) { |
4560 | COMPILE_ASSERT( |
4561 | SimdOpInstr::kInt32x4WithFlagY == (SimdOpInstr::kInt32x4WithFlagX + 1) && |
4562 | SimdOpInstr::kInt32x4WithFlagZ == (SimdOpInstr::kInt32x4WithFlagX + 2) && |
4563 | SimdOpInstr::kInt32x4WithFlagW == (SimdOpInstr::kInt32x4WithFlagX + 3)); |
4564 | const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4WithFlagX; |
4565 | ASSERT(0 <= lane_index && lane_index < 4); |
4566 | |
4567 | // TODO(dartbug.com/30949) avoid transfer through memory. |
4568 | __ SubImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4569 | __ movups(compiler::Address(ESP, 0), mask); |
4570 | |
4571 | // EDX = flag == true ? -1 : 0 |
4572 | __ xorl(EDX, EDX); |
4573 | __ CompareObject(flag, Bool::True()); |
4574 | __ setcc(EQUAL, DL); |
4575 | __ negl(EDX); |
4576 | |
4577 | __ movl(compiler::Address(ESP, lane_index * kInt32Size), EDX); |
4578 | |
4579 | // Copy mask back to register. |
4580 | __ movups(mask, compiler::Address(ESP, 0)); |
4581 | __ AddImmediate(ESP, compiler::Immediate(kSimd128Size)); |
4582 | } |
4583 | |
4584 | DEFINE_EMIT(Int32x4Select, |
4585 | (SameAsFirstInput, |
4586 | XmmRegister mask, |
4587 | XmmRegister trueValue, |
4588 | XmmRegister falseValue, |
4589 | Temp<XmmRegister> temp)) { |
4590 | // Copy mask. |
4591 | __ movaps(temp, mask); |
4592 | // Invert it. |
4593 | __ notps(temp); |
4594 | // mask = mask & trueValue. |
4595 | __ andps(mask, trueValue); |
4596 | // temp = temp & falseValue. |
4597 | __ andps(temp, falseValue); |
4598 | // out = mask | temp. |
4599 | __ orps(mask, temp); |
4600 | } |
4601 | |
4602 | // Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following |
4603 | // format: |
4604 | // |
4605 | // CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB. |
4606 | // SIMPLE(OpA) - Emitter with name OpA is used to emit OpA. |
4607 | // |
4608 | #define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \ |
4609 | SIMD_OP_SIMPLE_BINARY(CASE) \ |
4610 | CASE(Float32x4Scale) \ |
4611 | CASE(Float32x4ShuffleMix) \ |
4612 | CASE(Int32x4ShuffleMix) \ |
4613 | CASE(Float64x2FromDoubles) \ |
4614 | CASE(Float64x2Scale) \ |
4615 | CASE(Float64x2WithX) \ |
4616 | CASE(Float64x2WithY) \ |
4617 | CASE(Float32x4WithX) \ |
4618 | CASE(Float32x4WithY) \ |
4619 | CASE(Float32x4WithZ) \ |
4620 | CASE(Float32x4WithW) \ |
4621 | ____(SimdBinaryOp) \ |
4622 | SIMD_OP_SIMPLE_UNARY(CASE) \ |
4623 | CASE(Float32x4ShuffleX) \ |
4624 | CASE(Float32x4ShuffleY) \ |
4625 | CASE(Float32x4ShuffleZ) \ |
4626 | CASE(Float32x4ShuffleW) \ |
4627 | CASE(Float32x4Shuffle) \ |
4628 | CASE(Int32x4Shuffle) \ |
4629 | CASE(Float32x4Splat) \ |
4630 | CASE(Float32x4ToFloat64x2) \ |
4631 | CASE(Float64x2ToFloat32x4) \ |
4632 | CASE(Int32x4ToFloat32x4) \ |
4633 | CASE(Float32x4ToInt32x4) \ |
4634 | CASE(Float64x2GetX) \ |
4635 | CASE(Float64x2GetY) \ |
4636 | CASE(Float64x2Splat) \ |
4637 | ____(SimdUnaryOp) \ |
4638 | CASE(Float32x4GetSignMask) \ |
4639 | CASE(Int32x4GetSignMask) \ |
4640 | CASE(Float64x2GetSignMask) \ |
4641 | ____(SimdGetSignMask) \ |
4642 | SIMPLE(Float32x4FromDoubles) \ |
4643 | SIMPLE(Int32x4FromInts) \ |
4644 | SIMPLE(Int32x4FromBools) \ |
4645 | SIMPLE(Float32x4Zero) \ |
4646 | SIMPLE(Float64x2Zero) \ |
4647 | SIMPLE(Float32x4Clamp) \ |
4648 | CASE(Int32x4GetFlagX) \ |
4649 | CASE(Int32x4GetFlagY) \ |
4650 | CASE(Int32x4GetFlagZ) \ |
4651 | CASE(Int32x4GetFlagW) \ |
4652 | ____(Int32x4GetFlag) \ |
4653 | CASE(Int32x4WithFlagX) \ |
4654 | CASE(Int32x4WithFlagY) \ |
4655 | CASE(Int32x4WithFlagZ) \ |
4656 | CASE(Int32x4WithFlagW) \ |
4657 | ____(Int32x4WithFlag) \ |
4658 | SIMPLE(Int32x4Select) |
4659 | |
4660 | LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
4661 | switch (kind()) { |
4662 | #define CASE(Name, ...) case k##Name: |
4663 | #define EMIT(Name) \ |
4664 | return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name); |
4665 | #define SIMPLE(Name) CASE(Name) EMIT(Name) |
4666 | SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE) |
4667 | #undef CASE |
4668 | #undef EMIT |
4669 | #undef SIMPLE |
4670 | case kIllegalSimdOp: |
4671 | UNREACHABLE(); |
4672 | break; |
4673 | } |
4674 | UNREACHABLE(); |
  return nullptr;
4676 | } |
4677 | |
4678 | void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4679 | switch (kind()) { |
4680 | #define CASE(Name, ...) case k##Name: |
4681 | #define EMIT(Name) \ |
4682 | InvokeEmitter(compiler, this, &Emit##Name); \ |
4683 | break; |
4684 | #define SIMPLE(Name) CASE(Name) EMIT(Name) |
4685 | SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE) |
4686 | #undef CASE |
4687 | #undef EMIT |
4688 | #undef SIMPLE |
4689 | case kIllegalSimdOp: |
4690 | UNREACHABLE(); |
4691 | break; |
4692 | } |
4693 | } |
4694 | |
4695 | #undef DEFINE_EMIT |
4696 | |
4697 | LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone, |
4698 | bool opt) const { |
4699 | ASSERT((kind() == MathUnaryInstr::kSqrt) || |
4700 | (kind() == MathUnaryInstr::kDoubleSquare)); |
4701 | const intptr_t kNumInputs = 1; |
4702 | const intptr_t kNumTemps = 0; |
4703 | LocationSummary* summary = new (zone) |
4704 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4705 | summary->set_in(0, Location::RequiresFpuRegister()); |
4706 | if (kind() == MathUnaryInstr::kDoubleSquare) { |
4707 | summary->set_out(0, Location::SameAsFirstInput()); |
4708 | } else { |
4709 | summary->set_out(0, Location::RequiresFpuRegister()); |
4710 | } |
4711 | return summary; |
4712 | } |
4713 | |
4714 | void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4715 | if (kind() == MathUnaryInstr::kSqrt) { |
4716 | __ sqrtsd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg()); |
4717 | } else if (kind() == MathUnaryInstr::kDoubleSquare) { |
4718 | XmmRegister value_reg = locs()->in(0).fpu_reg(); |
4719 | __ mulsd(value_reg, value_reg); |
4720 | ASSERT(value_reg == locs()->out(0).fpu_reg()); |
4721 | } else { |
4722 | UNREACHABLE(); |
4723 | } |
4724 | } |
4725 | |
4726 | LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary( |
4727 | Zone* zone, |
4728 | bool opt) const { |
4729 | const intptr_t kNumTemps = 0; |
4730 | LocationSummary* summary = new (zone) |
4731 | LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); |
4732 | summary->set_in(0, Location::RegisterLocation(EAX)); |
4733 | summary->set_in(1, Location::RegisterLocation(ECX)); |
4734 | summary->set_in(2, Location::RegisterLocation(EDX)); |
4735 | summary->set_in(3, Location::RegisterLocation(EBX)); |
4736 | summary->set_out(0, Location::RegisterLocation(EAX)); |
4737 | return summary; |
4738 | } |
4739 | |
4740 | void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
  // Save ESP. EDI is chosen because it is callee-saved, so we do not need
  // to back it up before calling into the runtime.
4743 | static const Register kSavedSPReg = EDI; |
4744 | __ movl(kSavedSPReg, ESP); |
4745 | __ ReserveAlignedFrameSpace(kWordSize * TargetFunction().argument_count()); |
4746 | |
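  // Under the ia32 C calling convention all arguments are passed on the
  // stack, one word each, starting at ESP.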
4747 | __ movl(compiler::Address(ESP, +0 * kWordSize), locs()->in(0).reg()); |
4748 | __ movl(compiler::Address(ESP, +1 * kWordSize), locs()->in(1).reg()); |
4749 | __ movl(compiler::Address(ESP, +2 * kWordSize), locs()->in(2).reg()); |
4750 | __ movl(compiler::Address(ESP, +3 * kWordSize), locs()->in(3).reg()); |
4751 | |
4752 | // Call the function. |
4753 | __ CallRuntime(TargetFunction(), TargetFunction().argument_count()); |
4754 | |
  // Restore ESP, discarding the reserved frame space and the arguments.
4756 | __ movl(ESP, kSavedSPReg); |
4757 | } |
4758 | |
4759 | LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone, |
4760 | bool opt) const { |
4761 | if (result_cid() == kDoubleCid) { |
4762 | const intptr_t kNumInputs = 2; |
4763 | const intptr_t kNumTemps = 1; |
4764 | LocationSummary* summary = new (zone) |
4765 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4766 | summary->set_in(0, Location::RequiresFpuRegister()); |
4767 | summary->set_in(1, Location::RequiresFpuRegister()); |
4768 | // Reuse the left register so that code can be made shorter. |
4769 | summary->set_out(0, Location::SameAsFirstInput()); |
4770 | summary->set_temp(0, Location::RequiresRegister()); |
4771 | return summary; |
4772 | } |
4773 | |
4774 | ASSERT(result_cid() == kSmiCid); |
4775 | const intptr_t kNumInputs = 2; |
4776 | const intptr_t kNumTemps = 0; |
4777 | LocationSummary* summary = new (zone) |
4778 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4779 | summary->set_in(0, Location::RequiresRegister()); |
4780 | summary->set_in(1, Location::RequiresRegister()); |
4781 | // Reuse the left register so that code can be made shorter. |
4782 | summary->set_out(0, Location::SameAsFirstInput()); |
4783 | return summary; |
4784 | } |
4785 | |
4786 | void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4787 | ASSERT((op_kind() == MethodRecognizer::kMathMin) || |
4788 | (op_kind() == MethodRecognizer::kMathMax)); |
  const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
4790 | if (result_cid() == kDoubleCid) { |
4791 | compiler::Label done, returns_nan, are_equal; |
4792 | XmmRegister left = locs()->in(0).fpu_reg(); |
4793 | XmmRegister right = locs()->in(1).fpu_reg(); |
4794 | XmmRegister result = locs()->out(0).fpu_reg(); |
4795 | Register temp = locs()->temp(0).reg(); |
4796 | __ comisd(left, right); |
4797 | __ j(PARITY_EVEN, &returns_nan, compiler::Assembler::kNearJump); |
4798 | __ j(EQUAL, &are_equal, compiler::Assembler::kNearJump); |
4799 | const Condition double_condition = |
4800 | is_min ? TokenKindToDoubleCondition(Token::kLT) |
4801 | : TokenKindToDoubleCondition(Token::kGT); |
4802 | ASSERT(left == result); |
4803 | __ j(double_condition, &done, compiler::Assembler::kNearJump); |
4804 | __ movsd(result, right); |
4805 | __ jmp(&done, compiler::Assembler::kNearJump); |
4806 | |
4807 | __ Bind(&returns_nan); |
4808 | static double kNaN = NAN; |
4809 | __ movsd(result, |
4810 | compiler::Address::Absolute(reinterpret_cast<uword>(&kNaN))); |
4811 | __ jmp(&done, compiler::Assembler::kNearJump); |
4812 | |
4813 | __ Bind(&are_equal); |
4814 | compiler::Label left_is_negative; |
    // Check for negative zero: -0.0 compares equal to 0.0, but min must
    // return -0.0 and max must return 0.0.
    // Inspect the sign bit of the left value:
    //  - min -> left is negative ? left : right.
    //  - max -> left is negative ? right : left.
4821 | __ movmskpd(temp, left); |
4822 | __ testl(temp, compiler::Immediate(1)); |
4823 | ASSERT(left == result); |
4824 | if (is_min) { |
4825 | __ j(NOT_ZERO, &done, |
4826 | compiler::Assembler::kNearJump); // Negative -> return left. |
4827 | } else { |
4828 | __ j(ZERO, &done, |
4829 | compiler::Assembler::kNearJump); // Positive -> return left. |
4830 | } |
4831 | __ movsd(result, right); |
4832 | __ Bind(&done); |
4833 | return; |
4834 | } |
4835 | |
4836 | ASSERT(result_cid() == kSmiCid); |
4837 | Register left = locs()->in(0).reg(); |
4838 | Register right = locs()->in(1).reg(); |
4839 | Register result = locs()->out(0).reg(); |
4840 | __ cmpl(left, right); |
4841 | ASSERT(result == left); |
4842 | if (is_min) { |
4843 | __ cmovgel(result, right); |
4844 | } else { |
4845 | __ cmovlessl(result, right); |
4846 | } |
4847 | } |
4848 | |
4849 | LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone, |
4850 | bool opt) const { |
4851 | const intptr_t kNumInputs = 1; |
4852 | return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(), |
4853 | LocationSummary::kNoCall); |
4854 | } |
4855 | |
4856 | void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4857 | Register value = locs()->in(0).reg(); |
4858 | ASSERT(value == locs()->out(0).reg()); |
4859 | switch (op_kind()) { |
4860 | case Token::kNEGATE: { |
4861 | compiler::Label* deopt = |
4862 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); |
4863 | __ negl(value); |
4864 | __ j(OVERFLOW, deopt); |
4865 | break; |
4866 | } |
4867 | case Token::kBIT_NOT: |
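      // A Smi is the value shifted left by one with a zero tag bit: notl
      // also inverts the tag bit, and masking it off again produces the
      // correctly tagged result (e.g. Smi(5) = 0b1010 -> notl ->
      // ...11110101 -> andl ~1 -> ...11110100 = Smi(-6) = Smi(~5)).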
4868 | __ notl(value); |
4869 | __ andl(value, |
4870 | compiler::Immediate(~kSmiTagMask)); // Remove inverted smi-tag. |
4871 | break; |
4872 | default: |
4873 | UNREACHABLE(); |
4874 | } |
4875 | } |
4876 | |
4877 | LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone, |
4878 | bool opt) const { |
4879 | const intptr_t kNumInputs = 1; |
4880 | const intptr_t kNumTemps = 0; |
4881 | LocationSummary* summary = new (zone) |
4882 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4883 | summary->set_in(0, Location::RequiresFpuRegister()); |
4884 | summary->set_out(0, Location::SameAsFirstInput()); |
4885 | return summary; |
4886 | } |
4887 | |
4888 | void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4889 | XmmRegister value = locs()->in(0).fpu_reg(); |
4890 | ASSERT(locs()->out(0).fpu_reg() == value); |
4891 | __ DoubleNegate(value); |
4892 | } |
4893 | |
4894 | LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone, |
4895 | bool opt) const { |
4896 | const intptr_t kNumInputs = 1; |
4897 | const intptr_t kNumTemps = 0; |
4898 | LocationSummary* result = new (zone) |
4899 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4900 | result->set_in(0, Location::RequiresRegister()); |
4901 | result->set_out(0, Location::RequiresFpuRegister()); |
4902 | return result; |
4903 | } |
4904 | |
4905 | void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4906 | Register value = locs()->in(0).reg(); |
4907 | FpuRegister result = locs()->out(0).fpu_reg(); |
4908 | __ cvtsi2sd(result, value); |
4909 | } |
4910 | |
4911 | LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone, |
4912 | bool opt) const { |
4913 | const intptr_t kNumInputs = 1; |
4914 | const intptr_t kNumTemps = 0; |
4915 | LocationSummary* result = new (zone) |
4916 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4917 | result->set_in(0, Location::WritableRegister()); |
4918 | result->set_out(0, Location::RequiresFpuRegister()); |
4919 | return result; |
4920 | } |
4921 | |
4922 | void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4923 | Register value = locs()->in(0).reg(); |
4924 | FpuRegister result = locs()->out(0).fpu_reg(); |
4925 | __ SmiUntag(value); |
4926 | __ cvtsi2sd(result, value); |
4927 | } |
4928 | |
4929 | LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone, |
4930 | bool opt) const { |
4931 | const intptr_t kNumInputs = 1; |
4932 | const intptr_t kNumTemps = 0; |
4933 | LocationSummary* result = new (zone) |
4934 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
4935 | result->set_in(0, Location::Pair(Location::RequiresRegister(), |
4936 | Location::RequiresRegister())); |
4937 | result->set_out(0, Location::RequiresFpuRegister()); |
4938 | return result; |
4939 | } |
4940 | |
4941 | void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4942 | PairLocation* pair = locs()->in(0).AsPairLocation(); |
4943 | Register in_lo = pair->At(0).reg(); |
4944 | Register in_hi = pair->At(1).reg(); |
4945 | |
4946 | FpuRegister result = locs()->out(0).fpu_reg(); |
4947 | |
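  // ia32 has no SSE2 conversion from a 64-bit integer to a double
  // (cvtsi2sd only takes 32-bit operands), so the value is pushed to the
  // stack and converted with the x87 FPU instead.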
4948 | // Push hi. |
4949 | __ pushl(in_hi); |
4950 | // Push lo. |
4951 | __ pushl(in_lo); |
  // Convert the 64-bit integer to a double.
4953 | __ fildl(compiler::Address(ESP, 0)); |
4954 | // Pop FPU stack onto regular stack. |
4955 | __ fstpl(compiler::Address(ESP, 0)); |
4956 | // Copy into result. |
4957 | __ movsd(result, compiler::Address(ESP, 0)); |
4958 | // Pop args. |
4959 | __ addl(ESP, compiler::Immediate(2 * kWordSize)); |
4960 | } |
4961 | |
4962 | LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone, |
4963 | bool opt) const { |
4964 | const intptr_t kNumInputs = 1; |
4965 | const intptr_t kNumTemps = 0; |
4966 | LocationSummary* result = new (zone) |
4967 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
4968 | result->set_in(0, Location::RegisterLocation(ECX)); |
4969 | result->set_out(0, Location::RegisterLocation(EAX)); |
4970 | return result; |
4971 | } |
4972 | |
4973 | void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4974 | Register result = locs()->out(0).reg(); |
4975 | Register value_obj = locs()->in(0).reg(); |
4976 | XmmRegister value_double = FpuTMP; |
4977 | ASSERT(result == EAX); |
4978 | ASSERT(result != value_obj); |
4979 | __ movsd(value_double, |
4980 | compiler::FieldAddress(value_obj, Double::value_offset())); |
4981 | __ cvttsd2si(result, value_double); |
  // Overflow is signalled by cvttsd2si producing minint (0x80000000).
4983 | compiler::Label do_call, done; |
4984 | // Check for overflow and that it fits into Smi. |
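  // Valid Smis on ia32 lie in [-2^30, 2^30). Comparing against 0xC0000000
  // (i.e. -2^30) leaves the sign flag clear exactly for that range, and the
  // overflow marker 0x80000000 falls outside it as well.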
4985 | __ cmpl(result, compiler::Immediate(0xC0000000)); |
4986 | __ j(NEGATIVE, &do_call, compiler::Assembler::kNearJump); |
4987 | __ SmiTag(result); |
4988 | __ jmp(&done); |
4989 | __ Bind(&do_call); |
4990 | __ pushl(value_obj); |
4991 | ASSERT(instance_call()->HasICData()); |
4992 | const ICData& ic_data = *instance_call()->ic_data(); |
4993 | ASSERT(ic_data.NumberOfChecksIs(1)); |
4994 | const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0)); |
4995 | const int kTypeArgsLen = 0; |
4996 | const int kNumberOfArguments = 1; |
4997 | constexpr int kSizeOfArguments = 1; |
4998 | const Array& kNoArgumentNames = Object::null_array(); |
4999 | ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments, |
5000 | kNoArgumentNames); |
5001 | compiler->GenerateStaticCall(deopt_id(), instance_call()->token_pos(), target, |
5002 | args_info, locs(), ICData::Handle(), |
5003 | ICData::kStatic); |
5004 | __ Bind(&done); |
5005 | } |
5006 | |
5007 | LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone, |
5008 | bool opt) const { |
5009 | const intptr_t kNumInputs = 1; |
5010 | const intptr_t kNumTemps = 0; |
5011 | LocationSummary* result = new (zone) |
5012 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5013 | result->set_in(0, Location::RequiresFpuRegister()); |
5014 | result->set_out(0, Location::RequiresRegister()); |
5015 | return result; |
5016 | } |
5017 | |
5018 | void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5019 | compiler::Label* deopt = |
5020 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi); |
5021 | Register result = locs()->out(0).reg(); |
5022 | XmmRegister value = locs()->in(0).fpu_reg(); |
5023 | __ cvttsd2si(result, value); |
5024 | // Check for overflow and that it fits into Smi. |
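  // Same range trick as in DoubleToIntegerInstr: the sign flag is clear
  // iff the result is in the valid Smi range [-2^30, 2^30).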
5025 | __ cmpl(result, compiler::Immediate(0xC0000000)); |
5026 | __ j(NEGATIVE, deopt); |
5027 | __ SmiTag(result); |
5028 | } |
5029 | |
5030 | LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone, |
5031 | bool opt) const { |
5032 | const intptr_t kNumInputs = 1; |
5033 | const intptr_t kNumTemps = 0; |
5034 | LocationSummary* result = new (zone) |
5035 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5036 | result->set_in(0, Location::RequiresFpuRegister()); |
5037 | result->set_out(0, Location::RequiresFpuRegister()); |
5038 | return result; |
5039 | } |
5040 | |
5041 | void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5042 | XmmRegister value = locs()->in(0).fpu_reg(); |
5043 | XmmRegister result = locs()->out(0).fpu_reg(); |
5044 | switch (recognized_kind()) { |
5045 | case MethodRecognizer::kDoubleTruncate: |
5046 | __ roundsd(result, value, compiler::Assembler::kRoundToZero); |
5047 | break; |
5048 | case MethodRecognizer::kDoubleFloor: |
5049 | __ roundsd(result, value, compiler::Assembler::kRoundDown); |
5050 | break; |
5051 | case MethodRecognizer::kDoubleCeil: |
5052 | __ roundsd(result, value, compiler::Assembler::kRoundUp); |
5053 | break; |
5054 | default: |
5055 | UNREACHABLE(); |
5056 | } |
5057 | } |
5058 | |
5059 | LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone, |
5060 | bool opt) const { |
5061 | const intptr_t kNumInputs = 1; |
5062 | const intptr_t kNumTemps = 0; |
5063 | LocationSummary* result = new (zone) |
5064 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5065 | result->set_in(0, Location::RequiresFpuRegister()); |
5066 | result->set_out(0, Location::SameAsFirstInput()); |
5067 | return result; |
5068 | } |
5069 | |
5070 | void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5071 | __ cvtsd2ss(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg()); |
5072 | } |
5073 | |
5074 | LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone, |
5075 | bool opt) const { |
5076 | const intptr_t kNumInputs = 1; |
5077 | const intptr_t kNumTemps = 0; |
5078 | LocationSummary* result = new (zone) |
5079 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5080 | result->set_in(0, Location::RequiresFpuRegister()); |
5081 | result->set_out(0, Location::SameAsFirstInput()); |
5082 | return result; |
5083 | } |
5084 | |
5085 | void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5086 | __ cvtss2sd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg()); |
5087 | } |
5088 | |
5089 | LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone, |
5090 | bool opt) const { |
5091 | ASSERT((InputCount() == 1) || (InputCount() == 2)); |
5092 | const intptr_t kNumTemps = |
5093 | (recognized_kind() == MethodRecognizer::kMathDoublePow) ? 4 : 1; |
5094 | LocationSummary* result = new (zone) |
5095 | LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); |
  // EDI is chosen because it is callee-saved, so we do not need to back it
  // up before calling into the runtime.
5098 | result->set_temp(0, Location::RegisterLocation(EDI)); |
5099 | result->set_in(0, Location::FpuRegisterLocation(XMM1)); |
5100 | if (InputCount() == 2) { |
5101 | result->set_in(1, Location::FpuRegisterLocation(XMM2)); |
5102 | } |
5103 | if (recognized_kind() == MethodRecognizer::kMathDoublePow) { |
5104 | // Temp index 1. |
5105 | result->set_temp(1, Location::RegisterLocation(EAX)); |
5106 | // Temp index 2. |
5107 | result->set_temp(2, Location::FpuRegisterLocation(XMM4)); |
5108 | // We need to block XMM0 for the floating-point calling convention. |
5109 | result->set_temp(3, Location::FpuRegisterLocation(XMM0)); |
5110 | } |
5111 | result->set_out(0, Location::FpuRegisterLocation(XMM3)); |
5112 | return result; |
5113 | } |
5114 | |
5115 | // Pseudo code: |
5116 | // if (exponent == 0.0) return 1.0; |
5117 | // // Speed up simple cases. |
5118 | // if (exponent == 1.0) return base; |
5119 | // if (exponent == 2.0) return base * base; |
5120 | // if (exponent == 3.0) return base * base * base; |
5121 | // if (base == 1.0) return 1.0; |
5122 | // if (base.isNaN || exponent.isNaN) { |
5123 | // return double.NAN; |
5124 | // } |
5125 | // if (base != -Infinity && exponent == 0.5) { |
5126 | // if (base == 0.0) return 0.0; |
5127 | // return sqrt(value); |
5128 | // } |
5129 | // TODO(srdjan): Move into a stub? |
5130 | static void InvokeDoublePow(FlowGraphCompiler* compiler, |
5131 | InvokeMathCFunctionInstr* instr) { |
5132 | ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow); |
5133 | const intptr_t kInputCount = 2; |
5134 | ASSERT(instr->InputCount() == kInputCount); |
5135 | LocationSummary* locs = instr->locs(); |
5136 | |
5137 | XmmRegister base = locs->in(0).fpu_reg(); |
5138 | XmmRegister exp = locs->in(1).fpu_reg(); |
5139 | XmmRegister result = locs->out(0).fpu_reg(); |
5140 | Register temp = locs->temp(InvokeMathCFunctionInstr::kObjectTempIndex).reg(); |
5141 | XmmRegister zero_temp = |
5142 | locs->temp(InvokeMathCFunctionInstr::kDoubleTempIndex).fpu_reg(); |
5143 | |
5144 | __ xorps(zero_temp, zero_temp); // 0.0. |
5145 | __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(1.0))); |
5146 | __ movsd(result, compiler::FieldAddress(temp, Double::value_offset())); |
5147 | |
5148 | compiler::Label check_base, skip_call; |
5149 | // exponent == 0.0 -> return 1.0; |
5150 | __ comisd(exp, zero_temp); |
5151 | __ j(PARITY_EVEN, &check_base); |
5152 | __ j(EQUAL, &skip_call); // 'result' is 1.0. |
5153 | |
5154 | // exponent == 1.0 ? |
5155 | __ comisd(exp, result); |
5156 | compiler::Label return_base; |
5157 | __ j(EQUAL, &return_base, compiler::Assembler::kNearJump); |
5158 | |
5159 | // exponent == 2.0 ? |
5160 | __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(2.0))); |
5161 | __ movsd(XMM0, compiler::FieldAddress(temp, Double::value_offset())); |
5162 | __ comisd(exp, XMM0); |
5163 | compiler::Label return_base_times_2; |
5164 | __ j(EQUAL, &return_base_times_2, compiler::Assembler::kNearJump); |
5165 | |
5166 | // exponent == 3.0 ? |
5167 | __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(3.0))); |
5168 | __ movsd(XMM0, compiler::FieldAddress(temp, Double::value_offset())); |
5169 | __ comisd(exp, XMM0); |
5170 | __ j(NOT_EQUAL, &check_base); |
5171 | |
  // base ^ 3.
5173 | __ movsd(result, base); |
5174 | __ mulsd(result, base); |
5175 | __ mulsd(result, base); |
5176 | __ jmp(&skip_call); |
5177 | |
5178 | __ Bind(&return_base); |
5179 | __ movsd(result, base); |
5180 | __ jmp(&skip_call); |
5181 | |
5182 | __ Bind(&return_base_times_2); |
5183 | __ movsd(result, base); |
5184 | __ mulsd(result, base); |
5185 | __ jmp(&skip_call); |
5186 | |
5187 | __ Bind(&check_base); |
5188 | // Note: 'exp' could be NaN. |
5189 | |
5190 | // base == 1.0 -> return 1.0; |
5191 | __ comisd(base, result); |
5192 | compiler::Label return_nan; |
5193 | __ j(PARITY_EVEN, &return_nan, compiler::Assembler::kNearJump); |
5194 | __ j(EQUAL, &skip_call, compiler::Assembler::kNearJump); |
5195 | // Note: 'base' could be NaN. |
5196 | __ comisd(exp, base); |
  compiler::Label try_sqrt;
  // PARITY_ODD: neither 'exp' nor 'base' is NaN; try the sqrt fast path.
  __ j(PARITY_ODD, &try_sqrt, compiler::Assembler::kNearJump);
5200 | // Return NaN. |
5201 | __ Bind(&return_nan); |
5202 | __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(NAN))); |
5203 | __ movsd(result, compiler::FieldAddress(temp, Double::value_offset())); |
5204 | __ jmp(&skip_call); |
5205 | |
5206 | compiler::Label do_pow, return_zero; |
5207 | __ Bind(&try_sqrt); |
5208 | // Before calling pow, check if we could use sqrt instead of pow. |
5209 | __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(kNegInfinity))); |
5210 | __ movsd(result, compiler::FieldAddress(temp, Double::value_offset())); |
5211 | // base == -Infinity -> call pow; |
5212 | __ comisd(base, result); |
5213 | __ j(EQUAL, &do_pow, compiler::Assembler::kNearJump); |
5214 | |
5215 | // exponent == 0.5 ? |
5216 | __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(0.5))); |
5217 | __ movsd(result, compiler::FieldAddress(temp, Double::value_offset())); |
5218 | __ comisd(exp, result); |
5219 | __ j(NOT_EQUAL, &do_pow, compiler::Assembler::kNearJump); |
5220 | |
5221 | // base == 0 -> return 0; |
5222 | __ comisd(base, zero_temp); |
5223 | __ j(EQUAL, &return_zero, compiler::Assembler::kNearJump); |
5224 | |
5225 | __ sqrtsd(result, base); |
5226 | __ jmp(&skip_call, compiler::Assembler::kNearJump); |
5227 | |
5228 | __ Bind(&return_zero); |
5229 | __ movsd(result, zero_temp); |
5230 | __ jmp(&skip_call); |
5231 | |
5232 | __ Bind(&do_pow); |
5233 | // Save ESP. |
5234 | __ movl(locs->temp(InvokeMathCFunctionInstr::kSavedSpTempIndex).reg(), ESP); |
5235 | __ ReserveAlignedFrameSpace(kDoubleSize * kInputCount); |
5236 | for (intptr_t i = 0; i < kInputCount; i++) { |
5237 | __ movsd(compiler::Address(ESP, kDoubleSize * i), locs->in(i).fpu_reg()); |
5238 | } |
5239 | __ CallRuntime(instr->TargetFunction(), kInputCount); |
5240 | __ fstpl(compiler::Address(ESP, 0)); |
5241 | __ movsd(locs->out(0).fpu_reg(), compiler::Address(ESP, 0)); |
5242 | // Restore ESP. |
5243 | __ movl(ESP, locs->temp(InvokeMathCFunctionInstr::kSavedSpTempIndex).reg()); |
5244 | __ Bind(&skip_call); |
5245 | } |
5246 | |
5247 | void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5248 | if (recognized_kind() == MethodRecognizer::kMathDoublePow) { |
5249 | InvokeDoublePow(compiler, this); |
5250 | return; |
5251 | } |
5252 | // Save ESP. |
5253 | __ movl(locs()->temp(kSavedSpTempIndex).reg(), ESP); |
5254 | __ ReserveAlignedFrameSpace(kDoubleSize * InputCount()); |
5255 | for (intptr_t i = 0; i < InputCount(); i++) { |
5256 | __ movsd(compiler::Address(ESP, kDoubleSize * i), locs()->in(i).fpu_reg()); |
5257 | } |
5258 | |
5259 | __ CallRuntime(TargetFunction(), InputCount()); |
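  // Under the ia32 calling convention the C runtime returns the double in
  // ST(0); fstpl spills it into the reserved stack slot so that it can be
  // loaded into the XMM result register.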
5260 | __ fstpl(compiler::Address(ESP, 0)); |
5261 | __ movsd(locs()->out(0).fpu_reg(), compiler::Address(ESP, 0)); |
5262 | // Restore ESP. |
5263 | __ movl(ESP, locs()->temp(kSavedSpTempIndex).reg()); |
5264 | } |
5265 | |
5266 | LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone, |
5267 | bool opt) const { |
5268 | // Only use this instruction in optimized code. |
5269 | ASSERT(opt); |
5270 | const intptr_t kNumInputs = 1; |
5271 | LocationSummary* summary = |
5272 | new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); |
5273 | if (representation() == kUnboxedDouble) { |
5274 | if (index() == 0) { |
5275 | summary->set_in( |
5276 | 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any())); |
5277 | } else { |
5278 | ASSERT(index() == 1); |
5279 | summary->set_in( |
5280 | 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister())); |
5281 | } |
5282 | summary->set_out(0, Location::RequiresFpuRegister()); |
5283 | } else { |
5284 | ASSERT(representation() == kTagged); |
5285 | if (index() == 0) { |
5286 | summary->set_in( |
5287 | 0, Location::Pair(Location::RequiresRegister(), Location::Any())); |
5288 | } else { |
5289 | ASSERT(index() == 1); |
5290 | summary->set_in( |
5291 | 0, Location::Pair(Location::Any(), Location::RequiresRegister())); |
5292 | } |
5293 | summary->set_out(0, Location::RequiresRegister()); |
5294 | } |
5295 | return summary; |
5296 | } |
5297 | |
5298 | void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5299 | ASSERT(locs()->in(0).IsPairLocation()); |
5300 | PairLocation* pair = locs()->in(0).AsPairLocation(); |
5301 | Location in_loc = pair->At(index()); |
5302 | if (representation() == kUnboxedDouble) { |
5303 | XmmRegister out = locs()->out(0).fpu_reg(); |
5304 | XmmRegister in = in_loc.fpu_reg(); |
5305 | __ movaps(out, in); |
5306 | } else { |
5307 | ASSERT(representation() == kTagged); |
5308 | Register out = locs()->out(0).reg(); |
5309 | Register in = in_loc.reg(); |
5310 | __ movl(out, in); |
5311 | } |
5312 | } |
5313 | |
5314 | LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone, |
5315 | bool opt) const { |
5316 | const intptr_t kNumInputs = 2; |
5317 | const intptr_t kNumTemps = 0; |
5318 | LocationSummary* summary = new (zone) |
5319 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5320 | // Both inputs must be writable because they will be untagged. |
5321 | summary->set_in(0, Location::RegisterLocation(EAX)); |
5322 | summary->set_in(1, Location::WritableRegister()); |
5323 | // Output is a pair of registers. |
5324 | summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX), |
5325 | Location::RegisterLocation(EDX))); |
5326 | return summary; |
5327 | } |
5328 | |
5329 | void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5330 | ASSERT(CanDeoptimize()); |
5331 | compiler::Label* deopt = |
5332 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); |
5333 | Register left = locs()->in(0).reg(); |
5334 | Register right = locs()->in(1).reg(); |
5335 | ASSERT(locs()->out(0).IsPairLocation()); |
5336 | PairLocation* pair = locs()->out(0).AsPairLocation(); |
5337 | Register result1 = pair->At(0).reg(); |
5338 | Register result2 = pair->At(1).reg(); |
5339 | if (RangeUtils::CanBeZero(divisor_range())) { |
    // Handle division by zero in the runtime.
5341 | __ testl(right, right); |
5342 | __ j(ZERO, deopt); |
5343 | } |
5344 | ASSERT(left == EAX); |
5345 | ASSERT((right != EDX) && (right != EAX)); |
5346 | ASSERT(result1 == EAX); |
5347 | ASSERT(result2 == EDX); |
5348 | __ SmiUntag(left); |
5349 | __ SmiUntag(right); |
5350 | __ cdq(); // Sign extend EAX -> EDX:EAX. |
5351 | __ idivl(right); // EAX: quotient, EDX: remainder. |
  // Check the corner case of dividing MIN_SMI by -1, in which case the
  // result cannot be tagged.
5354 | // TODO(srdjan): We could store instead untagged intermediate results in a |
5355 | // typed array, but then the load indexed instructions would need to be |
5356 | // able to deoptimize. |
5357 | __ cmpl(EAX, compiler::Immediate(0x40000000)); |
5358 | __ j(EQUAL, deopt); |
5359 | // Modulo result (EDX) correction: |
5360 | // res = left % right; |
5361 | // if (res < 0) { |
5362 | // if (right < 0) { |
5363 | // res = res - right; |
5364 | // } else { |
5365 | // res = res + right; |
5366 | // } |
5367 | // } |
5368 | compiler::Label done; |
5369 | __ cmpl(EDX, compiler::Immediate(0)); |
5370 | __ j(GREATER_EQUAL, &done, compiler::Assembler::kNearJump); |
5371 | // Result is negative, adjust it. |
5372 | if (RangeUtils::Overlaps(divisor_range(), -1, 1)) { |
5373 | compiler::Label subtract; |
5374 | __ cmpl(right, compiler::Immediate(0)); |
5375 | __ j(LESS, &subtract, compiler::Assembler::kNearJump); |
5376 | __ addl(EDX, right); |
5377 | __ jmp(&done, compiler::Assembler::kNearJump); |
5378 | __ Bind(&subtract); |
5379 | __ subl(EDX, right); |
5380 | } else if (divisor_range()->IsPositive()) { |
5381 | // Right is positive. |
5382 | __ addl(EDX, right); |
5383 | } else { |
5384 | // Right is negative. |
5385 | __ subl(EDX, right); |
5386 | } |
5387 | __ Bind(&done); |
5388 | |
5389 | __ SmiTag(EAX); |
5390 | __ SmiTag(EDX); |
5391 | } |
5392 | |
5393 | LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
5394 | comparison()->InitializeLocationSummary(zone, opt); |
5395 | // Branches don't produce a result. |
5396 | comparison()->locs()->set_out(0, Location::NoLocation()); |
5397 | return comparison()->locs(); |
5398 | } |
5399 | |
5400 | void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5401 | comparison()->EmitBranchCode(compiler, this); |
5402 | } |
5403 | |
5404 | LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone, |
5405 | bool opt) const { |
5406 | const intptr_t kNumInputs = 1; |
5407 | const bool need_mask_temp = IsBitTest(); |
5408 | const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0; |
5409 | LocationSummary* summary = new (zone) |
5410 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5411 | summary->set_in(0, Location::RequiresRegister()); |
5412 | if (!IsNullCheck()) { |
5413 | summary->set_temp(0, Location::RequiresRegister()); |
5414 | if (need_mask_temp) { |
5415 | summary->set_temp(1, Location::RequiresRegister()); |
5416 | } |
5417 | } |
5418 | return summary; |
5419 | } |
5420 | |
5421 | void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler, |
5422 | compiler::Label* deopt) { |
5423 | const compiler::Immediate& raw_null = |
5424 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
5425 | __ cmpl(locs()->in(0).reg(), raw_null); |
5426 | ASSERT(IsDeoptIfNull() || IsDeoptIfNotNull()); |
5427 | Condition cond = IsDeoptIfNull() ? EQUAL : NOT_EQUAL; |
5428 | __ j(cond, deopt); |
5429 | } |
5430 | |
5431 | void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler, |
5432 | intptr_t min, |
5433 | intptr_t max, |
5434 | intptr_t mask, |
5435 | compiler::Label* deopt) { |
5436 | Register biased_cid = locs()->temp(0).reg(); |
5437 | __ subl(biased_cid, compiler::Immediate(min)); |
5438 | __ cmpl(biased_cid, compiler::Immediate(max - min)); |
5439 | __ j(ABOVE, deopt); |
5440 | |
5441 | Register mask_reg = locs()->temp(1).reg(); |
5442 | __ movl(mask_reg, compiler::Immediate(mask)); |
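  // bt copies bit number 'biased_cid' of mask_reg into the carry flag, so
  // NOT_CARRY below means the class id is not in the set.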
5443 | __ bt(mask_reg, biased_cid); |
5444 | __ j(NOT_CARRY, deopt); |
5445 | } |
5446 | |
5447 | int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler, |
5448 | int bias, |
5449 | intptr_t cid_start, |
5450 | intptr_t cid_end, |
5451 | bool is_last, |
5452 | compiler::Label* is_ok, |
5453 | compiler::Label* deopt, |
5454 | bool use_near_jump) { |
5455 | Register biased_cid = locs()->temp(0).reg(); |
5456 | Condition no_match, match; |
5457 | if (cid_start == cid_end) { |
5458 | __ cmpl(biased_cid, compiler::Immediate(cid_start - bias)); |
5459 | no_match = NOT_EQUAL; |
5460 | match = EQUAL; |
5461 | } else { |
    // For class ID ranges use a subtract followed by an unsigned
    // comparison to check both ends of the range with one comparison.
5464 | __ addl(biased_cid, compiler::Immediate(bias - cid_start)); |
5465 | bias = cid_start; |
5466 | __ cmpl(biased_cid, compiler::Immediate(cid_end - cid_start)); |
5467 | no_match = ABOVE; |
5468 | match = BELOW_EQUAL; |
5469 | } |
5470 | |
5471 | if (is_last) { |
5472 | __ j(no_match, deopt); |
5473 | } else { |
5474 | if (use_near_jump) { |
5475 | __ j(match, is_ok, compiler::Assembler::kNearJump); |
5476 | } else { |
5477 | __ j(match, is_ok); |
5478 | } |
5479 | } |
5480 | return bias; |
5481 | } |
5482 | |
5483 | LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone, |
5484 | bool opt) const { |
5485 | const intptr_t kNumInputs = 1; |
5486 | const intptr_t kNumTemps = 0; |
5487 | LocationSummary* summary = new (zone) |
5488 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5489 | summary->set_in(0, Location::RequiresRegister()); |
5490 | return summary; |
5491 | } |
5492 | |
5493 | void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5494 | Register value = locs()->in(0).reg(); |
5495 | compiler::Label* deopt = compiler->AddDeoptStub( |
5496 | deopt_id(), ICData::kDeoptCheckSmi, licm_hoisted_ ? ICData::kHoisted : 0); |
5497 | __ BranchIfNotSmi(value, deopt); |
5498 | } |
5499 | |
5500 | void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5501 | ThrowErrorSlowPathCode* slow_path = |
5502 | new NullErrorSlowPath(this, compiler->CurrentTryIndex()); |
5503 | compiler->AddSlowPathCode(slow_path); |
5504 | |
5505 | Register value_reg = locs()->in(0).reg(); |
5506 | // TODO(dartbug.com/30480): Consider passing `null` literal as an argument |
5507 | // in order to be able to allocate it on register. |
5508 | __ CompareObject(value_reg, Object::null_object()); |
5509 | __ BranchIf(EQUAL, slow_path->entry_label()); |
5510 | } |
5511 | |
5512 | LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone, |
5513 | bool opt) const { |
5514 | const intptr_t kNumInputs = 1; |
5515 | const intptr_t kNumTemps = 0; |
5516 | LocationSummary* summary = new (zone) |
5517 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5518 | summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister() |
5519 | : Location::WritableRegister()); |
5520 | return summary; |
5521 | } |
5522 | |
5523 | void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5524 | Register value = locs()->in(0).reg(); |
5525 | compiler::Label* deopt = |
5526 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass); |
5527 | if (cids_.IsSingleCid()) { |
5528 | __ cmpl(value, compiler::Immediate(Smi::RawValue(cids_.cid_start))); |
5529 | __ j(NOT_ZERO, deopt); |
5530 | } else { |
5531 | __ AddImmediate(value, |
5532 | compiler::Immediate(-Smi::RawValue(cids_.cid_start))); |
5533 | __ cmpl(value, compiler::Immediate(Smi::RawValue(cids_.Extent()))); |
5534 | __ j(ABOVE, deopt); |
5535 | } |
5536 | } |
5537 | |
5538 | // Length: register or constant. |
5539 | // Index: register, constant or stack slot. |
5540 | LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone, |
5541 | bool opt) const { |
5542 | const intptr_t kNumInputs = 2; |
5543 | const intptr_t kNumTemps = 0; |
5544 | LocationSummary* locs = new (zone) |
5545 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5546 | if (length()->definition()->IsConstant()) { |
5547 | locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length())); |
5548 | } else { |
5549 | locs->set_in(kLengthPos, Location::PrefersRegister()); |
5550 | } |
5551 | locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index())); |
5552 | return locs; |
5553 | } |
5554 | |
5555 | void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5556 | uint32_t flags = generalized_ ? ICData::kGeneralized : 0; |
5557 | flags |= licm_hoisted_ ? ICData::kHoisted : 0; |
5558 | compiler::Label* deopt = |
5559 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags); |
5560 | |
5561 | Location length_loc = locs()->in(kLengthPos); |
5562 | Location index_loc = locs()->in(kIndexPos); |
5563 | |
5564 | if (length_loc.IsConstant() && index_loc.IsConstant()) { |
5565 | ASSERT((Smi::Cast(length_loc.constant()).Value() <= |
5566 | Smi::Cast(index_loc.constant()).Value()) || |
5567 | (Smi::Cast(index_loc.constant()).Value() < 0)); |
    // Unconditionally deoptimize for constant bounds checks because they
    // are only generated when the index is known to be out-of-bounds.
5570 | __ jmp(deopt); |
5571 | return; |
5572 | } |
5573 | |
5574 | const intptr_t index_cid = index()->Type()->ToCid(); |
5575 | if (length_loc.IsConstant()) { |
5576 | Register index = index_loc.reg(); |
5577 | if (index_cid != kSmiCid) { |
5578 | __ BranchIfNotSmi(index, deopt); |
5579 | } |
5580 | const Smi& length = Smi::Cast(length_loc.constant()); |
5581 | if (length.Value() == Smi::kMaxValue) { |
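      // If the length is the maximal Smi, no index can be too large; only
      // a negative index is out of bounds.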
5582 | __ testl(index, index); |
5583 | __ j(NEGATIVE, deopt); |
5584 | } else { |
5585 | __ cmpl(index, compiler::Immediate(static_cast<int32_t>(length.raw()))); |
5586 | __ j(ABOVE_EQUAL, deopt); |
5587 | } |
5588 | } else if (index_loc.IsConstant()) { |
5589 | const Smi& index = Smi::Cast(index_loc.constant()); |
5590 | if (length_loc.IsStackSlot()) { |
5591 | const compiler::Address& length = LocationToStackSlotAddress(length_loc); |
5592 | __ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.raw()))); |
5593 | } else { |
5594 | Register length = length_loc.reg(); |
5595 | __ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.raw()))); |
5596 | } |
5597 | __ j(BELOW_EQUAL, deopt); |
5598 | } else if (length_loc.IsStackSlot()) { |
5599 | Register index = index_loc.reg(); |
5600 | const compiler::Address& length = LocationToStackSlotAddress(length_loc); |
5601 | if (index_cid != kSmiCid) { |
5602 | __ BranchIfNotSmi(index, deopt); |
5603 | } |
5604 | __ cmpl(index, length); |
5605 | __ j(ABOVE_EQUAL, deopt); |
5606 | } else { |
5607 | Register index = index_loc.reg(); |
5608 | Register length = length_loc.reg(); |
5609 | if (index_cid != kSmiCid) { |
5610 | __ BranchIfNotSmi(index, deopt); |
5611 | } |
5612 | __ cmpl(length, index); |
5613 | __ j(BELOW_EQUAL, deopt); |
5614 | } |
5615 | } |
5616 | |
5617 | LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone, |
5618 | bool opt) const { |
5619 | const intptr_t kNumInputs = 2; |
5620 | switch (op_kind()) { |
5621 | case Token::kBIT_AND: |
5622 | case Token::kBIT_OR: |
5623 | case Token::kBIT_XOR: |
5624 | case Token::kADD: |
5625 | case Token::kSUB: { |
5626 | const intptr_t kNumTemps = 0; |
5627 | LocationSummary* summary = new (zone) LocationSummary( |
5628 | zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5629 | summary->set_in(0, Location::Pair(Location::RequiresRegister(), |
5630 | Location::RequiresRegister())); |
5631 | summary->set_in(1, Location::Pair(Location::RequiresRegister(), |
5632 | Location::RequiresRegister())); |
5633 | summary->set_out(0, Location::SameAsFirstInput()); |
5634 | return summary; |
5635 | } |
5636 | case Token::kMUL: { |
5637 | const intptr_t kNumTemps = 1; |
5638 | LocationSummary* summary = new (zone) LocationSummary( |
5639 | zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5640 | summary->set_in(0, Location::Pair(Location::RegisterLocation(EAX), |
5641 | Location::RegisterLocation(EDX))); |
5642 | summary->set_in(1, Location::Pair(Location::RequiresRegister(), |
5643 | Location::RequiresRegister())); |
5644 | summary->set_out(0, Location::SameAsFirstInput()); |
5645 | summary->set_temp(0, Location::RequiresRegister()); |
5646 | return summary; |
5647 | } |
5648 | default: |
5649 | UNREACHABLE(); |
      return nullptr;
5651 | } |
5652 | } |
5653 | |
5654 | void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5655 | PairLocation* left_pair = locs()->in(0).AsPairLocation(); |
5656 | Register left_lo = left_pair->At(0).reg(); |
5657 | Register left_hi = left_pair->At(1).reg(); |
5658 | PairLocation* right_pair = locs()->in(1).AsPairLocation(); |
5659 | Register right_lo = right_pair->At(0).reg(); |
5660 | Register right_hi = right_pair->At(1).reg(); |
5661 | PairLocation* out_pair = locs()->out(0).AsPairLocation(); |
5662 | Register out_lo = out_pair->At(0).reg(); |
5663 | Register out_hi = out_pair->At(1).reg(); |
5664 | ASSERT(out_lo == left_lo); |
5665 | ASSERT(out_hi == left_hi); |
5666 | ASSERT(!can_overflow()); |
5667 | ASSERT(!CanDeoptimize()); |
5668 | |
5669 | switch (op_kind()) { |
5670 | case Token::kBIT_AND: |
5671 | __ andl(left_lo, right_lo); |
5672 | __ andl(left_hi, right_hi); |
5673 | break; |
5674 | case Token::kBIT_OR: |
5675 | __ orl(left_lo, right_lo); |
5676 | __ orl(left_hi, right_hi); |
5677 | break; |
5678 | case Token::kBIT_XOR: |
5679 | __ xorl(left_lo, right_lo); |
5680 | __ xorl(left_hi, right_hi); |
5681 | break; |
5682 | case Token::kADD: |
5683 | case Token::kSUB: { |
5684 | if (op_kind() == Token::kADD) { |
5685 | __ addl(left_lo, right_lo); |
5686 | __ adcl(left_hi, right_hi); |
5687 | } else { |
5688 | __ subl(left_lo, right_lo); |
5689 | __ sbbl(left_hi, right_hi); |
5690 | } |
5691 | break; |
5692 | } |
5693 | case Token::kMUL: { |
      // Compute the low 64 bits of a * b as:
      //     a_l * b_l + ((a_h * b_l + a_l * b_h) << 32)
      // Since we requested EDX:EAX for the input and output,
      // we can use these as scratch registers once the
      // input has been consumed.
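      // (Splitting a = a_h:a_l and b = b_h:b_l into 32-bit halves, the full
      // product also contains an (a_h * b_h) << 64 term, which vanishes
      // modulo 2^64.)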
5699 | Register temp = locs()->temp(0).reg(); |
5700 | __ movl(temp, left_lo); |
5701 | __ imull(left_hi, right_lo); // a_h * b_l |
5702 | __ imull(temp, right_hi); // a_l * b_h |
5703 | __ addl(temp, left_hi); // sum_high |
5704 | ASSERT(left_lo == EAX); |
5705 | __ mull(right_lo); // a_l * b_l in EDX:EAX |
5706 | __ addl(EDX, temp); // add sum_high |
5707 | ASSERT(out_lo == EAX); |
5708 | ASSERT(out_hi == EDX); |
5709 | break; |
5710 | } |
5711 | default: |
5712 | UNREACHABLE(); |
5713 | } |
5714 | } |
5715 | |
5716 | static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler, |
5717 | Token::Kind op_kind, |
5718 | Register left_lo, |
5719 | Register left_hi, |
5720 | const Object& right) { |
5721 | const int64_t shift = Integer::Cast(right).AsInt64Value(); |
5722 | ASSERT(shift >= 0); |
5723 | switch (op_kind) { |
5724 | case Token::kSHR: { |
5725 | if (shift > 31) { |
5726 | __ movl(left_lo, left_hi); // Shift by 32. |
5727 | __ sarl(left_hi, compiler::Immediate(31)); // Sign extend left hi. |
5728 | if (shift > 32) { |
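          // Counts of 64 or more saturate an arithmetic shift to the sign,
          // so the remaining count is clamped to 31.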
5729 | __ sarl(left_lo, compiler::Immediate(shift > 63 ? 31 : shift - 32)); |
5730 | } |
5731 | } else { |
5732 | __ shrdl(left_lo, left_hi, compiler::Immediate(shift)); |
5733 | __ sarl(left_hi, compiler::Immediate(shift)); |
5734 | } |
5735 | break; |
5736 | } |
5737 | case Token::kSHL: { |
5738 | ASSERT(shift < 64); |
5739 | if (shift > 31) { |
5740 | __ movl(left_hi, left_lo); // Shift by 32. |
5741 | __ xorl(left_lo, left_lo); // Zero left_lo. |
5742 | if (shift > 32) { |
5743 | __ shll(left_hi, compiler::Immediate(shift - 32)); |
5744 | } |
5745 | } else { |
5746 | __ shldl(left_hi, left_lo, compiler::Immediate(shift)); |
5747 | __ shll(left_lo, compiler::Immediate(shift)); |
5748 | } |
5749 | break; |
5750 | } |
5751 | default: |
5752 | UNREACHABLE(); |
5753 | } |
5754 | } |
5755 | |
5756 | static void EmitShiftInt64ByECX(FlowGraphCompiler* compiler, |
5757 | Token::Kind op_kind, |
5758 | Register left_lo, |
5759 | Register left_hi) { |
  // The sarl instruction masks the shift count to 5 bits, and shrdl is
  // undefined for shift counts greater than the operand size (32).
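  // For example, a count of 40 in CL behaves like a count of 8 for sarl
  // (40 & 31 == 8), which is why counts above 31 take the large_shift paths
  // below.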
5762 | compiler::Label done, large_shift; |
5763 | switch (op_kind) { |
5764 | case Token::kSHR: { |
5765 | __ cmpl(ECX, compiler::Immediate(31)); |
5766 | __ j(ABOVE, &large_shift); |
5767 | |
5768 | __ shrdl(left_lo, left_hi, ECX); // Shift count in CL. |
5769 | __ sarl(left_hi, ECX); // Shift count in CL. |
5770 | __ jmp(&done, compiler::Assembler::kNearJump); |
5771 | |
5772 | __ Bind(&large_shift); |
5773 | // No need to subtract 32 from CL, only 5 bits used by sarl. |
5774 | __ movl(left_lo, left_hi); // Shift by 32. |
5775 | __ sarl(left_hi, compiler::Immediate(31)); // Sign extend left hi. |
5776 | __ sarl(left_lo, ECX); // Shift count: CL % 32. |
5777 | break; |
5778 | } |
5779 | case Token::kSHL: { |
5780 | __ cmpl(ECX, compiler::Immediate(31)); |
5781 | __ j(ABOVE, &large_shift); |
5782 | |
5783 | __ shldl(left_hi, left_lo, ECX); // Shift count in CL. |
5784 | __ shll(left_lo, ECX); // Shift count in CL. |
5785 | __ jmp(&done, compiler::Assembler::kNearJump); |
5786 | |
5787 | __ Bind(&large_shift); |
5788 | // No need to subtract 32 from CL, only 5 bits used by shll. |
5789 | __ movl(left_hi, left_lo); // Shift by 32. |
5790 | __ xorl(left_lo, left_lo); // Zero left_lo. |
5791 | __ shll(left_hi, ECX); // Shift count: CL % 32. |
5792 | break; |
5793 | } |
5794 | default: |
5795 | UNREACHABLE(); |
5796 | } |
5797 | __ Bind(&done); |
5798 | } |
5799 | |
5800 | static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler, |
5801 | Token::Kind op_kind, |
5802 | Register left, |
5803 | const Object& right) { |
5804 | const int64_t shift = Integer::Cast(right).AsInt64Value(); |
5805 | if (shift >= 32) { |
5806 | __ xorl(left, left); |
5807 | } else { |
5808 | switch (op_kind) { |
5809 | case Token::kSHR: { |
5810 | __ shrl(left, compiler::Immediate(shift)); |
5811 | break; |
5812 | } |
5813 | case Token::kSHL: { |
5814 | __ shll(left, compiler::Immediate(shift)); |
5815 | break; |
5816 | } |
5817 | default: |
5818 | UNREACHABLE(); |
5819 | } |
5820 | } |
5821 | } |
5822 | |
5823 | static void EmitShiftUint32ByECX(FlowGraphCompiler* compiler, |
5824 | Token::Kind op_kind, |
5825 | Register left) { |
5826 | switch (op_kind) { |
5827 | case Token::kSHR: { |
5828 | __ shrl(left, ECX); |
5829 | break; |
5830 | } |
5831 | case Token::kSHL: { |
5832 | __ shll(left, ECX); |
5833 | break; |
5834 | } |
5835 | default: |
5836 | UNREACHABLE(); |
5837 | } |
5838 | } |
5839 | |
5840 | class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode { |
5841 | public: |
5842 | static const intptr_t kNumberOfArguments = 0; |
5843 | |
5844 | ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction, intptr_t try_index) |
5845 | : ThrowErrorSlowPathCode(instruction, |
5846 | kArgumentErrorUnboxedInt64RuntimeEntry, |
5847 | kNumberOfArguments, |
5848 | try_index) {} |
5849 | |
  const char* name() override { return "int64 shift"; }
5851 | |
5852 | void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override { |
5853 | PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation(); |
5854 | Register right_lo = right_pair->At(0).reg(); |
5855 | Register right_hi = right_pair->At(1).reg(); |
5856 | PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation(); |
5857 | Register out_lo = out_pair->At(0).reg(); |
5858 | Register out_hi = out_pair->At(1).reg(); |
5859 | #if defined(DEBUG) |
5860 | PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation(); |
5861 | Register left_lo = left_pair->At(0).reg(); |
5862 | Register left_hi = left_pair->At(1).reg(); |
5863 | ASSERT(out_lo == left_lo); |
5864 | ASSERT(out_hi == left_hi); |
5865 | #endif // defined(DEBUG) |
5866 | |
5867 | compiler::Label throw_error; |
5868 | __ testl(right_hi, right_hi); |
5869 | __ j(NEGATIVE, &throw_error); |
5870 | |
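    // The count is non-negative but out of range (>= 64): an arithmetic
    // right shift saturates to the sign (all zeros or all ones) and a left
    // shift produces zero.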
5871 | switch (instruction()->AsShiftInt64Op()->op_kind()) { |
5872 | case Token::kSHR: |
5873 | __ sarl(out_hi, compiler::Immediate(31)); |
5874 | __ movl(out_lo, out_hi); |
5875 | break; |
5876 | case Token::kSHL: { |
5877 | __ xorl(out_lo, out_lo); |
5878 | __ xorl(out_hi, out_hi); |
5879 | break; |
5880 | } |
5881 | default: |
5882 | UNREACHABLE(); |
5883 | } |
5884 | __ jmp(exit_label()); |
5885 | |
5886 | __ Bind(&throw_error); |
5887 | |
    // Can't pass an unboxed int64 value directly to a runtime call, as all
    // arguments are expected to be tagged (boxed).
    // The unboxed int64 argument is passed through a dedicated slot in Thread.
    // TODO(dartbug.com/33549): Clean this up when unboxed values
    // can be passed as arguments.
5893 | __ movl(compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()), |
5894 | right_lo); |
5895 | __ movl(compiler::Address( |
5896 | THR, Thread::unboxed_int64_runtime_arg_offset() + kWordSize), |
5897 | right_hi); |
5898 | } |
5899 | }; |
5900 | |
5901 | LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone, |
5902 | bool opt) const { |
5903 | const intptr_t kNumInputs = 2; |
5904 | const intptr_t kNumTemps = 0; |
5905 | LocationSummary* summary = new (zone) LocationSummary( |
5906 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
5907 | summary->set_in(0, Location::Pair(Location::RequiresRegister(), |
5908 | Location::RequiresRegister())); |
5909 | if (RangeUtils::IsPositive(shift_range()) && |
5910 | right()->definition()->IsConstant()) { |
5911 | ConstantInstr* constant = right()->definition()->AsConstant(); |
5912 | summary->set_in(1, Location::Constant(constant)); |
5913 | } else { |
5914 | summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX), |
5915 | Location::RequiresRegister())); |
5916 | } |
5917 | summary->set_out(0, Location::SameAsFirstInput()); |
5918 | return summary; |
5919 | } |
5920 | |
5921 | void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5922 | PairLocation* left_pair = locs()->in(0).AsPairLocation(); |
5923 | Register left_lo = left_pair->At(0).reg(); |
5924 | Register left_hi = left_pair->At(1).reg(); |
5925 | PairLocation* out_pair = locs()->out(0).AsPairLocation(); |
5926 | Register out_lo = out_pair->At(0).reg(); |
5927 | Register out_hi = out_pair->At(1).reg(); |
5928 | ASSERT(out_lo == left_lo); |
5929 | ASSERT(out_hi == left_hi); |
5930 | ASSERT(!can_overflow()); |
5931 | |
5932 | if (locs()->in(1).IsConstant()) { |
5933 | EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi, |
5934 | locs()->in(1).constant()); |
5935 | } else { |
5936 | // Code for a variable shift amount (or constant that throws). |
5937 | ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX); |
5938 | Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg(); |
5939 | |
5940 | // Jump to a slow path if shift count is > 63 or negative. |
    ShiftInt64OpSlowPath* slow_path = nullptr;
5942 | if (!IsShiftCountInRange()) { |
5943 | slow_path = |
5944 | new (Z) ShiftInt64OpSlowPath(this, compiler->CurrentTryIndex()); |
5945 | compiler->AddSlowPathCode(slow_path); |
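      // A non-zero high word means the count is negative or does not fit in
      // 32 bits; the unsigned compare of the low word (in ECX) against
      // kShiftCountLimit then catches the remaining out-of-range counts.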
5946 | __ testl(right_hi, right_hi); |
5947 | __ j(NOT_ZERO, slow_path->entry_label()); |
5948 | __ cmpl(ECX, compiler::Immediate(kShiftCountLimit)); |
5949 | __ j(ABOVE, slow_path->entry_label()); |
5950 | } |
5951 | |
5952 | EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi); |
5953 | |
    if (slow_path != nullptr) {
5955 | __ Bind(slow_path->exit_label()); |
5956 | } |
5957 | } |
5958 | } |
5959 | |
5960 | LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary( |
5961 | Zone* zone, |
5962 | bool opt) const { |
5963 | const intptr_t kNumInputs = 2; |
5964 | const intptr_t kNumTemps = 0; |
5965 | LocationSummary* summary = new (zone) |
5966 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
5967 | summary->set_in(0, Location::Pair(Location::RequiresRegister(), |
5968 | Location::RequiresRegister())); |
5969 | summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX)); |
5970 | summary->set_out(0, Location::SameAsFirstInput()); |
5971 | return summary; |
5972 | } |
5973 | |
5974 | void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5975 | PairLocation* left_pair = locs()->in(0).AsPairLocation(); |
5976 | Register left_lo = left_pair->At(0).reg(); |
5977 | Register left_hi = left_pair->At(1).reg(); |
5978 | PairLocation* out_pair = locs()->out(0).AsPairLocation(); |
5979 | Register out_lo = out_pair->At(0).reg(); |
5980 | Register out_hi = out_pair->At(1).reg(); |
5981 | ASSERT(out_lo == left_lo); |
5982 | ASSERT(out_hi == left_hi); |
5983 | ASSERT(!can_overflow()); |
5984 | |
5985 | if (locs()->in(1).IsConstant()) { |
5986 | EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi, |
5987 | locs()->in(1).constant()); |
5988 | } else { |
5989 | ASSERT(locs()->in(1).reg() == ECX); |
5990 | __ SmiUntag(ECX); |
5991 | |
5992 | // Deoptimize if shift count is > 63 or negative (or not a smi). |
5993 | if (!IsShiftCountInRange()) { |
5994 | ASSERT(CanDeoptimize()); |
5995 | compiler::Label* deopt = |
5996 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op); |
5997 | __ cmpl(ECX, compiler::Immediate(kShiftCountLimit)); |
5998 | __ j(ABOVE, deopt); |
5999 | } |
6000 | |
6001 | EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi); |
6002 | } |
6003 | } |
6004 | |
6005 | class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode { |
6006 | public: |
6007 | static const intptr_t kNumberOfArguments = 0; |
6008 | |
6009 | ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction, intptr_t try_index) |
6010 | : ThrowErrorSlowPathCode(instruction, |
6011 | kArgumentErrorUnboxedInt64RuntimeEntry, |
6012 | kNumberOfArguments, |
6013 | try_index) {} |
6014 | |
  const char* name() override { return "uint32 shift"; }
6016 | |
6017 | void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override { |
6018 | PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation(); |
6019 | Register right_lo = right_pair->At(0).reg(); |
6020 | Register right_hi = right_pair->At(1).reg(); |
6021 | const Register out = instruction()->locs()->out(0).reg(); |
6022 | ASSERT(out == instruction()->locs()->in(0).reg()); |
6023 | |
6024 | compiler::Label throw_error; |
6025 | __ testl(right_hi, right_hi); |
6026 | __ j(NEGATIVE, &throw_error); |
6027 | |
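    // The count is non-negative but >= 32, so the uint32 result is zero.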
6028 | __ xorl(out, out); |
6029 | __ jmp(exit_label()); |
6030 | |
6031 | __ Bind(&throw_error); |
6032 | |
    // Can't pass an unboxed int64 value directly to a runtime call, as all
    // arguments are expected to be tagged (boxed).
    // The unboxed int64 argument is passed through a dedicated slot in Thread.
    // TODO(dartbug.com/33549): Clean this up when unboxed values
    // can be passed as arguments.
6038 | __ movl(compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()), |
6039 | right_lo); |
6040 | __ movl(compiler::Address( |
6041 | THR, Thread::unboxed_int64_runtime_arg_offset() + kWordSize), |
6042 | right_hi); |
6043 | } |
6044 | }; |
6045 | |
6046 | LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone, |
6047 | bool opt) const { |
6048 | const intptr_t kNumInputs = 2; |
6049 | const intptr_t kNumTemps = 0; |
6050 | LocationSummary* summary = new (zone) LocationSummary( |
6051 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); |
6052 | summary->set_in(0, Location::RequiresRegister()); |
6053 | if (RangeUtils::IsPositive(shift_range()) && |
6054 | right()->definition()->IsConstant()) { |
6055 | ConstantInstr* constant = right()->definition()->AsConstant(); |
6056 | summary->set_in(1, Location::Constant(constant)); |
6057 | } else { |
6058 | summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX), |
6059 | Location::RequiresRegister())); |
6060 | } |
6061 | summary->set_out(0, Location::SameAsFirstInput()); |
6062 | return summary; |
6063 | } |
6064 | |
6065 | void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6066 | Register left = locs()->in(0).reg(); |
6067 | Register out = locs()->out(0).reg(); |
6068 | ASSERT(left == out); |
6069 | |
6070 | if (locs()->in(1).IsConstant()) { |
6071 | EmitShiftUint32ByConstant(compiler, op_kind(), left, |
6072 | locs()->in(1).constant()); |
6073 | } else { |
6074 | // Code for a variable shift amount (or constant that throws). |
6075 | ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX); |
6076 | Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg(); |
6077 | |
6078 | // Jump to a slow path if shift count is > 31 or negative. |
    ShiftUint32OpSlowPath* slow_path = nullptr;
6080 | if (!IsShiftCountInRange(kUint32ShiftCountLimit)) { |
6081 | slow_path = |
6082 | new (Z) ShiftUint32OpSlowPath(this, compiler->CurrentTryIndex()); |
6083 | compiler->AddSlowPathCode(slow_path); |
6084 | |
6085 | __ testl(right_hi, right_hi); |
6086 | __ j(NOT_ZERO, slow_path->entry_label()); |
6087 | __ cmpl(ECX, compiler::Immediate(kUint32ShiftCountLimit)); |
6088 | __ j(ABOVE, slow_path->entry_label()); |
6089 | } |
6090 | |
6091 | EmitShiftUint32ByECX(compiler, op_kind(), left); |
6092 | |
    if (slow_path != nullptr) {
6094 | __ Bind(slow_path->exit_label()); |
6095 | } |
6096 | } |
6097 | } |
6098 | |
6099 | LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary( |
6100 | Zone* zone, |
6101 | bool opt) const { |
6102 | const intptr_t kNumInputs = 2; |
6103 | const intptr_t kNumTemps = 0; |
6104 | LocationSummary* summary = new (zone) |
6105 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6106 | summary->set_in(0, Location::RequiresRegister()); |
6107 | summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX)); |
6108 | summary->set_out(0, Location::SameAsFirstInput()); |
6109 | return summary; |
6110 | } |
6111 | |
6112 | void SpeculativeShiftUint32OpInstr::EmitNativeCode( |
6113 | FlowGraphCompiler* compiler) { |
6114 | Register left = locs()->in(0).reg(); |
6115 | Register out = locs()->out(0).reg(); |
6116 | ASSERT(left == out); |
6117 | |
6118 | if (locs()->in(1).IsConstant()) { |
6119 | EmitShiftUint32ByConstant(compiler, op_kind(), left, |
6120 | locs()->in(1).constant()); |
6121 | } else { |
6122 | ASSERT(locs()->in(1).reg() == ECX); |
6123 | __ SmiUntag(ECX); |
6124 | |
6125 | if (!IsShiftCountInRange(kUint32ShiftCountLimit)) { |
6126 | if (!IsShiftCountInRange()) { |
6127 | // Deoptimize if shift count is negative. |
6128 | ASSERT(CanDeoptimize()); |
6129 | compiler::Label* deopt = |
6130 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op); |
6131 | |
6132 | __ testl(ECX, ECX); |
6133 | __ j(LESS, deopt); |
6134 | } |
6135 | |
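      // Counts in [32, 63] must produce zero, but the hardware masks the
      // count in CL to 5 bits, so clear the operand first; the masked shift
      // below then leaves it at zero.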
6136 | compiler::Label cont; |
6137 | __ cmpl(ECX, compiler::Immediate(kUint32ShiftCountLimit)); |
6138 | __ j(LESS_EQUAL, &cont); |
6139 | |
6140 | __ xorl(left, left); |
6141 | |
6142 | __ Bind(&cont); |
6143 | } |
6144 | |
6145 | EmitShiftUint32ByECX(compiler, op_kind(), left); |
6146 | } |
6147 | } |
6148 | |
6149 | LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone, |
6150 | bool opt) const { |
6151 | const intptr_t kNumInputs = 1; |
6152 | const intptr_t kNumTemps = 0; |
6153 | LocationSummary* summary = new (zone) |
6154 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6155 | summary->set_in(0, Location::Pair(Location::RequiresRegister(), |
6156 | Location::RequiresRegister())); |
6157 | summary->set_out(0, Location::SameAsFirstInput()); |
6158 | return summary; |
6159 | } |
6160 | |
6161 | void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6162 | PairLocation* left_pair = locs()->in(0).AsPairLocation(); |
6163 | Register left_lo = left_pair->At(0).reg(); |
6164 | Register left_hi = left_pair->At(1).reg(); |
6165 | PairLocation* out_pair = locs()->out(0).AsPairLocation(); |
6166 | Register out_lo = out_pair->At(0).reg(); |
6167 | Register out_hi = out_pair->At(1).reg(); |
6168 | ASSERT(out_lo == left_lo); |
6169 | ASSERT(out_hi == left_hi); |
6170 | switch (op_kind()) { |
6171 | case Token::kBIT_NOT: |
6172 | __ notl(left_lo); |
6173 | __ notl(left_hi); |
6174 | break; |
6175 | case Token::kNEGATE: |
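      // Two's-complement negation of the pair: negl sets the carry flag when
      // the low word is non-zero, so the high word becomes -(hi + carry).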
6176 | __ negl(left_lo); |
6177 | __ adcl(left_hi, compiler::Immediate(0)); |
6178 | __ negl(left_hi); |
6179 | break; |
6180 | default: |
6181 | UNREACHABLE(); |
6182 | } |
6183 | } |
6184 | |
6185 | LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone, |
6186 | bool opt) const { |
6187 | const intptr_t kNumInputs = 1; |
6188 | const intptr_t kNumTemps = 0; |
6189 | LocationSummary* summary = new (zone) |
6190 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6191 | summary->set_in(0, Location::RequiresRegister()); |
6192 | summary->set_out(0, Location::SameAsFirstInput()); |
6193 | return summary; |
6194 | } |
6195 | |
6196 | void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6197 | Register out = locs()->out(0).reg(); |
6198 | ASSERT(locs()->in(0).reg() == out); |
6199 | |
6200 | ASSERT(op_kind() == Token::kBIT_NOT); |
6201 | |
6202 | __ notl(out); |
6203 | } |
6204 | |
6205 | LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone, |
6206 | bool opt) const { |
6207 | const intptr_t kNumInputs = 1; |
6208 | const intptr_t kNumTemps = 0; |
6209 | LocationSummary* summary = new (zone) |
6210 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6211 | |
6212 | if (from() == kUntagged || to() == kUntagged) { |
6213 | ASSERT((from() == kUntagged && to() == kUnboxedInt32) || |
6214 | (from() == kUntagged && to() == kUnboxedUint32) || |
6215 | (from() == kUnboxedInt32 && to() == kUntagged) || |
6216 | (from() == kUnboxedUint32 && to() == kUntagged)); |
6217 | ASSERT(!CanDeoptimize()); |
6218 | summary->set_in(0, Location::RequiresRegister()); |
6219 | summary->set_out(0, Location::SameAsFirstInput()); |
6220 | } else if ((from() == kUnboxedInt32 || from() == kUnboxedUint32) && |
6221 | (to() == kUnboxedInt32 || to() == kUnboxedUint32)) { |
6222 | summary->set_in(0, Location::RequiresRegister()); |
6223 | summary->set_out(0, Location::SameAsFirstInput()); |
6224 | } else if (from() == kUnboxedInt64) { |
6225 | summary->set_in( |
6226 | 0, Location::Pair(CanDeoptimize() ? Location::WritableRegister() |
6227 | : Location::RequiresRegister(), |
6228 | Location::RequiresRegister())); |
6229 | summary->set_out(0, Location::RequiresRegister()); |
6230 | } else if (from() == kUnboxedUint32) { |
6231 | summary->set_in(0, Location::RequiresRegister()); |
6232 | summary->set_out(0, Location::Pair(Location::RequiresRegister(), |
6233 | Location::RequiresRegister())); |
6234 | } else if (from() == kUnboxedInt32) { |
6235 | summary->set_in(0, Location::RegisterLocation(EAX)); |
6236 | summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX), |
6237 | Location::RegisterLocation(EDX))); |
6238 | } |
6239 | |
6240 | return summary; |
6241 | } |
6242 | |
6243 | void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6244 | const bool is_nop_conversion = |
6245 | (from() == kUntagged && to() == kUnboxedInt32) || |
6246 | (from() == kUntagged && to() == kUnboxedUint32) || |
6247 | (from() == kUnboxedInt32 && to() == kUntagged) || |
6248 | (from() == kUnboxedUint32 && to() == kUntagged); |
6249 | if (is_nop_conversion) { |
6250 | ASSERT(locs()->in(0).reg() == locs()->out(0).reg()); |
6251 | return; |
6252 | } |
6253 | |
6254 | if (from() == kUnboxedInt32 && to() == kUnboxedUint32) { |
6255 | // Representations are bitwise equivalent. |
6256 | ASSERT(locs()->out(0).reg() == locs()->in(0).reg()); |
6257 | } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) { |
6258 | // Representations are bitwise equivalent. |
6259 | ASSERT(locs()->out(0).reg() == locs()->in(0).reg()); |
6260 | if (CanDeoptimize()) { |
6261 | compiler::Label* deopt = |
6262 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); |
6263 | __ testl(locs()->out(0).reg(), locs()->out(0).reg()); |
6264 | __ j(NEGATIVE, deopt); |
6265 | } |
6266 | } else if (from() == kUnboxedInt64) { |
    // TODO(vegorov) the kUnboxedInt64 -> kUnboxedInt32 conversion is
    // currently usually dominated by a CheckSmi(BoxInt64(val)), which is an
    // artifact of the ordering of optimization passes and the way we check
    // smi-ness of values. Optimize it away.
6271 | ASSERT(to() == kUnboxedInt32 || to() == kUnboxedUint32); |
6272 | PairLocation* in_pair = locs()->in(0).AsPairLocation(); |
6273 | Register in_lo = in_pair->At(0).reg(); |
6274 | Register in_hi = in_pair->At(1).reg(); |
6275 | Register out = locs()->out(0).reg(); |
6276 | // Copy low word. |
6277 | __ movl(out, in_lo); |
6278 | if (CanDeoptimize()) { |
6279 | compiler::Label* deopt = |
6280 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); |
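      // The value fits in 32 bits iff its high word equals the sign
      // extension of its low word.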
6281 | __ sarl(in_lo, compiler::Immediate(31)); |
6282 | __ cmpl(in_lo, in_hi); |
6283 | __ j(NOT_EQUAL, deopt); |
6284 | } |
6285 | } else if (from() == kUnboxedUint32) { |
6286 | ASSERT(to() == kUnboxedInt64); |
6287 | Register in = locs()->in(0).reg(); |
6288 | PairLocation* out_pair = locs()->out(0).AsPairLocation(); |
6289 | Register out_lo = out_pair->At(0).reg(); |
6290 | Register out_hi = out_pair->At(1).reg(); |
6291 | // Copy low word. |
6292 | __ movl(out_lo, in); |
6293 | // Zero upper word. |
6294 | __ xorl(out_hi, out_hi); |
6295 | } else if (from() == kUnboxedInt32) { |
6296 | ASSERT(to() == kUnboxedInt64); |
6297 | PairLocation* out_pair = locs()->out(0).AsPairLocation(); |
6298 | Register out_lo = out_pair->At(0).reg(); |
6299 | Register out_hi = out_pair->At(1).reg(); |
6300 | ASSERT(locs()->in(0).reg() == EAX); |
6301 | ASSERT(out_lo == EAX && out_hi == EDX); |
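    // cdq sign-extends EAX into EDX, producing the 64-bit value in EDX:EAX.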
6302 | __ cdq(); |
6303 | } else { |
6304 | UNREACHABLE(); |
6305 | } |
6306 | } |
6307 | |
6308 | LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
6309 | return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); |
6310 | } |
6311 | |
6312 | void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6313 | __ Stop(message()); |
6314 | } |
6315 | |
6316 | void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6317 | BlockEntryInstr* entry = normal_entry(); |
6318 | if (entry != nullptr) { |
6319 | if (!compiler->CanFallThroughTo(entry)) { |
6320 | FATAL("Checked function entry must have no offset" ); |
6321 | } |
6322 | } else { |
6323 | entry = osr_entry(); |
6324 | if (!compiler->CanFallThroughTo(entry)) { |
6325 | __ jmp(compiler->GetJumpLabel(entry)); |
6326 | } |
6327 | } |
6328 | } |
6329 | |
6330 | LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
6331 | return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); |
6332 | } |
6333 | |
6334 | void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6335 | if (!compiler->is_optimizing()) { |
6336 | if (FLAG_reorder_basic_blocks) { |
6337 | compiler->EmitEdgeCounter(block()->preorder_number()); |
6338 | } |
6339 | // Add a deoptimization descriptor for deoptimizing instructions that |
6340 | // may be inserted before this instruction. |
6341 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(), |
6342 | TokenPosition::kNoSource); |
6343 | } |
6344 | if (HasParallelMove()) { |
6345 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
6346 | } |
6347 | |
6348 | // We can fall through if the successor is the next block in the list. |
6349 | // Otherwise, we need a jump. |
6350 | if (!compiler->CanFallThroughTo(successor())) { |
6351 | __ jmp(compiler->GetJumpLabel(successor())); |
6352 | } |
6353 | } |
6354 | |
6355 | LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone, |
6356 | bool opt) const { |
6357 | const intptr_t kNumInputs = 1; |
6358 | const intptr_t kNumTemps = 1; |
6359 | |
6360 | LocationSummary* summary = new (zone) |
6361 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6362 | |
6363 | summary->set_in(0, Location::RequiresRegister()); |
6364 | summary->set_temp(0, Location::RequiresRegister()); |
6365 | |
6366 | return summary; |
6367 | } |
6368 | |
6369 | void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6370 | Register target_reg = locs()->temp_slot(0)->reg(); |
6371 | |
6372 | // Load code object from frame. |
6373 | __ movl(target_reg, |
6374 | compiler::Address( |
6375 | EBP, compiler::target::frame_layout.code_from_fp * kWordSize)); |
  // Load the instructions object (active_instructions and Code::entry_point()
  // may no longer point to this instructions object; see
  // Code::DisableDartCode).
6378 | __ movl(target_reg, compiler::FieldAddress( |
6379 | target_reg, Code::saved_instructions_offset())); |
6380 | __ addl(target_reg, |
6381 | compiler::Immediate(Instructions::HeaderSize() - kHeapObjectTag)); |
6382 | |
6383 | // Add the offset. |
6384 | Register offset_reg = locs()->in(0).reg(); |
6385 | if (offset()->definition()->representation() == kTagged) { |
6386 | __ SmiUntag(offset_reg); |
6387 | } |
6388 | __ addl(target_reg, offset_reg); |
6389 | |
6390 | // Jump to the absolute address. |
6391 | __ jmp(target_reg); |
6392 | } |
6393 | |
6394 | LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone, |
6395 | bool opt) const { |
6396 | const intptr_t kNumInputs = 2; |
6397 | const intptr_t kNumTemps = 0; |
6398 | if (needs_number_check()) { |
6399 | LocationSummary* locs = new (zone) |
6400 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
6401 | locs->set_in(0, Location::RegisterLocation(EAX)); |
6402 | locs->set_in(1, Location::RegisterLocation(ECX)); |
6403 | locs->set_out(0, Location::RegisterLocation(EAX)); |
6404 | return locs; |
6405 | } |
6406 | LocationSummary* locs = new (zone) |
6407 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6408 | locs->set_in(0, LocationRegisterOrConstant(left())); |
  // At most one of the inputs can be a constant: if the first one is,
  // require a register for the second.
6411 | locs->set_in(1, locs->in(0).IsConstant() |
6412 | ? Location::RequiresRegister() |
6413 | : LocationRegisterOrConstant(right())); |
6414 | locs->set_out(0, Location::RequiresRegister()); |
6415 | return locs; |
6416 | } |
6417 | |
6418 | Condition StrictCompareInstr::EmitComparisonCodeRegConstant( |
6419 | FlowGraphCompiler* compiler, |
6420 | BranchLabels labels, |
6421 | Register reg, |
6422 | const Object& obj) { |
6423 | return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(), |
6424 | token_pos(), deopt_id()); |
6425 | } |
6426 | |
6427 | // Detect pattern when one value is zero and another is a power of 2. |
6428 | static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) { |
6429 | return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) || |
6430 | (Utils::IsPowerOfTwo(v2) && (v1 == 0)); |
6431 | } |
6432 | |
6433 | LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone, |
6434 | bool opt) const { |
6435 | comparison()->InitializeLocationSummary(zone, opt); |
6436 | // TODO(dartbug.com/30953): support byte register constraints in the |
6437 | // register allocator. |
6438 | comparison()->locs()->set_out(0, Location::RegisterLocation(EDX)); |
6439 | return comparison()->locs(); |
6440 | } |
6441 | |
6442 | void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6443 | ASSERT(locs()->out(0).reg() == EDX); |
6444 | |
  // Clear the upper bits of the out register; the setcc below only writes
  // its low byte.
6447 | __ xorl(EDX, EDX); |
6448 | |
6449 | // Emit comparison code. This must not overwrite the result register. |
6450 | // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using |
6451 | // the labels or returning an invalid condition. |
  BranchLabels labels = {nullptr, nullptr, nullptr};
6453 | Condition true_condition = comparison()->EmitComparisonCode(compiler, labels); |
6454 | ASSERT(true_condition != kInvalidCondition); |
6455 | |
6456 | const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_); |
6457 | |
6458 | intptr_t true_value = if_true_; |
6459 | intptr_t false_value = if_false_; |
6460 | |
6461 | if (is_power_of_two_kind) { |
6462 | if (true_value == 0) { |
      // The result must be zero in EDX when true_condition holds, so
      // invert the condition: setcc then writes 0 in that case.
6464 | true_condition = InvertCondition(true_condition); |
6465 | } |
6466 | } else { |
6467 | if (true_value == 0) { |
6468 | // Swap values so that false_value is zero. |
6469 | intptr_t temp = true_value; |
6470 | true_value = false_value; |
6471 | false_value = temp; |
6472 | } else { |
6473 | true_condition = InvertCondition(true_condition); |
6474 | } |
6475 | } |
6476 | |
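  // After the adjustments above, setcc produces 1 exactly when the non-zero
  // value must be selected (power-of-two case) or when false_value must be
  // selected (general case). The shll below turns that 0/1 directly into
  // Smi(0) or Smi(2^shift); in the general case decl maps 1 -> 0 and
  // 0 -> ~0, so the andl/addl sequence yields Smi(false_value) or
  // Smi(true_value).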
6477 | __ setcc(true_condition, DL); |
6478 | |
6479 | if (is_power_of_two_kind) { |
6480 | const intptr_t shift = |
6481 | Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value)); |
6482 | __ shll(EDX, compiler::Immediate(shift + kSmiTagSize)); |
6483 | } else { |
6484 | __ decl(EDX); |
6485 | __ andl(EDX, compiler::Immediate(Smi::RawValue(true_value) - |
6486 | Smi::RawValue(false_value))); |
6487 | if (false_value != 0) { |
6488 | __ addl(EDX, compiler::Immediate(Smi::RawValue(false_value))); |
6489 | } |
6490 | } |
6491 | } |
6492 | |
6493 | LocationSummary* DispatchTableCallInstr::MakeLocationSummary(Zone* zone, |
6494 | bool opt) const { |
6495 | // Only generated with precompilation. |
6496 | UNREACHABLE(); |
  return nullptr;
6498 | } |
6499 | |
6500 | LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone, |
6501 | bool opt) const { |
6502 | const intptr_t kNumInputs = 1; |
6503 | const intptr_t kNumTemps = 0; |
6504 | LocationSummary* summary = new (zone) |
6505 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
6506 | summary->set_in(0, Location::RegisterLocation(EAX)); // Function. |
6507 | summary->set_out(0, Location::RegisterLocation(EAX)); |
6508 | return summary; |
6509 | } |
6510 | |
6511 | void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6512 | // Load arguments descriptor. |
6513 | const intptr_t argument_count = ArgumentCount(); // Includes type args. |
6514 | const Array& arguments_descriptor = |
6515 | Array::ZoneHandle(Z, GetArgumentsDescriptor()); |
6516 | __ LoadObject(EDX, arguments_descriptor); |
6517 | |
6518 | // EBX: Code (compiled code or lazy compile stub). |
6519 | ASSERT(locs()->in(0).reg() == EAX); |
6520 | __ movl(EBX, compiler::FieldAddress( |
6521 | EAX, Function::entry_point_offset(entry_kind()))); |
6522 | |
6523 | // EAX: Function. |
6524 | // EDX: Arguments descriptor array. |
6525 | // ECX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value). |
6526 | __ xorl(ECX, ECX); |
6527 | __ call(EBX); |
6528 | compiler->EmitCallsiteMetadata(token_pos(), deopt_id(), |
6529 | PcDescriptorsLayout::kOther, locs()); |
6530 | __ Drop(argument_count); |
6531 | } |
6532 | |
6533 | LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone, |
6534 | bool opt) const { |
6535 | return LocationSummary::Make(zone, 1, |
6536 | value()->Type()->ToCid() == kBoolCid |
6537 | ? Location::SameAsFirstInput() |
6538 | : Location::RequiresRegister(), |
6539 | LocationSummary::kNoCall); |
6540 | } |
6541 | |
6542 | void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6543 | Register input = locs()->in(0).reg(); |
6544 | Register result = locs()->out(0).reg(); |
6545 | |
6546 | if (value()->Type()->ToCid() == kBoolCid) { |
6547 | ASSERT(input == result); |
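    // Bool::True() and Bool::False() are allocated so that their tagged
    // pointers differ only in the bit selected by kBoolValueMask, which
    // makes negation a single xor.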
6548 | __ xorl(result, compiler::Immediate( |
6549 | compiler::target::ObjectAlignment::kBoolValueMask)); |
6550 | } else { |
6551 | ASSERT(input != result); |
6552 | compiler::Label done; |
6553 | __ LoadObject(result, Bool::True()); |
6554 | __ CompareRegisters(result, input); |
6555 | __ j(NOT_EQUAL, &done, compiler::Assembler::kNearJump); |
6556 | __ LoadObject(result, Bool::False()); |
6557 | __ Bind(&done); |
6558 | } |
6559 | } |
6560 | |
6561 | LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone, |
6562 | bool opt) const { |
6563 | const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0; |
6564 | const intptr_t kNumTemps = 0; |
6565 | LocationSummary* locs = new (zone) |
6566 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
6567 | if (type_arguments() != nullptr) { |
6568 | locs->set_in(0, |
6569 | Location::RegisterLocation(kAllocationStubTypeArgumentsReg)); |
6570 | } |
6571 | locs->set_out(0, Location::RegisterLocation(EAX)); |
6572 | return locs; |
6573 | } |
6574 | |
6575 | void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6576 | const Code& stub = Code::ZoneHandle( |
6577 | compiler->zone(), StubCode::GetAllocationStubForClass(cls())); |
6578 | compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther, |
6579 | locs()); |
6580 | } |
6581 | |
6582 | void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6583 | #ifdef PRODUCT |
6584 | UNREACHABLE(); |
6585 | #else |
6586 | ASSERT(!compiler->is_optimizing()); |
6587 | __ Call(StubCode::DebugStepCheck()); |
6588 | compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, token_pos()); |
6589 | compiler->RecordSafepoint(locs()); |
6590 | #endif |
6591 | } |
6592 | |
6593 | } // namespace dart |
6594 | |
6595 | #undef __ |
6596 | |
6597 | #endif // defined(TARGET_ARCH_IA32) |
6598 | |