1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4//
5// The intrinsic code below is executed before a method has built its frame.
6// The return address is on the stack and the arguments below it.
7// Registers EDX (arguments descriptor) and ECX (function) must be preserved.
8// Each intrinsification method returns true if the corresponding
9// Dart method was intrinsified.
10
11#include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32.
12#if defined(TARGET_ARCH_IA32)
13
14#define SHOULD_NOT_INCLUDE_RUNTIME
15
16#include "vm/class_id.h"
17#include "vm/compiler/asm_intrinsifier.h"
18#include "vm/compiler/assembler/assembler.h"
19
20namespace dart {
21namespace compiler {
22
23// When entering intrinsics code:
24// ECX: IC Data
25// EDX: Arguments descriptor
26// TOS: Return address
27// The ECX, EDX registers can be destroyed only if there is no slow-path, i.e.
28// if the intrinsified method always executes a return.
29// The EBP register should not be modified, because it is used by the profiler.
30// The THR register (see constants_ia32.h) must be preserved.
31
32#define __ assembler->
33
// Offset (in word-sized slots) of the first parameter relative to SP.
// Per the file header, intrinsics run before a frame is built: the return
// address is at TOS and the arguments sit directly below it, so the base
// offset is 0.
intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
  return 0;
}
37
// Saves the arguments-descriptor register into the callee-saved temp so it
// survives the intrinsic body; IntrinsicCallEpilogue restores it for the
// slow path.
void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
  // The save would be a no-op (and the epilogue restore wrong) if the two
  // registers aliased.
  COMPILE_ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG);

  assembler->Comment("IntrinsicCallPrologue");
  assembler->movl(CALLEE_SAVED_TEMP, ARGS_DESC_REG);
}
44
// Restores the arguments-descriptor register saved by IntrinsicCallPrologue,
// as required when the intrinsic falls through to the normal IR body.
void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
  assembler->Comment("IntrinsicCallEpilogue");
  assembler->movl(ARGS_DESC_REG, CALLEE_SAVED_TEMP);
}
49
// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+2), data (+1), return-address (+0).
void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
                                             Label* normal_ir_body) {
  // This snippet of inlined code uses the following registers:
  // EAX, EBX
  // and the newly allocated object is returned in EAX.
  const intptr_t kTypeArgumentsOffset = 2 * target::kWordSize;

  const intptr_t kArrayOffset = 1 * target::kWordSize;

  // Try allocating in new space; jumps to normal_ir_body (slow path) when the
  // inline allocation cannot be done.
  const Class& cls = GrowableObjectArrayClass();
  __ TryAllocate(cls, normal_ir_body, Assembler::kNearJump, EAX, EBX);

  // Store backing array object in growable array object.
  __ movl(EBX, Address(ESP, kArrayOffset));  // data argument.
  // EAX is new, no barrier needed.
  __ StoreIntoObjectNoBarrier(
      EAX, FieldAddress(EAX, target::GrowableObjectArray::data_offset()), EBX);

  // EAX: new growable array object start as a tagged pointer.
  // Store the type argument field in the growable array object.
  __ movl(EBX, Address(ESP, kTypeArgumentsOffset));  // type argument.
  __ StoreIntoObjectNoBarrier(
      EAX,
      FieldAddress(EAX, target::GrowableObjectArray::type_arguments_offset()),
      EBX);

  // A fresh growable array starts out empty: length = Smi 0.
  __ ZeroInitSmiField(
      FieldAddress(EAX, target::GrowableObjectArray::length_offset()));
  __ ret();  // returns the newly allocated object in EAX.

  __ Bind(normal_ir_body);
}
85
// Emits inline new-space allocation of a typed-data object with class id
// `cid`. The requested length (a Smi) is read from the stack; the slow path
// `normal_ir_body` is taken for a non-Smi, negative, or too-large length, or
// when the object does not fit in the remaining new space. On success the
// element storage is zero-filled and the tagged object is returned in EAX.
// Expanded inside factory functions that declare `assembler` and
// `normal_ir_body` (see TYPED_DATA_ALLOCATOR below).
#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_factor)                     \
  const intptr_t kArrayLengthStackOffset = 1 * target::kWordSize;              \
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, EDI, normal_ir_body, false));    \
  __ movl(EDI, Address(ESP, kArrayLengthStackOffset)); /* Array length. */     \
  /* Check that length is a positive Smi. */                                   \
  /* EDI: requested array length argument. */                                  \
  __ testl(EDI, Immediate(kSmiTagMask));                                       \
  __ j(NOT_ZERO, normal_ir_body);                                              \
  __ cmpl(EDI, Immediate(0));                                                  \
  __ j(LESS, normal_ir_body);                                                  \
  __ SmiUntag(EDI);                                                            \
  /* Check for maximum allowed length. */                                      \
  /* EDI: untagged array length. */                                            \
  __ cmpl(EDI, Immediate(max_len));                                            \
  __ j(GREATER, normal_ir_body);                                               \
  /* Special case for scaling by 16 (no TIMES_16 addressing mode). */          \
  if (scale_factor == TIMES_16) {                                              \
    /* double length of array. */                                              \
    __ addl(EDI, EDI);                                                         \
    /* only scale by 8. */                                                     \
    scale_factor = TIMES_8;                                                    \
  }                                                                            \
  const intptr_t fixed_size_plus_alignment_padding =                           \
      target::TypedData::InstanceSize() +                                      \
      target::ObjectAlignment::kObjectAlignment - 1;                           \
  __ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding)); \
  __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment));         \
  __ movl(EAX, Address(THR, target::Thread::top_offset()));                    \
  __ movl(EBX, EAX);                                                           \
                                                                               \
  /* EDI: allocation size. */                                                  \
  __ addl(EBX, EDI);                                                           \
  __ j(CARRY, normal_ir_body);                                                 \
                                                                               \
  /* Check if the allocation fits into the remaining space. */                 \
  /* EAX: potential new object start. */                                       \
  /* EBX: potential next object start. */                                      \
  /* EDI: allocation size. */                                                  \
  __ cmpl(EBX, Address(THR, target::Thread::end_offset()));                    \
  __ j(ABOVE_EQUAL, normal_ir_body);                                           \
                                                                               \
  /* Successfully allocated the object(s), now update top to point to */       \
  /* next object start and initialize the object. */                           \
  __ movl(Address(THR, target::Thread::top_offset()), EBX);                    \
  __ addl(EAX, Immediate(kHeapObjectTag));                                     \
                                                                               \
  /* Initialize the tags. */                                                   \
  /* EAX: new object start as a tagged pointer. */                             \
  /* EBX: new object end address. */                                           \
  /* EDI: allocation size. */                                                  \
  {                                                                            \
    Label size_tag_overflow, done;                                             \
    __ cmpl(EDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));         \
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);                     \
    __ shll(EDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -          \
                           target::ObjectAlignment::kObjectAlignmentLog2));    \
    __ jmp(&done, Assembler::kNearJump);                                       \
                                                                               \
    __ Bind(&size_tag_overflow);                                               \
    /* Size does not fit in the tag: encoded size 0 (looked up elsewhere). */  \
    __ movl(EDI, Immediate(0));                                                \
    __ Bind(&done);                                                            \
                                                                               \
    /* Get the class index and insert it into the tags. */                     \
    uint32_t tags =                                                            \
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);        \
    __ orl(EDI, Immediate(tags));                                              \
    __ movl(FieldAddress(EAX, target::Object::tags_offset()),                  \
            EDI); /* Tags. */                                                  \
  }                                                                            \
  /* Set the length field. */                                                  \
  /* EAX: new object start as a tagged pointer. */                             \
  /* EBX: new object end address. */                                           \
  __ movl(EDI, Address(ESP, kArrayLengthStackOffset)); /* Array length. */     \
  __ StoreIntoObjectNoBarrier(                                                 \
      EAX, FieldAddress(EAX, target::TypedDataBase::length_offset()), EDI);    \
  /* Initialize all array elements to 0. */                                    \
  /* EAX: new object start as a tagged pointer. */                             \
  /* EBX: new object end address. */                                           \
  /* EDI: iterator which initially points to the start of the variable */     \
  /*      data area to be initialized. */                                      \
  /* ECX: scratch register. */                                                 \
  __ xorl(ECX, ECX); /* Zero. */                                               \
  __ leal(EDI, FieldAddress(EAX, target::TypedData::InstanceSize()));          \
  __ StoreInternalPointer(                                                     \
      EAX, FieldAddress(EAX, target::TypedDataBase::data_field_offset()),      \
      EDI);                                                                    \
  Label done, init_loop;                                                       \
  __ Bind(&init_loop);                                                         \
  __ cmpl(EDI, EBX);                                                           \
  __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);                              \
  __ movl(Address(EDI, 0), ECX);                                               \
  __ addl(EDI, Immediate(target::kWordSize));                                  \
  __ jmp(&init_loop, Assembler::kNearJump);                                    \
  __ Bind(&done);                                                              \
                                                                               \
  __ ret();                                                                    \
  __ Bind(normal_ir_body);
183
184static ScaleFactor GetScaleFactor(intptr_t size) {
185 switch (size) {
186 case 1:
187 return TIMES_1;
188 case 2:
189 return TIMES_2;
190 case 4:
191 return TIMES_4;
192 case 8:
193 return TIMES_8;
194 case 16:
195 return TIMES_16;
196 }
197 UNREACHABLE();
198 return static_cast<ScaleFactor>(0);
199}
200
// Defines AsmIntrinsifier::TypedData_<clazz>_factory for every typed-data
// class in CLASS_LIST_TYPED_DATA. Each factory inline-allocates via
// TYPED_ARRAY_ALLOCATION, falling through to normal_ir_body on failure.
#define TYPED_DATA_ALLOCATOR(clazz)                                            \
  void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler,      \
                                                    Label* normal_ir_body) {   \
    intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid);       \
    intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid);   \
    ScaleFactor scale = GetScaleFactor(size);                                  \
    TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, scale);            \
  }
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR
211
// Tests if two top most arguments are smis, jumps to label not_smi if not.
// Topmost argument is in EAX, the other argument in EBX.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));
  // OR-ing the two values lets a single tag test cover both: the combined
  // tag bit is zero only when both are Smis.
  __ orl(EBX, EAX);
  __ testl(EBX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, not_smi, Assembler::kNearJump);
}
221
// Smi + Smi. Falls through to normal_ir_body on non-Smi args or overflow.
// Note that adding two tagged Smis directly yields the correctly tagged sum.
void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ addl(EAX, Address(ESP, +2 * target::kWordSize));
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
231
// Addition is commutative, so reuse the reversed-operand variant.
void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
  Integer_addFromInteger(assembler, normal_ir_body);
}
235
// Smi - Smi with operands in receiver-last order (EAX holds the minuend).
// Falls through to normal_ir_body on non-Smi args or overflow.
void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ subl(EAX, Address(ESP, +2 * target::kWordSize));
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
245
// Smi - Smi. Subtraction is not commutative, so unlike Integer_add this
// swaps the operands explicitly before subtracting.
void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ movl(EBX, EAX);
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));
  __ subl(EAX, EBX);
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
256
// Smi * Smi. Only one operand is untagged before multiplying; the product
// of an untagged and a tagged Smi is correctly tagged (requires kSmiTag==0).
void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
  __ SmiUntag(EAX);
  __ imull(EAX, Address(ESP, +2 * target::kWordSize));
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
268
// Multiplication is commutative, so reuse the reversed-operand variant.
void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
  Integer_mulFromInteger(assembler, normal_ir_body);
}
272
// Optimizations:
// - result is 0 if:
//   - left is 0
//   - left equals right
// - result is left if
//   - left > 0 && left < right
// EAX: Tagged left (dividend).
// EBX: Tagged right (divisor).
// Returns:
//   EDX: Untagged fallthrough result (remainder to be adjusted), or
//   EAX: Tagged return result (remainder).
// Either returns directly (fast paths) or falls through with the raw idivl
// remainder in EDX for the caller to sign-adjust and tag.
static void EmitRemainderOperation(Assembler* assembler) {
  Label return_zero, modulo;
  // Check for quick zero results.
  __ cmpl(EAX, Immediate(0));
  __ j(EQUAL, &return_zero, Assembler::kNearJump);
  __ cmpl(EAX, EBX);
  __ j(EQUAL, &return_zero, Assembler::kNearJump);

  // Check if result equals left.
  __ cmpl(EAX, Immediate(0));
  __ j(LESS, &modulo, Assembler::kNearJump);
  // left is positive.
  __ cmpl(EAX, EBX);
  __ j(GREATER, &modulo, Assembler::kNearJump);
  // left is less than right, result is left (EAX).
  __ ret();

  __ Bind(&return_zero);
  __ xorl(EAX, EAX);
  __ ret();

  __ Bind(&modulo);
  // Slow path: compute the hardware remainder. cdq sign-extends EAX into
  // EDX:EAX as idivl requires; the remainder lands in EDX.
  __ SmiUntag(EBX);
  __ SmiUntag(EAX);
  __ cdq();
  __ idivl(EBX);
}
311
// Implementation:
//  res = left % right;
//  if (res < 0) {
//    if (right < 0) {
//      res = res - right;
//    } else {
//      res = res + right;
//    }
//  }
// This adjustment makes the result non-negative (Euclidean modulo), matching
// Dart's % semantics rather than the hardware's truncated remainder.
void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label subtract;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));
  // EAX: Tagged left (dividend).
  // EBX: Tagged right (divisor).
  // Check if modulo by zero -> exception thrown in main function.
  __ cmpl(EBX, Immediate(0));
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);
  EmitRemainderOperation(assembler);
  // Untagged remainder result in EDX.
  Label done;
  __ movl(EAX, EDX);
  __ cmpl(EAX, Immediate(0));
  __ j(GREATER_EQUAL, &done, Assembler::kNearJump);
  // Result is negative, adjust it.
  __ cmpl(EBX, Immediate(0));
  __ j(LESS, &subtract, Assembler::kNearJump);
  __ addl(EAX, EBX);
  __ SmiTag(EAX);
  __ ret();

  __ Bind(&subtract);
  __ subl(EAX, EBX);

  __ Bind(&done);
  // The remainder of two smis is always a smi, no overflow check needed.
  __ SmiTag(EAX);
  __ ret();

  __ Bind(normal_ir_body);
}
354
// Smi ~/ Smi (truncating division). Falls through to normal_ir_body for
// non-Smi args, division by zero (exception thrown there), or a quotient
// that does not fit in a Smi.
void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
                                          Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // EAX: right argument (divisor)
  __ cmpl(EAX, Immediate(0));
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);
  __ movl(EBX, EAX);
  __ SmiUntag(EBX);
  __ movl(EAX,
          Address(ESP, +2 * target::kWordSize));  // Left argument (dividend).
  __ SmiUntag(EAX);
  __ pushl(EDX);  // Preserve EDX in case of 'fall_through'.
  __ cdq();       // Sign-extend EAX into EDX:EAX for idivl.
  __ idivl(EBX);
  __ popl(EDX);
  // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
  // cannot tag the result.
  __ cmpl(EAX, Immediate(0x40000000));
  __ j(EQUAL, normal_ir_body);
  __ SmiTag(EAX);
  __ ret();
  __ Bind(normal_ir_body);
}
378
// Unary minus on a Smi receiver. Negating the tagged value directly is
// valid since the tag bits are zero; overflow (negating MIN_SMI) falls
// through to normal_ir_body.
void AsmIntrinsifier::Integer_negate(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);  // Non-smi value.
  __ negl(EAX);
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
390
// Smi & Smi. Bitwise AND of two tagged Smis is itself a tagged Smi, so no
// untag/overflow handling is needed.
void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));
  __ andl(EAX, EBX);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
400
// Bitwise AND is commutative, so reuse the reversed-operand variant.
void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}
405
// Smi | Smi. Bitwise OR of two tagged Smis is itself a tagged Smi.
void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
                                               Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));
  __ orl(EAX, EBX);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
415
// Bitwise OR is commutative, so reuse the reversed-operand variant.
void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_bitOrFromInteger(assembler, normal_ir_body);
}
420
// Smi ^ Smi. Bitwise XOR of two tagged Smis is itself a tagged Smi (the
// zero tag bits XOR to zero).
void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));
  __ xorl(EAX, EBX);
  // Result is in EAX.
  __ ret();
  __ Bind(normal_ir_body);
}
430
// Bitwise XOR is commutative, so reuse the reversed-operand variant.
void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitXorFromInteger(assembler, normal_ir_body);
}
435
// Smi << Smi. Produces a Smi when the result fits; otherwise allocates a
// Mint for non-negative values. Negative values and failed Mint allocation
// fall through to normal_ir_body.
void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  Label overflow;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // Shift value is in EAX. Compare with tagged Smi.
  __ cmpl(EAX, Immediate(target::ToRawSmi(target::kSmiBits)));
  __ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);

  __ SmiUntag(EAX);
  __ movl(ECX, EAX);  // Shift amount must be in ECX.
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));  // Value.

  // Overflow test - all the shifted-out bits must be same as the sign bit.
  // Shift left then arithmetic-shift back: the round trip is lossless only
  // if no significant bits were shifted out.
  __ movl(EBX, EAX);
  __ shll(EAX, ECX);
  __ sarl(EAX, ECX);
  __ cmpl(EAX, EBX);
  __ j(NOT_EQUAL, &overflow, Assembler::kNearJump);

  __ shll(EAX, ECX);  // Shift for result now we know there is no overflow.

  // EAX is a correctly tagged Smi.
  __ ret();

  __ Bind(&overflow);
  // Arguments are Smi but the shift produced an overflow to Mint.
  __ cmpl(EBX, Immediate(0));
  // TODO(srdjan): Implement negative values, for now fall through.
  __ j(LESS, normal_ir_body, Assembler::kNearJump);
  __ SmiUntag(EBX);
  __ movl(EAX, EBX);
  __ shll(EBX, ECX);
  __ xorl(EDI, EDI);
  // shldl shifts the high bits of EAX into EDI, forming the 64-bit result.
  __ shldl(EDI, EAX, ECX);
  // Result in EDI (high) and EBX (low).
  const Class& mint_class = MintClass();
  __ TryAllocate(mint_class, normal_ir_body, Assembler::kNearJump,
                 EAX,   // Result register.
                 ECX);  // temp
  // EBX and EDI are not objects but integer values.
  __ movl(FieldAddress(EAX, target::Mint::value_offset()), EBX);
  __ movl(FieldAddress(EAX, target::Mint::value_offset() + target::kWordSize),
          EDI);
  __ ret();
  __ Bind(normal_ir_body);
}
483
// Pushes the 64-bit value of `reg` (a Smi or a Mint) onto the stack as two
// 32-bit words, high word first. Jumps to not_smi_or_mint for any other
// class. Clobbers `reg` (untagged in the Smi case) and `tmp`.
static void Push64SmiOrMint(Assembler* assembler,
                            Register reg,
                            Register tmp,
                            Label* not_smi_or_mint) {
  Label not_smi, done;
  __ testl(reg, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &not_smi, Assembler::kNearJump);
  __ SmiUntag(reg);
  // Sign extend to 64 bit
  __ movl(tmp, reg);
  __ sarl(tmp, Immediate(31));
  __ pushl(tmp);
  __ pushl(reg);
  __ jmp(&done);
  __ Bind(&not_smi);
  __ CompareClassId(reg, kMintCid, tmp);
  __ j(NOT_EQUAL, not_smi_or_mint);
  // Mint.
  __ pushl(FieldAddress(reg, target::Mint::value_offset() + target::kWordSize));
  __ pushl(FieldAddress(reg, target::Mint::value_offset()));
  __ Bind(&done);
}
506
507static void CompareIntegers(Assembler* assembler,
508 Label* normal_ir_body,
509 Condition true_condition) {
510 Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through;
511 TestBothArgumentsSmis(assembler, &try_mint_smi);
512 // EAX contains the right argument.
513 __ cmpl(Address(ESP, +2 * target::kWordSize), EAX);
514 __ j(true_condition, &is_true, Assembler::kNearJump);
515 __ Bind(&is_false);
516 __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
517 __ ret();
518 __ Bind(&is_true);
519 __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
520 __ ret();
521
522 // 64-bit comparison
523 Condition hi_true_cond, hi_false_cond, lo_false_cond;
524 switch (true_condition) {
525 case LESS:
526 case LESS_EQUAL:
527 hi_true_cond = LESS;
528 hi_false_cond = GREATER;
529 lo_false_cond = (true_condition == LESS) ? ABOVE_EQUAL : ABOVE;
530 break;
531 case GREATER:
532 case GREATER_EQUAL:
533 hi_true_cond = GREATER;
534 hi_false_cond = LESS;
535 lo_false_cond = (true_condition == GREATER) ? BELOW_EQUAL : BELOW;
536 break;
537 default:
538 UNREACHABLE();
539 hi_true_cond = hi_false_cond = lo_false_cond = OVERFLOW;
540 }
541 __ Bind(&try_mint_smi);
542 // Note that EDX and ECX must be preserved in case we fall through to main
543 // method.
544 // EAX contains the right argument.
545 __ movl(EBX, Address(ESP, +2 * target::kWordSize)); // Left argument.
546 // Push left as 64 bit integer.
547 Push64SmiOrMint(assembler, EBX, EDI, normal_ir_body);
548 // Push right as 64 bit integer.
549 Push64SmiOrMint(assembler, EAX, EDI, &drop_two_fall_through);
550 __ popl(EBX); // Right.LO.
551 __ popl(ECX); // Right.HI.
552 __ popl(EAX); // Left.LO.
553 __ popl(EDX); // Left.HI.
554 __ cmpl(EDX, ECX); // cmpl left.HI, right.HI.
555 __ j(hi_false_cond, &is_false, Assembler::kNearJump);
556 __ j(hi_true_cond, &is_true, Assembler::kNearJump);
557 __ cmpl(EAX, EBX); // cmpl left.LO, right.LO.
558 __ j(lo_false_cond, &is_false, Assembler::kNearJump);
559 // Else is true.
560 __ jmp(&is_true);
561
562 __ Bind(&drop_two_fall_through);
563 __ Drop(2);
564 __ Bind(normal_ir_body);
565}
566
// a > b with operands in reversed (FromInt) order is b < a, hence LESS.
void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
                                                 Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS);
}
571
// a < b is identical to the reversed-operand greater-than comparison.
void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  Integer_greaterThanFromInt(assembler, normal_ir_body);
}
576
// Integer '>' comparison; result (true/false object) returned in EAX.
void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER);
}
581
// Integer '<=' comparison; result (true/false object) returned in EAX.
void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS_EQUAL);
}
586
// Integer '>=' comparison; result (true/false object) returned in EAX.
void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER_EQUAL);
}
591
// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
// Returns the true/false object in EAX, or falls through to normal_ir_body
// for Smi == double and Mint == Mint comparisons.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ cmpl(EAX, Address(ESP, +2 * target::kWordSize));
  __ j(EQUAL, &true_label, Assembler::kNearJump);
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));
  // Combined tag test covers both operands at once (see
  // TestBothArgumentsSmis).
  __ orl(EAX, EBX);
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
  // Both arguments are smi, '===' is good enough.
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));  // Receiver.
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &receiver_not_smi);

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));  // Right argument.
  __ CompareClassId(EAX, kDoubleCid, EDI);
  __ j(EQUAL, normal_ir_body);
  __ LoadObject(EAX,
                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
  __ ret();

  __ Bind(&receiver_not_smi);
  // EAX: receiver.
  __ CompareClassId(EAX, kMintCid, EDI);
  __ j(NOT_EQUAL, normal_ir_body);
  // Receiver is Mint, return false if right is Smi.
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));  // Right argument.
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}
643
// Equality is symmetric, so reuse the reversed-operand variant.
void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}
648
// Smi >> Smi (arithmetic shift right). Negative shift counts fall through
// to normal_ir_body (exception thrown there); counts above 31 are clamped,
// which is semantically equivalent for arithmetic right shift.
void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
  Label shift_count_ok;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // Can destroy ECX since we are not falling through.
  const Immediate& count_limit = Immediate(0x1F);
  // Check that the count is not larger than what the hardware can handle.
  // For shifting right a Smi the result is the same for all numbers
  // >= count_limit.
  __ SmiUntag(EAX);
  // Negative counts throw exception.
  __ cmpl(EAX, Immediate(0));
  __ j(LESS, normal_ir_body, Assembler::kNearJump);
  __ cmpl(EAX, count_limit);
  __ j(LESS_EQUAL, &shift_count_ok, Assembler::kNearJump);
  __ movl(EAX, count_limit);
  __ Bind(&shift_count_ok);
  __ movl(ECX, EAX);  // Shift amount must be in ECX.
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));  // Value.
  __ SmiUntag(EAX);                                    // Value.
  __ sarl(EAX, ECX);
  __ SmiTag(EAX);
  __ ret();
  __ Bind(normal_ir_body);
}
673
// Argument is Smi (receiver). Bitwise NOT of the tagged value flips the
// (zero) tag bit too, so it is masked back off afterwards. Never falls
// through: a Smi receiver is guaranteed by the caller.
void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));  // Receiver.
  __ notl(EAX);
  __ andl(EAX, Immediate(~kSmiTagMask));  // Remove inverted smi-tag.
  __ ret();
}
682
// Smi.bitLength: position of the highest bit that differs from the sign
// bit. Computed as bsr(x ^ signmask) on the tagged value; the +1 from the
// tag's position is exactly what SmiTag would otherwise need to add.
void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));  // Receiver.
  // XOR with sign bit to complement bits if value is negative.
  __ movl(ECX, EAX);
  __ sarl(ECX, Immediate(31));  // All 0 or all 1.
  __ xorl(EAX, ECX);
  // BSR does not write the destination register if source is zero. Put a 1 in
  // the Smi tag bit to ensure BSR writes to destination register.
  __ orl(EAX, Immediate(kSmiTagMask));
  __ bsrl(EAX, EAX);
  __ SmiTag(EAX);
  __ ret();
}
698
// Smi & Smi shares the integer implementation.
void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
                                        Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}
703
void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)
  // Shifts x_used 32-bit digits of x_digits left by n bits into r_digits,
  // iterating from the most significant digit down. Never falls through;
  // always returns null.

  // Preserve THR to free ESI.
  __ pushl(THR);
  ASSERT(THR == ESI);

  __ movl(EDI, Address(ESP, 5 * target::kWordSize));  // x_digits
  __ movl(ECX, Address(ESP, 3 * target::kWordSize));  // n is Smi
  __ SmiUntag(ECX);
  __ movl(EBX, Address(ESP, 2 * target::kWordSize));  // r_digits
  __ movl(ESI, ECX);
  __ sarl(ESI, Immediate(5));  // ESI = n ~/ _DIGIT_BITS.
  // EBX = &r_digits[n ~/ 32]: whole-digit part of the shift is handled by
  // offsetting the destination pointer.
  __ leal(EBX,
          FieldAddress(EBX, ESI, TIMES_4, target::TypedData::data_offset()));
  __ movl(ESI, Address(ESP, 4 * target::kWordSize));  // x_used > 0, Smi.
  __ SmiUntag(ESI);
  __ decl(ESI);
  __ xorl(EAX, EAX);  // EAX = 0.
  __ movl(EDX,
          FieldAddress(EDI, ESI, TIMES_4, target::TypedData::data_offset()));
  // shldl shifts bits of EDX into EAX: forms the carried-out top digit.
  __ shldl(EAX, EDX, ECX);
  __ movl(Address(EBX, ESI, TIMES_4, kBytesPerBigIntDigit), EAX);
  Label last;
  __ cmpl(ESI, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movl(EAX, EDX);
  __ movl(EDX, FieldAddress(
                   EDI, ESI, TIMES_4,
                   target::TypedData::data_offset() - kBytesPerBigIntDigit));
  __ shldl(EAX, EDX, ECX);
  __ movl(Address(EBX, ESI, TIMES_4, 0), EAX);
  __ decl(ESI);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shldl(EDX, ESI, ECX);  // ESI == 0.
  __ movl(Address(EBX, 0), EDX);

  // Restore THR and return.
  __ popl(THR);
  __ LoadObject(EAX, NullObject());
  __ ret();
}
750
void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)
  // Shifts x_used 32-bit digits of x_digits right by n bits into r_digits,
  // iterating from the least significant digit up. Never falls through;
  // always returns null.

  // Preserve THR to free ESI.
  __ pushl(THR);
  ASSERT(THR == ESI);

  __ movl(EDI, Address(ESP, 5 * target::kWordSize));  // x_digits
  __ movl(ECX, Address(ESP, 3 * target::kWordSize));  // n is Smi
  __ SmiUntag(ECX);
  __ movl(EBX, Address(ESP, 2 * target::kWordSize));  // r_digits
  __ movl(EDX, ECX);
  __ sarl(EDX, Immediate(5));  // EDX = n ~/ _DIGIT_BITS.
  __ movl(ESI, Address(ESP, 4 * target::kWordSize));  // x_used > 0, Smi.
  __ SmiUntag(ESI);
  __ decl(ESI);
  // EDI = &x_digits[x_used - 1].
  __ leal(EDI,
          FieldAddress(EDI, ESI, TIMES_4, target::TypedData::data_offset()));
  __ subl(ESI, EDX);
  // EBX = &r_digits[x_used - 1 - (n ~/ 32)].
  __ leal(EBX,
          FieldAddress(EBX, ESI, TIMES_4, target::TypedData::data_offset()));
  // ESI becomes a negative index that counts up to zero, so both source and
  // destination are addressed relative to their end pointers.
  __ negl(ESI);
  __ movl(EDX, Address(EDI, ESI, TIMES_4, 0));
  Label last;
  __ cmpl(ESI, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movl(EAX, EDX);
  __ movl(EDX, Address(EDI, ESI, TIMES_4, kBytesPerBigIntDigit));
  // shrdl shifts bits of EDX into EAX, combining adjacent digits.
  __ shrdl(EAX, EDX, ECX);
  __ movl(Address(EBX, ESI, TIMES_4, 0), EAX);
  __ incl(ESI);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shrdl(EDX, ESI, ECX);  // ESI == 0.
  __ movl(Address(EBX, 0), EDX);

  // Restore THR and return.
  __ popl(THR);
  __ LoadObject(EAX, NullObject());
  __ ret();
}
797
void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)
  // r_digits = digits + a_digits (multi-digit addition with carry), where
  // used >= a_used > 0. Never falls through; always returns null.

  // Preserve THR to free ESI.
  __ pushl(THR);
  ASSERT(THR == ESI);

  __ movl(EDI, Address(ESP, 6 * target::kWordSize));  // digits
  __ movl(EAX, Address(ESP, 5 * target::kWordSize));  // used is Smi
  __ SmiUntag(EAX);                                   // used > 0.
  __ movl(ESI, Address(ESP, 4 * target::kWordSize));  // a_digits
  __ movl(ECX, Address(ESP, 3 * target::kWordSize));  // a_used is Smi
  __ SmiUntag(ECX);                                   // a_used > 0.
  __ movl(EBX, Address(ESP, 2 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subl(EAX, ECX);
  __ incl(EAX);  // To account for the extra test between loops.
  __ pushl(EAX);

  __ xorl(EDX, EDX);  // EDX = 0, carry flag = 0.
  Label add_loop;
  __ Bind(&add_loop);
  // Loop a_used times, ECX = a_used, ECX > 0.
  __ movl(EAX,
          FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
  // adcl chains the carry from the previous digit through the whole loop.
  __ adcl(EAX,
          FieldAddress(ESI, EDX, TIMES_4, target::TypedData::data_offset()));
  __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
          EAX);
  __ incl(EDX);  // Does not affect carry flag.
  __ decl(ECX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &add_loop, Assembler::kNearJump);

  Label last_carry;
  __ popl(ECX);
  __ decl(ECX);                                    // Does not affect carry flag.
  __ j(ZERO, &last_carry, Assembler::kNearJump);   // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, ECX = used - a_used, ECX > 0.
  // Propagate the remaining carry through the longer operand's digits.
  __ movl(EAX,
          FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
  __ adcl(EAX, Immediate(0));
  __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
          EAX);
  __ incl(EDX);  // Does not affect carry flag.
  __ decl(ECX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&last_carry);
  // Store the final carry (0 or 1) as the top result digit.
  __ movl(EAX, Immediate(0));
  __ adcl(EAX, Immediate(0));
  __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
          EAX);

  // Restore THR and return.
  __ popl(THR);
  __ LoadObject(EAX, NullObject());
  __ ret();
}
863
void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)
  // r_digits = digits - a_digits (multi-digit subtraction with borrow),
  // where used >= a_used > 0. Never falls through; always returns null.

  // Preserve THR to free ESI.
  __ pushl(THR);
  ASSERT(THR == ESI);

  __ movl(EDI, Address(ESP, 6 * target::kWordSize));  // digits
  __ movl(EAX, Address(ESP, 5 * target::kWordSize));  // used is Smi
  __ SmiUntag(EAX);                                   // used > 0.
  __ movl(ESI, Address(ESP, 4 * target::kWordSize));  // a_digits
  __ movl(ECX, Address(ESP, 3 * target::kWordSize));  // a_used is Smi
  __ SmiUntag(ECX);                                   // a_used > 0.
  __ movl(EBX, Address(ESP, 2 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subl(EAX, ECX);
  __ incl(EAX);  // To account for the extra test between loops.
  __ pushl(EAX);

  __ xorl(EDX, EDX);  // EDX = 0, carry flag = 0.
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop a_used times, ECX = a_used, ECX > 0.
  __ movl(EAX,
          FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
  // sbbl chains the borrow from the previous digit through the whole loop.
  __ sbbl(EAX,
          FieldAddress(ESI, EDX, TIMES_4, target::TypedData::data_offset()));
  __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
          EAX);
  __ incl(EDX);  // Does not affect carry flag.
  __ decl(ECX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &sub_loop, Assembler::kNearJump);

  Label done;
  __ popl(ECX);
  __ decl(ECX);                              // Does not affect carry flag.
  __ j(ZERO, &done, Assembler::kNearJump);   // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, ECX = used - a_used, ECX > 0.
  // Propagate the remaining borrow through the longer operand's digits.
  __ movl(EAX,
          FieldAddress(EDI, EDX, TIMES_4, target::TypedData::data_offset()));
  __ sbbl(EAX, Immediate(0));
  __ movl(FieldAddress(EBX, EDX, TIMES_4, target::TypedData::data_offset()),
          EAX);
  __ incl(EDX);  // Does not affect carry flag.
  __ decl(ECX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&done);
  // Restore THR and return.
  __ popl(THR);
  __ LoadObject(EAX, NullObject());
  __ ret();
}
924
925void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
926 Label* normal_ir_body) {
927 // Pseudo code:
928 // static int _mulAdd(Uint32List x_digits, int xi,
929 // Uint32List m_digits, int i,
930 // Uint32List a_digits, int j, int n) {
931 // uint32_t x = x_digits[xi >> 1]; // xi is Smi.
932 // if (x == 0 || n == 0) {
933 // return 1;
934 // }
935 // uint32_t* mip = &m_digits[i >> 1]; // i is Smi.
936 // uint32_t* ajp = &a_digits[j >> 1]; // j is Smi.
937 // uint32_t c = 0;
938 // SmiUntag(n);
939 // do {
940 // uint32_t mi = *mip++;
941 // uint32_t aj = *ajp;
942 // uint64_t t = x*mi + aj + c; // 32-bit * 32-bit -> 64-bit.
943 // *ajp++ = low32(t);
944 // c = high32(t);
945 // } while (--n > 0);
946 // while (c != 0) {
947 // uint64_t t = *ajp + c;
948 // *ajp++ = low32(t);
949 // c = high32(t); // c == 0 or 1.
950 // }
951 // return 1;
952 // }
953
954 Label no_op;
955 // EBX = x, no_op if x == 0
956 __ movl(ECX, Address(ESP, 7 * target::kWordSize)); // x_digits
957 __ movl(EAX, Address(ESP, 6 * target::kWordSize)); // xi is Smi
958 __ movl(EBX,
959 FieldAddress(ECX, EAX, TIMES_2, target::TypedData::data_offset()));
960 __ testl(EBX, EBX);
961 __ j(ZERO, &no_op, Assembler::kNearJump);
962
963 // EDX = SmiUntag(n), no_op if n == 0
964 __ movl(EDX, Address(ESP, 1 * target::kWordSize));
965 __ SmiUntag(EDX);
966 __ j(ZERO, &no_op, Assembler::kNearJump);
967
968 // Preserve THR to free ESI.
969 __ pushl(THR);
970 ASSERT(THR == ESI);
971
972 // EDI = mip = &m_digits[i >> 1]
973 __ movl(EDI, Address(ESP, 6 * target::kWordSize)); // m_digits
974 __ movl(EAX, Address(ESP, 5 * target::kWordSize)); // i is Smi
975 __ leal(EDI,
976 FieldAddress(EDI, EAX, TIMES_2, target::TypedData::data_offset()));
977
978 // ESI = ajp = &a_digits[j >> 1]
979 __ movl(ESI, Address(ESP, 4 * target::kWordSize)); // a_digits
980 __ movl(EAX, Address(ESP, 3 * target::kWordSize)); // j is Smi
981 __ leal(ESI,
982 FieldAddress(ESI, EAX, TIMES_2, target::TypedData::data_offset()));
983
984 // Save n
985 __ pushl(EDX);
986 Address n_addr = Address(ESP, 0 * target::kWordSize);
987
988 // ECX = c = 0
989 __ xorl(ECX, ECX);
990
991 Label muladd_loop;
992 __ Bind(&muladd_loop);
993 // x: EBX
994 // mip: EDI
995 // ajp: ESI
996 // c: ECX
997 // t: EDX:EAX (not live at loop entry)
998 // n: ESP[0]
999
1000 // uint32_t mi = *mip++
1001 __ movl(EAX, Address(EDI, 0));
1002 __ addl(EDI, Immediate(kBytesPerBigIntDigit));
1003
1004 // uint64_t t = x*mi
1005 __ mull(EBX); // t = EDX:EAX = EAX * EBX
1006 __ addl(EAX, ECX); // t += c
1007 __ adcl(EDX, Immediate(0));
1008
1009 // uint32_t aj = *ajp; t += aj
1010 __ addl(EAX, Address(ESI, 0));
1011 __ adcl(EDX, Immediate(0));
1012
1013 // *ajp++ = low32(t)
1014 __ movl(Address(ESI, 0), EAX);
1015 __ addl(ESI, Immediate(kBytesPerBigIntDigit));
1016
1017 // c = high32(t)
1018 __ movl(ECX, EDX);
1019
1020 // while (--n > 0)
1021 __ decl(n_addr); // --n
1022 __ j(NOT_ZERO, &muladd_loop, Assembler::kNearJump);
1023
1024 Label done;
1025 __ testl(ECX, ECX);
1026 __ j(ZERO, &done, Assembler::kNearJump);
1027
1028 // *ajp += c
1029 __ addl(Address(ESI, 0), ECX);
1030 __ j(NOT_CARRY, &done, Assembler::kNearJump);
1031
1032 Label propagate_carry_loop;
1033 __ Bind(&propagate_carry_loop);
1034 __ addl(ESI, Immediate(kBytesPerBigIntDigit));
1035 __ incl(Address(ESI, 0)); // c == 0 or 1
1036 __ j(CARRY, &propagate_carry_loop, Assembler::kNearJump);
1037
1038 __ Bind(&done);
1039 __ Drop(1); // n
1040 // Restore THR and return.
1041 __ popl(THR);
1042
1043 __ Bind(&no_op);
1044 __ movl(EAX, Immediate(target::ToRawSmi(1))); // One digit processed.
1045 __ ret();
1046}
1047
void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint32_t* xip = &x_digits[i >> 1];  // i is Smi.
  //   uint32_t x = *xip++;
  //   if (x == 0) return 1;
  //   uint32_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint32_t aj = *ajp;
  //   uint64_t t = x*x + aj;
  //   *ajp++ = low32(t);
  //   uint64_t c = high32(t);
  //   int n = ((used - i) >> 1) - 1;  // used and i are Smi.
  //   while (--n >= 0) {
  //     uint32_t xi = *xip++;
  //     uint32_t aj = *ajp;
  //     uint96_t t = 2*x*xi + aj + c;  // 2-bit * 32-bit * 32-bit -> 65-bit.
  //     *ajp++ = low32(t);
  //     c = high64(t);  // 33-bit.
  //   }
  //   uint32_t aj = *ajp;
  //   uint64_t t = aj + c;  // 32-bit + 33-bit -> 34-bit.
  //   *ajp++ = low32(t);
  //   *ajp = high32(t);
  //   return 1;
  // }

  // EDI = xip = &x_digits[i >> 1]
  __ movl(EDI, Address(ESP, 4 * target::kWordSize));  // x_digits
  __ movl(EAX, Address(ESP, 3 * target::kWordSize));  // i is Smi
  __ leal(EDI,
          FieldAddress(EDI, EAX, TIMES_2, target::TypedData::data_offset()));

  // EBX = x = *xip++, return if x == 0
  Label x_zero;
  __ movl(EBX, Address(EDI, 0));
  __ cmpl(EBX, Immediate(0));
  __ j(EQUAL, &x_zero, Assembler::kNearJump);
  __ addl(EDI, Immediate(kBytesPerBigIntDigit));

  // Preserve THR to free ESI.
  __ pushl(THR);
  ASSERT(THR == ESI);

  // ESI = ajp = &a_digits[i]
  // Stack offsets below account for the pushed THR. EAX still holds Smi i
  // (== 2*i_value), so TIMES_4 scaling yields a byte offset of 8*i_value,
  // i.e. digit index 2*i_value, matching "j == 2*i" in the pseudo code.
  __ movl(ESI, Address(ESP, 3 * target::kWordSize));  // a_digits
  __ leal(ESI,
          FieldAddress(ESI, EAX, TIMES_4, target::TypedData::data_offset()));

  // EDX:EAX = t = x*x + *ajp
  __ movl(EAX, EBX);
  __ mull(EBX);
  __ addl(EAX, Address(ESI, 0));
  __ adcl(EDX, Immediate(0));

  // *ajp++ = low32(t)
  __ movl(Address(ESI, 0), EAX);
  __ addl(ESI, Immediate(kBytesPerBigIntDigit));

  // int n = used - i - 1
  __ movl(EAX, Address(ESP, 2 * target::kWordSize));  // used is Smi
  __ subl(EAX, Address(ESP, 4 * target::kWordSize));  // i is Smi
  __ SmiUntag(EAX);
  __ decl(EAX);
  __ pushl(EAX);  // Save n on stack.

  // uint64_t c = high32(t)
  __ pushl(Immediate(0));  // push high32(c) == 0
  __ pushl(EDX);           // push low32(c) == high32(t)

  Address n_addr = Address(ESP, 2 * target::kWordSize);
  Address ch_addr = Address(ESP, 1 * target::kWordSize);
  Address cl_addr = Address(ESP, 0 * target::kWordSize);

  Label loop, done;
  __ Bind(&loop);
  // x:   EBX
  // xip: EDI
  // ajp: ESI
  // c:   ESP[1]:ESP[0]
  // t:   ECX:EDX:EAX (not live at loop entry)
  // n:   ESP[2]

  // while (--n >= 0)
  __ decl(Address(ESP, 2 * target::kWordSize));  // --n
  __ j(NEGATIVE, &done, Assembler::kNearJump);

  // uint32_t xi = *xip++
  __ movl(EAX, Address(EDI, 0));
  __ addl(EDI, Immediate(kBytesPerBigIntDigit));

  // uint96_t t = ECX:EDX:EAX = 2*x*xi + aj + c
  __ mull(EBX);       // EDX:EAX = EAX * EBX
  __ xorl(ECX, ECX);  // ECX = 0
  // Shift the 64-bit product left by one (doubling it) across three
  // registers; SHLD shifts in bits from the second operand.
  __ shldl(ECX, EDX, Immediate(1));
  __ shldl(EDX, EAX, Immediate(1));
  __ shll(EAX, Immediate(1));     // ECX:EDX:EAX <<= 1
  __ addl(EAX, Address(ESI, 0));  // t += aj
  __ adcl(EDX, Immediate(0));
  __ adcl(ECX, Immediate(0));
  __ addl(EAX, cl_addr);  // t += low32(c)
  __ adcl(EDX, ch_addr);  // t += high32(c) << 32
  __ adcl(ECX, Immediate(0));

  // *ajp++ = low32(t)
  __ movl(Address(ESI, 0), EAX);
  __ addl(ESI, Immediate(kBytesPerBigIntDigit));

  // c = high64(t)
  __ movl(cl_addr, EDX);
  __ movl(ch_addr, ECX);

  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&done);
  // uint64_t t = aj + c
  __ movl(EAX, cl_addr);  // t = c
  __ movl(EDX, ch_addr);
  __ addl(EAX, Address(ESI, 0));  // t += *ajp
  __ adcl(EDX, Immediate(0));

  // *ajp++ = low32(t)
  // *ajp = high32(t)
  __ movl(Address(ESI, 0), EAX);
  __ movl(Address(ESI, kBytesPerBigIntDigit), EDX);

  // Restore THR and return.
  __ Drop(3);  // n and the two carry words.
  __ popl(THR);
  __ Bind(&x_zero);
  __ movl(EAX, Immediate(target::ToRawSmi(1)));  // One digit processed.
  __ ret();
}
1182
void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint32_t yt = args[_YT];  // _YT == 1.
  //   uint32_t* dp = &digits[i >> 1];  // i is Smi.
  //   uint32_t dh = dp[0];  // dh == digits[i >> 1].
  //   uint32_t qd;
  //   if (dh == yt) {
  //     qd = DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i - 1) >> 1].
  //     qd = dh:dl / yt;  // No overflow possible, because dh < yt.
  //   }
  //   args[_QD] = qd;  // _QD == 2.
  //   return 1;
  // }

  // EDI = args
  __ movl(EDI, Address(ESP, 3 * target::kWordSize));  // args

  // ECX = yt = args[1]
  __ movl(ECX, FieldAddress(EDI, target::TypedData::data_offset() +
                                     kBytesPerBigIntDigit));

  // EBX = dp = &digits[i >> 1]
  __ movl(EBX, Address(ESP, 2 * target::kWordSize));  // digits
  __ movl(EAX, Address(ESP, 1 * target::kWordSize));  // i is Smi
  __ leal(EBX,
          FieldAddress(EBX, EAX, TIMES_2, target::TypedData::data_offset()));

  // EDX = dh = dp[0]
  __ movl(EDX, Address(EBX, 0));

  // EAX = qd = DIGIT_MASK = -1
  __ movl(EAX, Immediate(-1));

  // Return qd if dh == yt
  Label return_qd;
  __ cmpl(EDX, ECX);
  __ j(EQUAL, &return_qd, Assembler::kNearJump);

  // EAX = dl = dp[-1]
  __ movl(EAX, Address(EBX, -kBytesPerBigIntDigit));

  // EAX = qd = dh:dl / yt = EDX:EAX / ECX
  // DIV uses the 64-bit dividend EDX:EAX; the pseudo code's dh < yt
  // precondition guarantees the quotient fits in 32 bits (no #DE).
  __ divl(ECX);

  __ Bind(&return_qd);
  // args[2] = qd
  __ movl(FieldAddress(
              EDI, target::TypedData::data_offset() + 2 * kBytesPerBigIntDigit),
          EAX);

  __ movl(EAX, Immediate(target::ToRawSmi(1)));  // One digit processed.
  __ ret();
}
1240
void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint32_t rho = args[_RHO];  // _RHO == 2.
  //   uint32_t d = digits[i >> 1];  // i is Smi.
  //   uint64_t t = rho*d;
  //   args[_MU] = t mod DIGIT_BASE;  // _MU == 4.
  //   return 1;
  // }

  // EDI = args
  __ movl(EDI, Address(ESP, 3 * target::kWordSize));  // args

  // ECX = rho = args[2]
  __ movl(ECX, FieldAddress(EDI, target::TypedData::data_offset() +
                                     2 * kBytesPerBigIntDigit));

  // EAX = digits[i >> 1]
  __ movl(EBX, Address(ESP, 2 * target::kWordSize));  // digits
  __ movl(EAX, Address(ESP, 1 * target::kWordSize));  // i is Smi
  __ movl(EAX,
          FieldAddress(EBX, EAX, TIMES_2, target::TypedData::data_offset()));

  // EDX:EAX = t = rho*d
  __ mull(ECX);

  // args[4] = t mod DIGIT_BASE = low32(t)
  // Only the low 32 bits are needed; the high half in EDX is discarded.
  __ movl(FieldAddress(
              EDI, target::TypedData::data_offset() + 4 * kBytesPerBigIntDigit),
          EAX);

  __ movl(EAX, Immediate(target::ToRawSmi(1)));  // One digit processed.
  __ ret();
}
1276
// Check if the last argument is a double, jump to label 'is_smi' if smi
// (easy to convert to double), otherwise jump to label 'not_double_smi',
// Returns the last argument in EAX.
// Clobbers EBX (used as scratch by CompareClassId).
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(ZERO, is_smi, Assembler::kNearJump);  // Jump if Smi.
  __ CompareClassId(EAX, kDoubleCid, EBX);
  __ j(NOT_EQUAL, not_double_smi, Assembler::kNearJump);
  // Fall through if double.
}
1290
// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown
// type. Return true or false object in the register EAX. Any NaN argument
// returns false. Any non-double arg1 causes control flow to fall through to the
// slow case (compiled method body).
// 'true_condition' must be an unsigned condition (ABOVE, BELOW, ...), since
// comisd sets CF/ZF like an unsigned integer compare.
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  Label is_false, is_true, is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in EAX.
  __ movsd(XMM1, FieldAddress(EAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
  __ comisd(XMM0, XMM1);
  // comisd sets PF when either operand is NaN (unordered).
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false;
  __ j(true_condition, &is_true, Assembler::kNearJump);
  // Fall through false.
  __ Bind(&is_false);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_smi);
  // Right argument was a Smi: convert it to a double and retry.
  __ SmiUntag(EAX);
  __ cvtsi2sd(XMM1, EAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}
1321
// arg0 is Double, arg1 is unknown.
// Returns true iff left > right; NaN compares false (handled in
// CompareDoubles via the parity check).
void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE);
}
1327
// arg0 is Double, arg1 is unknown.
// Returns true iff left >= right; NaN compares false.
void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE_EQUAL);
}
1333
// arg0 is Double, arg1 is unknown.
// Returns true iff left < right; NaN compares false.
void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW);
}
1339
// arg0 is Double, arg1 is unknown.
// Returns true iff left == right; NaN compares false.
void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQUAL);
}
1345
// arg0 is Double, arg1 is unknown.
// Returns true iff left <= right; NaN compares false.
void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW_EQUAL);
}
1351
// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
// Allocates the result Double; falls through to the slow path if allocation
// fails or the right argument is neither Smi nor Double.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  Label is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in EAX.
  __ movsd(XMM1, FieldAddress(EAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
  // XMM0 = XMM0 <op> XMM1.
  switch (kind) {
    case Token::kADD:
      __ addsd(XMM0, XMM1);
      break;
    case Token::kSUB:
      __ subsd(XMM0, XMM1);
      break;
    case Token::kMUL:
      __ mulsd(XMM0, XMM1);
      break;
    case Token::kDIV:
      __ divsd(XMM0, XMM1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
                 EAX,  // Result register.
                 EBX);
  __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(&is_smi);
  // Right argument was a Smi: convert it to a double and retry.
  __ SmiUntag(EAX);
  __ cvtsi2sd(XMM1, EAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}
1392
// double + (double|Smi); other right-hand types go to the slow path.
void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}
1396
// double * (double|Smi); other right-hand types go to the slow path.
void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}
1400
// double - (double|Smi); other right-hand types go to the slow path.
void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}
1404
// double / (double|Smi); other right-hand types go to the slow path.
void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}
1408
// Left is double, right is integer (Mint or Smi)
// Only the Smi case is intrinsified here; a Mint right-hand side fails the
// Smi-tag test and falls through to the compiled body.
void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only smis allowed.
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);
  // Is Smi.
  __ SmiUntag(EAX);
  __ cvtsi2sd(XMM1, EAX);
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
  __ mulsd(XMM0, XMM1);
  const Class& double_class = DoubleClass();
  // Falls through to the slow path if allocation fails.
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
                 EAX,  // Result register.
                 EBX);
  __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}
1430
// Converts a Smi receiver to a newly allocated Double. Non-Smi receivers
// (e.g. Mint) fall through to the compiled body.
void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);
  // Is Smi.
  __ SmiUntag(EAX);
  __ cvtsi2sd(XMM0, EAX);
  const Class& double_class = DoubleClass();
  // Falls through to the slow path if allocation fails.
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
                 EAX,  // Result register.
                 EBX);
  __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}
1447
// Returns true iff the receiver double is NaN.
void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  Label is_true;
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
  // Comparing a value with itself is unordered (PF set) exactly when it
  // is NaN.
  __ comisd(XMM0, XMM0);
  __ j(PARITY_EVEN, &is_true, Assembler::kNearJump);  // NaN -> true;
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();
}
1461
// Returns true iff the receiver double is +infinity or -infinity.
// Checks the raw IEEE-754 bit pattern: mantissa must be all zeros and the
// (sign-masked) high word must equal the infinity exponent pattern.
void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label not_inf;
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ movl(EBX, FieldAddress(EAX, target::Double::value_offset()));

  // If the low word isn't zero, then it isn't infinity.
  __ cmpl(EBX, Immediate(0));
  __ j(NOT_EQUAL, &not_inf, Assembler::kNearJump);
  // Check the high word.
  __ movl(EBX, FieldAddress(
                   EAX, target::Double::value_offset() + target::kWordSize));
  // Mask off sign bit.
  __ andl(EBX, Immediate(0x7FFFFFFF));
  // Compare with +infinity.
  __ cmpl(EBX, Immediate(0x7FF00000));
  __ j(NOT_EQUAL, &not_inf, Assembler::kNearJump);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_inf);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();
}
1486
// Returns true iff the receiver double is negative, including -0.0.
// NaN returns false.
void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_false, is_true, is_zero;
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
  __ xorpd(XMM1, XMM1);  // 0.0 -> XMM1.
  __ comisd(XMM0, XMM1);
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false.
  // 0.0 and -0.0 compare equal; disambiguate via the sign bit below.
  __ j(EQUAL, &is_zero, Assembler::kNearJump);  // Check for negative zero.
  __ j(ABOVE_EQUAL, &is_false, Assembler::kNearJump);  // >= 0 -> false.
  __ Bind(&is_true);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_false);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_zero);
  // Check for negative zero (get the sign bit).
  __ movmskpd(EAX, XMM0);
  __ testl(EAX, Immediate(1));
  __ j(NOT_ZERO, &is_true, Assembler::kNearJump);
  __ jmp(&is_false, Assembler::kNearJump);
}
1510
// Truncates the receiver double to an integer; only results that fit in a
// Smi are handled here, everything else (overflow, NaN) falls through.
void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
                                      Label* normal_ir_body) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
  __ cvttsd2si(EAX, XMM0);
  // Overflow is signalled with minint.
  // Check for overflow and that it fits into Smi.
  // Values outside the Smi range [-2^30, 2^30) make this subtraction
  // produce a negative (sign-set) result.
  __ cmpl(EAX, Immediate(0xC0000000));
  __ j(NEGATIVE, normal_ir_body, Assembler::kNearJump);
  __ SmiTag(EAX);
  __ ret();
  __ Bind(normal_ir_body);
}
1524
// Computes the hash code of the receiver double: integral values that fit
// in a Smi hash to their Smi-tagged integer value; everything else hashes
// by mixing the two 32-bit halves of the raw bits.
void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                      Label* normal_ir_body) {
  // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

  // Convert double value to signed 32-bit int in EAX and
  // back to a double in XMM1.
  __ movl(ECX, Address(ESP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(ECX, target::Double::value_offset()));
  __ cvttsd2si(EAX, XMM0);
  __ cvtsi2sd(XMM1, EAX);

  // Tag the int as a Smi, making sure that it fits; this checks for
  // overflow and NaN in the conversion from double to int. Conversion
  // overflow from cvttsd2si is signalled with an INT32_MIN value.
  ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
  __ addl(EAX, EAX);  // SmiTag via doubling; OF set if it doesn't fit.
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);

  // Compare the two double values. If they are equal, we return the
  // Smi tagged result immediately as the hash code.
  Label double_hash;
  __ comisd(XMM0, XMM1);
  __ j(NOT_EQUAL, &double_hash, Assembler::kNearJump);
  __ ret();

  // Convert the double bits to a hash code that fits in a Smi.
  __ Bind(&double_hash);
  // XOR the low word (+0) with the high word (+4) of the raw bits.
  __ movl(EAX, FieldAddress(ECX, target::Double::value_offset()));
  __ movl(ECX, FieldAddress(ECX, target::Double::value_offset() + 4));
  __ xorl(EAX, ECX);
  __ andl(EAX, Immediate(target::kSmiMax));
  __ SmiTag(EAX);
  __ ret();

  // Fall into the native C++ implementation.
  __ Bind(normal_ir_body);
}
1562
// Argument type is not known
// Computes sqrt of a double (or Smi converted to double) argument and
// returns a freshly allocated Double; other types go to the slow path.
void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
  Label is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Argument is double and is in EAX.
  __ movsd(XMM1, FieldAddress(EAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ sqrtsd(XMM0, XMM1);
  const Class& double_class = DoubleClass();
  // Falls through to the slow path if allocation fails.
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump,
                 EAX,  // Result register.
                 EBX);
  __ movsd(FieldAddress(EAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(&is_smi);
  // Smi argument: convert to double and retry.
  __ SmiUntag(EAX);
  __ cvtsi2sd(XMM1, EAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}
1583
// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
// One step of a multiply-with-carry style generator: the 64-bit product
// A * state_lo plus state_hi is split back into the two 32-bit state words.
void AsmIntrinsifier::Random_nextState(Assembler* assembler,
                                       Label* normal_ir_body) {
  const Field& state_field = LookupMathRandomStateFieldOffset();
  const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;

  // 'a_int_value' is a mask.
  ASSERT(Utils::IsUint(32, a_int_value));
  int32_t a_int32_value = static_cast<int32_t>(a_int_value);

  // Receiver.
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  // Field '_state'.
  __ movl(EBX, FieldAddress(EAX, LookupFieldOffsetInBytes(state_field)));
  // Addresses of _state[0] and _state[1].
  const intptr_t scale =
      target::Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
  const intptr_t offset =
      target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
  Address addr_0 = FieldAddress(EBX, 0 * scale + offset);
  Address addr_1 = FieldAddress(EBX, 1 * scale + offset);
  __ movl(EAX, Immediate(a_int32_value));
  // 64-bit multiply EAX * value -> EDX:EAX.
  __ mull(addr_0);
  // EDX:EAX += _state[1]; the adcl folds the add's carry into the high word.
  __ addl(EAX, addr_1);
  __ adcl(EDX, Immediate(0));
  __ movl(addr_1, EDX);  // _state[kSTATE_HI] = state >> 32.
  __ movl(addr_0, EAX);  // _state[kSTATE_LO] = state & _MASK_32.
  ASSERT(target::ToRawSmi(0) == 0);
  __ xorl(EAX, EAX);  // Return Smi 0.
  __ ret();
}
1618
// Identity comparison.
// Compares the two argument words directly: equal tagged pointers (or equal
// Smis) are identical; everything else is not.
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label is_true;
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ cmpl(EAX, Address(ESP, +2 * target::kWordSize));
  __ j(EQUAL, &is_true, Assembler::kNearJump);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();
}
1632
// Jumps to 'target' if condition 'cc' holds for the unsigned comparison of
// (reg - low) against (high - low), i.e. a single-compare inclusive range
// test: values below 'low' wrap to large unsigned values and compare ABOVE.
// NOTE: clobbers 'reg' (left holding reg - low).
static void RangeCheck(Assembler* assembler,
                       Register reg,
                       intptr_t low,
                       intptr_t high,
                       Condition cc,
                       Label* target) {
  __ subl(reg, Immediate(low));
  __ cmpl(reg, Immediate(high - low));
  __ j(cc, target);
}
1643
// Conditions for use with RangeCheck above; they must be unsigned, since
// the subtract-and-compare trick relies on unsigned wraparound.
const Condition kIfNotInRange = ABOVE;
const Condition kIfInRange = BELOW_EQUAL;
1646
// Jumps to 'target' if 'cid' is an integer class id (Smi..Mint, inclusive).
// Clobbers 'cid' (see RangeCheck).
static void JumpIfInteger(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kSmiCid, kMintCid, kIfInRange, target);
}
1650
// Jumps to 'target' if 'cid' is NOT an integer class id. Clobbers 'cid'.
static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Label* target) {
  RangeCheck(assembler, cid, kSmiCid, kMintCid, kIfNotInRange, target);
}
1656
// Jumps to 'target' if 'cid' is any string class id (one-byte, two-byte,
// or the external variants). Clobbers 'cid'.
static void JumpIfString(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfInRange, target);
}
1661
// Jumps to 'target' if 'cid' is NOT a string class id. Clobbers 'cid'.
static void JumpIfNotString(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfNotInRange, target);
}
1666
// Return type quickly for simple types (not parameterized and not signature).
// Doubles, integers, and strings return the cached canonical type from the
// object store; other non-generic predefined/user classes return their
// declaration type. Closures and generic classes go to the runtime.
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_double, not_integer;
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(EDI, EAX);

  __ cmpl(EDI, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);  // Instance is a closure.

  __ cmpl(EDI, Immediate(kNumPredefinedCids));
  __ j(ABOVE, &use_declaration_type);

  // If object is an instance of _Double return double type.
  __ cmpl(EDI, Immediate(kDoubleCid));
  __ j(NOT_EQUAL, &not_double);

  __ LoadIsolate(EAX);
  __ movl(EAX, Address(EAX, target::Isolate::cached_object_store_offset()));
  __ movl(EAX, Address(EAX, target::ObjectStore::double_type_offset()));
  __ ret();

  __ Bind(&not_double);
  // If object is an integer (smi, mint or bigint) return int type.
  __ movl(EAX, EDI);  // Copy cid; JumpIfNotInteger clobbers its register.
  JumpIfNotInteger(assembler, EAX, &not_integer);

  __ LoadIsolate(EAX);
  __ movl(EAX, Address(EAX, target::Isolate::cached_object_store_offset()));
  __ movl(EAX, Address(EAX, target::ObjectStore::int_type_offset()));
  __ ret();

  __ Bind(&not_integer);
  // If object is a string (one byte, two byte or external variants) return
  // string type.
  __ movl(EAX, EDI);  // Copy cid again (EAX was clobbered above).
  JumpIfNotString(assembler, EAX, &use_declaration_type);

  __ LoadIsolate(EAX);
  __ movl(EAX, Address(EAX, target::Isolate::cached_object_store_offset()));
  __ movl(EAX, Address(EAX, target::ObjectStore::string_type_offset()));
  __ ret();

  // Object is neither double, nor integer, nor string.
  __ Bind(&use_declaration_type);
  __ LoadClassById(EBX, EDI);
  // Generic classes (num_type_arguments != 0) need runtime instantiation.
  __ movzxw(EDI, FieldAddress(EBX, target::Class::num_type_arguments_offset()));
  __ cmpl(EDI, Immediate(0));
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  __ movl(EAX, FieldAddress(EBX, target::Class::declaration_type_offset()));
  __ CompareObject(EAX, NullObject());
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);  // Not yet set.
  __ ret();

  __ Bind(normal_ir_body);
}
1723
// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal or not_equal,
// otherwise it jumps to normal_ir_body. May clobber cid1, cid2, and scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch) {
  Label different_cids, not_integer;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ cmpl(cid1, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);

  // Check whether class ids match. If class ids don't match types may still be
  // considered equivalent (e.g. multiple string implementation classes map to a
  // single String type).
  __ cmpl(cid1, cid2);
  __ j(NOT_EQUAL, &different_cids);

  // Types have the same class and neither is a closure type.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(scratch, cid1);
  __ movzxw(scratch,
            FieldAddress(scratch, target::Class::num_type_arguments_offset()));
  __ cmpl(scratch, Immediate(0));
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  __ jmp(equal);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations) or two integer types.
  __ Bind(&different_cids);
  // User-defined classes with different cids can never be equivalent.
  __ cmpl(cid1, Immediate(kNumPredefinedCids));
  __ j(ABOVE_EQUAL, not_equal);

  // Check if both are integer types.
  __ movl(scratch, cid1);  // Copy: JumpIfNotInteger clobbers its register.
  JumpIfNotInteger(assembler, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  // Otherwise types are unequiv because only integers have the same runtime
  // type as other integers.
  JumpIfInteger(assembler, cid2, equal);
  __ jmp(not_equal);

  __ Bind(&not_integer);
  // Check if the first type is String. If it is not then types are not
  // equivalent because they have different class ids and they are not strings
  // or integers.
  JumpIfNotString(assembler, cid1, not_equal);
  // First type is String. Check if the second is a string too.
  JumpIfString(assembler, cid2, equal);
  // String types are only equivalent to other String types.
  __ jmp(not_equal);
}
1782
// Returns true iff both arguments have the same runtime type, as decided
// by the EquivalentClassIds fast path; undecidable cases (closures, generic
// classes) fall through to the runtime.
void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(EDI, EAX);

  __ movl(EAX, Address(ESP, +2 * target::kWordSize));
  __ LoadClassIdMayBeSmi(EBX, EAX);

  Label equal, not_equal;
  // EAX serves as scratch and may be clobbered.
  EquivalentClassIds(assembler, normal_ir_body, &equal, &not_equal, EDI, EBX,
                     EAX);

  __ Bind(&equal);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}
1805
// Returns the cached string hash if it has already been computed; a stored
// hash of 0 means "not yet computed" and falls through to the runtime.
void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));  // String object.
  __ movl(EAX, FieldAddress(EAX, target::String::hash_offset()));
  __ cmpl(EAX, Immediate(0));
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}
1816
// int get Type.hashCode
// On stack: type (+1), return-address (+0).
void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
                                       Label* normal_ir_body) {
  __ movl(EAX, Address(ESP, +1 * target::kWordSize));  // Type object.
  __ movl(EAX, FieldAddress(EAX, target::Type::hash_offset()));
  // Zero means the hash has not been computed yet; defer to normal IR.
  __ testl(EAX, EAX);
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}
1827
// bool Type.==(other)
// On stack: receiver (+2), other (+1), return-address (+0).
void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids, check_legacy;

  __ movl(EDI, Address(ESP, +1 * target::kWordSize));  // Other.
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));  // Receiver.
  // Identical objects are trivially equal.
  __ cmpl(EDI, EBX);
  __ j(EQUAL, &equal);

  // EDI might not be a Type object, so check that first (EBX should be though,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(EAX, EDI);
  __ cmpl(EAX, Immediate(kTypeCid));
  __ j(NOT_EQUAL, normal_ir_body);

  // Check if types are syntactically equal.
  __ movl(ECX, FieldAddress(EDI, target::Type::type_class_id_offset()));
  __ SmiUntag(ECX);
  __ movl(EDX, FieldAddress(EBX, target::Type::type_class_id_offset()));
  __ SmiUntag(EDX);
  // NOTE(review): ECX (IC data) and EDX (args descriptor) are clobbered here
  // even though normal_ir_body is still reachable. The intrinsic epilogue
  // restores EDX from CALLEE_SAVED_TEMP -- confirm ECX is not needed on the
  // slow path.
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids, &not_equal, ECX,
                     EDX, EAX);

  // Check nullability.
  __ Bind(&equiv_cids);
  __ movzxb(EDI, FieldAddress(EDI, target::Type::nullability_offset()));
  __ movzxb(EBX, FieldAddress(EBX, target::Type::nullability_offset()));
  __ cmpl(EDI, EBX);
  __ j(NOT_EQUAL, &check_legacy, Assembler::kNearJump);
  // Fall through to equal case if nullability is strictly equal.

  __ Bind(&equal);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();

  // At this point the nullabilities are different, so they can only be
  // syntactically equivalent if they're both either kNonNullable or kLegacy.
  // These are the two largest values of the enum, so we can just do a < check.
  ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
         target::Nullability::kNonNullable < target::Nullability::kLegacy);
  __ Bind(&check_legacy);
  __ cmpl(EDI, Immediate(target::Nullability::kNonNullable));
  __ j(LESS, &not_equal, Assembler::kNearJump);
  __ cmpl(EBX, Immediate(target::Nullability::kNonNullable));
  __ j(GREATER_EQUAL, &equal, Assembler::kNearJump);

  __ Bind(&not_equal);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}
1880
// bool _substringMatches(int start, String other)
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  // For precompilation, not implemented on IA32. Emitting no code leaves the
  // method to the regular compilation pipeline.
}
1886
void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  // This intrinsic must never be requested on IA32.
  UNREACHABLE();
}
1891
void AsmIntrinsifier::Object_setHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  // This intrinsic must never be requested on IA32.
  UNREACHABLE();
}
1896
// String.charAt(int index): returns the predefined one-character symbol for
// the code unit at 'index', deferring to normal IR for code units without a
// predefined symbol.
// On stack: string (+2), index (+1), return-address (+0).
void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;
  __ movl(EBX, Address(ESP, +1 * target::kWordSize));  // Index.
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));  // String.
  __ testl(EBX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);  // Non-smi index.
  // Range check (unsigned compare also rejects negative indexes).
  __ cmpl(EBX, FieldAddress(EAX, target::String::length_offset()));
  // Runtime throws exception.
  __ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);
  __ CompareClassId(EAX, kOneByteStringCid, EDI);
  __ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump);
  __ SmiUntag(EBX);
  // EBX: code unit (one byte per character).
  __ movzxb(EBX, FieldAddress(EAX, EBX, TIMES_1,
                              target::OneByteString::data_offset()));
  // Only code units with predefined one-char symbols are intrinsified.
  __ cmpl(EBX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
  // Index into the predefined symbol table by code unit.
  __ movl(EAX, Immediate(SymbolsPredefinedAddress()));
  __ movl(EAX, Address(EAX, EBX, TIMES_4,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(EAX, kTwoByteStringCid, EDI);
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  // EBX is still a tagged Smi: the tag shift of 1 doubles the index, which is
  // exactly the byte offset of a two-byte code unit.
  ASSERT(kSmiTagShift == 1);
  __ movzxw(EBX, FieldAddress(EAX, EBX, TIMES_1,
                              target::TwoByteString::data_offset()));
  __ cmpl(EBX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
  __ movl(EAX, Immediate(SymbolsPredefinedAddress()));
  __ movl(EAX, Address(EAX, EBX, TIMES_4,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(normal_ir_body);
}
1937
1938void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
1939 Label* normal_ir_body) {
1940 Label is_true;
1941 // Get length.
1942 __ movl(EAX, Address(ESP, +1 * target::kWordSize)); // String object.
1943 __ movl(EAX, FieldAddress(EAX, target::String::length_offset()));
1944 __ cmpl(EAX, Immediate(target::ToRawSmi(0)));
1945 __ j(EQUAL, &is_true, Assembler::kNearJump);
1946 __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
1947 __ ret();
1948 __ Bind(&is_true);
1949 __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
1950 __ ret();
1951}
1952
// int get String.hashCode for _OneByteString: computes the hash on first
// use, caches it in the string's hash field, and returns it as a Smi.
// On stack: string (+1), return-address (+0).
void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label compute_hash;
  __ movl(EBX, Address(ESP, +1 * target::kWordSize));  // OneByteString object.
  __ movl(EAX, FieldAddress(EBX, target::String::hash_offset()));
  __ cmpl(EAX, Immediate(0));  // Zero means "not computed yet".
  __ j(EQUAL, &compute_hash, Assembler::kNearJump);
  __ ret();  // Cached hash (a Smi) is already in EAX.

  __ Bind(&compute_hash);
  // Hash not yet computed, use algorithm of class StringHasher.
  __ movl(ECX, FieldAddress(EBX, target::String::length_offset()));
  __ SmiUntag(ECX);
  __ xorl(EAX, EAX);
  __ xorl(EDI, EDI);
  // EBX: Instance of OneByteString.
  // ECX: String length, untagged integer.
  // EDI: Loop counter, untagged integer.
  // EAX: Hash code, untagged integer.
  Label loop, done, set_hash_code;
  __ Bind(&loop);
  __ cmpl(EDI, ECX);
  __ j(EQUAL, &done, Assembler::kNearJump);
  // Add to hash code: (hash_ is uint32)
  // hash_ += ch;
  // hash_ += hash_ << 10;
  // hash_ ^= hash_ >> 6;
  // Get one characters (ch).
  __ movzxb(EDX, FieldAddress(EBX, EDI, TIMES_1,
                              target::OneByteString::data_offset()));
  // EDX: ch and temporary.
  __ addl(EAX, EDX);
  __ movl(EDX, EAX);
  __ shll(EDX, Immediate(10));
  __ addl(EAX, EDX);
  __ movl(EDX, EAX);
  __ shrl(EDX, Immediate(6));
  __ xorl(EAX, EDX);

  __ incl(EDI);
  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&done);
  // Finalize:
  // hash_ += hash_ << 3;
  // hash_ ^= hash_ >> 11;
  // hash_ += hash_ << 15;
  __ movl(EDX, EAX);
  __ shll(EDX, Immediate(3));
  __ addl(EAX, EDX);
  __ movl(EDX, EAX);
  __ shrl(EDX, Immediate(11));
  __ xorl(EAX, EDX);
  __ movl(EDX, EAX);
  __ shll(EDX, Immediate(15));
  __ addl(EAX, EDX);
  // Truncate to the number of hash bits that fit in a Smi field:
  // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
  __ andl(
      EAX,
      Immediate(((static_cast<intptr_t>(1) << target::String::kHashBits) - 1)));

  // Zero is reserved for "not computed":
  // return hash_ == 0 ? 1 : hash_;
  __ cmpl(EAX, Immediate(0));
  __ j(NOT_EQUAL, &set_hash_code, Assembler::kNearJump);
  __ incl(EAX);
  __ Bind(&set_hash_code);
  __ SmiTag(EAX);
  // Cache the hash in the string before returning it.
  __ StoreIntoSmiField(FieldAddress(EBX, target::String::hash_offset()), EAX);
  __ ret();
}
2023
// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
// 'length_reg' contains the desired length as a _Smi or _Mint.
// Returns new string as tagged pointer in EAX.
// Jumps to 'ok' on success and to 'failure' when the length is not a
// non-negative Smi or the new-space allocation does not fit.
static void TryAllocateString(Assembler* assembler,
                              classid_t cid,
                              Label* ok,
                              Label* failure,
                              Register length_reg) {
  ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
  // _Mint length: call to runtime to produce error.
  __ BranchIfNotSmi(length_reg, failure);
  // negative length: call to runtime to produce error.
  __ cmpl(length_reg, Immediate(0));
  __ j(LESS, failure);

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, EAX, failure, false));
  if (length_reg != EDI) {
    __ movl(EDI, length_reg);
  }
  Label pop_and_fail;
  __ pushl(EDI);  // Preserve length (as a Smi) for the length field below.
  if (cid == kOneByteStringCid) {
    // One byte per character: untagged length == byte count.
    __ SmiUntag(EDI);
  } else {
    // Untag length and multiply by element size -> no-op.
  }
  // Round the byte count up to an object-alignment boundary.
  const intptr_t fixed_size_plus_alignment_padding =
      target::String::InstanceSize() +
      target::ObjectAlignment::kObjectAlignment - 1;
  __ leal(EDI, Address(EDI, TIMES_1,
                       fixed_size_plus_alignment_padding));  // EDI is untagged.
  __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment));

  __ movl(EAX, Address(THR, target::Thread::top_offset()));
  __ movl(EBX, EAX);

  // EDI: allocation size.
  __ addl(EBX, EDI);
  __ j(CARRY, &pop_and_fail);  // End-address computation overflowed.

  // Check if the allocation fits into the remaining space.
  // EAX: potential new object start.
  // EBX: potential next object start.
  // EDI: allocation size.
  __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
  __ j(ABOVE_EQUAL, &pop_and_fail);

  // Successfully allocated the object(s), now update top to point to
  // next object start and initialize the object.
  __ movl(Address(THR, target::Thread::top_offset()), EBX);
  __ addl(EAX, Immediate(kHeapObjectTag));

  // Initialize the tags.
  // EAX: new object start as a tagged pointer.
  // EBX: new object end address.
  // EDI: allocation size.
  {
    Label size_tag_overflow, done;
    __ cmpl(EDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
    __ shll(EDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2));
    __ jmp(&done, Assembler::kNearJump);

    __ Bind(&size_tag_overflow);
    // Too big for the inline size tag; encode the size tag as zero.
    __ xorl(EDI, EDI);
    __ Bind(&done);

    // Get the class index and insert it into the tags.
    const uint32_t tags =
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
    __ orl(EDI, Immediate(tags));
    __ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI);  // Tags.
  }

  // Set the length field.
  __ popl(EDI);  // Restore the Smi length pushed above.
  __ StoreIntoObjectNoBarrier(
      EAX, FieldAddress(EAX, target::String::length_offset()), EDI);
  // Clear hash.
  __ ZeroInitSmiField(FieldAddress(EAX, target::String::hash_offset()));
  __ jmp(ok, Assembler::kNearJump);

  __ Bind(&pop_and_fail);
  __ popl(EDI);  // Discard the preserved length before failing.
  __ jmp(failure);
}
2111
// Arg0: OneByteString (receiver)
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
                                                       Label* normal_ir_body) {
  const intptr_t kStringOffset = 3 * target::kWordSize;
  const intptr_t kStartIndexOffset = 2 * target::kWordSize;
  const intptr_t kEndIndexOffset = 1 * target::kWordSize;
  Label ok;
  __ movl(EAX, Address(ESP, +kStartIndexOffset));
  __ movl(EDI, Address(ESP, +kEndIndexOffset));
  // If either index is a non-Smi, the or'ed value has a tag bit set.
  __ orl(EAX, EDI);
  __ testl(EAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // 'start', 'end' not Smi.

  // EDI <- end - start: the result length, still a Smi.
  __ subl(EDI, Address(ESP, +kStartIndexOffset));
  TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body, EDI);
  __ Bind(&ok);
  // EAX: new string as tagged pointer.
  // Copy string.
  __ movl(EDI, Address(ESP, +kStringOffset));
  __ movl(EBX, Address(ESP, +kStartIndexOffset));
  __ SmiUntag(EBX);
  __ leal(EDI, FieldAddress(EDI, EBX, TIMES_1,
                            target::OneByteString::data_offset()));
  // EDI: Start address to copy from (untagged).
  // EBX: Untagged start index.
  __ movl(ECX, Address(ESP, +kEndIndexOffset));
  __ SmiUntag(ECX);
  __ subl(ECX, EBX);
  __ xorl(EDX, EDX);
  // EDI: Start address to copy from (untagged).
  // ECX: Untagged number of bytes to copy.
  // EAX: Tagged result string.
  // EDX: Loop counter.
  // EBX: Scratch register.
  // Byte-by-byte copy into the freshly allocated string.
  Label loop, check;
  __ jmp(&check, Assembler::kNearJump);
  __ Bind(&loop);
  __ movzxb(EBX, Address(EDI, EDX, TIMES_1, 0));
  __ movb(FieldAddress(EAX, EDX, TIMES_1, target::OneByteString::data_offset()),
          BL);
  __ incl(EDX);
  __ Bind(&check);
  __ cmpl(EDX, ECX);
  __ j(LESS, &loop, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
}
2162
// Writes a character into a _OneByteString. No checks are performed here;
// index and value are presumably validated by the caller -- TODO confirm.
// On stack: OneByteString (+3), index (+2), value (+1), return-address (+0).
void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ movl(ECX, Address(ESP, +1 * target::kWordSize));  // Value.
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));  // Index.
  __ movl(EAX, Address(ESP, +3 * target::kWordSize));  // OneByteString.
  __ SmiUntag(EBX);
  __ SmiUntag(ECX);
  // Store the low byte of the value at the indexed position.
  __ movb(FieldAddress(EAX, EBX, TIMES_1, target::OneByteString::data_offset()),
          CL);
  __ ret();
}
2174
// Writes a code unit into a _TwoByteString. No checks are performed here;
// index and value are presumably validated by the caller -- TODO confirm.
// On stack: TwoByteString (+3), index (+2), value (+1), return-address (+0).
void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ movl(ECX, Address(ESP, +1 * target::kWordSize));  // Value.
  __ movl(EBX, Address(ESP, +2 * target::kWordSize));  // Index.
  __ movl(EAX, Address(ESP, +3 * target::kWordSize));  // TwoByteString.
  // EBX stays a tagged Smi: the tag shift doubles the index, which is the
  // byte offset of a two-byte element.
  // Untag index and multiply by element size -> no-op.
  __ SmiUntag(ECX);
  // Store the low 16 bits of the value.
  __ movw(FieldAddress(EAX, EBX, TIMES_1, target::TwoByteString::data_offset()),
          ECX);
  __ ret();
}
2186
// Allocates a _OneByteString of the given length with uninitialized content.
// On stack: length (+1), return-address (+0).
void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ movl(EDI, Address(ESP, +1 * target::kWordSize));  // Length.
  Label ok;
  TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body, EDI);

  __ Bind(&ok);
  // EAX: new string as tagged pointer (content uninitialized).
  __ ret();

  __ Bind(normal_ir_body);
}
2199
// Allocates a _TwoByteString of the given length with uninitialized content.
// On stack: length (+1), return-address (+0).
void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ movl(EDI, Address(ESP, +1 * target::kWordSize));  // Length.
  Label ok;
  TryAllocateString(assembler, kTwoByteStringCid, &ok, normal_ir_body, EDI);

  __ Bind(&ok);
  // EAX: new string as tagged pointer (content uninitialized).
  __ ret();

  __ Bind(normal_ir_body);
}
2212
// TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
// Shared implementation of string == for one-byte and two-byte strings.
// Handles only receivers and arguments with the same cid; mixed
// representations fall back to normal_ir_body.
// On stack: this (+2), other (+1), return-address (+0).
static void StringEquality(Assembler* assembler,
                           Label* normal_ir_body,
                           intptr_t string_cid) {
  Label is_true, is_false, loop;
  __ movl(EAX, Address(ESP, +2 * target::kWordSize));  // This.
  __ movl(EBX, Address(ESP, +1 * target::kWordSize));  // Other.

  // Are identical?
  __ cmpl(EAX, EBX);
  __ j(EQUAL, &is_true, Assembler::kNearJump);

  // Is other OneByteString?
  __ testl(EBX, Immediate(kSmiTagMask));
  __ j(ZERO, &is_false);  // Smi
  __ CompareClassId(EBX, string_cid, EDI);
  // Different representation: let the runtime handle it.
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);

  // Have same length?
  __ movl(EDI, FieldAddress(EAX, target::String::length_offset()));
  __ cmpl(EDI, FieldAddress(EBX, target::String::length_offset()));
  __ j(NOT_EQUAL, &is_false, Assembler::kNearJump);

  // Check contents, no fall-through possible.
  // TODO(srdjan): write a faster check.
  // Compare characters back-to-front; EDI counts down from length - 1.
  __ SmiUntag(EDI);
  __ Bind(&loop);
  __ decl(EDI);
  __ cmpl(EDI, Immediate(0));
  __ j(LESS, &is_true, Assembler::kNearJump);  // All characters matched.
  if (string_cid == kOneByteStringCid) {
    __ movzxb(ECX, FieldAddress(EAX, EDI, TIMES_1,
                                target::OneByteString::data_offset()));
    __ movzxb(EDX, FieldAddress(EBX, EDI, TIMES_1,
                                target::OneByteString::data_offset()));
  } else if (string_cid == kTwoByteStringCid) {
    __ movzxw(ECX, FieldAddress(EAX, EDI, TIMES_2,
                                target::TwoByteString::data_offset()));
    __ movzxw(EDX, FieldAddress(EBX, EDI, TIMES_2,
                                target::TwoByteString::data_offset()));
  } else {
    UNIMPLEMENTED();
  }
  __ cmpl(ECX, EDX);
  __ j(NOT_EQUAL, &is_false, Assembler::kNearJump);
  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&is_true);
  __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&is_false);
  __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}
2270
// bool _OneByteString.==(other)
void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  StringEquality(assembler, normal_ir_body, kOneByteStringCid);
}
2275
// bool _TwoByteString.==(other)
void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
}
2280
// Tail-calls the precompiled regexp matcher specialized for the subject
// string's representation.
void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
                                                   Label* normal_ir_body,
                                                   bool sticky) {
  // Nothing to intrinsify when the regexp engine is interpreted.
  if (FLAG_interpret_irregexp) return;

  static const intptr_t kRegExpParamOffset = 3 * target::kWordSize;
  static const intptr_t kStringParamOffset = 2 * target::kWordSize;
  // start_index smi is located at offset 1.

  // Incoming registers:
  // EAX: Function. (Will be loaded with the specialized matcher function.)
  // ECX: Unknown. (Must be GC safe on tail call.)
  // EDX: Arguments descriptor. (Will be preserved.)

  // Load the specialized function pointer into EAX. Leverage the fact the
  // string CIDs as well as stored function pointers are in sequence.
  __ movl(EBX, Address(ESP, kRegExpParamOffset));
  __ movl(EDI, Address(ESP, kStringParamOffset));
  __ LoadClassId(EDI, EDI);
  // EDI: string cid relative to kOneByteStringCid, used to index the
  // per-representation function table in the RegExp object.
  __ SubImmediate(EDI, Immediate(kOneByteStringCid));
  __ movl(EAX, FieldAddress(
                   EBX, EDI, TIMES_4,
                   target::RegExp::function_offset(kOneByteStringCid, sticky)));

  // Registers are now set up for the lazy compile stub. It expects the function
  // in EAX, the argument descriptor in EDX, and IC-Data in ECX.
  __ xorl(ECX, ECX);

  // Tail-call the function.
  __ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
}
2312
// On stack: user tag (+1), return-address (+0).
// Installs the given UserTag as the isolate's current tag and returns the
// previously current UserTag.
void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
                                          Label* normal_ir_body) {
  // EDI: Isolate.
  __ LoadIsolate(EDI);
  // EAX: Previously current UserTag (the intrinsic's return value).
  __ movl(EAX, Address(EDI, target::Isolate::current_tag_offset()));
  // EBX: UserTag argument to install.
  __ movl(EBX, Address(ESP, +1 * target::kWordSize));
  // Set target::Isolate::current_tag_.
  __ movl(Address(EDI, target::Isolate::current_tag_offset()), EBX);
  // EBX: UserTag's tag word.
  __ movl(EBX, FieldAddress(EBX, target::UserTag::tag_offset()));
  // Set target::Isolate::user_tag_.
  __ movl(Address(EDI, target::Isolate::user_tag_offset()), EBX);
  __ ret();
}
2330
// Returns the isolate's default UserTag.
void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ LoadIsolate(EAX);
  __ movl(EAX, Address(EAX, target::Isolate::default_tag_offset()));
  __ ret();
}
2337
// Returns the isolate's currently active UserTag.
void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ LoadIsolate(EAX);
  __ movl(EAX, Address(EAX, target::Isolate::current_tag_offset()));
  __ ret();
}
2344
2345void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
2346 Label* normal_ir_body) {
2347#if !defined(SUPPORT_TIMELINE)
2348 __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
2349 __ ret();
2350#else
2351 Label true_label;
2352 // Load TimelineStream*.
2353 __ movl(EAX, Address(THR, target::Thread::dart_stream_offset()));
2354 // Load uintptr_t from TimelineStream*.
2355 __ movl(EAX, Address(EAX, target::TimelineStream::enabled_offset()));
2356 __ cmpl(EAX, Immediate(0));
2357 __ j(NOT_ZERO, &true_label, Assembler::kNearJump);
2358 // Not enabled.
2359 __ LoadObject(EAX, CastHandle<Object>(FalseObject()));
2360 __ ret();
2361 // Enabled.
2362 __ Bind(&true_label);
2363 __ LoadObject(EAX, CastHandle<Object>(TrueObject()));
2364 __ ret();
2365#endif
2366}
2367
// Clears the thread's async stack trace slot and returns null.
void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
                                                 Label* normal_ir_body) {
  __ LoadObject(EAX, NullObject());
  __ movl(Address(THR, target::Thread::async_stack_trace_offset()), EAX);
  __ ret();
}
2374
// Stores a stack trace into the thread's async stack trace slot and
// returns null.
void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
                                               Label* normal_ir_body) {
  // NOTE(review): EAX is stored without being loaded from an argument stack
  // slot here -- this relies on EAX already holding the stack trace on
  // entry; confirm against the intrinsic calling convention.
  __ movl(Address(THR, target::Thread::async_stack_trace_offset()), EAX);
  __ LoadObject(EAX, NullObject());
  __ ret();
}
2381
2382#undef __
2383
2384} // namespace compiler
2385} // namespace dart
2386
2387#endif // defined(TARGET_ARCH_IA32)
2388