// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
#if defined(TARGET_ARCH_X64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// R10: Arguments descriptor
// TOS: Return address
// The R10 register can be destroyed only if there is no slow path, i.e.
// if the intrinsified method always executes a return.
// The RBP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_x64.h) must be preserved.
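//
// Throughout this file a Smi is an integer stored shifted left by one with a
// zero tag bit (kSmiTag == 0, kSmiTagShift == 1), so the tagged word of a
// Smi v is simply 2 * v; many of the intrinsics below exploit this
// representation directly.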

#define __ assembler->

intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
  return 0;
}

static bool IsABIPreservedRegister(Register reg) {
  return ((1 << reg) & CallingConventions::kCalleeSaveCpuRegisters) != 0;
}

void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
  ASSERT(IsABIPreservedRegister(CODE_REG));
  ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG));
  ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
  ASSERT(CALLEE_SAVED_TEMP != CODE_REG);
  ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG);

  assembler->Comment("IntrinsicCallPrologue");
  assembler->movq(CALLEE_SAVED_TEMP, ARGS_DESC_REG);
}

void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
  assembler->Comment("IntrinsicCallEpilogue");
  assembler->movq(ARGS_DESC_REG, CALLEE_SAVED_TEMP);
}

// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+2), data (+1), return-address (+0).
void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
                                             Label* normal_ir_body) {
  // This snippet of inlined code uses the following registers:
  // RAX, RCX, R13
  // and the newly allocated object is returned in RAX.
  const intptr_t kTypeArgumentsOffset = 2 * target::kWordSize;
  const intptr_t kArrayOffset = 1 * target::kWordSize;

  // Try allocating in new space.
  const Class& cls = GrowableObjectArrayClass();
  __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, RAX, R13);

  // Store backing array object in growable array object.
  __ movq(RCX, Address(RSP, kArrayOffset));  // data argument.
  // RAX is new, no barrier needed.
  __ StoreIntoObjectNoBarrier(
      RAX, FieldAddress(RAX, target::GrowableObjectArray::data_offset()), RCX);

  // RAX: new growable array object start as a tagged pointer.
  // Store the type argument field in the growable array object.
  __ movq(RCX, Address(RSP, kTypeArgumentsOffset));  // type argument.
  __ StoreIntoObjectNoBarrier(
      RAX,
      FieldAddress(RAX, target::GrowableObjectArray::type_arguments_offset()),
      RCX);

  // Set the length field in the growable array object to 0.
  __ ZeroInitSmiField(
      FieldAddress(RAX, target::GrowableObjectArray::length_offset()));
  __ ret();  // returns the newly allocated object in RAX.

  __ Bind(normal_ir_body);
}

#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_factor)                     \
  Label fall_through;                                                          \
  const intptr_t kArrayLengthStackOffset = 1 * target::kWordSize;              \
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, normal_ir_body, false));         \
  __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */     \
  /* Check that length is a positive Smi. */                                   \
  /* RDI: requested array length argument. */                                  \
  __ testq(RDI, Immediate(kSmiTagMask));                                       \
  __ j(NOT_ZERO, normal_ir_body);                                              \
  __ cmpq(RDI, Immediate(0));                                                  \
  __ j(LESS, normal_ir_body);                                                  \
  __ SmiUntag(RDI);                                                            \
  /* Check for maximum allowed length. */                                      \
  /* RDI: untagged array length. */                                            \
  __ cmpq(RDI, Immediate(max_len));                                            \
  __ j(GREATER, normal_ir_body);                                               \
  /* Special case for scaling by 16. */                                        \
  if (scale_factor == TIMES_16) {                                              \
    /* double length of array. */                                              \
    __ addq(RDI, RDI);                                                         \
    /* only scale by 8. */                                                     \
    scale_factor = TIMES_8;                                                    \
  }                                                                            \
  const intptr_t fixed_size_plus_alignment_padding =                           \
      target::TypedData::InstanceSize() +                                      \
      target::ObjectAlignment::kObjectAlignment - 1;                           \
  __ leaq(RDI, Address(RDI, scale_factor, fixed_size_plus_alignment_padding)); \
  __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));         \
  __ movq(RAX, Address(THR, target::Thread::top_offset()));                    \
  __ movq(RCX, RAX);                                                           \
                                                                               \
  /* RDI: allocation size. */                                                  \
  __ addq(RCX, RDI);                                                           \
  __ j(CARRY, normal_ir_body);                                                 \
                                                                               \
  /* Check if the allocation fits into the remaining space. */                 \
  /* RAX: potential new object start. */                                       \
  /* RCX: potential next object start. */                                      \
  /* RDI: allocation size. */                                                  \
  __ cmpq(RCX, Address(THR, target::Thread::end_offset()));                    \
  __ j(ABOVE_EQUAL, normal_ir_body);                                           \
                                                                               \
  /* Successfully allocated the object(s), now update top to point to */       \
  /* next object start and initialize the object. */                           \
  __ movq(Address(THR, target::Thread::top_offset()), RCX);                    \
  __ addq(RAX, Immediate(kHeapObjectTag));                                     \
  /* Initialize the tags. */                                                   \
  /* RAX: new object start as a tagged pointer. */                             \
  /* RCX: new object end address. */                                           \
  /* RDI: allocation size. */                                                  \
  /* R13: scratch register. */                                                 \
  {                                                                            \
    Label size_tag_overflow, done;                                             \
    __ cmpq(RDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));         \
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);                     \
    __ shlq(RDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -          \
                           target::ObjectAlignment::kObjectAlignmentLog2));    \
    __ jmp(&done, Assembler::kNearJump);                                       \
                                                                               \
    __ Bind(&size_tag_overflow);                                               \
    __ LoadImmediate(RDI, Immediate(0));                                       \
    __ Bind(&done);                                                            \
                                                                               \
    /* Get the class index and insert it into the tags. */                     \
    uint32_t tags =                                                            \
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);        \
    __ orq(RDI, Immediate(tags));                                              \
    __ movq(FieldAddress(RAX, target::Object::tags_offset()),                  \
            RDI); /* Tags. */                                                  \
  }                                                                            \
  /* Set the length field. */                                                  \
  /* RAX: new object start as a tagged pointer. */                             \
  /* RCX: new object end address. */                                           \
  __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */     \
  __ StoreIntoObjectNoBarrier(                                                 \
      RAX, FieldAddress(RAX, target::TypedDataBase::length_offset()), RDI);    \
  /* Initialize all array elements to 0. */                                    \
  /* RAX: new object start as a tagged pointer. */                             \
  /* RCX: new object end address. */                                           \
  /* RDI: iterator which initially points to the start of the variable */     \
  /* data area to be initialized. */                                           \
  /* RBX: scratch register. */                                                 \
  __ xorq(RBX, RBX); /* Zero. */                                               \
  __ leaq(RDI, FieldAddress(RAX, target::TypedData::InstanceSize()));          \
  __ StoreInternalPointer(                                                     \
      RAX, FieldAddress(RAX, target::TypedDataBase::data_field_offset()),      \
      RDI);                                                                    \
  Label done, init_loop;                                                       \
  __ Bind(&init_loop);                                                         \
  __ cmpq(RDI, RCX);                                                           \
  __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);                              \
  __ movq(Address(RDI, 0), RBX);                                               \
  __ addq(RDI, Immediate(target::kWordSize));                                  \
  __ jmp(&init_loop, Assembler::kNearJump);                                    \
  __ Bind(&done);                                                              \
                                                                               \
  __ ret();                                                                    \
  __ Bind(normal_ir_body);

static ScaleFactor GetScaleFactor(intptr_t size) {
  switch (size) {
    case 1:
      return TIMES_1;
    case 2:
      return TIMES_2;
    case 4:
      return TIMES_4;
    case 8:
      return TIMES_8;
    case 16:
      return TIMES_16;
  }
  UNREACHABLE();
  return static_cast<ScaleFactor>(0);
}

#define TYPED_DATA_ALLOCATOR(clazz)                                            \
  void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler,      \
                                                    Label* normal_ir_body) {   \
    intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid);       \
    intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid);   \
    ScaleFactor scale = GetScaleFactor(size);                                  \
    TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, scale);            \
  }
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR

// Tests whether the two topmost arguments are Smis; jumps to the label
// not_smi if not. The topmost argument is returned in RAX.
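// Example: with kSmiTagMask == 1 and a zero tag bit for Smis, OR-ing the two
// words and testing the low bit catches a non-Smi in either position with a
// single branch, since the OR has its low bit set iff at least one operand
// does.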
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movq(RCX, Address(RSP, +2 * target::kWordSize));
  __ orq(RCX, RAX);
  __ testq(RCX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, not_smi);
}

void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX contains right argument.
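  // Both values are tagged Smis (2 * value), so adding the raw words yields
  // the correctly tagged sum: 2*a + 2*b == 2*(a + b).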
  __ addq(RAX, Address(RSP, +2 * target::kWordSize));
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
  Integer_addFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX contains right argument, which is the actual minuend of subtraction.
  __ subq(RAX, Address(RSP, +2 * target::kWordSize));
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX contains right argument, which is the actual subtrahend of subtraction.
  __ movq(RCX, RAX);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));
  __ subq(RAX, RCX);
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX is the right argument.
  ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
  __ SmiUntag(RAX);
  __ imulq(RAX, Address(RSP, +2 * target::kWordSize));
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
  Integer_mulFromInteger(assembler, normal_ir_body);
}

// Optimizations:
// - result is 0 if:
//   - left is 0
//   - left equals right
// - result is left if:
//   - left > 0 && left < right
// RAX: Tagged left (dividend).
// RCX: Tagged right (divisor).
// Returns:
// RAX: Untagged fallthrough result (remainder to be adjusted), or
// RAX: Tagged return result (remainder).
static void EmitRemainderOperation(Assembler* assembler) {
  Label return_zero, try_modulo, not_32bit, done;
  // Check for quick zero results.
  __ cmpq(RAX, Immediate(0));
  __ j(EQUAL, &return_zero, Assembler::kNearJump);
  __ cmpq(RAX, RCX);
  __ j(EQUAL, &return_zero, Assembler::kNearJump);

  // Check if result equals left.
  __ cmpq(RAX, Immediate(0));
  __ j(LESS, &try_modulo, Assembler::kNearJump);
  // left is positive.
  __ cmpq(RAX, RCX);
  __ j(GREATER, &try_modulo, Assembler::kNearJump);
  // left is less than right, result is left (RAX).
  __ ret();

  __ Bind(&return_zero);
  __ xorq(RAX, RAX);
  __ ret();

  __ Bind(&try_modulo);

  // Check if both operands fit into 32 bits, as idiv with 64-bit operands
  // requires twice as many cycles and has much higher latency. We check this
  // before untagging them to avoid the corner case of dividing INT_MIN by -1,
  // which raises an exception because the quotient is too large for a 32-bit
  // register.
  __ movsxd(RBX, RAX);
  __ cmpq(RBX, RAX);
  __ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);
  __ movsxd(RBX, RCX);
  __ cmpq(RBX, RCX);
  __ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);

  // Both operands are 31-bit Smis. Divide using 32-bit idiv.
  __ SmiUntag(RAX);
  __ SmiUntag(RCX);
  __ cdq();
  __ idivl(RCX);
  __ movsxd(RAX, RDX);
  __ jmp(&done, Assembler::kNearJump);

  // Divide using 64-bit idiv.
  __ Bind(&not_32bit);
  __ SmiUntag(RAX);
  __ SmiUntag(RCX);
  __ cqo();
  __ idivq(RCX);
  __ movq(RAX, RDX);
  __ Bind(&done);
}

// Implementation:
//    res = left % right;
//    if (res < 0) {
//      if (right < 0) {
//        res = res - right;
//      } else {
//        res = res + right;
//      }
//    }
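// Example: -5 % 3 has truncated remainder -2; right > 0, so res = -2 + 3 = 1.
// For -5 % -3 the remainder is also -2; right < 0, so res = -2 - (-3) = 1.
// Either way the result is non-negative, as Dart's % requires.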
void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label negative_result;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ movq(RCX, Address(RSP, +2 * target::kWordSize));
  // RAX: Tagged left (dividend).
  // RCX: Tagged right (divisor).
  __ cmpq(RCX, Immediate(0));
  __ j(EQUAL, normal_ir_body);
  EmitRemainderOperation(assembler);
  // Untagged remainder result in RAX.
  __ cmpq(RAX, Immediate(0));
  __ j(LESS, &negative_result, Assembler::kNearJump);
  __ SmiTag(RAX);
  __ ret();

  __ Bind(&negative_result);
  Label subtract;
  // RAX: Untagged result.
  // RCX: Untagged right.
  __ cmpq(RCX, Immediate(0));
  __ j(LESS, &subtract, Assembler::kNearJump);
  __ addq(RAX, RCX);
  __ SmiTag(RAX);
  __ ret();

  __ Bind(&subtract);
  __ subq(RAX, RCX);
  __ SmiTag(RAX);
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
                                          Label* normal_ir_body) {
  Label not_32bit;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX: right argument (divisor)
  __ cmpq(RAX, Immediate(0));
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);
  __ movq(RCX, RAX);
  __ movq(RAX,
          Address(RSP, +2 * target::kWordSize));  // Left argument (dividend).

  // Check if both operands fit into 32 bits, as idiv with 64-bit operands
  // requires twice as many cycles and has much higher latency. We check this
  // before untagging them to avoid the corner case of dividing INT_MIN by -1,
  // which raises an exception because the quotient is too large for a 32-bit
  // register.
  __ movsxd(RBX, RAX);
  __ cmpq(RBX, RAX);
  __ j(NOT_EQUAL, &not_32bit);
  __ movsxd(RBX, RCX);
  __ cmpq(RBX, RCX);
  __ j(NOT_EQUAL, &not_32bit);

  // Both operands are 31-bit Smis. Divide using 32-bit idiv.
  __ SmiUntag(RAX);
  __ SmiUntag(RCX);
  __ cdq();
  __ idivl(RCX);
  __ movsxd(RAX, RAX);
  __ SmiTag(RAX);  // Result is guaranteed to fit into a Smi.
  __ ret();

  // Divide using 64-bit idiv.
  __ Bind(&not_32bit);
  __ SmiUntag(RAX);
  __ SmiUntag(RCX);
  __ pushq(RDX);  // Preserve RDX in case of 'fall_through'.
  __ cqo();
  __ idivq(RCX);
  __ popq(RDX);
  // Check the corner case of dividing MIN_SMI by -1, in which case we cannot
  // tag the result.
  __ cmpq(RAX, Immediate(0x4000000000000000));
  __ j(EQUAL, normal_ir_body);
  __ SmiTag(RAX);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_negate(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);  // Non-smi value.
  __ negq(RAX);
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX is the right argument.
  __ andq(RAX, Address(RSP, +2 * target::kWordSize));
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
                                               Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX is the right argument.
  __ orq(RAX, Address(RSP, +2 * target::kWordSize));
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_bitOrFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX is the right argument.
  __ xorq(RAX, Address(RSP, +2 * target::kWordSize));
  // Result is in RAX.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitXorFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  Label overflow;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // Shift value is in RAX. Compare with tagged Smi.
  __ cmpq(RAX, Immediate(target::ToRawSmi(target::kSmiBits)));
  __ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);

  __ SmiUntag(RAX);
  __ movq(RCX, RAX);  // Shift amount must be in RCX.
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Value.

  // Overflow test - all the shifted-out bits must be the same as the sign bit.
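  // Illustration: in a 4-bit register, 0b0011 << 2 == 0b1100; arithmetic
  // shifting back by 2 gives 0b1111 != 0b0011, so significant bits (here the
  // sign) were lost. When nothing significant is shifted out, the round trip
  // restores the original value exactly.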
  __ movq(RDI, RAX);
  __ shlq(RAX, RCX);
  __ sarq(RAX, RCX);
  __ cmpq(RAX, RDI);
  __ j(NOT_EQUAL, &overflow, Assembler::kNearJump);

  __ shlq(RAX, RCX);  // Shift for result now that we know there is no overflow.

  // RAX is a correctly tagged Smi.
  __ ret();

  __ Bind(&overflow);
  // Mint is rarely used on x64 (only for integers requiring 64 bits instead
  // of the 63 bits representable by a Smi).
  __ Bind(normal_ir_body);
}

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label true_label;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX contains the right argument.
  __ cmpq(Address(RSP, +2 * target::kWordSize), RAX);
  __ j(true_condition, &true_label, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS);
}

void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
                                                 Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS_EQUAL);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER_EQUAL);
}

// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  const intptr_t kReceiverOffset = 2;
  const intptr_t kArgumentOffset = 1;

  // For integer receiver '===' check first.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ movq(RCX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ cmpq(RAX, RCX);
  __ j(EQUAL, &true_label, Assembler::kNearJump);
  __ orq(RAX, RCX);
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
  // Both arguments are smi, '===' is good enough.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);
  __ movq(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &receiver_not_smi);

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ CompareClassId(RAX, kDoubleCid);
  __ j(EQUAL, normal_ir_body);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(&receiver_not_smi);
  // RAX: receiver.
  __ CompareClassId(RAX, kMintCid);
  __ j(NOT_EQUAL, normal_ir_body);
  // Receiver is Mint, return false if right is Smi.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Smi == Mint -> false.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
  Label shift_count_ok;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  const Immediate& count_limit = Immediate(0x3F);
  // Check that the count is not larger than what the hardware can handle.
  // For shifting right a Smi the result is the same for all numbers
  // >= count_limit.
  __ SmiUntag(RAX);
  // Negative counts throw exception.
  __ cmpq(RAX, Immediate(0));
  __ j(LESS, normal_ir_body, Assembler::kNearJump);
  __ cmpq(RAX, count_limit);
  __ j(LESS_EQUAL, &shift_count_ok, Assembler::kNearJump);
  __ movq(RAX, count_limit);
  __ Bind(&shift_count_ok);
  __ movq(RCX, RAX);  // Shift amount must be in RCX.
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Value.
  __ SmiUntag(RAX);  // Value.
  __ sarq(RAX, RCX);
  __ SmiTag(RAX);
  __ ret();
  __ Bind(normal_ir_body);
}

// Argument is Smi (receiver).
void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Index.
  __ notq(RAX);
  __ andq(RAX, Immediate(~kSmiTagMask));  // Remove inverted smi-tag.
  __ ret();
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Index.
  // XOR with sign bit to complement bits if value is negative.
  __ movq(RCX, RAX);
  __ sarq(RCX, Immediate(63));  // All 0 or all 1.
  __ xorq(RAX, RCX);
  // BSR does not write the destination register if source is zero. Put a 1 in
  // the Smi tag bit to ensure BSR writes to the destination register.
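  // Because RAX still holds the tagged value (v << 1), BSR computes
  // bitLength(v) directly: e.g. v = 5 is tagged as 1010b; OR-ing in the tag
  // bit gives 1011b and BSR returns 3 == 5.bitLength. For v = -1 the XOR
  // above yields 0, the OR gives 1, and BSR returns 0 == (-1).bitLength.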
  __ orq(RAX, Immediate(kSmiTagMask));
  __ bsrq(RAX, RAX);
  __ SmiTag(RAX);
  __ ret();
}

void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
                                        Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)
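  //
  // In outline: writes x_digits << n into r_digits, processing the 32-bit
  // digits as 64-bit pairs from the most significant end downward so that
  // SHLD can carry bits across pair boundaries.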

  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(R8, Address(RSP, 3 * target::kWordSize));  // x_used is Smi
  __ subq(R8, Immediate(2));  // x_used > 0, Smi. R8 = x_used - 1, round up.
  __ sarq(R8, Immediate(2));  // R8 + 1 = number of digit pairs to read.
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // n is Smi
  __ SmiUntag(RCX);
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits
  __ movq(RSI, RCX);
  __ sarq(RSI, Immediate(6));  // RSI = n ~/ (2*_DIGIT_BITS).
  __ leaq(RBX,
          FieldAddress(RBX, RSI, TIMES_8, target::TypedData::data_offset()));
  __ xorq(RAX, RAX);  // RAX = 0.
  __ movq(RDX,
          FieldAddress(RDI, R8, TIMES_8, target::TypedData::data_offset()));
  __ shldq(RAX, RDX, RCX);
  __ movq(Address(RBX, R8, TIMES_8, 2 * kBytesPerBigIntDigit), RAX);
  Label last;
  __ cmpq(R8, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movq(RAX, RDX);
  __ movq(RDX, FieldAddress(RDI, R8, TIMES_8,
                            target::TypedData::data_offset() -
                                2 * kBytesPerBigIntDigit));
  __ shldq(RAX, RDX, RCX);
  __ movq(Address(RBX, R8, TIMES_8, 0), RAX);
  __ decq(R8);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shldq(RDX, R8, RCX);  // R8 == 0.
  __ movq(Address(RBX, 0), RDX);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)
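  //
  // In outline: writes x_digits >> n into r_digits, processing the 32-bit
  // digits as 64-bit pairs from the least significant end upward so that
  // SHRD can carry bits across pair boundaries.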

  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // n is Smi
  __ SmiUntag(RCX);
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits
  __ movq(RDX, RCX);
  __ sarq(RDX, Immediate(6));  // RDX = n ~/ (2*_DIGIT_BITS).
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // x_used is Smi
  __ subq(RSI, Immediate(2));  // x_used > 0, Smi. RSI = x_used - 1, round up.
  __ sarq(RSI, Immediate(2));
  __ leaq(RDI,
          FieldAddress(RDI, RSI, TIMES_8, target::TypedData::data_offset()));
  __ subq(RSI, RDX);  // RSI + 1 = number of digit pairs to read.
  __ leaq(RBX,
          FieldAddress(RBX, RSI, TIMES_8, target::TypedData::data_offset()));
  __ negq(RSI);
  __ movq(RDX, Address(RDI, RSI, TIMES_8, 0));
  Label last;
  __ cmpq(RSI, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movq(RAX, RDX);
  __ movq(RDX, Address(RDI, RSI, TIMES_8, 2 * kBytesPerBigIntDigit));
  __ shrdq(RAX, RDX, RCX);
  __ movq(Address(RBX, RSI, TIMES_8, 0), RAX);
  __ incq(RSI);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shrdq(RDX, RSI, RCX);  // RSI == 0.
  __ movq(Address(RBX, 0), RDX);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // digits
  __ movq(R8, Address(RSP, 4 * target::kWordSize));  // used is Smi
  __ addq(R8, Immediate(2));  // used > 0, Smi. R8 = used + 1, round up.
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // a_used is Smi
  __ addq(RCX, Immediate(2));  // a_used > 0, Smi. RCX = a_used + 1, round up.
  __ sarq(RCX, Immediate(2));  // RCX = number of digit pairs to process.
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subq(R8, RCX);
  __ incq(R8);  // To account for the extra test between loops.

  __ xorq(RDX, RDX);  // RDX = 0, carry flag = 0.
  Label add_loop;
  __ Bind(&add_loop);
  // Loop (a_used+1)/2 times, RCX > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
  __ adcq(RAX,
          FieldAddress(RSI, RDX, TIMES_8, target::TypedData::data_offset()));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(RCX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &add_loop, Assembler::kNearJump);

  Label last_carry;
  __ decq(R8);  // Does not affect carry flag.
  __ j(ZERO, &last_carry, Assembler::kNearJump);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
  __ adcq(RAX, Immediate(0));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(R8);  // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&last_carry);
  Label done;
  __ j(NOT_CARRY, &done);
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
          Immediate(1));

  __ Bind(&done);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // digits
  __ movq(R8, Address(RSP, 4 * target::kWordSize));  // used is Smi
  __ addq(R8, Immediate(2));  // used > 0, Smi. R8 = used + 1, round up.
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // a_used is Smi
  __ addq(RCX, Immediate(2));  // a_used > 0, Smi. RCX = a_used + 1, round up.
  __ sarq(RCX, Immediate(2));  // RCX = number of digit pairs to process.
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subq(R8, RCX);
  __ incq(R8);  // To account for the extra test between loops.

  __ xorq(RDX, RDX);  // RDX = 0, carry flag = 0.
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop (a_used+1)/2 times, RCX > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
  __ sbbq(RAX,
          FieldAddress(RSI, RDX, TIMES_8, target::TypedData::data_offset()));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(RCX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &sub_loop, Assembler::kNearJump);

  Label done;
  __ decq(R8);  // Does not affect carry flag.
  __ j(ZERO, &done, Assembler::kNearJump);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::data_offset()));
  __ sbbq(RAX, Immediate(0));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::data_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(R8);  // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&done);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1];  // xi is Smi and even.
  //   if (x == 0 || n == 0) {
  //     return 2;
  //   }
  //   uint64_t* mip = &m_digits[i >> 1];  // i is Smi and even.
  //   uint64_t* ajp = &a_digits[j >> 1];  // j is Smi and even.
  //   uint64_t c = 0;
  //   SmiUntag(n);  // n is Smi and even.
  //   n = (n + 1)/2;  // Number of pairs to process.
  //   do {
  //     uint64_t mi = *mip++;
  //     uint64_t aj = *ajp;
  //     uint128_t t = x*mi + aj + c;  // 64-bit * 64-bit -> 128-bit.
  //     *ajp++ = low64(t);
  //     c = high64(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint128_t t = *ajp + c;
  //     *ajp++ = low64(t);
  //     c = high64(t);  // c == 0 or 1.
  //   }
  //   return 2;
  // }

  Label done;
  // RBX = x, done if x == 0
  __ movq(RCX, Address(RSP, 7 * target::kWordSize));  // x_digits
  __ movq(RAX, Address(RSP, 6 * target::kWordSize));  // xi is Smi
  __ movq(RBX,
          FieldAddress(RCX, RAX, TIMES_2, target::TypedData::data_offset()));
  __ testq(RBX, RBX);
  __ j(ZERO, &done, Assembler::kNearJump);

  // R8 = (SmiUntag(n) + 1)/2, no_op if n == 0
  __ movq(R8, Address(RSP, 1 * target::kWordSize));
  __ addq(R8, Immediate(2));
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ j(ZERO, &done, Assembler::kNearJump);

  // RDI = mip = &m_digits[i >> 1]
  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // m_digits
  __ movq(RAX, Address(RSP, 4 * target::kWordSize));  // i is Smi
  __ leaq(RDI,
          FieldAddress(RDI, RAX, TIMES_2, target::TypedData::data_offset()));

  // RSI = ajp = &a_digits[j >> 1]
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RAX, Address(RSP, 2 * target::kWordSize));  // j is Smi
  __ leaq(RSI,
          FieldAddress(RSI, RAX, TIMES_2, target::TypedData::data_offset()));

  // RCX = c = 0
  __ xorq(RCX, RCX);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x:   RBX
  // mip: RDI
  // ajp: RSI
  // c:   RCX
  // t:   RDX:RAX (not live at loop entry)
  // n:   R8

  // uint64_t mi = *mip++
  __ movq(RAX, Address(RDI, 0));
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // uint128_t t = x*mi
  __ mulq(RBX);  // t = RDX:RAX = RAX * RBX, 64-bit * 64-bit -> 128-bit
  __ addq(RAX, RCX);  // t += c
  __ adcq(RDX, Immediate(0));

  // uint64_t aj = *ajp; t += aj
  __ addq(RAX, Address(RSI, 0));
  __ adcq(RDX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // c = high64(t)
  __ movq(RCX, RDX);

  // while (--n > 0)
  __ decq(R8);  // --n
  __ j(NOT_ZERO, &muladd_loop, Assembler::kNearJump);

  __ testq(RCX, RCX);
  __ j(ZERO, &done, Assembler::kNearJump);

  // *ajp += c
  __ addq(Address(RSI, 0), RCX);
  __ j(NOT_CARRY, &done, Assembler::kNearJump);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));
  __ incq(Address(RSI, 0));  // c == 0 or 1
  __ j(CARRY, &propagate_carry_loop, Assembler::kNearJump);

  __ Bind(&done);
  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint64_t* xip = &x_digits[i >> 1];  // i is Smi and even.
  //   uint64_t x = *xip++;
  //   if (x == 0) return 2;
  //   uint64_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint64_t aj = *ajp;
  //   uint128_t t = x*x + aj;
  //   *ajp++ = low64(t);
  //   uint128_t c = high64(t);
  //   int n = ((used - i + 2) >> 2) - 1;  // used and i are Smi. n: num pairs.
  //   while (--n >= 0) {
  //     uint64_t xi = *xip++;
  //     uint64_t aj = *ajp;
  //     uint192_t t = 2*x*xi + aj + c;  // 2-bit * 64-bit * 64-bit -> 129-bit.
  //     *ajp++ = low64(t);
  //     c = high128(t);  // 65-bit.
  //   }
  //   uint64_t aj = *ajp;
  //   uint128_t t = aj + c;  // 64-bit + 65-bit -> 66-bit.
  //   *ajp++ = low64(t);
  //   *ajp = high64(t);
  //   return 2;
  // }

  // RDI = xip = &x_digits[i >> 1]
  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(RAX, Address(RSP, 3 * target::kWordSize));  // i is Smi
  __ leaq(RDI,
          FieldAddress(RDI, RAX, TIMES_2, target::TypedData::data_offset()));

  // RBX = x = *xip++, return if x == 0
  Label x_zero;
  __ movq(RBX, Address(RDI, 0));
  __ cmpq(RBX, Immediate(0));
  __ j(EQUAL, &x_zero);
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // RSI = ajp = &a_digits[i]
  __ movq(RSI, Address(RSP, 2 * target::kWordSize));  // a_digits
  __ leaq(RSI,
          FieldAddress(RSI, RAX, TIMES_4, target::TypedData::data_offset()));

  // RDX:RAX = t = x*x + *ajp
  __ movq(RAX, RBX);
  __ mulq(RBX);
  __ addq(RAX, Address(RSI, 0));
  __ adcq(RDX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // int n = (used - i + 1)/2 - 1
  __ movq(R8, Address(RSP, 1 * target::kWordSize));  // used is Smi
  __ subq(R8, Address(RSP, 3 * target::kWordSize));  // i is Smi
  __ addq(R8, Immediate(2));
  __ sarq(R8, Immediate(2));
  __ decq(R8);  // R8 = number of digit pairs to process.

  // uint128_t c = high64(t)
  __ xorq(R13, R13);  // R13 = high64(c) == 0
  __ movq(R12, RDX);  // R12 = low64(c) == high64(t)

  Label loop, done;
  __ Bind(&loop);
  // x:   RBX
  // xip: RDI
  // ajp: RSI
  // c:   R13:R12
  // t:   RCX:RDX:RAX (not live at loop entry)
  // n:   R8

  // while (--n >= 0)
  __ decq(R8);  // --n
  __ j(NEGATIVE, &done, Assembler::kNearJump);

  // uint64_t xi = *xip++
  __ movq(RAX, Address(RDI, 0));
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // uint192_t t = RCX:RDX:RAX = 2*x*xi + aj + c
  __ mulq(RBX);  // RDX:RAX = RAX * RBX
  __ xorq(RCX, RCX);  // RCX = 0
  __ shldq(RCX, RDX, Immediate(1));
  __ shldq(RDX, RAX, Immediate(1));
  __ shlq(RAX, Immediate(1));  // RCX:RDX:RAX <<= 1
  __ addq(RAX, Address(RSI, 0));  // t += aj
  __ adcq(RDX, Immediate(0));
  __ adcq(RCX, Immediate(0));
  __ addq(RAX, R12);  // t += low64(c)
  __ adcq(RDX, R13);  // t += high64(c) << 64
  __ adcq(RCX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // c = high128(t)
  __ movq(R12, RDX);
  __ movq(R13, RCX);

  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&done);
  // uint128_t t = aj + c
  __ addq(R12, Address(RSI, 0));  // t = c, t += *ajp
  __ adcq(R13, Immediate(0));

  // *ajp++ = low64(t)
  // *ajp = high64(t)
  __ movq(Address(RSI, 0), R12);
  __ movq(Address(RSI, 2 * kBytesPerBigIntDigit), R13);

  __ Bind(&x_zero);
  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint64_t yt = args[_YT_LO .. _YT];  // _YT_LO == 0, _YT == 1.
  //   uint64_t* dp = &digits[(i >> 1) - 1];  // i is Smi.
  //   uint64_t dh = dp[0];  // dh == digits[(i >> 1) - 1 .. i >> 1].
  //   uint64_t qd;
  //   if (dh == yt) {
  //     qd = (DIGIT_MASK << 32) | DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
  //     qd = dh:dl / yt;  // No overflow possible, because dh < yt.
  //   }
  //   args[_QD .. _QD_HI] = qd;  // _QD == 2, _QD_HI == 3.
  //   return 2;
  // }

  // RDI = args
  __ movq(RDI, Address(RSP, 3 * target::kWordSize));  // args

  // RCX = yt = args[0..1]
  __ movq(RCX, FieldAddress(RDI, target::TypedData::data_offset()));

  // RBX = dp = &digits[(i >> 1) - 1]
  __ movq(RBX, Address(RSP, 2 * target::kWordSize));  // digits
  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // i is Smi and odd.
  __ leaq(RBX, FieldAddress(
                   RBX, RAX, TIMES_2,
                   target::TypedData::data_offset() - kBytesPerBigIntDigit));

  // RDX = dh = dp[0]
  __ movq(RDX, Address(RBX, 0));

  // RAX = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
  __ movq(RAX, Immediate(-1));

  // Return qd if dh == yt
  Label return_qd;
  __ cmpq(RDX, RCX);
  __ j(EQUAL, &return_qd, Assembler::kNearJump);

  // RAX = dl = dp[-1]
  __ movq(RAX, Address(RBX, -2 * kBytesPerBigIntDigit));

  // RAX = qd = dh:dl / yt = RDX:RAX / RCX
  __ divq(RCX);

  __ Bind(&return_qd);
  // args[2..3] = qd
  __ movq(FieldAddress(
              RDI, target::TypedData::data_offset() + 2 * kBytesPerBigIntDigit),
          RAX);

  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
  //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
  //   uint128_t t = rho*d;
  //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
  //   return 2;
  // }

  // RDI = args
  __ movq(RDI, Address(RSP, 3 * target::kWordSize));  // args

  // RCX = rho = args[2 .. 3]
  __ movq(RCX, FieldAddress(RDI, target::TypedData::data_offset() +
                                     2 * kBytesPerBigIntDigit));

  // RAX = digits[i >> 1 .. (i >> 1) + 1]
  __ movq(RBX, Address(RSP, 2 * target::kWordSize));  // digits
  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // i is Smi
  __ movq(RAX,
          FieldAddress(RBX, RAX, TIMES_2, target::TypedData::data_offset()));

  // RDX:RAX = t = rho*d
  __ mulq(RCX);

  // args[4 .. 5] = t mod DIGIT_BASE^2 = low64(t)
  __ movq(FieldAddress(
              RDI, target::TypedData::data_offset() + 4 * kBytesPerBigIntDigit),
          RAX);

  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

// Checks if the last argument is a double; jumps to label 'is_smi' if it is a
// Smi (easy to convert to double), and to label 'not_double_smi' otherwise.
// Returns the last argument in RAX.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(ZERO, is_smi);  // Jump if Smi.
  __ CompareClassId(RAX, kDoubleCid);
  __ j(NOT_EQUAL, not_double_smi);
  // Fall through if double.
}

// Both arguments on stack, left argument is a double, right argument is of
// unknown type. Return true or false object in RAX. Any NaN argument
// returns false. Any non-double argument causes control flow to fall through
// to the slow case (compiled method body).
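// Note: comisd reports an unordered comparison (at least one NaN operand) by
// setting the parity flag, so the PARITY_EVEN branch catches the NaN cases.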
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  Label is_false, is_true, is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in RAX.
  __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ comisd(XMM0, XMM1);
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false;
  __ j(true_condition, &is_true, Assembler::kNearJump);
  // Fall through false.
  __ Bind(&is_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(RAX);
  __ cvtsi2sdq(XMM1, RAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE_EQUAL);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQUAL);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW_EQUAL);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  Label is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in RAX.
  __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  switch (kind) {
    case Token::kADD:
      __ addsd(XMM0, XMM1);
      break;
    case Token::kSUB:
      __ subsd(XMM0, XMM1);
      break;
    case Token::kMUL:
      __ mulsd(XMM0, XMM1);
      break;
    case Token::kDIV:
      __ divsd(XMM0, XMM1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(RAX);
  __ cvtsi2sdq(XMM1, RAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only smis allowed.
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Is Smi.
  __ SmiUntag(RAX);
  __ cvtsi2sdq(XMM1, RAX);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ mulsd(XMM0, XMM1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}

// Argument is an integer (Smi or Mint); only the Smi case is intrinsified.
void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Is Smi.
  __ SmiUntag(RAX);
  __ cvtsi2sdq(XMM0, RAX);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  Label is_true;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ comisd(XMM0, XMM0);
  __ j(PARITY_EVEN, &is_true, Assembler::kNearJump);  // NaN -> true;
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_inf, done;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movq(RAX, FieldAddress(RAX, target::Double::value_offset()));
  // Mask off the sign.
  __ AndImmediate(RAX, Immediate(0x7FFFFFFFFFFFFFFFLL));
  // Compare with +infinity.
  __ CompareImmediate(RAX, Immediate(0x7FF0000000000000LL));
  __ j(EQUAL, &is_inf, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ jmp(&done);

  __ Bind(&is_inf);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));

  __ Bind(&done);
  __ ret();
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_false, is_true, is_zero;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ xorpd(XMM1, XMM1);  // 0.0 -> XMM1.
  __ comisd(XMM0, XMM1);
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false.
  __ j(EQUAL, &is_zero, Assembler::kNearJump);  // Check for negative zero.
  __ j(ABOVE_EQUAL, &is_false, Assembler::kNearJump);  // >= 0 -> false.
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_zero);
  // Check for negative zero (get the sign bit).
  __ movmskpd(RAX, XMM0);
  __ testq(RAX, Immediate(1));
  __ j(NOT_ZERO, &is_true, Assembler::kNearJump);
  __ jmp(&is_false, Assembler::kNearJump);
}

void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
                                      Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ cvttsd2siq(RAX, XMM0);
  // Overflow is signalled with minint.
  // Check for overflow and that it fits into Smi.
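  // Left-shifting by one overflows iff bits 63 and 62 of the value differ,
  // i.e. iff it does not fit in a 63-bit Smi; this also catches the minint
  // sentinel, whose top two bits are 10.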
  __ movq(RCX, RAX);
  __ shlq(RCX, Immediate(1));
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
  __ SmiTag(RAX);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                      Label* normal_ir_body) {
  // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

  // Convert double value to signed 64-bit int in RAX and
  // back to a double in XMM1.
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RCX, target::Double::value_offset()));
  __ cvttsd2siq(RAX, XMM0);
  __ cvtsi2sdq(XMM1, RAX);

  // Tag the int as a Smi, making sure that it fits; this checks for
  // overflow and NaN in the conversion from double to int. Conversion
  // overflow from cvttsd2si is signalled with an INT64_MIN value.
  ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
  __ addq(RAX, RAX);
  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);

  // Compare the two double values. If they are equal, we return the
  // Smi tagged result immediately as the hash code.
  Label double_hash;
  __ comisd(XMM0, XMM1);
  __ j(NOT_EQUAL, &double_hash, Assembler::kNearJump);
  __ ret();

  // Convert the double bits to a hash code that fits in a Smi.
  __ Bind(&double_hash);
  __ movq(RAX, FieldAddress(RCX, target::Double::value_offset()));
  __ movq(RCX, RAX);
  __ shrq(RCX, Immediate(32));
  __ xorq(RAX, RCX);
  __ andq(RAX, Immediate(target::kSmiMax));
  __ SmiTag(RAX);
  __ ret();

  // Fall into the native C++ implementation.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
  Label is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Argument is double and is in RAX.
  __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ sqrtsd(XMM0, XMM1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(RAX);
  __ cvtsi2sdq(XMM1, RAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}

// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
void AsmIntrinsifier::Random_nextState(Assembler* assembler,
                                       Label* normal_ir_body) {
  const Field& state_field = LookupMathRandomStateFieldOffset();
  const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;

  // Receiver.
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  // Field '_state'.
  __ movq(RBX, FieldAddress(RAX, LookupFieldOffsetInBytes(state_field)));
  // Addresses of _state[0] and _state[1].
  const intptr_t scale =
      target::Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
  const intptr_t offset =
      target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
  Address addr_0 = FieldAddress(RBX, 0 * scale + offset);
  Address addr_1 = FieldAddress(RBX, 1 * scale + offset);
  __ movq(RAX, Immediate(a_int_value));
  __ movl(RCX, addr_0);
  __ imulq(RCX, RAX);
  __ movl(RDX, addr_1);
  __ addq(RDX, RCX);
  __ movl(addr_0, RDX);
  __ shrq(RDX, Immediate(32));
  __ movl(addr_1, RDX);
  ASSERT(target::ToRawSmi(0) == 0);
  __ xorq(RAX, RAX);
  __ ret();
}

// Identity comparison.
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label is_true;
  const intptr_t kReceiverOffset = 2;
  const intptr_t kArgumentOffset = 1;

  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ cmpq(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ j(EQUAL, &is_true, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

static void RangeCheck(Assembler* assembler,
                       Register reg,
                       intptr_t low,
                       intptr_t high,
                       Condition cc,
                       Label* target) {
  __ subq(reg, Immediate(low));
  __ cmpq(reg, Immediate(high - low));
  __ j(cc, target);
}

const Condition kIfNotInRange = ABOVE;
const Condition kIfInRange = BELOW_EQUAL;
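
// The subtract-then-unsigned-compare idiom in RangeCheck tests both bounds
// with a single branch: after 'reg -= low', values below 'low' wrap around to
// large unsigned numbers, so one unsigned comparison against 'high - low'
// suffices. Note that RangeCheck clobbers 'reg'.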

static void JumpIfInteger(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kSmiCid, kMintCid, kIfInRange, target);
}

static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Label* target) {
  RangeCheck(assembler, cid, kSmiCid, kMintCid, kIfNotInRange, target);
}

static void JumpIfString(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfInRange, target);
}

static void JumpIfNotString(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfNotInRange, target);
}

// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_integer, not_double;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RCX, RAX);

  // RCX: untagged cid of instance (RAX).
  __ cmpq(RCX, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);  // Instance is a closure.

  __ cmpl(RCX, Immediate(kNumPredefinedCids));
  __ j(ABOVE, &use_declaration_type);

  // If the object is an instance of _Double, return the double type.
  __ cmpl(RCX, Immediate(kDoubleCid));
  __ j(NOT_EQUAL, &not_double);

  __ LoadIsolate(RAX);
  __ movq(RAX, Address(RAX, target::Isolate::cached_object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::double_type_offset()));
  __ ret();

  __ Bind(&not_double);
  // If the object is an integer (smi or mint), return the int type.
  __ movl(RAX, RCX);
  JumpIfNotInteger(assembler, RAX, &not_integer);

  __ LoadIsolate(RAX);
  __ movq(RAX, Address(RAX, target::Isolate::cached_object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::int_type_offset()));
  __ ret();

  __ Bind(&not_integer);
  // If the object is a string (one-byte, two-byte, or external variants),
  // return the string type.
  __ movq(RAX, RCX);
  JumpIfNotString(assembler, RAX, &use_declaration_type);

  __ LoadIsolate(RAX);
  __ movq(RAX, Address(RAX, target::Isolate::cached_object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::string_type_offset()));
  __ ret();

  // Object is neither double, nor integer, nor string.
  __ Bind(&use_declaration_type);
  __ LoadClassById(RDI, RCX);
  __ movzxw(RCX, FieldAddress(RDI, target::Class::num_type_arguments_offset()));
  __ cmpq(RCX, Immediate(0));
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  __ movq(RAX, FieldAddress(RDI, target::Class::declaration_type_offset()));
  __ CompareObject(RAX, NullObject());
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);  // Not yet set.
  __ ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal or not_equal,
// otherwise it jumps to normal_ir_body. May clobber cid1, cid2, and scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch) {
  Label different_cids, not_integer;

  // Check if the left-hand side is a closure. Closures are handled in the
  // runtime.
  __ cmpq(cid1, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);

  // Check whether the class ids match. If they don't, the types may still be
  // considered equivalent (e.g. multiple string implementation classes map to
  // a single String type).
  __ cmpq(cid1, cid2);
  __ j(NOT_EQUAL, &different_cids);

  // Types have the same class and neither is a closure type.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(scratch, cid1);
  __ movzxw(scratch,
            FieldAddress(scratch, target::Class::num_type_arguments_offset()));
  __ cmpq(scratch, Immediate(0));
  __ j(NOT_EQUAL, normal_ir_body);
  __ jmp(equal);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations) or two integer types.
  __ Bind(&different_cids);
  __ cmpq(cid1, Immediate(kNumPredefinedCids));
  __ j(ABOVE_EQUAL, not_equal);

  // Check if both are integer types.
  __ movq(scratch, cid1);
  JumpIfNotInteger(assembler, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  // Otherwise the types are not equivalent, because only integers have the
  // same runtime type as other integers.
  JumpIfInteger(assembler, cid2, equal);
  __ jmp(not_equal);

  __ Bind(&not_integer);
  // Check if the first type is String. If it is not then types are not
  // equivalent because they have different class ids and they are not strings
  // or integers.
  JumpIfNotString(assembler, cid1, not_equal);
  // First type is String. Check if the second is a string too.
  JumpIfString(assembler, cid2, equal);
  // String types are only equivalent to other String types.
  __ jmp(not_equal);
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RCX, RAX);

  __ movq(RAX, Address(RSP, +2 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RDX, RAX);

  Label equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal, &not_equal, RCX, RDX,
                     RAX);

  __ Bind(&equal);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // String object.
  __ movl(RAX, FieldAddress(RAX, target::String::hash_offset()));
  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagShift == 1);
  __ addq(RAX, RAX);  // Smi tag RAX, setting Z flag.
  __ j(ZERO, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}

void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
                                       Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Type object.
  __ movq(RAX, FieldAddress(RAX, target::Type::hash_offset()));
  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagShift == 1);
  __ testq(RAX, RAX);
  __ j(ZERO, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids, check_legacy;

  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movq(RDX, Address(RSP, +2 * target::kWordSize));
  __ cmpq(RCX, RDX);
  __ j(EQUAL, &equal);

  // RCX might not be a Type object, so check that first (RDX should be one,
  // though, since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(RAX, RCX);
  __ cmpq(RAX, Immediate(kTypeCid));
  __ j(NOT_EQUAL, normal_ir_body);

  // Check if types are syntactically equal.
  __ movq(RDI, FieldAddress(RCX, target::Type::type_class_id_offset()));
  __ SmiUntag(RDI);
  __ movq(RSI, FieldAddress(RDX, target::Type::type_class_id_offset()));
  __ SmiUntag(RSI);
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids, &not_equal, RDI,
                     RSI, RAX);

  // Check nullability.
  __ Bind(&equiv_cids);
  __ movzxb(RCX, FieldAddress(RCX, target::Type::nullability_offset()));
  __ movzxb(RDX, FieldAddress(RDX, target::Type::nullability_offset()));
  __ cmpq(RCX, RDX);
  __ j(NOT_EQUAL, &check_legacy, Assembler::kNearJump);
  // Fall through to equal case if nullability is strictly equal.

  __ Bind(&equal);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  // At this point the nullabilities are different, so they can only be
  // syntactically equivalent if they're both either kNonNullable or kLegacy.
  // These are the two largest values of the enum, so we can just do a < check.
  ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
         target::Nullability::kNonNullable < target::Nullability::kLegacy);
  __ Bind(&check_legacy);
  __ cmpq(RCX, Immediate(target::Nullability::kNonNullable));
  __ j(LESS, &not_equal, Assembler::kNearJump);
  __ cmpq(RDX, Immediate(target::Nullability::kNonNullable));
  __ j(GREATER_EQUAL, &equal, Assembler::kNearJump);

  __ Bind(&not_equal);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Object.
  __ movl(RAX, FieldAddress(RAX, target::String::hash_offset()));
  __ SmiTag(RAX);
  __ ret();
}

void AsmIntrinsifier::Object_setHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Object.
  __ movq(RDX, Address(RSP, +1 * target::kWordSize));  // Value.
  __ SmiUntag(RDX);
  __ movl(FieldAddress(RAX, target::String::hash_offset()), RDX);
  __ ret();
}

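// Expects the receiver string in RAX, the Smi start index in RBX, and the
// other string in RCX, as set up by StringBaseSubstringMatches below.
// Clobbers RBX (untagged in place), R8, R9, R11, R12, and R13.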
void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ movq(R8, FieldAddress(RAX, target::String::length_offset()));
  __ movq(R9, FieldAddress(RCX, target::String::length_offset()));

  // if (other.length == 0) return true;
  __ testq(R9, R9);
  __ j(ZERO, return_true);

  // if (start < 0) return false;
  __ testq(RBX, RBX);
  __ j(SIGN, return_false);

  // if (start + other.length > this.length) return false;
  __ movq(R11, RBX);
  __ addq(R11, R9);
  __ cmpq(R11, R8);
  __ j(GREATER, return_false);

  __ SmiUntag(RBX);                     // start
  __ SmiUntag(R9);                      // other.length
  __ LoadImmediate(R11, Immediate(0));  // i = 0

  // do
  Label loop;
  __ Bind(&loop);

  // this.codeUnitAt(i + start); reuses R8, clobbering this.length.
  __ movq(R8, R11);
  __ addq(R8, RBX);
  if (receiver_cid == kOneByteStringCid) {
    __ movzxb(R12, FieldAddress(RAX, R8, TIMES_1,
                                target::OneByteString::data_offset()));
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ movzxw(R12, FieldAddress(RAX, R8, TIMES_2,
                                target::TwoByteString::data_offset()));
  }
  // other.codeUnitAt(i)
  if (other_cid == kOneByteStringCid) {
    __ movzxb(R13, FieldAddress(RCX, R11, TIMES_1,
                                target::OneByteString::data_offset()));
  } else {
    ASSERT(other_cid == kTwoByteStringCid);
    __ movzxw(R13, FieldAddress(RCX, R11, TIMES_2,
                                target::TwoByteString::data_offset()));
  }
  __ cmpq(R12, R13);
  __ j(NOT_EQUAL, return_false);

  // i++, while (i < len)
  __ addq(R11, Immediate(1));
  __ cmpq(R11, R9);
  __ j(LESS, &loop, Assembler::kNearJump);

  __ jmp(return_true);
}

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ movq(RAX, Address(RSP, +3 * target::kWordSize));  // receiver
  __ movq(RBX, Address(RSP, +2 * target::kWordSize));  // start
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // other

  __ testq(RBX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // 'start' is not Smi.

  __ CompareClassId(RCX, kOneByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);

  __ CompareClassId(RAX, kOneByteStringCid);
  __ j(NOT_EQUAL, &try_two_byte);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(RAX, kTwoByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&return_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // Index.
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // String.
  __ testq(RCX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // Non-smi index.
  // Range check.
  __ cmpq(RCX, FieldAddress(RAX, target::String::length_offset()));
  // Runtime throws exception.
  __ j(ABOVE_EQUAL, normal_ir_body);
  __ CompareClassId(RAX, kOneByteStringCid);
  __ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump);
  __ SmiUntag(RCX);
  __ movzxb(RCX, FieldAddress(RAX, RCX, TIMES_1,
                              target::OneByteString::data_offset()));
  __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
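  // The char code indexes the table of predefined one-character symbols;
  // char codes at or above kNumberOfOneCharCodeSymbols were handed to the
  // runtime above.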
  __ movq(RAX,
          Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ movq(RAX, Address(RAX, RCX, TIMES_8,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(RAX, kTwoByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);
  ASSERT(kSmiTagShift == 1);
  // The tagged Smi index is already scaled by 2, the two-byte element size,
  // so TIMES_1 suffices here.
  __ movzxw(RCX, FieldAddress(RAX, RCX, TIMES_1,
                              target::TwoByteString::data_offset()));
  __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
  __ movq(RAX,
          Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ movq(RAX, Address(RAX, RCX, TIMES_8,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label is_true;
  // Get length.
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // String object.
  __ movq(RAX, FieldAddress(RAX, target::String::length_offset()));
  __ cmpq(RAX, Immediate(target::ToRawSmi(0)));
  __ j(EQUAL, &is_true, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label compute_hash;
  __ movq(
      RBX,
      Address(RSP, +1 * target::kWordSize));  // target::OneByteString object.
  __ movl(RAX, FieldAddress(RBX, target::String::hash_offset()));
  __ cmpq(RAX, Immediate(0));
  __ j(EQUAL, &compute_hash, Assembler::kNearJump);
  __ SmiTag(RAX);
  __ ret();

  __ Bind(&compute_hash);
  // Hash not yet computed; use the algorithm of class StringHasher
  // (Bob Jenkins' one-at-a-time hash).
  __ movq(RCX, FieldAddress(RBX, target::String::length_offset()));
  __ SmiUntag(RCX);
  __ xorq(RAX, RAX);
  __ xorq(RDI, RDI);
  // RBX: Instance of target::OneByteString.
  // RCX: String length, untagged integer.
  // RDI: Loop counter, untagged integer.
  // RAX: Hash code, untagged integer.
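  // For reference, the whole computation as C (a sketch; the 32-bit addl,
  // shll, and xorl instructions below match uint32_t arithmetic):
  //   uint32_t hash = 0;
  //   for (intptr_t i = 0; i < length; i++) {
  //     hash += data[i];
  //     hash += hash << 10;
  //     hash ^= hash >> 6;
  //   }
  //   hash += hash << 3;
  //   hash ^= hash >> 11;
  //   hash += hash << 15;
  //   hash &= (1 << target::String::kHashBits) - 1;
  //   return (hash == 0) ? 1 : hash;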
  Label loop, done, set_hash_code;
  __ Bind(&loop);
  __ cmpq(RDI, RCX);
  __ j(EQUAL, &done, Assembler::kNearJump);
  // Add to hash code: (hash_ is uint32)
  // hash_ += ch;
  // hash_ += hash_ << 10;
  // hash_ ^= hash_ >> 6;
  // Get one character (ch).
  __ movzxb(RDX, FieldAddress(RBX, RDI, TIMES_1,
                              target::OneByteString::data_offset()));
  // RDX: ch and temporary.
  __ addl(RAX, RDX);
  __ movq(RDX, RAX);
  __ shll(RDX, Immediate(10));
  __ addl(RAX, RDX);
  __ movq(RDX, RAX);
  __ shrl(RDX, Immediate(6));
  __ xorl(RAX, RDX);

  __ incq(RDI);
  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&done);
  // Finalize:
  // hash_ += hash_ << 3;
  // hash_ ^= hash_ >> 11;
  // hash_ += hash_ << 15;
  __ movq(RDX, RAX);
  __ shll(RDX, Immediate(3));
  __ addl(RAX, RDX);
  __ movq(RDX, RAX);
  __ shrl(RDX, Immediate(11));
  __ xorl(RAX, RDX);
  __ movq(RDX, RAX);
  __ shll(RDX, Immediate(15));
  __ addl(RAX, RDX);
  // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
  __ andl(
      RAX,
      Immediate(((static_cast<intptr_t>(1) << target::String::kHashBits) - 1)));

  // return hash_ == 0 ? 1 : hash_;
  __ cmpq(RAX, Immediate(0));
  __ j(NOT_EQUAL, &set_hash_code, Assembler::kNearJump);
  __ incq(RAX);
  __ Bind(&set_hash_code);
  __ movl(FieldAddress(RBX, target::String::hash_offset()), RAX);
  __ SmiTag(RAX);
  __ ret();
}

// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
// 'length_reg' contains the desired length as a _Smi or _Mint.
// Returns new string as tagged pointer in RAX.
static void TryAllocateString(Assembler* assembler,
                              classid_t cid,
                              Label* ok,
                              Label* failure,
                              Register length_reg) {
  ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
  // _Mint length: call to runtime to produce error.
  __ BranchIfNotSmi(length_reg, failure);
  // Negative length: call to runtime to produce error.
  __ cmpq(length_reg, Immediate(0));
  __ j(LESS, failure);

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, false));
  if (length_reg != RDI) {
    __ movq(RDI, length_reg);
  }
  Label pop_and_fail, not_zero_length;
  __ pushq(RDI);  // Preserve length.
  if (cid == kOneByteStringCid) {
    // Untag length.
    __ sarq(RDI, Immediate(kSmiTagShift));
  } else {
    // Untag length and multiply by element size -> no-op.
    __ testq(RDI, RDI);
  }
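  // Either way, the flags now reflect whether the byte length is zero, which
  // the branch below consumes.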
  // If the length is 0 then we have to make the allocated size a bit bigger,
  // otherwise the string takes up less space than an ExternalOneByteString,
  // and cannot be externalized. TODO(erikcorry): We should probably just
  // return a static zero length string here instead.
  __ j(NOT_ZERO, &not_zero_length);
  __ addq(RDI, Immediate(1));
  __ Bind(&not_zero_length);
  const intptr_t fixed_size_plus_alignment_padding =
      target::String::InstanceSize() +
      target::ObjectAlignment::kObjectAlignment - 1;
  __ addq(RDI, Immediate(fixed_size_plus_alignment_padding));
  __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
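  // RDI is now the allocation size: the byte length plus the fixed header
  // size, rounded up to the object alignment boundary.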

  __ movq(RAX, Address(THR, target::Thread::top_offset()));

  // RDI: allocation size.
  __ movq(RCX, RAX);
  __ addq(RCX, RDI);
  __ j(CARRY, &pop_and_fail);

  // Check if the allocation fits into the remaining space.
  // RAX: potential new object start.
  // RCX: potential next object start.
  // RDI: allocation size.
  __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
  __ j(ABOVE_EQUAL, &pop_and_fail);

  // Successfully allocated the object(s), now update top to point to
  // next object start and initialize the object.
  __ movq(Address(THR, target::Thread::top_offset()), RCX);
  __ addq(RAX, Immediate(kHeapObjectTag));

  // Initialize the tags.
  // RAX: new object start as a tagged pointer.
  // RDI: allocation size.
  {
    Label size_tag_overflow, done;
    __ cmpq(RDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
    __ shlq(RDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2));
    __ jmp(&done, Assembler::kNearJump);

    __ Bind(&size_tag_overflow);
    __ xorq(RDI, RDI);
    __ Bind(&done);

    // Get the class index and insert it into the tags.
    // This also clears the hash, which is in the high bits of the tags.
    const uint32_t tags =
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
    __ orq(RDI, Immediate(tags));
    __ movq(FieldAddress(RAX, target::Object::tags_offset()), RDI);  // Tags.
  }

  // Set the length field.
  __ popq(RDI);
  __ StoreIntoObjectNoBarrier(
      RAX, FieldAddress(RAX, target::String::length_offset()), RDI);
  __ jmp(ok, Assembler::kNearJump);

  __ Bind(&pop_and_fail);
  __ popq(RDI);
  __ jmp(failure);
}

// Arg0: target::OneByteString (receiver).
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
                                                       Label* normal_ir_body) {
  const intptr_t kStringOffset = 3 * target::kWordSize;
  const intptr_t kStartIndexOffset = 2 * target::kWordSize;
  const intptr_t kEndIndexOffset = 1 * target::kWordSize;
  Label ok;
  __ movq(RSI, Address(RSP, +kStartIndexOffset));
  __ movq(RDI, Address(RSP, +kEndIndexOffset));
  __ orq(RSI, RDI);
  __ testq(RSI, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // 'start', 'end' not Smi.

  __ subq(RDI, Address(RSP, +kStartIndexOffset));
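  // RDI: Tagged Smi length of the result (end - start).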
  TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body, RDI);
  __ Bind(&ok);
  // RAX: new string as tagged pointer.
  // Copy string.
  __ movq(RSI, Address(RSP, +kStringOffset));
  __ movq(RBX, Address(RSP, +kStartIndexOffset));
  __ SmiUntag(RBX);
  __ leaq(RSI, FieldAddress(RSI, RBX, TIMES_1,
                            target::OneByteString::data_offset()));
  // RSI: Start address to copy from (untagged).
  // RBX: Untagged start index.
  __ movq(RCX, Address(RSP, +kEndIndexOffset));
  __ SmiUntag(RCX);
  __ subq(RCX, RBX);
  __ xorq(RDX, RDX);
  // RSI: Start address to copy from (untagged).
  // RCX: Untagged number of bytes to copy.
  // RAX: Tagged result string.
  // RDX: Loop counter.
  // RBX: Scratch register.
  Label loop, check;
  __ jmp(&check, Assembler::kNearJump);
  __ Bind(&loop);
  __ movzxb(RBX, Address(RSI, RDX, TIMES_1, 0));
  __ movb(FieldAddress(RAX, RDX, TIMES_1, target::OneByteString::data_offset()),
          RBX);
  __ incq(RDX);
  __ Bind(&check);
  __ cmpq(RDX, RCX);
  __ j(LESS, &loop, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // Value.
  __ movq(RBX, Address(RSP, +2 * target::kWordSize));  // Index.
  __ movq(RAX, Address(RSP, +3 * target::kWordSize));  // target::OneByteString.
  __ SmiUntag(RBX);
  __ SmiUntag(RCX);
  __ movb(FieldAddress(RAX, RBX, TIMES_1, target::OneByteString::data_offset()),
          RCX);
  __ ret();
}

void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // Value.
  __ movq(RBX, Address(RSP, +2 * target::kWordSize));  // Index.
  __ movq(RAX, Address(RSP, +3 * target::kWordSize));  // target::TwoByteString.
  // The Smi index in RBX stays tagged: the tag shift (x2) matches the
  // two-byte element size. Only the value needs untagging.
  __ SmiUntag(RCX);
  __ movw(FieldAddress(RAX, RBX, TIMES_1, target::TwoByteString::data_offset()),
          RCX);
  __ ret();
}

void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ movq(RDI, Address(RSP, +1 * target::kWordSize));  // Length.
  Label ok;
  TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body, RDI);

  __ Bind(&ok);
  // RAX: New string as tagged pointer; the content is not initialized.
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ movq(RDI, Address(RSP, +1 * target::kWordSize));  // Length.
  Label ok;
  TryAllocateString(assembler, kTwoByteStringCid, &ok, normal_ir_body, RDI);

  __ Bind(&ok);
  // RAX: New string as tagged pointer; the content is not initialized.
  __ ret();

  __ Bind(normal_ir_body);
}

// TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
static void StringEquality(Assembler* assembler,
                           Label* normal_ir_body,
                           intptr_t string_cid) {
  Label is_true, is_false, loop;
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // This.
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // Other.

  // Are identical?
  __ cmpq(RAX, RCX);
  __ j(EQUAL, &is_true, Assembler::kNearJump);

  // Is other a string of the same cid? (A Smi is never equal to a string.)
  __ testq(RCX, Immediate(kSmiTagMask));
  __ j(ZERO, &is_false);  // Smi.
  __ CompareClassId(RCX, string_cid);
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);

  // Have same length?
  __ movq(RDI, FieldAddress(RAX, target::String::length_offset()));
  __ cmpq(RDI, FieldAddress(RCX, target::String::length_offset()));
  __ j(NOT_EQUAL, &is_false, Assembler::kNearJump);

  // Check contents, no fall-through possible.
  // TODO(srdjan): write a faster check.
  __ SmiUntag(RDI);
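  // RDI counts down from length - 1 to below zero, comparing one code unit
  // per iteration from the end of the strings.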
  __ Bind(&loop);
  __ decq(RDI);
  __ cmpq(RDI, Immediate(0));
  __ j(LESS, &is_true, Assembler::kNearJump);
  if (string_cid == kOneByteStringCid) {
    __ movzxb(RBX, FieldAddress(RAX, RDI, TIMES_1,
                                target::OneByteString::data_offset()));
    __ movzxb(RDX, FieldAddress(RCX, RDI, TIMES_1,
                                target::OneByteString::data_offset()));
  } else if (string_cid == kTwoByteStringCid) {
    __ movzxw(RBX, FieldAddress(RAX, RDI, TIMES_2,
                                target::TwoByteString::data_offset()));
    __ movzxw(RDX, FieldAddress(RCX, RDI, TIMES_2,
                                target::TwoByteString::data_offset()));
  } else {
    UNIMPLEMENTED();
  }
  __ cmpq(RBX, RDX);
  __ j(NOT_EQUAL, &is_false, Assembler::kNearJump);
  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&is_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  StringEquality(assembler, normal_ir_body, kOneByteStringCid);
}

void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
}

void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
                                                   Label* normal_ir_body,
                                                   bool sticky) {
  if (FLAG_interpret_irregexp) return;

  static const intptr_t kRegExpParamOffset = 3 * target::kWordSize;
  static const intptr_t kStringParamOffset = 2 * target::kWordSize;
  // start_index smi is located at offset 1.

  // Incoming registers:
  // RAX: Function. (Will be loaded with the specialized matcher function.)
  // RCX: Unknown. (Must be GC safe on tail call.)
  // R10: Arguments descriptor. (Will be preserved.)

  // Load the specialized function pointer into RAX. Leverage the fact that
  // the string CIDs, as well as the stored function pointers, are in sequence.
  __ movq(RBX, Address(RSP, kRegExpParamOffset));
  __ movq(RDI, Address(RSP, kStringParamOffset));
  __ LoadClassId(RDI, RDI);
  __ SubImmediate(RDI, Immediate(kOneByteStringCid));
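  // RDI now holds (string cid - kOneByteStringCid); scaled by the word size,
  // it selects the matching one-byte/two-byte/external function field in the
  // RegExp object.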
  __ movq(RAX, FieldAddress(
                   RBX, RDI, TIMES_8,
                   target::RegExp::function_offset(kOneByteStringCid, sticky)));

  // Registers are now set up for the lazy compile stub. It expects the function
  // in RAX, the argument descriptor in R10, and IC-Data in RCX.
  __ xorq(RCX, RCX);

  // Tail-call the function.
  __ movq(CODE_REG, FieldAddress(RAX, target::Function::code_offset()));
  __ movq(RDI, FieldAddress(RAX, target::Function::entry_point_offset()));
  __ jmp(RDI);
}

// On stack: user tag (+1), return-address (+0).
void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
                                          Label* normal_ir_body) {
  // RBX: Isolate.
  __ LoadIsolate(RBX);
  // RAX: Current user tag.
  __ movq(RAX, Address(RBX, target::Isolate::current_tag_offset()));
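  // RAX is left untouched below and is returned to the caller as the
  // previous tag.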
  // R10: UserTag.
  __ movq(R10, Address(RSP, +1 * target::kWordSize));
  // Set Isolate::current_tag_.
  __ movq(Address(RBX, target::Isolate::current_tag_offset()), R10);
  // R10: UserTag's tag.
  __ movq(R10, FieldAddress(R10, target::UserTag::tag_offset()));
  // Set Isolate::user_tag_.
  __ movq(Address(RBX, target::Isolate::user_tag_offset()), R10);
  __ ret();
}

void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ LoadIsolate(RAX);
  __ movq(RAX, Address(RAX, target::Isolate::default_tag_offset()));
  __ ret();
}

void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ LoadIsolate(RAX);
  __ movq(RAX, Address(RAX, target::Isolate::current_tag_offset()));
  __ ret();
}

void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
                                                   Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
#else
  Label true_label;
  // Load TimelineStream*.
  __ movq(RAX, Address(THR, target::Thread::dart_stream_offset()));
  // Load uintptr_t from TimelineStream*.
  __ movq(RAX, Address(RAX, target::TimelineStream::enabled_offset()));
  __ cmpq(RAX, Immediate(0));
  __ j(NOT_ZERO, &true_label, Assembler::kNearJump);
  // Not enabled.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  // Enabled.
  __ Bind(&true_label);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
#endif
}

void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
                                                 Label* normal_ir_body) {
  __ LoadObject(RAX, NullObject());
  __ movq(Address(THR, target::Thread::async_stack_trace_offset()), RAX);
  __ ret();
}

void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ movq(Address(THR, target::Thread::async_stack_trace_offset()), RAX);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

#undef __

}  // namespace compiler
}  // namespace dart

#endif  // defined(TARGET_ARCH_X64)