// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM64.
#if defined(TARGET_ARCH_ARM64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// R4: Arguments descriptor
// LR: Return address
// The R4 register can be destroyed only if there is no slow path, i.e.
// if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_arm64.h) must be preserved.

#define __ assembler->

intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
  return -1;
}

static bool IsABIPreservedRegister(Register reg) {
  return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
}

void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
  ASSERT(IsABIPreservedRegister(CODE_REG));
  ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG));
  ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));
  ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP2));
  ASSERT(CALLEE_SAVED_TEMP != CODE_REG);
  ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG);
  ASSERT(CALLEE_SAVED_TEMP2 != CODE_REG);
  ASSERT(CALLEE_SAVED_TEMP2 != ARGS_DESC_REG);

  assembler->Comment("IntrinsicCallPrologue");
  assembler->mov(CALLEE_SAVED_TEMP, LR);
  assembler->mov(CALLEE_SAVED_TEMP2, ARGS_DESC_REG);
}

void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
  assembler->Comment("IntrinsicCallEpilogue");
  assembler->mov(LR, CALLEE_SAVED_TEMP);
  assembler->mov(ARGS_DESC_REG, CALLEE_SAVED_TEMP2);
}

// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+1), data (+0).
void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
                                             Label* normal_ir_body) {
  // The newly allocated object is returned in R0.
  const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
  const intptr_t kArrayOffset = 0 * target::kWordSize;

  // Try allocating in new space.
  const Class& cls = GrowableObjectArrayClass();
  __ TryAllocate(cls, normal_ir_body, R0, R1);

  // Store backing array object in growable array object.
  __ ldr(R1, Address(SP, kArrayOffset));  // Data argument.
  // R0 is new, no barrier needed.
  __ StoreIntoObjectNoBarrier(
      R0, FieldAddress(R0, target::GrowableObjectArray::data_offset()), R1);

  // R0: new growable array object start as a tagged pointer.
  // Store the type argument field in the growable array object.
  __ ldr(R1, Address(SP, kTypeArgumentsOffset));  // Type argument.
  __ StoreIntoObjectNoBarrier(
      R0,
      FieldAddress(R0, target::GrowableObjectArray::type_arguments_offset()),
      R1);

  // Set the length field in the growable array object to 0.
  __ LoadImmediate(R1, 0);
  __ str(R1, FieldAddress(R0, target::GrowableObjectArray::length_offset()));
  __ ret();  // Returns the newly allocated object in R0.

  __ Bind(normal_ir_body);
}
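
// In Dart terms, the fast path above is roughly the following sketch (field
// names are illustrative, not the actual library source):
//   var result = <GrowableObjectArray fresh from new space>;
//   result._data = data;               // no barrier: result is in new space
//   result._typeArguments = typeArgs;
//   result._length = 0;
//   return result;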

static int GetScaleFactor(intptr_t size) {
  switch (size) {
    case 1:
      return 0;
    case 2:
      return 1;
    case 4:
      return 2;
    case 8:
      return 3;
    case 16:
      return 4;
  }
  UNREACHABLE();
  return -1;
}
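
// For example, a Float64List element (size 8) maps to shift 3, so the byte
// offset of element i is computed as i << 3.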

#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_shift)                      \
  Label fall_through;                                                          \
  const intptr_t kArrayLengthStackOffset = 0 * target::kWordSize;              \
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, normal_ir_body));            \
  __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */        \
  /* Check that length is a positive Smi. */                                   \
  /* R2: requested array length argument. */                                   \
  __ BranchIfNotSmi(R2, normal_ir_body);                                       \
  __ CompareRegisters(R2, ZR);                                                 \
  __ b(normal_ir_body, LT);                                                    \
  __ SmiUntag(R2);                                                             \
  /* Check for maximum allowed length. */                                      \
  /* R2: untagged array length. */                                             \
  __ CompareImmediate(R2, max_len);                                            \
  __ b(normal_ir_body, GT);                                                    \
  __ LslImmediate(R2, R2, scale_shift);                                        \
  const intptr_t fixed_size_plus_alignment_padding =                           \
      target::TypedData::InstanceSize() +                                      \
      target::ObjectAlignment::kObjectAlignment - 1;                           \
  __ AddImmediate(R2, fixed_size_plus_alignment_padding);                      \
  __ andi(R2, R2,                                                              \
          Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));        \
  __ ldr(R0, Address(THR, target::Thread::top_offset()));                      \
                                                                               \
  /* R2: allocation size. */                                                   \
  __ adds(R1, R0, Operand(R2));                                                \
  __ b(normal_ir_body, CS); /* Fail on unsigned overflow. */                   \
                                                                               \
  /* Check if the allocation fits into the remaining space. */                 \
  /* R0: potential new object start. */                                        \
  /* R1: potential next object start. */                                       \
  /* R2: allocation size. */                                                   \
  __ ldr(R6, Address(THR, target::Thread::end_offset()));                      \
  __ cmp(R1, Operand(R6));                                                     \
  __ b(normal_ir_body, CS);                                                    \
                                                                               \
  /* Successfully allocated the object(s), now update top to point to */       \
  /* next object start and initialize the object. */                           \
  __ str(R1, Address(THR, target::Thread::top_offset()));                      \
  __ AddImmediate(R0, kHeapObjectTag);                                         \
  /* Initialize the tags. */                                                   \
  /* R0: new object start as a tagged pointer. */                              \
  /* R1: new object end address. */                                            \
  /* R2: allocation size. */                                                   \
  {                                                                            \
    __ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);         \
    __ LslImmediate(R2, R2,                                                    \
                    target::ObjectLayout::kTagBitsSizeTagPos -                 \
                        target::ObjectAlignment::kObjectAlignmentLog2);        \
    __ csel(R2, ZR, R2, HI);                                                   \
                                                                               \
    /* Get the class index and insert it into the tags. */                     \
    uint32_t tags =                                                            \
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);        \
    __ LoadImmediate(TMP, tags);                                               \
    __ orr(R2, R2, Operand(TMP));                                              \
    __ str(R2, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */   \
  }                                                                            \
  /* Set the length field. */                                                  \
  /* R0: new object start as a tagged pointer. */                              \
  /* R1: new object end address. */                                            \
  __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */        \
  __ StoreIntoObjectNoBarrier(                                                 \
      R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R2);       \
  /* Initialize all array elements to 0. */                                    \
  /* R0: new object start as a tagged pointer. */                              \
  /* R1: new object end address. */                                            \
  /* R2: iterator which initially points to the start of the variable */      \
  /* data area to be initialized. */                                           \
  /* R3: scratch register. */                                                  \
  __ mov(R3, ZR);                                                              \
  __ AddImmediate(R2, R0, target::TypedData::InstanceSize() - 1);              \
  __ StoreInternalPointer(                                                     \
      R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R2);   \
  Label init_loop, done;                                                       \
  __ Bind(&init_loop);                                                         \
  __ cmp(R2, Operand(R1));                                                     \
  __ b(&done, CS);                                                             \
  __ str(R3, Address(R2, 0));                                                  \
  __ add(R2, R2, Operand(target::kWordSize));                                  \
  __ b(&init_loop);                                                            \
  __ Bind(&done);                                                              \
                                                                               \
  __ ret();                                                                    \
  __ Bind(normal_ir_body);
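
// In C terms, the allocation math emitted by the macro above is roughly the
// following sketch (kAlign stands for
// target::ObjectAlignment::kObjectAlignment; tag manipulation elided):
//   size = (InstanceSize() + (len << scale_shift) + kAlign - 1) &
//          ~(kAlign - 1);
//   if (top + size overflows || top + size > end) goto slow_path;
//   object = top + kHeapObjectTag;  // tagged pointer to the new object
//   top += size;                    // bump-allocate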

#define TYPED_DATA_ALLOCATOR(clazz)                                            \
  void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler,      \
                                                    Label* normal_ir_body) {   \
    intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid);       \
    intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid);   \
    int shift = GetScaleFactor(size);                                          \
    TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, shift);            \
  }
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR

// Loads args from the stack into R0 and R1.
// Tests if they are Smis; jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ ldr(R0, Address(SP, +0 * target::kWordSize));
  __ ldr(R1, Address(SP, +1 * target::kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ BranchIfNotSmi(TMP, not_smi);
}
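
// Since kSmiTag == 0 lives in the low bit, one OR and one tag test cover both
// values at once:
//   ((a | b) & kSmiTagMask) == 0  <=>  isSmi(a) && isSmi(b)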

void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // Checks two Smis.
  __ adds(R0, R0, Operand(R1));  // Adds.
  __ b(normal_ir_body, VS);      // Fall through on overflow.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
  Integer_addFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ subs(R0, R0, Operand(R1));  // Subtract.
  __ b(normal_ir_body, VS);      // Fall through on overflow.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ subs(R0, R1, Operand(R0));  // Subtract.
  __ b(normal_ir_body, VS);      // Fall through on overflow.
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // Checks two Smis.
  __ SmiUntag(R0);  // Untags R0. We only want the result shifted by one.

  __ mul(TMP, R0, R1);
  __ smulh(TMP2, R0, R1);
  // TMP2: result bits 64..127.
  __ cmp(TMP2, Operand(TMP, ASR, 63));
  __ b(normal_ir_body, NE);
  __ mov(R0, TMP);
  __ ret();
  __ Bind(normal_ir_body);
}
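
// The overflow test above uses the identity (a sketch):
//   lo = low64(a * b);   // mul
//   hi = high64(a * b);  // smulh
//   the product fits in 64 bits  <=>  hi == (lo >> 63)  // arithmetic shift
// i.e. the high half must be the sign extension of the low half.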

void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
  Integer_mulFromInteger(assembler, normal_ir_body);
}

// Optimizations:
// - result is 0 if:
//   - left is 0
//   - left equals right
// - result is left if:
//   - left > 0 && left < right
// R1: Tagged left (dividend).
// R0: Tagged right (divisor).
// Returns:
// R1: Untagged fallthrough result (remainder to be adjusted), or
// R0: Tagged return result (remainder).
static void EmitRemainderOperation(Assembler* assembler) {
  Label return_zero, modulo;
  const Register left = R1;
  const Register right = R0;
  const Register result = R1;
  const Register tmp = R2;
  ASSERT(left == result);

  // Check for quick zero results.
  __ CompareRegisters(left, ZR);
  __ b(&return_zero, EQ);
  __ CompareRegisters(left, right);
  __ b(&return_zero, EQ);

  // Check if result should be left.
  __ CompareRegisters(left, ZR);
  __ b(&modulo, LT);
  // left is positive.
  __ CompareRegisters(left, right);
  // left is less than right, result is left.
  __ b(&modulo, GT);
  __ mov(R0, left);
  __ ret();

  __ Bind(&return_zero);
  __ mov(R0, ZR);
  __ ret();

  __ Bind(&modulo);
  // result <- left - right * (left / right)
  __ SmiUntag(left);
  __ SmiUntag(right);

  __ sdiv(tmp, left, right);
  __ msub(result, right, tmp, left);  // result <- left - right * tmp
}

// Implementation:
//  res = left % right;
//  if (res < 0) {
//    if (right < 0) {
//      res = res - right;
//    } else {
//      res = res + right;
//    }
//  }
void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label neg_remainder, fall_through;
  __ ldr(R1, Address(SP, +0 * target::kWordSize));
  __ ldr(R0, Address(SP, +1 * target::kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ BranchIfNotSmi(TMP, normal_ir_body);
  // R1: Tagged left (dividend).
  // R0: Tagged right (divisor).
  // Check if modulo by zero -> exception thrown in main function.
  __ CompareRegisters(R0, ZR);
  __ b(normal_ir_body, EQ);
  EmitRemainderOperation(assembler);
  // Untagged right in R0. Untagged remainder result in R1.

  __ CompareRegisters(R1, ZR);
  __ b(&neg_remainder, LT);
  __ SmiTag(R0, R1);  // Tag and move result to R0.
  __ ret();

  __ Bind(&neg_remainder);
  // Result is negative, adjust it.
  __ CompareRegisters(R0, ZR);
  __ sub(TMP, R1, Operand(R0));
  __ add(TMP2, R1, Operand(R0));
  __ csel(R0, TMP2, TMP, GE);
  __ SmiTag(R0);
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
                                          Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareRegisters(R0, ZR);
  __ b(normal_ir_body, EQ);  // If b is 0, fall through.

  __ SmiUntag(R0);
  __ SmiUntag(R1);

  __ sdiv(R0, R1, R0);

  // Check the corner case of dividing MIN_SMI by -1, in which case we
  // cannot tag the result.
  __ CompareImmediate(R0, 0x4000000000000000);
  __ b(normal_ir_body, EQ);
  __ SmiTag(R0);  // Not equal. Okay to tag and return.
  __ ret();       // Return.
  __ Bind(normal_ir_body);
}
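
// The untagged Smi range is [-2^62, 2^62 - 1], so the only quotient that
// cannot be re-tagged is (-2^62) ~/ -1 == 2^62 == 0x4000000000000000, hence
// the single comparison above.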

void AsmIntrinsifier::Integer_negate(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ ldr(R0, Address(SP, +0 * target::kWordSize));  // Grab first argument.
  __ BranchIfNotSmi(R0, normal_ir_body);
  __ negs(R0, R0);
  __ b(normal_ir_body, VS);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // Checks two Smis.
  __ and_(R0, R0, Operand(R1));
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
                                               Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // Checks two Smis.
  __ orr(R0, R0, Operand(R1));
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_bitOrFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // Checks two Smis.
  __ eor(R0, R0, Operand(R1));
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitXorFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  const Register right = R0;
  const Register left = R1;
  const Register temp = R2;
  const Register result = R0;

  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareImmediate(right, target::ToRawSmi(target::kSmiBits));
  __ b(normal_ir_body, CS);

  // Left is not a constant.
  // Check if the count is too large to handle inlined.
  __ SmiUntag(TMP, right);  // SmiUntag right into TMP.
  // Overflow test (preserves left, right, and TMP).
  __ lslv(temp, left, TMP);
  __ asrv(TMP2, temp, TMP);
  __ CompareRegisters(left, TMP2);
  __ b(normal_ir_body, NE);  // Overflow.
  // Shift for result now that we know there is no overflow.
  __ lslv(result, left, TMP);
  __ ret();
  __ Bind(normal_ir_body);
}
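
// The overflow test above relies on a left shift being reversible exactly
// when no significant bits are shifted out:
//   ((left << n) >> n) == left  // arithmetic right shift
// which is the lslv/asrv/compare sequence emitted before the real shift.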

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label true_label;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // R0 contains the right argument, R1 the left.
  __ CompareRegisters(R1, R0);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, true_condition);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
                                                 Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LT);
}

void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  Integer_greaterThanFromInt(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GT);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LE);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GE);
}

// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ cmp(R0, Operand(R1));
  __ b(&true_label, EQ);

  __ orr(R2, R0, Operand(R1));
  __ BranchIfNotSmi(R2, &check_for_mint);
  // If R0 or R1 is not a smi do Mint checks.

  // Both arguments are smi, '===' is good enough.
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

  __ BranchIfNotSmi(R1, &receiver_not_smi);  // Check receiver.

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.

  __ CompareClassId(R0, kDoubleCid);
  __ b(normal_ir_body, EQ);
  __ LoadObject(R0,
                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
  __ ret();

  __ Bind(&receiver_not_smi);
  // R1: receiver.

  __ CompareClassId(R1, kMintCid);
  __ b(normal_ir_body, NE);
  // Receiver is Mint, return false if right is Smi.
  __ BranchIfNotSmi(R0, normal_ir_body);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // Shift amount in R0. Value to shift in R1.

  // Fall through if shift amount is negative.
  __ SmiUntag(R0);
  __ CompareRegisters(R0, ZR);
  __ b(normal_ir_body, LT);

  // If shift amount is bigger than 63, set to 63.
  __ LoadImmediate(TMP, 0x3F);
  __ CompareRegisters(R0, TMP);
  __ csel(R0, TMP, R0, GT);
  __ SmiUntag(R1);
  __ asrv(R0, R1, R0);
  __ SmiTag(R0);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ mvn(R0, R0);
  __ andi(R0, R0, Immediate(~kSmiTagMask));  // Remove inverted smi-tag.
  __ ret();
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ SmiUntag(R0);
  // XOR with sign bit to complement bits if value is negative.
  __ eor(R0, R0, Operand(R0, ASR, 63));
  __ clz(R0, R0);
  __ LoadImmediate(R1, 64);
  __ sub(R0, R1, Operand(R0));
  __ SmiTag(R0);
  __ ret();
}
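
// x ^ (x >> 63) maps a negative value to its bitwise complement (e.g. -1 to
// 0, -256 to 255), so bitLength reduces to 64 - clz for the non-negative
// image, e.g. bitLength(255) == 64 - clz(255) == 64 - 56 == 8.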

void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
                                        Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
  __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
  __ add(R2, R2, Operand(2));  // x_used > 0, Smi. R2 = x_used + 1, round up.
  __ AsrImmediate(R2, R2, 2);  // R2 = num of digit pairs to read.
  // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ SmiUntag(R5);
  // R0 = n ~/ (2*_DIGIT_BITS)
  __ AsrImmediate(R0, R5, 6);
  // R6 = &x_digits[0]
  __ add(R6, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
  // R7 = &x_digits[2*R2]
  __ add(R7, R6, Operand(R2, LSL, 3));
  // R8 = &r_digits[2*1]
  __ add(R8, R4,
         Operand(target::TypedData::data_offset() - kHeapObjectTag +
                 2 * kBytesPerBigIntDigit));
  // R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)]
  __ add(R0, R0, Operand(R2));
  __ add(R8, R8, Operand(R0, LSL, 3));
  // R3 = n % (2 * _DIGIT_BITS)
  __ AndImmediate(R3, R5, 63);
  // R2 = 64 - R3
  __ LoadImmediate(R2, 64);
  __ sub(R2, R2, Operand(R3));
  __ mov(R1, ZR);
  Label loop;
  __ Bind(&loop);
  __ ldr(R0, Address(R7, -2 * kBytesPerBigIntDigit, Address::PreIndex));
  __ lsrv(R4, R0, R2);
  __ orr(R1, R1, Operand(R4));
  __ str(R1, Address(R8, -2 * kBytesPerBigIntDigit, Address::PreIndex));
  __ lslv(R1, R0, R3);
  __ cmp(R7, Operand(R6));
  __ b(&loop, NE);
  __ str(R1, Address(R8, -2 * kBytesPerBigIntDigit, Address::PreIndex));
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
  __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
  __ add(R2, R2, Operand(2));  // x_used > 0, Smi. R2 = x_used + 1, round up.
  __ AsrImmediate(R2, R2, 2);  // R2 = num of digit pairs to read.
  // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ SmiUntag(R5);
  // R0 = n ~/ (2*_DIGIT_BITS)
  __ AsrImmediate(R0, R5, 6);
  // R8 = &r_digits[0]
  __ add(R8, R4, Operand(target::TypedData::data_offset() - kHeapObjectTag));
  // R7 = &x_digits[2*(n ~/ (2*_DIGIT_BITS))]
  __ add(R7, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));
  __ add(R7, R7, Operand(R0, LSL, 3));
  // R6 = &r_digits[2*(R2 - n ~/ (2*_DIGIT_BITS) - 1)]
  __ add(R0, R0, Operand(1));
  __ sub(R0, R2, Operand(R0));
  __ add(R6, R8, Operand(R0, LSL, 3));
  // R3 = n % (2*_DIGIT_BITS)
  __ AndImmediate(R3, R5, 63);
  // R2 = 64 - R3
  __ LoadImmediate(R2, 64);
  __ sub(R2, R2, Operand(R3));
  // R1 = x_digits[n ~/ (2*_DIGIT_BITS)] >> (n % (2*_DIGIT_BITS))
  __ ldr(R1, Address(R7, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ lsrv(R1, R1, R3);
  Label loop_entry;
  __ b(&loop_entry);
  Label loop;
  __ Bind(&loop);
  __ ldr(R0, Address(R7, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ lslv(R4, R0, R2);
  __ orr(R1, R1, Operand(R4));
  __ str(R1, Address(R8, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ lsrv(R1, R0, R3);
  __ Bind(&loop_entry);
  __ cmp(R8, Operand(R6));
  __ b(&loop, NE);
  __ str(R1, Address(R8, 0));
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R2 = used, R3 = digits
  __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
  __ add(R2, R2, Operand(2));  // used > 0, Smi. R2 = used + 1, round up.
  __ add(R2, ZR, Operand(R2, ASR, 2));  // R2 = num of digit pairs to process.
  // R3 = &digits[0]
  __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R4 = a_used, R5 = a_digits
  __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
  __ add(R4, R4, Operand(2));  // a_used > 0, Smi. R4 = a_used + 1, round up.
  __ add(R4, ZR, Operand(R4, ASR, 2));  // R4 = num of digit pairs to process.
  // R5 = &a_digits[0]
  __ add(R5, R5, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R6 = r_digits
  __ ldr(R6, Address(SP, 0 * target::kWordSize));
  // R6 = &r_digits[0]
  __ add(R6, R6, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R7 = &digits[a_used rounded up to even number].
  __ add(R7, R3, Operand(R4, LSL, 3));

  // R8 = &digits[used rounded up to even number].
  __ add(R8, R3, Operand(R2, LSL, 3));

  __ adds(R0, R0, Operand(0));  // carry flag = 0
  Label add_loop;
  __ Bind(&add_loop);
  // Loop (a_used+1)/2 times, a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R0, R0, R1);
  __ sub(R9, R3, Operand(R7));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&add_loop, R9);  // Does not affect carry flag.

  Label last_carry;
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ cbz(&last_carry, R9);      // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R0, R0, ZR);
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&carry_loop, R9);

  __ Bind(&last_carry);
  Label done;
  __ b(&done, CC);
  __ LoadImmediate(R0, 1);
  __ str(R0, Address(R6, 0));

  __ Bind(&done);
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R2 = used, R3 = digits
  __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
  __ add(R2, R2, Operand(2));  // used > 0, Smi. R2 = used + 1, round up.
  __ add(R2, ZR, Operand(R2, ASR, 2));  // R2 = num of digit pairs to process.
  // R3 = &digits[0]
  __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R4 = a_used, R5 = a_digits
  __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
  __ add(R4, R4, Operand(2));  // a_used > 0, Smi. R4 = a_used + 1, round up.
  __ add(R4, ZR, Operand(R4, ASR, 2));  // R4 = num of digit pairs to process.
  // R5 = &a_digits[0]
  __ add(R5, R5, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R6 = r_digits
  __ ldr(R6, Address(SP, 0 * target::kWordSize));
  // R6 = &r_digits[0]
  __ add(R6, R6, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R7 = &digits[a_used rounded up to even number].
  __ add(R7, R3, Operand(R4, LSL, 3));

  // R8 = &digits[used rounded up to even number].
  __ add(R8, R3, Operand(R2, LSL, 3));

  __ subs(R0, R0, Operand(0));  // carry flag = 1
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop (a_used+1)/2 times, a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R0, R0, R1);
  __ sub(R9, R3, Operand(R7));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&sub_loop, R9);  // Does not affect carry flag.

  Label done;
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ cbz(&done, R9);            // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R0, R0, ZR);
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&carry_loop, R9);

  __ Bind(&done);
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1];  // xi is Smi and even.
  //   if (x == 0 || n == 0) {
  //     return 2;
  //   }
  //   uint64_t* mip = &m_digits[i >> 1];  // i is Smi and even.
  //   uint64_t* ajp = &a_digits[j >> 1];  // j is Smi and even.
  //   uint64_t c = 0;
  //   SmiUntag(n);  // n is Smi and even.
  //   n = (n + 1)/2;  // Number of pairs to process.
  //   do {
  //     uint64_t mi = *mip++;
  //     uint64_t aj = *ajp;
  //     uint128_t t = x*mi + aj + c;  // 64-bit * 64-bit -> 128-bit.
  //     *ajp++ = low64(t);
  //     c = high64(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint128_t t = *ajp + c;
  //     *ajp++ = low64(t);
  //     c = high64(t);  // c == 0 or 1.
  //   }
  //   return 2;
  // }

  Label done;
  // R3 = x, no_op if x == 0
  // R0 = xi as Smi, R1 = x_digits.
  __ ldp(R0, R1, Address(SP, 5 * target::kWordSize, Address::PairOffset));
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R3, FieldAddress(R1, target::TypedData::data_offset()));
  __ tst(R3, Operand(R3));
  __ b(&done, EQ);

  // R6 = (SmiUntag(n) + 1)/2, no_op if n == 0
  __ ldr(R6, Address(SP, 0 * target::kWordSize));
  __ add(R6, R6, Operand(2));
  __ adds(R6, ZR, Operand(R6, ASR, 2));  // SmiUntag(R6) and set cc.
  __ b(&done, EQ);

  // R4 = mip = &m_digits[i >> 1]
  // R0 = i as Smi, R1 = m_digits.
  __ ldp(R0, R1, Address(SP, 3 * target::kWordSize, Address::PairOffset));
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R4, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R5 = ajp = &a_digits[j >> 1]
  // R0 = j as Smi, R1 = a_digits.
  __ ldp(R0, R1, Address(SP, 1 * target::kWordSize, Address::PairOffset));
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R5, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R1 = c = 0
  __ mov(R1, ZR);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x:   R3
  // mip: R4
  // ajp: R5
  // c:   R1
  // n:   R6
  // t:   R7:R8 (not live at loop entry)

  // uint64_t mi = *mip++
  __ ldr(R2, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // uint64_t aj = *ajp
  __ ldr(R0, Address(R5, 0));

  // uint128_t t = x*mi + aj + c
  __ mul(R7, R2, R3);    // R7 = low64(R2*R3).
  __ umulh(R8, R2, R3);  // R8 = high64(R2*R3), t = R8:R7 = x*mi.
  __ adds(R7, R7, Operand(R0));
  __ adc(R8, R8, ZR);            // t += aj.
  __ adds(R0, R7, Operand(R1));  // t += c, R0 = low64(t).
  __ adc(R1, R8, ZR);            // c = R1 = high64(t).

  // *ajp++ = low64(t) = R0
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // while (--n > 0)
  __ subs(R6, R6, Operand(1));  // --n
  __ b(&muladd_loop, NE);

  __ tst(R1, Operand(R1));
  __ b(&done, EQ);

  // *ajp++ += c
  __ ldr(R0, Address(R5, 0));
  __ adds(R0, R0, Operand(R1));
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&done, CC);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ ldr(R0, Address(R5, 0));
  __ adds(R0, R0, Operand(1));
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&propagate_carry_loop, CS);

  __ Bind(&done);
  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint64_t* xip = &x_digits[i >> 1];  // i is Smi and even.
  //   uint64_t x = *xip++;
  //   if (x == 0) return 2;
  //   uint64_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint64_t aj = *ajp;
  //   uint128_t t = x*x + aj;
  //   *ajp++ = low64(t);
  //   uint128_t c = high64(t);
  //   int n = ((used - i + 2) >> 2) - 1;  // used and i are Smi. n: num pairs.
  //   while (--n >= 0) {
  //     uint64_t xi = *xip++;
  //     uint64_t aj = *ajp;
  //     uint192_t t = 2*x*xi + aj + c;  // 2-bit * 64-bit * 64-bit -> 129-bit.
  //     *ajp++ = low64(t);
  //     c = high128(t);  // 65-bit.
  //   }
  //   uint64_t aj = *ajp;
  //   uint128_t t = aj + c;  // 64-bit + 65-bit -> 66-bit.
  //   *ajp++ = low64(t);
  //   *ajp = high64(t);
  //   return 2;
  // }

  // R4 = xip = &x_digits[i >> 1]
  // R2 = i as Smi, R3 = x_digits
  __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
  __ add(R3, R3, Operand(R2, LSL, 1));
  __ add(R4, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R3 = x = *xip++, return if x == 0
  Label x_zero;
  __ ldr(R3, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ tst(R3, Operand(R3));
  __ b(&x_zero, EQ);

  // R5 = ajp = &a_digits[i]
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // a_digits
  __ add(R1, R1, Operand(R2, LSL, 2));             // j == 2*i, i is Smi.
  __ add(R5, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R6:R1 = t = x*x + *ajp
  __ ldr(R0, Address(R5, 0));
  __ mul(R1, R3, R3);            // R1 = low64(R3*R3).
  __ umulh(R6, R3, R3);          // R6 = high64(R3*R3).
  __ adds(R1, R1, Operand(R0));  // R6:R1 += *ajp.
  __ adc(R6, R6, ZR);            // R6 = low64(c) = high64(t).
  __ mov(R7, ZR);                // R7 = high64(c) = 0.

  // *ajp++ = low64(t) = R1
  __ str(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // int n = (used - i + 1)/2 - 1
  __ ldr(R0, Address(SP, 0 * target::kWordSize));  // used is Smi
  __ sub(R8, R0, Operand(R2));
  __ add(R8, R8, Operand(2));
  __ movn(R0, Immediate(1), 0);          // R0 = ~1 = -2.
  __ adds(R8, R0, Operand(R8, ASR, 2));  // while (--n >= 0)

  Label loop, done;
  __ b(&done, MI);

  __ Bind(&loop);
  // x:   R3
  // xip: R4
  // ajp: R5
  // c:   R7:R6
  // t:   R2:R1:R0 (not live at loop entry)
  // n:   R8

  // uint64_t xi = *xip++
  __ ldr(R2, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // uint192_t t = R2:R1:R0 = 2*x*xi + aj + c
  __ mul(R0, R2, R3);    // R0 = low64(R2*R3) = low64(x*xi).
  __ umulh(R1, R2, R3);  // R1 = high64(R2*R3) = high64(x*xi).
  __ adds(R0, R0, Operand(R0));
  __ adcs(R1, R1, R1);
  __ adc(R2, ZR, ZR);  // R2:R1:R0 = R1:R0 + R1:R0 = 2*x*xi.
  __ adds(R0, R0, Operand(R6));
  __ adcs(R1, R1, R7);
  __ adc(R2, R2, ZR);          // R2:R1:R0 += c.
  __ ldr(R7, Address(R5, 0));  // R7 = aj = *ajp.
  __ adds(R0, R0, Operand(R7));
  __ adcs(R6, R1, ZR);
  __ adc(R7, R2, ZR);  // R7:R6:R0 = 2*x*xi + aj + c.

  // *ajp++ = low64(t) = R0
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // while (--n >= 0)
  __ subs(R8, R8, Operand(1));  // --n
  __ b(&loop, PL);

  __ Bind(&done);
  // uint64_t aj = *ajp
  __ ldr(R0, Address(R5, 0));

  // uint128_t t = aj + c
  __ adds(R6, R6, Operand(R0));
  __ adc(R7, R7, ZR);

  // *ajp = low64(t) = R6
  // *(ajp + 1) = high64(t) = R7
  __ stp(R6, R7, Address(R5, 0, Address::PairOffset));

  __ Bind(&x_zero);
  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // There is no 128-bit by 64-bit division instruction on arm64, so we use two
  // 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to
  // adjust the two 32-bit digits of the estimated quotient.
  //
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint64_t yt = args[_YT_LO .. _YT];  // _YT_LO == 0, _YT == 1.
  //   uint64_t* dp = &digits[(i >> 1) - 1];  // i is Smi.
  //   uint64_t dh = dp[0];  // dh == digits[(i >> 1) - 1 .. i >> 1].
  //   uint64_t qd;
  //   if (dh == yt) {
  //     qd = (DIGIT_MASK << 32) | DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
  //     // We cannot calculate qd = dh:dl / yt, so ...
  //     uint64_t yth = yt >> 32;
  //     uint64_t qh = dh / yth;
  //     uint128_t ph:pl = yt*qh;
  //     uint64_t tl = (dh << 32)|(dl >> 32);
  //     uint64_t th = dh >> 32;
  //     while ((ph > th) || ((ph == th) && (pl > tl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --qh;
  //     }
  //     qd = qh << 32;
  //     tl = (pl << 32);
  //     th = (ph << 32)|(pl >> 32);
  //     if (tl > dl) ++th;
  //     dl -= tl;
  //     dh -= th;
  //     uint64_t ql = ((dh << 32)|(dl >> 32)) / yth;
  //     ph:pl = yt*ql;
  //     while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --ql;
  //     }
  //     qd |= ql;
  //   }
  //   args[_QD .. _QD_HI] = qd;  // _QD == 2, _QD_HI == 3.
  //   return 2;
  // }

  // R4 = args
  __ ldr(R4, Address(SP, 2 * target::kWordSize));  // args

  // R3 = yt = args[0..1]
  __ ldr(R3, FieldAddress(R4, target::TypedData::data_offset()));

  // R2 = dh = digits[(i >> 1) - 1 .. i >> 1]
  // R0 = i as Smi, R1 = digits
  __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R2, FieldAddress(
                 R1, target::TypedData::data_offset() - kBytesPerBigIntDigit));

  // R0 = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
  __ movn(R0, Immediate(0), 0);

  // Return qd if dh == yt
  Label return_qd;
  __ cmp(R2, Operand(R3));
  __ b(&return_qd, EQ);

  // R1 = dl = digits[(i >> 1) - 3 .. (i >> 1) - 2]
  __ ldr(R1, FieldAddress(R1, target::TypedData::data_offset() -
                                  3 * kBytesPerBigIntDigit));

  // R5 = yth = yt >> 32
  __ orr(R5, ZR, Operand(R3, LSR, 32));

  // R6 = qh = dh / yth
  __ udiv(R6, R2, R5);

  // R8:R7 = ph:pl = yt*qh
  __ mul(R7, R3, R6);
  __ umulh(R8, R3, R6);

  // R9 = tl = (dh << 32)|(dl >> 32)
  __ orr(R9, ZR, Operand(R2, LSL, 32));
  __ orr(R9, R9, Operand(R1, LSR, 32));

  // R10 = th = dh >> 32
  __ orr(R10, ZR, Operand(R2, LSR, 32));

  // while ((ph > th) || ((ph == th) && (pl > tl)))
  Label qh_adj_loop, qh_adj, qh_ok;
  __ Bind(&qh_adj_loop);
  __ cmp(R8, Operand(R10));
  __ b(&qh_adj, HI);
  __ b(&qh_ok, NE);
  __ cmp(R7, Operand(R9));
  __ b(&qh_ok, LS);

  __ Bind(&qh_adj);
  // if (pl < yt) --ph
  __ sub(TMP, R8, Operand(1));  // TMP = ph - 1
  __ cmp(R7, Operand(R3));
  __ csel(R8, TMP, R8, CC);  // R8 = R7 < R3 ? TMP : R8

  // pl -= yt
  __ sub(R7, R7, Operand(R3));

  // --qh
  __ sub(R6, R6, Operand(1));

  // Continue while loop.
  __ b(&qh_adj_loop);

  __ Bind(&qh_ok);
  // R0 = qd = qh << 32
  __ orr(R0, ZR, Operand(R6, LSL, 32));

  // tl = (pl << 32)
  __ orr(R9, ZR, Operand(R7, LSL, 32));

  // th = (ph << 32)|(pl >> 32);
  __ orr(R10, ZR, Operand(R8, LSL, 32));
  __ orr(R10, R10, Operand(R7, LSR, 32));

  // if (tl > dl) ++th
  __ add(TMP, R10, Operand(1));  // TMP = th + 1
  __ cmp(R9, Operand(R1));
  __ csel(R10, TMP, R10, HI);  // R10 = R9 > R1 ? TMP : R10

  // dl -= tl
  __ sub(R1, R1, Operand(R9));

  // dh -= th
  __ sub(R2, R2, Operand(R10));

  // R6 = ql = ((dh << 32)|(dl >> 32)) / yth
  __ orr(R6, ZR, Operand(R2, LSL, 32));
  __ orr(R6, R6, Operand(R1, LSR, 32));
  __ udiv(R6, R6, R5);

  // R8:R7 = ph:pl = yt*ql
  __ mul(R7, R3, R6);
  __ umulh(R8, R3, R6);

  // while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  Label ql_adj_loop, ql_adj, ql_ok;
  __ Bind(&ql_adj_loop);
  __ cmp(R8, Operand(R2));
  __ b(&ql_adj, HI);
  __ b(&ql_ok, NE);
  __ cmp(R7, Operand(R1));
  __ b(&ql_ok, LS);

  __ Bind(&ql_adj);
  // if (pl < yt) --ph
  __ sub(TMP, R8, Operand(1));  // TMP = ph - 1
  __ cmp(R7, Operand(R3));
  __ csel(R8, TMP, R8, CC);  // R8 = R7 < R3 ? TMP : R8

  // pl -= yt
  __ sub(R7, R7, Operand(R3));

  // --ql
  __ sub(R6, R6, Operand(1));

  // Continue while loop.
  __ b(&ql_adj_loop);

  __ Bind(&ql_ok);
  // qd |= ql;
  __ orr(R0, R0, Operand(R6));

  __ Bind(&return_qd);
  // args[2..3] = qd
  __ str(R0, FieldAddress(R4, target::TypedData::data_offset() +
                                  2 * kBytesPerBigIntDigit));

  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
  //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
  //   uint128_t t = rho*d;
  //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
  //   return 2;
  // }

  // R4 = args
  __ ldr(R4, Address(SP, 2 * target::kWordSize));  // args

  // R3 = rho = args[2..3]
  __ ldr(R3, FieldAddress(R4, target::TypedData::data_offset() +
                                  2 * kBytesPerBigIntDigit));

  // R2 = digits[i >> 1 .. (i >> 1) + 1]
  // R0 = i as Smi, R1 = digits
  __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R2, FieldAddress(R1, target::TypedData::data_offset()));

  // R0 = rho*d mod DIGIT_BASE
  __ mul(R0, R2, R3);  // R0 = low64(R2*R3).

  // args[4 .. 5] = R0
  __ str(R0, FieldAddress(R4, target::TypedData::data_offset() +
                                  4 * kBytesPerBigIntDigit));

  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

// Checks if the last argument is a double. Jumps to label 'is_smi' if it is a
// Smi (easy to convert to double), otherwise jumps to label 'not_double_smi'.
// Returns the last argument in R0.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ BranchIfSmi(R0, is_smi);
  __ CompareClassId(R0, kDoubleCid);
  __ b(not_double_smi, NE);
  // Fall through with Double in R0.
}

// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown
// type. Return true or false object in the register R0. Any NaN argument
// returns false. Any non-double arg1 causes control flow to fall through to
// the slow case (compiled method body).
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  Label is_smi, double_op, not_nan;

  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in R0.

  __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
  __ Bind(&double_op);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());

  __ fcmpd(V0, V1);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  // Return false if V0 or V1 was NaN before checking the true condition.
  __ b(&not_nan, VC);
  __ ret();
  __ Bind(&not_nan);
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, true_condition);
  __ ret();

  __ Bind(&is_smi);  // Convert R0 to a double.
  __ SmiUntag(R0);
  __ scvtfdx(V1, R0);
  __ b(&double_op);  // Then do the comparison.
  __ Bind(normal_ir_body);
}
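
// On ARM64, fcmp against an unordered operand (NaN) sets the V flag, so the
// VC branch above filters NaNs before true_condition is evaluated; e.g.
// (NaN < 1.0), (NaN > 1.0) and (NaN == NaN) all produce false.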

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, HI);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CS);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CC);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQ);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, LS);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  Label is_smi, double_op;

  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in R0.
  __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
  __ Bind(&double_op);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  switch (kind) {
    case Token::kADD:
      __ faddd(V0, V0, V1);
      break;
    case Token::kSUB:
      __ fsubd(V0, V0, V1);
      break;
    case Token::kMUL:
      __ fmuld(V0, V0, V1);
      break;
    case Token::kDIV:
      __ fdivd(V0, V0, V1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, R0, R1);
  __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
  __ ret();

  __ Bind(&is_smi);  // Convert R0 to a double.
  __ SmiUntag(R0);
  __ scvtfdx(V1, R0);
  __ b(&double_op);

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

// Left is double, right is integer (Mint or Smi).
void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only Smis allowed.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(R0, normal_ir_body);
  // Is Smi.
  __ SmiUntag(R0);
  __ scvtfdx(V1, R0);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  __ fmuld(V0, V0, V1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, R0, R1);
  __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(R0, normal_ir_body);
  // Is Smi.
  __ SmiUntag(R0);
  __ scvtfdx(V0, R0);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, R0, R1);
  __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  __ fcmpd(V0, V0);
  __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, VC);
  __ ret();
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadFieldFromOffset(R0, R0, target::Double::value_offset());
  // Mask off the sign.
  __ AndImmediate(R0, R0, 0x7FFFFFFFFFFFFFFFLL);
  // Compare with +infinity.
  __ CompareImmediate(R0, 0x7FF0000000000000LL);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, EQ);
  __ ret();
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  const Register false_reg = R0;
  const Register true_reg = R2;
  Label is_false, is_true, is_zero;

  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  __ fcmpdz(V0);
  __ LoadObject(true_reg, CastHandle<Object>(TrueObject()));
  __ LoadObject(false_reg, CastHandle<Object>(FalseObject()));
  __ b(&is_false, VS);  // NaN -> false.
  __ b(&is_zero, EQ);   // Check for negative zero.
  __ b(&is_false, CS);  // >= 0 -> false.

  __ Bind(&is_true);
  __ mov(R0, true_reg);

  __ Bind(&is_false);
  __ ret();

  __ Bind(&is_zero);
  // Check for negative zero by looking at the sign bit.
  __ fmovrd(R1, V0);
  __ LsrImmediate(R1, R1, 63);
  __ tsti(R1, Immediate(1));
  __ csel(R0, true_reg, false_reg, NE);  // Sign bit set.
  __ ret();
}

void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
                                      Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());

  // Explicit NaN check: converting NaN to an integer does not produce a
  // meaningful value, so defer to the runtime.
  __ fcmpd(V0, V0);
  __ b(normal_ir_body, VS);

  __ fcvtzds(R0, V0);
  // Overflow is signaled with minint.
  // Check for overflow and that it fits into Smi.
  __ CompareImmediate(R0, 0xC000000000000000);
  __ b(normal_ir_body, MI);
  __ SmiTag(R0);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                      Label* normal_ir_body) {
  // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

  // Load double value and check that it isn't NaN, since ARM gives an
  // FPU exception if you try to convert NaN to an int.
  Label double_hash;
  __ ldr(R1, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R1, target::Double::value_offset());
  __ fcmpd(V0, V0);
  __ b(&double_hash, VS);

  // Convert double value to signed 64-bit int in R0 and back to a
  // double value in V1.
  __ fcvtzds(R0, V0);
  __ scvtfdx(V1, R0);

  // Tag the int as a Smi, making sure that it fits; this checks for
  // overflow in the conversion from double to int. Conversion
  // overflow is signalled by fcvt through clamping R0 to either
  // INT64_MAX or INT64_MIN (saturation).
  ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
  __ adds(R0, R0, Operand(R0));
  __ b(normal_ir_body, VS);

  // Compare the two double values. If they are equal, we return the
  // Smi tagged result immediately as the hash code.
  __ fcmpd(V0, V1);
  __ b(&double_hash, NE);
  __ ret();

  // Convert the double bits to a hash code that fits in a Smi.
  __ Bind(&double_hash);
  __ fmovrd(R0, V0);
  __ eor(R0, R0, Operand(R0, LSR, 32));
  __ AndImmediate(R0, R0, target::kSmiMax);
  __ SmiTag(R0);
  __ ret();

  // Fall into the native C++ implementation.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
  Label is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Argument is double and is in R0.
  __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
  __ Bind(&double_op);
  __ fsqrtd(V0, V1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, R0, R1);
  __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(R0);
  __ scvtfdx(V1, R0);
  __ b(&double_op);
  __ Bind(normal_ir_body);
}

// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
void AsmIntrinsifier::Random_nextState(Assembler* assembler,
                                       Label* normal_ir_body) {
  const Field& state_field = LookupMathRandomStateFieldOffset();
  const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;

  // Receiver.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  // Field '_state'.
  __ ldr(R1, FieldAddress(R0, LookupFieldOffsetInBytes(state_field)));

  // Address of _state[0].
  const int64_t disp =
      target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid) -
      kHeapObjectTag;

  __ LoadImmediate(R0, a_int_value);
  __ LoadFromOffset(R2, R1, disp);
  __ LsrImmediate(R3, R2, 32);
  __ andi(R2, R2, Immediate(0xffffffff));
  __ mul(R2, R0, R2);
  __ add(R2, R2, Operand(R3));
  __ StoreToOffset(R2, R1, disp);
  ASSERT(target::ToRawSmi(0) == 0);
  __ eor(R0, R0, Operand(R0));
  __ ret();
}
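
// The two adjacent 32-bit state words are handled as one 64-bit load/store
// pair, so the update above is roughly (a sketch):
//   uint64_t s = state_lo | ((uint64_t)state_hi << 32);  // single ldr
//   uint64_t next = kRandomAValue * (s & 0xffffffff) + (s >> 32);
//   // A single str writes next & 0xffffffff to lo and next >> 32 to hi.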

void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ cmp(R0, Operand(R1));
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, EQ);
  __ ret();
}

static void RangeCheck(Assembler* assembler,
                       Register val,
                       Register tmp,
                       intptr_t low,
                       intptr_t high,
                       Condition cc,
                       Label* target) {
  __ AddImmediate(tmp, val, -low);
  __ CompareImmediate(tmp, high - low);
  __ b(target, cc);
}
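
// A single unsigned comparison replaces two signed ones: in unsigned
// arithmetic, val - low <= high - low holds exactly when low <= val <= high,
// e.g. with low = kSmiCid and high = kMintCid both integer cids are tested
// at once.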
1600
1601const Condition kIfNotInRange = HI;
1602const Condition kIfInRange = LS;
1603
1604static void JumpIfInteger(Assembler* assembler,
1605 Register cid,
1606 Register tmp,
1607 Label* target) {
1608 RangeCheck(assembler, cid, tmp, kSmiCid, kMintCid, kIfInRange, target);
1609}
1610
1611static void JumpIfNotInteger(Assembler* assembler,
1612 Register cid,
1613 Register tmp,
1614 Label* target) {
1615 RangeCheck(assembler, cid, tmp, kSmiCid, kMintCid, kIfNotInRange, target);
1616}
1617
1618static void JumpIfString(Assembler* assembler,
1619 Register cid,
1620 Register tmp,
1621 Label* target) {
1622 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
1623 kIfInRange, target);
1624}
1625
1626static void JumpIfNotString(Assembler* assembler,
1627 Register cid,
1628 Register tmp,
1629 Label* target) {
1630 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
1631 kIfNotInRange, target);
1632}

// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_double, not_integer;
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(R1, R0);

  __ CompareImmediate(R1, kClosureCid);
  __ b(normal_ir_body, EQ);  // Instance is a closure.

  __ CompareImmediate(R1, kNumPredefinedCids);
  __ b(&use_declaration_type, HI);

  __ CompareImmediate(R1, kDoubleCid);
  __ b(&not_double, NE);

  __ LoadIsolate(R0);
  __ LoadFromOffset(R0, R0, target::Isolate::cached_object_store_offset());
  __ LoadFromOffset(R0, R0, target::ObjectStore::double_type_offset());
  __ ret();

  __ Bind(&not_double);
  JumpIfNotInteger(assembler, R1, R0, &not_integer);
  __ LoadIsolate(R0);
  __ LoadFromOffset(R0, R0, target::Isolate::cached_object_store_offset());
  __ LoadFromOffset(R0, R0, target::ObjectStore::int_type_offset());
  __ ret();

  __ Bind(&not_integer);
  JumpIfNotString(assembler, R1, R0, &use_declaration_type);
  __ LoadIsolate(R0);
  __ LoadFromOffset(R0, R0, target::Isolate::cached_object_store_offset());
  __ LoadFromOffset(R0, R0, target::ObjectStore::string_type_offset());
  __ ret();

  __ Bind(&use_declaration_type);
  __ LoadClassById(R2, R1);
  __ ldr(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()),
         kHalfword);
  __ CompareImmediate(R3, 0);
  __ b(normal_ir_body, NE);

  __ ldr(R0, FieldAddress(R2, target::Class::declaration_type_offset()));
  __ CompareObject(R0, NullObject());
  __ b(normal_ir_body, EQ);
  __ ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal or not_equal,
// otherwise it jumps to normal_ir_body. May clobber cid1, cid2, and scratch.
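// A rough pseudocode sketch of the fast path below (isInteger/isString stand
// for the cid range checks done by the JumpIf* helpers above):
//
//   if (cid1 == kClosureCid) goto normal_ir_body;
//   if (cid1 == cid2) {
//     if (Class(cid1).num_type_arguments != 0) goto normal_ir_body;
//     goto equal;
//   }
//   if (cid1 >= kNumPredefinedCids) goto not_equal;
//   if (isInteger(cid1)) goto (isInteger(cid2) ? equal : not_equal);
//   if (isString(cid1)) goto (isString(cid2) ? equal : not_equal);
//   goto not_equal;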
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch) {
  Label different_cids, not_integer;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ CompareImmediate(cid1, kClosureCid);
  __ b(normal_ir_body, EQ);

  // Check whether class ids match. If class ids don't match, the types may
  // still be considered equivalent (e.g. multiple string implementation
  // classes map to a single String type).
  __ cmp(cid1, Operand(cid2));
  __ b(&different_cids, NE);

  // Types have the same class and neither is a closure type.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(scratch, cid1);
  __ ldr(scratch,
         FieldAddress(scratch, target::Class::num_type_arguments_offset()),
         kHalfword);
  __ cbnz(normal_ir_body, scratch);
  __ b(equal);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations) or two integer types.
  __ Bind(&different_cids);
  __ CompareImmediate(cid1, kNumPredefinedCids);
  __ b(not_equal, HI);

  // Check if both are integer types.
  JumpIfNotInteger(assembler, cid1, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  // Otherwise the types are not equivalent, because only integers have the
  // same runtime type as other integers.
  JumpIfInteger(assembler, cid2, scratch, equal);
  __ b(not_equal);

  __ Bind(&not_integer);
  // Check if the first type is String. If it is not, then the types are not
  // equivalent, because they have different class ids and they are not strings
  // or integers.
  JumpIfNotString(assembler, cid1, scratch, not_equal);
  // First type is String. Check if the second is a string too.
  JumpIfString(assembler, cid2, scratch, equal);
  // String types are only equivalent to other String types.
  // Fall-through to the not equal case.
  __ b(not_equal);
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(R1, R0);

  __ ldr(R0, Address(SP, 1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(R2, R0);

  Label equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal, &not_equal, R1, R2, R0);

  __ Bind(&equal);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::String::hash_offset()), kUnsignedWord);
  __ adds(R0, R0, Operand(R0));  // Smi tag the hash code, setting Z flag.
  __ b(normal_ir_body, EQ);
  __ ret();
  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
                                       Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::Type::hash_offset()));
  __ cbz(normal_ir_body, R0);
  __ ret();
  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids, check_legacy;

  __ ldp(R1, R2, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ cmp(R1, Operand(R2));
  __ b(&equal, EQ);

  // R1 might not be a Type object, so check that first (R2 should be one,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(R0, R1);
  __ CompareImmediate(R0, kTypeCid);
  __ b(normal_ir_body, NE);

  // Check if types are syntactically equal.
  __ ldr(R3, FieldAddress(R1, target::Type::type_class_id_offset()));
  __ SmiUntag(R3);
  __ ldr(R4, FieldAddress(R2, target::Type::type_class_id_offset()));
  __ SmiUntag(R4);
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids, &not_equal, R3, R4,
                     R0);

  // Check nullability.
  __ Bind(&equiv_cids);
  __ ldr(R1, FieldAddress(R1, target::Type::nullability_offset()),
         kUnsignedByte);
  __ ldr(R2, FieldAddress(R2, target::Type::nullability_offset()),
         kUnsignedByte);
  __ cmp(R1, Operand(R2));
  __ b(&check_legacy, NE);
  // Fall through to equal case if nullability is strictly equal.

  __ Bind(&equal);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  // At this point the nullabilities are different, so they can only be
  // syntactically equivalent if they're both either kNonNullable or kLegacy.
  // These are the two largest values of the enum, so we can just do a < check.
  ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
         target::Nullability::kNonNullable < target::Nullability::kLegacy);
  __ Bind(&check_legacy);
  __ CompareImmediate(R1, target::Nullability::kNonNullable);
  __ b(&not_equal, LT);
  __ CompareImmediate(R2, target::Nullability::kNonNullable);
  __ b(&equal, GE);

  __ Bind(&not_equal);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::String::hash_offset()), kUnsignedWord);
  __ SmiTag(R0);
  __ ret();
}

void AsmIntrinsifier::Object_setHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Object.
  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Value.
  __ SmiUntag(R1);
  __ str(R1, FieldAddress(R0, target::String::hash_offset()), kUnsignedWord);
  __ ret();
}

void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(R1);
  __ ldr(R8, FieldAddress(R0, target::String::length_offset()));  // this.length
  __ SmiUntag(R8);
  __ ldr(R9,
         FieldAddress(R2, target::String::length_offset()));  // other.length
  __ SmiUntag(R9);

  // if (other.length == 0) return true;
  __ cmp(R9, Operand(0));
  __ b(return_true, EQ);

  // if (start < 0) return false;
  __ cmp(R1, Operand(0));
  __ b(return_false, LT);

  // if (start + other.length > this.length) return false;
  __ add(R3, R1, Operand(R9));
  __ cmp(R3, Operand(R8));
  __ b(return_false, GT);

  if (receiver_cid == kOneByteStringCid) {
    __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
    __ add(R0, R0, Operand(R1));
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R1));
  }
  if (other_cid == kOneByteStringCid) {
    __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
  } else {
    ASSERT(other_cid == kTwoByteStringCid);
    __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
  }

  // i = 0
  __ LoadImmediate(R3, 0);

  // do
  Label loop;
  __ Bind(&loop);

  // this.codeUnitAt(i + start)
  __ ldr(R10, Address(R0, 0),
         receiver_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedHalfword);
  // other.codeUnitAt(i)
  __ ldr(R11, Address(R2, 0),
         other_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedHalfword);
  __ cmp(R10, Operand(R11));
  __ b(return_false, NE);

  // i++, while (i < len)
  __ add(R3, R3, Operand(1));
  __ add(R0, R0, Operand(receiver_cid == kOneByteStringCid ? 1 : 2));
  __ add(R2, R2, Operand(other_cid == kOneByteStringCid ? 1 : 2));
  __ cmp(R3, Operand(R9));
  __ b(&loop, LT);

  __ b(return_true);
}

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ ldr(R0, Address(SP, 2 * target::kWordSize));  // this
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // start
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // other

  __ BranchIfNotSmi(R1, normal_ir_body);

  __ CompareClassId(R2, kOneByteStringCid);
  __ b(normal_ir_body, NE);

  __ CompareClassId(R0, kOneByteStringCid);
  __ b(&try_two_byte, NE);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(R0, kTwoByteStringCid);
  __ b(normal_ir_body, NE);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&return_false);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

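// string.codeUnitAt(index) returned as a one-character string. Only char
// codes with a canonical one-character symbol in the VM symbol table (codes
// below target::Symbols::kNumberOfOneCharCodeSymbols) are handled here; all
// other inputs fall back to the runtime.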
void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;

  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Index.
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // String.
  __ BranchIfNotSmi(R1, normal_ir_body);  // Index is not a Smi.
  // Range check.
  __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
  __ cmp(R1, Operand(R2));
  __ b(normal_ir_body, CS);  // Runtime throws exception.

  __ CompareClassId(R0, kOneByteStringCid);
  __ b(&try_two_byte_string, NE);
  __ SmiUntag(R1);
  __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
  __ ldr(R1, Address(R0, R1), kUnsignedByte);
  __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ b(normal_ir_body, GE);
  __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ AddImmediate(
      R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
  __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
  __ ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(R0, kTwoByteStringCid);
  __ b(normal_ir_body, NE);
  ASSERT(kSmiTagShift == 1);
  __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
  __ ldr(R1, Address(R0, R1), kUnsignedHalfword);
  __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ b(normal_ir_body, GE);
  __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ AddImmediate(
      R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
  __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::String::length_offset()));
  __ cmp(R0, Operand(target::ToRawSmi(0)));
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
  __ csel(R0, TMP, R0, NE);
  __ ret();
}

void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label compute_hash;
  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // OneByteString object.
  __ ldr(R0, FieldAddress(R1, target::String::hash_offset()), kUnsignedWord);
  __ adds(R0, R0, Operand(R0));  // Smi tag the hash code, setting Z flag.
  __ b(&compute_hash, EQ);
  __ ret();  // Return if already computed.

  __ Bind(&compute_hash);
  __ ldr(R2, FieldAddress(R1, target::String::length_offset()));
  __ SmiUntag(R2);

  Label done;
  // If the string is empty, set the hash to 1, and return.
  __ CompareRegisters(R2, ZR);
  __ b(&done, EQ);

  __ mov(R3, ZR);
  __ AddImmediate(R6, R1,
                  target::OneByteString::data_offset() - kHeapObjectTag);
  // R1: Instance of OneByteString.
  // R2: String length, untagged integer.
  // R3: Loop counter, untagged integer.
  // R6: String data.
  // R0: Hash code, untagged integer.

  Label loop;
  // Add to hash code: (hash_ is uint32)
  // hash_ += ch;
  // hash_ += hash_ << 10;
  // hash_ ^= hash_ >> 6;
  // Get one character (ch).
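  // For reference, a C sketch of the whole computation (Jenkins'
  // one-at-a-time hash, truncated to target::String::kHashBits, with a zero
  // result mapped to 1 so that a stored hash of 0 always means "not yet
  // computed"):
  //
  //   uint32_t hash = 0;
  //   for (intptr_t i = 0; i < length; i++) {
  //     hash += data[i];
  //     hash += hash << 10;
  //     hash ^= hash >> 6;
  //   }
  //   hash += hash << 3;
  //   hash ^= hash >> 11;
  //   hash += hash << 15;
  //   hash &= (static_cast<uint32_t>(1) << kHashBits) - 1;
  //   return (hash == 0) ? 1 : hash;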
  __ Bind(&loop);
  __ ldr(R7, Address(R6, R3), kUnsignedByte);
  // R7: ch.
  __ add(R3, R3, Operand(1));
  __ addw(R0, R0, Operand(R7));
  __ addw(R0, R0, Operand(R0, LSL, 10));
  __ eorw(R0, R0, Operand(R0, LSR, 6));
  __ cmp(R3, Operand(R2));
  __ b(&loop, NE);

  // Finalize.
  // hash_ += hash_ << 3;
  // hash_ ^= hash_ >> 11;
  // hash_ += hash_ << 15;
  __ addw(R0, R0, Operand(R0, LSL, 3));
  __ eorw(R0, R0, Operand(R0, LSR, 11));
  __ addw(R0, R0, Operand(R0, LSL, 15));
  // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
  __ AndImmediate(R0, R0,
                  (static_cast<intptr_t>(1) << target::String::kHashBits) - 1);
  __ CompareRegisters(R0, ZR);
  // return hash_ == 0 ? 1 : hash_;
  __ Bind(&done);
  __ csinc(R0, R0, ZR, NE);  // R0 <- (R0 != 0) ? R0 : (ZR + 1).
  __ str(R0, FieldAddress(R1, target::String::hash_offset()), kUnsignedWord);
  __ SmiTag(R0);
  __ ret();
}

// Allocates a _OneByteString or _TwoByteString. The content is not
// initialized. 'length-reg' (R2) contains the desired length as a _Smi or
// _Mint. Returns the new string as a tagged pointer in R0.
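// In C terms, the allocation size computed below is roughly (element_size is
// 1 for one-byte and 2 for two-byte strings; a zero length is bumped to one
// element by the csinc, see the comment in the body):
//
//   size = (target::String::InstanceSize() + length * element_size +
//           kObjectAlignment - 1) & ~(kObjectAlignment - 1);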
static void TryAllocateString(Assembler* assembler,
                              classid_t cid,
                              Label* ok,
                              Label* failure) {
  ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
  const Register length_reg = R2;
  // _Mint length: call to runtime to produce error.
  __ BranchIfNotSmi(length_reg, failure);
  // Negative length: call to runtime to produce error.
  __ tbnz(failure, length_reg, compiler::target::kBitsPerWord - 1);

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R0, failure));
  __ mov(R6, length_reg);  // Save the length register.
  if (cid == kOneByteStringCid) {
    // Untag length.
    __ adds(length_reg, ZR, Operand(length_reg, ASR, kSmiTagSize));
  } else {
    // Untag length and multiply by element size -> no-op.
    __ adds(length_reg, ZR, Operand(length_reg));
  }
  // If the length is 0 then we have to make the allocated size a bit bigger,
  // otherwise the string takes up less space than an ExternalOneByteString,
  // and cannot be externalized. TODO(erikcorry): We should probably just
  // return a static zero length string here instead.
  // length <- (length != 0) ? length : (ZR + 1).
  __ csinc(length_reg, length_reg, ZR, NE);
  const intptr_t fixed_size_plus_alignment_padding =
      target::String::InstanceSize() +
      target::ObjectAlignment::kObjectAlignment - 1;
  __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
  __ andi(length_reg, length_reg,
          Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));

  __ ldr(R0, Address(THR, target::Thread::top_offset()));

  // length_reg: allocation size.
  __ adds(R1, R0, Operand(length_reg));
  __ b(failure, CS);  // Fail on unsigned overflow.

  // Check if the allocation fits into the remaining space.
  // R0: potential new object start.
  // R1: potential next object start.
  // R2: allocation size.
  __ ldr(R7, Address(THR, target::Thread::end_offset()));
  __ cmp(R1, Operand(R7));
  __ b(failure, CS);

  // Successfully allocated the object(s), now update top to point to
  // next object start and initialize the object.
  __ str(R1, Address(THR, target::Thread::top_offset()));
  __ AddImmediate(R0, kHeapObjectTag);

  // Initialize the tags.
  // R0: new object start as a tagged pointer.
  // R1: new object end address.
  // R2: allocation size.
  {
    const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2;

    __ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);
    __ LslImmediate(R2, R2, shift);
    __ csel(R2, R2, ZR, LS);

    // Get the class index and insert it into the tags.
    // R2: size and bit tags.
    // This also clears the hash, which is in the high word of the tags.
    const uint32_t tags =
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
    __ LoadImmediate(TMP, tags);
    __ orr(R2, R2, Operand(TMP));
    __ str(R2, FieldAddress(R0, target::Object::tags_offset()));  // Store tags.
  }

  // Set the length field using the saved length (R6).
  __ StoreIntoObjectNoBarrier(
      R0, FieldAddress(R0, target::String::length_offset()), R6);
  __ b(ok);
}

// Arg0: OneByteString (receiver).
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
                                                       Label* normal_ir_body) {
  const intptr_t kStringOffset = 2 * target::kWordSize;
  const intptr_t kStartIndexOffset = 1 * target::kWordSize;
  const intptr_t kEndIndexOffset = 0 * target::kWordSize;
  Label ok;

  __ ldr(R2, Address(SP, kEndIndexOffset));
  __ ldr(TMP, Address(SP, kStartIndexOffset));
  __ orr(R3, R2, Operand(TMP));
  __ BranchIfNotSmi(R3, normal_ir_body);  // 'start', 'end' not Smi.

  __ sub(R2, R2, Operand(TMP));
  TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body);
  __ Bind(&ok);
  // R0: new string as tagged pointer.
  // Copy string.
  __ ldr(R3, Address(SP, kStringOffset));
  __ ldr(R1, Address(SP, kStartIndexOffset));
  __ SmiUntag(R1);
  __ add(R3, R3, Operand(R1));
  // Calculate start address and untag (- 1).
  __ AddImmediate(R3, target::OneByteString::data_offset() - 1);

  // R3: Start address to copy from (untagged).
  // R1: Untagged start index.
  __ ldr(R2, Address(SP, kEndIndexOffset));
  __ SmiUntag(R2);
  __ sub(R2, R2, Operand(R1));

  // R3: Start address to copy from (untagged).
  // R2: Untagged number of bytes to copy.
  // R0: Tagged result string.
  // R6: Pointer into R3.
  // R7: Pointer into R0.
  // R1: Scratch register.
  Label loop, done;
  __ cmp(R2, Operand(0));
  __ b(&done, LE);
  __ mov(R6, R3);
  __ mov(R7, R0);
  __ Bind(&loop);
  __ ldr(R1, Address(R6), kUnsignedByte);
  __ AddImmediate(R6, 1);
  __ sub(R2, R2, Operand(1));
  __ cmp(R2, Operand(0));
  __ str(R1, FieldAddress(R7, target::OneByteString::data_offset()),
         kUnsignedByte);
  __ AddImmediate(R7, 1);
  __ b(&loop, GT);

  __ Bind(&done);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Value.
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // Index.
  __ ldr(R0, Address(SP, 2 * target::kWordSize));  // OneByteString.
  __ SmiUntag(R1);
  __ SmiUntag(R2);
  __ AddImmediate(R3, R0,
                  target::OneByteString::data_offset() - kHeapObjectTag);
  __ str(R2, Address(R3, R1), kUnsignedByte);
  __ ret();
}

void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Value.
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // Index.
  __ ldr(R0, Address(SP, 2 * target::kWordSize));  // TwoByteString.
  // Untag index and multiply by element size -> no-op.
  __ SmiUntag(R2);
  __ AddImmediate(R3, R0,
                  target::TwoByteString::data_offset() - kHeapObjectTag);
  __ str(R2, Address(R3, R1), kUnsignedHalfword);
  __ ret();
}

void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  Label ok;

  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Length.
  TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body);

  __ Bind(&ok);
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  Label ok;

  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Length.
  TryAllocateString(assembler, kTwoByteStringCid, &ok, normal_ir_body);

  __ Bind(&ok);
  __ ret();

  __ Bind(normal_ir_body);
}

// TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
static void StringEquality(Assembler* assembler,
                           Label* normal_ir_body,
                           intptr_t string_cid) {
  Label is_true, is_false, loop;
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // This.
  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Other.

  // Are identical?
  __ cmp(R0, Operand(R1));
  __ b(&is_true, EQ);

  // Is other a string with the same cid?
  __ BranchIfSmi(R1, normal_ir_body);
  __ CompareClassId(R1, string_cid);
  __ b(normal_ir_body, NE);

  // Have same length?
  __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
  __ ldr(R3, FieldAddress(R1, target::String::length_offset()));
  __ cmp(R2, Operand(R3));
  __ b(&is_false, NE);

  // Check contents, no fall-through possible.
  // TODO(zra): try out other sequences.
  ASSERT((string_cid == kOneByteStringCid) ||
         (string_cid == kTwoByteStringCid));
  const intptr_t offset = (string_cid == kOneByteStringCid)
                              ? target::OneByteString::data_offset()
                              : target::TwoByteString::data_offset();
  __ AddImmediate(R0, offset - kHeapObjectTag);
  __ AddImmediate(R1, offset - kHeapObjectTag);
  __ SmiUntag(R2);
  __ Bind(&loop);
  __ AddImmediate(R2, -1);
  __ CompareRegisters(R2, ZR);
  __ b(&is_true, LT);
  if (string_cid == kOneByteStringCid) {
    __ ldr(R3, Address(R0), kUnsignedByte);
    __ ldr(R4, Address(R1), kUnsignedByte);
    __ AddImmediate(R0, 1);
    __ AddImmediate(R1, 1);
  } else if (string_cid == kTwoByteStringCid) {
    __ ldr(R3, Address(R0), kUnsignedHalfword);
    __ ldr(R4, Address(R1), kUnsignedHalfword);
    __ AddImmediate(R0, 2);
    __ AddImmediate(R1, 2);
  } else {
    UNIMPLEMENTED();
  }
  __ cmp(R3, Operand(R4));
  __ b(&is_false, NE);
  __ b(&loop);

  __ Bind(&is_true);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&is_false);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  StringEquality(assembler, normal_ir_body, kOneByteStringCid);
}

void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
}

void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
                                                   Label* normal_ir_body,
                                                   bool sticky) {
  if (FLAG_interpret_irregexp) return;

  static const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
  static const intptr_t kStringParamOffset = 1 * target::kWordSize;
  // The start_index Smi is located at offset 0.

  // Incoming registers:
  // R0: Function. (Will be reloaded with the specialized matcher function.)
  // R4: Arguments descriptor. (Will be preserved.)
  // R5: Unknown. (Must be GC safe on tail call.)

  // Load the specialized function pointer into R0. Leverage the fact that the
  // string CIDs as well as the stored function pointers are in sequence.
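  // In effect (a sketch of the address arithmetic generated below):
  //
  //   function = regexp[function_offset(kOneByteStringCid, sticky) +
  //                     (string_cid - kOneByteStringCid) * kWordSize];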
  __ ldr(R2, Address(SP, kRegExpParamOffset));
  __ ldr(R1, Address(SP, kStringParamOffset));
  __ LoadClassId(R1, R1);
  __ AddImmediate(R1, -kOneByteStringCid);
  __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
  __ ldr(R0, FieldAddress(R1, target::RegExp::function_offset(kOneByteStringCid,
                                                              sticky)));

  // Registers are now set up for the lazy compile stub. It expects the function
  // in R0, the argument descriptor in R4, and IC-Data in R5.
  __ eor(R5, R5, Operand(R5));

  // Tail-call the function.
  __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
  __ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
  __ br(R1);
}

// On stack: user tag (+0).
void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
                                          Label* normal_ir_body) {
  // R1: Isolate.
  __ LoadIsolate(R1);
  // R0: Current user tag.
  __ ldr(R0, Address(R1, target::Isolate::current_tag_offset()));
  // R2: UserTag.
  __ ldr(R2, Address(SP, +0 * target::kWordSize));
  // Set target::Isolate::current_tag_.
  __ str(R2, Address(R1, target::Isolate::current_tag_offset()));
  // R2: UserTag's tag.
  __ ldr(R2, FieldAddress(R2, target::UserTag::tag_offset()));
  // Set target::Isolate::user_tag_.
  __ str(R2, Address(R1, target::Isolate::user_tag_offset()));
  __ ret();
}

void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ LoadIsolate(R0);
  __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
  __ ret();
}

void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ LoadIsolate(R0);
  __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
  __ ret();
}

void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
                                                   Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();
#else
  // Load TimelineStream*.
  __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
  // Load uintptr_t from TimelineStream*.
  __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
  __ cmp(R0, Operand(0));
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, NE);
  __ ret();
#endif
}

void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
                                                 Label* normal_ir_body) {
  __ LoadObject(R0, NullObject());
  __ str(R0, Address(THR, target::Thread::async_stack_trace_offset()));
  __ ret();
}

void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ str(R0, Address(THR, target::Thread::async_stack_trace_offset()));
  __ LoadObject(R0, NullObject());
  __ ret();
}

#undef __

}  // namespace compiler
}  // namespace dart

#endif  // defined(TARGET_ARCH_ARM64)