// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
#if defined(TARGET_ARCH_ARM)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// R4: Arguments descriptor
// LR: Return address
// The R4 register can be destroyed only if there is no slow-path, i.e.
// if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_arm.h) must be preserved.

#define __ assembler->

intptr_t AsmIntrinsifier::ParameterSlotFromSp() {
  return -1;
}

static bool IsABIPreservedRegister(Register reg) {
  return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
}

void AsmIntrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
  ASSERT(IsABIPreservedRegister(CODE_REG));
  ASSERT(IsABIPreservedRegister(ARGS_DESC_REG));
  ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));

  // Save LR by moving it to a callee saved temporary register.
  assembler->Comment("IntrinsicCallPrologue");
  assembler->mov(CALLEE_SAVED_TEMP, Operand(LR));
}

void AsmIntrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
  // Restore LR.
  assembler->Comment("IntrinsicCallEpilogue");
  assembler->mov(LR, Operand(CALLEE_SAVED_TEMP));
}

// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+1), data (+0).
void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
                                             Label* normal_ir_body) {
  // The newly allocated object is returned in R0.
  const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
  const intptr_t kArrayOffset = 0 * target::kWordSize;

  // Try allocating in new space.
  const Class& cls = GrowableObjectArrayClass();
  __ TryAllocate(cls, normal_ir_body, R0, R1);

  // Store backing array object in growable array object.
  __ ldr(R1, Address(SP, kArrayOffset));  // Data argument.
  // R0 is new, no barrier needed.
  __ StoreIntoObjectNoBarrier(
      R0, FieldAddress(R0, target::GrowableObjectArray::data_offset()), R1);

  // R0: new growable array object start as a tagged pointer.
  // Store the type argument field in the growable array object.
  __ ldr(R1, Address(SP, kTypeArgumentsOffset));  // Type argument.
  __ StoreIntoObjectNoBarrier(
      R0,
      FieldAddress(R0, target::GrowableObjectArray::type_arguments_offset()),
      R1);

  // Set the length field in the growable array object to 0.
  __ LoadImmediate(R1, 0);
  __ StoreIntoObjectNoBarrier(
      R0, FieldAddress(R0, target::GrowableObjectArray::length_offset()), R1);
  __ Ret();  // Returns the newly allocated object in R0.

  __ Bind(normal_ir_body);
}

#define TYPED_ARRAY_ALLOCATION(cid, max_len, scale_shift) \
  Label fall_through; \
  const intptr_t kArrayLengthStackOffset = 0 * target::kWordSize; \
  NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R2, cid)); \
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(R2, normal_ir_body)); \
  __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
  /* Check that length is a positive Smi. */ \
  /* R2: requested array length argument. */ \
  __ tst(R2, Operand(kSmiTagMask)); \
  __ b(normal_ir_body, NE); \
  __ CompareImmediate(R2, 0); \
  __ b(normal_ir_body, LT); \
  __ SmiUntag(R2); \
  /* Check for maximum allowed length. */ \
  /* R2: untagged array length. */ \
  __ CompareImmediate(R2, max_len); \
  __ b(normal_ir_body, GT); \
  __ mov(R2, Operand(R2, LSL, scale_shift)); \
  const intptr_t fixed_size_plus_alignment_padding = \
      target::TypedData::InstanceSize() + \
      target::ObjectAlignment::kObjectAlignment - 1; \
  __ AddImmediate(R2, fixed_size_plus_alignment_padding); \
  __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1)); \
  __ ldr(R0, Address(THR, target::Thread::top_offset())); \
  \
  /* R2: allocation size. */ \
  __ adds(R1, R0, Operand(R2)); \
  __ b(normal_ir_body, CS); /* Fail on unsigned overflow. */ \
  \
  /* Check if the allocation fits into the remaining space. */ \
  /* R0: potential new object start. */ \
  /* R1: potential next object start. */ \
  /* R2: allocation size. */ \
  __ ldr(IP, Address(THR, target::Thread::end_offset())); \
  __ cmp(R1, Operand(IP)); \
  __ b(normal_ir_body, CS); \
  \
  __ str(R1, Address(THR, target::Thread::top_offset())); \
  __ AddImmediate(R0, kHeapObjectTag); \
  /* Initialize the tags. */ \
  /* R0: new object start as a tagged pointer. */ \
  /* R1: new object end address. */ \
  /* R2: allocation size. */ \
  { \
    __ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag); \
    __ mov(R3, \
           Operand(R2, LSL, \
                   target::ObjectLayout::kTagBitsSizeTagPos - \
                       target::ObjectAlignment::kObjectAlignmentLog2), \
           LS); \
    __ mov(R3, Operand(0), HI); \
    \
    /* Get the class index and insert it into the tags. */ \
    uint32_t tags = \
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0); \
    __ LoadImmediate(TMP, tags); \
    __ orr(R3, R3, Operand(TMP)); \
    __ str(R3, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */ \
  } \
  /* Set the length field. */ \
  /* R0: new object start as a tagged pointer. */ \
  /* R1: new object end address. */ \
  /* R2: allocation size. */ \
  __ ldr(R3, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
  __ StoreIntoObjectNoBarrier( \
      R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R3); \
  /* Initialize all array elements to 0. */ \
  /* R0: new object start as a tagged pointer. */ \
  /* R1: new object end address. */ \
  /* R2: allocation size. */ \
  /* R3: iterator which initially points to the start of the variable */ \
  /* data area to be initialized. */ \
  /* R8, R9: zero. */ \
  __ LoadImmediate(R8, 0); \
  __ mov(R9, Operand(R8)); \
  __ AddImmediate(R3, R0, target::TypedData::InstanceSize() - 1); \
  __ StoreInternalPointer( \
      R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R3); \
  Label init_loop; \
  __ Bind(&init_loop); \
  __ AddImmediate(R3, 2 * target::kWordSize); \
  __ cmp(R3, Operand(R1)); \
  __ strd(R8, R9, R3, -2 * target::kWordSize, LS); \
  __ b(&init_loop, CC); \
  __ str(R8, Address(R3, -2 * target::kWordSize), HI); \
  \
  __ Ret(); \
  __ Bind(normal_ir_body);
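
// The size computation in TYPED_ARRAY_ALLOCATION above can be read as the
// following C sketch (illustrative only; the k-constants stand in for the
// target:: values used by the macro):
//
//   uint32_t size = (untagged_length << scale_shift) +
//                   kTypedDataInstanceSize + kObjectAlignment - 1;
//   size &= ~(kObjectAlignment - 1);  // The bic instruction above.
//
// The size tag written into the header is then
//   size <= kSizeTagMaxSizeTag
//       ? size << (kTagBitsSizeTagPos - kObjectAlignmentLog2)
//       : 0,
// which is what the two conditional mov instructions compute before the
// class-id tag word is or-ed in.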

static int GetScaleFactor(intptr_t size) {
  switch (size) {
    case 1:
      return 0;
    case 2:
      return 1;
    case 4:
      return 2;
    case 8:
      return 3;
    case 16:
      return 4;
  }
  UNREACHABLE();
  return -1;
}

#define TYPED_DATA_ALLOCATOR(clazz) \
  void AsmIntrinsifier::TypedData_##clazz##_factory(Assembler* assembler, \
                                                    Label* normal_ir_body) { \
    intptr_t size = TypedDataElementSizeInBytes(kTypedData##clazz##Cid); \
    intptr_t max_len = TypedDataMaxNewSpaceElements(kTypedData##clazz##Cid); \
    int shift = GetScaleFactor(size); \
    TYPED_ARRAY_ALLOCATION(kTypedData##clazz##Cid, max_len, shift); \
  }
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR

// Loads the arguments from the stack into R0 and R1.
// Tests if they are Smis; jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ ldr(R0, Address(SP, +0 * target::kWordSize));
  __ ldr(R1, Address(SP, +1 * target::kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ tst(TMP, Operand(kSmiTagMask));
  __ b(not_smi, NE);
}

void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // Checks two smis.
  __ adds(R0, R0, Operand(R1));  // Adds.
  __ bx(LR, VC);  // Return if no overflow.
  // Otherwise fall through.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_add(Assembler* assembler, Label* normal_ir_body) {
  Integer_addFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ subs(R0, R0, Operand(R1));  // Subtract.
  __ bx(LR, VC);  // Return if no overflow.
  // Otherwise fall through.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ subs(R0, R1, Operand(R0));  // Subtract.
  __ bx(LR, VC);  // Return if no overflow.
  // Otherwise fall through.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_mulFromInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // checks two smis
  __ SmiUntag(R0);  // Untags R0. We only want result shifted by one.
  __ smull(R0, IP, R0, R1);  // IP:R0 <- R0 * R1.
  __ cmp(IP, Operand(R0, ASR, 31));
  __ bx(LR, EQ);
  __ Bind(normal_ir_body);  // Fall through on overflow.
}

void AsmIntrinsifier::Integer_mul(Assembler* assembler, Label* normal_ir_body) {
  Integer_mulFromInteger(assembler, normal_ir_body);
}

// Optimizations:
// - result is 0 if:
//   - left is 0
//   - left equals right
// - result is left if
//   - left > 0 && left < right
// R1: Tagged left (dividend).
// R0: Tagged right (divisor).
// Returns:
// R1: Untagged fallthrough result (remainder to be adjusted), or
// R0: Tagged return result (remainder).
static void EmitRemainderOperation(Assembler* assembler) {
  Label modulo;
  const Register left = R1;
  const Register right = R0;
  const Register result = R1;
  const Register tmp = R2;
  ASSERT(left == result);

  // Check for quick zero results.
  __ cmp(left, Operand(0));
  __ mov(R0, Operand(0), EQ);
  __ bx(LR, EQ);  // left is 0? Return 0.
  __ cmp(left, Operand(right));
  __ mov(R0, Operand(0), EQ);
  __ bx(LR, EQ);  // left == right? Return 0.

  // Check if result should be left.
  __ cmp(left, Operand(0));
  __ b(&modulo, LT);
  // left is positive.
  __ cmp(left, Operand(right));
  // left is less than right, result is left.
  __ mov(R0, Operand(left), LT);
  __ bx(LR, LT);

  __ Bind(&modulo);
  // result <- left - right * (left / right)
  __ SmiUntag(left);
  __ SmiUntag(right);

  __ IntegerDivide(tmp, left, right, D1, D0);

  __ mls(result, right, tmp, left);  // result <- left - right * TMP
}

// Implementation:
//     res = left % right;
//     if (res < 0) {
//       if (right < 0) {
//         res = res - right;
//       } else {
//         res = res + right;
//       }
//     }
void AsmIntrinsifier::Integer_moduloFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  if (!TargetCPUFeatures::can_divide()) {
    return;
  }
  // Check to see if we have integer division
  __ ldr(R1, Address(SP, +0 * target::kWordSize));
  __ ldr(R0, Address(SP, +1 * target::kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ tst(TMP, Operand(kSmiTagMask));
  __ b(normal_ir_body, NE);
  // R1: Tagged left (dividend).
  // R0: Tagged right (divisor).
  // Check if modulo by zero -> exception thrown in main function.
  __ cmp(R0, Operand(0));
  __ b(normal_ir_body, EQ);
  EmitRemainderOperation(assembler);
  // Untagged right in R0. Untagged remainder result in R1.

  __ cmp(R1, Operand(0));
  __ mov(R0, Operand(R1, LSL, 1), GE);  // Tag and move result to R0.
  __ bx(LR, GE);

  // Result is negative, adjust it.
  __ cmp(R0, Operand(0));
  __ sub(R0, R1, Operand(R0), LT);
  __ add(R0, R1, Operand(R0), GE);
  __ SmiTag(R0);
  __ Ret();

  __ Bind(normal_ir_body);
}
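
// A concrete check of the adjustment above (mls produces a truncating
// remainder; the values are illustrative):
//
//   left = -7, right = 3  : rem = -1 -> rem + right = 2
//   left = -7, right = -3 : rem = -1 -> rem - right = 2
//
// i.e. the adjusted result lies in [0, |right|), matching the pseudo code
// above and the behaviour of Dart's % operator.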

void AsmIntrinsifier::Integer_truncDivide(Assembler* assembler,
                                          Label* normal_ir_body) {
  if (!TargetCPUFeatures::can_divide()) {
    return;
  }
  // Check to see if we have integer division

  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ cmp(R0, Operand(0));
  __ b(normal_ir_body, EQ);  // If b is 0, fall through.

  __ SmiUntag(R0);
  __ SmiUntag(R1);

  __ IntegerDivide(R0, R1, R0, D1, D0);

  // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
  // cannot tag the result.
  __ CompareImmediate(R0, 0x40000000);
  __ SmiTag(R0, NE);  // Not equal. Okay to tag and return.
  __ bx(LR, NE);  // Return.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_negate(Assembler* assembler,
                                     Label* normal_ir_body) {
  __ ldr(R0, Address(SP, +0 * target::kWordSize));  // Grab first argument.
  __ tst(R0, Operand(kSmiTagMask));  // Test for Smi.
  __ b(normal_ir_body, NE);
  __ rsbs(R0, R0, Operand(0));  // R0 is a Smi. R0 <- 0 - R0.
  __ bx(LR, VC);  // Return if there wasn't overflow, fall through otherwise.
  // R0 is not a Smi. Fall through.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitAndFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // checks two smis
  __ and_(R0, R0, Operand(R1));

  __ Ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitAnd(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_bitOrFromInteger(Assembler* assembler,
                                               Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // checks two smis
  __ orr(R0, R0, Operand(R1));

  __ Ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitOr(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_bitOrFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_bitXorFromInteger(Assembler* assembler,
                                                Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);  // checks two smis
  __ eor(R0, R0, Operand(R1));

  __ Ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_bitXor(Assembler* assembler,
                                     Label* normal_ir_body) {
  Integer_bitXorFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareImmediate(R0, target::ToRawSmi(target::kSmiBits));
  __ b(normal_ir_body, HI);

  __ SmiUntag(R0);

  // Check for overflow by shifting left and shifting back arithmetically.
  // If the result is different from the original, there was overflow.
  __ mov(IP, Operand(R1, LSL, R0));
  __ cmp(R1, Operand(IP, ASR, R0));

  // No overflow, result in R0.
  __ mov(R0, Operand(R1, LSL, R0), EQ);
  __ bx(LR, EQ);

  // Arguments are Smi but the shift produced an overflow to Mint.
  __ CompareImmediate(R1, 0);
  __ b(normal_ir_body, LT);
  __ SmiUntag(R1);

  // Pull off high bits that will be shifted off of R1 by making a mask
  // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
  // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
  // lo bits = R1 << R0
  __ LoadImmediate(R8, 1);
  __ mov(R8, Operand(R8, LSL, R0));  // R8 <- 1 << R0
  __ sub(R8, R8, Operand(1));  // R8 <- R8 - 1
  __ rsb(R3, R0, Operand(32));  // R3 <- 32 - R0
  __ mov(R8, Operand(R8, LSL, R3));  // R8 <- R8 << R3
  __ and_(R8, R1, Operand(R8));  // R8 <- R8 & R1
  __ mov(R8, Operand(R8, LSR, R3));  // R8 <- R8 >> R3
  // Now R8 has the bits that fall off of R1 on a left shift.
  __ mov(R1, Operand(R1, LSL, R0));  // R1 gets the low bits.

  const Class& mint_class = MintClass();
  __ TryAllocate(mint_class, normal_ir_body, R0, R2);

  __ str(R1, FieldAddress(R0, target::Mint::value_offset()));
  __ str(R8,
         FieldAddress(R0, target::Mint::value_offset() + target::kWordSize));
  __ Ret();
  __ Bind(normal_ir_body);
}
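
// In C terms, the high/low split above amounts to (value and shift are the
// untagged operands, value is known non-negative here; names illustrative):
//
//   uint32_t mask = ((1u << shift) - 1) << (32 - shift);
//   uint32_t high = (value & mask) >> (32 - shift);  // Bits shifted out.
//   uint32_t low  = value << shift;                  // Bits kept.
//
// high:low is the 64-bit result that is stored into the freshly allocated
// Mint.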

static void Get64SmiOrMint(Assembler* assembler,
                           Register res_hi,
                           Register res_lo,
                           Register reg,
                           Label* not_smi_or_mint) {
  Label not_smi, done;
  __ tst(reg, Operand(kSmiTagMask));
  __ b(&not_smi, NE);
  __ SmiUntag(reg);

  // Sign extend to 64 bit
  __ mov(res_lo, Operand(reg));
  __ mov(res_hi, Operand(res_lo, ASR, 31));
  __ b(&done);

  __ Bind(&not_smi);
  __ CompareClassId(reg, kMintCid, res_lo);
  __ b(not_smi_or_mint, NE);

  // Mint.
  __ ldr(res_lo, FieldAddress(reg, target::Mint::value_offset()));
  __ ldr(res_hi,
         FieldAddress(reg, target::Mint::value_offset() + target::kWordSize));
  __ Bind(&done);
}

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through;
  TestBothArgumentsSmis(assembler, &try_mint_smi);
  // R0 contains the right argument, R1 contains the left argument.

  __ cmp(R1, Operand(R0));
  __ b(&is_true, true_condition);
  __ Bind(&is_false);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();
  __ Bind(&is_true);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  // 64-bit comparison
  Condition hi_true_cond, hi_false_cond, lo_false_cond;
  switch (true_condition) {
    case LT:
    case LE:
      hi_true_cond = LT;
      hi_false_cond = GT;
      lo_false_cond = (true_condition == LT) ? CS : HI;
      break;
    case GT:
    case GE:
      hi_true_cond = GT;
      hi_false_cond = LT;
      lo_false_cond = (true_condition == GT) ? LS : CC;
      break;
    default:
      UNREACHABLE();
      hi_true_cond = hi_false_cond = lo_false_cond = VS;
  }

  __ Bind(&try_mint_smi);
  // Get left as 64 bit integer.
  Get64SmiOrMint(assembler, R3, R2, R1, normal_ir_body);
  // Get right as 64 bit integer.
  Get64SmiOrMint(assembler, R1, R8, R0, normal_ir_body);
  // R3: left high.
  // R2: left low.
  // R1: right high.
  // R8: right low.

  __ cmp(R3, Operand(R1));  // Compare left hi, right high.
  __ b(&is_false, hi_false_cond);
  __ b(&is_true, hi_true_cond);
  __ cmp(R2, Operand(R8));  // Compare left lo, right lo.
  __ b(&is_false, lo_false_cond);
  // Else is true.
  __ b(&is_true);

  __ Bind(normal_ir_body);
}
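
// The condition triple selected above implements a signed 64-bit comparison
// from 32-bit halves, equivalent to this C sketch for the 'less than' case
// (other cases are symmetric):
//
//   if (left_hi != right_hi) return left_hi < right_hi;      // hi_*_cond.
//   return (uint32_t)left_lo < (uint32_t)right_lo;           // low words.
//
// The low words are compared unsigned, which is why lo_false_cond is CS/HI
// rather than GE/GT.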

void AsmIntrinsifier::Integer_greaterThanFromInt(Assembler* assembler,
                                                 Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LT);
}

void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  Integer_greaterThanFromInt(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GT);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LE);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GE);
}

// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ cmp(R0, Operand(R1));
  __ b(&true_label, EQ);

  __ orr(R2, R0, Operand(R1));
  __ tst(R2, Operand(kSmiTagMask));
  __ b(&check_for_mint, NE);  // If R0 or R1 is not a smi do Mint checks.

  // Both arguments are smi, '===' is good enough.
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();
  __ Bind(&true_label);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

  __ tst(R1, Operand(kSmiTagMask));  // Check receiver.
  __ b(&receiver_not_smi, NE);

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.

  __ CompareClassId(R0, kDoubleCid, R2);
  __ b(normal_ir_body, EQ);
  __ LoadObject(R0,
                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
  __ Ret();

  __ Bind(&receiver_not_smi);
  // R1: receiver.

  __ CompareClassId(R1, kMintCid, R2);
  __ b(normal_ir_body, NE);
  // Receiver is Mint, return false if right is Smi.
  __ tst(R0, Operand(kSmiTagMask));
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
  __ bx(LR, EQ);
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Integer_sar(Assembler* assembler, Label* normal_ir_body) {
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // Shift amount in R0. Value to shift in R1.

  // Fall through if shift amount is negative.
  __ SmiUntag(R0);
  __ CompareImmediate(R0, 0);
  __ b(normal_ir_body, LT);

  // If shift amount is bigger than 31, set to 31.
  __ CompareImmediate(R0, 0x1F);
  __ LoadImmediate(R0, 0x1F, GT);
  __ SmiUntag(R1);
  __ mov(R0, Operand(R1, ASR, R0));
  __ SmiTag(R0);
  __ Ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Smi_bitNegate(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ mvn(R0, Operand(R0));
  __ bic(R0, R0, Operand(kSmiTagMask));  // Remove inverted smi-tag.
  __ Ret();
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ SmiUntag(R0);
  // XOR with sign bit to complement bits if value is negative.
  __ eor(R0, R0, Operand(R0, ASR, 31));
  __ clz(R0, R0);
  __ rsb(R0, R0, Operand(32));
  __ SmiTag(R0);
  __ Ret();
}

void AsmIntrinsifier::Smi_bitAndFromSmi(Assembler* assembler,
                                        Label* normal_ir_body) {
  Integer_bitAndFromInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
  __ ldrd(R0, R1, SP, 2 * target::kWordSize);
  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldrd(R2, R3, SP, 0 * target::kWordSize);
  __ SmiUntag(R3);
  // R4 = n ~/ _DIGIT_BITS
  __ Asr(R4, R3, Operand(5));
  // R8 = &x_digits[0]
  __ add(R8, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
  // R6 = &r_digits[1]
  __ add(R6, R2,
         Operand(target::TypedData::data_offset() - kHeapObjectTag +
                 kBytesPerBigIntDigit));
  // R2 = &x_digits[x_used]
  __ add(R2, R8, Operand(R0, LSL, 1));
  // R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
  __ add(R4, R4, Operand(R0, ASR, 1));
  __ add(R6, R6, Operand(R4, LSL, 2));
  // R1 = n % _DIGIT_BITS
  __ and_(R1, R3, Operand(31));
  // R0 = 32 - R1
  __ rsb(R0, R1, Operand(32));
  __ mov(R9, Operand(0));
  Label loop;
  __ Bind(&loop);
  __ ldr(R4, Address(R2, -kBytesPerBigIntDigit, Address::PreIndex));
  __ orr(R9, R9, Operand(R4, LSR, R0));
  __ str(R9, Address(R6, -kBytesPerBigIntDigit, Address::PreIndex));
  __ mov(R9, Operand(R4, LSL, R1));
  __ teq(R2, Operand(R8));
  __ b(&loop, NE);
  __ str(R9, Address(R6, -kBytesPerBigIntDigit, Address::PreIndex));
  __ LoadObject(R0, NullObject());
  __ Ret();
}
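
// In C terms, the loop above implements (digits are 32-bit, and the caller
// guarantees n % 32 != 0; variable names are illustrative):
//
//   intptr_t digit_shift = n >> 5, bit_shift = n & 31;
//   uint32_t carry = 0;
//   for (intptr_t i = x_used - 1; i >= 0; --i) {
//     uint32_t d = x_digits[i];
//     r_digits[i + digit_shift + 1] = carry | (d >> (32 - bit_shift));
//     carry = d << bit_shift;
//   }
//   r_digits[digit_shift] = carry;
//
// i.e. it walks from the most-significant digit downwards, exactly as the
// pre-indexed loads and stores do.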

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
  __ ldrd(R0, R1, SP, 2 * target::kWordSize);
  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldrd(R2, R3, SP, 0 * target::kWordSize);
  __ SmiUntag(R3);
  // R4 = n ~/ _DIGIT_BITS
  __ Asr(R4, R3, Operand(5));
  // R6 = &r_digits[0]
  __ add(R6, R2, Operand(target::TypedData::data_offset() - kHeapObjectTag));
  // R2 = &x_digits[n ~/ _DIGIT_BITS]
  __ add(R2, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));
  __ add(R2, R2, Operand(R4, LSL, 2));
  // R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
  __ add(R4, R4, Operand(1));
  __ rsb(R4, R4, Operand(R0, ASR, 1));
  __ add(R8, R6, Operand(R4, LSL, 2));
  // R1 = n % _DIGIT_BITS
  __ and_(R1, R3, Operand(31));
  // R0 = 32 - R1
  __ rsb(R0, R1, Operand(32));
  // R9 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
  __ ldr(R9, Address(R2, kBytesPerBigIntDigit, Address::PostIndex));
  __ mov(R9, Operand(R9, LSR, R1));
  Label loop_entry;
  __ b(&loop_entry);
  Label loop;
  __ Bind(&loop);
  __ ldr(R4, Address(R2, kBytesPerBigIntDigit, Address::PostIndex));
  __ orr(R9, R9, Operand(R4, LSL, R0));
  __ str(R9, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));
  __ mov(R9, Operand(R4, LSR, R1));
  __ Bind(&loop_entry);
  __ teq(R6, Operand(R8));
  __ b(&loop, NE);
  __ str(R9, Address(R6, 0));
  __ LoadObject(R0, NullObject());
  __ Ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R0 = used, R1 = digits
  __ ldrd(R0, R1, SP, 3 * target::kWordSize);
  // R1 = &digits[0]
  __ add(R1, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R2 = a_used, R3 = a_digits
  __ ldrd(R2, R3, SP, 1 * target::kWordSize);
  // R3 = &a_digits[0]
  __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R8 = r_digits
  __ ldr(R8, Address(SP, 0 * target::kWordSize));
  // R8 = &r_digits[0]
  __ add(R8, R8, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R2 = &digits[a_used >> 1], a_used is Smi.
  __ add(R2, R1, Operand(R2, LSL, 1));

  // R6 = &digits[used >> 1], used is Smi.
  __ add(R6, R1, Operand(R0, LSL, 1));

  __ adds(R4, R4, Operand(0));  // carry flag = 0
  Label add_loop;
  __ Bind(&add_loop);
  // Loop a_used times, a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R9, Address(R3, kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R4, R4, Operand(R9));
  __ teq(R1, Operand(R2));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&add_loop, NE);

  Label last_carry;
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ b(&last_carry, EQ);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, used - a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R4, R4, Operand(0));
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&carry_loop, NE);

  __ Bind(&last_carry);
  __ mov(R4, Operand(0));
  __ adc(R4, R4, Operand(0));
  __ str(R4, Address(R8, 0));

  __ LoadObject(R0, NullObject());
  __ Ret();
}

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R0 = used, R1 = digits
  __ ldrd(R0, R1, SP, 3 * target::kWordSize);
  // R1 = &digits[0]
  __ add(R1, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R2 = a_used, R3 = a_digits
  __ ldrd(R2, R3, SP, 1 * target::kWordSize);
  // R3 = &a_digits[0]
  __ add(R3, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R8 = r_digits
  __ ldr(R8, Address(SP, 0 * target::kWordSize));
  // R8 = &r_digits[0]
  __ add(R8, R8, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R2 = &digits[a_used >> 1], a_used is Smi.
  __ add(R2, R1, Operand(R2, LSL, 1));

  // R6 = &digits[used >> 1], used is Smi.
  __ add(R6, R1, Operand(R0, LSL, 1));

  __ subs(R4, R4, Operand(0));  // carry flag = 1
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop a_used times, a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R9, Address(R3, kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R4, R4, Operand(R9));
  __ teq(R1, Operand(R2));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&sub_loop, NE);

  Label done;
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ b(&done, EQ);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, used - a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R4, R4, Operand(0));
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&carry_loop, NE);

  __ Bind(&done);
  __ LoadObject(R0, NullObject());
  __ Ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint32_t x = x_digits[xi >> 1];  // xi is Smi.
  //   if (x == 0 || n == 0) {
  //     return 1;
  //   }
  //   uint32_t* mip = &m_digits[i >> 1];  // i is Smi.
  //   uint32_t* ajp = &a_digits[j >> 1];  // j is Smi.
  //   uint32_t c = 0;
  //   SmiUntag(n);
  //   do {
  //     uint32_t mi = *mip++;
  //     uint32_t aj = *ajp;
  //     uint64_t t = x*mi + aj + c;  // 32-bit * 32-bit -> 64-bit.
  //     *ajp++ = low32(t);
  //     c = high32(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint64_t t = *ajp + c;
  //     *ajp++ = low32(t);
  //     c = high32(t);  // c == 0 or 1.
  //   }
  //   return 1;
  // }

  Label done;
  // R3 = x, no_op if x == 0
  __ ldrd(R0, R1, SP, 5 * target::kWordSize);  // R0 = xi as Smi, R1 = x_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R3, FieldAddress(R1, target::TypedData::data_offset()));
  __ tst(R3, Operand(R3));
  __ b(&done, EQ);

  // R8 = SmiUntag(n), no_op if n == 0
  __ ldr(R8, Address(SP, 0 * target::kWordSize));
  __ Asrs(R8, R8, Operand(kSmiTagSize));
  __ b(&done, EQ);

  // R4 = mip = &m_digits[i >> 1]
  __ ldrd(R0, R1, SP, 3 * target::kWordSize);  // R0 = i as Smi, R1 = m_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R4, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R9 = ajp = &a_digits[j >> 1]
  __ ldrd(R0, R1, SP, 1 * target::kWordSize);  // R0 = j as Smi, R1 = a_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R9, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R1 = c = 0
  __ mov(R1, Operand(0));

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x:   R3
  // mip: R4
  // ajp: R9
  // c:   R1
  // n:   R8

  // uint32_t mi = *mip++
  __ ldr(R2, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));

  // uint32_t aj = *ajp
  __ ldr(R0, Address(R9, 0));

  // uint64_t t = x*mi + aj + c
  __ umaal(R0, R1, R2, R3);  // R1:R0 = R2*R3 + R1 + R0.

  // *ajp++ = low32(t) = R0
  __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));

  // c = high32(t) = R1

  // while (--n > 0)
  __ subs(R8, R8, Operand(1));  // --n
  __ b(&muladd_loop, NE);

  __ tst(R1, Operand(R1));
  __ b(&done, EQ);

  // *ajp++ += c
  __ ldr(R0, Address(R9, 0));
  __ adds(R0, R0, Operand(R1));
  __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&done, CC);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ ldr(R0, Address(R9, 0));
  __ adds(R0, R0, Operand(1));
  __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&propagate_carry_loop, CS);

  __ Bind(&done);
  __ mov(R0, Operand(target::ToRawSmi(1)));  // One digit processed.
  __ Ret();
}
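
// umaal never overflows its 64-bit accumulator: with 32-bit operands,
//
//   x*mi + aj + c <= (2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1) = 2^64 - 1,
//
// so R1:R0 always holds the exact value of t from the pseudo code above.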

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint32_t* xip = &x_digits[i >> 1];  // i is Smi.
  //   uint32_t x = *xip++;
  //   if (x == 0) return 1;
  //   uint32_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint32_t aj = *ajp;
  //   uint64_t t = x*x + aj;
  //   *ajp++ = low32(t);
  //   uint64_t c = high32(t);
  //   int n = ((used - i) >> 1) - 1;  // used and i are Smi.
  //   while (--n >= 0) {
  //     uint32_t xi = *xip++;
  //     uint32_t aj = *ajp;
  //     uint96_t t = 2*x*xi + aj + c;  // 2-bit * 32-bit * 32-bit -> 65-bit.
  //     *ajp++ = low32(t);
  //     c = high64(t);  // 33-bit.
  //   }
  //   uint32_t aj = *ajp;
  //   uint64_t t = aj + c;  // 32-bit + 33-bit -> 34-bit.
  //   *ajp++ = low32(t);
  //   *ajp = high32(t);
  //   return 1;
  // }

  // The code has no bailout path, so we can use R6 (CODE_REG) freely.

  // R4 = xip = &x_digits[i >> 1]
  __ ldrd(R2, R3, SP, 2 * target::kWordSize);  // R2 = i as Smi, R3 = x_digits
  __ add(R3, R3, Operand(R2, LSL, 1));
  __ add(R4, R3, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R3 = x = *xip++, return if x == 0
  Label x_zero;
  __ ldr(R3, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));
  __ tst(R3, Operand(R3));
  __ b(&x_zero, EQ);

  // R6 = ajp = &a_digits[i]
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // a_digits
  __ add(R1, R1, Operand(R2, LSL, 2));  // j == 2*i, i is Smi.
  __ add(R6, R1, Operand(target::TypedData::data_offset() - kHeapObjectTag));

  // R8:R0 = t = x*x + *ajp
  __ ldr(R0, Address(R6, 0));
  __ mov(R8, Operand(0));
  __ umaal(R0, R8, R3, R3);  // R8:R0 = R3*R3 + R8 + R0.

  // *ajp++ = low32(t) = R0
  __ str(R0, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));

  // R8 = low32(c) = high32(t)
  // R9 = high32(c) = 0
  __ mov(R9, Operand(0));

  // int n = used - i - 1; while (--n >= 0) ...
  __ ldr(R0, Address(SP, 0 * target::kWordSize));  // used is Smi
  __ sub(TMP, R0, Operand(R2));
  __ mov(R0, Operand(2));  // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
  __ rsbs(TMP, R0, Operand(TMP, ASR, kSmiTagSize));

  Label loop, done;
  __ b(&done, MI);

  __ Bind(&loop);
  // x:   R3
  // xip: R4
  // ajp: R6
  // c:   R9:R8
  // t:   R2:R1:R0 (not live at loop entry)
  // n:   TMP

  // uint32_t xi = *xip++
  __ ldr(R2, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));

  // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c
  __ umull(R0, R1, R2, R3);  // R1:R0 = R2*R3.
  __ adds(R0, R0, Operand(R0));
  __ adcs(R1, R1, Operand(R1));
  __ mov(R2, Operand(0));
  __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi.
  __ adds(R0, R0, Operand(R8));
  __ adcs(R1, R1, Operand(R9));
  __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi + c.
  __ ldr(R8, Address(R6, 0));  // R8 = aj = *ajp.
  __ adds(R0, R0, Operand(R8));
  __ adcs(R8, R1, Operand(0));
  __ adc(R9, R2, Operand(0));  // R9:R8:R0 = 2*x*xi + c + aj.

  // *ajp++ = low32(t) = R0
  __ str(R0, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));

  // while (--n >= 0)
  __ subs(TMP, TMP, Operand(1));  // --n
  __ b(&loop, PL);

  __ Bind(&done);
  // uint32_t aj = *ajp
  __ ldr(R0, Address(R6, 0));

  // uint64_t t = aj + c
  __ adds(R8, R8, Operand(R0));
  __ adc(R9, R9, Operand(0));

  // *ajp = low32(t) = R8
  // *(ajp + 1) = high32(t) = R9
  __ strd(R8, R9, R6, 0);

  __ Bind(&x_zero);
  __ mov(R0, Operand(target::ToRawSmi(1)));  // One digit processed.
  __ Ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // No unsigned 64-bit / 32-bit divide instruction.
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint32_t rho = args[_RHO];  // _RHO == 2.
  //   uint32_t d = digits[i >> 1];  // i is Smi.
  //   uint64_t t = rho*d;
  //   args[_MU] = t mod DIGIT_BASE;  // _MU == 4.
  //   return 1;
  // }

  // R4 = args
  __ ldr(R4, Address(SP, 2 * target::kWordSize));  // args

  // R3 = rho = args[2]
  __ ldr(R3, FieldAddress(R4, target::TypedData::data_offset() +
                                  2 * kBytesPerBigIntDigit));

  // R2 = digits[i >> 1]
  __ ldrd(R0, R1, SP, 0 * target::kWordSize);  // R0 = i as Smi, R1 = digits
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R2, FieldAddress(R1, target::TypedData::data_offset()));

  // R1:R0 = t = rho*d
  __ umull(R0, R1, R2, R3);

  // args[4] = t mod DIGIT_BASE = low32(t)
  __ str(R0, FieldAddress(R4, target::TypedData::data_offset() +
                                  4 * kBytesPerBigIntDigit));

  __ mov(R0, Operand(target::ToRawSmi(1)));  // One digit processed.
  __ Ret();
}
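
// In C terms the whole intrinsic is just (names taken from the pseudo code
// above):
//
//   uint32_t rho = args[2];
//   uint32_t d = digits[i >> 1];
//   args[4] = (uint32_t)((uint64_t)rho * d);  // Low 32 bits of t = rho*d.
//   return 1;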

// Check if the last argument is a double, jump to label 'is_smi' if smi
// (easy to convert to double), otherwise jump to label 'not_double_smi'.
// Returns the last argument in R0.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ tst(R0, Operand(kSmiTagMask));
  __ b(is_smi, EQ);
  __ CompareClassId(R0, kDoubleCid, R1);
  __ b(not_double_smi, NE);
  // Fall through with Double in R0.
}

// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown
// type. Return true or false object in the register R0. Any NaN argument
// returns false. Any non-double arg1 causes control flow to fall through to the
// slow case (compiled method body).
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label is_smi, double_op;

    TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
    // Both arguments are double, right operand is in R0.

    __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
    __ Bind(&double_op);
    __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
    __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);

    __ vcmpd(D0, D1);
    __ vmstat();
    __ LoadObject(R0, CastHandle<Object>(FalseObject()));
    // Return false if D0 or D1 was NaN before checking true condition.
    __ bx(LR, VS);
    __ LoadObject(R0, CastHandle<Object>(TrueObject()), true_condition);
    __ Ret();

    __ Bind(&is_smi);  // Convert R0 to a double.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D1, S0);
    __ b(&double_op);  // Then do the comparison.
    __ Bind(normal_ir_body);
  }
}

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, HI);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CS);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CC);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQ);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, LS);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label is_smi, double_op;

    TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
    // Both arguments are double, right operand is in R0.
    __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
    __ Bind(&double_op);
    __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
    __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    switch (kind) {
      case Token::kADD:
        __ vaddd(D0, D0, D1);
        break;
      case Token::kSUB:
        __ vsubd(D0, D0, D1);
        break;
      case Token::kMUL:
        __ vmuld(D0, D0, D1);
        break;
      case Token::kDIV:
        __ vdivd(D0, D0, D1);
        break;
      default:
        UNREACHABLE();
    }
    const Class& double_class = DoubleClass();
    __ TryAllocate(double_class, normal_ir_body, R0,
                   R1);  // Result register.
    __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    __ Ret();
    __ Bind(&is_smi);  // Convert R0 to a double.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D1, S0);
    __ b(&double_op);
    __ Bind(normal_ir_body);
  }
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

// Left is double, right is integer (Mint or Smi)
void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label fall_through;
    // Only smis allowed.
    __ ldr(R0, Address(SP, 0 * target::kWordSize));
    __ tst(R0, Operand(kSmiTagMask));
    __ b(normal_ir_body, NE);
    // Is Smi.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D1, S0);
    __ ldr(R0, Address(SP, 1 * target::kWordSize));
    __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    __ vmuld(D0, D0, D1);
    const Class& double_class = DoubleClass();
    __ TryAllocate(double_class, normal_ir_body, R0,
                   R1);  // Result register.
    __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    __ Ret();
    __ Bind(normal_ir_body);
  }
}

void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label fall_through;

    __ ldr(R0, Address(SP, 0 * target::kWordSize));
    __ tst(R0, Operand(kSmiTagMask));
    __ b(normal_ir_body, NE);
    // Is Smi.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D0, S0);
    const Class& double_class = DoubleClass();
    __ TryAllocate(double_class, normal_ir_body, R0,
                   R1);  // Result register.
    __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    __ Ret();
    __ Bind(normal_ir_body);
  }
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ ldr(R0, Address(SP, 0 * target::kWordSize));
    __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    __ vcmpd(D0, D0);
    __ vmstat();
    __ LoadObject(R0, CastHandle<Object>(FalseObject()), VC);
    __ LoadObject(R0, CastHandle<Object>(TrueObject()), VS);
    __ Ret();
  }
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ ldr(R0, Address(SP, 0 * target::kWordSize));
    // R1 <- value[0:31], R2 <- value[32:63]
    __ LoadFieldFromOffset(kWord, R1, R0, target::Double::value_offset());
    __ LoadFieldFromOffset(kWord, R2, R0,
                           target::Double::value_offset() + target::kWordSize);

    // If the low word isn't 0, then it isn't infinity.
    __ cmp(R1, Operand(0));
    __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
    __ bx(LR, NE);  // Return if NE.

    // Mask off the sign bit.
    __ AndImmediate(R2, R2, 0x7FFFFFFF);
    // Compare with +infinity.
    __ CompareImmediate(R2, 0x7FF00000);
    __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
    __ bx(LR, NE);

    __ LoadObject(R0, CastHandle<Object>(TrueObject()));
    __ Ret();
  }
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label is_false, is_true, is_zero;
    __ ldr(R0, Address(SP, 0 * target::kWordSize));
    __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    __ vcmpdz(D0);
    __ vmstat();
    __ b(&is_false, VS);  // NaN -> false.
    __ b(&is_zero, EQ);  // Check for negative zero.
    __ b(&is_false, CS);  // >= 0 -> false.

    __ Bind(&is_true);
    __ LoadObject(R0, CastHandle<Object>(TrueObject()));
    __ Ret();

    __ Bind(&is_false);
    __ LoadObject(R0, CastHandle<Object>(FalseObject()));
    __ Ret();

    __ Bind(&is_zero);
    // Check for negative zero by looking at the sign bit.
    __ vmovrrd(R0, R1, D0);  // R1:R0 <- D0, so sign bit is in bit 31 of R1.
    __ mov(R1, Operand(R1, LSR, 31));
    __ tst(R1, Operand(1));
    __ b(&is_true, NE);  // Sign bit set.
    __ b(&is_false);
  }
}

void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
                                      Label* normal_ir_body) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label fall_through;

    __ ldr(R0, Address(SP, 0 * target::kWordSize));
    __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);

    // Explicit NaN check, since ARM gives an FPU exception if you try to
    // convert NaN to an int.
    __ vcmpd(D0, D0);
    __ vmstat();
    __ b(normal_ir_body, VS);

    __ vcvtid(S0, D0);
    __ vmovrs(R0, S0);
    // Overflow is signaled with minint.
    // Check for overflow and that it fits into Smi.
    __ CompareImmediate(R0, 0xC0000000);
    __ SmiTag(R0, PL);
    __ bx(LR, PL);
    __ Bind(normal_ir_body);
  }
}

void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                      Label* normal_ir_body) {
  // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

  if (!TargetCPUFeatures::vfp_supported()) return;

  // Load double value and check that it isn't NaN, since ARM gives an
  // FPU exception if you try to convert NaN to an int.
  Label double_hash;
  __ ldr(R1, Address(SP, 0 * target::kWordSize));
  __ LoadDFromOffset(D0, R1, target::Double::value_offset() - kHeapObjectTag);
  __ vcmpd(D0, D0);
  __ vmstat();
  __ b(&double_hash, VS);

  // Convert double value to signed 32-bit int in R0.
  __ vcvtid(S2, D0);
  __ vmovrs(R0, S2);

  // Tag the int as a Smi, making sure that it fits; this checks for
  // overflow in the conversion from double to int. Conversion
  // overflow is signalled by vcvt through clamping R0 to either
  // INT32_MAX or INT32_MIN (saturation).
  ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
  __ adds(R0, R0, Operand(R0));
  __ b(normal_ir_body, VS);

  // Compare the two double values. If they are equal, we return the
  // Smi tagged result immediately as the hash code.
  __ vcvtdi(D1, S2);
  __ vcmpd(D0, D1);
  __ vmstat();
  __ bx(LR, EQ);

  // Convert the double bits to a hash code that fits in a Smi.
  __ Bind(&double_hash);
  __ ldr(R0, FieldAddress(R1, target::Double::value_offset()));
  __ ldr(R1, FieldAddress(R1, target::Double::value_offset() + 4));
  __ eor(R0, R0, Operand(R1));
  __ AndImmediate(R0, R0, target::kSmiMax);
  __ SmiTag(R0);
  __ Ret();

  // Fall into the native C++ implementation.
  __ Bind(normal_ir_body);
}
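
// The fallback path above hashes the raw bits of the double, roughly:
//
//   uint32_t lo = bits[0], hi = bits[1];   // The two words of the value.
//   hash = (lo ^ hi) & target::kSmiMax;    // Guaranteed to fit in a Smi.
//
// The fast path instead returns the Smi-tagged integer value whenever the
// double compares equal to its own integer conversion.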

void AsmIntrinsifier::MathSqrt(Assembler* assembler, Label* normal_ir_body) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label is_smi, double_op;
    TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
    // Argument is double and is in R0.
    __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
    __ Bind(&double_op);
    __ vsqrtd(D0, D1);
    const Class& double_class = DoubleClass();
    __ TryAllocate(double_class, normal_ir_body, R0,
                   R1);  // Result register.
    __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
    __ Ret();
    __ Bind(&is_smi);
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D1, S0);
    __ b(&double_op);
    __ Bind(normal_ir_body);
  }
}

// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
// _state[kSTATE_LO] = state & _MASK_32;
// _state[kSTATE_HI] = state >> 32;
void AsmIntrinsifier::Random_nextState(Assembler* assembler,
                                       Label* normal_ir_body) {
  const Field& state_field = LookupMathRandomStateFieldOffset();
  const int64_t a_int_value = AsmIntrinsifier::kRandomAValue;

  // 'a_int_value' is used below as an unsigned 32-bit multiplier.
  ASSERT(Utils::IsUint(32, a_int_value));
  int32_t a_int32_value = static_cast<int32_t>(a_int_value);

  // Receiver.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  // Field '_state'.
  __ ldr(R1, FieldAddress(R0, target::Field::OffsetOf(state_field)));
  // Addresses of _state[0] and _state[1].

  const int64_t disp_0 =
      target::Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
  const int64_t disp_1 =
      disp_0 + target::Instance::ElementSizeFor(kTypedDataUint32ArrayCid);

  __ LoadImmediate(R0, a_int32_value);
  __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag);
  __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag);
  __ mov(R8, Operand(0));  // Zero extend unsigned _state[kSTATE_HI].
  // Unsigned 32-bit multiply and 64-bit accumulate into R8:R3.
  __ umlal(R3, R8, R0, R2);  // R8:R3 <- R8:R3 + R0 * R2.
  __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag);
  __ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag);
  ASSERT(target::ToRawSmi(0) == 0);
  __ eor(R0, R0, Operand(R0));
  __ Ret();
}
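
// The umlal above evaluates the formula from the comment directly. As a C
// sketch (state_lo/state_hi are the two _state digits):
//
//   uint64_t state = (uint64_t)kRandomAValue * state_lo + state_hi;
//   _state[kSTATE_LO] = (uint32_t)state;          // R3 after umlal.
//   _state[kSTATE_HI] = (uint32_t)(state >> 32);  // R8 after umlal.
//
// R8 is zeroed first so that the 64-bit accumulate starts from state_hi alone.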

void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ cmp(R0, Operand(R1));
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
  __ Ret();
}

static void RangeCheck(Assembler* assembler,
                       Register val,
                       Register tmp,
                       intptr_t low,
                       intptr_t high,
                       Condition cc,
                       Label* target) {
  __ AddImmediate(tmp, val, -low);
  __ CompareImmediate(tmp, high - low);
  __ b(target, cc);
}

const Condition kIfNotInRange = HI;
const Condition kIfInRange = LS;
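
// RangeCheck relies on the usual unsigned-subtraction trick: for class ids
// (small non-negative values),
//
//   (uint32_t)(val - low) <= (uint32_t)(high - low)  <=>  low <= val <= high,
//
// so one unsigned compare plus an LS/HI branch tests membership in
// [low, high] without a second comparison.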
1538
1539static void JumpIfInteger(Assembler* assembler,
1540 Register cid,
1541 Register tmp,
1542 Label* target) {
1543 RangeCheck(assembler, cid, tmp, kSmiCid, kMintCid, kIfInRange, target);
1544}
1545
1546static void JumpIfNotInteger(Assembler* assembler,
1547 Register cid,
1548 Register tmp,
1549 Label* target) {
1550 RangeCheck(assembler, cid, tmp, kSmiCid, kMintCid, kIfNotInRange, target);
1551}
1552
1553static void JumpIfString(Assembler* assembler,
1554 Register cid,
1555 Register tmp,
1556 Label* target) {
1557 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
1558 kIfInRange, target);
1559}
1560
1561static void JumpIfNotString(Assembler* assembler,
1562 Register cid,
1563 Register tmp,
1564 Label* target) {
1565 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
1566 kIfNotInRange, target);
1567}
1568
1569// Return type quickly for simple types (not parameterized and not signature).
1570void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
1571 Label* normal_ir_body) {
1572 Label use_declaration_type, not_double, not_integer;
1573 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1574 __ LoadClassIdMayBeSmi(R1, R0);
1575
1576 __ CompareImmediate(R1, kClosureCid);
1577 __ b(normal_ir_body, EQ); // Instance is a closure.
1578
1579 __ CompareImmediate(R1, kNumPredefinedCids);
1580 __ b(&use_declaration_type, HI);
1581
1582 __ CompareImmediate(R1, kDoubleCid);
1583 __ b(&not_double, NE);
1584
1585 __ LoadIsolate(R0);
1586 __ LoadFromOffset(kWord, R0, R0,
1587 target::Isolate::cached_object_store_offset());
1588 __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::double_type_offset());
1589 __ Ret();
1590
1591 __ Bind(&not_double);
1592 JumpIfNotInteger(assembler, R1, R0, &not_integer);
1593 __ LoadIsolate(R0);
1594 __ LoadFromOffset(kWord, R0, R0,
1595 target::Isolate::cached_object_store_offset());
1596 __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::int_type_offset());
1597 __ Ret();
1598
1599 __ Bind(&not_integer);
1600 JumpIfNotString(assembler, R1, R0, &use_declaration_type);
1601 __ LoadIsolate(R0);
1602 __ LoadFromOffset(kWord, R0, R0,
1603 target::Isolate::cached_object_store_offset());
1604 __ LoadFromOffset(kWord, R0, R0, target::ObjectStore::string_type_offset());
1605 __ Ret();
1606
1607 __ Bind(&use_declaration_type);
1608 __ LoadClassById(R2, R1);
1609 __ ldrh(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()));
1610 __ CompareImmediate(R3, 0);
1611 __ b(normal_ir_body, NE);
1612
1613 __ ldr(R0, FieldAddress(R2, target::Class::declaration_type_offset()));
1614 __ CompareObject(R0, NullObject());
1615 __ b(normal_ir_body, EQ);
1616 __ Ret();
1617
1618 __ Bind(normal_ir_body);
1619}
1620
1621// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
1622// can be determined by this fast path, it jumps to either equal or not_equal,
1623// otherwise it jumps to normal_ir_body. May clobber cid1, cid2, and scratch.
1624static void EquivalentClassIds(Assembler* assembler,
1625 Label* normal_ir_body,
1626 Label* equal,
1627 Label* not_equal,
1628 Register cid1,
1629 Register cid2,
1630 Register scratch) {
1631 Label different_cids, not_integer;
1632
1633 // Check if left hand side is a closure. Closures are handled in the runtime.
1634 __ CompareImmediate(cid1, kClosureCid);
1635 __ b(normal_ir_body, EQ);
1636
1637 // Check whether class ids match. If class ids don't match types may still be
1638 // considered equivalent (e.g. multiple string implementation classes map to a
1639 // single String type).
1640 __ cmp(cid1, Operand(cid2));
1641 __ b(&different_cids, NE);
1642
1643 // Types have the same class and neither is a closure type.
1644 // Check if there are no type arguments. In this case we can return true.
1645 // Otherwise fall through into the runtime to handle comparison.
1646 __ LoadClassById(scratch, cid1);
1647 __ ldrh(scratch,
1648 FieldAddress(scratch, target::Class::num_type_arguments_offset()));
1649 __ CompareImmediate(scratch, 0);
1650 __ b(normal_ir_body, NE);
1651 __ b(equal);
1652
1653 // Class ids are different. Check if we are comparing two string types (with
1654 // different representations) or two integer types.
1655 __ Bind(&different_cids);
1656 __ CompareImmediate(cid1, kNumPredefinedCids);
1657 __ b(not_equal, HI);
1658
1659 // Check if both are integer types.
1660 JumpIfNotInteger(assembler, cid1, scratch, &not_integer);
1661
  // First type is an integer. Check if the second is an integer too.
  // Otherwise the types are not equivalent, because only integers have the
  // same runtime type as other integers.
1665 JumpIfInteger(assembler, cid2, scratch, equal);
1666 __ b(not_equal);
1667
1668 __ Bind(&not_integer);
  // Check if the first type is a String. If it is not, the types are not
  // equivalent, because they have different class ids and they are not
  // strings or integers.
1672 JumpIfNotString(assembler, cid1, scratch, not_equal);
1673 // First type is String. Check if the second is a string too.
1674 JumpIfString(assembler, cid2, scratch, equal);
1675 // String types are only equivalent to other String types.
1676 __ b(not_equal);
1677}
1678
1679void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
1680 Label* normal_ir_body) {
1681 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1682 __ LoadClassIdMayBeSmi(R1, R0);
1683
1684 __ ldr(R0, Address(SP, 1 * target::kWordSize));
1685 __ LoadClassIdMayBeSmi(R2, R0);
1686
1687 Label equal, not_equal;
1688 EquivalentClassIds(assembler, normal_ir_body, &equal, &not_equal, R1, R2, R0);
1689
1690 __ Bind(&equal);
1691 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1692 __ Ret();
1693
1694 __ Bind(&not_equal);
1695 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1696 __ Ret();
1697
1698 __ Bind(normal_ir_body);
1699}
1700
1701void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
1702 Label* normal_ir_body) {
1703 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1704 __ ldr(R0, FieldAddress(R0, target::String::hash_offset()));
1705 __ cmp(R0, Operand(0));
  __ bx(LR, NE);  // Return if already computed.
1707 // Hash not yet computed.
1708 __ Bind(normal_ir_body);
1709}
1710
1711void AsmIntrinsifier::Type_getHashCode(Assembler* assembler,
1712 Label* normal_ir_body) {
1713 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1714 __ ldr(R0, FieldAddress(R0, target::Type::hash_offset()));
1715 __ cmp(R0, Operand(0));
  __ bx(LR, NE);  // Return if already computed.
1717 // Hash not yet computed.
1718 __ Bind(normal_ir_body);
1719}
1720
1721void AsmIntrinsifier::Type_equality(Assembler* assembler,
1722 Label* normal_ir_body) {
1723 Label equal, not_equal, equiv_cids, check_legacy;
1724
  __ ldm(IA, SP, (1 << R1 | 1 << R2));  // R1 = other, R2 = this (receiver).
1726 __ cmp(R1, Operand(R2));
1727 __ b(&equal, EQ);
1728
1729 // R1 might not be a Type object, so check that first (R2 should be though,
1730 // since this is a method on the Type class).
1731 __ LoadClassIdMayBeSmi(R0, R1);
1732 __ CompareImmediate(R0, kTypeCid);
1733 __ b(normal_ir_body, NE);
1734
1735 // Check if types are syntactically equal.
1736 __ ldr(R3, FieldAddress(R1, target::Type::type_class_id_offset()));
1737 __ SmiUntag(R3);
1738 __ ldr(R4, FieldAddress(R2, target::Type::type_class_id_offset()));
1739 __ SmiUntag(R4);
1740 EquivalentClassIds(assembler, normal_ir_body, &equiv_cids, &not_equal, R3, R4,
1741 R0);
1742
1743 // Check nullability.
1744 __ Bind(&equiv_cids);
1745 __ ldrb(R1, FieldAddress(R1, target::Type::nullability_offset()));
1746 __ ldrb(R2, FieldAddress(R2, target::Type::nullability_offset()));
1747 __ cmp(R1, Operand(R2));
1748 __ b(&check_legacy, NE);
1749 // Fall through to equal case if nullability is strictly equal.
1750
1751 __ Bind(&equal);
1752 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1753 __ Ret();
1754
  // At this point the nullabilities are different, so the types can only be
  // syntactically equivalent if both nullabilities are kNonNullable or
  // kLegacy. These are the two largest values of the enum, so comparing
  // against kNonNullable is enough.
1758 ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
1759 target::Nullability::kNonNullable < target::Nullability::kLegacy);
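  // Equivalently (the two nullabilities already differ here):
  //   if (left < kNonNullable) not equal;       // left is below kNonNullable.
  //   else if (right >= kNonNullable) equal;    // kNonNullable/kLegacy pair.
  //   else not equal;                           // right is below kNonNullable.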
1760 __ Bind(&check_legacy);
1761 __ CompareImmediate(R1, target::Nullability::kNonNullable);
1762 __ b(&not_equal, LT);
1763 __ CompareImmediate(R2, target::Nullability::kNonNullable);
1764 __ b(&equal, GE);
1765
1766 __ Bind(&not_equal);
1767 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1768 __ Ret();
1769
1770 __ Bind(normal_ir_body);
1771}
1772
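// Emits the comparison loop used by StringBaseSubstringMatches below for one
// fixed (receiver_cid, other_cid) pair. The generated code corresponds roughly
// to the following sketch (names are illustrative, not the library source):
//   bool matches(String receiver, int start, String other) {
//     if (other.length == 0) return true;
//     if (start < 0) return false;
//     if (start + other.length > receiver.length) return false;
//     for (int i = 0; i < other.length; i++) {
//       if (receiver.codeUnitAt(start + i) != other.codeUnitAt(i)) return false;
//     }
//     return true;
//   }
// On entry: R0 = receiver, R1 = start index (as a Smi), R2 = other.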
1773void GenerateSubstringMatchesSpecialization(Assembler* assembler,
1774 intptr_t receiver_cid,
1775 intptr_t other_cid,
1776 Label* return_true,
1777 Label* return_false) {
1778 __ SmiUntag(R1);
1779 __ ldr(R8, FieldAddress(R0, target::String::length_offset())); // this.length
1780 __ SmiUntag(R8);
1781 __ ldr(R9,
1782 FieldAddress(R2, target::String::length_offset())); // other.length
1783 __ SmiUntag(R9);
1784
1785 // if (other.length == 0) return true;
1786 __ cmp(R9, Operand(0));
1787 __ b(return_true, EQ);
1788
1789 // if (start < 0) return false;
1790 __ cmp(R1, Operand(0));
1791 __ b(return_false, LT);
1792
1793 // if (start + other.length > this.length) return false;
1794 __ add(R3, R1, Operand(R9));
1795 __ cmp(R3, Operand(R8));
1796 __ b(return_false, GT);
1797
1798 if (receiver_cid == kOneByteStringCid) {
1799 __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
1800 __ add(R0, R0, Operand(R1));
1801 } else {
1802 ASSERT(receiver_cid == kTwoByteStringCid);
1803 __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
1804 __ add(R0, R0, Operand(R1));
1805 __ add(R0, R0, Operand(R1));
1806 }
1807 if (other_cid == kOneByteStringCid) {
1808 __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
1809 } else {
1810 ASSERT(other_cid == kTwoByteStringCid);
1811 __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
1812 }
1813
1814 // i = 0
1815 __ LoadImmediate(R3, 0);
1816
1817 // do
1818 Label loop;
1819 __ Bind(&loop);
1820
1821 if (receiver_cid == kOneByteStringCid) {
1822 __ ldrb(R4, Address(R0, 0)); // this.codeUnitAt(i + start)
1823 } else {
1824 __ ldrh(R4, Address(R0, 0)); // this.codeUnitAt(i + start)
1825 }
1826 if (other_cid == kOneByteStringCid) {
1827 __ ldrb(TMP, Address(R2, 0)); // other.codeUnitAt(i)
1828 } else {
1829 __ ldrh(TMP, Address(R2, 0)); // other.codeUnitAt(i)
1830 }
1831 __ cmp(R4, Operand(TMP));
1832 __ b(return_false, NE);
1833
1834 // i++, while (i < len)
1835 __ AddImmediate(R3, 1);
1836 __ AddImmediate(R0, receiver_cid == kOneByteStringCid ? 1 : 2);
1837 __ AddImmediate(R2, other_cid == kOneByteStringCid ? 1 : 2);
1838 __ cmp(R3, Operand(R9));
1839 __ b(&loop, LT);
1840
1841 __ b(return_true);
1842}
1843
1844// bool _substringMatches(int start, String other)
1845// This intrinsic handles a OneByteString or TwoByteString receiver with a
1846// OneByteString other.
1847void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
1848 Label* normal_ir_body) {
1849 Label return_true, return_false, try_two_byte;
1850 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // this
1851 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // start
1852 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // other
1853 __ Push(R4); // Make ARGS_DESC_REG available.
1854
1855 __ tst(R1, Operand(kSmiTagMask));
1856 __ b(normal_ir_body, NE); // 'start' is not a Smi.
1857
1858 __ CompareClassId(R2, kOneByteStringCid, R3);
1859 __ b(normal_ir_body, NE);
1860
1861 __ CompareClassId(R0, kOneByteStringCid, R3);
1862 __ b(&try_two_byte, NE);
1863
1864 GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
1865 kOneByteStringCid, &return_true,
1866 &return_false);
1867
1868 __ Bind(&try_two_byte);
1869 __ CompareClassId(R0, kTwoByteStringCid, R3);
1870 __ b(normal_ir_body, NE);
1871
1872 GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
1873 kOneByteStringCid, &return_true,
1874 &return_false);
1875
1876 __ Bind(&return_true);
1877 __ Pop(R4);
1878 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1879 __ Ret();
1880
1881 __ Bind(&return_false);
1882 __ Pop(R4);
1883 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1884 __ Ret();
1885
1886 __ Bind(normal_ir_body);
1887 __ Pop(R4);
1888}
1889
1890void AsmIntrinsifier::Object_getHash(Assembler* assembler,
1891 Label* normal_ir_body) {
1892 UNREACHABLE();
1893}
1894
1895void AsmIntrinsifier::Object_setHash(Assembler* assembler,
1896 Label* normal_ir_body) {
1897 UNREACHABLE();
1898}
1899
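// Fast path for returning this[index] as a one-character string. The VM keeps
// canonical symbols for the first kNumberOfOneCharCodeSymbols code points, so
// the code below is roughly:
//   if (index is not a Smi || index >= this.length) fall back;
//   cu = this.codeUnitAt(index);
//   if (cu >= Symbols::kNumberOfOneCharCodeSymbols) fall back;
//   return predefined_symbols[Symbols::kNullCharCodeSymbolOffset + cu];
// Only OneByteString and TwoByteString receivers are handled here.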
1900void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
1901 Label* normal_ir_body) {
1902 Label try_two_byte_string;
1903
1904 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Index.
1905 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // String.
1906 __ tst(R1, Operand(kSmiTagMask));
1907 __ b(normal_ir_body, NE); // Index is not a Smi.
1908 // Range check.
1909 __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
1910 __ cmp(R1, Operand(R2));
1911 __ b(normal_ir_body, CS); // Runtime throws exception.
1912
1913 __ CompareClassId(R0, kOneByteStringCid, R3);
1914 __ b(&try_two_byte_string, NE);
1915 __ SmiUntag(R1);
1916 __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
1917 __ ldrb(R1, Address(R0, R1));
1918 __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
1919 __ b(normal_ir_body, GE);
1920 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1921 __ AddImmediate(
1922 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1923 __ ldr(R0, Address(R0, R1, LSL, 2));
1924 __ Ret();
1925
1926 __ Bind(&try_two_byte_string);
1927 __ CompareClassId(R0, kTwoByteStringCid, R3);
1928 __ b(normal_ir_body, NE);
1929 ASSERT(kSmiTagShift == 1);
1930 __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
1931 __ ldrh(R1, Address(R0, R1));
1932 __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
1933 __ b(normal_ir_body, GE);
1934 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1935 __ AddImmediate(
1936 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1937 __ ldr(R0, Address(R0, R1, LSL, 2));
1938 __ Ret();
1939
1940 __ Bind(normal_ir_body);
1941}
1942
1943void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
1944 Label* normal_ir_body) {
1945 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1946 __ ldr(R0, FieldAddress(R0, target::String::length_offset()));
1947 __ cmp(R0, Operand(target::ToRawSmi(0)));
1948 __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
1949 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
1950 __ Ret();
1951}
1952
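// Computes and caches the hash of a _OneByteString. The loop below is the
// usual add/shift/xor string hash; as a C-style sketch (kHashBits is
// target::String::kHashBits):
//   uint32_t hash = 0;
//   for (uint8_t ch : data) {
//     hash += ch;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   hash &= (1u << kHashBits) - 1;
//   return hash == 0 ? 1 : hash;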
1953void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
1954 Label* normal_ir_body) {
1955 __ ldr(R1, Address(SP, 0 * target::kWordSize));
1956 __ ldr(R0, FieldAddress(R1, target::String::hash_offset()));
1957 __ cmp(R0, Operand(0));
1958 __ bx(LR, NE); // Return if already computed.
1959
1960 __ ldr(R2, FieldAddress(R1, target::String::length_offset()));
1961
1962 Label done;
1963 // If the string is empty, set the hash to 1, and return.
1964 __ cmp(R2, Operand(target::ToRawSmi(0)));
1965 __ b(&done, EQ);
1966
1967 __ SmiUntag(R2);
1968 __ mov(R3, Operand(0));
1969 __ AddImmediate(R8, R1,
1970 target::OneByteString::data_offset() - kHeapObjectTag);
1971 // R1: Instance of OneByteString.
1972 // R2: String length, untagged integer.
1973 // R3: Loop counter, untagged integer.
1974 // R8: String data.
1975 // R0: Hash code, untagged integer.
1976
1977 Label loop;
1978 // Add to hash code: (hash_ is uint32)
1979 // hash_ += ch;
1980 // hash_ += hash_ << 10;
1981 // hash_ ^= hash_ >> 6;
  // Get one character (ch).
1983 __ Bind(&loop);
1984 __ ldrb(TMP, Address(R8, 0));
1985 // TMP: ch.
1986 __ add(R3, R3, Operand(1));
1987 __ add(R8, R8, Operand(1));
1988 __ add(R0, R0, Operand(TMP));
1989 __ add(R0, R0, Operand(R0, LSL, 10));
1990 __ eor(R0, R0, Operand(R0, LSR, 6));
1991 __ cmp(R3, Operand(R2));
1992 __ b(&loop, NE);
1993
1994 // Finalize.
1995 // hash_ += hash_ << 3;
1996 // hash_ ^= hash_ >> 11;
1997 // hash_ += hash_ << 15;
1998 __ add(R0, R0, Operand(R0, LSL, 3));
1999 __ eor(R0, R0, Operand(R0, LSR, 11));
2000 __ add(R0, R0, Operand(R0, LSL, 15));
2001 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
2002 __ LoadImmediate(R2,
2003 (static_cast<intptr_t>(1) << target::String::kHashBits) - 1);
2004 __ and_(R0, R0, Operand(R2));
2005 __ cmp(R0, Operand(0));
2006 // return hash_ == 0 ? 1 : hash_;
2007 __ Bind(&done);
2008 __ mov(R0, Operand(1), EQ);
2009 __ SmiTag(R0);
2010 __ StoreIntoSmiField(FieldAddress(R1, target::String::hash_offset()), R0);
2011 __ Ret();
2012}
2013
2014// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
2015// 'length-reg' (R2) contains the desired length as a _Smi or _Mint.
2016// Returns new string as tagged pointer in R0.
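// A sketch of the size computation performed below (element size is 1 for
// one-byte and 2 for two-byte strings; the tagged Smi length already equals
// 2 * length, which is why two-byte strings skip the untag):
//   size = (length * element_size + target::String::InstanceSize() +
//           kObjectAlignment - 1) & ~(kObjectAlignment - 1);
// The string is then allocated with a simple top/end bump of new space,
// branching to 'failure' on overflow or if it does not fit.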
2017static void TryAllocateString(Assembler* assembler,
2018 classid_t cid,
2019 Label* ok,
2020 Label* failure) {
2021 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
2022 const Register length_reg = R2;
2023 // _Mint length: call to runtime to produce error.
2024 __ BranchIfNotSmi(length_reg, failure);
2025 // Negative length: call to runtime to produce error.
2026 __ cmp(length_reg, Operand(0));
2027 __ b(failure, LT);
2028
2029 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R0, cid));
2030 NOT_IN_PRODUCT(__ MaybeTraceAllocation(R0, failure));
2031 __ mov(R8, Operand(length_reg)); // Save the length register.
2032 if (cid == kOneByteStringCid) {
2033 __ SmiUntag(length_reg);
2034 } else {
2035 // Untag length and multiply by element size -> no-op.
2036 }
2037 const intptr_t fixed_size_plus_alignment_padding =
2038 target::String::InstanceSize() +
2039 target::ObjectAlignment::kObjectAlignment - 1;
2040 __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
2041 __ bic(length_reg, length_reg,
2042 Operand(target::ObjectAlignment::kObjectAlignment - 1));
2043
2044 __ ldr(R0, Address(THR, target::Thread::top_offset()));
2045
2046 // length_reg: allocation size.
2047 __ adds(R1, R0, Operand(length_reg));
2048 __ b(failure, CS); // Fail on unsigned overflow.
2049
2050 // Check if the allocation fits into the remaining space.
2051 // R0: potential new object start.
2052 // R1: potential next object start.
2053 // R2: allocation size.
2054 __ ldr(TMP, Address(THR, target::Thread::end_offset()));
2055 __ cmp(R1, Operand(TMP));
2056 __ b(failure, CS);
2057
2058 // Successfully allocated the object(s), now update top to point to
2059 // next object start and initialize the object.
2060 __ str(R1, Address(THR, target::Thread::top_offset()));
2061 __ AddImmediate(R0, kHeapObjectTag);
2062
2063 // Initialize the tags.
2064 // R0: new object start as a tagged pointer.
2065 // R1: new object end address.
2066 // R2: allocation size.
2067 {
2068 const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
2069 target::ObjectAlignment::kObjectAlignmentLog2;
2070
2071 __ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);
2072 __ mov(R3, Operand(R2, LSL, shift), LS);
2073 __ mov(R3, Operand(0), HI);
2074
2075 // Get the class index and insert it into the tags.
2076 // R3: size and bit tags.
2077 const uint32_t tags =
2078 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
2079 __ LoadImmediate(TMP, tags);
2080 __ orr(R3, R3, Operand(TMP));
2081 __ str(R3, FieldAddress(R0, target::Object::tags_offset())); // Store tags.
2082 }
2083
2084 // Set the length field using the saved length (R8).
2085 __ StoreIntoObjectNoBarrier(
2086 R0, FieldAddress(R0, target::String::length_offset()), R8);
2087 // Clear hash.
2088 __ LoadImmediate(TMP, 0);
2089 __ StoreIntoObjectNoBarrier(
2090 R0, FieldAddress(R0, target::String::hash_offset()), TMP);
2091
2092 __ b(ok);
2093}
2094
2095// Arg0: OneByteString (receiver).
2096// Arg1: Start index as Smi.
2097// Arg2: End index as Smi.
2098// The indexes must be valid.
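// In effect this allocates a fresh _OneByteString of length (end - start) and
// copies the bytes over; an illustrative sketch:
//   result = allocate OneByteString(end - start);   // Content uninitialized.
//   for (intptr_t i = 0; i < end - start; i++) {
//     result[i] = receiver[start + i];
//   }
//   return result;
// Falls back to the runtime if the indexes are not Smis or if the allocation
// does not fit in new space.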
2099void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
2100 Label* normal_ir_body) {
2101 const intptr_t kStringOffset = 2 * target::kWordSize;
2102 const intptr_t kStartIndexOffset = 1 * target::kWordSize;
2103 const intptr_t kEndIndexOffset = 0 * target::kWordSize;
2104 Label ok;
2105
2106 __ ldr(R2, Address(SP, kEndIndexOffset));
2107 __ ldr(TMP, Address(SP, kStartIndexOffset));
2108 __ orr(R3, R2, Operand(TMP));
2109 __ tst(R3, Operand(kSmiTagMask));
2110 __ b(normal_ir_body, NE); // 'start', 'end' not Smi.
2111
2112 __ sub(R2, R2, Operand(TMP));
2113 TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body);
2114 __ Bind(&ok);
2115 // R0: new string as tagged pointer.
2116 // Copy string.
2117 __ ldr(R3, Address(SP, kStringOffset));
2118 __ ldr(R1, Address(SP, kStartIndexOffset));
2119 __ SmiUntag(R1);
2120 __ add(R3, R3, Operand(R1));
2121 // Calculate start address and untag (- 1).
2122 __ AddImmediate(R3, target::OneByteString::data_offset() - 1);
2123
2124 // R3: Start address to copy from (untagged).
2125 // R1: Untagged start index.
2126 __ ldr(R2, Address(SP, kEndIndexOffset));
2127 __ SmiUntag(R2);
2128 __ sub(R2, R2, Operand(R1));
2129
2130 // R3: Start address to copy from (untagged).
2131 // R2: Untagged number of bytes to copy.
2132 // R0: Tagged result string.
2133 // R8: Pointer into R3.
2134 // R1: Pointer into R0.
2135 // TMP: Scratch register.
2136 Label loop, done;
2137 __ cmp(R2, Operand(0));
2138 __ b(&done, LE);
2139 __ mov(R8, Operand(R3));
2140 __ mov(R1, Operand(R0));
2141 __ Bind(&loop);
2142 __ ldrb(TMP, Address(R8, 1, Address::PostIndex));
2143 __ sub(R2, R2, Operand(1));
2144 __ cmp(R2, Operand(0));
2145 __ strb(TMP, FieldAddress(R1, target::OneByteString::data_offset()));
2146 __ add(R1, R1, Operand(1));
2147 __ b(&loop, GT);
2148
2149 __ Bind(&done);
2150 __ Ret();
2151 __ Bind(normal_ir_body);
2152}
2153
2154void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
2155 Label* normal_ir_body) {
2156 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
2157 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
2158 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // OneByteString.
2159 __ SmiUntag(R1);
2160 __ SmiUntag(R2);
2161 __ AddImmediate(R3, R0,
2162 target::OneByteString::data_offset() - kHeapObjectTag);
2163 __ strb(R2, Address(R3, R1));
2164 __ Ret();
2165}
2166
2167void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
2168 Label* normal_ir_body) {
2169 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
2170 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
2171 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // TwoByteString.
2172 // Untag index and multiply by element size -> no-op.
2173 __ SmiUntag(R2);
2174 __ AddImmediate(R3, R0,
2175 target::TwoByteString::data_offset() - kHeapObjectTag);
2176 __ strh(R2, Address(R3, R1));
2177 __ Ret();
2178}
2179
2180void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
2181 Label* normal_ir_body) {
2182 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
2183 Label ok;
2184 TryAllocateString(assembler, kOneByteStringCid, &ok, normal_ir_body);
2185
2186 __ Bind(&ok);
2187 __ Ret();
2188
2189 __ Bind(normal_ir_body);
2190}
2191
2192void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
2193 Label* normal_ir_body) {
2194 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
2195 Label ok;
2196 TryAllocateString(assembler, kTwoByteStringCid, &ok, normal_ir_body);
2197
2198 __ Bind(&ok);
2199 __ Ret();
2200
2201 __ Bind(normal_ir_body);
2202}
2203
2204// TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
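// Shared implementation of operator== for _OneByteString and _TwoByteString
// (string_cid selects the element width). A rough sketch:
//   if (identical(this, other)) return true;
//   if (other is not an instance of string_cid) fall back to the runtime;
//   if (this.length != other.length) return false;
//   for (int i = 0; i < this.length; i++) {
//     if (this.codeUnitAt(i) != other.codeUnitAt(i)) return false;
//   }
//   return true;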
2205static void StringEquality(Assembler* assembler,
2206 Label* normal_ir_body,
2207 intptr_t string_cid) {
2208 Label is_true, is_false, loop;
2209 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
2210 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
2211
2212 // Are identical?
2213 __ cmp(R0, Operand(R1));
2214 __ b(&is_true, EQ);
2215
  // Is other a string with the same representation (string_cid)?
2217 __ tst(R1, Operand(kSmiTagMask));
2218 __ b(normal_ir_body, EQ);
2219 __ CompareClassId(R1, string_cid, R2);
2220 __ b(normal_ir_body, NE);
2221
2222 // Have same length?
2223 __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
2224 __ ldr(R3, FieldAddress(R1, target::String::length_offset()));
2225 __ cmp(R2, Operand(R3));
2226 __ b(&is_false, NE);
2227
2228 // Check contents, no fall-through possible.
2229 // TODO(zra): try out other sequences.
2230 ASSERT((string_cid == kOneByteStringCid) ||
2231 (string_cid == kTwoByteStringCid));
2232 const intptr_t offset = (string_cid == kOneByteStringCid)
2233 ? target::OneByteString::data_offset()
2234 : target::TwoByteString::data_offset();
2235 __ AddImmediate(R0, offset - kHeapObjectTag);
2236 __ AddImmediate(R1, offset - kHeapObjectTag);
2237 __ SmiUntag(R2);
2238 __ Bind(&loop);
2239 __ AddImmediate(R2, -1);
2240 __ cmp(R2, Operand(0));
2241 __ b(&is_true, LT);
2242 if (string_cid == kOneByteStringCid) {
2243 __ ldrb(R3, Address(R0));
2244 __ ldrb(R4, Address(R1));
2245 __ AddImmediate(R0, 1);
2246 __ AddImmediate(R1, 1);
2247 } else if (string_cid == kTwoByteStringCid) {
2248 __ ldrh(R3, Address(R0));
2249 __ ldrh(R4, Address(R1));
2250 __ AddImmediate(R0, 2);
2251 __ AddImmediate(R1, 2);
2252 } else {
2253 UNIMPLEMENTED();
2254 }
2255 __ cmp(R3, Operand(R4));
2256 __ b(&is_false, NE);
2257 __ b(&loop);
2258
2259 __ Bind(&is_true);
2260 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
2261 __ Ret();
2262
2263 __ Bind(&is_false);
2264 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
2265 __ Ret();
2266
2267 __ Bind(normal_ir_body);
2268}
2269
2270void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
2271 Label* normal_ir_body) {
2272 StringEquality(assembler, normal_ir_body, kOneByteStringCid);
2273}
2274
2275void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
2276 Label* normal_ir_body) {
2277 StringEquality(assembler, normal_ir_body, kTwoByteStringCid);
2278}
2279
2280void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
2281 Label* normal_ir_body,
2282 bool sticky) {
2283 if (FLAG_interpret_irregexp) return;
2284
2285 static const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
2286 static const intptr_t kStringParamOffset = 1 * target::kWordSize;
2287 // start_index smi is located at offset 0.
2288
2289 // Incoming registers:
2290 // R0: Function. (Will be reloaded with the specialized matcher function.)
2291 // R4: Arguments descriptor. (Will be preserved.)
2292 // R9: Unknown. (Must be GC safe on tail call.)
2293
  // Load the specialized function pointer into R0. This leverages the fact
  // that the string CIDs and the stored function pointers are in sequence.
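  // The specialized matcher functions are stored per string cid, in cid
  // order, so the load below is effectively
  //   fn = regexp[function_offset(kOneByteStringCid, sticky) +
  //               (string_cid - kOneByteStringCid) * kWordSize];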
2296 __ ldr(R2, Address(SP, kRegExpParamOffset));
2297 __ ldr(R1, Address(SP, kStringParamOffset));
2298 __ LoadClassId(R1, R1);
2299 __ AddImmediate(R1, -kOneByteStringCid);
2300 __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
2301 __ ldr(R0, FieldAddress(R1, target::RegExp::function_offset(kOneByteStringCid,
2302 sticky)));
2303
2304 // Registers are now set up for the lazy compile stub. It expects the function
2305 // in R0, the argument descriptor in R4, and IC-Data in R9.
  __ eor(R9, R9, Operand(R9));  // Clear R9 (no IC-Data).
2307
2308 // Tail-call the function.
2309 __ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
2310 __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
2311}
2312
2313// On stack: user tag (+0).
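// Makes the argument the current tag and returns the previous one; roughly:
//   old = isolate->current_tag;
//   isolate->current_tag = tag;
//   isolate->user_tag = tag->tag();   // The raw integer tag.
//   return old;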
2314void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
2315 Label* normal_ir_body) {
2316 // R1: Isolate.
2317 __ LoadIsolate(R1);
2318 // R0: Current user tag.
2319 __ ldr(R0, Address(R1, target::Isolate::current_tag_offset()));
2320 // R2: UserTag.
2321 __ ldr(R2, Address(SP, +0 * target::kWordSize));
2322 // Set target::Isolate::current_tag_.
2323 __ str(R2, Address(R1, target::Isolate::current_tag_offset()));
2324 // R2: UserTag's tag.
2325 __ ldr(R2, FieldAddress(R2, target::UserTag::tag_offset()));
2326 // Set target::Isolate::user_tag_.
2327 __ str(R2, Address(R1, target::Isolate::user_tag_offset()));
2328 __ Ret();
2329}
2330
2331void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
2332 Label* normal_ir_body) {
2333 __ LoadIsolate(R0);
2334 __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
2335 __ Ret();
2336}
2337
2338void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
2339 Label* normal_ir_body) {
2340 __ LoadIsolate(R0);
2341 __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
2342 __ Ret();
2343}
2344
2345void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
2346 Label* normal_ir_body) {
2347#if !defined(SUPPORT_TIMELINE)
2348 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
2349 __ Ret();
2350#else
2351 // Load TimelineStream*.
2352 __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
2353 // Load uintptr_t from TimelineStream*.
2354 __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
2355 __ cmp(R0, Operand(0));
2356 __ LoadObject(R0, CastHandle<Object>(TrueObject()), NE);
2357 __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
2358 __ Ret();
2359#endif
2360}
2361
2362void AsmIntrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler,
2363 Label* normal_ir_body) {
2364 __ LoadObject(R0, NullObject());
2365 __ str(R0, Address(THR, target::Thread::async_stack_trace_offset()));
2366 __ Ret();
2367}
2368
2369void AsmIntrinsifier::SetAsyncThreadStackTrace(Assembler* assembler,
2370 Label* normal_ir_body) {
  // Store the incoming stack trace argument into the thread field and return
  // null.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ str(R0, Address(THR, target::Thread::async_stack_trace_offset()));
  __ LoadObject(R0, NullObject());
  __ Ret();
2374}
2375
2376#undef __
2377
2378} // namespace compiler
2379} // namespace dart
2380
2381#endif // defined(TARGET_ARCH_ARM)
2382