1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/globals.h" |
6 | #if defined(TARGET_ARCH_ARM) |
7 | |
8 | #include "vm/compiler/assembler/assembler.h" |
9 | #include "vm/cpu.h" |
10 | #include "vm/os.h" |
11 | #include "vm/unit_test.h" |
12 | #include "vm/virtual_memory.h" |
13 | |
14 | namespace dart { |
15 | namespace compiler { |
16 | |
17 | TEST_CASE(ReciprocalOps) { |
18 | EXPECT_EQ(true, isinf(ReciprocalEstimate(-0.0f))); |
19 | EXPECT_EQ(true, signbit(ReciprocalEstimate(-0.0f))); |
20 | EXPECT_EQ(true, isinf(ReciprocalEstimate(0.0f))); |
21 | EXPECT_EQ(true, !signbit(ReciprocalEstimate(0.0f))); |
22 | EXPECT_EQ(true, isnan(ReciprocalEstimate(NAN))); |
23 | |
24 | #define AS_UINT32(v) (bit_cast<uint32_t, float>(v)) |
25 | #define EXPECT_BITWISE_EQ(a, b) EXPECT_EQ(AS_UINT32(a), AS_UINT32(b)) |
26 | |
27 | EXPECT_BITWISE_EQ(0.0f, ReciprocalEstimate(kPosInfinity)); |
28 | EXPECT_BITWISE_EQ(-0.0f, ReciprocalEstimate(kNegInfinity)); |
29 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(0.0f, kPosInfinity)); |
30 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(0.0f, kNegInfinity)); |
31 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(-0.0f, kPosInfinity)); |
32 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(-0.0f, kNegInfinity)); |
33 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kPosInfinity, 0.0f)); |
34 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kNegInfinity, 0.0f)); |
35 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kPosInfinity, -0.0f)); |
36 | EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kNegInfinity, -0.0f)); |
37 | EXPECT_EQ(true, isnan(ReciprocalStep(NAN, 1.0f))); |
38 | EXPECT_EQ(true, isnan(ReciprocalStep(1.0f, NAN))); |
39 | |
40 | EXPECT_EQ(true, isnan(ReciprocalSqrtEstimate(-1.0f))); |
41 | EXPECT_EQ(true, isnan(ReciprocalSqrtEstimate(kNegInfinity))); |
43 | EXPECT_EQ(true, isinf(ReciprocalSqrtEstimate(-0.0f))); |
44 | EXPECT_EQ(true, signbit(ReciprocalSqrtEstimate(-0.0f))); |
45 | EXPECT_EQ(true, isinf(ReciprocalSqrtEstimate(0.0f))); |
46 | EXPECT_EQ(true, !signbit(ReciprocalSqrtEstimate(0.0f))); |
47 | EXPECT_EQ(true, isnan(ReciprocalSqrtEstimate(NAN))); |
48 | EXPECT_BITWISE_EQ(0.0f, ReciprocalSqrtEstimate(kPosInfinity)); |
49 | |
50 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(0.0f, kPosInfinity)); |
51 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(0.0f, kNegInfinity)); |
52 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(-0.0f, kPosInfinity)); |
53 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(-0.0f, kNegInfinity)); |
54 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kPosInfinity, 0.0f)); |
55 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kNegInfinity, 0.0f)); |
56 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kPosInfinity, -0.0f)); |
57 | EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kNegInfinity, -0.0f)); |
58 | EXPECT_EQ(true, isnan(ReciprocalSqrtStep(NAN, 1.0f))); |
59 | EXPECT_EQ(true, isnan(ReciprocalSqrtStep(1.0f, NAN))); |
60 | |
61 | #undef AS_UINT32 |
62 | #undef EXPECT_BITWISE_EQ |
63 | } |
64 | |
65 | #define __ assembler-> |
66 | |
67 | ASSEMBLER_TEST_GENERATE(Simple, assembler) { |
68 | __ mov(R0, Operand(42)); |
69 | __ bx(LR); |
70 | } |
71 | |
72 | ASSEMBLER_TEST_RUN(Simple, test) { |
73 | typedef int (*SimpleCode)() DART_UNUSED; |
74 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); |
75 | } |
76 | |
77 | ASSEMBLER_TEST_GENERATE(MoveNegated, assembler) { |
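  // mvn writes the bitwise complement of its operand, so R0 == ~42.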
78 | __ mvn(R0, Operand(42)); |
79 | __ bx(LR); |
80 | } |
81 | |
82 | ASSEMBLER_TEST_RUN(MoveNegated, test) { |
83 | EXPECT(test != NULL); |
84 | typedef int (*MoveNegated)() DART_UNUSED; |
85 | EXPECT_EQ(~42, EXECUTE_TEST_CODE_INT32(MoveNegated, test->entry())); |
86 | } |
87 | |
88 | ASSEMBLER_TEST_GENERATE(MoveRotImm, assembler) { |
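  // Operand::CanHold checks whether a constant fits the ARM immediate
  // encoding: an 8-bit value rotated right by an even amount.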
89 | Operand o; |
90 | EXPECT(Operand::CanHold(0x00550000, &o)); |
91 | __ mov(R0, o); |
92 | EXPECT(Operand::CanHold(0x30000003, &o)); |
93 | __ add(R0, R0, o); |
94 | __ bx(LR); |
95 | } |
96 | |
97 | ASSEMBLER_TEST_RUN(MoveRotImm, test) { |
98 | EXPECT(test != NULL); |
99 | typedef int (*MoveRotImm)() DART_UNUSED; |
100 | EXPECT_EQ(0x30550003, EXECUTE_TEST_CODE_INT32(MoveRotImm, test->entry())); |
101 | } |
102 | |
103 | ASSEMBLER_TEST_GENERATE(MovImm16, assembler) { |
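  // LoadPatchableImmediate emits a fixed-size sequence that materializes
  // the full 32-bit constant so it can be patched later.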
104 | __ LoadPatchableImmediate(R0, 0x12345678); |
105 | __ bx(LR); |
106 | } |
107 | |
108 | ASSEMBLER_TEST_RUN(MovImm16, test) { |
109 | EXPECT(test != NULL); |
110 | typedef int (*MovImm16)() DART_UNUSED; |
111 | EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INT32(MovImm16, test->entry())); |
112 | } |
113 | |
114 | ASSEMBLER_TEST_GENERATE(LoadImmediate, assembler) { |
115 | __ mov(R0, Operand(0)); |
116 | __ cmp(R0, Operand(0)); |
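  // Z is set, so only the EQ load below executes; the NE load is skipped.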
117 | __ LoadImmediate(R0, 0x12345678, EQ); |
118 | __ LoadImmediate(R0, 0x87654321, NE); |
119 | __ bx(LR); |
120 | } |
121 | |
122 | ASSEMBLER_TEST_RUN(LoadImmediate, test) { |
123 | EXPECT(test != NULL); |
124 | typedef int (*LoadImmediate)() DART_UNUSED; |
125 | EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INT32(LoadImmediate, test->entry())); |
126 | } |
127 | |
128 | ASSEMBLER_TEST_GENERATE(LoadHalfWordUnaligned, assembler) { |
129 | __ LoadHalfWordUnaligned(R1, R0, TMP); |
130 | __ mov(R0, Operand(R1)); |
131 | __ bx(LR); |
132 | } |
133 | |
134 | ASSEMBLER_TEST_RUN(LoadHalfWordUnaligned, test) { |
135 | EXPECT(test != NULL); |
136 | typedef intptr_t (*LoadHalfWordUnaligned)(intptr_t) DART_UNUSED; |
137 | uint8_t buffer[4] = { |
138 | 0x89, 0xAB, 0xCD, 0xEF, |
139 | }; |
140 | |
141 | EXPECT_EQ( |
142 | static_cast<int16_t>(static_cast<uint16_t>(0xAB89)), |
143 | EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadHalfWordUnaligned, test->entry(), |
144 | reinterpret_cast<intptr_t>(&buffer[0]))); |
145 | EXPECT_EQ( |
146 | static_cast<int16_t>(static_cast<uint16_t>(0xCDAB)), |
147 | EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadHalfWordUnaligned, test->entry(), |
148 | reinterpret_cast<intptr_t>(&buffer[1]))); |
149 | } |
150 | |
151 | ASSEMBLER_TEST_GENERATE(LoadHalfWordUnsignedUnaligned, assembler) { |
152 | __ LoadHalfWordUnsignedUnaligned(R1, R0, TMP); |
153 | __ mov(R0, Operand(R1)); |
154 | __ bx(LR); |
155 | } |
156 | |
157 | ASSEMBLER_TEST_RUN(LoadHalfWordUnsignedUnaligned, test) { |
158 | EXPECT(test != NULL); |
159 | typedef intptr_t (*LoadHalfWordUnsignedUnaligned)(intptr_t) DART_UNUSED; |
160 | uint8_t buffer[4] = { |
161 | 0x89, 0xAB, 0xCD, 0xEF, |
162 | }; |
163 | |
164 | EXPECT_EQ(0xAB89, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
165 | LoadHalfWordUnsignedUnaligned, test->entry(), |
166 | reinterpret_cast<intptr_t>(&buffer[0]))); |
167 | EXPECT_EQ(0xCDAB, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
168 | LoadHalfWordUnsignedUnaligned, test->entry(), |
169 | reinterpret_cast<intptr_t>(&buffer[1]))); |
170 | } |
171 | |
172 | ASSEMBLER_TEST_GENERATE(StoreHalfWordUnaligned, assembler) { |
173 | __ LoadImmediate(R1, 0xABCD); |
  __ StoreHalfWordUnaligned(R1, R0, TMP);
175 | __ mov(R0, Operand(R1)); |
176 | __ bx(LR); |
177 | } |
178 | |
179 | ASSEMBLER_TEST_RUN(StoreHalfWordUnaligned, test) { |
180 | EXPECT(test != NULL); |
181 | typedef intptr_t (*StoreHalfWordUnaligned)(intptr_t) DART_UNUSED; |
182 | uint8_t buffer[4] = { |
183 | 0, 0, 0, 0, |
184 | }; |
185 | |
186 | EXPECT_EQ(0xABCD, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
187 | StoreHalfWordUnaligned, test->entry(), |
188 | reinterpret_cast<intptr_t>(&buffer[0]))); |
189 | EXPECT_EQ(0xCD, buffer[0]); |
190 | EXPECT_EQ(0xAB, buffer[1]); |
191 | EXPECT_EQ(0, buffer[2]); |
192 | |
193 | EXPECT_EQ(0xABCD, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
194 | StoreHalfWordUnaligned, test->entry(), |
195 | reinterpret_cast<intptr_t>(&buffer[1]))); |
196 | EXPECT_EQ(0xCD, buffer[1]); |
197 | EXPECT_EQ(0xAB, buffer[2]); |
198 | EXPECT_EQ(0, buffer[3]); |
199 | } |
200 | |
201 | ASSEMBLER_TEST_GENERATE(LoadWordUnaligned, assembler) { |
202 | __ LoadWordUnaligned(R1, R0, TMP); |
203 | __ mov(R0, Operand(R1)); |
204 | __ bx(LR); |
205 | } |
206 | |
207 | ASSEMBLER_TEST_RUN(LoadWordUnaligned, test) { |
208 | EXPECT(test != NULL); |
209 | typedef intptr_t (*LoadWordUnaligned)(intptr_t) DART_UNUSED; |
210 | uint8_t buffer[8] = {0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0}; |
211 | |
212 | EXPECT_EQ( |
213 | static_cast<intptr_t>(0x78563412), |
214 | EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(), |
215 | reinterpret_cast<intptr_t>(&buffer[0]))); |
216 | EXPECT_EQ( |
217 | static_cast<intptr_t>(0x9A785634), |
218 | EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(), |
219 | reinterpret_cast<intptr_t>(&buffer[1]))); |
220 | EXPECT_EQ( |
221 | static_cast<intptr_t>(0xBC9A7856), |
222 | EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(), |
223 | reinterpret_cast<intptr_t>(&buffer[2]))); |
224 | EXPECT_EQ( |
225 | static_cast<intptr_t>(0xDEBC9A78), |
226 | EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(), |
227 | reinterpret_cast<intptr_t>(&buffer[3]))); |
228 | } |
229 | |
230 | ASSEMBLER_TEST_GENERATE(StoreWordUnaligned, assembler) { |
231 | __ LoadImmediate(R1, 0x12345678); |
232 | __ StoreWordUnaligned(R1, R0, TMP); |
233 | __ mov(R0, Operand(R1)); |
234 | __ bx(LR); |
235 | } |
236 | |
237 | ASSEMBLER_TEST_RUN(StoreWordUnaligned, test) { |
238 | EXPECT(test != NULL); |
239 | typedef intptr_t (*StoreWordUnaligned)(intptr_t) DART_UNUSED; |
240 | uint8_t buffer[8] = {0, 0, 0, 0, 0, 0, 0, 0}; |
241 | |
242 | EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
243 | StoreWordUnaligned, test->entry(), |
244 | reinterpret_cast<intptr_t>(&buffer[0]))); |
245 | EXPECT_EQ(0x78, buffer[0]); |
246 | EXPECT_EQ(0x56, buffer[1]); |
247 | EXPECT_EQ(0x34, buffer[2]); |
248 | EXPECT_EQ(0x12, buffer[3]); |
249 | |
250 | EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
251 | StoreWordUnaligned, test->entry(), |
252 | reinterpret_cast<intptr_t>(&buffer[1]))); |
253 | EXPECT_EQ(0x78, buffer[1]); |
254 | EXPECT_EQ(0x56, buffer[2]); |
255 | EXPECT_EQ(0x34, buffer[3]); |
256 | EXPECT_EQ(0x12, buffer[4]); |
257 | |
258 | EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
259 | StoreWordUnaligned, test->entry(), |
260 | reinterpret_cast<intptr_t>(&buffer[2]))); |
261 | EXPECT_EQ(0x78, buffer[2]); |
262 | EXPECT_EQ(0x56, buffer[3]); |
263 | EXPECT_EQ(0x34, buffer[4]); |
264 | EXPECT_EQ(0x12, buffer[5]); |
265 | |
266 | EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR( |
267 | StoreWordUnaligned, test->entry(), |
268 | reinterpret_cast<intptr_t>(&buffer[3]))); |
269 | EXPECT_EQ(0x78, buffer[3]); |
270 | EXPECT_EQ(0x56, buffer[4]); |
271 | EXPECT_EQ(0x34, buffer[5]); |
272 | EXPECT_EQ(0x12, buffer[6]); |
273 | } |
274 | |
275 | ASSEMBLER_TEST_GENERATE(Vmov, assembler) { |
276 | if (TargetCPUFeatures::vfp_supported()) { |
277 | __ mov(R3, Operand(43)); |
278 | __ mov(R1, Operand(41)); |
279 | __ vmovsrr(S1, R1, R3); // S1:S2 = 41:43 |
280 | __ vmovs(S0, S2); // S0 = S2, S0:S1 == 43:41 |
281 | __ vmovd(D2, D0); // D2 = D0, S4:S5 == 43:41 |
282 | __ vmovrs(R3, S5); // R3 = S5, R3 == 41 |
283 | __ vmovrrs(R1, R2, S4); // R1:R2 = S4:S5, R1:R2 == 43:41 |
284 | __ vmovdrr(D3, R3, R2); // D3 = R3:R2, S6:S7 == 41:41 |
285 | __ vmovdr(D3, 1, R1); // D3[1] == S7 = R1, S6:S7 == 41:43 |
286 | __ vmovrrd(R0, R1, D3); // R0:R1 = D3, R0:R1 == 41:43 |
287 | __ sub(R0, R1, Operand(R0)); // 43-41 |
288 | } |
289 | __ bx(LR); |
290 | } |
291 | |
292 | ASSEMBLER_TEST_RUN(Vmov, test) { |
293 | EXPECT(test != NULL); |
294 | if (TargetCPUFeatures::vfp_supported()) { |
295 | typedef int (*Vmov)() DART_UNUSED; |
296 | EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(Vmov, test->entry())); |
297 | } |
298 | } |
299 | |
300 | ASSEMBLER_TEST_GENERATE(SingleVLoadStore, assembler) { |
301 | if (TargetCPUFeatures::vfp_supported()) { |
302 | __ LoadImmediate(R0, bit_cast<int32_t, float>(12.3f)); |
303 | __ mov(R2, Operand(SP)); |
304 | __ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
305 | __ vldrs(S0, Address(R2, (-target::kWordSize * 30))); |
306 | __ vadds(S0, S0, S0); |
307 | __ vstrs(S0, Address(R2, (-target::kWordSize * 30))); |
308 | __ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
309 | } |
310 | __ bx(LR); |
311 | } |
312 | |
313 | ASSEMBLER_TEST_RUN(SingleVLoadStore, test) { |
314 | EXPECT(test != NULL); |
315 | if (TargetCPUFeatures::vfp_supported()) { |
316 | typedef float (*SingleVLoadStore)() DART_UNUSED; |
317 | float res = EXECUTE_TEST_CODE_FLOAT(SingleVLoadStore, test->entry()); |
318 | EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f); |
319 | } |
320 | } |
321 | |
322 | ASSEMBLER_TEST_GENERATE(SingleVShiftLoadStore, assembler) { |
323 | if (TargetCPUFeatures::vfp_supported()) { |
324 | __ LoadImmediate(R0, bit_cast<int32_t, float>(12.3f)); |
325 | __ mov(R2, Operand(SP)); |
    // Expressing __ str(R0, Address(SP, (-kWordSize * 32), Address::PreIndex));
327 | // as: |
328 | __ mov(R1, Operand(target::kWordSize)); |
329 | __ str(R0, Address(SP, R1, LSL, 5, Address::NegPreIndex)); |
330 | __ vldrs(S0, Address(R2, (-target::kWordSize * 32))); |
331 | __ vadds(S0, S0, S0); |
332 | __ vstrs(S0, Address(R2, (-target::kWordSize * 32))); |
    // Expressing __ ldr(R0, Address(SP, (kWordSize * 32), Address::PostIndex));
334 | // as: |
335 | __ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex)); |
336 | } |
337 | __ bx(LR); |
338 | } |
339 | |
340 | ASSEMBLER_TEST_RUN(SingleVShiftLoadStore, test) { |
341 | EXPECT(test != NULL); |
342 | if (TargetCPUFeatures::vfp_supported()) { |
343 | typedef float (*SingleVLoadStore)() DART_UNUSED; |
344 | float res = EXECUTE_TEST_CODE_FLOAT(SingleVLoadStore, test->entry()); |
345 | EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f); |
346 | } |
347 | } |
348 | |
349 | ASSEMBLER_TEST_GENERATE(DoubleVLoadStore, assembler) { |
350 | if (TargetCPUFeatures::vfp_supported()) { |
351 | int64_t value = bit_cast<int64_t, double>(12.3); |
352 | __ LoadImmediate(R0, Utils::Low32Bits(value)); |
353 | __ LoadImmediate(R1, Utils::High32Bits(value)); |
354 | __ mov(R2, Operand(SP)); |
355 | __ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
356 | __ str(R1, Address(R2, (-target::kWordSize * 29))); |
357 | __ vldrd(D0, Address(R2, (-target::kWordSize * 30))); |
358 | __ vaddd(D0, D0, D0); |
359 | __ vstrd(D0, Address(R2, (-target::kWordSize * 30))); |
360 | __ ldr(R1, Address(R2, (-target::kWordSize * 29))); |
361 | __ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
362 | } |
363 | __ bx(LR); |
364 | } |
365 | |
366 | ASSEMBLER_TEST_RUN(DoubleVLoadStore, test) { |
367 | EXPECT(test != NULL); |
368 | if (TargetCPUFeatures::vfp_supported()) { |
369 | typedef double (*DoubleVLoadStore)() DART_UNUSED; |
    double res = EXECUTE_TEST_CODE_DOUBLE(DoubleVLoadStore, test->entry());
    EXPECT_FLOAT_EQ(2 * 12.3, res, 0.001);
372 | } |
373 | } |
374 | |
375 | ASSEMBLER_TEST_GENERATE(SingleFPOperations, assembler) { |
376 | if (TargetCPUFeatures::vfp_supported()) { |
377 | __ LoadSImmediate(S0, 12.3f); |
378 | __ LoadSImmediate(S1, 3.4f); |
379 | __ vnegs(S0, S0); // -12.3f |
380 | __ vabss(S0, S0); // 12.3f |
381 | __ vadds(S0, S0, S1); // 15.7f |
382 | __ vmuls(S0, S0, S1); // 53.38f |
383 | __ vsubs(S0, S0, S1); // 49.98f |
384 | __ vdivs(S0, S0, S1); // 14.7f |
385 | __ vsqrts(S0, S0); // 3.8340579f |
386 | } |
387 | __ bx(LR); |
388 | } |
389 | |
390 | ASSEMBLER_TEST_RUN(SingleFPOperations, test) { |
391 | EXPECT(test != NULL); |
392 | if (TargetCPUFeatures::vfp_supported()) { |
393 | typedef float (*SingleFPOperations)() DART_UNUSED; |
394 | float res = EXECUTE_TEST_CODE_FLOAT(SingleFPOperations, test->entry()); |
395 | EXPECT_FLOAT_EQ(3.8340579f, res, 0.001f); |
396 | } |
397 | } |
398 | |
399 | ASSEMBLER_TEST_GENERATE(DoubleFPOperations, assembler) { |
400 | if (TargetCPUFeatures::vfp_supported()) { |
401 | __ LoadDImmediate(D0, 12.3, R0); |
402 | __ LoadDImmediate(D1, 3.4, R0); |
403 | __ vnegd(D0, D0); // -12.3 |
404 | __ vabsd(D0, D0); // 12.3 |
405 | __ vaddd(D0, D0, D1); // 15.7 |
406 | __ vmuld(D0, D0, D1); // 53.38 |
407 | __ vsubd(D0, D0, D1); // 49.98 |
408 | __ vdivd(D0, D0, D1); // 14.7 |
409 | __ vsqrtd(D0, D0); // 3.8340579 |
410 | } |
411 | __ bx(LR); |
412 | } |
413 | |
414 | ASSEMBLER_TEST_RUN(DoubleFPOperations, test) { |
415 | EXPECT(test != NULL); |
416 | if (TargetCPUFeatures::vfp_supported()) { |
417 | typedef double (*DoubleFPOperations)() DART_UNUSED; |
418 | double res = EXECUTE_TEST_CODE_DOUBLE(DoubleFPOperations, test->entry()); |
419 | EXPECT_FLOAT_EQ(3.8340579, res, 0.001); |
420 | } |
421 | } |
422 | |
423 | ASSEMBLER_TEST_GENERATE(DoubleSqrtNeg, assembler) { |
424 | if (TargetCPUFeatures::vfp_supported()) { |
425 | // Check that sqrt of a negative double gives NaN. |
426 | __ LoadDImmediate(D1, -1.0, R0); |
427 | __ vsqrtd(D0, D1); |
428 | __ vcmpd(D0, D0); |
429 | __ vmstat(); |
430 | __ mov(R0, Operand(1), VS); |
431 | __ mov(R0, Operand(0), VC); |
432 | } |
433 | __ Ret(); |
434 | } |
435 | |
436 | ASSEMBLER_TEST_RUN(DoubleSqrtNeg, test) { |
437 | EXPECT(test != NULL); |
438 | if (TargetCPUFeatures::vfp_supported()) { |
439 | typedef int (*DoubleSqrtNeg)() DART_UNUSED; |
440 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(DoubleSqrtNeg, test->entry())); |
441 | } |
442 | } |
443 | |
444 | ASSEMBLER_TEST_GENERATE(IntToDoubleConversion, assembler) { |
445 | if (TargetCPUFeatures::vfp_supported()) { |
446 | __ mov(R3, Operand(6)); |
447 | __ vmovsr(S3, R3); |
448 | __ vcvtdi(D0, S3); |
449 | } |
450 | __ bx(LR); |
451 | } |
452 | |
453 | ASSEMBLER_TEST_RUN(IntToDoubleConversion, test) { |
454 | EXPECT(test != NULL); |
455 | if (TargetCPUFeatures::vfp_supported()) { |
456 | typedef double (*IntToDoubleConversionCode)() DART_UNUSED; |
457 | double res = |
458 | EXECUTE_TEST_CODE_DOUBLE(IntToDoubleConversionCode, test->entry()); |
459 | EXPECT_FLOAT_EQ(6.0, res, 0.001); |
460 | } |
461 | } |
462 | |
463 | ASSEMBLER_TEST_GENERATE(LongToDoubleConversion, assembler) { |
464 | if (TargetCPUFeatures::vfp_supported()) { |
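    // Convert the unsigned low and signed high 32-bit halves separately,
    // then combine them as high * 2^32 + low using vmlad.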
465 | int64_t value = 60000000000LL; |
466 | __ LoadImmediate(R0, Utils::Low32Bits(value)); |
467 | __ LoadImmediate(R1, Utils::High32Bits(value)); |
468 | __ vmovsr(S0, R0); |
469 | __ vmovsr(S2, R1); |
470 | __ vcvtdu(D0, S0); |
471 | __ vcvtdi(D1, S2); |
472 | __ LoadDImmediate(D2, 1.0 * (1LL << 32), R0); |
473 | __ vmlad(D0, D1, D2); |
474 | } |
475 | __ bx(LR); |
476 | } |
477 | |
478 | ASSEMBLER_TEST_RUN(LongToDoubleConversion, test) { |
479 | EXPECT(test != NULL); |
480 | if (TargetCPUFeatures::vfp_supported()) { |
481 | typedef double (*LongToDoubleConversionCode)() DART_UNUSED; |
482 | double res = |
483 | EXECUTE_TEST_CODE_DOUBLE(LongToDoubleConversionCode, test->entry()); |
484 | EXPECT_FLOAT_EQ(60000000000.0, res, 0.001); |
485 | } |
486 | } |
487 | |
488 | ASSEMBLER_TEST_GENERATE(IntToFloatConversion, assembler) { |
489 | if (TargetCPUFeatures::vfp_supported()) { |
490 | __ mov(R3, Operand(6)); |
491 | __ vmovsr(S3, R3); |
492 | __ vcvtsi(S0, S3); |
493 | } |
494 | __ bx(LR); |
495 | } |
496 | |
497 | ASSEMBLER_TEST_RUN(IntToFloatConversion, test) { |
498 | EXPECT(test != NULL); |
499 | if (TargetCPUFeatures::vfp_supported()) { |
500 | typedef float (*IntToFloatConversionCode)() DART_UNUSED; |
501 | float res = |
502 | EXECUTE_TEST_CODE_FLOAT(IntToFloatConversionCode, test->entry()); |
503 | EXPECT_FLOAT_EQ(6.0, res, 0.001); |
504 | } |
505 | } |
506 | |
507 | ASSEMBLER_TEST_GENERATE(FloatToIntConversion, assembler) { |
508 | if (TargetCPUFeatures::vfp_supported()) { |
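    // vcvtis truncates toward zero and saturates out-of-range values to
    // INT32_MIN / INT32_MAX.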
509 | __ vcvtis(S1, S0); |
510 | __ vmovrs(R0, S1); |
511 | } |
512 | __ bx(LR); |
513 | } |
514 | |
515 | ASSEMBLER_TEST_RUN(FloatToIntConversion, test) { |
516 | EXPECT(test != NULL); |
517 | if (TargetCPUFeatures::vfp_supported()) { |
518 | typedef int (*FloatToIntConversion)(float arg) DART_UNUSED; |
519 | EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32_F(FloatToIntConversion, test->entry(), |
520 | 12.8f)); |
521 | EXPECT_EQ(INT32_MIN, EXECUTE_TEST_CODE_INT32_F(FloatToIntConversion, |
522 | test->entry(), -FLT_MAX)); |
523 | EXPECT_EQ(INT32_MAX, EXECUTE_TEST_CODE_INT32_F(FloatToIntConversion, |
524 | test->entry(), FLT_MAX)); |
525 | } |
526 | } |
527 | |
528 | ASSEMBLER_TEST_GENERATE(DoubleToIntConversion, assembler) { |
529 | if (TargetCPUFeatures::vfp_supported()) { |
530 | __ vcvtid(S0, D0); |
531 | __ vmovrs(R0, S0); |
532 | } |
533 | __ bx(LR); |
534 | } |
535 | |
536 | ASSEMBLER_TEST_RUN(DoubleToIntConversion, test) { |
537 | if (TargetCPUFeatures::vfp_supported()) { |
538 | typedef int (*DoubleToIntConversion)(double arg) DART_UNUSED; |
539 | EXPECT(test != NULL); |
540 | EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion, |
541 | test->entry(), 12.8)); |
542 | EXPECT_EQ(INT32_MIN, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion, |
543 | test->entry(), -DBL_MAX)); |
544 | EXPECT_EQ(INT32_MAX, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion, |
545 | test->entry(), DBL_MAX)); |
546 | } |
547 | } |
548 | |
549 | ASSEMBLER_TEST_GENERATE(FloatToDoubleConversion, assembler) { |
550 | if (TargetCPUFeatures::vfp_supported()) { |
551 | __ LoadSImmediate(S2, 12.8f); |
552 | __ vcvtds(D0, S2); |
553 | } |
554 | __ bx(LR); |
555 | } |
556 | |
557 | ASSEMBLER_TEST_RUN(FloatToDoubleConversion, test) { |
558 | if (TargetCPUFeatures::vfp_supported()) { |
559 | typedef double (*FloatToDoubleConversionCode)() DART_UNUSED; |
560 | EXPECT(test != NULL); |
561 | double res = |
562 | EXECUTE_TEST_CODE_DOUBLE(FloatToDoubleConversionCode, test->entry()); |
563 | EXPECT_FLOAT_EQ(12.8, res, 0.001); |
564 | } |
565 | } |
566 | |
567 | ASSEMBLER_TEST_GENERATE(DoubleToFloatConversion, assembler) { |
568 | if (TargetCPUFeatures::vfp_supported()) { |
569 | __ LoadDImmediate(D1, 12.8, R0); |
570 | __ vcvtsd(S0, D1); |
571 | } |
572 | __ bx(LR); |
573 | } |
574 | |
575 | ASSEMBLER_TEST_RUN(DoubleToFloatConversion, test) { |
576 | EXPECT(test != NULL); |
577 | if (TargetCPUFeatures::vfp_supported()) { |
578 | typedef float (*DoubleToFloatConversionCode)() DART_UNUSED; |
579 | float res = |
580 | EXECUTE_TEST_CODE_FLOAT(DoubleToFloatConversionCode, test->entry()); |
581 | EXPECT_FLOAT_EQ(12.8, res, 0.001); |
582 | } |
583 | } |
584 | |
585 | ASSEMBLER_TEST_GENERATE(FloatCompare, assembler) { |
586 | if (TargetCPUFeatures::vfp_supported()) { |
587 | // Test 12.3f vs 12.5f. |
588 | __ LoadSImmediate(S0, 12.3f); |
589 | __ LoadSImmediate(S1, 12.5f); |
590 | |
591 | // Count errors in R0. R0 is zero if no errors found. |
592 | __ mov(R0, Operand(0)); |
593 | __ vcmps(S0, S1); |
594 | __ vmstat(); |
    __ add(R0, R0, Operand(1), VS);  // Error if unordered (NaN).
596 | __ add(R0, R0, Operand(2), GT); // Error if greater. |
597 | __ add(R0, R0, Operand(4), EQ); // Error if equal. |
598 | __ add(R0, R0, Operand(8), PL); // Error if not less. |
599 | |
600 | // Test NaN. |
601 | // Create NaN by dividing 0.0f/0.0f. |
602 | __ LoadSImmediate(S1, 0.0f); |
603 | __ vdivs(S1, S1, S1); |
604 | __ vcmps(S1, S1); |
605 | __ vmstat(); |
    // Error if not unordered (not NaN).
607 | __ add(R0, R0, Operand(16), VC); |
608 | } |
609 | // R0 is 0 if all tests passed. |
610 | __ bx(LR); |
611 | } |
612 | |
613 | ASSEMBLER_TEST_RUN(FloatCompare, test) { |
614 | EXPECT(test != NULL); |
615 | if (TargetCPUFeatures::vfp_supported()) { |
616 | typedef int (*FloatCompare)() DART_UNUSED; |
617 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(FloatCompare, test->entry())); |
618 | } |
619 | } |
620 | |
621 | ASSEMBLER_TEST_GENERATE(DoubleCompare, assembler) { |
622 | if (TargetCPUFeatures::vfp_supported()) { |
623 | // Test 12.3 vs 12.5. |
624 | __ LoadDImmediate(D0, 12.3, R1); |
625 | __ LoadDImmediate(D1, 12.5, R1); |
626 | |
627 | // Count errors in R0. R0 is zero if no errors found. |
628 | __ mov(R0, Operand(0)); |
629 | __ vcmpd(D0, D1); |
630 | __ vmstat(); |
    __ add(R0, R0, Operand(1), VS);  // Error if unordered (NaN).
632 | __ add(R0, R0, Operand(2), GT); // Error if greater. |
633 | __ add(R0, R0, Operand(4), EQ); // Error if equal. |
634 | __ add(R0, R0, Operand(8), PL); // Error if not less. |
635 | |
636 | // Test NaN. |
637 | // Create NaN by dividing 0.0/0.0. |
638 | __ LoadDImmediate(D1, 0.0, R1); |
639 | __ vdivd(D1, D1, D1); |
640 | __ vcmpd(D1, D1); |
641 | __ vmstat(); |
    // Error if not unordered (not NaN).
643 | __ add(R0, R0, Operand(16), VC); |
644 | } |
645 | // R0 is 0 if all tests passed. |
646 | __ bx(LR); |
647 | } |
648 | |
649 | ASSEMBLER_TEST_RUN(DoubleCompare, test) { |
650 | EXPECT(test != NULL); |
651 | if (TargetCPUFeatures::vfp_supported()) { |
652 | typedef int (*DoubleCompare)() DART_UNUSED; |
653 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(DoubleCompare, test->entry())); |
654 | } |
655 | } |
656 | |
657 | ASSEMBLER_TEST_GENERATE(Loop, assembler) { |
658 | Label loop_entry; |
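  // R0 doubles while R1 halves each iteration; the loop exits when the
  // shifted R1 becomes zero (2 -> 1 -> 0), leaving R0 == 4.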
659 | __ mov(R0, Operand(1)); |
660 | __ mov(R1, Operand(2)); |
661 | __ Bind(&loop_entry); |
662 | __ mov(R0, Operand(R0, LSL, 1)); |
663 | __ movs(R1, Operand(R1, LSR, 1)); |
664 | __ b(&loop_entry, NE); |
665 | __ bx(LR); |
666 | } |
667 | |
668 | ASSEMBLER_TEST_RUN(Loop, test) { |
669 | EXPECT(test != NULL); |
670 | typedef int (*Loop)() DART_UNUSED; |
671 | EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry())); |
672 | } |
673 | |
674 | ASSEMBLER_TEST_GENERATE(ForwardBranch, assembler) { |
675 | Label skip; |
676 | __ mov(R0, Operand(42)); |
677 | __ b(&skip); |
678 | __ mov(R0, Operand(11)); |
679 | __ Bind(&skip); |
680 | __ bx(LR); |
681 | } |
682 | |
683 | ASSEMBLER_TEST_RUN(ForwardBranch, test) { |
684 | EXPECT(test != NULL); |
685 | typedef int (*ForwardBranch)() DART_UNUSED; |
686 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(ForwardBranch, test->entry())); |
687 | } |
688 | |
689 | ASSEMBLER_TEST_GENERATE(Loop2, assembler) { |
690 | Label loop_entry; |
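  // Same loop as Loop above, but assembled with far branches enabled.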
691 | __ set_use_far_branches(true); |
692 | __ mov(R0, Operand(1)); |
693 | __ mov(R1, Operand(2)); |
694 | __ Bind(&loop_entry); |
695 | __ mov(R0, Operand(R0, LSL, 1)); |
696 | __ movs(R1, Operand(R1, LSR, 1)); |
697 | __ b(&loop_entry, NE); |
698 | __ bx(LR); |
699 | } |
700 | |
701 | ASSEMBLER_TEST_RUN(Loop2, test) { |
702 | EXPECT(test != NULL); |
703 | typedef int (*Loop)() DART_UNUSED; |
704 | EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry())); |
705 | } |
706 | |
707 | ASSEMBLER_TEST_GENERATE(Loop3, assembler) { |
708 | Label loop_entry; |
709 | __ set_use_far_branches(true); |
710 | __ mov(R0, Operand(1)); |
711 | __ mov(R1, Operand(2)); |
712 | __ Bind(&loop_entry); |
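  // Pad with 1 << 22 nops so the backward branch spans a very large
  // distance while far branches are enabled.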
713 | for (int i = 0; i < (1 << 22); i++) { |
714 | __ nop(); |
715 | } |
716 | __ mov(R0, Operand(R0, LSL, 1)); |
717 | __ movs(R1, Operand(R1, LSR, 1)); |
718 | __ b(&loop_entry, NE); |
719 | __ bx(LR); |
720 | } |
721 | |
722 | ASSEMBLER_TEST_RUN(Loop3, test) { |
723 | EXPECT(test != NULL); |
724 | typedef int (*Loop)() DART_UNUSED; |
725 | EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry())); |
726 | } |
727 | |
728 | ASSEMBLER_TEST_GENERATE(LoadStore, assembler) { |
729 | __ mov(R1, Operand(123)); |
730 | __ Push(R1); |
731 | __ Pop(R0); |
732 | __ bx(LR); |
733 | } |
734 | |
735 | ASSEMBLER_TEST_RUN(LoadStore, test) { |
736 | EXPECT(test != NULL); |
737 | typedef int (*LoadStore)() DART_UNUSED; |
738 | EXPECT_EQ(123, EXECUTE_TEST_CODE_INT32(LoadStore, test->entry())); |
739 | } |
740 | |
741 | ASSEMBLER_TEST_GENERATE(PushRegisterPair, assembler) { |
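  // PushRegisterPair stores its first argument at the lower address, so
  // the first Pop below returns 12.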
742 | __ mov(R2, Operand(12)); |
743 | __ mov(R3, Operand(21)); |
744 | __ PushRegisterPair(R2, R3); |
745 | __ Pop(R0); |
746 | __ Pop(R1); |
747 | __ bx(LR); |
748 | } |
749 | |
750 | ASSEMBLER_TEST_RUN(PushRegisterPair, test) { |
751 | EXPECT(test != NULL); |
752 | typedef int (*PushRegisterPair)() DART_UNUSED; |
753 | EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32(PushRegisterPair, test->entry())); |
754 | } |
755 | |
756 | ASSEMBLER_TEST_GENERATE(PushRegisterPairReversed, assembler) { |
757 | __ mov(R3, Operand(12)); |
758 | __ mov(R2, Operand(21)); |
759 | __ PushRegisterPair(R3, R2); |
760 | __ Pop(R0); |
761 | __ Pop(R1); |
762 | __ bx(LR); |
763 | } |
764 | |
765 | ASSEMBLER_TEST_RUN(PushRegisterPairReversed, test) { |
766 | EXPECT(test != NULL); |
767 | typedef int (*PushRegisterPairReversed)() DART_UNUSED; |
768 | EXPECT_EQ(12, |
769 | EXECUTE_TEST_CODE_INT32(PushRegisterPairReversed, test->entry())); |
770 | } |
771 | |
772 | ASSEMBLER_TEST_GENERATE(PopRegisterPair, assembler) { |
773 | __ mov(R2, Operand(12)); |
774 | __ mov(R3, Operand(21)); |
775 | __ Push(R3); |
776 | __ Push(R2); |
777 | __ PopRegisterPair(R0, R1); |
778 | __ bx(LR); |
779 | } |
780 | |
781 | ASSEMBLER_TEST_RUN(PopRegisterPair, test) { |
782 | EXPECT(test != NULL); |
783 | typedef int (*PopRegisterPair)() DART_UNUSED; |
784 | EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32(PopRegisterPair, test->entry())); |
785 | } |
786 | |
787 | ASSEMBLER_TEST_GENERATE(PopRegisterPairReversed, assembler) { |
788 | __ mov(R3, Operand(12)); |
789 | __ mov(R2, Operand(21)); |
790 | __ Push(R3); |
791 | __ Push(R2); |
792 | __ PopRegisterPair(R1, R0); |
793 | __ bx(LR); |
794 | } |
795 | |
796 | ASSEMBLER_TEST_RUN(PopRegisterPairReversed, test) { |
797 | EXPECT(test != NULL); |
798 | typedef int (*PopRegisterPairReversed)() DART_UNUSED; |
799 | EXPECT_EQ(12, |
800 | EXECUTE_TEST_CODE_INT32(PopRegisterPairReversed, test->entry())); |
801 | } |
802 | |
803 | ASSEMBLER_TEST_GENERATE(Semaphore, assembler) { |
804 | __ mov(R0, Operand(40)); |
805 | __ mov(R1, Operand(42)); |
806 | __ Push(R0); |
807 | Label retry; |
808 | __ Bind(&retry); |
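  // strex writes 0 to IP on success and 1 if the exclusive monitor was
  // cleared between the ldrex and the strex.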
809 | __ ldrex(R0, SP); |
810 | __ strex(IP, R1, SP); // IP == 0, success |
811 | __ tst(IP, Operand(0)); |
812 | __ b(&retry, NE); // NE if context switch occurred between ldrex and strex. |
813 | __ Pop(R0); // 42 |
814 | __ bx(LR); |
815 | } |
816 | |
817 | ASSEMBLER_TEST_RUN(Semaphore, test) { |
818 | EXPECT(test != NULL); |
819 | typedef int (*Semaphore)() DART_UNUSED; |
820 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Semaphore, test->entry())); |
821 | } |
822 | |
823 | ASSEMBLER_TEST_GENERATE(FailedSemaphore, assembler) { |
824 | __ mov(R0, Operand(40)); |
825 | __ mov(R1, Operand(42)); |
826 | __ Push(R0); |
827 | __ ldrex(R0, SP); |
828 | __ clrex(); // Simulate a context switch. |
829 | __ strex(IP, R1, SP); // IP == 1, failure |
830 | __ Pop(R0); // 40 |
831 | __ add(R0, R0, Operand(IP)); |
832 | __ bx(LR); |
833 | } |
834 | |
835 | ASSEMBLER_TEST_RUN(FailedSemaphore, test) { |
836 | EXPECT(test != NULL); |
837 | typedef int (*FailedSemaphore)() DART_UNUSED; |
838 | EXPECT_EQ(41, EXECUTE_TEST_CODE_INT32(FailedSemaphore, test->entry())); |
839 | } |
840 | |
841 | ASSEMBLER_TEST_GENERATE(AddSub, assembler) { |
842 | __ mov(R1, Operand(40)); |
843 | __ sub(R1, R1, Operand(2)); |
844 | __ add(R0, R1, Operand(4)); |
845 | __ rsbs(R0, R0, Operand(100)); |
846 | __ rsc(R0, R0, Operand(100)); |
847 | __ bx(LR); |
848 | } |
849 | |
850 | ASSEMBLER_TEST_RUN(AddSub, test) { |
851 | EXPECT(test != NULL); |
852 | typedef int (*AddSub)() DART_UNUSED; |
853 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(AddSub, test->entry())); |
854 | } |
855 | |
856 | ASSEMBLER_TEST_GENERATE(AddCarry, assembler) { |
857 | __ LoadImmediate(R2, 0xFFFFFFFF); |
858 | __ mov(R1, Operand(1)); |
859 | __ mov(R0, Operand(0)); |
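  // 0xFFFFFFFF + 1 wraps to 0 and sets the carry flag; adcs then adds
  // that carry into R0.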
860 | __ adds(R2, R2, Operand(R1)); |
861 | __ adcs(R0, R0, Operand(R0)); |
862 | __ bx(LR); |
863 | } |
864 | |
865 | ASSEMBLER_TEST_RUN(AddCarry, test) { |
866 | EXPECT(test != NULL); |
867 | typedef int (*AddCarry)() DART_UNUSED; |
868 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(AddCarry, test->entry())); |
869 | } |
870 | |
871 | ASSEMBLER_TEST_GENERATE(AddCarryInOut, assembler) { |
872 | __ LoadImmediate(R2, 0xFFFFFFFF); |
873 | __ mov(R1, Operand(1)); |
874 | __ mov(R0, Operand(0)); |
875 | __ adds(IP, R2, Operand(R1)); // c_out = 1. |
876 | __ adcs(IP, R2, Operand(R0)); // c_in = 1, c_out = 1. |
877 | __ adc(R0, R0, Operand(R0)); // c_in = 1. |
878 | __ bx(LR); |
879 | } |
880 | |
881 | ASSEMBLER_TEST_RUN(AddCarryInOut, test) { |
882 | EXPECT(test != NULL); |
883 | typedef int (*AddCarryInOut)() DART_UNUSED; |
884 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(AddCarryInOut, test->entry())); |
885 | } |
886 | |
887 | ASSEMBLER_TEST_GENERATE(SubCarry, assembler) { |
888 | __ LoadImmediate(R2, 0x0); |
889 | __ mov(R1, Operand(1)); |
890 | __ mov(R0, Operand(0)); |
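  // 0 - 1 borrows, which clears the ARM carry flag; sbcs R0, R0, R0 then
  // computes 0 - 0 - 1 == -1.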
891 | __ subs(R2, R2, Operand(R1)); |
892 | __ sbcs(R0, R0, Operand(R0)); |
893 | __ bx(LR); |
894 | } |
895 | |
896 | ASSEMBLER_TEST_RUN(SubCarry, test) { |
897 | EXPECT(test != NULL); |
898 | typedef int (*SubCarry)() DART_UNUSED; |
899 | EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SubCarry, test->entry())); |
900 | } |
901 | |
902 | ASSEMBLER_TEST_GENERATE(SubCarryInOut, assembler) { |
903 | __ mov(R1, Operand(1)); |
904 | __ mov(R0, Operand(0)); |
905 | __ subs(IP, R0, Operand(R1)); // c_out = 1. |
906 | __ sbcs(IP, R0, Operand(R0)); // c_in = 1, c_out = 1. |
907 | __ sbc(R0, R0, Operand(R0)); // c_in = 1. |
908 | __ bx(LR); |
909 | } |
910 | |
911 | ASSEMBLER_TEST_RUN(SubCarryInOut, test) { |
912 | EXPECT(test != NULL); |
913 | typedef int (*SubCarryInOut)() DART_UNUSED; |
914 | EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SubCarryInOut, test->entry())); |
915 | } |
916 | |
917 | ASSEMBLER_TEST_GENERATE(Overflow, assembler) { |
918 | __ LoadImmediate(R0, 0xFFFFFFFF); |
919 | __ LoadImmediate(R1, 0x7FFFFFFF); |
920 | __ adds(IP, R0, Operand(1)); // c_out = 1. |
921 | __ adcs(IP, R1, Operand(0)); // c_in = 1, c_out = 1, v = 1. |
922 | __ mov(R0, Operand(1), VS); |
923 | __ bx(LR); |
924 | } |
925 | |
926 | ASSEMBLER_TEST_RUN(Overflow, test) { |
927 | EXPECT(test != NULL); |
928 | typedef int (*Overflow)() DART_UNUSED; |
929 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Overflow, test->entry())); |
930 | } |
931 | |
932 | ASSEMBLER_TEST_GENERATE(AndOrr, assembler) { |
933 | __ mov(R1, Operand(40)); |
934 | __ mov(R2, Operand(0)); |
935 | __ and_(R1, R2, Operand(R1)); |
936 | __ mov(R3, Operand(42)); |
937 | __ orr(R0, R1, Operand(R3)); |
938 | __ bx(LR); |
939 | } |
940 | |
941 | ASSEMBLER_TEST_RUN(AndOrr, test) { |
942 | EXPECT(test != NULL); |
943 | typedef int (*AndOrr)() DART_UNUSED; |
944 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(AndOrr, test->entry())); |
945 | } |
946 | |
947 | ASSEMBLER_TEST_GENERATE(Orrs, assembler) { |
948 | __ mov(R0, Operand(0)); |
949 | __ tst(R0, Operand(R1)); // Set zero-flag. |
950 | __ orrs(R0, R0, Operand(1)); // Clear zero-flag. |
951 | __ bx(LR, EQ); |
952 | __ mov(R0, Operand(42)); |
953 | __ bx(LR, NE); // Only this return should fire. |
954 | __ mov(R0, Operand(2)); |
955 | __ bx(LR); |
956 | } |
957 | |
958 | ASSEMBLER_TEST_RUN(Orrs, test) { |
959 | EXPECT(test != NULL); |
960 | typedef int (*Orrs)() DART_UNUSED; |
961 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Orrs, test->entry())); |
962 | } |
963 | |
964 | ASSEMBLER_TEST_GENERATE(Multiply, assembler) { |
965 | __ mov(R1, Operand(20)); |
966 | __ mov(R2, Operand(40)); |
967 | __ mul(R3, R2, R1); |
968 | __ mov(R0, Operand(R3)); |
969 | __ bx(LR); |
970 | } |
971 | |
972 | ASSEMBLER_TEST_RUN(Multiply, test) { |
973 | EXPECT(test != NULL); |
974 | typedef int (*Multiply)() DART_UNUSED; |
975 | EXPECT_EQ(800, EXECUTE_TEST_CODE_INT32(Multiply, test->entry())); |
976 | } |
977 | |
978 | ASSEMBLER_TEST_GENERATE(QuotientRemainder, assembler) { |
979 | if (TargetCPUFeatures::vfp_supported()) { |
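    // The quotient is computed with VFP double division and truncation;
    // mls then forms the remainder as r0 - (r0 / r2) * r2.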
980 | __ vmovsr(S2, R0); |
981 | __ vmovsr(S4, R2); |
982 | __ vcvtdi(D1, S2); |
983 | __ vcvtdi(D2, S4); |
984 | __ vdivd(D0, D1, D2); |
985 | __ vcvtid(S0, D0); |
986 | __ vmovrs(R1, S0); // r1 = r0/r2 |
987 | __ mls(R0, R1, R2, R0); // r0 = r0 - r1*r2 |
988 | } |
989 | __ bx(LR); |
990 | } |
991 | |
992 | ASSEMBLER_TEST_RUN(QuotientRemainder, test) { |
993 | EXPECT(test != NULL); |
994 | if (TargetCPUFeatures::vfp_supported()) { |
995 | typedef int64_t (*QuotientRemainder)(int64_t dividend, int64_t divisor) |
996 | DART_UNUSED; |
997 | EXPECT_EQ(0x1000400000da8LL, |
998 | EXECUTE_TEST_CODE_INT64_LL(QuotientRemainder, test->entry(), |
999 | 0x12345678, 0x1234)); |
1000 | } |
1001 | } |
1002 | |
1003 | ASSEMBLER_TEST_GENERATE(Multiply64To64, assembler) { |
1004 | __ Push(R4); |
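  // 64x64->64 multiply: umull produces the low words' full product, and
  // the two cross products are folded into the high word.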
1005 | __ mov(IP, Operand(R0)); |
1006 | __ mul(R4, R2, R1); |
1007 | __ umull(R0, R1, R2, IP); |
1008 | __ mla(R2, IP, R3, R4); |
1009 | __ add(R1, R2, Operand(R1)); |
1010 | __ Pop(R4); |
1011 | __ bx(LR); |
1012 | } |
1013 | |
1014 | ASSEMBLER_TEST_RUN(Multiply64To64, test) { |
1015 | EXPECT(test != NULL); |
1016 | typedef int64_t (*Multiply64To64)(int64_t operand0, int64_t operand1) |
1017 | DART_UNUSED; |
1018 | EXPECT_EQ(6, |
1019 | EXECUTE_TEST_CODE_INT64_LL(Multiply64To64, test->entry(), -3, -2)); |
1020 | } |
1021 | |
1022 | ASSEMBLER_TEST_GENERATE(Multiply32To64, assembler) { |
1023 | __ smull(R0, R1, R0, R2); |
1024 | __ bx(LR); |
1025 | } |
1026 | |
1027 | ASSEMBLER_TEST_RUN(Multiply32To64, test) { |
1028 | EXPECT(test != NULL); |
1029 | typedef int64_t (*Multiply32To64)(int64_t operand0, int64_t operand1) |
1030 | DART_UNUSED; |
1031 | EXPECT_EQ(6, |
1032 | EXECUTE_TEST_CODE_INT64_LL(Multiply32To64, test->entry(), -3, -2)); |
1033 | } |
1034 | |
1035 | ASSEMBLER_TEST_GENERATE(MultiplyAccumAccum32To64, assembler) { |
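  // umaal computes R1:R0 = R2 * R3 + R0 + R1 (unsigned multiply with
  // double accumulate).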
1036 | __ umaal(R0, R1, R2, R3); |
1037 | __ bx(LR); |
1038 | } |
1039 | |
1040 | ASSEMBLER_TEST_RUN(MultiplyAccumAccum32To64, test) { |
1041 | EXPECT(test != NULL); |
1042 | typedef int64_t (*MultiplyAccumAccum32To64)(int64_t operand0, |
1043 | int64_t operand1) DART_UNUSED; |
1044 | EXPECT_EQ(3 + 7 + 5 * 11, |
1045 | EXECUTE_TEST_CODE_INT64_LL(MultiplyAccumAccum32To64, test->entry(), |
1046 | (3LL << 32) + 7, (5LL << 32) + 11)); |
1047 | } |
1048 | |
1049 | ASSEMBLER_TEST_GENERATE(Clz, assembler) { |
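  // clz counts leading zero bits: clz(0) == 32, clz(42) == 26, and
  // clz(0xFFFFFFFF) == 0.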
1050 | Label error; |
1051 | |
1052 | __ mov(R0, Operand(0)); |
1053 | __ clz(R1, R0); |
1054 | __ cmp(R1, Operand(32)); |
1055 | __ b(&error, NE); |
1056 | __ mov(R2, Operand(42)); |
1057 | __ clz(R2, R2); |
1058 | __ cmp(R2, Operand(26)); |
1059 | __ b(&error, NE); |
1060 | __ mvn(R0, Operand(0)); |
1061 | __ clz(R1, R0); |
1062 | __ cmp(R1, Operand(0)); |
1063 | __ b(&error, NE); |
1064 | __ Lsr(R0, R0, Operand(3)); |
1065 | __ clz(R1, R0); |
1066 | __ cmp(R1, Operand(3)); |
1067 | __ b(&error, NE); |
1068 | __ mov(R0, Operand(0)); |
1069 | __ bx(LR); |
1070 | __ Bind(&error); |
1071 | __ mov(R0, Operand(1)); |
1072 | __ bx(LR); |
1073 | } |
1074 | |
1075 | ASSEMBLER_TEST_RUN(Clz, test) { |
1076 | EXPECT(test != NULL); |
1077 | typedef int (*Clz)() DART_UNUSED; |
1078 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Clz, test->entry())); |
1079 | } |
1080 | |
1081 | ASSEMBLER_TEST_GENERATE(Rbit, assembler) { |
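  // rbit reverses the bit order: 0x15 (0b10101) becomes 0xA8000000.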
1082 | __ mov(R0, Operand(0x15)); |
1083 | __ rbit(R0, R0); |
1084 | __ bx(LR); |
1085 | } |
1086 | |
1087 | ASSEMBLER_TEST_RUN(Rbit, test) { |
1088 | EXPECT(test != NULL); |
1089 | typedef int (*Rbit)() DART_UNUSED; |
1090 | const int32_t expected = 0xa8000000; |
1091 | EXPECT_EQ(expected, EXECUTE_TEST_CODE_INT32(Rbit, test->entry())); |
1092 | } |
1093 | |
1094 | ASSEMBLER_TEST_GENERATE(Tst, assembler) { |
1095 | Label skip; |
1096 | |
1097 | __ mov(R0, Operand(42)); |
1098 | __ mov(R1, Operand(40)); |
1099 | __ tst(R1, Operand(0)); |
1100 | __ b(&skip, NE); |
1101 | __ mov(R0, Operand(0)); |
1102 | __ Bind(&skip); |
1103 | __ bx(LR); |
1104 | } |
1105 | |
1106 | ASSEMBLER_TEST_RUN(Tst, test) { |
1107 | EXPECT(test != NULL); |
1108 | typedef int (*Tst)() DART_UNUSED; |
1109 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1110 | } |
1111 | |
1112 | ASSEMBLER_TEST_GENERATE(Lsl, assembler) { |
1113 | Label skip; |
1114 | |
1115 | __ mov(R0, Operand(1)); |
1116 | __ mov(R0, Operand(R0, LSL, 1)); |
1117 | __ mov(R1, Operand(1)); |
1118 | __ mov(R0, Operand(R0, LSL, R1)); |
1119 | __ bx(LR); |
1120 | } |
1121 | |
1122 | ASSEMBLER_TEST_RUN(Lsl, test) { |
1123 | EXPECT(test != NULL); |
1124 | typedef int (*Tst)() DART_UNUSED; |
1125 | EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1126 | } |
1127 | |
1128 | ASSEMBLER_TEST_GENERATE(Lsr, assembler) { |
1129 | Label skip; |
1130 | |
1131 | __ mov(R0, Operand(4)); |
1132 | __ mov(R0, Operand(R0, LSR, 1)); |
1133 | __ mov(R1, Operand(1)); |
1134 | __ mov(R0, Operand(R0, LSR, R1)); |
1135 | __ bx(LR); |
1136 | } |
1137 | |
1138 | ASSEMBLER_TEST_RUN(Lsr, test) { |
1139 | EXPECT(test != NULL); |
1140 | typedef int (*Tst)() DART_UNUSED; |
1141 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1142 | } |
1143 | |
1144 | ASSEMBLER_TEST_GENERATE(Lsr1, assembler) { |
1145 | Label skip; |
1146 | |
1147 | __ mov(R0, Operand(1)); |
1148 | __ Lsl(R0, R0, Operand(31)); |
1149 | __ Lsr(R0, R0, Operand(31)); |
1150 | __ bx(LR); |
1151 | } |
1152 | |
1153 | ASSEMBLER_TEST_RUN(Lsr1, test) { |
1154 | EXPECT(test != NULL); |
1155 | typedef int (*Tst)() DART_UNUSED; |
1156 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1157 | } |
1158 | |
1159 | ASSEMBLER_TEST_GENERATE(Asr1, assembler) { |
1160 | Label skip; |
1161 | |
1162 | __ mov(R0, Operand(1)); |
1163 | __ Lsl(R0, R0, Operand(31)); |
1164 | __ Asr(R0, R0, Operand(31)); |
1165 | __ bx(LR); |
1166 | } |
1167 | |
1168 | ASSEMBLER_TEST_RUN(Asr1, test) { |
1169 | EXPECT(test != NULL); |
1170 | typedef int (*Tst)() DART_UNUSED; |
1171 | EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1172 | } |
1173 | |
1174 | ASSEMBLER_TEST_GENERATE(Rsb, assembler) { |
1175 | __ mov(R3, Operand(10)); |
1176 | __ rsb(R0, R3, Operand(42)); |
1177 | __ bx(LR); |
1178 | } |
1179 | |
1180 | ASSEMBLER_TEST_RUN(Rsb, test) { |
1181 | EXPECT(test != NULL); |
1182 | typedef int (*Rsb)() DART_UNUSED; |
1183 | EXPECT_EQ(32, EXECUTE_TEST_CODE_INT32(Rsb, test->entry())); |
1184 | } |
1185 | |
1186 | ASSEMBLER_TEST_GENERATE(Ldrh, assembler) { |
1187 | Label Test1, Test2, Test3, Done; |
1188 | |
1189 | __ mov(R1, Operand(0x11)); |
1190 | __ mov(R2, Operand(SP)); |
1191 | __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
1192 | __ ldrh(R0, Address(R2, (-target::kWordSize * 30))); |
1193 | __ cmp(R0, Operand(0x11)); |
1194 | __ b(&Test1, EQ); |
1195 | __ mov(R0, Operand(1)); |
1196 | __ b(&Done); |
1197 | __ Bind(&Test1); |
1198 | |
1199 | __ mov(R0, Operand(0x22)); |
1200 | __ strh(R0, Address(R2, (-target::kWordSize * 30))); |
1201 | __ ldrh(R1, Address(R2, (-target::kWordSize * 30))); |
1202 | __ cmp(R1, Operand(0x22)); |
1203 | __ b(&Test2, EQ); |
1204 | __ mov(R0, Operand(1)); |
1205 | __ b(&Done); |
1206 | __ Bind(&Test2); |
1207 | |
1208 | __ mov(R0, Operand(0)); |
1209 | __ AddImmediate(R2, (-target::kWordSize * 30)); |
1210 | __ strh(R0, Address(R2)); |
1211 | __ ldrh(R1, Address(R2)); |
1212 | __ cmp(R1, Operand(0)); |
1213 | __ b(&Test3, EQ); |
1214 | __ mov(R0, Operand(1)); |
1215 | __ b(&Done); |
1216 | __ Bind(&Test3); |
1217 | |
1218 | __ mov(R0, Operand(0)); |
1219 | __ Bind(&Done); |
1220 | __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
1221 | __ bx(LR); |
1222 | } |
1223 | |
1224 | ASSEMBLER_TEST_RUN(Ldrh, test) { |
1225 | EXPECT(test != NULL); |
1226 | typedef int (*Tst)() DART_UNUSED; |
1227 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1228 | } |
1229 | |
1230 | ASSEMBLER_TEST_GENERATE(Ldrsb, assembler) { |
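  // ldrsb sign-extends the loaded byte, so the stored 0xFF reads back as -1.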
1231 | __ mov(R1, Operand(0xFF)); |
1232 | __ mov(R2, Operand(SP)); |
1233 | __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
1234 | __ ldrsb(R0, Address(R2, (-target::kWordSize * 30))); |
1235 | __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
1236 | __ bx(LR); |
1237 | } |
1238 | |
1239 | ASSEMBLER_TEST_RUN(Ldrsb, test) { |
1240 | EXPECT(test != NULL); |
1241 | typedef int (*Tst)() DART_UNUSED; |
1242 | EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1243 | } |
1244 | |
1245 | ASSEMBLER_TEST_GENERATE(Ldrb, assembler) { |
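  // ldrb zero-extends the loaded byte, so the result is 0xFF.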
1246 | __ mov(R1, Operand(0xFF)); |
1247 | __ mov(R2, Operand(SP)); |
1248 | __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
1249 | __ ldrb(R0, Address(R2, (-target::kWordSize * 30))); |
1250 | __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
1251 | __ bx(LR); |
1252 | } |
1253 | |
1254 | ASSEMBLER_TEST_RUN(Ldrb, test) { |
1255 | EXPECT(test != NULL); |
1256 | typedef int (*Tst)() DART_UNUSED; |
1257 | EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1258 | } |
1259 | |
1260 | ASSEMBLER_TEST_GENERATE(Ldrsh, assembler) { |
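  // The stored word is 0x000000FF, so the sign-extending halfword load
  // still yields 0xFF.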
1261 | __ mov(R1, Operand(0xFF)); |
1262 | __ mov(R2, Operand(SP)); |
1263 | __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
1264 | __ ldrsh(R0, Address(R2, (-target::kWordSize * 30))); |
1265 | __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
1266 | __ bx(LR); |
1267 | } |
1268 | |
1269 | ASSEMBLER_TEST_RUN(Ldrsh, test) { |
1270 | EXPECT(test != NULL); |
1271 | typedef int (*Tst)() DART_UNUSED; |
1272 | EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1273 | } |
1274 | |
1275 | ASSEMBLER_TEST_GENERATE(Ldrh1, assembler) { |
1276 | __ mov(R1, Operand(0xFF)); |
1277 | __ mov(R2, Operand(SP)); |
1278 | __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
1279 | __ ldrh(R0, Address(R2, (-target::kWordSize * 30))); |
1280 | __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
1281 | __ bx(LR); |
1282 | } |
1283 | |
1284 | ASSEMBLER_TEST_RUN(Ldrh1, test) { |
1285 | EXPECT(test != NULL); |
1286 | typedef int (*Tst)() DART_UNUSED; |
1287 | EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1288 | } |
1289 | |
1290 | ASSEMBLER_TEST_GENERATE(Ldrd, assembler) { |
1291 | __ mov(IP, Operand(SP)); |
1292 | __ sub(SP, SP, Operand(target::kWordSize * 30)); |
1293 | __ strd(R2, R3, SP, 0); |
1294 | __ strd(R0, R1, IP, (-target::kWordSize * 28)); |
1295 | __ ldrd(R2, R3, IP, (-target::kWordSize * 28)); |
1296 | __ ldrd(R0, R1, SP, 0); |
1297 | __ add(SP, SP, Operand(target::kWordSize * 30)); |
1298 | __ sub(R0, R0, Operand(R2)); |
1299 | __ add(R1, R1, Operand(R3)); |
1300 | __ bx(LR); |
1301 | } |
1302 | |
1303 | ASSEMBLER_TEST_RUN(Ldrd, test) { |
1304 | EXPECT(test != NULL); |
1305 | typedef int64_t (*Tst)(int64_t r0r1, int64_t r2r3) DART_UNUSED; |
1306 | EXPECT_EQ(0x0000444400002222LL, |
1307 | EXECUTE_TEST_CODE_INT64_LL(Tst, test->entry(), 0x0000111100000000LL, |
1308 | 0x0000333300002222LL)); |
1309 | } |
1310 | |
1311 | ASSEMBLER_TEST_GENERATE(Ldm_stm_da, assembler) { |
1312 | __ mov(R0, Operand(1)); |
1313 | __ mov(R1, Operand(7)); |
1314 | __ mov(R2, Operand(11)); |
1315 | __ mov(R3, Operand(31)); |
1316 | __ Push(R9); // We use R9 as accumulator. |
1317 | __ Push(R9); |
1318 | __ Push(R9); |
1319 | __ Push(R9); |
1320 | __ Push(R9); |
1321 | __ Push(R0); // Make room, so we can decrement after. |
1322 | __ stm(DA_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3)); |
1323 | __ str(R2, Address(SP)); // Should be a free slot. |
1324 | __ ldr(R9, Address(SP, 1 * target::kWordSize)); // R0. R9 = +1. |
1325 | __ ldr(IP, Address(SP, 2 * target::kWordSize)); // R1. |
1326 | __ sub(R9, R9, Operand(IP)); // -R1. R9 = -6. |
1327 | __ ldr(IP, Address(SP, 3 * target::kWordSize)); // R2. |
1328 | __ add(R9, R9, Operand(IP)); // +R2. R9 = +5. |
1329 | __ ldr(IP, Address(SP, 4 * target::kWordSize)); // R3. |
1330 | __ sub(R9, R9, Operand(IP)); // -R3. R9 = -26. |
1331 | __ ldm(IB_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3)); |
  // Same operations again, but this time from the restored registers.
1333 | __ add(R9, R9, Operand(R0)); |
1334 | __ sub(R9, R9, Operand(R1)); |
1335 | __ add(R9, R9, Operand(R2)); |
1336 | __ sub(R0, R9, Operand(R3)); // R0 = result = -52. |
1337 | __ Pop(R1); // Remove storage slot. |
1338 | __ Pop(R9); // Restore R9. |
1339 | __ Pop(R9); // Restore R9. |
1340 | __ Pop(R9); // Restore R9. |
1341 | __ Pop(R9); // Restore R9. |
1342 | __ Pop(R9); // Restore R9. |
1343 | __ bx(LR); |
1344 | } |
1345 | |
1346 | ASSEMBLER_TEST_RUN(Ldm_stm_da, test) { |
1347 | EXPECT(test != NULL); |
1348 | typedef int (*Tst)() DART_UNUSED; |
1349 | EXPECT_EQ(-52, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1350 | } |
1351 | |
1352 | ASSEMBLER_TEST_GENERATE(AddressShiftStrLSL1NegOffset, assembler) { |
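  // A register offset scaled by LSL #1 addresses SP - 2 * kWordSize,
  // matching the fixed negative offset used by the load below.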
1353 | __ mov(R2, Operand(42)); |
1354 | __ mov(R1, Operand(target::kWordSize)); |
1355 | __ str(R2, Address(SP, R1, LSL, 1, Address::NegOffset)); |
1356 | __ ldr(R0, Address(SP, (-target::kWordSize * 2), Address::Offset)); |
1357 | __ bx(LR); |
1358 | } |
1359 | |
1360 | ASSEMBLER_TEST_RUN(AddressShiftStrLSL1NegOffset, test) { |
1361 | EXPECT(test != NULL); |
1362 | typedef int (*Tst)() DART_UNUSED; |
1363 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1364 | } |
1365 | |
1366 | ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSL5NegOffset, assembler) { |
1367 | __ mov(R2, Operand(42)); |
1368 | __ mov(R1, Operand(target::kWordSize)); |
1369 | __ str(R2, Address(SP, (-target::kWordSize * 32), Address::Offset)); |
1370 | __ ldr(R0, Address(SP, R1, LSL, 5, Address::NegOffset)); |
1371 | __ bx(LR); |
1372 | } |
1373 | |
1374 | ASSEMBLER_TEST_RUN(AddressShiftLdrLSL5NegOffset, test) { |
1375 | EXPECT(test != NULL); |
1376 | typedef int (*Tst)() DART_UNUSED; |
1377 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1378 | } |
1379 | |
1380 | ASSEMBLER_TEST_GENERATE(AddressShiftStrLRS1NegOffset, assembler) { |
1381 | __ mov(R2, Operand(42)); |
1382 | __ mov(R1, Operand(target::kWordSize * 2)); |
1383 | __ str(R2, Address(SP, R1, LSR, 1, Address::NegOffset)); |
1384 | __ ldr(R0, Address(SP, -target::kWordSize, Address::Offset)); |
1385 | __ bx(LR); |
1386 | } |
1387 | |
1388 | ASSEMBLER_TEST_RUN(AddressShiftStrLRS1NegOffset, test) { |
1389 | EXPECT(test != NULL); |
1390 | typedef int (*Tst)() DART_UNUSED; |
1391 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1392 | } |
1393 | |
1394 | ASSEMBLER_TEST_GENERATE(AddressShiftLdrLRS1NegOffset, assembler) { |
1395 | __ mov(R2, Operand(42)); |
1396 | __ mov(R1, Operand(target::kWordSize * 2)); |
1397 | __ str(R2, Address(SP, -target::kWordSize, Address::Offset)); |
1398 | __ ldr(R0, Address(SP, R1, LSR, 1, Address::NegOffset)); |
1399 | __ bx(LR); |
1400 | } |
1401 | |
1402 | ASSEMBLER_TEST_RUN(AddressShiftLdrLRS1NegOffset, test) { |
1403 | EXPECT(test != NULL); |
1404 | typedef int (*Tst)() DART_UNUSED; |
1405 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1406 | } |
1407 | |
1408 | ASSEMBLER_TEST_GENERATE(AddressShiftStrLSLNegPreIndex, assembler) { |
1409 | __ mov(R2, Operand(42)); |
1410 | __ mov(R1, Operand(target::kWordSize)); |
1411 | __ mov(R3, Operand(SP)); |
1412 | __ str(R2, Address(SP, R1, LSL, 5, Address::NegPreIndex)); |
1413 | __ ldr(R0, Address(R3, (-target::kWordSize * 32), Address::Offset)); |
1414 | __ mov(SP, Operand(R3)); |
1415 | __ bx(LR); |
1416 | } |
1417 | |
1418 | ASSEMBLER_TEST_RUN(AddressShiftStrLSLNegPreIndex, test) { |
1419 | EXPECT(test != NULL); |
1420 | typedef int (*Tst)() DART_UNUSED; |
1421 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1422 | } |
1423 | |
1424 | ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSLNegPreIndex, assembler) { |
1425 | __ mov(R2, Operand(42)); |
1426 | __ mov(R1, Operand(target::kWordSize)); |
1427 | __ str(R2, Address(SP, (-target::kWordSize * 32), Address::PreIndex)); |
1428 | __ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex)); |
1429 | __ bx(LR); |
1430 | } |
1431 | |
1432 | ASSEMBLER_TEST_RUN(AddressShiftLdrLSLNegPreIndex, test) { |
1433 | EXPECT(test != NULL); |
1434 | typedef int (*Tst)() DART_UNUSED; |
1435 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1436 | } |
1437 | |
1438 | // Make sure we can store and reload the D registers using vstmd and vldmd |
1439 | ASSEMBLER_TEST_GENERATE(VstmdVldmd, assembler) { |
1440 | if (TargetCPUFeatures::vfp_supported()) { |
1441 | __ LoadDImmediate(D0, 0.0, R0); |
1442 | __ LoadDImmediate(D1, 1.0, R0); |
1443 | __ LoadDImmediate(D2, 2.0, R0); |
1444 | __ LoadDImmediate(D3, 3.0, R0); |
1445 | __ LoadDImmediate(D4, 4.0, R0); |
1446 | __ vstmd(DB_W, SP, D0, 5); // Push D0 - D4 onto the stack, dec SP |
1447 | __ LoadDImmediate(D0, 0.0, R0); |
1448 | __ LoadDImmediate(D1, 0.0, R0); |
1449 | __ LoadDImmediate(D2, 0.0, R0); |
1450 | __ LoadDImmediate(D3, 0.0, R0); |
1451 | __ LoadDImmediate(D4, 0.0, R0); |
1452 | __ vldmd(IA_W, SP, D0, 5); // Pop stack into D0 - D4, inc SP |
1453 | |
1454 | // Load success value into R0 |
1455 | __ mov(R0, Operand(42)); |
1456 | |
1457 | // Check that 4.0 is back in D4 |
1458 | __ LoadDImmediate(D5, 4.0, R1); |
1459 | __ vcmpd(D4, D5); |
1460 | __ vmstat(); |
1461 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1462 | |
1463 | // Check that 3.0 is back in D3 |
1464 | __ LoadDImmediate(D5, 3.0, R1); |
1465 | __ vcmpd(D3, D5); |
1466 | __ vmstat(); |
1467 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1468 | |
1469 | // Check that 2.0 is back in D2 |
1470 | __ LoadDImmediate(D5, 2.0, R1); |
1471 | __ vcmpd(D2, D5); |
1472 | __ vmstat(); |
1473 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1474 | |
1475 | // Check that 1.0 is back in D1 |
1476 | __ LoadDImmediate(D5, 1.0, R1); |
1477 | __ vcmpd(D1, D5); |
1478 | __ vmstat(); |
1479 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1480 | } |
1481 | __ bx(LR); |
1482 | } |
1483 | |
1484 | ASSEMBLER_TEST_RUN(VstmdVldmd, test) { |
1485 | EXPECT(test != NULL); |
1486 | if (TargetCPUFeatures::vfp_supported()) { |
1487 | typedef int (*Tst)() DART_UNUSED; |
1488 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1489 | } |
1490 | } |
1491 | |
1492 | // Make sure we can store and reload the S registers using vstms and vldms |
1493 | ASSEMBLER_TEST_GENERATE(VstmsVldms, assembler) { |
1494 | if (TargetCPUFeatures::vfp_supported()) { |
1495 | __ LoadSImmediate(S0, 0.0); |
1496 | __ LoadSImmediate(S1, 1.0); |
1497 | __ LoadSImmediate(S2, 2.0); |
1498 | __ LoadSImmediate(S3, 3.0); |
1499 | __ LoadSImmediate(S4, 4.0); |
1500 | __ vstms(DB_W, SP, S0, S4); // Push S0 - S4 onto the stack, dec SP |
1501 | __ LoadSImmediate(S0, 0.0); |
1502 | __ LoadSImmediate(S1, 0.0); |
1503 | __ LoadSImmediate(S2, 0.0); |
1504 | __ LoadSImmediate(S3, 0.0); |
1505 | __ LoadSImmediate(S4, 0.0); |
1506 | __ vldms(IA_W, SP, S0, S4); // Pop stack into S0 - S4, inc SP |
1507 | |
1508 | // Load success value into R0 |
1509 | __ mov(R0, Operand(42)); |
1510 | |
1511 | // Check that 4.0 is back in S4 |
1512 | __ LoadSImmediate(S5, 4.0); |
1513 | __ vcmps(S4, S5); |
1514 | __ vmstat(); |
1515 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1516 | |
1517 | // Check that 3.0 is back in S3 |
1518 | __ LoadSImmediate(S5, 3.0); |
1519 | __ vcmps(S3, S5); |
1520 | __ vmstat(); |
1521 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1522 | |
1523 | // Check that 2.0 is back in S2 |
1524 | __ LoadSImmediate(S5, 2.0); |
1525 | __ vcmps(S2, S5); |
1526 | __ vmstat(); |
1527 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1528 | |
1529 | // Check that 1.0 is back in S1 |
1530 | __ LoadSImmediate(S5, 1.0); |
1531 | __ vcmps(S1, S5); |
1532 | __ vmstat(); |
1533 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1534 | } |
1535 | __ bx(LR); |
1536 | } |
1537 | |
1538 | ASSEMBLER_TEST_RUN(VstmsVldms, test) { |
1539 | EXPECT(test != NULL); |
1540 | if (TargetCPUFeatures::vfp_supported()) { |
1541 | typedef int (*Tst)() DART_UNUSED; |
1542 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1543 | } |
1544 | } |
1545 | |
1546 | // Make sure we can start somewhere other than D0 |
1547 | ASSEMBLER_TEST_GENERATE(VstmdVldmd1, assembler) { |
1548 | if (TargetCPUFeatures::vfp_supported()) { |
1549 | __ LoadDImmediate(D1, 1.0, R0); |
1550 | __ LoadDImmediate(D2, 2.0, R0); |
1551 | __ LoadDImmediate(D3, 3.0, R0); |
1552 | __ LoadDImmediate(D4, 4.0, R0); |
1553 | __ vstmd(DB_W, SP, D1, 4); // Push D1 - D4 onto the stack, dec SP |
1554 | __ LoadDImmediate(D1, 0.0, R0); |
1555 | __ LoadDImmediate(D2, 0.0, R0); |
1556 | __ LoadDImmediate(D3, 0.0, R0); |
1557 | __ LoadDImmediate(D4, 0.0, R0); |
1558 | __ vldmd(IA_W, SP, D1, 4); // Pop stack into D1 - D4, inc SP |
1559 | |
1560 | // Load success value into R0 |
1561 | __ mov(R0, Operand(42)); |
1562 | |
1563 | // Check that 4.0 is back in D4 |
1564 | __ LoadDImmediate(D5, 4.0, R1); |
1565 | __ vcmpd(D4, D5); |
1566 | __ vmstat(); |
1567 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1568 | |
1569 | // Check that 3.0 is back in D3 |
1570 | __ LoadDImmediate(D5, 3.0, R1); |
1571 | __ vcmpd(D3, D5); |
1572 | __ vmstat(); |
1573 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1574 | |
1575 | // Check that 2.0 is back in D2 |
1576 | __ LoadDImmediate(D5, 2.0, R1); |
1577 | __ vcmpd(D2, D5); |
1578 | __ vmstat(); |
1579 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1580 | |
1581 | // Check that 1.0 is back in D1 |
1582 | __ LoadDImmediate(D5, 1.0, R1); |
1583 | __ vcmpd(D1, D5); |
1584 | __ vmstat(); |
1585 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1586 | } |
1587 | __ bx(LR); |
1588 | } |
1589 | |
1590 | ASSEMBLER_TEST_RUN(VstmdVldmd1, test) { |
1591 | EXPECT(test != NULL); |
1592 | if (TargetCPUFeatures::vfp_supported()) { |
1593 | typedef int (*Tst)() DART_UNUSED; |
1594 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1595 | } |
1596 | } |
1597 | |
1598 | // Make sure we can start somewhere other than S0 |
1599 | ASSEMBLER_TEST_GENERATE(VstmsVldms1, assembler) { |
1600 | if (TargetCPUFeatures::vfp_supported()) { |
1601 | __ LoadSImmediate(S1, 1.0); |
1602 | __ LoadSImmediate(S2, 2.0); |
1603 | __ LoadSImmediate(S3, 3.0); |
1604 | __ LoadSImmediate(S4, 4.0); |
    __ vstms(DB_W, SP, S1, S4); // Push S1 - S4 onto the stack, dec SP
1606 | __ LoadSImmediate(S1, 0.0); |
1607 | __ LoadSImmediate(S2, 0.0); |
1608 | __ LoadSImmediate(S3, 0.0); |
1609 | __ LoadSImmediate(S4, 0.0); |
    __ vldms(IA_W, SP, S1, S4); // Pop stack into S1 - S4, inc SP
1611 | |
1612 | // Load success value into R0 |
1613 | __ mov(R0, Operand(42)); |
1614 | |
1615 | // Check that 4.0 is back in S4 |
1616 | __ LoadSImmediate(S5, 4.0); |
1617 | __ vcmps(S4, S5); |
1618 | __ vmstat(); |
1619 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1620 | |
1621 | // Check that 3.0 is back in S3 |
1622 | __ LoadSImmediate(S5, 3.0); |
1623 | __ vcmps(S3, S5); |
1624 | __ vmstat(); |
1625 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1626 | |
1627 | // Check that 2.0 is back in S2 |
1628 | __ LoadSImmediate(S5, 2.0); |
1629 | __ vcmps(S2, S5); |
1630 | __ vmstat(); |
1631 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1632 | |
1633 | // Check that 1.0 is back in S1 |
1634 | __ LoadSImmediate(S5, 1.0); |
1635 | __ vcmps(S1, S5); |
1636 | __ vmstat(); |
1637 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1638 | } |
1639 | __ bx(LR); |
1640 | } |
1641 | |
1642 | ASSEMBLER_TEST_RUN(VstmsVldms1, test) { |
1643 | EXPECT(test != NULL); |
1644 | if (TargetCPUFeatures::vfp_supported()) { |
1645 | typedef int (*Tst)() DART_UNUSED; |
1646 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1647 | } |
1648 | } |
1649 | |
1650 | // Make sure we can store the D registers using vstmd and |
1651 | // load them into a different set using vldmd |
1652 | ASSEMBLER_TEST_GENERATE(VstmdVldmd_off, assembler) { |
1653 | if (TargetCPUFeatures::vfp_supported()) { |
1654 | // Save used callee-saved FPU registers. |
1655 | __ vstmd(DB_W, SP, D8, 3); |
1656 | __ LoadDImmediate(D0, 0.0, R0); |
1657 | __ LoadDImmediate(D1, 1.0, R0); |
1658 | __ LoadDImmediate(D2, 2.0, R0); |
1659 | __ LoadDImmediate(D3, 3.0, R0); |
1660 | __ LoadDImmediate(D4, 4.0, R0); |
1661 | __ LoadDImmediate(D5, 5.0, R0); |
1662 | __ vstmd(DB_W, SP, D0, 5); // Push D0 - D4 onto the stack, dec SP |
1663 | __ vldmd(IA_W, SP, D5, 5); // Pop stack into D5 - D9, inc SP |
1664 | |
1665 | // Load success value into R0 |
1666 | __ mov(R0, Operand(42)); |
1667 | |
1668 | // Check that 4.0 is in D9 |
1669 | __ LoadDImmediate(D10, 4.0, R1); |
1670 | __ vcmpd(D9, D10); |
1671 | __ vmstat(); |
1672 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1673 | |
1674 | // Check that 3.0 is in D8 |
1675 | __ LoadDImmediate(D10, 3.0, R1); |
1676 | __ vcmpd(D8, D10); |
1677 | __ vmstat(); |
1678 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1679 | |
1680 | // Check that 2.0 is in D7 |
1681 | __ LoadDImmediate(D10, 2.0, R1); |
1682 | __ vcmpd(D7, D10); |
1683 | __ vmstat(); |
1684 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1685 | |
1686 | // Check that 1.0 is in D6 |
1687 | __ LoadDImmediate(D10, 1.0, R1); |
1688 | __ vcmpd(D6, D10); |
1689 | __ vmstat(); |
1690 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1691 | |
1692 | // Check that 0.0 is in D5 |
1693 | __ LoadDImmediate(D10, 0.0, R1); |
1694 | __ vcmpd(D5, D10); |
1695 | __ vmstat(); |
1696 | __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
1697 | |
1698 | // Restore used callee-saved FPU registers. |
1699 | __ vldmd(IA_W, SP, D8, 3); |
1700 | } |
1701 | __ bx(LR); |
1702 | } |
1703 | |
1704 | ASSEMBLER_TEST_RUN(VstmdVldmd_off, test) { |
1705 | EXPECT(test != NULL); |
1706 | if (TargetCPUFeatures::vfp_supported()) { |
1707 | typedef int (*Tst)() DART_UNUSED; |
1708 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1709 | } |
1710 | } |
1711 | |
// Make sure we can store the S registers using vstms and
// load them into a different set using vldms
1713 | ASSEMBLER_TEST_GENERATE(VstmsVldms_off, assembler) { |
1714 | if (TargetCPUFeatures::vfp_supported()) { |
1715 | __ LoadSImmediate(S0, 0.0); |
1716 | __ LoadSImmediate(S1, 1.0); |
1717 | __ LoadSImmediate(S2, 2.0); |
1718 | __ LoadSImmediate(S3, 3.0); |
1719 | __ LoadSImmediate(S4, 4.0); |
1720 | __ LoadSImmediate(S5, 5.0); |
1721 | __ vstms(DB_W, SP, S0, S4); // Push S0 - S4 onto the stack, dec SP |
1722 | __ vldms(IA_W, SP, S5, S9); // Pop stack into S5 - S9, inc SP |
1723 | |
1724 | // Load success value into R0 |
1725 | __ mov(R0, Operand(42)); |
1726 | |
1727 | // Check that 4.0 is in S9 |
1728 | __ LoadSImmediate(S10, 4.0); |
1729 | __ vcmps(S9, S10); |
1730 | __ vmstat(); |
1731 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1732 | |
1733 | // Check that 3.0 is in S8 |
1734 | __ LoadSImmediate(S10, 3.0); |
1735 | __ vcmps(S8, S10); |
1736 | __ vmstat(); |
1737 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1738 | |
1739 | // Check that 2.0 is in S7 |
1740 | __ LoadSImmediate(S10, 2.0); |
1741 | __ vcmps(S7, S10); |
1742 | __ vmstat(); |
1743 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1744 | |
    // Check that 1.0 is in S6
1746 | __ LoadSImmediate(S10, 1.0); |
1747 | __ vcmps(S6, S10); |
1748 | __ vmstat(); |
1749 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1750 | |
    // Check that 0.0 is in S5
1752 | __ LoadSImmediate(S10, 0.0); |
1753 | __ vcmps(S5, S10); |
1754 | __ vmstat(); |
1755 | __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
1756 | } |
1757 | __ bx(LR); |
1758 | } |
1759 | |
1760 | ASSEMBLER_TEST_RUN(VstmsVldms_off, test) { |
1761 | EXPECT(test != NULL); |
1762 | if (TargetCPUFeatures::vfp_supported()) { |
1763 | typedef int (*Tst)() DART_UNUSED; |
1764 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1765 | } |
1766 | } |
1767 | |
1768 | ASSEMBLER_TEST_GENERATE(Udiv, assembler) { |
1769 | if (TargetCPUFeatures::integer_division_supported()) { |
1770 | __ mov(R0, Operand(27)); |
1771 | __ mov(R1, Operand(9)); |
1772 | __ udiv(R2, R0, R1); |
1773 | __ mov(R0, Operand(R2)); |
1774 | } |
1775 | __ bx(LR); |
1776 | } |
1777 | |
1778 | ASSEMBLER_TEST_RUN(Udiv, test) { |
1779 | EXPECT(test != NULL); |
1780 | if (TargetCPUFeatures::integer_division_supported()) { |
1781 | typedef int (*Tst)() DART_UNUSED; |
1782 | EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1783 | } |
1784 | } |
1785 | |
1786 | ASSEMBLER_TEST_GENERATE(Sdiv, assembler) { |
1787 | if (TargetCPUFeatures::integer_division_supported()) { |
1788 | __ mov(R0, Operand(27)); |
1789 | __ LoadImmediate(R1, -9); |
1790 | __ sdiv(R2, R0, R1); |
1791 | __ mov(R0, Operand(R2)); |
1792 | } |
1793 | __ bx(LR); |
1794 | } |
1795 | |
1796 | ASSEMBLER_TEST_RUN(Sdiv, test) { |
1797 | EXPECT(test != NULL); |
1798 | if (TargetCPUFeatures::integer_division_supported()) { |
1799 | typedef int (*Tst)() DART_UNUSED; |
1800 | EXPECT_EQ(-3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1801 | } |
1802 | } |
1803 | |
1804 | ASSEMBLER_TEST_GENERATE(Udiv_zero, assembler) { |
1805 | if (TargetCPUFeatures::integer_division_supported()) { |
1806 | __ mov(R0, Operand(27)); |
1807 | __ mov(R1, Operand(0)); |
1808 | __ udiv(R2, R0, R1); |
1809 | __ mov(R0, Operand(R2)); |
1810 | } |
1811 | __ bx(LR); |
1812 | } |
1813 | |
1814 | ASSEMBLER_TEST_RUN(Udiv_zero, test) { |
1815 | EXPECT(test != NULL); |
1816 | if (TargetCPUFeatures::integer_division_supported()) { |
1817 | typedef int (*Tst)() DART_UNUSED; |
1818 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1819 | } |
1820 | } |
1821 | |
1822 | ASSEMBLER_TEST_GENERATE(Sdiv_zero, assembler) { |
1823 | if (TargetCPUFeatures::integer_division_supported()) { |
1824 | __ mov(R0, Operand(27)); |
1825 | __ mov(R1, Operand(0)); |
1826 | __ sdiv(R2, R0, R1); |
1827 | __ mov(R0, Operand(R2)); |
1828 | } |
1829 | __ bx(LR); |
1830 | } |
1831 | |
1832 | ASSEMBLER_TEST_RUN(Sdiv_zero, test) { |
1833 | EXPECT(test != NULL); |
1834 | if (TargetCPUFeatures::integer_division_supported()) { |
1835 | typedef int (*Tst)() DART_UNUSED; |
1836 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1837 | } |
1838 | } |
1839 | |
1840 | ASSEMBLER_TEST_GENERATE(Udiv_corner, assembler) { |
1841 | if (TargetCPUFeatures::integer_division_supported()) { |
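    // Unsigned division: 0x80000000 / 0xffffffff is 0.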
1842 | __ LoadImmediate(R0, 0x80000000); |
1843 | __ LoadImmediate(R1, 0xffffffff); |
1844 | __ udiv(R2, R0, R1); |
1845 | __ mov(R0, Operand(R2)); |
1846 | } |
1847 | __ bx(LR); |
1848 | } |
1849 | |
1850 | ASSEMBLER_TEST_RUN(Udiv_corner, test) { |
1851 | EXPECT(test != NULL); |
1852 | if (TargetCPUFeatures::integer_division_supported()) { |
1853 | typedef int (*Tst)() DART_UNUSED; |
1854 | EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1855 | } |
1856 | } |
1857 | |
1858 | ASSEMBLER_TEST_GENERATE(Sdiv_corner, assembler) { |
1859 | if (TargetCPUFeatures::integer_division_supported()) { |
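    // Signed overflow case: ARM sdiv defines INT_MIN / -1 as 0x80000000.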
1860 | __ LoadImmediate(R0, 0x80000000); |
1861 | __ LoadImmediate(R1, 0xffffffff); |
1862 | __ sdiv(R2, R0, R1); |
1863 | __ mov(R0, Operand(R2)); |
1864 | } |
1865 | __ bx(LR); |
1866 | } |
1867 | |
1868 | ASSEMBLER_TEST_RUN(Sdiv_corner, test) { |
1869 | EXPECT(test != NULL); |
1870 | if (TargetCPUFeatures::integer_division_supported()) { |
1871 | typedef int (*Tst)() DART_UNUSED; |
1872 | EXPECT_EQ(static_cast<int32_t>(0x80000000), |
1873 | EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1874 | } |
1875 | } |
1876 | |
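// IntegerDivide should emit a hardware divide when integer division is available.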
1877 | ASSEMBLER_TEST_GENERATE(IntDiv_supported, assembler) { |
1878 | #if defined(USING_SIMULATOR) |
1879 | bool orig = TargetCPUFeatures::integer_division_supported(); |
1880 | HostCPUFeatures::set_integer_division_supported(true); |
1881 | __ mov(R0, Operand(27)); |
1882 | __ mov(R1, Operand(9)); |
1883 | __ IntegerDivide(R0, R0, R1, D0, D1); |
1884 | HostCPUFeatures::set_integer_division_supported(orig); |
1885 | __ bx(LR); |
1886 | #else |
1887 | if (TargetCPUFeatures::can_divide()) { |
1888 | __ mov(R0, Operand(27)); |
1889 | __ mov(R1, Operand(9)); |
1890 | __ IntegerDivide(R0, R0, R1, D0, D1); |
1891 | } |
1892 | __ bx(LR); |
1893 | #endif |
1894 | } |
1895 | |
1896 | ASSEMBLER_TEST_RUN(IntDiv_supported, test) { |
1897 | EXPECT(test != NULL); |
1898 | #if defined(USING_SIMULATOR) |
1899 | bool orig = TargetCPUFeatures::integer_division_supported(); |
1900 | HostCPUFeatures::set_integer_division_supported(true); |
1901 | if (TargetCPUFeatures::can_divide()) { |
1902 | typedef int (*Tst)() DART_UNUSED; |
1903 | EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1904 | } |
1905 | HostCPUFeatures::set_integer_division_supported(orig); |
1906 | #else |
1907 | if (TargetCPUFeatures::can_divide()) { |
1908 | typedef int (*Tst)() DART_UNUSED; |
1909 | EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1910 | } |
1911 | #endif |
1912 | } |
1913 | |
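// Without hardware integer division, IntegerDivide falls back to VFP division
// using D0 and D1 as temporaries, and should produce the same quotient.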
1914 | ASSEMBLER_TEST_GENERATE(IntDiv_unsupported, assembler) { |
1915 | #if defined(USING_SIMULATOR) |
1916 | if (TargetCPUFeatures::can_divide()) { |
1917 | bool orig = TargetCPUFeatures::integer_division_supported(); |
1918 | HostCPUFeatures::set_integer_division_supported(false); |
1919 | __ mov(R0, Operand(27)); |
1920 | __ mov(R1, Operand(9)); |
1921 | __ IntegerDivide(R0, R0, R1, D0, D1); |
1922 | HostCPUFeatures::set_integer_division_supported(orig); |
1923 | } |
1924 | __ bx(LR); |
1925 | #else |
1926 | if (TargetCPUFeatures::can_divide()) { |
1927 | __ mov(R0, Operand(27)); |
1928 | __ mov(R1, Operand(9)); |
1929 | __ IntegerDivide(R0, R0, R1, D0, D1); |
1930 | } |
1931 | __ bx(LR); |
1932 | #endif |
1933 | } |
1934 | |
1935 | ASSEMBLER_TEST_RUN(IntDiv_unsupported, test) { |
1936 | EXPECT(test != NULL); |
1937 | #if defined(USING_SIMULATOR) |
1938 | bool orig = TargetCPUFeatures::integer_division_supported(); |
1939 | HostCPUFeatures::set_integer_division_supported(false); |
1940 | if (TargetCPUFeatures::can_divide()) { |
1941 | typedef int (*Tst)() DART_UNUSED; |
1942 | EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1943 | } |
1944 | HostCPUFeatures::set_integer_division_supported(orig); |
1945 | #else |
1946 | if (TargetCPUFeatures::can_divide()) { |
1947 | typedef int (*Tst)() DART_UNUSED; |
1948 | EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1949 | } |
1950 | #endif |
1951 | } |
1952 | |
1953 | ASSEMBLER_TEST_GENERATE(Muls, assembler) { |
1954 | __ mov(R0, Operand(3)); |
1955 | __ LoadImmediate(R1, -9); |
1956 | __ muls(R2, R0, R1); |
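  // The product (-27) is negative, so muls sets the N flag and MI writes 42.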
1957 | __ mov(R0, Operand(42), MI); |
1958 | __ bx(LR); |
1959 | } |
1960 | |
1961 | ASSEMBLER_TEST_RUN(Muls, test) { |
1962 | EXPECT(test != NULL); |
1963 | typedef int (*Tst)() DART_UNUSED; |
1964 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
1965 | } |
1966 | |
1967 | ASSEMBLER_TEST_GENERATE(Vaddqi8, assembler) { |
1968 | if (TargetCPUFeatures::neon_supported()) { |
1969 | __ mov(R0, Operand(1)); |
1970 | __ vmovsr(S0, R0); |
1971 | __ mov(R0, Operand(2)); |
1972 | __ vmovsr(S1, R0); |
1973 | __ mov(R0, Operand(3)); |
1974 | __ vmovsr(S2, R0); |
1975 | __ mov(R0, Operand(4)); |
1976 | __ vmovsr(S3, R0); |
1977 | __ mov(R0, Operand(5)); |
1978 | __ vmovsr(S4, R0); |
1979 | __ mov(R0, Operand(6)); |
1980 | __ vmovsr(S5, R0); |
1981 | __ mov(R0, Operand(7)); |
1982 | __ vmovsr(S6, R0); |
1983 | __ mov(R0, Operand(8)); |
1984 | __ vmovsr(S7, R0); |
1985 | |
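    // Byte-wise add: each word of Q2 becomes 6, 8, 10, 12 (sum 36).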
1986 | __ vaddqi(kByte, Q2, Q0, Q1); |
1987 | |
1988 | __ vmovrs(R0, S8); |
1989 | __ vmovrs(R1, S9); |
1990 | __ vmovrs(R2, S10); |
1991 | __ vmovrs(R3, S11); |
1992 | |
1993 | __ add(R0, R0, Operand(R1)); |
1994 | __ add(R0, R0, Operand(R2)); |
1995 | __ add(R0, R0, Operand(R3)); |
1996 | } |
1997 | __ bx(LR); |
1998 | } |
1999 | |
2000 | ASSEMBLER_TEST_RUN(Vaddqi8, test) { |
2001 | EXPECT(test != NULL); |
2002 | if (TargetCPUFeatures::neon_supported()) { |
2003 | typedef int (*Tst)() DART_UNUSED; |
2004 | EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2005 | } |
2006 | } |
2007 | |
2008 | ASSEMBLER_TEST_GENERATE(Vaddqi16, assembler) { |
2009 | if (TargetCPUFeatures::neon_supported()) { |
2010 | __ mov(R0, Operand(1)); |
2011 | __ vmovsr(S0, R0); |
2012 | __ mov(R0, Operand(2)); |
2013 | __ vmovsr(S1, R0); |
2014 | __ mov(R0, Operand(3)); |
2015 | __ vmovsr(S2, R0); |
2016 | __ mov(R0, Operand(4)); |
2017 | __ vmovsr(S3, R0); |
2018 | __ mov(R0, Operand(5)); |
2019 | __ vmovsr(S4, R0); |
2020 | __ mov(R0, Operand(6)); |
2021 | __ vmovsr(S5, R0); |
2022 | __ mov(R0, Operand(7)); |
2023 | __ vmovsr(S6, R0); |
2024 | __ mov(R0, Operand(8)); |
2025 | __ vmovsr(S7, R0); |
2026 | |
2027 | __ vaddqi(kHalfword, Q2, Q0, Q1); |
2028 | |
2029 | __ vmovrs(R0, S8); |
2030 | __ vmovrs(R1, S9); |
2031 | __ vmovrs(R2, S10); |
2032 | __ vmovrs(R3, S11); |
2033 | |
2034 | __ add(R0, R0, Operand(R1)); |
2035 | __ add(R0, R0, Operand(R2)); |
2036 | __ add(R0, R0, Operand(R3)); |
2037 | } |
2038 | __ bx(LR); |
2039 | } |
2040 | |
2041 | ASSEMBLER_TEST_RUN(Vaddqi16, test) { |
2042 | EXPECT(test != NULL); |
2043 | if (TargetCPUFeatures::neon_supported()) { |
2044 | typedef int (*Tst)() DART_UNUSED; |
2045 | EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2046 | } |
2047 | } |
2048 | |
2049 | ASSEMBLER_TEST_GENERATE(Vaddqi32, assembler) { |
2050 | if (TargetCPUFeatures::neon_supported()) { |
2051 | __ mov(R0, Operand(1)); |
2052 | __ vmovsr(S0, R0); |
2053 | __ mov(R0, Operand(2)); |
2054 | __ vmovsr(S1, R0); |
2055 | __ mov(R0, Operand(3)); |
2056 | __ vmovsr(S2, R0); |
2057 | __ mov(R0, Operand(4)); |
2058 | __ vmovsr(S3, R0); |
2059 | __ mov(R0, Operand(5)); |
2060 | __ vmovsr(S4, R0); |
2061 | __ mov(R0, Operand(6)); |
2062 | __ vmovsr(S5, R0); |
2063 | __ mov(R0, Operand(7)); |
2064 | __ vmovsr(S6, R0); |
2065 | __ mov(R0, Operand(8)); |
2066 | __ vmovsr(S7, R0); |
2067 | |
2068 | __ vaddqi(kWord, Q2, Q0, Q1); |
2069 | |
2070 | __ vmovrs(R0, S8); |
2071 | __ vmovrs(R1, S9); |
2072 | __ vmovrs(R2, S10); |
2073 | __ vmovrs(R3, S11); |
2074 | |
2075 | __ add(R0, R0, Operand(R1)); |
2076 | __ add(R0, R0, Operand(R2)); |
2077 | __ add(R0, R0, Operand(R3)); |
2078 | } |
2079 | __ bx(LR); |
2080 | } |
2081 | |
2082 | ASSEMBLER_TEST_RUN(Vaddqi32, test) { |
2083 | EXPECT(test != NULL); |
2084 | if (TargetCPUFeatures::neon_supported()) { |
2085 | typedef int (*Tst)() DART_UNUSED; |
2086 | EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2087 | } |
2088 | } |
2089 | |
2090 | ASSEMBLER_TEST_GENERATE(Vaddqi64, assembler) { |
2091 | if (TargetCPUFeatures::neon_supported()) { |
2092 | __ mov(R0, Operand(1)); |
2093 | __ vmovsr(S0, R0); |
2094 | __ mov(R0, Operand(2)); |
2095 | __ vmovsr(S2, R0); |
2096 | __ mov(R0, Operand(3)); |
2097 | __ vmovsr(S4, R0); |
2098 | __ mov(R0, Operand(4)); |
2099 | __ vmovsr(S6, R0); |
2100 | |
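    // 64-bit lane add: the low words give 1 + 3 = 4 and 2 + 4 = 6 (sum 10).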
2101 | __ vaddqi(kWordPair, Q2, Q0, Q1); |
2102 | |
2103 | __ vmovrs(R0, S8); |
2104 | __ vmovrs(R2, S10); |
2105 | |
2106 | __ add(R0, R0, Operand(R2)); |
2107 | } |
2108 | __ bx(LR); |
2109 | } |
2110 | |
2111 | ASSEMBLER_TEST_RUN(Vaddqi64, test) { |
2112 | EXPECT(test != NULL); |
2113 | if (TargetCPUFeatures::neon_supported()) { |
2114 | typedef int (*Tst)() DART_UNUSED; |
2115 | EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2116 | } |
2117 | } |
2118 | |
2119 | ASSEMBLER_TEST_GENERATE(Vshlqu64, assembler) { |
2120 | if (TargetCPUFeatures::neon_supported()) { |
2121 | Label fail; |
2122 | __ LoadImmediate(R1, 21); |
2123 | __ LoadImmediate(R0, 1); |
2124 | __ vmovsr(S0, R1); |
2125 | __ vmovsr(S2, R1); |
2126 | __ vmovsr(S4, R0); |
2127 | __ vmovsr(S6, R0); |
2128 | |
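    // Unsigned 64-bit shift left by the amount in Q1 (1): the low words (21) become 42.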
2129 | __ vshlqu(kWordPair, Q2, Q0, Q1); |
2130 | |
2131 | __ vmovrs(R0, S8); |
2132 | __ vmovrs(R1, S10); |
2133 | __ CompareImmediate(R0, 42); |
2134 | __ LoadImmediate(R0, 0); |
2135 | __ b(&fail, NE); |
2136 | __ CompareImmediate(R1, 42); |
2137 | __ LoadImmediate(R0, 0); |
2138 | __ b(&fail, NE); |
2139 | |
2140 | __ LoadImmediate(R0, 1); |
2141 | __ Bind(&fail); |
2142 | } |
2143 | __ bx(LR); |
2144 | } |
2145 | |
2146 | ASSEMBLER_TEST_RUN(Vshlqu64, test) { |
2147 | EXPECT(test != NULL); |
2148 | if (TargetCPUFeatures::neon_supported()) { |
2149 | typedef int (*Tst)() DART_UNUSED; |
2150 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2151 | } |
2152 | } |
2153 | |
2154 | ASSEMBLER_TEST_GENERATE(Vshlqi64, assembler) { |
2155 | if (TargetCPUFeatures::neon_supported()) { |
2156 | Label fail; |
2157 | __ LoadImmediate(R1, -84); |
2158 | __ LoadImmediate(R0, -1); |
2159 | __ vmovdrr(D0, R1, R0); |
2160 | __ vmovdrr(D1, R1, R0); |
2161 | __ vmovsr(S4, R0); |
2162 | __ vmovsr(S6, R0); |
2163 | |
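    // Signed 64-bit shift: a shift amount of -1 shifts right, so -84 becomes -42.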
2164 | __ vshlqi(kWordPair, Q2, Q0, Q1); |
2165 | |
2166 | __ vmovrs(R0, S8); |
2167 | __ vmovrs(R1, S10); |
2168 | __ CompareImmediate(R0, -42); |
2169 | __ LoadImmediate(R0, 0); |
2170 | __ b(&fail, NE); |
2171 | __ CompareImmediate(R1, -42); |
2172 | __ LoadImmediate(R0, 0); |
2173 | __ b(&fail, NE); |
2174 | |
2175 | __ LoadImmediate(R0, 1); |
2176 | __ Bind(&fail); |
2177 | } |
2178 | __ bx(LR); |
2179 | } |
2180 | |
2181 | ASSEMBLER_TEST_RUN(Vshlqi64, test) { |
2182 | EXPECT(test != NULL); |
2183 | if (TargetCPUFeatures::neon_supported()) { |
2184 | typedef int (*Tst)() DART_UNUSED; |
2185 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2186 | } |
2187 | } |
2188 | |
2189 | ASSEMBLER_TEST_GENERATE(Mint_shl_ok, assembler) { |
2190 | if (TargetCPUFeatures::neon_supported()) { |
2191 | const QRegister value = Q0; |
2192 | const QRegister temp = Q1; |
2193 | const QRegister out = Q2; |
2194 | const Register shift = R1; |
2195 | const DRegister dtemp0 = EvenDRegisterOf(temp); |
2196 | const SRegister stemp0 = EvenSRegisterOf(dtemp0); |
2197 | const DRegister dout0 = EvenDRegisterOf(out); |
2198 | const SRegister sout0 = EvenSRegisterOf(dout0); |
2199 | const SRegister sout1 = OddSRegisterOf(dout0); |
2200 | Label fail; |
2201 | |
2202 | // Initialize. |
2203 | __ veorq(value, value, value); |
2204 | __ veorq(temp, temp, temp); |
2205 | __ veorq(out, out, out); |
2206 | __ LoadImmediate(shift, 32); |
2207 | __ LoadImmediate(R2, 1 << 7); |
2208 | __ vmovsr(S0, R2); |
2209 | |
2210 | __ vmovsr(stemp0, shift); // Move the shift into the low S register. |
2211 | __ vshlqu(kWordPair, out, value, temp); |
2212 | |
    // Check for overflow by shifting back and comparing.
2214 | __ rsb(shift, shift, Operand(0)); |
2215 | __ vmovsr(stemp0, shift); |
2216 | __ vshlqi(kWordPair, temp, out, temp); |
2217 | __ vceqqi(kWord, out, temp, value); |
    // Low 64 bits of out should be all 1's, otherwise temp != value and
    // we deopt.
2220 | __ vmovrs(shift, sout0); |
2221 | __ CompareImmediate(shift, -1); |
2222 | __ b(&fail, NE); |
2223 | __ vmovrs(shift, sout1); |
2224 | __ CompareImmediate(shift, -1); |
2225 | __ b(&fail, NE); |
2226 | |
2227 | __ LoadImmediate(R0, 1); |
2228 | __ bx(LR); |
2229 | |
2230 | __ Bind(&fail); |
2231 | __ LoadImmediate(R0, 0); |
2232 | } |
2233 | __ bx(LR); |
2234 | } |
2235 | |
2236 | ASSEMBLER_TEST_RUN(Mint_shl_ok, test) { |
2237 | EXPECT(test != NULL); |
2238 | if (TargetCPUFeatures::neon_supported()) { |
2239 | typedef int (*Tst)() DART_UNUSED; |
2240 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2241 | } |
2242 | } |
2243 | |
2244 | ASSEMBLER_TEST_GENERATE(Mint_shl_overflow, assembler) { |
2245 | if (TargetCPUFeatures::neon_supported()) { |
2246 | const QRegister value = Q0; |
2247 | const QRegister temp = Q1; |
2248 | const QRegister out = Q2; |
2249 | const Register shift = R1; |
2250 | const DRegister dtemp0 = EvenDRegisterOf(temp); |
2251 | const SRegister stemp0 = EvenSRegisterOf(dtemp0); |
2252 | const DRegister dout0 = EvenDRegisterOf(out); |
2253 | const SRegister sout0 = EvenSRegisterOf(dout0); |
2254 | const SRegister sout1 = OddSRegisterOf(dout0); |
2255 | Label fail; |
2256 | |
2257 | // Initialize. |
2258 | __ veorq(value, value, value); |
2259 | __ veorq(temp, temp, temp); |
2260 | __ veorq(out, out, out); |
2261 | __ LoadImmediate(shift, 60); |
2262 | __ LoadImmediate(R2, 1 << 7); |
2263 | __ vmovsr(S0, R2); |
2264 | |
2265 | __ vmovsr(stemp0, shift); // Move the shift into the low S register. |
2266 | __ vshlqu(kWordPair, out, value, temp); |
2267 | |
    // Check for overflow by shifting back and comparing.
2269 | __ rsb(shift, shift, Operand(0)); |
2270 | __ vmovsr(stemp0, shift); |
2271 | __ vshlqi(kWordPair, temp, out, temp); |
2272 | __ vceqqi(kWord, out, temp, value); |
    // Low 64 bits of out should be all 1's, otherwise temp != value and
    // we deopt.
2275 | __ vmovrs(shift, sout0); |
2276 | __ CompareImmediate(shift, -1); |
2277 | __ b(&fail, NE); |
2278 | __ vmovrs(shift, sout1); |
2279 | __ CompareImmediate(shift, -1); |
2280 | __ b(&fail, NE); |
2281 | |
2282 | __ LoadImmediate(R0, 0); |
2283 | __ bx(LR); |
2284 | |
2285 | __ Bind(&fail); |
2286 | __ LoadImmediate(R0, 1); |
2287 | } |
2288 | __ bx(LR); |
2289 | } |
2290 | |
2291 | ASSEMBLER_TEST_RUN(Mint_shl_overflow, test) { |
2292 | EXPECT(test != NULL); |
2293 | if (TargetCPUFeatures::neon_supported()) { |
2294 | typedef int (*Tst)() DART_UNUSED; |
2295 | EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2296 | } |
2297 | } |
2298 | |
2299 | ASSEMBLER_TEST_GENERATE(Vsubqi8, assembler) { |
2300 | if (TargetCPUFeatures::neon_supported()) { |
2301 | __ mov(R0, Operand(1)); |
2302 | __ vmovsr(S0, R0); |
2303 | __ mov(R0, Operand(2)); |
2304 | __ vmovsr(S1, R0); |
2305 | __ mov(R0, Operand(3)); |
2306 | __ vmovsr(S2, R0); |
2307 | __ mov(R0, Operand(4)); |
2308 | __ vmovsr(S3, R0); |
2309 | __ mov(R0, Operand(2)); |
2310 | __ vmovsr(S4, R0); |
2311 | __ mov(R0, Operand(4)); |
2312 | __ vmovsr(S5, R0); |
2313 | __ mov(R0, Operand(6)); |
2314 | __ vmovsr(S6, R0); |
2315 | __ mov(R0, Operand(8)); |
2316 | __ vmovsr(S7, R0); |
2317 | |
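    // Byte-wise subtract: Q2 = Q1 - Q0, so each word holds 1, 2, 3, 4 (sum 10).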
2318 | __ vsubqi(kByte, Q2, Q1, Q0); |
2319 | |
2320 | __ vmovrs(R0, S8); |
2321 | __ vmovrs(R1, S9); |
2322 | __ vmovrs(R2, S10); |
2323 | __ vmovrs(R3, S11); |
2324 | |
2325 | __ add(R0, R0, Operand(R1)); |
2326 | __ add(R0, R0, Operand(R2)); |
2327 | __ add(R0, R0, Operand(R3)); |
2328 | } |
2329 | __ bx(LR); |
2330 | } |
2331 | |
2332 | ASSEMBLER_TEST_RUN(Vsubqi8, test) { |
2333 | EXPECT(test != NULL); |
2334 | if (TargetCPUFeatures::neon_supported()) { |
2335 | typedef int (*Tst)() DART_UNUSED; |
2336 | EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2337 | } |
2338 | } |
2339 | |
2340 | ASSEMBLER_TEST_GENERATE(Vsubqi16, assembler) { |
2341 | if (TargetCPUFeatures::neon_supported()) { |
2342 | __ mov(R0, Operand(1)); |
2343 | __ vmovsr(S0, R0); |
2344 | __ mov(R0, Operand(2)); |
2345 | __ vmovsr(S1, R0); |
2346 | __ mov(R0, Operand(3)); |
2347 | __ vmovsr(S2, R0); |
2348 | __ mov(R0, Operand(4)); |
2349 | __ vmovsr(S3, R0); |
2350 | __ mov(R0, Operand(2)); |
2351 | __ vmovsr(S4, R0); |
2352 | __ mov(R0, Operand(4)); |
2353 | __ vmovsr(S5, R0); |
2354 | __ mov(R0, Operand(6)); |
2355 | __ vmovsr(S6, R0); |
2356 | __ mov(R0, Operand(8)); |
2357 | __ vmovsr(S7, R0); |
2358 | |
2359 | __ vsubqi(kHalfword, Q2, Q1, Q0); |
2360 | |
2361 | __ vmovrs(R0, S8); |
2362 | __ vmovrs(R1, S9); |
2363 | __ vmovrs(R2, S10); |
2364 | __ vmovrs(R3, S11); |
2365 | |
2366 | __ add(R0, R0, Operand(R1)); |
2367 | __ add(R0, R0, Operand(R2)); |
2368 | __ add(R0, R0, Operand(R3)); |
2369 | } |
2370 | __ bx(LR); |
2371 | } |
2372 | |
2373 | ASSEMBLER_TEST_RUN(Vsubqi16, test) { |
2374 | EXPECT(test != NULL); |
2375 | if (TargetCPUFeatures::neon_supported()) { |
2376 | typedef int (*Tst)() DART_UNUSED; |
2377 | EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2378 | } |
2379 | } |
2380 | |
2381 | ASSEMBLER_TEST_GENERATE(Vsubqi32, assembler) { |
2382 | if (TargetCPUFeatures::neon_supported()) { |
2383 | __ mov(R0, Operand(1)); |
2384 | __ vmovsr(S0, R0); |
2385 | __ mov(R0, Operand(2)); |
2386 | __ vmovsr(S1, R0); |
2387 | __ mov(R0, Operand(3)); |
2388 | __ vmovsr(S2, R0); |
2389 | __ mov(R0, Operand(4)); |
2390 | __ vmovsr(S3, R0); |
2391 | __ mov(R0, Operand(2)); |
2392 | __ vmovsr(S4, R0); |
2393 | __ mov(R0, Operand(4)); |
2394 | __ vmovsr(S5, R0); |
2395 | __ mov(R0, Operand(6)); |
2396 | __ vmovsr(S6, R0); |
2397 | __ mov(R0, Operand(8)); |
2398 | __ vmovsr(S7, R0); |
2399 | |
2400 | __ vsubqi(kWord, Q2, Q1, Q0); |
2401 | |
2402 | __ vmovrs(R0, S8); |
2403 | __ vmovrs(R1, S9); |
2404 | __ vmovrs(R2, S10); |
2405 | __ vmovrs(R3, S11); |
2406 | |
2407 | __ add(R0, R0, Operand(R1)); |
2408 | __ add(R0, R0, Operand(R2)); |
2409 | __ add(R0, R0, Operand(R3)); |
2410 | } |
2411 | __ bx(LR); |
2412 | } |
2413 | |
2414 | ASSEMBLER_TEST_RUN(Vsubqi32, test) { |
2415 | EXPECT(test != NULL); |
2416 | if (TargetCPUFeatures::neon_supported()) { |
2417 | typedef int (*Tst)() DART_UNUSED; |
2418 | EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2419 | } |
2420 | } |
2421 | |
2422 | ASSEMBLER_TEST_GENERATE(Vsubqi64, assembler) { |
2423 | if (TargetCPUFeatures::neon_supported()) { |
2424 | __ mov(R0, Operand(1)); |
2425 | __ vmovsr(S0, R0); |
2426 | __ mov(R0, Operand(2)); |
2427 | __ vmovsr(S2, R0); |
2428 | __ mov(R0, Operand(2)); |
2429 | __ vmovsr(S4, R0); |
2430 | __ mov(R0, Operand(4)); |
2431 | __ vmovsr(S6, R0); |
2432 | |
2433 | __ vsubqi(kWordPair, Q2, Q1, Q0); |
2434 | |
2435 | __ vmovrs(R0, S8); |
2436 | __ vmovrs(R2, S10); |
2437 | |
2438 | __ add(R0, R0, Operand(R2)); |
2439 | } |
2440 | __ bx(LR); |
2441 | } |
2442 | |
2443 | ASSEMBLER_TEST_RUN(Vsubqi64, test) { |
2444 | EXPECT(test != NULL); |
2445 | if (TargetCPUFeatures::neon_supported()) { |
2446 | typedef int (*Tst)() DART_UNUSED; |
2447 | EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2448 | } |
2449 | } |
2450 | |
2451 | ASSEMBLER_TEST_GENERATE(Vmulqi8, assembler) { |
2452 | if (TargetCPUFeatures::neon_supported()) { |
2453 | __ mov(R0, Operand(1)); |
2454 | __ vmovsr(S0, R0); |
2455 | __ mov(R0, Operand(2)); |
2456 | __ vmovsr(S1, R0); |
2457 | __ mov(R0, Operand(3)); |
2458 | __ vmovsr(S2, R0); |
2459 | __ mov(R0, Operand(4)); |
2460 | __ vmovsr(S3, R0); |
2461 | __ mov(R0, Operand(5)); |
2462 | __ vmovsr(S4, R0); |
2463 | __ mov(R0, Operand(6)); |
2464 | __ vmovsr(S5, R0); |
2465 | __ mov(R0, Operand(7)); |
2466 | __ vmovsr(S6, R0); |
2467 | __ mov(R0, Operand(8)); |
2468 | __ vmovsr(S7, R0); |
2469 | |
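    // Byte-wise multiply: Q2 = Q1 * Q0, so the words hold 5, 12, 21, 32 (sum 70).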
2470 | __ vmulqi(kByte, Q2, Q1, Q0); |
2471 | |
2472 | __ vmovrs(R0, S8); |
2473 | __ vmovrs(R1, S9); |
2474 | __ vmovrs(R2, S10); |
2475 | __ vmovrs(R3, S11); |
2476 | |
2477 | __ add(R0, R0, Operand(R1)); |
2478 | __ add(R0, R0, Operand(R2)); |
2479 | __ add(R0, R0, Operand(R3)); |
2480 | } |
2481 | __ bx(LR); |
2482 | } |
2483 | |
2484 | ASSEMBLER_TEST_RUN(Vmulqi8, test) { |
2485 | EXPECT(test != NULL); |
2486 | if (TargetCPUFeatures::neon_supported()) { |
2487 | typedef int (*Tst)() DART_UNUSED; |
2488 | EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2489 | } |
2490 | } |
2491 | |
2492 | ASSEMBLER_TEST_GENERATE(Vmulqi16, assembler) { |
2493 | if (TargetCPUFeatures::neon_supported()) { |
2494 | __ mov(R0, Operand(1)); |
2495 | __ vmovsr(S0, R0); |
2496 | __ mov(R0, Operand(2)); |
2497 | __ vmovsr(S1, R0); |
2498 | __ mov(R0, Operand(3)); |
2499 | __ vmovsr(S2, R0); |
2500 | __ mov(R0, Operand(4)); |
2501 | __ vmovsr(S3, R0); |
2502 | __ mov(R0, Operand(5)); |
2503 | __ vmovsr(S4, R0); |
2504 | __ mov(R0, Operand(6)); |
2505 | __ vmovsr(S5, R0); |
2506 | __ mov(R0, Operand(7)); |
2507 | __ vmovsr(S6, R0); |
2508 | __ mov(R0, Operand(8)); |
2509 | __ vmovsr(S7, R0); |
2510 | |
2511 | __ vmulqi(kHalfword, Q2, Q1, Q0); |
2512 | |
2513 | __ vmovrs(R0, S8); |
2514 | __ vmovrs(R1, S9); |
2515 | __ vmovrs(R2, S10); |
2516 | __ vmovrs(R3, S11); |
2517 | |
2518 | __ add(R0, R0, Operand(R1)); |
2519 | __ add(R0, R0, Operand(R2)); |
2520 | __ add(R0, R0, Operand(R3)); |
2521 | } |
2522 | __ bx(LR); |
2523 | } |
2524 | |
2525 | ASSEMBLER_TEST_RUN(Vmulqi16, test) { |
2526 | EXPECT(test != NULL); |
2527 | if (TargetCPUFeatures::neon_supported()) { |
2528 | typedef int (*Tst)() DART_UNUSED; |
2529 | EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2530 | } |
2531 | } |
2532 | |
2533 | ASSEMBLER_TEST_GENERATE(Vmulqi32, assembler) { |
2534 | if (TargetCPUFeatures::neon_supported()) { |
2535 | __ mov(R0, Operand(1)); |
2536 | __ vmovsr(S0, R0); |
2537 | __ mov(R0, Operand(2)); |
2538 | __ vmovsr(S1, R0); |
2539 | __ mov(R0, Operand(3)); |
2540 | __ vmovsr(S2, R0); |
2541 | __ mov(R0, Operand(4)); |
2542 | __ vmovsr(S3, R0); |
2543 | __ mov(R0, Operand(5)); |
2544 | __ vmovsr(S4, R0); |
2545 | __ mov(R0, Operand(6)); |
2546 | __ vmovsr(S5, R0); |
2547 | __ mov(R0, Operand(7)); |
2548 | __ vmovsr(S6, R0); |
2549 | __ mov(R0, Operand(8)); |
2550 | __ vmovsr(S7, R0); |
2551 | |
2552 | __ vmulqi(kWord, Q2, Q1, Q0); |
2553 | |
2554 | __ vmovrs(R0, S8); |
2555 | __ vmovrs(R1, S9); |
2556 | __ vmovrs(R2, S10); |
2557 | __ vmovrs(R3, S11); |
2558 | |
2559 | __ add(R0, R0, Operand(R1)); |
2560 | __ add(R0, R0, Operand(R2)); |
2561 | __ add(R0, R0, Operand(R3)); |
2562 | } |
2563 | __ bx(LR); |
2564 | } |
2565 | |
2566 | ASSEMBLER_TEST_RUN(Vmulqi32, test) { |
2567 | EXPECT(test != NULL); |
2568 | if (TargetCPUFeatures::neon_supported()) { |
2569 | typedef int (*Tst)() DART_UNUSED; |
2570 | EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2571 | } |
2572 | } |
2573 | |
2574 | ASSEMBLER_TEST_GENERATE(Vaddqs, assembler) { |
2575 | if (TargetCPUFeatures::neon_supported()) { |
2576 | __ LoadSImmediate(S0, 1.0); |
2577 | __ LoadSImmediate(S1, 2.0); |
2578 | __ LoadSImmediate(S2, 3.0); |
2579 | __ LoadSImmediate(S3, 4.0); |
2580 | __ LoadSImmediate(S4, 5.0); |
2581 | __ LoadSImmediate(S5, 6.0); |
2582 | __ LoadSImmediate(S6, 7.0); |
2583 | __ LoadSImmediate(S7, 8.0); |
2584 | |
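    // Lane-wise float add: Q2 = {6.0, 8.0, 10.0, 12.0}, summing to 36.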
2585 | __ vaddqs(Q2, Q0, Q1); |
2586 | |
2587 | __ vadds(S8, S8, S9); |
2588 | __ vadds(S8, S8, S10); |
2589 | __ vadds(S8, S8, S11); |
2590 | |
2591 | __ vcvtis(S0, S8); |
2592 | __ vmovrs(R0, S0); |
2593 | } |
2594 | __ bx(LR); |
2595 | } |
2596 | |
2597 | ASSEMBLER_TEST_RUN(Vaddqs, test) { |
2598 | EXPECT(test != NULL); |
2599 | if (TargetCPUFeatures::neon_supported()) { |
2600 | typedef int (*Tst)() DART_UNUSED; |
2601 | EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2602 | } |
2603 | } |
2604 | |
2605 | ASSEMBLER_TEST_GENERATE(Vsubqs, assembler) { |
2606 | if (TargetCPUFeatures::neon_supported()) { |
2607 | __ LoadSImmediate(S0, 1.0); |
2608 | __ LoadSImmediate(S1, 2.0); |
2609 | __ LoadSImmediate(S2, 3.0); |
2610 | __ LoadSImmediate(S3, 4.0); |
2611 | __ LoadSImmediate(S4, 2.0); |
2612 | __ LoadSImmediate(S5, 4.0); |
2613 | __ LoadSImmediate(S6, 6.0); |
2614 | __ LoadSImmediate(S7, 8.0); |
2615 | |
2616 | __ vsubqs(Q2, Q1, Q0); |
2617 | |
2618 | __ vadds(S8, S8, S9); |
2619 | __ vadds(S8, S8, S10); |
2620 | __ vadds(S8, S8, S11); |
2621 | |
2622 | __ vcvtis(S0, S8); |
2623 | __ vmovrs(R0, S0); |
2624 | } |
2625 | __ bx(LR); |
2626 | } |
2627 | |
2628 | ASSEMBLER_TEST_RUN(Vsubqs, test) { |
2629 | EXPECT(test != NULL); |
2630 | if (TargetCPUFeatures::neon_supported()) { |
2631 | typedef int (*Tst)() DART_UNUSED; |
2632 | EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2633 | } |
2634 | } |
2635 | |
2636 | ASSEMBLER_TEST_GENERATE(Vmulqs, assembler) { |
2637 | if (TargetCPUFeatures::neon_supported()) { |
2638 | __ LoadSImmediate(S0, 1.0); |
2639 | __ LoadSImmediate(S1, 2.0); |
2640 | __ LoadSImmediate(S2, 3.0); |
2641 | __ LoadSImmediate(S3, 4.0); |
2642 | __ LoadSImmediate(S4, 5.0); |
2643 | __ LoadSImmediate(S5, 6.0); |
2644 | __ LoadSImmediate(S6, 7.0); |
2645 | __ LoadSImmediate(S7, 8.0); |
2646 | |
2647 | __ vmulqs(Q2, Q1, Q0); |
2648 | |
2649 | __ vadds(S8, S8, S9); |
2650 | __ vadds(S8, S8, S10); |
2651 | __ vadds(S8, S8, S11); |
2652 | |
2653 | __ vcvtis(S0, S8); |
2654 | __ vmovrs(R0, S0); |
2655 | } |
2656 | __ bx(LR); |
2657 | } |
2658 | |
2659 | ASSEMBLER_TEST_RUN(Vmulqs, test) { |
2660 | EXPECT(test != NULL); |
2661 | if (TargetCPUFeatures::neon_supported()) { |
2662 | typedef int (*Tst)() DART_UNUSED; |
2663 | EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2664 | } |
2665 | } |
2666 | |
2667 | ASSEMBLER_TEST_GENERATE(VtblX, assembler) { |
2668 | if (TargetCPUFeatures::neon_supported()) { |
2669 | // Index. |
2670 | __ LoadImmediate(R0, 0x03020100); |
2671 | __ vmovsr(S0, R0); |
2672 | __ vmovsr(S1, R0); |
2673 | |
2674 | // Table. |
2675 | __ LoadSImmediate(S2, 1.0); |
2676 | __ LoadSImmediate(S3, 2.0); |
2677 | __ LoadSImmediate(S4, 3.0); |
2678 | __ LoadSImmediate(S5, 4.0); |
2679 | |
2680 | // Select. |
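    // Indices 0 - 3 pick the bytes of S2 (1.0f) into both words of D3.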
2681 | __ vtbl(D3, D1, 2, D0); |
2682 | |
2683 | // Check that S6, S7 are both 1.0 |
2684 | __ vcvtis(S0, S6); |
2685 | __ vcvtis(S1, S7); |
2686 | __ vmovrs(R2, S0); |
2687 | __ vmovrs(R3, S1); |
2688 | |
2689 | __ LoadImmediate(R0, 0); |
2690 | __ CompareImmediate(R2, 1); |
2691 | __ bx(LR, NE); |
2692 | __ CompareImmediate(R3, 1); |
2693 | __ bx(LR, NE); |
2694 | __ LoadImmediate(R0, 42); |
2695 | } |
2696 | __ bx(LR); |
2697 | } |
2698 | |
2699 | ASSEMBLER_TEST_RUN(VtblX, test) { |
2700 | EXPECT(test != NULL); |
2701 | if (TargetCPUFeatures::neon_supported()) { |
2702 | typedef int (*Tst)() DART_UNUSED; |
2703 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2704 | } |
2705 | } |
2706 | |
2707 | ASSEMBLER_TEST_GENERATE(VtblY, assembler) { |
2708 | if (TargetCPUFeatures::neon_supported()) { |
2709 | // Index. |
2710 | __ LoadImmediate(R0, 0x07060504); |
2711 | __ vmovsr(S0, R0); |
2712 | __ vmovsr(S1, R0); |
2713 | |
2714 | // Table. |
2715 | __ LoadSImmediate(S2, 2.0); |
2716 | __ LoadSImmediate(S3, 1.0); |
2717 | __ LoadSImmediate(S4, 3.0); |
2718 | __ LoadSImmediate(S5, 4.0); |
2719 | |
2720 | // Select. |
2721 | __ vtbl(D3, D1, 2, D0); |
2722 | |
2723 | // Check that S6, S7 are both 1.0 |
2724 | __ vcvtis(S0, S6); |
2725 | __ vcvtis(S1, S7); |
2726 | __ vmovrs(R2, S0); |
2727 | __ vmovrs(R3, S1); |
2728 | |
2729 | __ LoadImmediate(R0, 0); |
2730 | __ CompareImmediate(R2, 1); |
2731 | __ bx(LR, NE); |
2732 | __ CompareImmediate(R3, 1); |
2733 | __ bx(LR, NE); |
2734 | __ LoadImmediate(R0, 42); |
2735 | } |
2736 | __ bx(LR); |
2737 | } |
2738 | |
2739 | ASSEMBLER_TEST_RUN(VtblY, test) { |
2740 | EXPECT(test != NULL); |
2741 | if (TargetCPUFeatures::neon_supported()) { |
2742 | typedef int (*Tst)() DART_UNUSED; |
2743 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2744 | } |
2745 | } |
2746 | |
2747 | ASSEMBLER_TEST_GENERATE(VtblZ, assembler) { |
2748 | if (TargetCPUFeatures::neon_supported()) { |
2749 | // Index. |
2750 | __ LoadImmediate(R0, 0x0b0a0908); |
2751 | __ vmovsr(S0, R0); |
2752 | __ vmovsr(S1, R0); |
2753 | |
2754 | // Table. |
2755 | __ LoadSImmediate(S2, 2.0); |
2756 | __ LoadSImmediate(S3, 3.0); |
2757 | __ LoadSImmediate(S4, 1.0); |
2758 | __ LoadSImmediate(S5, 4.0); |
2759 | |
2760 | // Select. |
2761 | __ vtbl(D3, D1, 2, D0); |
2762 | |
2763 | // Check that S6, S7 are both 1.0 |
2764 | __ vcvtis(S0, S6); |
2765 | __ vcvtis(S1, S7); |
2766 | __ vmovrs(R2, S0); |
2767 | __ vmovrs(R3, S1); |
2768 | |
2769 | __ LoadImmediate(R0, 0); |
2770 | __ CompareImmediate(R2, 1); |
2771 | __ bx(LR, NE); |
2772 | __ CompareImmediate(R3, 1); |
2773 | __ bx(LR, NE); |
2774 | __ LoadImmediate(R0, 42); |
2775 | } |
2776 | __ bx(LR); |
2777 | } |
2778 | |
2779 | ASSEMBLER_TEST_RUN(VtblZ, test) { |
2780 | EXPECT(test != NULL); |
2781 | if (TargetCPUFeatures::neon_supported()) { |
2782 | typedef int (*Tst)() DART_UNUSED; |
2783 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2784 | } |
2785 | } |
2786 | |
2787 | ASSEMBLER_TEST_GENERATE(VtblW, assembler) { |
2788 | if (TargetCPUFeatures::neon_supported()) { |
2789 | // Index. |
2790 | __ LoadImmediate(R0, 0x0f0e0d0c); |
2791 | __ vmovsr(S0, R0); |
2792 | __ vmovsr(S1, R0); |
2793 | |
2794 | // Table. |
2795 | __ LoadSImmediate(S2, 2.0); |
2796 | __ LoadSImmediate(S3, 3.0); |
2797 | __ LoadSImmediate(S4, 4.0); |
2798 | __ LoadSImmediate(S5, 1.0); |
2799 | |
2800 | // Select. |
2801 | __ vtbl(D3, D1, 2, D0); |
2802 | |
2803 | // Check that S6, S7 are both 1.0 |
2804 | __ vcvtis(S0, S6); |
2805 | __ vcvtis(S1, S7); |
2806 | __ vmovrs(R2, S0); |
2807 | __ vmovrs(R3, S1); |
2808 | |
2809 | __ LoadImmediate(R0, 0); |
2810 | __ CompareImmediate(R2, 1); |
2811 | __ bx(LR, NE); |
2812 | __ CompareImmediate(R3, 1); |
2813 | __ bx(LR, NE); |
2814 | __ LoadImmediate(R0, 42); |
2815 | } |
2816 | __ bx(LR); |
2817 | } |
2818 | |
2819 | ASSEMBLER_TEST_RUN(VtblW, test) { |
2820 | EXPECT(test != NULL); |
2821 | if (TargetCPUFeatures::neon_supported()) { |
2822 | typedef int (*Tst)() DART_UNUSED; |
2823 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2824 | } |
2825 | } |
2826 | |
2827 | ASSEMBLER_TEST_GENERATE(Veorq, assembler) { |
2828 | if (TargetCPUFeatures::neon_supported()) { |
2829 | // Q0 |
2830 | __ LoadImmediate(R0, 0xaaaaaaab); |
2831 | __ vmovsr(S0, R0); |
2832 | __ vmovsr(S1, R0); |
2833 | __ vmovsr(S2, R0); |
2834 | __ vmovsr(S3, R0); |
2835 | |
2836 | // Q1 |
2837 | __ LoadImmediate(R0, 0x55555555); |
2838 | __ vmovsr(S4, R0); |
2839 | __ vmovsr(S5, R0); |
2840 | __ vmovsr(S6, R0); |
2841 | __ vmovsr(S7, R0); |
2842 | |
2843 | // Q2 = -2 -2 -2 -2 |
2844 | __ veorq(Q2, Q1, Q0); |
2845 | |
2846 | __ vmovrs(R0, S8); |
2847 | __ vmovrs(R1, S9); |
2848 | __ vmovrs(R2, S10); |
2849 | __ vmovrs(R3, S11); |
2850 | |
2851 | __ add(R0, R0, Operand(R1)); |
2852 | __ add(R0, R0, Operand(R2)); |
2853 | __ add(R0, R0, Operand(R3)); |
2854 | } |
2855 | __ bx(LR); |
2856 | } |
2857 | |
2858 | ASSEMBLER_TEST_RUN(Veorq, test) { |
2859 | EXPECT(test != NULL); |
2860 | if (TargetCPUFeatures::neon_supported()) { |
2861 | typedef int (*Tst)() DART_UNUSED; |
2862 | EXPECT_EQ(-8, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2863 | } |
2864 | } |
2865 | |
2866 | ASSEMBLER_TEST_GENERATE(Vornq, assembler) { |
2867 | if (TargetCPUFeatures::neon_supported()) { |
2868 | // Q0 |
2869 | __ LoadImmediate(R0, 0xfffffff0); |
2870 | __ vmovsr(S0, R0); |
2871 | __ vmovsr(S1, R0); |
2872 | __ vmovsr(S2, R0); |
2873 | __ vmovsr(S3, R0); |
2874 | |
2875 | // Q1 |
2876 | __ LoadImmediate(R0, 0); |
2877 | __ vmovsr(S4, R0); |
2878 | __ vmovsr(S5, R0); |
2879 | __ vmovsr(S6, R0); |
2880 | __ vmovsr(S7, R0); |
2881 | |
2882 | // Q2 = 15 15 15 15 |
2883 | __ vornq(Q2, Q1, Q0); |
2884 | |
2885 | __ vmovrs(R0, S8); |
2886 | __ vmovrs(R1, S9); |
2887 | __ vmovrs(R2, S10); |
2888 | __ vmovrs(R3, S11); |
2889 | |
2890 | __ add(R0, R0, Operand(R1)); |
2891 | __ add(R0, R0, Operand(R2)); |
2892 | __ add(R0, R0, Operand(R3)); |
2893 | } |
2894 | __ bx(LR); |
2895 | } |
2896 | |
2897 | ASSEMBLER_TEST_RUN(Vornq, test) { |
2898 | EXPECT(test != NULL); |
2899 | if (TargetCPUFeatures::neon_supported()) { |
2900 | typedef int (*Tst)() DART_UNUSED; |
2901 | EXPECT_EQ(60, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2902 | } |
2903 | } |
2904 | |
2905 | ASSEMBLER_TEST_GENERATE(Vorrq, assembler) { |
2906 | if (TargetCPUFeatures::neon_supported()) { |
2907 | // Q0 |
2908 | __ LoadImmediate(R0, 0xaaaaaaaa); |
2909 | __ vmovsr(S0, R0); |
2910 | __ vmovsr(S1, R0); |
2911 | __ vmovsr(S2, R0); |
2912 | __ vmovsr(S3, R0); |
2913 | |
2914 | // Q1 |
2915 | __ LoadImmediate(R0, 0x55555555); |
2916 | __ vmovsr(S4, R0); |
2917 | __ vmovsr(S5, R0); |
2918 | __ vmovsr(S6, R0); |
2919 | __ vmovsr(S7, R0); |
2920 | |
2921 | // Q2 = -1 -1 -1 -1 |
2922 | __ vorrq(Q2, Q1, Q0); |
2923 | |
2924 | __ vmovrs(R0, S8); |
2925 | __ vmovrs(R1, S9); |
2926 | __ vmovrs(R2, S10); |
2927 | __ vmovrs(R3, S11); |
2928 | |
2929 | __ add(R0, R0, Operand(R1)); |
2930 | __ add(R0, R0, Operand(R2)); |
2931 | __ add(R0, R0, Operand(R3)); |
2932 | } |
2933 | __ bx(LR); |
2934 | } |
2935 | |
2936 | ASSEMBLER_TEST_RUN(Vorrq, test) { |
2937 | EXPECT(test != NULL); |
2938 | if (TargetCPUFeatures::neon_supported()) { |
2939 | typedef int (*Tst)() DART_UNUSED; |
2940 | EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2941 | } |
2942 | } |
2943 | |
2944 | ASSEMBLER_TEST_GENERATE(Vandq, assembler) { |
2945 | if (TargetCPUFeatures::neon_supported()) { |
2946 | // Q0 |
2947 | __ LoadImmediate(R0, 0xaaaaaaab); |
2948 | __ vmovsr(S0, R0); |
2949 | __ vmovsr(S1, R0); |
2950 | __ vmovsr(S2, R0); |
2951 | __ vmovsr(S3, R0); |
2952 | |
2953 | // Q1 |
2954 | __ LoadImmediate(R0, 0x55555555); |
2955 | __ vmovsr(S4, R0); |
2956 | __ vmovsr(S5, R0); |
2957 | __ vmovsr(S6, R0); |
2958 | __ vmovsr(S7, R0); |
2959 | |
2960 | // Q2 = 1 1 1 1 |
2961 | __ vandq(Q2, Q1, Q0); |
2962 | |
2963 | __ vmovrs(R0, S8); |
2964 | __ vmovrs(R1, S9); |
2965 | __ vmovrs(R2, S10); |
2966 | __ vmovrs(R3, S11); |
2967 | |
2968 | __ add(R0, R0, Operand(R1)); |
2969 | __ add(R0, R0, Operand(R2)); |
2970 | __ add(R0, R0, Operand(R3)); |
2971 | } |
2972 | __ bx(LR); |
2973 | } |
2974 | |
2975 | ASSEMBLER_TEST_RUN(Vandq, test) { |
2976 | EXPECT(test != NULL); |
2977 | if (TargetCPUFeatures::neon_supported()) { |
2978 | typedef int (*Tst)() DART_UNUSED; |
2979 | EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
2980 | } |
2981 | } |
2982 | |
2983 | ASSEMBLER_TEST_GENERATE(Vmovq, assembler) { |
2984 | if (TargetCPUFeatures::neon_supported()) { |
2985 | // Q0 |
2986 | __ LoadSImmediate(S0, 1.0); |
2987 | __ vmovs(S1, S0); |
2988 | __ vmovs(S2, S0); |
2989 | __ vmovs(S3, S0); |
2990 | |
    // Q1
2992 | __ LoadSImmediate(S4, -1.0); |
2993 | __ vmovs(S5, S0); |
2994 | __ vmovs(S6, S0); |
2995 | __ vmovs(S7, S0); |
2996 | |
    // Q1 = Q0
2998 | __ vmovq(Q1, Q0); |
2999 | |
3000 | __ vadds(S4, S4, S5); |
3001 | __ vadds(S4, S4, S6); |
3002 | __ vadds(S4, S4, S7); |
3003 | __ vcvtis(S0, S4); |
3004 | __ vmovrs(R0, S0); |
3005 | } |
3006 | __ bx(LR); |
3007 | } |
3008 | |
3009 | ASSEMBLER_TEST_RUN(Vmovq, test) { |
3010 | EXPECT(test != NULL); |
3011 | if (TargetCPUFeatures::neon_supported()) { |
3012 | typedef int (*Tst)() DART_UNUSED; |
3013 | EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3014 | } |
3015 | } |
3016 | |
3017 | ASSEMBLER_TEST_GENERATE(Vmvnq, assembler) { |
3018 | if (TargetCPUFeatures::neon_supported()) { |
3019 | __ LoadImmediate(R1, 42); // R1 <- 42. |
3020 | __ vmovsr(S2, R1); // S2 <- R1. |
3021 | __ vmvnq(Q1, Q0); // Q1 <- ~Q0. |
3022 | __ vmvnq(Q2, Q1); // Q2 <- ~Q1. |
3023 | __ vmovrs(R0, S10); // Now R0 should be 42 again. |
3024 | } |
3025 | __ bx(LR); |
3026 | } |
3027 | |
3028 | ASSEMBLER_TEST_RUN(Vmvnq, test) { |
3029 | EXPECT(test != NULL); |
3030 | if (TargetCPUFeatures::neon_supported()) { |
3031 | typedef int (*Tst)() DART_UNUSED; |
3032 | EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3033 | } |
3034 | } |
3035 | |
3036 | ASSEMBLER_TEST_GENERATE(Vdupb, assembler) { |
3037 | if (TargetCPUFeatures::neon_supported()) { |
3038 | __ LoadImmediate(R0, 0x00000000); |
3039 | __ LoadImmediate(R1, 0x00ff0000); |
3040 | __ vmovsr(S4, R0); |
3041 | __ vmovsr(S5, R1); |
3042 | |
3043 | // Should copy 0xff to each byte of Q0. |
3044 | __ vdup(kByte, Q0, D2, 6); |
3045 | |
3046 | __ vmovrs(R0, S0); |
3047 | __ vmovrs(R1, S1); |
3048 | __ vmovrs(R2, S2); |
3049 | __ vmovrs(R3, S3); |
3050 | |
3051 | __ add(R0, R0, Operand(R1)); |
3052 | __ add(R0, R0, Operand(R2)); |
3053 | __ add(R0, R0, Operand(R3)); |
3054 | } |
3055 | __ bx(LR); |
3056 | } |
3057 | |
3058 | ASSEMBLER_TEST_RUN(Vdupb, test) { |
3059 | EXPECT(test != NULL); |
3060 | if (TargetCPUFeatures::neon_supported()) { |
3061 | typedef int (*Tst)() DART_UNUSED; |
3062 | EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3063 | } |
3064 | } |
3065 | |
3066 | ASSEMBLER_TEST_GENERATE(Vduph, assembler) { |
3067 | if (TargetCPUFeatures::neon_supported()) { |
3068 | __ LoadImmediate(R0, 0xffff0000); |
3069 | __ LoadImmediate(R1, 0x00000000); |
3070 | __ vmovsr(S4, R0); |
3071 | __ vmovsr(S5, R1); |
3072 | |
    // Should copy 0xffff to each halfword of Q0.
3074 | __ vdup(kHalfword, Q0, D2, 1); |
3075 | |
3076 | __ vmovrs(R0, S0); |
3077 | __ vmovrs(R1, S1); |
3078 | __ vmovrs(R2, S2); |
3079 | __ vmovrs(R3, S3); |
3080 | |
3081 | __ add(R0, R0, Operand(R1)); |
3082 | __ add(R0, R0, Operand(R2)); |
3083 | __ add(R0, R0, Operand(R3)); |
3084 | } |
3085 | __ bx(LR); |
3086 | } |
3087 | |
3088 | ASSEMBLER_TEST_RUN(Vduph, test) { |
3089 | EXPECT(test != NULL); |
3090 | if (TargetCPUFeatures::neon_supported()) { |
3091 | typedef int (*Tst)() DART_UNUSED; |
3092 | EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3093 | } |
3094 | } |
3095 | |
3096 | ASSEMBLER_TEST_GENERATE(Vdupw, assembler) { |
3097 | if (TargetCPUFeatures::neon_supported()) { |
3098 | __ LoadImmediate(R0, 0x00000000); |
3099 | __ LoadImmediate(R1, 0xffffffff); |
3100 | __ vmovsr(S4, R0); |
3101 | __ vmovsr(S5, R1); |
3102 | |
    // Should copy 0xffffffff to each word of Q0.
3104 | __ vdup(kWord, Q0, D2, 1); |
3105 | |
3106 | __ vmovrs(R0, S0); |
3107 | __ vmovrs(R1, S1); |
3108 | __ vmovrs(R2, S2); |
3109 | __ vmovrs(R3, S3); |
3110 | |
3111 | __ add(R0, R0, Operand(R1)); |
3112 | __ add(R0, R0, Operand(R2)); |
3113 | __ add(R0, R0, Operand(R3)); |
3114 | } |
3115 | __ bx(LR); |
3116 | } |
3117 | |
3118 | ASSEMBLER_TEST_RUN(Vdupw, test) { |
3119 | EXPECT(test != NULL); |
3120 | if (TargetCPUFeatures::neon_supported()) { |
3121 | typedef int (*Tst)() DART_UNUSED; |
3122 | EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3123 | } |
3124 | } |
3125 | |
3126 | ASSEMBLER_TEST_GENERATE(Vzipqw, assembler) { |
3127 | if (TargetCPUFeatures::neon_supported()) { |
3128 | __ LoadSImmediate(S0, 0.0); |
3129 | __ LoadSImmediate(S1, 1.0); |
3130 | __ LoadSImmediate(S2, 2.0); |
3131 | __ LoadSImmediate(S3, 3.0); |
3132 | __ LoadSImmediate(S4, 4.0); |
3133 | __ LoadSImmediate(S5, 5.0); |
3134 | __ LoadSImmediate(S6, 6.0); |
3135 | __ LoadSImmediate(S7, 7.0); |
3136 | |
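    // vzip.32 interleaves in place: Q0 = {0, 4, 1, 5}, Q1 = {2, 6, 3, 7}.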
3137 | __ vzipqw(Q0, Q1); |
3138 | |
3139 | __ vsubqs(Q0, Q1, Q0); |
3140 | |
3141 | __ vadds(S0, S0, S1); |
3142 | __ vadds(S0, S0, S2); |
3143 | __ vadds(S0, S0, S3); |
3144 | } |
3145 | __ bx(LR); |
3146 | } |
3147 | |
3148 | ASSEMBLER_TEST_RUN(Vzipqw, test) { |
3149 | EXPECT(test != NULL); |
3150 | if (TargetCPUFeatures::neon_supported()) { |
3151 | typedef float (*Vzipqw)() DART_UNUSED; |
3152 | float res = EXECUTE_TEST_CODE_FLOAT(Vzipqw, test->entry()); |
3153 | EXPECT_FLOAT_EQ(8.0, res, 0.0001f); |
3154 | } |
3155 | } |
3156 | |
3157 | ASSEMBLER_TEST_GENERATE(Vceqqi32, assembler) { |
3158 | if (TargetCPUFeatures::neon_supported()) { |
3159 | __ mov(R0, Operand(1)); |
3160 | __ vmovsr(S0, R0); |
3161 | __ mov(R0, Operand(2)); |
3162 | __ vmovsr(S1, R0); |
3163 | __ mov(R0, Operand(3)); |
3164 | __ vmovsr(S2, R0); |
3165 | __ mov(R0, Operand(4)); |
3166 | __ vmovsr(S3, R0); |
3167 | __ mov(R0, Operand(1)); |
3168 | __ vmovsr(S4, R0); |
3169 | __ mov(R0, Operand(20)); |
3170 | __ vmovsr(S5, R0); |
3171 | __ mov(R0, Operand(3)); |
3172 | __ vmovsr(S6, R0); |
3173 | __ mov(R0, Operand(40)); |
3174 | __ vmovsr(S7, R0); |
3175 | |
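    // Word-wise equality: lanes 0 and 2 match, so Q2 = {-1, 0, -1, 0} (sum -2).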
3176 | __ vceqqi(kWord, Q2, Q1, Q0); |
3177 | |
3178 | __ vmovrs(R0, S8); |
3179 | __ vmovrs(R1, S9); |
3180 | __ vmovrs(R2, S10); |
3181 | __ vmovrs(R3, S11); |
3182 | |
3183 | __ add(R0, R0, Operand(R1)); |
3184 | __ add(R0, R0, Operand(R2)); |
3185 | __ add(R0, R0, Operand(R3)); |
3186 | } |
3187 | __ bx(LR); |
3188 | } |
3189 | |
3190 | ASSEMBLER_TEST_RUN(Vceqqi32, test) { |
3191 | EXPECT(test != NULL); |
3192 | if (TargetCPUFeatures::neon_supported()) { |
3193 | typedef int (*Tst)() DART_UNUSED; |
3194 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3195 | } |
3196 | } |
3197 | |
3198 | ASSEMBLER_TEST_GENERATE(Vceqqs, assembler) { |
3199 | if (TargetCPUFeatures::neon_supported()) { |
3200 | __ LoadSImmediate(S0, 1.0); |
3201 | __ LoadSImmediate(S1, 2.0); |
3202 | __ LoadSImmediate(S2, 3.0); |
3203 | __ LoadSImmediate(S3, 4.0); |
3204 | __ LoadSImmediate(S4, 1.0); |
3205 | __ LoadSImmediate(S5, 4.0); |
3206 | __ LoadSImmediate(S6, 3.0); |
3207 | __ LoadSImmediate(S7, 8.0); |
3208 | |
3209 | __ vceqqs(Q2, Q1, Q0); |
3210 | |
3211 | __ vmovrs(R0, S8); |
3212 | __ vmovrs(R1, S9); |
3213 | __ vmovrs(R2, S10); |
3214 | __ vmovrs(R3, S11); |
3215 | |
3216 | __ add(R0, R0, Operand(R1)); |
3217 | __ add(R0, R0, Operand(R2)); |
3218 | __ add(R0, R0, Operand(R3)); |
3219 | } |
3220 | __ bx(LR); |
3221 | } |
3222 | |
3223 | ASSEMBLER_TEST_RUN(Vceqqs, test) { |
3224 | EXPECT(test != NULL); |
3225 | if (TargetCPUFeatures::neon_supported()) { |
3226 | typedef int (*Tst)() DART_UNUSED; |
3227 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3228 | } |
3229 | } |
3230 | |
3231 | ASSEMBLER_TEST_GENERATE(Vcgeqi32, assembler) { |
3232 | if (TargetCPUFeatures::neon_supported()) { |
3233 | __ mov(R0, Operand(1)); |
3234 | __ vmovsr(S0, R0); |
3235 | __ mov(R0, Operand(2)); |
3236 | __ vmovsr(S1, R0); |
3237 | __ mov(R0, Operand(3)); |
3238 | __ vmovsr(S2, R0); |
3239 | __ mov(R0, Operand(4)); |
3240 | __ vmovsr(S3, R0); |
3241 | __ mov(R0, Operand(1)); |
3242 | __ vmovsr(S4, R0); |
3243 | __ mov(R0, Operand(1)); |
3244 | __ vmovsr(S5, R0); |
3245 | __ mov(R0, Operand(3)); |
3246 | __ vmovsr(S6, R0); |
3247 | __ mov(R0, Operand(1)); |
3248 | __ vmovsr(S7, R0); |
3249 | |
3250 | __ vcgeqi(kWord, Q2, Q1, Q0); |
3251 | |
3252 | __ vmovrs(R0, S8); |
3253 | __ vmovrs(R1, S9); |
3254 | __ vmovrs(R2, S10); |
3255 | __ vmovrs(R3, S11); |
3256 | |
3257 | __ add(R0, R0, Operand(R1)); |
3258 | __ add(R0, R0, Operand(R2)); |
3259 | __ add(R0, R0, Operand(R3)); |
3260 | } |
3261 | __ bx(LR); |
3262 | } |
3263 | |
3264 | ASSEMBLER_TEST_RUN(Vcgeqi32, test) { |
3265 | EXPECT(test != NULL); |
3266 | if (TargetCPUFeatures::neon_supported()) { |
3267 | typedef int (*Tst)() DART_UNUSED; |
3268 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3269 | } |
3270 | } |
3271 | |
3272 | ASSEMBLER_TEST_GENERATE(Vcugeqi32, assembler) { |
3273 | if (TargetCPUFeatures::neon_supported()) { |
3274 | __ mov(R0, Operand(1)); |
3275 | __ vmovsr(S0, R0); |
3276 | __ mov(R0, Operand(2)); |
3277 | __ vmovsr(S1, R0); |
3278 | __ mov(R0, Operand(3)); |
3279 | __ vmovsr(S2, R0); |
3280 | __ mov(R0, Operand(4)); |
3281 | __ vmovsr(S3, R0); |
3282 | __ LoadImmediate(R0, -1); |
3283 | __ vmovsr(S4, R0); |
3284 | __ mov(R0, Operand(1)); |
3285 | __ vmovsr(S5, R0); |
3286 | __ LoadImmediate(R0, -3); |
3287 | __ vmovsr(S6, R0); |
3288 | __ mov(R0, Operand(1)); |
3289 | __ vmovsr(S7, R0); |
3290 | |
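    // Unsigned compare: -1 and -3 are large unsigned values, so lanes 0 and 2
    // satisfy >=, giving Q2 = {-1, 0, -1, 0} (sum -2).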
3291 | __ vcugeqi(kWord, Q2, Q1, Q0); |
3292 | |
3293 | __ vmovrs(R0, S8); |
3294 | __ vmovrs(R1, S9); |
3295 | __ vmovrs(R2, S10); |
3296 | __ vmovrs(R3, S11); |
3297 | |
3298 | __ add(R0, R0, Operand(R1)); |
3299 | __ add(R0, R0, Operand(R2)); |
3300 | __ add(R0, R0, Operand(R3)); |
3301 | } |
3302 | __ bx(LR); |
3303 | } |
3304 | |
3305 | ASSEMBLER_TEST_RUN(Vcugeqi32, test) { |
3306 | EXPECT(test != NULL); |
3307 | if (TargetCPUFeatures::neon_supported()) { |
3308 | typedef int (*Tst)() DART_UNUSED; |
3309 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3310 | } |
3311 | } |
3312 | |
3313 | ASSEMBLER_TEST_GENERATE(Vcgeqs, assembler) { |
3314 | if (TargetCPUFeatures::neon_supported()) { |
3315 | __ LoadSImmediate(S0, 1.0); |
3316 | __ LoadSImmediate(S1, 2.0); |
3317 | __ LoadSImmediate(S2, 3.0); |
3318 | __ LoadSImmediate(S3, 4.0); |
3319 | __ LoadSImmediate(S4, 1.0); |
3320 | __ LoadSImmediate(S5, 1.0); |
3321 | __ LoadSImmediate(S6, 3.0); |
3322 | __ LoadSImmediate(S7, 1.0); |
3323 | |
3324 | __ vcgeqs(Q2, Q1, Q0); |
3325 | |
3326 | __ vmovrs(R0, S8); |
3327 | __ vmovrs(R1, S9); |
3328 | __ vmovrs(R2, S10); |
3329 | __ vmovrs(R3, S11); |
3330 | |
3331 | __ add(R0, R0, Operand(R1)); |
3332 | __ add(R0, R0, Operand(R2)); |
3333 | __ add(R0, R0, Operand(R3)); |
3334 | } |
3335 | __ bx(LR); |
3336 | } |
3337 | |
3338 | ASSEMBLER_TEST_RUN(Vcgeqs, test) { |
3339 | EXPECT(test != NULL); |
3340 | if (TargetCPUFeatures::neon_supported()) { |
3341 | typedef int (*Tst)() DART_UNUSED; |
3342 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3343 | } |
3344 | } |
3345 | |
3346 | ASSEMBLER_TEST_GENERATE(Vcgtqi32, assembler) { |
3347 | if (TargetCPUFeatures::neon_supported()) { |
3348 | __ mov(R0, Operand(1)); |
3349 | __ vmovsr(S0, R0); |
3350 | __ mov(R0, Operand(2)); |
3351 | __ vmovsr(S1, R0); |
3352 | __ mov(R0, Operand(3)); |
3353 | __ vmovsr(S2, R0); |
3354 | __ mov(R0, Operand(4)); |
3355 | __ vmovsr(S3, R0); |
3356 | __ mov(R0, Operand(2)); |
3357 | __ vmovsr(S4, R0); |
3358 | __ mov(R0, Operand(1)); |
3359 | __ vmovsr(S5, R0); |
3360 | __ mov(R0, Operand(4)); |
3361 | __ vmovsr(S6, R0); |
3362 | __ mov(R0, Operand(1)); |
3363 | __ vmovsr(S7, R0); |
3364 | |
3365 | __ vcgtqi(kWord, Q2, Q1, Q0); |
3366 | |
3367 | __ vmovrs(R0, S8); |
3368 | __ vmovrs(R1, S9); |
3369 | __ vmovrs(R2, S10); |
3370 | __ vmovrs(R3, S11); |
3371 | |
3372 | __ add(R0, R0, Operand(R1)); |
3373 | __ add(R0, R0, Operand(R2)); |
3374 | __ add(R0, R0, Operand(R3)); |
3375 | } |
3376 | __ bx(LR); |
3377 | } |
3378 | |
3379 | ASSEMBLER_TEST_RUN(Vcgtqi32, test) { |
3380 | EXPECT(test != NULL); |
3381 | if (TargetCPUFeatures::neon_supported()) { |
3382 | typedef int (*Tst)() DART_UNUSED; |
3383 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3384 | } |
3385 | } |
3386 | |
3387 | ASSEMBLER_TEST_GENERATE(Vcugtqi32, assembler) { |
3388 | if (TargetCPUFeatures::neon_supported()) { |
3389 | __ mov(R0, Operand(1)); |
3390 | __ vmovsr(S0, R0); |
3391 | __ mov(R0, Operand(2)); |
3392 | __ vmovsr(S1, R0); |
3393 | __ mov(R0, Operand(3)); |
3394 | __ vmovsr(S2, R0); |
3395 | __ mov(R0, Operand(4)); |
3396 | __ vmovsr(S3, R0); |
3397 | __ LoadImmediate(R0, -1); |
3398 | __ vmovsr(S4, R0); |
3399 | __ mov(R0, Operand(1)); |
3400 | __ vmovsr(S5, R0); |
3401 | __ LoadImmediate(R0, -3); |
3402 | __ vmovsr(S6, R0); |
3403 | __ mov(R0, Operand(1)); |
3404 | __ vmovsr(S7, R0); |
3405 | |
3406 | __ vcugtqi(kWord, Q2, Q1, Q0); |
3407 | |
3408 | __ vmovrs(R0, S8); |
3409 | __ vmovrs(R1, S9); |
3410 | __ vmovrs(R2, S10); |
3411 | __ vmovrs(R3, S11); |
3412 | |
3413 | __ add(R0, R0, Operand(R1)); |
3414 | __ add(R0, R0, Operand(R2)); |
3415 | __ add(R0, R0, Operand(R3)); |
3416 | } |
3417 | __ bx(LR); |
3418 | } |
3419 | |
3420 | ASSEMBLER_TEST_RUN(Vcugtqi32, test) { |
3421 | EXPECT(test != NULL); |
3422 | if (TargetCPUFeatures::neon_supported()) { |
3423 | typedef int (*Tst)() DART_UNUSED; |
3424 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3425 | } |
3426 | } |
3427 | |
3428 | ASSEMBLER_TEST_GENERATE(Vcgtqs, assembler) { |
3429 | if (TargetCPUFeatures::neon_supported()) { |
3430 | __ LoadSImmediate(S0, 1.0); |
3431 | __ LoadSImmediate(S1, 2.0); |
3432 | __ LoadSImmediate(S2, 3.0); |
3433 | __ LoadSImmediate(S3, 4.0); |
3434 | __ LoadSImmediate(S4, 2.0); |
3435 | __ LoadSImmediate(S5, 1.0); |
3436 | __ LoadSImmediate(S6, 4.0); |
3437 | __ LoadSImmediate(S7, 1.0); |
3438 | |
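    // Single-precision compare: Q2[i] = (Q1[i] > Q0[i]) ? -1 : 0. Lanes 0
    // and 2 are true, so the integer lanes summed below give -2.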
3439 | __ vcgtqs(Q2, Q1, Q0); |
3440 | |
3441 | __ vmovrs(R0, S8); |
3442 | __ vmovrs(R1, S9); |
3443 | __ vmovrs(R2, S10); |
3444 | __ vmovrs(R3, S11); |
3445 | |
3446 | __ add(R0, R0, Operand(R1)); |
3447 | __ add(R0, R0, Operand(R2)); |
3448 | __ add(R0, R0, Operand(R3)); |
3449 | } |
3450 | __ bx(LR); |
3451 | } |
3452 | |
3453 | ASSEMBLER_TEST_RUN(Vcgtqs, test) { |
3454 | EXPECT(test != NULL); |
3455 | if (TargetCPUFeatures::neon_supported()) { |
3456 | typedef int (*Tst)() DART_UNUSED; |
3457 | EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3458 | } |
3459 | } |
3460 | |
3461 | ASSEMBLER_TEST_GENERATE(Vminqs, assembler) { |
3462 | if (TargetCPUFeatures::neon_supported()) { |
3463 | __ LoadSImmediate(S0, 1.0); |
3464 | __ LoadSImmediate(S1, 2.0); |
3465 | __ LoadSImmediate(S2, 3.0); |
3466 | __ LoadSImmediate(S3, 4.0); |
3467 | |
3468 | __ LoadSImmediate(S4, 2.0); |
3469 | __ LoadSImmediate(S5, 1.0); |
3470 | __ LoadSImmediate(S6, 6.0); |
3471 | __ LoadSImmediate(S7, 3.0); |
3472 | |
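    // Per-lane minimum of {2, 1, 6, 3} and {1, 2, 3, 4} is {1, 1, 3, 3},
    // which sums to 8.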
3473 | __ vminqs(Q2, Q1, Q0); |
3474 | |
3475 | __ vadds(S8, S8, S9); |
3476 | __ vadds(S8, S8, S10); |
3477 | __ vadds(S8, S8, S11); |
3478 | |
3479 | __ vcvtis(S0, S8); |
3480 | __ vmovrs(R0, S0); |
3481 | } |
3482 | __ bx(LR); |
3483 | } |
3484 | |
3485 | ASSEMBLER_TEST_RUN(Vminqs, test) { |
3486 | EXPECT(test != NULL); |
3487 | if (TargetCPUFeatures::neon_supported()) { |
3488 | typedef int (*Tst)() DART_UNUSED; |
3489 | EXPECT_EQ(8, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3490 | } |
3491 | } |
3492 | |
3493 | ASSEMBLER_TEST_GENERATE(Vmaxqs, assembler) { |
3494 | if (TargetCPUFeatures::neon_supported()) { |
3495 | __ LoadSImmediate(S0, 1.0); |
3496 | __ LoadSImmediate(S1, 2.0); |
3497 | __ LoadSImmediate(S2, 3.0); |
3498 | __ LoadSImmediate(S3, 4.0); |
3499 | |
3500 | __ LoadSImmediate(S4, 2.0); |
3501 | __ LoadSImmediate(S5, 1.0); |
3502 | __ LoadSImmediate(S6, 6.0); |
3503 | __ LoadSImmediate(S7, 3.0); |
3504 | |
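    // Per-lane maximum of {2, 1, 6, 3} and {1, 2, 3, 4} is {2, 2, 6, 4},
    // which sums to 14.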
3505 | __ vmaxqs(Q2, Q1, Q0); |
3506 | |
3507 | __ vadds(S8, S8, S9); |
3508 | __ vadds(S8, S8, S10); |
3509 | __ vadds(S8, S8, S11); |
3510 | |
3511 | __ vcvtis(S0, S8); |
3512 | __ vmovrs(R0, S0); |
3513 | } |
3514 | __ bx(LR); |
3515 | } |
3516 | |
3517 | ASSEMBLER_TEST_RUN(Vmaxqs, test) { |
3518 | EXPECT(test != NULL); |
3519 | if (TargetCPUFeatures::neon_supported()) { |
3520 | typedef int (*Tst)() DART_UNUSED; |
3521 | EXPECT_EQ(14, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
3522 | } |
3523 | } |
3524 | |
3525 | ASSEMBLER_TEST_GENERATE(Vrecpeqs, assembler) { |
3526 | if (TargetCPUFeatures::neon_supported()) { |
3527 | __ LoadSImmediate(S4, 147.0); |
3528 | __ vmovs(S5, S4); |
3529 | __ vmovs(S6, S4); |
3530 | __ vmovs(S7, S4); |
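    // vrecpeqs gives a low-precision per-lane estimate of 1/147.0; the run
    // below compares lane 0 (S0) against the ReciprocalEstimate helper.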
3531 | __ vrecpeqs(Q0, Q1); |
3532 | } |
3533 | __ bx(LR); |
3534 | } |
3535 | |
3536 | ASSEMBLER_TEST_RUN(Vrecpeqs, test) { |
3537 | EXPECT(test != NULL); |
3538 | if (TargetCPUFeatures::neon_supported()) { |
3539 | typedef float (*Vrecpeqs)() DART_UNUSED; |
3540 | float res = EXECUTE_TEST_CODE_FLOAT(Vrecpeqs, test->entry()); |
3541 | EXPECT_FLOAT_EQ(ReciprocalEstimate(147.0), res, 0.0001f); |
3542 | } |
3543 | } |
3544 | |
3545 | ASSEMBLER_TEST_GENERATE(Vrecpsqs, assembler) { |
3546 | if (TargetCPUFeatures::neon_supported()) { |
3547 | __ LoadSImmediate(S4, 5.0); |
3548 | __ LoadSImmediate(S5, 2.0); |
3549 | __ LoadSImmediate(S6, 3.0); |
3550 | __ LoadSImmediate(S7, 4.0); |
3551 | |
3552 | __ LoadSImmediate(S8, 10.0); |
3553 | __ LoadSImmediate(S9, 1.0); |
3554 | __ LoadSImmediate(S10, 6.0); |
3555 | __ LoadSImmediate(S11, 3.0); |
3556 | |
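    // vrecpsqs computes 2.0 - Q1[i] * Q2[i] per lane (the Newton-Raphson
    // correction factor for a reciprocal), so lane 0 is 2.0 - 5.0 * 10.0.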
3557 | __ vrecpsqs(Q0, Q1, Q2); |
3558 | } |
3559 | __ bx(LR); |
3560 | } |
3561 | |
3562 | ASSEMBLER_TEST_RUN(Vrecpsqs, test) { |
3563 | EXPECT(test != NULL); |
3564 | if (TargetCPUFeatures::neon_supported()) { |
3565 | typedef float (*Vrecpsqs)() DART_UNUSED; |
3566 | float res = EXECUTE_TEST_CODE_FLOAT(Vrecpsqs, test->entry()); |
3567 | EXPECT_FLOAT_EQ(2.0 - 10.0 * 5.0, res, 0.0001f); |
3568 | } |
3569 | } |
3570 | |
3571 | ASSEMBLER_TEST_GENERATE(Reciprocal, assembler) { |
3572 | if (TargetCPUFeatures::neon_supported()) { |
3573 | __ LoadSImmediate(S4, 147000.0); |
3574 | __ vmovs(S5, S4); |
3575 | __ vmovs(S6, S4); |
3576 | __ vmovs(S7, S4); |
3577 | |
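    // Newton-Raphson iteration for 1/d: xn+1 = xn * (2 - d * xn), where
    // vrecpsqs produces the (2 - d * xn) factor and vmulqs applies it.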
3578 | // Reciprocal estimate. |
3579 | __ vrecpeqs(Q0, Q1); |
3580 | // 2 Newton-Raphson steps. |
3581 | __ vrecpsqs(Q2, Q1, Q0); |
3582 | __ vmulqs(Q0, Q0, Q2); |
3583 | __ vrecpsqs(Q2, Q1, Q0); |
3584 | __ vmulqs(Q0, Q0, Q2); |
3585 | } |
3586 | __ bx(LR); |
3587 | } |
3588 | |
3589 | ASSEMBLER_TEST_RUN(Reciprocal, test) { |
3590 | EXPECT(test != NULL); |
3591 | if (TargetCPUFeatures::neon_supported()) { |
3592 | typedef float (*Reciprocal)() DART_UNUSED; |
3593 | float res = EXECUTE_TEST_CODE_FLOAT(Reciprocal, test->entry()); |
3594 | EXPECT_FLOAT_EQ(1.0 / 147000.0, res, 0.0001f); |
3595 | } |
3596 | } |
3597 | |
3598 | ASSEMBLER_TEST_GENERATE(Vrsqrteqs, assembler) { |
3599 | if (TargetCPUFeatures::neon_supported()) { |
3600 | __ LoadSImmediate(S4, 147.0); |
3601 | __ vmovs(S5, S4); |
3602 | __ vmovs(S6, S4); |
3603 | __ vmovs(S7, S4); |
3604 | |
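    // vrsqrteqs gives a low-precision per-lane estimate of 1/sqrt(147.0);
    // the run below compares lane 0 against ReciprocalSqrtEstimate.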
3605 | __ vrsqrteqs(Q0, Q1); |
3606 | } |
3607 | __ bx(LR); |
3608 | } |
3609 | |
3610 | ASSEMBLER_TEST_RUN(Vrsqrteqs, test) { |
3611 | EXPECT(test != NULL); |
3612 | if (TargetCPUFeatures::neon_supported()) { |
3613 | typedef float (*Vrsqrteqs)() DART_UNUSED; |
3614 | float res = EXECUTE_TEST_CODE_FLOAT(Vrsqrteqs, test->entry()); |
3615 | EXPECT_FLOAT_EQ(ReciprocalSqrtEstimate(147.0), res, 0.0001f); |
3616 | } |
3617 | } |
3618 | |
3619 | ASSEMBLER_TEST_GENERATE(Vrsqrtsqs, assembler) { |
3620 | if (TargetCPUFeatures::neon_supported()) { |
3621 | __ LoadSImmediate(S4, 5.0); |
3622 | __ LoadSImmediate(S5, 2.0); |
3623 | __ LoadSImmediate(S6, 3.0); |
3624 | __ LoadSImmediate(S7, 4.0); |
3625 | |
3626 | __ LoadSImmediate(S8, 10.0); |
3627 | __ LoadSImmediate(S9, 1.0); |
3628 | __ LoadSImmediate(S10, 6.0); |
3629 | __ LoadSImmediate(S11, 3.0); |
3630 | |
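    // vrsqrtsqs computes (3.0 - Q1[i] * Q2[i]) / 2.0 per lane (the
    // Newton-Raphson factor for a reciprocal square root), so lane 0 is
    // (3.0 - 5.0 * 10.0) / 2.0.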
3631 | __ vrsqrtsqs(Q0, Q1, Q2); |
3632 | } |
3633 | __ bx(LR); |
3634 | } |
3635 | |
3636 | ASSEMBLER_TEST_RUN(Vrsqrtsqs, test) { |
3637 | EXPECT(test != NULL); |
3638 | if (TargetCPUFeatures::neon_supported()) { |
3639 | typedef float (*Vrsqrtsqs)() DART_UNUSED; |
3640 | float res = EXECUTE_TEST_CODE_FLOAT(Vrsqrtsqs, test->entry()); |
3641 | EXPECT_FLOAT_EQ((3.0 - 10.0 * 5.0) / 2.0, res, 0.0001f); |
3642 | } |
3643 | } |
3644 | |
3645 | ASSEMBLER_TEST_GENERATE(ReciprocalSqrt, assembler) { |
3646 | if (TargetCPUFeatures::neon_supported()) { |
3647 | __ LoadSImmediate(S4, 147000.0); |
3648 | __ vmovs(S5, S4); |
3649 | __ vmovs(S6, S4); |
3650 | __ vmovs(S7, S4); |
3651 | |
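    // The hardware estimate is only a rough approximation; each
    // Newton-Raphson step roughly doubles the number of correct bits, so
    // two steps are enough to approach single-precision accuracy.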
3652 | // Reciprocal square root estimate. |
3653 | __ vrsqrteqs(Q0, Q1); |
3654 | // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2. |
3655 | // First step. |
3656 | __ vmulqs(Q2, Q0, Q0); // Q2 <- xn^2 |
3657 | __ vrsqrtsqs(Q2, Q1, Q2); // Q2 <- (3 - Q1*Q2) / 2. |
3658 | __ vmulqs(Q0, Q0, Q2); // xn+1 <- xn * Q2 |
3659 | // Second step. |
3660 | __ vmulqs(Q2, Q0, Q0); |
3661 | __ vrsqrtsqs(Q2, Q1, Q2); |
3662 | __ vmulqs(Q0, Q0, Q2); |
3663 | } |
3664 | __ bx(LR); |
3665 | } |
3666 | |
3667 | ASSEMBLER_TEST_RUN(ReciprocalSqrt, test) { |
3668 | EXPECT(test != NULL); |
3669 | if (TargetCPUFeatures::neon_supported()) { |
3670 | typedef float (*ReciprocalSqrt)() DART_UNUSED; |
3671 | float res = EXECUTE_TEST_CODE_FLOAT(ReciprocalSqrt, test->entry()); |
3672 | EXPECT_FLOAT_EQ(1.0 / sqrt(147000.0), res, 0.0001f); |
3673 | } |
3674 | } |
3675 | |
3676 | ASSEMBLER_TEST_GENERATE(SIMDSqrt, assembler) { |
3677 | if (TargetCPUFeatures::neon_supported()) { |
3678 | __ LoadSImmediate(S4, 147000.0); |
3679 | __ vmovs(S5, S4); |
3680 | __ vmovs(S6, S4); |
3681 | __ vmovs(S7, S4); |
3682 | |
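    // sqrt(x) is computed as 1 / (1/sqrt(x)): refine a reciprocal square
    // root estimate with two Newton-Raphson steps, then refine a
    // reciprocal estimate of that result in the same way.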
3683 | // Reciprocal square root estimate. |
3684 | __ vrsqrteqs(Q0, Q1); |
3685 | // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2. |
3686 | // First step. |
3687 | __ vmulqs(Q2, Q0, Q0); // Q2 <- xn^2 |
3688 | __ vrsqrtsqs(Q2, Q1, Q2); // Q2 <- (3 - Q1*Q2) / 2. |
3689 | __ vmulqs(Q0, Q0, Q2); // xn+1 <- xn * Q2 |
3690 | // Second step. |
3691 | __ vmulqs(Q2, Q0, Q0); |
3692 | __ vrsqrtsqs(Q2, Q1, Q2); |
3693 | __ vmulqs(Q0, Q0, Q2); |
3694 | |
3695 | // Reciprocal. |
3696 | __ vmovq(Q1, Q0); |
3697 | // Reciprocal estimate. |
3698 | __ vrecpeqs(Q0, Q1); |
3699 | // 2 Newton-Raphson steps. |
3700 | __ vrecpsqs(Q2, Q1, Q0); |
3701 | __ vmulqs(Q0, Q0, Q2); |
3702 | __ vrecpsqs(Q2, Q1, Q0); |
3703 | __ vmulqs(Q0, Q0, Q2); |
3704 | } |
3705 | __ bx(LR); |
3706 | } |
3707 | |
3708 | ASSEMBLER_TEST_RUN(SIMDSqrt, test) { |
3709 | EXPECT(test != NULL); |
3710 | if (TargetCPUFeatures::neon_supported()) { |
3711 | typedef float (*SIMDSqrt)() DART_UNUSED; |
3712 | float res = EXECUTE_TEST_CODE_FLOAT(SIMDSqrt, test->entry()); |
3713 | EXPECT_FLOAT_EQ(sqrt(147000.0), res, 0.0001f); |
3714 | } |
3715 | } |
3716 | |
3717 | ASSEMBLER_TEST_GENERATE(SIMDSqrt2, assembler) { |
3718 | if (TargetCPUFeatures::neon_supported()) { |
3719 | __ LoadSImmediate(S4, 1.0); |
3720 | __ LoadSImmediate(S5, 4.0); |
3721 | __ LoadSImmediate(S6, 9.0); |
3722 | __ LoadSImmediate(S7, 16.0); |
3723 | |
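    // Computes sqrt per lane as 1 / (1/sqrt(x)), giving {1, 2, 3, 4}; the
    // final adds collapse the lanes into the expected sum of 10.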
3724 | // Reciprocal square root estimate. |
3725 | __ vrsqrteqs(Q0, Q1); |
3726 | // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2. |
3727 | // First step. |
3728 | __ vmulqs(Q2, Q0, Q0); // Q2 <- xn^2 |
3729 | __ vrsqrtsqs(Q2, Q1, Q2); // Q2 <- (3 - Q1*Q2) / 2. |
3730 | __ vmulqs(Q0, Q0, Q2); // xn+1 <- xn * Q2 |
3731 | // Second step. |
3732 | __ vmulqs(Q2, Q0, Q0); |
3733 | __ vrsqrtsqs(Q2, Q1, Q2); |
3734 | __ vmulqs(Q0, Q0, Q2); |
3735 | |
3736 | // Reciprocal. |
3737 | __ vmovq(Q1, Q0); |
3738 | // Reciprocal estimate. |
3739 | __ vrecpeqs(Q0, Q1); |
3740 | // 2 Newton-Raphson steps. |
3741 | __ vrecpsqs(Q2, Q1, Q0); |
3742 | __ vmulqs(Q0, Q0, Q2); |
3743 | __ vrecpsqs(Q2, Q1, Q0); |
3744 | __ vmulqs(Q0, Q0, Q2); |
3745 | |
3746 | __ vadds(S0, S0, S1); |
3747 | __ vadds(S0, S0, S2); |
3748 | __ vadds(S0, S0, S3); |
3749 | } |
3750 | __ bx(LR); |
3751 | } |
3752 | |
3753 | ASSEMBLER_TEST_RUN(SIMDSqrt2, test) { |
3754 | EXPECT(test != NULL); |
3755 | if (TargetCPUFeatures::neon_supported()) { |
3756 | typedef float (*SIMDSqrt2)() DART_UNUSED; |
3757 | float res = EXECUTE_TEST_CODE_FLOAT(SIMDSqrt2, test->entry()); |
3758 | EXPECT_FLOAT_EQ(10.0, res, 0.0001f); |
3759 | } |
3760 | } |
3761 | |
3762 | ASSEMBLER_TEST_GENERATE(SIMDDiv, assembler) { |
3763 | if (TargetCPUFeatures::neon_supported()) { |
3764 | __ LoadSImmediate(S4, 1.0); |
3765 | __ LoadSImmediate(S5, 4.0); |
3766 | __ LoadSImmediate(S6, 9.0); |
3767 | __ LoadSImmediate(S7, 16.0); |
3768 | |
3769 | __ LoadSImmediate(S12, 4.0); |
3770 | __ LoadSImmediate(S13, 16.0); |
3771 | __ LoadSImmediate(S14, 36.0); |
3772 | __ LoadSImmediate(S15, 64.0); |
3773 | |
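    // Division is implemented as multiplication by a refined reciprocal:
    // Q3 / Q1 = Q3 * (1/Q1) = {4, 4, 4, 4}, which sums to 16 below.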
3774 | // Reciprocal estimate. |
3775 | __ vrecpeqs(Q0, Q1); |
3776 | // 2 Newton-Raphson steps. |
3777 | __ vrecpsqs(Q2, Q1, Q0); |
3778 | __ vmulqs(Q0, Q0, Q2); |
3779 | __ vrecpsqs(Q2, Q1, Q0); |
3780 | __ vmulqs(Q0, Q0, Q2); |
3781 | |
3782 | __ vmulqs(Q0, Q3, Q0); |
3783 | __ vadds(S0, S0, S1); |
3784 | __ vadds(S0, S0, S2); |
3785 | __ vadds(S0, S0, S3); |
3786 | } |
3787 | __ bx(LR); |
3788 | } |
3789 | |
3790 | ASSEMBLER_TEST_RUN(SIMDDiv, test) { |
3791 | EXPECT(test != NULL); |
3792 | if (TargetCPUFeatures::neon_supported()) { |
3793 | typedef float (*SIMDDiv)() DART_UNUSED; |
3794 | float res = EXECUTE_TEST_CODE_FLOAT(SIMDDiv, test->entry()); |
3795 | EXPECT_FLOAT_EQ(16.0, res, 0.0001f); |
3796 | } |
3797 | } |
3798 | |
3799 | ASSEMBLER_TEST_GENERATE(Vabsqs, assembler) { |
3800 | if (TargetCPUFeatures::neon_supported()) { |
3801 | __ LoadSImmediate(S4, 1.0); |
3802 | __ LoadSImmediate(S5, -1.0); |
3803 | __ LoadSImmediate(S6, 1.0); |
3804 | __ LoadSImmediate(S7, -1.0); |
3805 | |
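    // Per-lane absolute value gives {1, 1, 1, 1}, which sums to 4.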
3806 | __ vabsqs(Q0, Q1); |
3807 | |
3808 | __ vadds(S0, S0, S1); |
3809 | __ vadds(S0, S0, S2); |
3810 | __ vadds(S0, S0, S3); |
3811 | } |
3812 | __ bx(LR); |
3813 | } |
3814 | |
3815 | ASSEMBLER_TEST_RUN(Vabsqs, test) { |
3816 | EXPECT(test != NULL); |
3817 | if (TargetCPUFeatures::neon_supported()) { |
3818 | typedef float (*Vabsqs)() DART_UNUSED; |
3819 | float res = EXECUTE_TEST_CODE_FLOAT(Vabsqs, test->entry()); |
3820 | EXPECT_FLOAT_EQ(4.0, res, 0.0001f); |
3821 | } |
3822 | } |
3823 | |
3824 | ASSEMBLER_TEST_GENERATE(Vnegqs, assembler) { |
3825 | if (TargetCPUFeatures::neon_supported()) { |
3826 | __ LoadSImmediate(S4, 1.0); |
3827 | __ LoadSImmediate(S5, -2.0); |
3828 | __ LoadSImmediate(S6, 1.0); |
3829 | __ LoadSImmediate(S7, -2.0); |
3830 | |
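    // Per-lane negation gives {-1, 2, -1, 2}, which sums to 2.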
3831 | __ vnegqs(Q0, Q1); |
3832 | |
3833 | __ vadds(S0, S0, S1); |
3834 | __ vadds(S0, S0, S2); |
3835 | __ vadds(S0, S0, S3); |
3836 | } |
3837 | __ bx(LR); |
3838 | } |
3839 | |
3840 | ASSEMBLER_TEST_RUN(Vnegqs, test) { |
3841 | EXPECT(test != NULL); |
3842 | if (TargetCPUFeatures::neon_supported()) { |
3843 | typedef float (*Vnegqs)() DART_UNUSED; |
3844 | float res = EXECUTE_TEST_CODE_FLOAT(Vnegqs, test->entry()); |
3845 | EXPECT_FLOAT_EQ(2.0, res, 0.0001f); |
3846 | } |
3847 | } |
3848 | |
3849 | // Called from assembler_test.cc. |
3850 | // LR: return address. |
3851 | // R0: value. |
3852 | // R1: growable array. |
3853 | // R2: current thread. |
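// THR must hold the current thread because the write barrier emitted by
// StoreIntoObject may call out through the thread; LR and THR are saved
// and restored around the test.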
3854 | ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) { |
3855 | __ PushList((1 << LR) | (1 << THR)); |
3856 | __ mov(THR, Operand(R2)); |
3857 | __ StoreIntoObject(R1, FieldAddress(R1, GrowableObjectArray::data_offset()), |
3858 | R0); |
3859 | __ PopList((1 << LR) | (1 << THR)); |
3860 | __ Ret(); |
3861 | } |
3862 | |
3863 | } // namespace compiler |
3864 | } // namespace dart |
3865 | |
#endif  // defined(TARGET_ARCH_ARM)
3867 | |