1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/globals.h" // NOLINT |
6 | #if defined(TARGET_ARCH_ARM) |
7 | |
8 | #define SHOULD_NOT_INCLUDE_RUNTIME |
9 | |
10 | #include "vm/class_id.h" |
11 | #include "vm/compiler/assembler/assembler.h" |
12 | #include "vm/compiler/backend/locations.h" |
13 | #include "vm/cpu.h" |
14 | #include "vm/instructions.h" |
15 | |
16 | // An extra check since we are assuming the existence of /proc/cpuinfo below. |
17 | #if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \ |
18 | !defined(HOST_OS_IOS) && !defined(HOST_OS_MACOS) |
19 | #error ARM cross-compile only supported on Linux, Android, iOS, and Mac |
20 | #endif |
21 | |
22 | namespace dart { |
23 | |
24 | DECLARE_FLAG(bool, check_code_pointer); |
25 | DECLARE_FLAG(bool, inline_alloc); |
26 | DECLARE_FLAG(bool, precompiled_mode); |
27 | DECLARE_FLAG(bool, use_slow_path); |
28 | |
29 | namespace compiler { |
30 | |
31 | Assembler::Assembler(ObjectPoolBuilder* object_pool_builder, |
32 | bool use_far_branches) |
33 | : AssemblerBase(object_pool_builder), |
34 | use_far_branches_(use_far_branches), |
35 | constant_pool_allowed_(false) { |
36 | generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) { |
37 | ldr(LR, |
38 | Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)), |
39 | cond); |
40 | blx(LR, cond); |
41 | }; |
42 | generate_invoke_array_write_barrier_ = [&](Condition cond) { |
43 | ldr(LR, |
44 | Address(THR, target::Thread::array_write_barrier_entry_point_offset()), |
45 | cond); |
46 | blx(LR, cond); |
47 | }; |
48 | } |
49 | |
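// Addressing mode 3 (halfword, signed-byte, and doubleword accesses) only
// has room for an 8-bit immediate, split into two nibbles: bits 11:8 hold
// the high four bits, bits 3:0 the low four, and B22 marks the immediate
// (rather than register-offset) form.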
50 | uint32_t Address::encoding3() const { |
51 | if (kind_ == Immediate) { |
52 | uint32_t offset = encoding_ & kOffset12Mask; |
53 | ASSERT(offset < 256); |
54 | return (encoding_ & ~kOffset12Mask) | B22 | ((offset & 0xf0) << 4) | |
55 | (offset & 0xf); |
56 | } |
57 | ASSERT(kind_ == IndexRegister); |
58 | return encoding_; |
59 | } |
60 | |
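// VFP loads and stores encode the byte offset divided by four in an 8-bit
// field, with bit 23 (the U bit) selecting add versus subtract; hence the
// 0..+1020, word-aligned range asserted below.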
61 | uint32_t Address::vencoding() const { |
62 | ASSERT(kind_ == Immediate); |
63 | uint32_t offset = encoding_ & kOffset12Mask; |
64 | ASSERT(offset < (1 << 10)); // In the range 0 to +1020. |
65 | ASSERT(Utils::IsAligned(offset, 4)); // Multiple of 4. |
66 | int mode = encoding_ & ((8 | 4 | 1) << 21); |
67 | ASSERT((mode == Offset) || (mode == NegOffset)); |
68 | uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2); |
69 | if (mode == Offset) { |
70 | vencoding |= 1 << 23; |
71 | } |
72 | return vencoding; |
73 | } |
74 | |
75 | void Assembler::Emit(int32_t value) { |
76 | AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
77 | buffer_.Emit<int32_t>(value); |
78 | } |
79 | |
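// ARM data-processing instructions share one layout: condition in bits
// 31:28, a type field in bits 27:25 (0 for a shifted-register operand, 1
// for a rotated 8-bit immediate), a 4-bit opcode, the S bit, Rn, Rd, and a
// 12-bit shifter operand.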
80 | void Assembler::EmitType01(Condition cond, |
81 | int type, |
82 | Opcode opcode, |
83 | int set_cc, |
84 | Register rn, |
85 | Register rd, |
86 | Operand o) { |
87 | ASSERT(rd != kNoRegister); |
88 | ASSERT(cond != kNoCondition); |
89 | int32_t encoding = |
90 | static_cast<int32_t>(cond) << kConditionShift | type << kTypeShift | |
91 | static_cast<int32_t>(opcode) << kOpcodeShift | set_cc << kSShift | |
92 | ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | o.encoding(); |
93 | Emit(encoding); |
94 | } |
95 | |
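// Type 5 is B/BL: the target is a signed 24-bit word offset relative to the
// (read-ahead) PC, giving a branch range of roughly +/-32MB.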
96 | void Assembler::EmitType5(Condition cond, int32_t offset, bool link) { |
97 | ASSERT(cond != kNoCondition); |
98 | int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | |
99 | 5 << kTypeShift | (link ? 1 : 0) << kLinkShift; |
100 | BailoutIfInvalidBranchOffset(offset); |
101 | Emit(Assembler::EncodeBranchOffset(offset, encoding)); |
102 | } |
103 | |
104 | void Assembler::EmitMemOp(Condition cond, |
105 | bool load, |
106 | bool byte, |
107 | Register rd, |
108 | Address ad) { |
109 | ASSERT(rd != kNoRegister); |
110 | ASSERT(cond != kNoCondition); |
111 | // Unpredictable, illegal on some microarchitectures. |
112 | ASSERT(!ad.has_writeback() || (ad.rn() != rd)); |
113 | |
114 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 | |
115 | (ad.kind() == Address::Immediate ? 0 : B25) | |
116 | (load ? L : 0) | (byte ? B : 0) | ArmEncode::Rd(rd) | |
117 | ad.encoding(); |
118 | Emit(encoding); |
119 | } |
120 | |
121 | void Assembler::EmitMemOpAddressMode3(Condition cond, |
122 | int32_t mode, |
123 | Register rd, |
124 | Address ad) { |
125 | ASSERT(rd != kNoRegister); |
126 | ASSERT(cond != kNoCondition); |
127 | // Unpredictable, illegal on some microarchitectures. |
128 | ASSERT(!ad.has_writeback() || (ad.rn() != rd)); |
129 | |
130 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | mode | |
131 | ArmEncode::Rd(rd) | ad.encoding3(); |
132 | Emit(encoding); |
133 | } |
134 | |
135 | void Assembler::EmitMultiMemOp(Condition cond, |
136 | BlockAddressMode am, |
137 | bool load, |
138 | Register base, |
139 | RegList regs) { |
140 | ASSERT(base != kNoRegister); |
141 | ASSERT(cond != kNoCondition); |
142 | // Unpredictable, illegal on some microarchitectures. |
143 | ASSERT(!Address::has_writeback(am) || !(regs & (1 << base))); |
144 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
145 | am | (load ? L : 0) | ArmEncode::Rn(base) | regs; |
146 | Emit(encoding); |
147 | } |
148 | |
149 | void Assembler::EmitShiftImmediate(Condition cond, |
150 | Shift opcode, |
151 | Register rd, |
152 | Register rm, |
153 | Operand o) { |
154 | ASSERT(cond != kNoCondition); |
155 | ASSERT(o.type() == 1); |
156 | int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | |
157 | static_cast<int32_t>(MOV) << kOpcodeShift | |
158 | ArmEncode::Rd(rd) | o.encoding() << kShiftImmShift | |
159 | static_cast<int32_t>(opcode) << kShiftShift | |
160 | static_cast<int32_t>(rm); |
161 | Emit(encoding); |
162 | } |
163 | |
164 | void Assembler::EmitShiftRegister(Condition cond, |
165 | Shift opcode, |
166 | Register rd, |
167 | Register rm, |
168 | Operand o) { |
169 | ASSERT(cond != kNoCondition); |
170 | ASSERT(o.type() == 0); |
171 | int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | |
172 | static_cast<int32_t>(MOV) << kOpcodeShift | |
173 | ArmEncode::Rd(rd) | o.encoding() << kShiftRegisterShift | |
174 | static_cast<int32_t>(opcode) << kShiftShift | B4 | |
175 | static_cast<int32_t>(rm); |
176 | Emit(encoding); |
177 | } |
178 | |
179 | void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) { |
180 | EmitType01(cond, o.type(), AND, 0, rn, rd, o); |
181 | } |
182 | |
183 | void Assembler::ands(Register rd, Register rn, Operand o, Condition cond) { |
184 | EmitType01(cond, o.type(), AND, 1, rn, rd, o); |
185 | } |
186 | |
187 | void Assembler::eor(Register rd, Register rn, Operand o, Condition cond) { |
188 | EmitType01(cond, o.type(), EOR, 0, rn, rd, o); |
189 | } |
190 | |
191 | void Assembler::sub(Register rd, Register rn, Operand o, Condition cond) { |
192 | EmitType01(cond, o.type(), SUB, 0, rn, rd, o); |
193 | } |
194 | |
195 | void Assembler::rsb(Register rd, Register rn, Operand o, Condition cond) { |
196 | EmitType01(cond, o.type(), RSB, 0, rn, rd, o); |
197 | } |
198 | |
199 | void Assembler::rsbs(Register rd, Register rn, Operand o, Condition cond) { |
200 | EmitType01(cond, o.type(), RSB, 1, rn, rd, o); |
201 | } |
202 | |
203 | void Assembler::add(Register rd, Register rn, Operand o, Condition cond) { |
204 | EmitType01(cond, o.type(), ADD, 0, rn, rd, o); |
205 | } |
206 | |
207 | void Assembler::adds(Register rd, Register rn, Operand o, Condition cond) { |
208 | EmitType01(cond, o.type(), ADD, 1, rn, rd, o); |
209 | } |
210 | |
211 | void Assembler::subs(Register rd, Register rn, Operand o, Condition cond) { |
212 | EmitType01(cond, o.type(), SUB, 1, rn, rd, o); |
213 | } |
214 | |
215 | void Assembler::adc(Register rd, Register rn, Operand o, Condition cond) { |
216 | EmitType01(cond, o.type(), ADC, 0, rn, rd, o); |
217 | } |
218 | |
219 | void Assembler::adcs(Register rd, Register rn, Operand o, Condition cond) { |
220 | EmitType01(cond, o.type(), ADC, 1, rn, rd, o); |
221 | } |
222 | |
223 | void Assembler::sbc(Register rd, Register rn, Operand o, Condition cond) { |
224 | EmitType01(cond, o.type(), SBC, 0, rn, rd, o); |
225 | } |
226 | |
227 | void Assembler::sbcs(Register rd, Register rn, Operand o, Condition cond) { |
228 | EmitType01(cond, o.type(), SBC, 1, rn, rd, o); |
229 | } |
230 | |
231 | void Assembler::rsc(Register rd, Register rn, Operand o, Condition cond) { |
232 | EmitType01(cond, o.type(), RSC, 0, rn, rd, o); |
233 | } |
234 | |
235 | void Assembler::tst(Register rn, Operand o, Condition cond) { |
236 | EmitType01(cond, o.type(), TST, 1, rn, R0, o); |
237 | } |
238 | |
239 | void Assembler::teq(Register rn, Operand o, Condition cond) { |
240 | EmitType01(cond, o.type(), TEQ, 1, rn, R0, o); |
241 | } |
242 | |
243 | void Assembler::cmp(Register rn, Operand o, Condition cond) { |
244 | EmitType01(cond, o.type(), CMP, 1, rn, R0, o); |
245 | } |
246 | |
247 | void Assembler::cmn(Register rn, Operand o, Condition cond) { |
248 | EmitType01(cond, o.type(), CMN, 1, rn, R0, o); |
249 | } |
250 | |
251 | void Assembler::orr(Register rd, Register rn, Operand o, Condition cond) { |
252 | EmitType01(cond, o.type(), ORR, 0, rn, rd, o); |
253 | } |
254 | |
255 | void Assembler::orrs(Register rd, Register rn, Operand o, Condition cond) { |
256 | EmitType01(cond, o.type(), ORR, 1, rn, rd, o); |
257 | } |
258 | |
259 | void Assembler::mov(Register rd, Operand o, Condition cond) { |
260 | EmitType01(cond, o.type(), MOV, 0, R0, rd, o); |
261 | } |
262 | |
263 | void Assembler::movs(Register rd, Operand o, Condition cond) { |
264 | EmitType01(cond, o.type(), MOV, 1, R0, rd, o); |
265 | } |
266 | |
267 | void Assembler::bic(Register rd, Register rn, Operand o, Condition cond) { |
268 | EmitType01(cond, o.type(), BIC, 0, rn, rd, o); |
269 | } |
270 | |
271 | void Assembler::bics(Register rd, Register rn, Operand o, Condition cond) { |
272 | EmitType01(cond, o.type(), BIC, 1, rn, rd, o); |
273 | } |
274 | |
275 | void Assembler::mvn(Register rd, Operand o, Condition cond) { |
276 | EmitType01(cond, o.type(), MVN, 0, R0, rd, o); |
277 | } |
278 | |
279 | void Assembler::mvns(Register rd, Operand o, Condition cond) { |
280 | EmitType01(cond, o.type(), MVN, 1, R0, rd, o); |
281 | } |
282 | |
283 | void Assembler::clz(Register rd, Register rm, Condition cond) { |
284 | ASSERT(rd != kNoRegister); |
285 | ASSERT(rm != kNoRegister); |
286 | ASSERT(cond != kNoCondition); |
287 | ASSERT(rd != PC); |
288 | ASSERT(rm != PC); |
289 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
290 | B22 | B21 | (0xf << 16) | ArmEncode::Rd(rd) | (0xf << 8) | |
291 | B4 | static_cast<int32_t>(rm); |
292 | Emit(encoding); |
293 | } |
294 | |
295 | void Assembler::rbit(Register rd, Register rm, Condition cond) { |
296 | ASSERT(rd != kNoRegister); |
297 | ASSERT(rm != kNoRegister); |
298 | ASSERT(cond != kNoCondition); |
299 | ASSERT(rd != PC); |
300 | ASSERT(rm != PC); |
301 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 | |
302 | B25 | B23 | B22 | B21 | B20 | (0xf << 16) | |
303 | ArmEncode::Rd(rd) | (0xf << 8) | B5 | B4 | |
304 | static_cast<int32_t>(rm); |
305 | Emit(encoding); |
306 | } |
307 | |
308 | void Assembler::movw(Register rd, uint16_t imm16, Condition cond) { |
309 | ASSERT(cond != kNoCondition); |
310 | int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 | |
311 | ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) | |
312 | (imm16 & 0xfff); |
313 | Emit(encoding); |
314 | } |
315 | |
316 | void Assembler::movt(Register rd, uint16_t imm16, Condition cond) { |
317 | ASSERT(cond != kNoCondition); |
318 | int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 | |
319 | B22 | ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) | |
320 | (imm16 & 0xfff); |
321 | Emit(encoding); |
322 | } |
323 | |
324 | void Assembler::EmitMulOp(Condition cond, |
325 | int32_t opcode, |
326 | Register rd, |
327 | Register rn, |
328 | Register rm, |
329 | Register rs) { |
330 | ASSERT(rd != kNoRegister); |
331 | ASSERT(rn != kNoRegister); |
332 | ASSERT(rm != kNoRegister); |
333 | ASSERT(rs != kNoRegister); |
334 | ASSERT(cond != kNoCondition); |
335 | int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) | |
336 | ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | ArmEncode::Rs(rs) | |
337 | B7 | B4 | ArmEncode::Rm(rm); |
338 | Emit(encoding); |
339 | } |
340 | |
341 | void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) { |
342 | // Assembler registers rd, rn, rm are encoded as rn, rm, rs. |
343 | EmitMulOp(cond, 0, R0, rd, rn, rm); |
344 | } |
345 | |
346 | // Like mul, but sets condition flags. |
347 | void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) { |
348 | EmitMulOp(cond, B20, R0, rd, rn, rm); |
349 | } |
350 | |
351 | void Assembler::mla(Register rd, |
352 | Register rn, |
353 | Register rm, |
354 | Register ra, |
355 | Condition cond) { |
356 | // rd <- ra + rn * rm. |
357 | // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. |
358 | EmitMulOp(cond, B21, ra, rd, rn, rm); |
359 | } |
360 | |
361 | void Assembler::mls(Register rd, |
362 | Register rn, |
363 | Register rm, |
364 | Register ra, |
365 | Condition cond) { |
366 | // rd <- ra - rn * rm. |
367 | // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. |
368 | EmitMulOp(cond, B22 | B21, ra, rd, rn, rm); |
369 | } |
370 | |
371 | void Assembler::smull(Register rd_lo, |
372 | Register rd_hi, |
373 | Register rn, |
374 | Register rm, |
375 | Condition cond) { |
376 | // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
377 | EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm); |
378 | } |
379 | |
380 | void Assembler::umull(Register rd_lo, |
381 | Register rd_hi, |
382 | Register rn, |
383 | Register rm, |
384 | Condition cond) { |
385 | // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
386 | EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm); |
387 | } |
388 | |
389 | void Assembler::umlal(Register rd_lo, |
390 | Register rd_hi, |
391 | Register rn, |
392 | Register rm, |
393 | Condition cond) { |
394 | // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
395 | EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm); |
396 | } |
397 | |
398 | void Assembler::umaal(Register rd_lo, |
399 | Register rd_hi, |
400 | Register rn, |
401 | Register rm) { |
402 | ASSERT(rd_lo != IP); |
403 | ASSERT(rd_hi != IP); |
404 | ASSERT(rn != IP); |
405 | ASSERT(rm != IP); |
406 | // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
407 | EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm); |
408 | } |
409 | |
410 | void Assembler::EmitDivOp(Condition cond, |
411 | int32_t opcode, |
412 | Register rd, |
413 | Register rn, |
414 | Register rm) { |
415 | ASSERT(TargetCPUFeatures::integer_division_supported()); |
416 | ASSERT(rd != kNoRegister); |
417 | ASSERT(rn != kNoRegister); |
418 | ASSERT(rm != kNoRegister); |
419 | ASSERT(cond != kNoCondition); |
420 | int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) | |
421 | (static_cast<int32_t>(rn) << kDivRnShift) | |
422 | (static_cast<int32_t>(rd) << kDivRdShift) | B26 | B25 | |
423 | B24 | B20 | B4 | (static_cast<int32_t>(rm) << kDivRmShift); |
424 | Emit(encoding); |
425 | } |
426 | |
427 | void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) { |
428 | EmitDivOp(cond, 0, rd, rn, rm); |
429 | } |
430 | |
431 | void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) { |
432 | EmitDivOp(cond, B21, rd, rn, rm); |
433 | } |
434 | |
435 | void Assembler::ldr(Register rd, Address ad, Condition cond) { |
436 | EmitMemOp(cond, true, false, rd, ad); |
437 | } |
438 | |
439 | void Assembler::str(Register rd, Address ad, Condition cond) { |
440 | EmitMemOp(cond, false, false, rd, ad); |
441 | } |
442 | |
443 | void Assembler::ldrb(Register rd, Address ad, Condition cond) { |
444 | EmitMemOp(cond, true, true, rd, ad); |
445 | } |
446 | |
447 | void Assembler::strb(Register rd, Address ad, Condition cond) { |
448 | EmitMemOp(cond, false, true, rd, ad); |
449 | } |
450 | |
451 | void Assembler::ldrh(Register rd, Address ad, Condition cond) { |
452 | EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad); |
453 | } |
454 | |
455 | void Assembler::strh(Register rd, Address ad, Condition cond) { |
456 | EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad); |
457 | } |
458 | |
459 | void Assembler::ldrsb(Register rd, Address ad, Condition cond) { |
460 | EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad); |
461 | } |
462 | |
463 | void Assembler::ldrsh(Register rd, Address ad, Condition cond) { |
464 | EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad); |
465 | } |
466 | |
467 | void Assembler::ldrd(Register rd, |
468 | Register rd2, |
469 | Register rn, |
470 | int32_t offset, |
471 | Condition cond) { |
472 | ASSERT((rd % 2) == 0); |
473 | ASSERT(rd2 == rd + 1); |
474 | EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset)); |
475 | } |
476 | |
477 | void Assembler::strd(Register rd, |
478 | Register rd2, |
479 | Register rn, |
480 | int32_t offset, |
481 | Condition cond) { |
482 | ASSERT((rd % 2) == 0); |
483 | ASSERT(rd2 == rd + 1); |
484 | EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset)); |
485 | } |
486 | |
487 | void Assembler::ldm(BlockAddressMode am, |
488 | Register base, |
489 | RegList regs, |
490 | Condition cond) { |
491 | ASSERT(regs != 0); |
492 | EmitMultiMemOp(cond, am, true, base, regs); |
493 | } |
494 | |
495 | void Assembler::stm(BlockAddressMode am, |
496 | Register base, |
497 | RegList regs, |
498 | Condition cond) { |
499 | ASSERT(regs != 0); |
500 | EmitMultiMemOp(cond, am, false, base, regs); |
501 | } |
502 | |
503 | void Assembler::ldrex(Register rt, Register rn, Condition cond) { |
504 | ASSERT(rn != kNoRegister); |
505 | ASSERT(rt != kNoRegister); |
506 | ASSERT(cond != kNoCondition); |
507 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
508 | B23 | L | (static_cast<int32_t>(rn) << kLdExRnShift) | |
509 | (static_cast<int32_t>(rt) << kLdExRtShift) | B11 | B10 | |
510 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0; |
511 | Emit(encoding); |
512 | } |
513 | |
514 | void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) { |
515 | ASSERT(rn != kNoRegister); |
516 | ASSERT(rd != kNoRegister); |
517 | ASSERT(rt != kNoRegister); |
518 | ASSERT(cond != kNoCondition); |
519 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
520 | B23 | (static_cast<int32_t>(rn) << kStrExRnShift) | |
521 | (static_cast<int32_t>(rd) << kStrExRdShift) | B11 | B10 | |
522 | B9 | B8 | B7 | B4 | |
523 | (static_cast<int32_t>(rt) << kStrExRtShift); |
524 | Emit(encoding); |
525 | } |
526 | |
527 | void Assembler::dmb() { |
528 | // Emit a `dmb ish` instruction. |
529 | Emit(kDataMemoryBarrier); |
530 | } |
531 | |
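// The safepoint transitions below implement compare-and-swap with a
// load-exclusive/store-exclusive (ldrex/strex) loop: strex stores only if
// the exclusive reservation from the matching ldrex still holds, and writes
// 0 to its status register on success.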
532 | void Assembler::EnterSafepoint(Register addr, Register state) { |
533 | // We generate the same number of instructions whether or not the slow-path is |
534 | // forced. This simplifies GenerateJitCallbackTrampolines. |
535 | Label slow_path, done, retry; |
536 | if (FLAG_use_slow_path) { |
537 | b(&slow_path); |
538 | } |
539 | |
540 | LoadImmediate(addr, target::Thread::safepoint_state_offset()); |
541 | add(addr, THR, Operand(addr)); |
542 | Bind(&retry); |
543 | ldrex(state, addr); |
544 | cmp(state, Operand(target::Thread::safepoint_state_unacquired())); |
545 | b(&slow_path, NE); |
546 | |
547 | mov(state, Operand(target::Thread::safepoint_state_acquired())); |
548 | strex(TMP, state, addr); |
549 | cmp(TMP, Operand(0)); // 0 means strex was successful. |
550 | b(&done, EQ); |
551 | |
552 | if (!FLAG_use_slow_path) { |
553 | b(&retry); |
554 | } |
555 | |
556 | Bind(&slow_path); |
557 | ldr(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset())); |
558 | ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset())); |
559 | blx(TMP); |
560 | |
561 | Bind(&done); |
562 | } |
563 | |
564 | void Assembler::TransitionGeneratedToNative(Register destination_address, |
565 | Register exit_frame_fp, |
566 | Register exit_through_ffi, |
567 | Register tmp1, |
568 | bool enter_safepoint) { |
569 | // Save exit frame information to enable stack walking. |
570 | StoreToOffset(kWord, exit_frame_fp, THR, |
571 | target::Thread::top_exit_frame_info_offset()); |
572 | |
573 | StoreToOffset(kWord, exit_through_ffi, THR, |
574 | target::Thread::exit_through_ffi_offset()); |
575 | Register tmp2 = exit_through_ffi; |
576 | |
577 | // Mark that the thread is executing native code. |
578 | StoreToOffset(kWord, destination_address, THR, |
579 | target::Thread::vm_tag_offset()); |
580 | LoadImmediate(tmp1, target::Thread::native_execution_state()); |
581 | StoreToOffset(kWord, tmp1, THR, target::Thread::execution_state_offset()); |
582 | |
583 | if (enter_safepoint) { |
584 | EnterSafepoint(tmp1, tmp2); |
585 | } |
586 | } |
587 | |
588 | void Assembler::ExitSafepoint(Register tmp1, Register tmp2) { |
589 | Register addr = tmp1; |
590 | Register state = tmp2; |
591 | |
592 | // We generate the same number of instructions whether or not the slow-path is |
593 | // forced, for consistency with EnterSafepoint. |
594 | Label slow_path, done, retry; |
595 | if (FLAG_use_slow_path) { |
596 | b(&slow_path); |
597 | } |
598 | |
599 | LoadImmediate(addr, target::Thread::safepoint_state_offset()); |
600 | add(addr, THR, Operand(addr)); |
601 | Bind(&retry); |
602 | ldrex(state, addr); |
603 | cmp(state, Operand(target::Thread::safepoint_state_acquired())); |
604 | b(&slow_path, NE); |
605 | |
606 | mov(state, Operand(target::Thread::safepoint_state_unacquired())); |
607 | strex(TMP, state, addr); |
608 | cmp(TMP, Operand(0)); // 0 means strex was successful. |
609 | b(&done, EQ); |
610 | |
611 | if (!FLAG_use_slow_path) { |
612 | b(&retry); |
613 | } |
614 | |
615 | Bind(&slow_path); |
616 | ldr(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset())); |
617 | ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset())); |
618 | blx(TMP); |
619 | |
620 | Bind(&done); |
621 | } |
622 | |
623 | void Assembler::TransitionNativeToGenerated(Register addr, |
624 | Register state, |
625 | bool exit_safepoint) { |
626 | if (exit_safepoint) { |
627 | ExitSafepoint(addr, state); |
628 | } else { |
629 | #if defined(DEBUG) |
630 | // Ensure we've already left the safepoint. |
631 | LoadImmediate(state, 1 << target::Thread::safepoint_state_inside_bit()); |
632 | ldr(TMP, Address(THR, target::Thread::safepoint_state_offset())); |
633 | ands(TMP, TMP, Operand(state)); // Is-at-safepoint is the LSB. |
634 | Label ok; |
635 | b(&ok, ZERO); |
636 | Breakpoint(); |
637 | Bind(&ok); |
638 | #endif |
639 | } |
640 | |
641 | // Mark that the thread is executing Dart code. |
642 | LoadImmediate(state, target::Thread::vm_tag_compiled_id()); |
643 | StoreToOffset(kWord, state, THR, target::Thread::vm_tag_offset()); |
644 | LoadImmediate(state, target::Thread::generated_execution_state()); |
645 | StoreToOffset(kWord, state, THR, target::Thread::execution_state_offset()); |
646 | |
647 | // Reset exit frame information in Isolate's mutator thread structure. |
648 | LoadImmediate(state, 0); |
649 | StoreToOffset(kWord, state, THR, |
650 | target::Thread::top_exit_frame_info_offset()); |
651 | StoreToOffset(kWord, state, THR, target::Thread::exit_through_ffi_offset()); |
652 | } |
653 | |
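// clrex clears the local exclusive monitor, abandoning any reservation left
// behind by an earlier ldrex.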
654 | void Assembler::clrex() { |
655 | int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 | |
656 | B21 | B20 | (0xff << 12) | B4 | 0xf; |
657 | Emit(encoding); |
658 | } |
659 | |
660 | void Assembler::nop(Condition cond) { |
661 | ASSERT(cond != kNoCondition); |
662 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B25 | |
663 | B24 | B21 | (0xf << 12); |
664 | Emit(encoding); |
665 | } |
666 | |
667 | void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) { |
668 | ASSERT(TargetCPUFeatures::vfp_supported()); |
669 | ASSERT(sn != kNoSRegister); |
670 | ASSERT(rt != kNoRegister); |
671 | ASSERT(rt != SP); |
672 | ASSERT(rt != PC); |
673 | ASSERT(cond != kNoCondition); |
674 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
675 | B26 | B25 | ((static_cast<int32_t>(sn) >> 1) * B16) | |
676 | (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
677 | ((static_cast<int32_t>(sn) & 1) * B7) | B4; |
678 | Emit(encoding); |
679 | } |
680 | |
681 | void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) { |
682 | ASSERT(TargetCPUFeatures::vfp_supported()); |
683 | ASSERT(sn != kNoSRegister); |
684 | ASSERT(rt != kNoRegister); |
685 | ASSERT(rt != SP); |
686 | ASSERT(rt != PC); |
687 | ASSERT(cond != kNoCondition); |
688 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
689 | B26 | B25 | B20 | ((static_cast<int32_t>(sn) >> 1) * B16) | |
690 | (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
691 | ((static_cast<int32_t>(sn) & 1) * B7) | B4; |
692 | Emit(encoding); |
693 | } |
694 | |
695 | void Assembler::vmovsrr(SRegister sm, |
696 | Register rt, |
697 | Register rt2, |
698 | Condition cond) { |
699 | ASSERT(TargetCPUFeatures::vfp_supported()); |
700 | ASSERT(sm != kNoSRegister); |
701 | ASSERT(sm != S31); |
702 | ASSERT(rt != kNoRegister); |
703 | ASSERT(rt != SP); |
704 | ASSERT(rt != PC); |
705 | ASSERT(rt2 != kNoRegister); |
706 | ASSERT(rt2 != SP); |
707 | ASSERT(rt2 != PC); |
708 | ASSERT(cond != kNoCondition); |
709 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
710 | B26 | B22 | (static_cast<int32_t>(rt2) * B16) | |
711 | (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
712 | ((static_cast<int32_t>(sm) & 1) * B5) | B4 | |
713 | (static_cast<int32_t>(sm) >> 1); |
714 | Emit(encoding); |
715 | } |
716 | |
717 | void Assembler::vmovrrs(Register rt, |
718 | Register rt2, |
719 | SRegister sm, |
720 | Condition cond) { |
721 | ASSERT(TargetCPUFeatures::vfp_supported()); |
722 | ASSERT(sm != kNoSRegister); |
723 | ASSERT(sm != S31); |
724 | ASSERT(rt != kNoRegister); |
725 | ASSERT(rt != SP); |
726 | ASSERT(rt != PC); |
727 | ASSERT(rt2 != kNoRegister); |
728 | ASSERT(rt2 != SP); |
729 | ASSERT(rt2 != PC); |
730 | ASSERT(rt != rt2); |
731 | ASSERT(cond != kNoCondition); |
732 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
733 | B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) | |
734 | (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
735 | ((static_cast<int32_t>(sm) & 1) * B5) | B4 | |
736 | (static_cast<int32_t>(sm) >> 1); |
737 | Emit(encoding); |
738 | } |
739 | |
740 | void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) { |
741 | ASSERT(TargetCPUFeatures::vfp_supported()); |
742 | ASSERT((i == 0) || (i == 1)); |
743 | ASSERT(rt != kNoRegister); |
744 | ASSERT(rt != SP); |
745 | ASSERT(rt != PC); |
746 | ASSERT(dn != kNoDRegister); |
747 | ASSERT(cond != kNoCondition); |
748 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
749 | B26 | B25 | (i * B21) | (static_cast<int32_t>(rt) * B12) | |
750 | B11 | B9 | B8 | ((static_cast<int32_t>(dn) >> 4) * B7) | |
751 | ((static_cast<int32_t>(dn) & 0xf) * B16) | B4; |
752 | Emit(encoding); |
753 | } |
754 | |
755 | void Assembler::vmovdrr(DRegister dm, |
756 | Register rt, |
757 | Register rt2, |
758 | Condition cond) { |
759 | ASSERT(TargetCPUFeatures::vfp_supported()); |
760 | ASSERT(dm != kNoDRegister); |
761 | ASSERT(rt != kNoRegister); |
762 | ASSERT(rt != SP); |
763 | ASSERT(rt != PC); |
764 | ASSERT(rt2 != kNoRegister); |
765 | ASSERT(rt2 != SP); |
766 | ASSERT(rt2 != PC); |
767 | ASSERT(cond != kNoCondition); |
768 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
769 | B26 | B22 | (static_cast<int32_t>(rt2) * B16) | |
770 | (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 | |
771 | ((static_cast<int32_t>(dm) >> 4) * B5) | B4 | |
772 | (static_cast<int32_t>(dm) & 0xf); |
773 | Emit(encoding); |
774 | } |
775 | |
776 | void Assembler::vmovrrd(Register rt, |
777 | Register rt2, |
778 | DRegister dm, |
779 | Condition cond) { |
780 | ASSERT(TargetCPUFeatures::vfp_supported()); |
781 | ASSERT(dm != kNoDRegister); |
782 | ASSERT(rt != kNoRegister); |
783 | ASSERT(rt != SP); |
784 | ASSERT(rt != PC); |
785 | ASSERT(rt2 != kNoRegister); |
786 | ASSERT(rt2 != SP); |
787 | ASSERT(rt2 != PC); |
788 | ASSERT(rt != rt2); |
789 | ASSERT(cond != kNoCondition); |
790 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
791 | B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) | |
792 | (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 | |
793 | ((static_cast<int32_t>(dm) >> 4) * B5) | B4 | |
794 | (static_cast<int32_t>(dm) & 0xf); |
795 | Emit(encoding); |
796 | } |
797 | |
798 | void Assembler::vldrs(SRegister sd, Address ad, Condition cond) { |
799 | ASSERT(TargetCPUFeatures::vfp_supported()); |
800 | ASSERT(sd != kNoSRegister); |
801 | ASSERT(cond != kNoCondition); |
802 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
803 | B26 | B24 | B20 | ((static_cast<int32_t>(sd) & 1) * B22) | |
804 | ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 | |
805 | ad.vencoding(); |
806 | Emit(encoding); |
807 | } |
808 | |
809 | void Assembler::vstrs(SRegister sd, Address ad, Condition cond) { |
810 | ASSERT(TargetCPUFeatures::vfp_supported()); |
811 | ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC); |
812 | ASSERT(sd != kNoSRegister); |
813 | ASSERT(cond != kNoCondition); |
814 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
815 | B26 | B24 | ((static_cast<int32_t>(sd) & 1) * B22) | |
816 | ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 | |
817 | ad.vencoding(); |
818 | Emit(encoding); |
819 | } |
820 | |
821 | void Assembler::vldrd(DRegister dd, Address ad, Condition cond) { |
822 | ASSERT(TargetCPUFeatures::vfp_supported()); |
823 | ASSERT(dd != kNoDRegister); |
824 | ASSERT(cond != kNoCondition); |
825 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
826 | B26 | B24 | B20 | ((static_cast<int32_t>(dd) >> 4) * B22) | |
827 | ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 | |
828 | ad.vencoding(); |
829 | Emit(encoding); |
830 | } |
831 | |
832 | void Assembler::vstrd(DRegister dd, Address ad, Condition cond) { |
833 | ASSERT(TargetCPUFeatures::vfp_supported()); |
834 | ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC); |
835 | ASSERT(dd != kNoDRegister); |
836 | ASSERT(cond != kNoCondition); |
837 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
838 | B26 | B24 | ((static_cast<int32_t>(dd) >> 4) * B22) | |
839 | ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 | |
840 | ad.vencoding(); |
841 | Emit(encoding); |
842 | } |
843 | |
844 | void Assembler::EmitMultiVSMemOp(Condition cond, |
845 | BlockAddressMode am, |
846 | bool load, |
847 | Register base, |
848 | SRegister start, |
849 | uint32_t count) { |
850 | ASSERT(TargetCPUFeatures::vfp_supported()); |
851 | ASSERT(base != kNoRegister); |
852 | ASSERT(cond != kNoCondition); |
853 | ASSERT(start != kNoSRegister); |
854 | ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters); |
855 | |
856 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
857 | B26 | B11 | B9 | am | (load ? L : 0) | |
858 | ArmEncode::Rn(base) | |
859 | ((static_cast<int32_t>(start) & 0x1) ? D : 0) | |
860 | ((static_cast<int32_t>(start) >> 1) << 12) | count; |
861 | Emit(encoding); |
862 | } |
863 | |
864 | void Assembler::EmitMultiVDMemOp(Condition cond, |
865 | BlockAddressMode am, |
866 | bool load, |
867 | Register base, |
868 | DRegister start, |
869 | int32_t count) { |
870 | ASSERT(TargetCPUFeatures::vfp_supported()); |
871 | ASSERT(base != kNoRegister); |
872 | ASSERT(cond != kNoCondition); |
873 | ASSERT(start != kNoDRegister); |
874 | ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters); |
875 | const int notArmv5te = 0; |
876 | |
877 | int32_t encoding = |
878 | (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B11 | B9 | |
879 | B8 | am | (load ? L : 0) | ArmEncode::Rn(base) | |
880 | ((static_cast<int32_t>(start) & 0x10) ? D : 0) | |
881 | ((static_cast<int32_t>(start) & 0xf) << 12) | (count << 1) | notArmv5te; |
882 | Emit(encoding); |
883 | } |
884 | |
885 | void Assembler::vldms(BlockAddressMode am, |
886 | Register base, |
887 | SRegister first, |
888 | SRegister last, |
889 | Condition cond) { |
890 | ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
891 | ASSERT(last > first); |
892 | EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1); |
893 | } |
894 | |
895 | void Assembler::vstms(BlockAddressMode am, |
896 | Register base, |
897 | SRegister first, |
898 | SRegister last, |
899 | Condition cond) { |
900 | ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
901 | ASSERT(last > first); |
902 | EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1); |
903 | } |
904 | |
905 | void Assembler::vldmd(BlockAddressMode am, |
906 | Register base, |
907 | DRegister first, |
908 | intptr_t count, |
909 | Condition cond) { |
910 | ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
911 | ASSERT(count <= 16); |
912 | ASSERT(first + count <= kNumberOfDRegisters); |
913 | EmitMultiVDMemOp(cond, am, true, base, first, count); |
914 | } |
915 | |
916 | void Assembler::vstmd(BlockAddressMode am, |
917 | Register base, |
918 | DRegister first, |
919 | intptr_t count, |
920 | Condition cond) { |
921 | ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
922 | ASSERT(count <= 16); |
923 | ASSERT(first + count <= kNumberOfDRegisters); |
924 | EmitMultiVDMemOp(cond, am, false, base, first, count); |
925 | } |
926 | |
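// VFP register numbers are five bits split across the encoding: an S
// register keeps its low bit in the D/N/M bit and its upper four bits in
// the Vd/Vn/Vm field; a D register does the opposite. Hence the >> 1 / & 1
// and >> 4 / & 0xf arithmetic below.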
927 | void Assembler::EmitVFPsss(Condition cond, |
928 | int32_t opcode, |
929 | SRegister sd, |
930 | SRegister sn, |
931 | SRegister sm) { |
932 | ASSERT(TargetCPUFeatures::vfp_supported()); |
933 | ASSERT(sd != kNoSRegister); |
934 | ASSERT(sn != kNoSRegister); |
935 | ASSERT(sm != kNoSRegister); |
936 | ASSERT(cond != kNoCondition); |
937 | int32_t encoding = |
938 | (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
939 | B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) | |
940 | ((static_cast<int32_t>(sn) >> 1) * B16) | |
941 | ((static_cast<int32_t>(sd) >> 1) * B12) | |
942 | ((static_cast<int32_t>(sn) & 1) * B7) | |
943 | ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1); |
944 | Emit(encoding); |
945 | } |
946 | |
947 | void Assembler::EmitVFPddd(Condition cond, |
948 | int32_t opcode, |
949 | DRegister dd, |
950 | DRegister dn, |
951 | DRegister dm) { |
952 | ASSERT(TargetCPUFeatures::vfp_supported()); |
953 | ASSERT(dd != kNoDRegister); |
954 | ASSERT(dn != kNoDRegister); |
955 | ASSERT(dm != kNoDRegister); |
956 | ASSERT(cond != kNoCondition); |
957 | int32_t encoding = |
958 | (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
959 | B9 | B8 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) | |
960 | ((static_cast<int32_t>(dn) & 0xf) * B16) | |
961 | ((static_cast<int32_t>(dd) & 0xf) * B12) | |
962 | ((static_cast<int32_t>(dn) >> 4) * B7) | |
963 | ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf); |
964 | Emit(encoding); |
965 | } |
966 | |
967 | void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) { |
968 | EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm); |
969 | } |
970 | |
971 | void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) { |
972 | EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm); |
973 | } |
974 | |
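// vmov (immediate) only accepts a VFP "modified immediate": values
// expressible as +/-n * 2^-r with 16 <= n <= 31 and 0 <= r <= 7. The
// overloads below return false when the constant does not fit, so callers
// can fall back to loading the value another way.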
975 | bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) { |
976 | uint32_t imm32 = bit_cast<uint32_t, float>(s_imm); |
977 | if (((imm32 & ((1 << 19) - 1)) == 0) && |
978 | ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) || |
979 | (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) { |
980 | uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) | |
981 | ((imm32 >> 19) & ((1 << 6) - 1)); |
982 | EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf), sd, |
983 | S0, S0); |
984 | return true; |
985 | } |
986 | return false; |
987 | } |
988 | |
989 | bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) { |
990 | uint64_t imm64 = bit_cast<uint64_t, double>(d_imm); |
991 | if (((imm64 & ((1LL << 48) - 1)) == 0) && |
992 | ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) || |
993 | (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) { |
994 | uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) | |
995 | ((imm64 >> 48) & ((1 << 6) - 1)); |
996 | EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf), |
997 | dd, D0, D0); |
998 | return true; |
999 | } |
1000 | return false; |
1001 | } |
1002 | |
1003 | void Assembler::vadds(SRegister sd, |
1004 | SRegister sn, |
1005 | SRegister sm, |
1006 | Condition cond) { |
1007 | EmitVFPsss(cond, B21 | B20, sd, sn, sm); |
1008 | } |
1009 | |
1010 | void Assembler::vaddd(DRegister dd, |
1011 | DRegister dn, |
1012 | DRegister dm, |
1013 | Condition cond) { |
1014 | EmitVFPddd(cond, B21 | B20, dd, dn, dm); |
1015 | } |
1016 | |
1017 | void Assembler::vsubs(SRegister sd, |
1018 | SRegister sn, |
1019 | SRegister sm, |
1020 | Condition cond) { |
1021 | EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm); |
1022 | } |
1023 | |
1024 | void Assembler::vsubd(DRegister dd, |
1025 | DRegister dn, |
1026 | DRegister dm, |
1027 | Condition cond) { |
1028 | EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm); |
1029 | } |
1030 | |
1031 | void Assembler::vmuls(SRegister sd, |
1032 | SRegister sn, |
1033 | SRegister sm, |
1034 | Condition cond) { |
1035 | EmitVFPsss(cond, B21, sd, sn, sm); |
1036 | } |
1037 | |
1038 | void Assembler::vmuld(DRegister dd, |
1039 | DRegister dn, |
1040 | DRegister dm, |
1041 | Condition cond) { |
1042 | EmitVFPddd(cond, B21, dd, dn, dm); |
1043 | } |
1044 | |
1045 | void Assembler::vmlas(SRegister sd, |
1046 | SRegister sn, |
1047 | SRegister sm, |
1048 | Condition cond) { |
1049 | EmitVFPsss(cond, 0, sd, sn, sm); |
1050 | } |
1051 | |
1052 | void Assembler::vmlad(DRegister dd, |
1053 | DRegister dn, |
1054 | DRegister dm, |
1055 | Condition cond) { |
1056 | EmitVFPddd(cond, 0, dd, dn, dm); |
1057 | } |
1058 | |
1059 | void Assembler::vmlss(SRegister sd, |
1060 | SRegister sn, |
1061 | SRegister sm, |
1062 | Condition cond) { |
1063 | EmitVFPsss(cond, B6, sd, sn, sm); |
1064 | } |
1065 | |
1066 | void Assembler::vmlsd(DRegister dd, |
1067 | DRegister dn, |
1068 | DRegister dm, |
1069 | Condition cond) { |
1070 | EmitVFPddd(cond, B6, dd, dn, dm); |
1071 | } |
1072 | |
1073 | void Assembler::vdivs(SRegister sd, |
1074 | SRegister sn, |
1075 | SRegister sm, |
1076 | Condition cond) { |
1077 | EmitVFPsss(cond, B23, sd, sn, sm); |
1078 | } |
1079 | |
1080 | void Assembler::vdivd(DRegister dd, |
1081 | DRegister dn, |
1082 | DRegister dm, |
1083 | Condition cond) { |
1084 | EmitVFPddd(cond, B23, dd, dn, dm); |
1085 | } |
1086 | |
1087 | void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) { |
1088 | EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm); |
1089 | } |
1090 | |
1091 | void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) { |
1092 | EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm); |
1093 | } |
1094 | |
1095 | void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) { |
1096 | EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm); |
1097 | } |
1098 | |
1099 | void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) { |
1100 | EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm); |
1101 | } |
1102 | |
1103 | void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) { |
1104 | EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm); |
1105 | } |
1106 | |
1107 | void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) { |
1108 | EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm); |
1109 | } |
1110 | |
1111 | void Assembler::EmitVFPsd(Condition cond, |
1112 | int32_t opcode, |
1113 | SRegister sd, |
1114 | DRegister dm) { |
1115 | ASSERT(TargetCPUFeatures::vfp_supported()); |
1116 | ASSERT(sd != kNoSRegister); |
1117 | ASSERT(dm != kNoDRegister); |
1118 | ASSERT(cond != kNoCondition); |
1119 | int32_t encoding = |
1120 | (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
1121 | B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) | |
1122 | ((static_cast<int32_t>(sd) >> 1) * B12) | |
1123 | ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf); |
1124 | Emit(encoding); |
1125 | } |
1126 | |
1127 | void Assembler::EmitVFPds(Condition cond, |
1128 | int32_t opcode, |
1129 | DRegister dd, |
1130 | SRegister sm) { |
1131 | ASSERT(TargetCPUFeatures::vfp_supported()); |
1132 | ASSERT(dd != kNoDRegister); |
1133 | ASSERT(sm != kNoSRegister); |
1134 | ASSERT(cond != kNoCondition); |
1135 | int32_t encoding = |
1136 | (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
1137 | B9 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) | |
1138 | ((static_cast<int32_t>(dd) & 0xf) * B12) | |
1139 | ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1); |
1140 | Emit(encoding); |
1141 | } |
1142 | |
1143 | void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) { |
1144 | EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm); |
1145 | } |
1146 | |
1147 | void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) { |
1148 | EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm); |
1149 | } |
1150 | |
1151 | void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) { |
1152 | EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm); |
1153 | } |
1154 | |
1155 | void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) { |
1156 | EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm); |
1157 | } |
1158 | |
1159 | void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) { |
1160 | EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm); |
1161 | } |
1162 | |
1163 | void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) { |
1164 | EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm); |
1165 | } |
1166 | |
1167 | void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) { |
1168 | EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm); |
1169 | } |
1170 | |
1171 | void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) { |
1172 | EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm); |
1173 | } |
1174 | |
1175 | void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) { |
1176 | EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm); |
1177 | } |
1178 | |
1179 | void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) { |
1180 | EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm); |
1181 | } |
1182 | |
1183 | void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) { |
1184 | EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm); |
1185 | } |
1186 | |
1187 | void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) { |
1188 | EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm); |
1189 | } |
1190 | |
1191 | void Assembler::vcmpsz(SRegister sd, Condition cond) { |
1192 | EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0); |
1193 | } |
1194 | |
1195 | void Assembler::vcmpdz(DRegister dd, Condition cond) { |
1196 | EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0); |
1197 | } |
1198 | |
1199 | void Assembler::vmrs(Register rd, Condition cond) { |
1200 | ASSERT(TargetCPUFeatures::vfp_supported()); |
1201 | ASSERT(cond != kNoCondition); |
1202 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
1203 | B26 | B25 | B23 | B22 | B21 | B20 | B16 | |
1204 | (static_cast<int32_t>(rd) * B12) | B11 | B9 | B4; |
1205 | Emit(encoding); |
1206 | } |
1207 | |
1208 | void Assembler::vmstat(Condition cond) { |
1209 | vmrs(APSR, cond); |
1210 | } |
1211 | |
1212 | static inline int ShiftOfOperandSize(OperandSize size) { |
1213 | switch (size) { |
1214 | case kByte: |
1215 | case kUnsignedByte: |
1216 | return 0; |
1217 | case kHalfword: |
1218 | case kUnsignedHalfword: |
1219 | return 1; |
1220 | case kWord: |
1221 | case kUnsignedWord: |
1222 | return 2; |
1223 | case kWordPair: |
1224 | return 3; |
1225 | case kSWord: |
1226 | case kDWord: |
1227 | return 0; |
1228 | default: |
1229 | UNREACHABLE(); |
1230 | break; |
1231 | } |
1232 | |
1233 | UNREACHABLE(); |
1234 | return -1; |
1235 | } |
1236 | |
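// A NEON Q register aliases a pair of D registers (Qn covers D(2n) and
// D(2n+1)), so the quad-form emitters encode qd * 2 and friends in the
// D-register fields.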
1237 | void Assembler::EmitSIMDqqq(int32_t opcode, |
1238 | OperandSize size, |
1239 | QRegister qd, |
1240 | QRegister qn, |
1241 | QRegister qm) { |
1242 | ASSERT(TargetCPUFeatures::neon_supported()); |
1243 | int sz = ShiftOfOperandSize(size); |
1244 | int32_t encoding = |
1245 | (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 | B6 | |
1246 | opcode | ((sz & 0x3) * B20) | |
1247 | ((static_cast<int32_t>(qd * 2) >> 4) * B22) | |
1248 | ((static_cast<int32_t>(qn * 2) & 0xf) * B16) | |
1249 | ((static_cast<int32_t>(qd * 2) & 0xf) * B12) | |
1250 | ((static_cast<int32_t>(qn * 2) >> 4) * B7) | |
1251 | ((static_cast<int32_t>(qm * 2) >> 4) * B5) | |
1252 | (static_cast<int32_t>(qm * 2) & 0xf); |
1253 | Emit(encoding); |
1254 | } |
1255 | |
1256 | void Assembler::EmitSIMDddd(int32_t opcode, |
1257 | OperandSize size, |
1258 | DRegister dd, |
1259 | DRegister dn, |
1260 | DRegister dm) { |
1261 | ASSERT(TargetCPUFeatures::neon_supported()); |
1262 | int sz = ShiftOfOperandSize(size); |
1263 | int32_t encoding = |
1264 | (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 | |
1265 | opcode | ((sz & 0x3) * B20) | ((static_cast<int32_t>(dd) >> 4) * B22) | |
1266 | ((static_cast<int32_t>(dn) & 0xf) * B16) | |
1267 | ((static_cast<int32_t>(dd) & 0xf) * B12) | |
1268 | ((static_cast<int32_t>(dn) >> 4) * B7) | |
1269 | ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf); |
1270 | Emit(encoding); |
1271 | } |
1272 | |
1273 | void Assembler::vmovq(QRegister qd, QRegister qm) { |
1274 | EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm); |
1275 | } |
1276 | |
1277 | void Assembler::vaddqi(OperandSize sz, |
1278 | QRegister qd, |
1279 | QRegister qn, |
1280 | QRegister qm) { |
1281 | EmitSIMDqqq(B11, sz, qd, qn, qm); |
1282 | } |
1283 | |
1284 | void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) { |
1285 | EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm); |
1286 | } |
1287 | |
1288 | void Assembler::vsubqi(OperandSize sz, |
1289 | QRegister qd, |
1290 | QRegister qn, |
1291 | QRegister qm) { |
1292 | EmitSIMDqqq(B24 | B11, sz, qd, qn, qm); |
1293 | } |
1294 | |
1295 | void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) { |
1296 | EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm); |
1297 | } |
1298 | |
1299 | void Assembler::vmulqi(OperandSize sz, |
1300 | QRegister qd, |
1301 | QRegister qn, |
1302 | QRegister qm) { |
1303 | EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm); |
1304 | } |
1305 | |
1306 | void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) { |
1307 | EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm); |
1308 | } |
1309 | |
1310 | void Assembler::vshlqi(OperandSize sz, |
1311 | QRegister qd, |
1312 | QRegister qm, |
1313 | QRegister qn) { |
1314 | EmitSIMDqqq(B25 | B10, sz, qd, qn, qm); |
1315 | } |
1316 | |
1317 | void Assembler::vshlqu(OperandSize sz, |
1318 | QRegister qd, |
1319 | QRegister qm, |
1320 | QRegister qn) { |
1321 | EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm); |
1322 | } |
1323 | |
1324 | void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) { |
1325 | EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm); |
1326 | } |
1327 | |
1328 | void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) { |
1329 | EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qn, qm); |
1330 | } |
1331 | |
1332 | void Assembler::vornq(QRegister qd, QRegister qn, QRegister qm) { |
1333 | EmitSIMDqqq(B21 | B20 | B8 | B4, kByte, qd, qn, qm); |
1334 | } |
1335 | |
1336 | void Assembler::vandq(QRegister qd, QRegister qn, QRegister qm) { |
1337 | EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm); |
1338 | } |
1339 | |
1340 | void Assembler::vmvnq(QRegister qd, QRegister qm) { |
1341 | EmitSIMDqqq(B25 | B24 | B23 | B10 | B8 | B7, kWordPair, qd, Q0, qm); |
1342 | } |
1343 | |
1344 | void Assembler::vminqs(QRegister qd, QRegister qn, QRegister qm) { |
1345 | EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm); |
1346 | } |
1347 | |
1348 | void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) { |
1349 | EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm); |
1350 | } |
1351 | |
1352 | void Assembler::vabsqs(QRegister qd, QRegister qm) { |
1353 | EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord, qd, Q0, |
1354 | qm); |
1355 | } |
1356 | |
1357 | void Assembler::vnegqs(QRegister qd, QRegister qm) { |
1358 | EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord, |
1359 | qd, Q0, qm); |
1360 | } |
1361 | |
1362 | void Assembler::vrecpeqs(QRegister qd, QRegister qm) { |
1363 | EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, qd, |
1364 | Q0, qm); |
1365 | } |
1366 | |
1367 | void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) { |
1368 | EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm); |
1369 | } |
1370 | |
1371 | void Assembler::vrsqrteqs(QRegister qd, QRegister qm) { |
1372 | EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, kSWord, |
1373 | qd, Q0, qm); |
1374 | } |
1375 | |
1376 | void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) { |
1377 | EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm); |
1378 | } |
1379 | |
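// For vdup (scalar) the element size and lane index share the imm4 field:
// the position of the lowest set bit selects the size and the bits above it
// hold the index. The computed pattern is passed through the Vn slot of
// EmitSIMDddd below.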
1380 | void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) { |
1381 | ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair)); |
1382 | int code = 0; |
1383 | |
1384 | switch (sz) { |
1385 | case kByte: |
1386 | case kUnsignedByte: { |
1387 | ASSERT((idx >= 0) && (idx < 8)); |
1388 | code = 1 | (idx << 1); |
1389 | break; |
1390 | } |
1391 | case kHalfword: |
1392 | case kUnsignedHalfword: { |
1393 | ASSERT((idx >= 0) && (idx < 4)); |
1394 | code = 2 | (idx << 2); |
1395 | break; |
1396 | } |
1397 | case kWord: |
1398 | case kUnsignedWord: { |
1399 | ASSERT((idx >= 0) && (idx < 2)); |
1400 | code = 4 | (idx << 3); |
1401 | break; |
1402 | } |
1403 | default: { |
1404 | break; |
1405 | } |
1406 | } |
1407 | |
1408 | EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair, |
1409 | static_cast<DRegister>(qd * 2), |
1410 | static_cast<DRegister>(code & 0xf), dm); |
1411 | } |
1412 | |
1413 | void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) { |
1414 | ASSERT((len >= 1) && (len <= 4)); |
1415 | EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm); |
1416 | } |
1417 | |
1418 | void Assembler::vzipqw(QRegister qd, QRegister qm) { |
1419 | EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm); |
1420 | } |
1421 | |
1422 | void Assembler::vceqqi(OperandSize sz, |
1423 | QRegister qd, |
1424 | QRegister qn, |
1425 | QRegister qm) { |
1426 | EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm); |
1427 | } |
1428 | |
1429 | void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) { |
1430 | EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm); |
1431 | } |
1432 | |
1433 | void Assembler::vcgeqi(OperandSize sz, |
1434 | QRegister qd, |
1435 | QRegister qn, |
1436 | QRegister qm) { |
1437 | EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm); |
1438 | } |
1439 | |
1440 | void Assembler::vcugeqi(OperandSize sz, |
1441 | QRegister qd, |
1442 | QRegister qn, |
1443 | QRegister qm) { |
1444 | EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm); |
1445 | } |
1446 | |
1447 | void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) { |
1448 | EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm); |
1449 | } |
1450 | |
1451 | void Assembler::vcgtqi(OperandSize sz, |
1452 | QRegister qd, |
1453 | QRegister qn, |
1454 | QRegister qm) { |
1455 | EmitSIMDqqq(B9 | B8, sz, qd, qn, qm); |
1456 | } |
1457 | |
1458 | void Assembler::vcugtqi(OperandSize sz, |
1459 | QRegister qd, |
1460 | QRegister qn, |
1461 | QRegister qm) { |
1462 | EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm); |
1463 | } |
1464 | |
1465 | void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) { |
1466 | EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm); |
1467 | } |
1468 | |
1469 | void Assembler::bkpt(uint16_t imm16) { |
1470 | Emit(BkptEncoding(imm16)); |
1471 | } |
1472 | |
1473 | void Assembler::b(Label* label, Condition cond) { |
1474 | EmitBranch(cond, label, false); |
1475 | } |
1476 | |
1477 | void Assembler::bl(Label* label, Condition cond) { |
1478 | EmitBranch(cond, label, true); |
1479 | } |
1480 | |
1481 | void Assembler::bx(Register rm, Condition cond) { |
1482 | ASSERT(rm != kNoRegister); |
1483 | ASSERT(cond != kNoCondition); |
1484 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
1485 | B21 | (0xfff << 8) | B4 | ArmEncode::Rm(rm); |
1486 | Emit(encoding); |
1487 | } |
1488 | |
1489 | void Assembler::blx(Register rm, Condition cond) { |
1490 | ASSERT(rm != kNoRegister); |
1491 | ASSERT(cond != kNoCondition); |
1492 | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
1493 | B21 | (0xfff << 8) | B5 | B4 | ArmEncode::Rm(rm); |
1494 | Emit(encoding); |
1495 | } |
1496 | |
1497 | void Assembler::MarkExceptionHandler(Label* label) { |
1498 | EmitType01(AL, 1, TST, 1, PC, R0, Operand(0)); |
1499 | Label l; |
1500 | b(&l); |
1501 | EmitBranch(AL, label, false); |
1502 | Bind(&l); |
1503 | } |
1504 | |
1505 | void Assembler::Drop(intptr_t stack_elements) { |
1506 | ASSERT(stack_elements >= 0); |
1507 | if (stack_elements > 0) { |
1508 | AddImmediate(SP, stack_elements * target::kWordSize); |
1509 | } |
1510 | } |
1511 | |
1512 | intptr_t Assembler::FindImmediate(int32_t imm) { |
1513 | return object_pool_builder().FindImmediate(imm); |
1514 | } |
1515 | |
1516 | // Uses a code sequence that can easily be decoded. |
1517 | void Assembler::LoadWordFromPoolOffset(Register rd, |
1518 | int32_t offset, |
1519 | Register pp, |
1520 | Condition cond) { |
1521 | ASSERT((pp != PP) || constant_pool_allowed()); |
1522 | ASSERT(rd != pp); |
1523 | int32_t offset_mask = 0; |
1524 | if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) { |
1525 | ldr(rd, Address(pp, offset), cond); |
1526 | } else { |
1527 | int32_t offset_hi = offset & ~offset_mask; // signed |
1528 | uint32_t offset_lo = offset & offset_mask; // unsigned |
1529 | // Inline a simplified version of AddImmediate(rd, pp, offset_hi). |
1530 | Operand o; |
1531 | if (Operand::CanHold(offset_hi, &o)) { |
1532 | add(rd, pp, o, cond); |
1533 | } else { |
1534 | LoadImmediate(rd, offset_hi, cond); |
1535 | add(rd, pp, Operand(rd), cond); |
1536 | } |
1537 | ldr(rd, Address(rd, offset_lo), cond); |
1538 | } |
1539 | } |
1540 | |
1541 | void Assembler::CheckCodePointer() { |
1542 | #ifdef DEBUG |
1543 | if (!FLAG_check_code_pointer) { |
1544 | return; |
1545 | } |
  Comment("CheckCodePointer");
1547 | Label cid_ok, instructions_ok; |
1548 | Push(R0); |
1549 | Push(IP); |
1550 | CompareClassId(CODE_REG, kCodeCid, R0); |
1551 | b(&cid_ok, EQ); |
1552 | bkpt(0); |
1553 | Bind(&cid_ok); |
1554 | |
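  // In ARM state a read of PC yields the address of the current instruction
  // plus 8 (Instr::kPCReadOffset), which the offset below compensates for.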
1555 | const intptr_t offset = CodeSize() + Instr::kPCReadOffset + |
1556 | target::Instructions::HeaderSize() - kHeapObjectTag; |
1557 | mov(R0, Operand(PC)); |
1558 | AddImmediate(R0, -offset); |
1559 | ldr(IP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset())); |
1560 | cmp(R0, Operand(IP)); |
1561 | b(&instructions_ok, EQ); |
1562 | bkpt(1); |
1563 | Bind(&instructions_ok); |
1564 | Pop(IP); |
1565 | Pop(R0); |
1566 | #endif |
1567 | } |
1568 | |
1569 | void Assembler::RestoreCodePointer() { |
1570 | ldr(CODE_REG, |
1571 | Address(FP, target::frame_layout.code_from_fp * target::kWordSize)); |
1572 | CheckCodePointer(); |
1573 | } |
1574 | |
1575 | void Assembler::LoadPoolPointer(Register reg) { |
1576 | // Load new pool pointer. |
1577 | CheckCodePointer(); |
1578 | ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset())); |
1579 | set_constant_pool_allowed(reg == PP); |
1580 | } |
1581 | |
1582 | void Assembler::SetupGlobalPoolAndDispatchTable() { |
1583 | ASSERT(FLAG_precompiled_mode && FLAG_use_bare_instructions); |
1584 | ldr(PP, Address(THR, target::Thread::global_object_pool_offset())); |
1585 | if (FLAG_use_table_dispatch) { |
1586 | ldr(DISPATCH_TABLE_REG, |
1587 | Address(THR, target::Thread::dispatch_table_array_offset())); |
1588 | } |
1589 | } |
1590 | |
1591 | void Assembler::LoadIsolate(Register rd) { |
1592 | ldr(rd, Address(THR, target::Thread::isolate_offset())); |
1593 | } |
1594 | |
1595 | bool Assembler::CanLoadFromObjectPool(const Object& object) const { |
1596 | ASSERT(IsOriginalObject(object)); |
1597 | if (!constant_pool_allowed()) { |
1598 | return false; |
1599 | } |
1600 | |
1601 | ASSERT(IsNotTemporaryScopedHandle(object)); |
1602 | ASSERT(IsInOldSpace(object)); |
1603 | return true; |
1604 | } |
1605 | |
1606 | void Assembler::LoadObjectHelper(Register rd, |
1607 | const Object& object, |
1608 | Condition cond, |
1609 | bool is_unique, |
1610 | Register pp) { |
1611 | ASSERT(IsOriginalObject(object)); |
1612 | // `is_unique == true` effectively means object has to be patchable. |
1613 | if (!is_unique) { |
1614 | intptr_t offset = 0; |
1615 | if (target::CanLoadFromThread(object, &offset)) { |
      // Load common VM constants from the thread. This also works in places
      // where no constant pool is set up (e.g. intrinsic code).
1618 | ldr(rd, Address(THR, offset), cond); |
1619 | return; |
1620 | } |
1621 | if (target::IsSmi(object)) { |
1622 | // Relocation doesn't apply to Smis. |
1623 | LoadImmediate(rd, target::ToRawSmi(object), cond); |
1624 | return; |
1625 | } |
1626 | } |
1627 | if (!CanLoadFromObjectPool(object)) { |
1628 | UNREACHABLE(); |
1629 | return; |
1630 | } |
1631 | // Make sure that class CallPattern is able to decode this load from the |
1632 | // object pool. |
1633 | const auto index = is_unique ? object_pool_builder().AddObject(object) |
1634 | : object_pool_builder().FindObject(object); |
1635 | const int32_t offset = target::ObjectPool::element_offset(index); |
1636 | LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond); |
1637 | } |
1638 | |
1639 | void Assembler::LoadObject(Register rd, const Object& object, Condition cond) { |
1640 | LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP); |
1641 | } |
1642 | |
1643 | void Assembler::LoadUniqueObject(Register rd, |
1644 | const Object& object, |
1645 | Condition cond) { |
1646 | LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP); |
1647 | } |
1648 | |
1649 | void Assembler::LoadNativeEntry(Register rd, |
1650 | const ExternalLabel* label, |
1651 | ObjectPoolBuilderEntry::Patchability patchable, |
1652 | Condition cond) { |
1653 | const int32_t offset = target::ObjectPool::element_offset( |
1654 | object_pool_builder().FindNativeFunction(label, patchable)); |
1655 | LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); |
1656 | } |
1657 | |
1658 | void Assembler::PushObject(const Object& object) { |
1659 | ASSERT(IsOriginalObject(object)); |
1660 | LoadObject(IP, object); |
1661 | Push(IP); |
1662 | } |
1663 | |
1664 | void Assembler::CompareObject(Register rn, const Object& object) { |
1665 | ASSERT(IsOriginalObject(object)); |
1666 | ASSERT(rn != IP); |
1667 | if (target::IsSmi(object)) { |
1668 | CompareImmediate(rn, target::ToRawSmi(object)); |
1669 | } else { |
1670 | LoadObject(IP, object); |
1671 | cmp(rn, Operand(IP)); |
1672 | } |
1673 | } |
1674 | |
1675 | // Preserves object and value registers. |
1676 | void Assembler::StoreIntoObjectFilter(Register object, |
1677 | Register value, |
1678 | Label* label, |
1679 | CanBeSmi value_can_be_smi, |
1680 | BarrierFilterMode how_to_jump) { |
1681 | COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset == |
1682 | target::kWordSize) && |
1683 | (target::ObjectAlignment::kOldObjectAlignmentOffset == 0)); |
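  // Given the asserted layout, a new-space address has the kWordSize bit set
  // (bit 2 on 32-bit ARM), which is exactly the bit tested below.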
  // For the value we are only interested in the new/old bit and the tag bit.
  // AND the new bit with the tag bit: the resulting bit is 0 for a Smi.
1686 | if (value_can_be_smi == kValueCanBeSmi) { |
1687 | and_( |
1688 | IP, value, |
1689 | Operand(value, LSL, target::ObjectAlignment::kObjectAlignmentLog2 - 1)); |
    // AND the result with the negated space bit of the object.
1691 | bic(IP, IP, Operand(object)); |
1692 | } else { |
1693 | #if defined(DEBUG) |
1694 | Label okay; |
1695 | BranchIfNotSmi(value, &okay); |
    Stop("Unexpected Smi!");
1697 | Bind(&okay); |
1698 | #endif |
1699 | bic(IP, value, Operand(object)); |
1700 | } |
1701 | tst(IP, Operand(target::ObjectAlignment::kNewObjectAlignmentOffset)); |
1702 | if (how_to_jump != kNoJump) { |
1703 | b(label, how_to_jump == kJumpToNoUpdate ? EQ : NE); |
1704 | } |
1705 | } |
1706 | |
1707 | Register UseRegister(Register reg, RegList* used) { |
1708 | ASSERT(reg != THR); |
1709 | ASSERT(reg != SP); |
1710 | ASSERT(reg != FP); |
1711 | ASSERT(reg != PC); |
1712 | ASSERT((*used & (1 << reg)) == 0); |
1713 | *used |= (1 << reg); |
1714 | return reg; |
1715 | } |
1716 | |
1717 | Register AllocateRegister(RegList* used) { |
1718 | const RegList free = ~*used; |
1719 | return (free == 0) |
1720 | ? kNoRegister |
1721 | : UseRegister( |
1722 | static_cast<Register>(Utils::CountTrailingZerosWord(free)), |
1723 | used); |
1724 | } |
1725 | |
1726 | void Assembler::StoreIntoObject(Register object, |
1727 | const Address& dest, |
1728 | Register value, |
1729 | CanBeSmi can_be_smi, |
1730 | bool lr_reserved) { |
  // x.slot = x. The barrier should have been removed at the IL level.
1732 | ASSERT(object != value); |
1733 | ASSERT(object != LR); |
1734 | ASSERT(value != LR); |
1735 | ASSERT(object != TMP); |
1736 | ASSERT(value != TMP); |
1737 | |
1738 | str(value, dest); |
1739 | |
1740 | // In parallel, test whether |
1741 | // - object is old and not remembered and value is new, or |
1742 | // - object is old and value is old and not marked and concurrent marking is |
1743 | // in progress |
1744 | // If so, call the WriteBarrier stub, which will either add object to the |
1745 | // store buffer (case 1) or add value to the marking stack (case 2). |
1746 | // Compare ObjectLayout::StorePointer. |
1747 | Label done; |
1748 | if (can_be_smi == kValueCanBeSmi) { |
1749 | BranchIfSmi(value, &done); |
1750 | } |
1751 | if (!lr_reserved) Push(LR); |
1752 | ldrb(TMP, FieldAddress(object, target::Object::tags_offset())); |
1753 | ldrb(LR, FieldAddress(value, target::Object::tags_offset())); |
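  // Overlap the object's barrier-relevant tag bits with the value's by
  // shifting, so a single AND plus mask test covers both cases above.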
1754 | and_(TMP, LR, Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift)); |
1755 | ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset())); |
1756 | tst(TMP, Operand(LR)); |
1757 | if (value != kWriteBarrierValueReg) { |
1758 | // Unlikely. Only non-graph intrinsics. |
1759 | // TODO(rmacnak): Shuffle registers in intrinsics. |
1760 | Label restore_and_done; |
1761 | b(&restore_and_done, ZERO); |
1762 | Register objectForCall = object; |
1763 | if (object != kWriteBarrierValueReg) { |
1764 | Push(kWriteBarrierValueReg); |
1765 | } else { |
1766 | COMPILE_ASSERT(R2 != kWriteBarrierValueReg); |
1767 | COMPILE_ASSERT(R3 != kWriteBarrierValueReg); |
1768 | objectForCall = (value == R2) ? R3 : R2; |
1769 | PushList((1 << kWriteBarrierValueReg) | (1 << objectForCall)); |
1770 | mov(objectForCall, Operand(object)); |
1771 | } |
1772 | mov(kWriteBarrierValueReg, Operand(value)); |
1773 | generate_invoke_write_barrier_wrapper_(AL, objectForCall); |
1774 | |
1775 | if (object != kWriteBarrierValueReg) { |
1776 | Pop(kWriteBarrierValueReg); |
1777 | } else { |
1778 | PopList((1 << kWriteBarrierValueReg) | (1 << objectForCall)); |
1779 | } |
1780 | Bind(&restore_and_done); |
1781 | } else { |
1782 | generate_invoke_write_barrier_wrapper_(NE, object); |
1783 | } |
1784 | if (!lr_reserved) Pop(LR); |
1785 | Bind(&done); |
1786 | } |
1787 | |
1788 | void Assembler::StoreIntoArray(Register object, |
1789 | Register slot, |
1790 | Register value, |
1791 | CanBeSmi can_be_smi, |
1792 | bool lr_reserved) { |
  // x.slot = x. The barrier should have been removed at the IL level.
1794 | ASSERT(object != value); |
1795 | ASSERT(object != LR); |
1796 | ASSERT(value != LR); |
1797 | ASSERT(slot != LR); |
1798 | ASSERT(object != TMP); |
1799 | ASSERT(value != TMP); |
1800 | ASSERT(slot != TMP); |
1801 | |
1802 | str(value, Address(slot, 0)); |
1803 | |
1804 | // In parallel, test whether |
1805 | // - object is old and not remembered and value is new, or |
1806 | // - object is old and value is old and not marked and concurrent marking is |
1807 | // in progress |
1808 | // If so, call the WriteBarrier stub, which will either add object to the |
1809 | // store buffer (case 1) or add value to the marking stack (case 2). |
1810 | // Compare ObjectLayout::StorePointer. |
1811 | Label done; |
1812 | if (can_be_smi == kValueCanBeSmi) { |
1813 | BranchIfSmi(value, &done); |
1814 | } |
1815 | if (!lr_reserved) Push(LR); |
1816 | ldrb(TMP, FieldAddress(object, target::Object::tags_offset())); |
1817 | ldrb(LR, FieldAddress(value, target::Object::tags_offset())); |
1818 | and_(TMP, LR, Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift)); |
1819 | ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset())); |
1820 | tst(TMP, Operand(LR)); |
1821 | |
1822 | if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) || |
1823 | (slot != kWriteBarrierSlotReg)) { |
1824 | // Spill and shuffle unimplemented. Currently StoreIntoArray is only used |
1825 | // from StoreIndexInstr, which gets these exact registers from the register |
1826 | // allocator. |
1827 | UNIMPLEMENTED(); |
1828 | } |
1829 | generate_invoke_array_write_barrier_(NE); |
1830 | if (!lr_reserved) Pop(LR); |
1831 | Bind(&done); |
1832 | } |
1833 | |
1834 | void Assembler::StoreIntoObjectOffset(Register object, |
1835 | int32_t offset, |
1836 | Register value, |
1837 | CanBeSmi can_value_be_smi, |
1838 | bool lr_reserved) { |
1839 | int32_t ignored = 0; |
1840 | if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) { |
1841 | StoreIntoObject(object, FieldAddress(object, offset), value, |
1842 | can_value_be_smi, lr_reserved); |
1843 | } else { |
1844 | AddImmediate(IP, object, offset - kHeapObjectTag); |
1845 | StoreIntoObject(object, Address(IP), value, can_value_be_smi, lr_reserved); |
1846 | } |
1847 | } |
1848 | |
1849 | void Assembler::StoreIntoObjectNoBarrier(Register object, |
1850 | const Address& dest, |
1851 | Register value) { |
1852 | str(value, dest); |
1853 | #if defined(DEBUG) |
1854 | Label done; |
1855 | StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate); |
1856 | |
1857 | ldrb(TMP, FieldAddress(object, target::Object::tags_offset())); |
1858 | tst(TMP, Operand(1 << target::ObjectLayout::kOldAndNotRememberedBit)); |
1859 | b(&done, ZERO); |
1860 | |
  Stop("Store buffer update is required");
1862 | Bind(&done); |
1863 | #endif // defined(DEBUG) |
1864 | // No store buffer update. |
1865 | } |
1866 | |
1867 | void Assembler::StoreIntoObjectNoBarrier(Register object, |
1868 | const Address& dest, |
1869 | const Object& value) { |
1870 | ASSERT(IsOriginalObject(value)); |
1871 | ASSERT(IsNotTemporaryScopedHandle(value)); |
1872 | // No store buffer update. |
1873 | LoadObject(IP, value); |
1874 | str(IP, dest); |
1875 | } |
1876 | |
1877 | void Assembler::StoreIntoObjectNoBarrierOffset(Register object, |
1878 | int32_t offset, |
1879 | Register value) { |
1880 | int32_t ignored = 0; |
1881 | if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) { |
1882 | StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); |
1883 | } else { |
1884 | Register base = object == R9 ? R8 : R9; |
1885 | Push(base); |
1886 | AddImmediate(base, object, offset - kHeapObjectTag); |
1887 | StoreIntoObjectNoBarrier(object, Address(base), value); |
1888 | Pop(base); |
1889 | } |
1890 | } |
1891 | |
1892 | void Assembler::StoreIntoObjectNoBarrierOffset(Register object, |
1893 | int32_t offset, |
1894 | const Object& value) { |
1895 | ASSERT(IsOriginalObject(value)); |
1896 | int32_t ignored = 0; |
1897 | if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) { |
1898 | StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); |
1899 | } else { |
1900 | Register base = object == R9 ? R8 : R9; |
1901 | Push(base); |
1902 | AddImmediate(base, object, offset - kHeapObjectTag); |
1903 | StoreIntoObjectNoBarrier(object, Address(base), value); |
1904 | Pop(base); |
1905 | } |
1906 | } |
1907 | |
1908 | void Assembler::StoreInternalPointer(Register object, |
1909 | const Address& dest, |
1910 | Register value) { |
1911 | str(value, dest); |
1912 | } |
1913 | |
1914 | void Assembler::InitializeFieldsNoBarrier(Register object, |
1915 | Register begin, |
1916 | Register end, |
1917 | Register value_even, |
1918 | Register value_odd) { |
1919 | ASSERT(value_odd == value_even + 1); |
1920 | Label init_loop; |
1921 | Bind(&init_loop); |
1922 | AddImmediate(begin, 2 * target::kWordSize); |
1923 | cmp(begin, Operand(end)); |
1924 | strd(value_even, value_odd, begin, -2 * target::kWordSize, LS); |
1925 | b(&init_loop, CC); |
1926 | str(value_even, Address(begin, -2 * target::kWordSize), HI); |
1927 | #if defined(DEBUG) |
1928 | Label done; |
1929 | StoreIntoObjectFilter(object, value_even, &done, kValueCanBeSmi, |
1930 | kJumpToNoUpdate); |
1931 | StoreIntoObjectFilter(object, value_odd, &done, kValueCanBeSmi, |
1932 | kJumpToNoUpdate); |
  Stop("Store buffer update is required");
1934 | Bind(&done); |
1935 | #endif // defined(DEBUG) |
1936 | // No store buffer update. |
1937 | } |
1938 | |
1939 | void Assembler::InitializeFieldsNoBarrierUnrolled(Register object, |
1940 | Register base, |
1941 | intptr_t begin_offset, |
1942 | intptr_t end_offset, |
1943 | Register value_even, |
1944 | Register value_odd) { |
1945 | ASSERT(value_odd == value_even + 1); |
1946 | intptr_t current_offset = begin_offset; |
1947 | while (current_offset + target::kWordSize < end_offset) { |
1948 | strd(value_even, value_odd, base, current_offset); |
1949 | current_offset += 2 * target::kWordSize; |
1950 | } |
1951 | while (current_offset < end_offset) { |
1952 | str(value_even, Address(base, current_offset)); |
1953 | current_offset += target::kWordSize; |
1954 | } |
1955 | #if defined(DEBUG) |
1956 | Label done; |
1957 | StoreIntoObjectFilter(object, value_even, &done, kValueCanBeSmi, |
1958 | kJumpToNoUpdate); |
1959 | StoreIntoObjectFilter(object, value_odd, &done, kValueCanBeSmi, |
1960 | kJumpToNoUpdate); |
  Stop("Store buffer update is required");
1962 | Bind(&done); |
1963 | #endif // defined(DEBUG) |
1964 | // No store buffer update. |
1965 | } |
1966 | |
1967 | void Assembler::StoreIntoSmiField(const Address& dest, Register value) { |
1968 | #if defined(DEBUG) |
1969 | Label done; |
1970 | tst(value, Operand(kHeapObjectTag)); |
1971 | b(&done, EQ); |
  Stop("New value must be Smi.");
1973 | Bind(&done); |
1974 | #endif // defined(DEBUG) |
1975 | str(value, dest); |
1976 | } |
1977 | |
1978 | void Assembler::ExtractClassIdFromTags(Register result, Register tags) { |
1979 | ASSERT(target::ObjectLayout::kClassIdTagPos == 16); |
1980 | ASSERT(target::ObjectLayout::kClassIdTagSize == 16); |
1981 | Lsr(result, tags, Operand(target::ObjectLayout::kClassIdTagPos), AL); |
1982 | } |
1983 | |
1984 | void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) { |
1985 | ASSERT(target::ObjectLayout::kSizeTagPos == 8); |
1986 | ASSERT(target::ObjectLayout::kSizeTagSize == 8); |
1987 | Lsr(result, tags, |
1988 | Operand(target::ObjectLayout::kSizeTagPos - |
1989 | target::ObjectAlignment::kObjectAlignmentLog2), |
1990 | AL); |
1991 | AndImmediate(result, result, |
1992 | (Utils::NBitMask(target::ObjectLayout::kSizeTagSize) |
1993 | << target::ObjectAlignment::kObjectAlignmentLog2)); |
1994 | } |
1995 | |
1996 | void Assembler::LoadClassId(Register result, Register object, Condition cond) { |
1997 | ASSERT(target::ObjectLayout::kClassIdTagPos == 16); |
1998 | ASSERT(target::ObjectLayout::kClassIdTagSize == 16); |
1999 | const intptr_t class_id_offset = |
2000 | target::Object::tags_offset() + |
2001 | target::ObjectLayout::kClassIdTagPos / kBitsPerByte; |
2002 | ldrh(result, FieldAddress(object, class_id_offset), cond); |
2003 | } |
2004 | |
2005 | void Assembler::LoadClassById(Register result, Register class_id) { |
2006 | ASSERT(result != class_id); |
2007 | |
2008 | const intptr_t table_offset = |
2009 | target::Isolate::cached_class_table_table_offset(); |
2010 | |
2011 | LoadIsolate(result); |
2012 | LoadFromOffset(kWord, result, result, table_offset); |
2013 | ldr(result, Address(result, class_id, LSL, target::kWordSizeLog2)); |
2014 | } |
2015 | |
2016 | void Assembler::CompareClassId(Register object, |
2017 | intptr_t class_id, |
2018 | Register scratch) { |
2019 | LoadClassId(scratch, object); |
2020 | CompareImmediate(scratch, class_id); |
2021 | } |
2022 | |
2023 | void Assembler::LoadClassIdMayBeSmi(Register result, Register object) { |
2024 | tst(object, Operand(kSmiTagMask)); |
2025 | LoadClassId(result, object, NE); |
2026 | LoadImmediate(result, kSmiCid, EQ); |
2027 | } |
2028 | |
2029 | void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) { |
2030 | LoadClassIdMayBeSmi(result, object); |
2031 | SmiTag(result); |
2032 | } |
2033 | |
2034 | void Assembler::BailoutIfInvalidBranchOffset(int32_t offset) { |
2035 | if (!CanEncodeBranchDistance(offset)) { |
2036 | ASSERT(!use_far_branches()); |
2037 | BailoutWithBranchOffsetError(); |
2038 | } |
2039 | } |
2040 | |
2041 | int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) { |
2042 | // The offset is off by 8 due to the way the ARM CPUs read PC. |
2043 | offset -= Instr::kPCReadOffset; |
2044 | |
2045 | // Properly preserve only the bits supported in the instruction. |
2046 | offset >>= 2; |
2047 | offset &= kBranchOffsetMask; |
2048 | return (inst & ~kBranchOffsetMask) | offset; |
2049 | } |
2050 | |
2051 | int Assembler::DecodeBranchOffset(int32_t inst) { |
2052 | // Sign-extend, left-shift by 2, then add 8. |
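  // (The << 8 moves the 24-bit field to the top so the arithmetic >> 6
  // sign-extends it while leaving a net left shift of 2.)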
2053 | return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset); |
2054 | } |
2055 | |
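// movw/movt encode a 16-bit immediate as imm4:imm12 (bits 19:16 and 11:0 of
// the instruction). This reassembles the 32-bit value that a movw/movt pair
// loads.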
2056 | static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) { |
2057 | int32_t offset = 0; |
2058 | offset |= (movt & 0xf0000) << 12; |
2059 | offset |= (movt & 0xfff) << 16; |
2060 | offset |= (movw & 0xf0000) >> 4; |
2061 | offset |= movw & 0xfff; |
2062 | return offset; |
2063 | } |
2064 | |
2065 | class PatchFarBranch : public AssemblerFixup { |
2066 | public: |
2067 | PatchFarBranch() {} |
2068 | |
2069 | void Process(const MemoryRegion& region, intptr_t position) { |
2070 | ProcessARMv7(region, position); |
2071 | } |
2072 | |
2073 | private: |
2074 | void ProcessARMv7(const MemoryRegion& region, intptr_t position) { |
2075 | const int32_t movw = region.Load<int32_t>(position); |
2076 | const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize); |
2077 | const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize); |
2078 | |
2079 | if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high |
2080 | ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low |
2081 | const int32_t offset = DecodeARMv7LoadImmediate(movt, movw); |
2082 | const int32_t dest = region.start() + offset; |
2083 | const uint16_t dest_high = Utils::High16Bits(dest); |
2084 | const uint16_t dest_low = Utils::Low16Bits(dest); |
2085 | const int32_t patched_movt = |
2086 | 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff); |
2087 | const int32_t patched_movw = |
2088 | 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff); |
2089 | |
2090 | region.Store<int32_t>(position, patched_movw); |
2091 | region.Store<int32_t>(position + Instr::kInstrSize, patched_movt); |
2092 | return; |
2093 | } |
2094 | |
2095 | // If the offset loading instructions aren't there, we must have replaced |
2096 | // the far branch with a near one, and so these instructions |
2097 | // should be NOPs. |
2098 | ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction)); |
2099 | } |
2100 | |
2101 | virtual bool IsPointerOffset() const { return false; } |
2102 | }; |
2103 | |
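// A far branch materializes the target in IP with a movw/movt pair and then
// branches via bx/blx. The PatchFarBranch fixup above rewrites that pair once
// the buffer's final address is known.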
2104 | void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) { |
2105 | buffer_.EmitFixup(new PatchFarBranch()); |
2106 | LoadPatchableImmediate(IP, offset); |
2107 | if (link) { |
2108 | blx(IP, cond); |
2109 | } else { |
2110 | bx(IP, cond); |
2111 | } |
2112 | } |
2113 | |
2114 | void Assembler::EmitBranch(Condition cond, Label* label, bool link) { |
2115 | if (label->IsBound()) { |
2116 | const int32_t dest = label->Position() - buffer_.Size(); |
2117 | if (use_far_branches() && !CanEncodeBranchDistance(dest)) { |
2118 | EmitFarBranch(cond, label->Position(), link); |
2119 | } else { |
2120 | EmitType5(cond, dest, link); |
2121 | } |
2122 | } else { |
2123 | const intptr_t position = buffer_.Size(); |
2124 | if (use_far_branches()) { |
2125 | const int32_t dest = label->position_; |
2126 | EmitFarBranch(cond, dest, link); |
2127 | } else { |
2128 | // Use the offset field of the branch instruction for linking the sites. |
2129 | EmitType5(cond, label->position_, link); |
2130 | } |
2131 | label->LinkTo(position); |
2132 | } |
2133 | } |
2134 | |
2135 | void Assembler::BindARMv7(Label* label) { |
2136 | ASSERT(!label->IsBound()); |
2137 | intptr_t bound_pc = buffer_.Size(); |
2138 | while (label->IsLinked()) { |
2139 | const int32_t position = label->Position(); |
2140 | int32_t dest = bound_pc - position; |
2141 | if (use_far_branches() && !CanEncodeBranchDistance(dest)) { |
2142 | // Far branches are enabled and we can't encode the branch offset. |
2143 | |
2144 | // Grab instructions that load the offset. |
2145 | const int32_t movw = |
2146 | buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize); |
2147 | const int32_t movt = |
2148 | buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); |
2149 | |
      // Change the offset from being relative to the branch to being relative
      // to the assembler buffer.
2152 | dest = buffer_.Size(); |
2153 | const uint16_t dest_high = Utils::High16Bits(dest); |
2154 | const uint16_t dest_low = Utils::Low16Bits(dest); |
2155 | const int32_t patched_movt = |
2156 | 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff); |
2157 | const int32_t patched_movw = |
2158 | 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff); |
2159 | |
2160 | // Rewrite the instructions. |
2161 | buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_movw); |
2162 | buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_movt); |
2163 | label->position_ = DecodeARMv7LoadImmediate(movt, movw); |
2164 | } else if (use_far_branches() && CanEncodeBranchDistance(dest)) { |
2165 | // Far branches are enabled, but we can encode the branch offset. |
2166 | |
2167 | // Grab instructions that load the offset, and the branch. |
2168 | const int32_t movw = |
2169 | buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize); |
2170 | const int32_t movt = |
2171 | buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); |
2172 | const int32_t branch = |
2173 | buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); |
2174 | |
2175 | // Grab the branch condition, and encode the link bit. |
2176 | const int32_t cond = branch & 0xf0000000; |
2177 | const int32_t link = (branch & 0x20) << 19; |
2178 | |
2179 | // Encode the branch and the offset. |
2180 | const int32_t new_branch = cond | link | 0x0a000000; |
2181 | const int32_t encoded = EncodeBranchOffset(dest, new_branch); |
2182 | |
2183 | // Write the encoded branch instruction followed by two nops. |
2184 | buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, encoded); |
2185 | buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, |
2186 | Instr::kNopInstruction); |
2187 | buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, |
2188 | Instr::kNopInstruction); |
2189 | |
2190 | label->position_ = DecodeARMv7LoadImmediate(movt, movw); |
2191 | } else { |
2192 | BailoutIfInvalidBranchOffset(dest); |
2193 | int32_t next = buffer_.Load<int32_t>(position); |
2194 | int32_t encoded = Assembler::EncodeBranchOffset(dest, next); |
2195 | buffer_.Store<int32_t>(position, encoded); |
2196 | label->position_ = Assembler::DecodeBranchOffset(next); |
2197 | } |
2198 | } |
2199 | label->BindTo(bound_pc); |
2200 | } |
2201 | |
2202 | void Assembler::Bind(Label* label) { |
2203 | BindARMv7(label); |
2204 | } |
2205 | |
2206 | OperandSize Address::OperandSizeFor(intptr_t cid) { |
2207 | switch (cid) { |
2208 | case kArrayCid: |
2209 | case kImmutableArrayCid: |
2210 | return kWord; |
2211 | case kOneByteStringCid: |
2212 | case kExternalOneByteStringCid: |
2213 | return kByte; |
2214 | case kTwoByteStringCid: |
2215 | case kExternalTwoByteStringCid: |
2216 | return kHalfword; |
2217 | case kTypedDataInt8ArrayCid: |
2218 | return kByte; |
2219 | case kTypedDataUint8ArrayCid: |
2220 | case kTypedDataUint8ClampedArrayCid: |
2221 | case kExternalTypedDataUint8ArrayCid: |
2222 | case kExternalTypedDataUint8ClampedArrayCid: |
2223 | return kUnsignedByte; |
2224 | case kTypedDataInt16ArrayCid: |
2225 | return kHalfword; |
2226 | case kTypedDataUint16ArrayCid: |
2227 | return kUnsignedHalfword; |
2228 | case kTypedDataInt32ArrayCid: |
2229 | return kWord; |
2230 | case kTypedDataUint32ArrayCid: |
2231 | return kUnsignedWord; |
2232 | case kTypedDataInt64ArrayCid: |
2233 | case kTypedDataUint64ArrayCid: |
2234 | return kDWord; |
2235 | case kTypedDataFloat32ArrayCid: |
2236 | return kSWord; |
2237 | case kTypedDataFloat64ArrayCid: |
2238 | return kDWord; |
2239 | case kTypedDataFloat32x4ArrayCid: |
2240 | case kTypedDataInt32x4ArrayCid: |
2241 | case kTypedDataFloat64x2ArrayCid: |
2242 | return kRegList; |
2243 | case kTypedDataInt8ArrayViewCid: |
2244 | UNREACHABLE(); |
2245 | return kByte; |
2246 | default: |
2247 | UNREACHABLE(); |
2248 | return kByte; |
2249 | } |
2250 | } |
2251 | |
2252 | bool Address::CanHoldLoadOffset(OperandSize size, |
2253 | int32_t offset, |
2254 | int32_t* offset_mask) { |
2255 | switch (size) { |
2256 | case kByte: |
2257 | case kHalfword: |
2258 | case kUnsignedHalfword: |
2259 | case kWordPair: { |
2260 | *offset_mask = 0xff; |
2261 | return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. |
2262 | } |
2263 | case kUnsignedByte: |
2264 | case kWord: |
2265 | case kUnsignedWord: { |
2266 | *offset_mask = 0xfff; |
2267 | return Utils::IsAbsoluteUint(12, offset); // Addressing mode 2. |
2268 | } |
2269 | case kSWord: |
2270 | case kDWord: { |
2271 | *offset_mask = 0x3fc; // Multiple of 4. |
2272 | // VFP addressing mode. |
2273 | return (Utils::IsAbsoluteUint(10, offset) && Utils::IsAligned(offset, 4)); |
2274 | } |
2275 | case kRegList: { |
2276 | *offset_mask = 0x0; |
2277 | return offset == 0; |
2278 | } |
2279 | default: { |
2280 | UNREACHABLE(); |
2281 | return false; |
2282 | } |
2283 | } |
2284 | } |
2285 | |
2286 | bool Address::CanHoldStoreOffset(OperandSize size, |
2287 | int32_t offset, |
2288 | int32_t* offset_mask) { |
2289 | switch (size) { |
2290 | case kHalfword: |
2291 | case kUnsignedHalfword: |
2292 | case kWordPair: { |
2293 | *offset_mask = 0xff; |
2294 | return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. |
2295 | } |
2296 | case kByte: |
2297 | case kUnsignedByte: |
2298 | case kWord: |
2299 | case kUnsignedWord: { |
2300 | *offset_mask = 0xfff; |
2301 | return Utils::IsAbsoluteUint(12, offset); // Addressing mode 2. |
2302 | } |
2303 | case kSWord: |
2304 | case kDWord: { |
2305 | *offset_mask = 0x3fc; // Multiple of 4. |
2306 | // VFP addressing mode. |
2307 | return (Utils::IsAbsoluteUint(10, offset) && Utils::IsAligned(offset, 4)); |
2308 | } |
2309 | case kRegList: { |
2310 | *offset_mask = 0x0; |
2311 | return offset == 0; |
2312 | } |
2313 | default: { |
2314 | UNREACHABLE(); |
2315 | return false; |
2316 | } |
2317 | } |
2318 | } |
2319 | |
2320 | bool Address::CanHoldImmediateOffset(bool is_load, |
2321 | intptr_t cid, |
2322 | int64_t offset) { |
2323 | int32_t offset_mask = 0; |
2324 | if (is_load) { |
2325 | return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask); |
2326 | } else { |
2327 | return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask); |
2328 | } |
2329 | } |
2330 | |
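// Push/Pop are the single-register equivalents of push/pop: a str with
// pre-decrement writeback and an ldr with post-increment writeback on SP.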
2331 | void Assembler::Push(Register rd, Condition cond) { |
2332 | str(rd, Address(SP, -target::kWordSize, Address::PreIndex), cond); |
2333 | } |
2334 | |
2335 | void Assembler::Pop(Register rd, Condition cond) { |
2336 | ldr(rd, Address(SP, target::kWordSize, Address::PostIndex), cond); |
2337 | } |
2338 | |
2339 | void Assembler::PushList(RegList regs, Condition cond) { |
2340 | stm(DB_W, SP, regs, cond); |
2341 | } |
2342 | |
2343 | void Assembler::PopList(RegList regs, Condition cond) { |
2344 | ldm(IA_W, SP, regs, cond); |
2345 | } |
2346 | |
2347 | void Assembler::PushRegisters(const RegisterSet& regs) { |
2348 | const intptr_t fpu_regs_count = regs.FpuRegisterCount(); |
2349 | if (fpu_regs_count > 0) { |
2350 | AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize)); |
2351 | // Store fpu registers with the lowest register number at the lowest |
2352 | // address. |
2353 | intptr_t offset = 0; |
2354 | mov(TMP, Operand(SP)); |
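    // Note: TMP aliases IP on ARM, so the vstmd below walks the copy of SP
    // made here while SP itself stays put.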
2355 | for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) { |
2356 | QRegister fpu_reg = static_cast<QRegister>(i); |
2357 | if (regs.ContainsFpuRegister(fpu_reg)) { |
2358 | DRegister d = EvenDRegisterOf(fpu_reg); |
2359 | ASSERT(d + 1 == OddDRegisterOf(fpu_reg)); |
2360 | vstmd(IA_W, IP, d, 2); |
2361 | offset += kFpuRegisterSize; |
2362 | } |
2363 | } |
2364 | ASSERT(offset == (fpu_regs_count * kFpuRegisterSize)); |
2365 | } |
2366 | |
2367 | // The order in which the registers are pushed must match the order |
2368 | // in which the registers are encoded in the safe point's stack map. |
2369 | // NOTE: This matches the order of ARM's multi-register push. |
2370 | RegList reg_list = 0; |
2371 | for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) { |
2372 | Register reg = static_cast<Register>(i); |
2373 | if (regs.ContainsRegister(reg)) { |
2374 | reg_list |= (1 << reg); |
2375 | } |
2376 | } |
2377 | if (reg_list != 0) { |
2378 | PushList(reg_list); |
2379 | } |
2380 | } |
2381 | |
2382 | void Assembler::PopRegisters(const RegisterSet& regs) { |
2383 | RegList reg_list = 0; |
2384 | for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) { |
2385 | Register reg = static_cast<Register>(i); |
2386 | if (regs.ContainsRegister(reg)) { |
2387 | reg_list |= (1 << reg); |
2388 | } |
2389 | } |
2390 | if (reg_list != 0) { |
2391 | PopList(reg_list); |
2392 | } |
2393 | |
2394 | const intptr_t fpu_regs_count = regs.FpuRegisterCount(); |
2395 | if (fpu_regs_count > 0) { |
2396 | // Fpu registers have the lowest register number at the lowest address. |
2397 | intptr_t offset = 0; |
2398 | for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) { |
2399 | QRegister fpu_reg = static_cast<QRegister>(i); |
2400 | if (regs.ContainsFpuRegister(fpu_reg)) { |
2401 | DRegister d = EvenDRegisterOf(fpu_reg); |
2402 | ASSERT(d + 1 == OddDRegisterOf(fpu_reg)); |
2403 | vldmd(IA_W, SP, d, 2); |
2404 | offset += kFpuRegisterSize; |
2405 | } |
2406 | } |
2407 | ASSERT(offset == (fpu_regs_count * kFpuRegisterSize)); |
2408 | } |
2409 | } |
2410 | |
2411 | void Assembler::PushNativeCalleeSavedRegisters() { |
  // Save the C++ ABI callee-saved CPU registers.
2413 | PushList(kAbiPreservedCpuRegs); |
2414 | |
2415 | const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg); |
2416 | if (TargetCPUFeatures::vfp_supported()) { |
2417 | ASSERT(2 * kAbiPreservedFpuRegCount < 16); |
2418 | // Save FPU registers. 2 D registers per Q register. |
2419 | vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); |
2420 | } else { |
2421 | sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize)); |
2422 | } |
2423 | } |
2424 | |
2425 | void Assembler::PopNativeCalleeSavedRegisters() { |
2426 | const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg); |
2427 | // Restore C++ ABI callee-saved registers. |
2428 | if (TargetCPUFeatures::vfp_supported()) { |
2429 | // Restore FPU registers. 2 D registers per Q register. |
2430 | vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); |
2431 | } else { |
2432 | AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize); |
2433 | } |
2434 | // Restore CPU registers. |
2435 | PopList(kAbiPreservedCpuRegs); |
2436 | } |
2437 | |
2438 | void Assembler::MoveRegister(Register rd, Register rm, Condition cond) { |
2439 | if (rd != rm) { |
2440 | mov(rd, Operand(rm), cond); |
2441 | } |
2442 | } |
2443 | |
2444 | void Assembler::Lsl(Register rd, |
2445 | Register rm, |
2446 | const Operand& shift_imm, |
2447 | Condition cond) { |
2448 | ASSERT(shift_imm.type() == 1); |
2449 | ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted. |
2450 | mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond); |
2451 | } |
2452 | |
2453 | void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) { |
2454 | mov(rd, Operand(rm, LSL, rs), cond); |
2455 | } |
2456 | |
2457 | void Assembler::Lsr(Register rd, |
2458 | Register rm, |
2459 | const Operand& shift_imm, |
2460 | Condition cond) { |
2461 | ASSERT(shift_imm.type() == 1); |
2462 | uint32_t shift = shift_imm.encoding(); |
2463 | ASSERT(shift != 0); // Do not use Lsr if no shift is wanted. |
2464 | if (shift == 32) { |
    shift = 0;  // Comply with UAL syntax.
2466 | } |
2467 | mov(rd, Operand(rm, LSR, shift), cond); |
2468 | } |
2469 | |
2470 | void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) { |
2471 | mov(rd, Operand(rm, LSR, rs), cond); |
2472 | } |
2473 | |
2474 | void Assembler::Asr(Register rd, |
2475 | Register rm, |
2476 | const Operand& shift_imm, |
2477 | Condition cond) { |
2478 | ASSERT(shift_imm.type() == 1); |
2479 | uint32_t shift = shift_imm.encoding(); |
2480 | ASSERT(shift != 0); // Do not use Asr if no shift is wanted. |
2481 | if (shift == 32) { |
    shift = 0;  // Comply with UAL syntax.
2483 | } |
2484 | mov(rd, Operand(rm, ASR, shift), cond); |
2485 | } |
2486 | |
2487 | void Assembler::Asrs(Register rd, |
2488 | Register rm, |
2489 | const Operand& shift_imm, |
2490 | Condition cond) { |
2491 | ASSERT(shift_imm.type() == 1); |
2492 | uint32_t shift = shift_imm.encoding(); |
2493 | ASSERT(shift != 0); // Do not use Asr if no shift is wanted. |
2494 | if (shift == 32) { |
    shift = 0;  // Comply with UAL syntax.
2496 | } |
2497 | movs(rd, Operand(rm, ASR, shift), cond); |
2498 | } |
2499 | |
2500 | void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) { |
2501 | mov(rd, Operand(rm, ASR, rs), cond); |
2502 | } |
2503 | |
2504 | void Assembler::Ror(Register rd, |
2505 | Register rm, |
2506 | const Operand& shift_imm, |
2507 | Condition cond) { |
2508 | ASSERT(shift_imm.type() == 1); |
2509 | ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction. |
2510 | mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond); |
2511 | } |
2512 | |
2513 | void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) { |
2514 | mov(rd, Operand(rm, ROR, rs), cond); |
2515 | } |
2516 | |
2517 | void Assembler::Rrx(Register rd, Register rm, Condition cond) { |
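  // A rotate amount of 0 in the ROR encoding denotes RRX (rotate right by one
  // with extend through the carry flag).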
2518 | mov(rd, Operand(rm, ROR, 0), cond); |
2519 | } |
2520 | |
2521 | void Assembler::SignFill(Register rd, Register rm, Condition cond) { |
2522 | Asr(rd, rm, Operand(31), cond); |
2523 | } |
2524 | |
2525 | void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) { |
2526 | ASSERT(qm != QTMP); |
2527 | ASSERT(qd != QTMP); |
2528 | |
2529 | // Reciprocal estimate. |
2530 | vrecpeqs(qd, qm); |
2531 | // 2 Newton-Raphson steps. |
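  // vrecps computes (2 - qm*qd), so each step below is xn+1 = xn * (2 - d*xn).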
2532 | vrecpsqs(QTMP, qm, qd); |
2533 | vmulqs(qd, qd, QTMP); |
2534 | vrecpsqs(QTMP, qm, qd); |
2535 | vmulqs(qd, qd, QTMP); |
2536 | } |
2537 | |
2538 | void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) { |
2539 | ASSERT(qm != QTMP); |
2540 | ASSERT(qd != QTMP); |
2541 | |
2542 | // Reciprocal square root estimate. |
2543 | vrsqrteqs(qd, qm); |
  // 2 Newton-Raphson steps. xn+1 = xn * (3 - qm*xn^2) / 2.
  // First step.
  vmulqs(QTMP, qd, qd);  // QTMP <- xn^2
  vrsqrtsqs(QTMP, qm, QTMP);  // QTMP <- (3 - qm*QTMP) / 2.
2548 | vmulqs(qd, qd, QTMP); // xn+1 <- xn * QTMP |
2549 | // Second step. |
2550 | vmulqs(QTMP, qd, qd); |
2551 | vrsqrtsqs(QTMP, qm, QTMP); |
2552 | vmulqs(qd, qd, QTMP); |
2553 | } |
2554 | |
2555 | void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) { |
2556 | ASSERT(temp != QTMP); |
2557 | ASSERT(qm != QTMP); |
2558 | ASSERT(qd != QTMP); |
2559 | |
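  // Computes sqrt(x) as 1 / (1/sqrt(x)), chaining the two NEON
  // estimate-and-refine helpers above.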
2560 | if (temp != kNoQRegister) { |
2561 | vmovq(temp, qm); |
2562 | qm = temp; |
2563 | } |
2564 | |
2565 | VreciprocalSqrtqs(qd, qm); |
2566 | vmovq(qm, qd); |
2567 | Vreciprocalqs(qd, qm); |
2568 | } |
2569 | |
2570 | void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) { |
2571 | ASSERT(qd != QTMP); |
2572 | ASSERT(qn != QTMP); |
2573 | ASSERT(qm != QTMP); |
2574 | |
2575 | Vreciprocalqs(qd, qm); |
2576 | vmulqs(qd, qn, qd); |
2577 | } |
2578 | |
2579 | void Assembler::Branch(const Code& target, |
2580 | ObjectPoolBuilderEntry::Patchability patchable, |
2581 | Register pp, |
2582 | Condition cond) { |
2583 | const int32_t offset = target::ObjectPool::element_offset( |
2584 | object_pool_builder().FindObject(ToObject(target), patchable)); |
2585 | LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond); |
2586 | Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()), cond); |
2587 | } |
2588 | |
2589 | void Assembler::Branch(const Address& address, Condition cond) { |
2590 | ldr(PC, address, cond); |
2591 | } |
2592 | |
2593 | void Assembler::BranchLink(const Code& target, |
2594 | ObjectPoolBuilderEntry::Patchability patchable, |
2595 | CodeEntryKind entry_kind) { |
2596 | // Make sure that class CallPattern is able to patch the label referred |
2597 | // to by this code sequence. |
2598 | // For added code robustness, use 'blx lr' in a patchable sequence and |
2599 | // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). |
2600 | const int32_t offset = target::ObjectPool::element_offset( |
2601 | object_pool_builder().FindObject(ToObject(target), patchable)); |
2602 | LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL); |
2603 | ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind))); |
2604 | blx(LR); // Use blx instruction so that the return branch prediction works. |
2605 | } |
2606 | |
2607 | void Assembler::BranchLinkPatchable(const Code& target, |
2608 | CodeEntryKind entry_kind) { |
2609 | BranchLink(target, ObjectPoolBuilderEntry::kPatchable, entry_kind); |
2610 | } |
2611 | |
2612 | void Assembler::BranchLinkToRuntime() { |
2613 | ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset())); |
2614 | blx(IP); |
2615 | } |
2616 | |
2617 | void Assembler::BranchLinkWithEquivalence(const Code& target, |
2618 | const Object& equivalence, |
2619 | CodeEntryKind entry_kind) { |
2620 | // Make sure that class CallPattern is able to patch the label referred |
2621 | // to by this code sequence. |
2622 | // For added code robustness, use 'blx lr' in a patchable sequence and |
2623 | // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). |
2624 | const int32_t offset = target::ObjectPool::element_offset( |
2625 | object_pool_builder().FindObject(ToObject(target), equivalence)); |
2626 | LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL); |
2627 | ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind))); |
2628 | blx(LR); // Use blx instruction so that the return branch prediction works. |
2629 | } |
2630 | |
2631 | void Assembler::BranchLink(const ExternalLabel* label) { |
2632 | LoadImmediate(LR, label->address()); // Target address is never patched. |
2633 | blx(LR); // Use blx instruction so that the return branch prediction works. |
2634 | } |
2635 | |
2636 | void Assembler::BranchLinkOffset(Register base, int32_t offset) { |
2637 | ASSERT(base != PC); |
2638 | ASSERT(base != IP); |
2639 | LoadFromOffset(kWord, IP, base, offset); |
2640 | blx(IP); // Use blx instruction so that the return branch prediction works. |
2641 | } |
2642 | |
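// Always emits the full movw/movt pair so the sequence has a fixed shape that
// patching code can rewrite in place (contrast LoadDecodableImmediate below,
// which omits the movt when the high half is zero).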
2643 | void Assembler::LoadPatchableImmediate(Register rd, |
2644 | int32_t value, |
2645 | Condition cond) { |
2646 | const uint16_t value_low = Utils::Low16Bits(value); |
2647 | const uint16_t value_high = Utils::High16Bits(value); |
2648 | movw(rd, value_low, cond); |
2649 | movt(rd, value_high, cond); |
2650 | } |
2651 | |
2652 | void Assembler::LoadDecodableImmediate(Register rd, |
2653 | int32_t value, |
2654 | Condition cond) { |
2655 | movw(rd, Utils::Low16Bits(value), cond); |
2656 | const uint16_t value_high = Utils::High16Bits(value); |
2657 | if (value_high != 0) { |
2658 | movt(rd, value_high, cond); |
2659 | } |
2660 | } |
2661 | |
2662 | void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) { |
2663 | Operand o; |
2664 | if (Operand::CanHold(value, &o)) { |
2665 | mov(rd, o, cond); |
2666 | } else if (Operand::CanHold(~value, &o)) { |
2667 | mvn(rd, o, cond); |
2668 | } else { |
2669 | LoadDecodableImmediate(rd, value, cond); |
2670 | } |
2671 | } |
2672 | |
2673 | void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) { |
2674 | if (!vmovs(sd, value, cond)) { |
2675 | const DRegister dd = static_cast<DRegister>(sd >> 1); |
2676 | const int index = sd & 1; |
2677 | LoadImmediate(IP, bit_cast<int32_t, float>(value), cond); |
2678 | vmovdr(dd, index, IP, cond); |
2679 | } |
2680 | } |
2681 | |
2682 | void Assembler::LoadDImmediate(DRegister dd, |
2683 | double value, |
2684 | Register scratch, |
2685 | Condition cond) { |
2686 | ASSERT(scratch != PC); |
2687 | ASSERT(scratch != IP); |
2688 | if (!vmovd(dd, value, cond)) { |
2689 | // A scratch register and IP are needed to load an arbitrary double. |
2690 | ASSERT(scratch != kNoRegister); |
2691 | int64_t imm64 = bit_cast<int64_t, double>(value); |
2692 | LoadImmediate(IP, Utils::Low32Bits(imm64), cond); |
2693 | LoadImmediate(scratch, Utils::High32Bits(imm64), cond); |
2694 | vmovdrr(dd, IP, scratch, cond); |
2695 | } |
2696 | } |
2697 | |
2698 | void Assembler::LoadFromOffset(OperandSize size, |
2699 | Register reg, |
2700 | Register base, |
2701 | int32_t offset, |
2702 | Condition cond) { |
2703 | ASSERT(size != kWordPair); |
2704 | int32_t offset_mask = 0; |
2705 | if (!Address::CanHoldLoadOffset(size, offset, &offset_mask)) { |
2706 | ASSERT(base != IP); |
2707 | AddImmediate(IP, base, offset & ~offset_mask, cond); |
2708 | base = IP; |
2709 | offset = offset & offset_mask; |
2710 | } |
2711 | switch (size) { |
2712 | case kByte: |
2713 | ldrsb(reg, Address(base, offset), cond); |
2714 | break; |
2715 | case kUnsignedByte: |
2716 | ldrb(reg, Address(base, offset), cond); |
2717 | break; |
2718 | case kHalfword: |
2719 | ldrsh(reg, Address(base, offset), cond); |
2720 | break; |
2721 | case kUnsignedHalfword: |
2722 | ldrh(reg, Address(base, offset), cond); |
2723 | break; |
2724 | case kWord: |
2725 | ldr(reg, Address(base, offset), cond); |
2726 | break; |
2727 | default: |
2728 | UNREACHABLE(); |
2729 | } |
2730 | } |
2731 | |
2732 | void Assembler::StoreToOffset(OperandSize size, |
2733 | Register reg, |
2734 | Register base, |
2735 | int32_t offset, |
2736 | Condition cond) { |
2737 | ASSERT(size != kWordPair); |
2738 | int32_t offset_mask = 0; |
2739 | if (!Address::CanHoldStoreOffset(size, offset, &offset_mask)) { |
2740 | ASSERT(reg != IP); |
2741 | ASSERT(base != IP); |
2742 | AddImmediate(IP, base, offset & ~offset_mask, cond); |
2743 | base = IP; |
2744 | offset = offset & offset_mask; |
2745 | } |
2746 | switch (size) { |
2747 | case kByte: |
2748 | strb(reg, Address(base, offset), cond); |
2749 | break; |
2750 | case kHalfword: |
2751 | strh(reg, Address(base, offset), cond); |
2752 | break; |
2753 | case kWord: |
2754 | str(reg, Address(base, offset), cond); |
2755 | break; |
2756 | default: |
2757 | UNREACHABLE(); |
2758 | } |
2759 | } |
2760 | |
2761 | void Assembler::LoadSFromOffset(SRegister reg, |
2762 | Register base, |
2763 | int32_t offset, |
2764 | Condition cond) { |
2765 | int32_t offset_mask = 0; |
2766 | if (!Address::CanHoldLoadOffset(kSWord, offset, &offset_mask)) { |
2767 | ASSERT(base != IP); |
2768 | AddImmediate(IP, base, offset & ~offset_mask, cond); |
2769 | base = IP; |
2770 | offset = offset & offset_mask; |
2771 | } |
2772 | vldrs(reg, Address(base, offset), cond); |
2773 | } |
2774 | |
2775 | void Assembler::StoreSToOffset(SRegister reg, |
2776 | Register base, |
2777 | int32_t offset, |
2778 | Condition cond) { |
2779 | int32_t offset_mask = 0; |
2780 | if (!Address::CanHoldStoreOffset(kSWord, offset, &offset_mask)) { |
2781 | ASSERT(base != IP); |
2782 | AddImmediate(IP, base, offset & ~offset_mask, cond); |
2783 | base = IP; |
2784 | offset = offset & offset_mask; |
2785 | } |
2786 | vstrs(reg, Address(base, offset), cond); |
2787 | } |
2788 | |
2789 | void Assembler::LoadDFromOffset(DRegister reg, |
2790 | Register base, |
2791 | int32_t offset, |
2792 | Condition cond) { |
2793 | int32_t offset_mask = 0; |
2794 | if (!Address::CanHoldLoadOffset(kDWord, offset, &offset_mask)) { |
2795 | ASSERT(base != IP); |
2796 | AddImmediate(IP, base, offset & ~offset_mask, cond); |
2797 | base = IP; |
2798 | offset = offset & offset_mask; |
2799 | } |
2800 | vldrd(reg, Address(base, offset), cond); |
2801 | } |
2802 | |
2803 | void Assembler::StoreDToOffset(DRegister reg, |
2804 | Register base, |
2805 | int32_t offset, |
2806 | Condition cond) { |
2807 | int32_t offset_mask = 0; |
2808 | if (!Address::CanHoldStoreOffset(kDWord, offset, &offset_mask)) { |
2809 | ASSERT(base != IP); |
2810 | AddImmediate(IP, base, offset & ~offset_mask, cond); |
2811 | base = IP; |
2812 | offset = offset & offset_mask; |
2813 | } |
2814 | vstrd(reg, Address(base, offset), cond); |
2815 | } |
2816 | |
2817 | void Assembler::LoadMultipleDFromOffset(DRegister first, |
2818 | intptr_t count, |
2819 | Register base, |
2820 | int32_t offset) { |
2821 | ASSERT(base != IP); |
2822 | AddImmediate(IP, base, offset); |
2823 | vldmd(IA, IP, first, count); |
2824 | } |
2825 | |
2826 | void Assembler::StoreMultipleDToOffset(DRegister first, |
2827 | intptr_t count, |
2828 | Register base, |
2829 | int32_t offset) { |
2830 | ASSERT(base != IP); |
2831 | AddImmediate(IP, base, offset); |
2832 | vstmd(IA, IP, first, count); |
2833 | } |
2834 | |
2835 | void Assembler::CopyDoubleField(Register dst, |
2836 | Register src, |
2837 | Register tmp1, |
2838 | Register tmp2, |
2839 | DRegister dtmp) { |
2840 | if (TargetCPUFeatures::vfp_supported()) { |
2841 | LoadDFromOffset(dtmp, src, target::Double::value_offset() - kHeapObjectTag); |
2842 | StoreDToOffset(dtmp, dst, target::Double::value_offset() - kHeapObjectTag); |
2843 | } else { |
2844 | LoadFromOffset(kWord, tmp1, src, |
2845 | target::Double::value_offset() - kHeapObjectTag); |
2846 | LoadFromOffset( |
2847 | kWord, tmp2, src, |
2848 | target::Double::value_offset() + target::kWordSize - kHeapObjectTag); |
2849 | StoreToOffset(kWord, tmp1, dst, |
2850 | target::Double::value_offset() - kHeapObjectTag); |
2851 | StoreToOffset( |
2852 | kWord, tmp2, dst, |
2853 | target::Double::value_offset() + target::kWordSize - kHeapObjectTag); |
2854 | } |
2855 | } |
2856 | |
2857 | void Assembler::CopyFloat32x4Field(Register dst, |
2858 | Register src, |
2859 | Register tmp1, |
2860 | Register tmp2, |
2861 | DRegister dtmp) { |
2862 | if (TargetCPUFeatures::neon_supported()) { |
2863 | LoadMultipleDFromOffset(dtmp, 2, src, |
2864 | target::Float32x4::value_offset() - kHeapObjectTag); |
2865 | StoreMultipleDToOffset(dtmp, 2, dst, |
2866 | target::Float32x4::value_offset() - kHeapObjectTag); |
2867 | } else { |
2868 | LoadFromOffset(kWord, tmp1, src, |
2869 | (target::Float32x4::value_offset() + 0 * target::kWordSize) - |
2870 | kHeapObjectTag); |
2871 | LoadFromOffset(kWord, tmp2, src, |
2872 | (target::Float32x4::value_offset() + 1 * target::kWordSize) - |
2873 | kHeapObjectTag); |
2874 | StoreToOffset(kWord, tmp1, dst, |
2875 | (target::Float32x4::value_offset() + 0 * target::kWordSize) - |
2876 | kHeapObjectTag); |
2877 | StoreToOffset(kWord, tmp2, dst, |
2878 | (target::Float32x4::value_offset() + 1 * target::kWordSize) - |
2879 | kHeapObjectTag); |
2880 | |
2881 | LoadFromOffset(kWord, tmp1, src, |
2882 | (target::Float32x4::value_offset() + 2 * target::kWordSize) - |
2883 | kHeapObjectTag); |
2884 | LoadFromOffset(kWord, tmp2, src, |
2885 | (target::Float32x4::value_offset() + 3 * target::kWordSize) - |
2886 | kHeapObjectTag); |
2887 | StoreToOffset(kWord, tmp1, dst, |
2888 | (target::Float32x4::value_offset() + 2 * target::kWordSize) - |
2889 | kHeapObjectTag); |
2890 | StoreToOffset(kWord, tmp2, dst, |
2891 | (target::Float32x4::value_offset() + 3 * target::kWordSize) - |
2892 | kHeapObjectTag); |
2893 | } |
2894 | } |
2895 | |
2896 | void Assembler::CopyFloat64x2Field(Register dst, |
2897 | Register src, |
2898 | Register tmp1, |
2899 | Register tmp2, |
2900 | DRegister dtmp) { |
2901 | if (TargetCPUFeatures::neon_supported()) { |
2902 | LoadMultipleDFromOffset(dtmp, 2, src, |
2903 | target::Float64x2::value_offset() - kHeapObjectTag); |
2904 | StoreMultipleDToOffset(dtmp, 2, dst, |
2905 | target::Float64x2::value_offset() - kHeapObjectTag); |
2906 | } else { |
2907 | LoadFromOffset(kWord, tmp1, src, |
2908 | (target::Float64x2::value_offset() + 0 * target::kWordSize) - |
2909 | kHeapObjectTag); |
2910 | LoadFromOffset(kWord, tmp2, src, |
2911 | (target::Float64x2::value_offset() + 1 * target::kWordSize) - |
2912 | kHeapObjectTag); |
2913 | StoreToOffset(kWord, tmp1, dst, |
2914 | (target::Float64x2::value_offset() + 0 * target::kWordSize) - |
2915 | kHeapObjectTag); |
2916 | StoreToOffset(kWord, tmp2, dst, |
2917 | (target::Float64x2::value_offset() + 1 * target::kWordSize) - |
2918 | kHeapObjectTag); |
2919 | |
2920 | LoadFromOffset(kWord, tmp1, src, |
2921 | (target::Float64x2::value_offset() + 2 * target::kWordSize) - |
2922 | kHeapObjectTag); |
2923 | LoadFromOffset(kWord, tmp2, src, |
2924 | (target::Float64x2::value_offset() + 3 * target::kWordSize) - |
2925 | kHeapObjectTag); |
2926 | StoreToOffset(kWord, tmp1, dst, |
2927 | (target::Float64x2::value_offset() + 2 * target::kWordSize) - |
2928 | kHeapObjectTag); |
2929 | StoreToOffset(kWord, tmp2, dst, |
2930 | (target::Float64x2::value_offset() + 3 * target::kWordSize) - |
2931 | kHeapObjectTag); |
2932 | } |
2933 | } |
2934 | |
2935 | void Assembler::AddImmediate(Register rd, |
2936 | Register rn, |
2937 | int32_t value, |
2938 | Condition cond) { |
2939 | if (value == 0) { |
2940 | if (rd != rn) { |
2941 | mov(rd, Operand(rn), cond); |
2942 | } |
2943 | return; |
2944 | } |
  // We prefer the shorter code sequence, rather than selecting add for
  // positive values and sub for negative ones, which would slightly improve
  // the readability of generated code for some constants.
2948 | Operand o; |
2949 | if (Operand::CanHold(value, &o)) { |
2950 | add(rd, rn, o, cond); |
2951 | } else if (Operand::CanHold(-value, &o)) { |
2952 | sub(rd, rn, o, cond); |
2953 | } else { |
2954 | ASSERT(rn != IP); |
2955 | if (Operand::CanHold(~value, &o)) { |
2956 | mvn(IP, o, cond); |
2957 | add(rd, rn, Operand(IP), cond); |
2958 | } else if (Operand::CanHold(~(-value), &o)) { |
2959 | mvn(IP, o, cond); |
2960 | sub(rd, rn, Operand(IP), cond); |
2961 | } else if (value > 0) { |
2962 | LoadDecodableImmediate(IP, value, cond); |
2963 | add(rd, rn, Operand(IP), cond); |
2964 | } else { |
2965 | LoadDecodableImmediate(IP, -value, cond); |
2966 | sub(rd, rn, Operand(IP), cond); |
2967 | } |
2968 | } |
2969 | } |
2970 | |
2971 | void Assembler::AddImmediateSetFlags(Register rd, |
2972 | Register rn, |
2973 | int32_t value, |
2974 | Condition cond) { |
2975 | Operand o; |
2976 | if (Operand::CanHold(value, &o)) { |
2977 | // Handles value == kMinInt32. |
2978 | adds(rd, rn, o, cond); |
2979 | } else if (Operand::CanHold(-value, &o)) { |
2980 | ASSERT(value != kMinInt32); // Would cause erroneous overflow detection. |
2981 | subs(rd, rn, o, cond); |
2982 | } else { |
2983 | ASSERT(rn != IP); |
2984 | if (Operand::CanHold(~value, &o)) { |
2985 | mvn(IP, o, cond); |
2986 | adds(rd, rn, Operand(IP), cond); |
2987 | } else if (Operand::CanHold(~(-value), &o)) { |
2988 | ASSERT(value != kMinInt32); // Would cause erroneous overflow detection. |
2989 | mvn(IP, o, cond); |
2990 | subs(rd, rn, Operand(IP), cond); |
2991 | } else { |
2992 | LoadDecodableImmediate(IP, value, cond); |
2993 | adds(rd, rn, Operand(IP), cond); |
2994 | } |
2995 | } |
2996 | } |
2997 | |
2998 | void Assembler::SubImmediate(Register rd, |
2999 | Register rn, |
3000 | int32_t value, |
3001 | Condition cond) { |
3002 | AddImmediate(rd, rn, -value, cond); |
3003 | } |
3004 | |
3005 | void Assembler::SubImmediateSetFlags(Register rd, |
3006 | Register rn, |
3007 | int32_t value, |
3008 | Condition cond) { |
3009 | Operand o; |
3010 | if (Operand::CanHold(value, &o)) { |
3011 | // Handles value == kMinInt32. |
3012 | subs(rd, rn, o, cond); |
3013 | } else if (Operand::CanHold(-value, &o)) { |
3014 | ASSERT(value != kMinInt32); // Would cause erroneous overflow detection. |
3015 | adds(rd, rn, o, cond); |
3016 | } else { |
3017 | ASSERT(rn != IP); |
3018 | if (Operand::CanHold(~value, &o)) { |
3019 | mvn(IP, o, cond); |
3020 | subs(rd, rn, Operand(IP), cond); |
3021 | } else if (Operand::CanHold(~(-value), &o)) { |
3022 | ASSERT(value != kMinInt32); // Would cause erroneous overflow detection. |
3023 | mvn(IP, o, cond); |
3024 | adds(rd, rn, Operand(IP), cond); |
3025 | } else { |
3026 | LoadDecodableImmediate(IP, value, cond); |
3027 | subs(rd, rn, Operand(IP), cond); |
3028 | } |
3029 | } |
3030 | } |
3031 | |
3032 | void Assembler::AndImmediate(Register rd, |
3033 | Register rs, |
3034 | int32_t imm, |
3035 | Condition cond) { |
3036 | Operand o; |
3037 | if (Operand::CanHold(imm, &o)) { |
3038 | and_(rd, rs, Operand(o), cond); |
3039 | } else { |
3040 | LoadImmediate(TMP, imm, cond); |
3041 | and_(rd, rs, Operand(TMP), cond); |
3042 | } |
3043 | } |
3044 | |
3045 | void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) { |
3046 | Operand o; |
3047 | if (Operand::CanHold(value, &o)) { |
3048 | cmp(rn, o, cond); |
3049 | } else { |
3050 | ASSERT(rn != IP); |
3051 | LoadImmediate(IP, value, cond); |
3052 | cmp(rn, Operand(IP), cond); |
3053 | } |
3054 | } |
3055 | |
3056 | void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) { |
3057 | Operand o; |
3058 | if (Operand::CanHold(imm, &o)) { |
3059 | tst(rn, o, cond); |
3060 | } else { |
3061 | LoadImmediate(IP, imm); |
3062 | tst(rn, Operand(IP), cond); |
3063 | } |
3064 | } |
3065 | |
3066 | void Assembler::IntegerDivide(Register result, |
3067 | Register left, |
3068 | Register right, |
3069 | DRegister tmpl, |
3070 | DRegister tmpr) { |
3071 | ASSERT(tmpl != tmpr); |
3072 | if (TargetCPUFeatures::integer_division_supported()) { |
3073 | sdiv(result, left, right); |
3074 | } else { |
3075 | ASSERT(TargetCPUFeatures::vfp_supported()); |
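    // No hardware divider: divide in double precision instead. int32 operands
    // convert exactly (a double has a 53-bit mantissa), and the truncating
    // vcvtid recovers the integer quotient.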
3076 | SRegister stmpl = EvenSRegisterOf(tmpl); |
3077 | SRegister stmpr = EvenSRegisterOf(tmpr); |
3078 | vmovsr(stmpl, left); |
3079 | vcvtdi(tmpl, stmpl); // left is in tmpl. |
3080 | vmovsr(stmpr, right); |
3081 | vcvtdi(tmpr, stmpr); // right is in tmpr. |
3082 | vdivd(tmpr, tmpl, tmpr); |
3083 | vcvtid(stmpr, tmpr); |
3084 | vmovrs(result, stmpr); |
3085 | } |
3086 | } |
3087 | |
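// Returns how many registers in 'regs' are encoded below FP; EnterFrame and
// LeaveFrame use this to locate the saved FP within a PushList frame.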
3088 | static int NumRegsBelowFP(RegList regs) { |
3089 | int count = 0; |
3090 | for (int i = 0; i < FP; i++) { |
3091 | if ((regs & (1 << i)) != 0) { |
3092 | count++; |
3093 | } |
3094 | } |
3095 | return count; |
3096 | } |
3097 | |
3098 | void Assembler::EnterFrame(RegList regs, intptr_t frame_size) { |
3099 | if (prologue_offset_ == -1) { |
3100 | prologue_offset_ = CodeSize(); |
3101 | } |
3102 | PushList(regs); |
3103 | if ((regs & (1 << FP)) != 0) { |
3104 | // Set FP to the saved previous FP. |
3105 | add(FP, SP, Operand(4 * NumRegsBelowFP(regs))); |
3106 | } |
3107 | if (frame_size != 0) { |
3108 | AddImmediate(SP, -frame_size); |
3109 | } |
3110 | } |
3111 | |
3112 | void Assembler::LeaveFrame(RegList regs, bool allow_pop_pc) { |
3113 | ASSERT(allow_pop_pc || (regs & (1 << PC)) == 0); // Must not pop PC. |
3114 | if ((regs & (1 << FP)) != 0) { |
3115 | // Use FP to set SP. |
3116 | sub(SP, FP, Operand(4 * NumRegsBelowFP(regs))); |
3117 | } |
3118 | PopList(regs); |
3119 | } |
3120 | |
3121 | void Assembler::Ret() { |
3122 | bx(LR); |
3123 | } |
3124 | |
3125 | void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) { |
3126 | // Reserve space for arguments and align frame before entering |
3127 | // the C++ world. |
3128 | AddImmediate(SP, -frame_space); |
3129 | if (OS::ActivationFrameAlignment() > 1) { |
3130 | bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1)); |
3131 | } |
3132 | } |
3133 | |
3134 | void Assembler::EmitEntryFrameVerification(Register scratch) { |
3135 | #if defined(DEBUG) |
3136 | Label done; |
3137 | ASSERT(!constant_pool_allowed()); |
3138 | LoadImmediate(scratch, target::frame_layout.exit_link_slot_from_entry_fp * |
3139 | target::kWordSize); |
3140 | add(scratch, scratch, Operand(FPREG)); |
3141 | cmp(scratch, Operand(SPREG)); |
3142 | b(&done, EQ); |
3143 | |
3144 | Breakpoint(); |
3145 | |
3146 | Bind(&done); |
3147 | #endif |
3148 | } |
3149 | |
3150 | void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { |
3151 | Comment("EnterCallRuntimeFrame" ); |
3152 | // Preserve volatile CPU registers and PP. |
3153 | EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 << LR), 0); |
3154 | COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0); |
3155 | |
3156 | // Preserve all volatile FPU registers. |
3157 | if (TargetCPUFeatures::vfp_supported()) { |
3158 | DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg); |
3159 | DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg); |
    if ((lastv - firstv + 1) >= 16) {
      // vstmd can transfer at most 16 D registers at once, so split in two.
      DRegister mid = static_cast<DRegister>(firstv + 16);
3162 | vstmd(DB_W, SP, mid, lastv - mid + 1); |
3163 | vstmd(DB_W, SP, firstv, 16); |
3164 | } else { |
3165 | vstmd(DB_W, SP, firstv, lastv - firstv + 1); |
3166 | } |
3167 | } |
3168 | |
3169 | ReserveAlignedFrameSpace(frame_space); |
3170 | } |
3171 | |
3172 | void Assembler::LeaveCallRuntimeFrame() { |
3173 | // SP might have been modified to reserve space for arguments |
3174 | // and ensure proper alignment of the stack frame. |
3175 | // We need to restore it before restoring registers. |
3176 | const intptr_t kPushedFpuRegisterSize = |
3177 | TargetCPUFeatures::vfp_supported() |
3178 | ? kDartVolatileFpuRegCount * kFpuRegisterSize |
3179 | : 0; |
3180 | |
3181 | COMPILE_ASSERT(PP < FP); |
3182 | COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0); |
  // The registers saved below FP are the volatile CPU registers plus PP and
  // minus LR (LR, though volatile, is pushed above FP), for a net count of
  // kDartVolatileCpuRegCount (+1 for PP, -1 for LR).
3185 | const intptr_t kPushedRegistersSize = |
3186 | kDartVolatileCpuRegCount * target::kWordSize + kPushedFpuRegisterSize; |
3187 | AddImmediate(SP, FP, -kPushedRegistersSize); |
3188 | |
3189 | // Restore all volatile FPU registers. |
3190 | if (TargetCPUFeatures::vfp_supported()) { |
3191 | DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg); |
3192 | DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg); |
    if ((lastv - firstv + 1) >= 16) {
      // vldmd can transfer at most 16 D registers at once, so split in two.
      DRegister mid = static_cast<DRegister>(firstv + 16);
3195 | vldmd(IA_W, SP, firstv, 16); |
3196 | vldmd(IA_W, SP, mid, lastv - mid + 1); |
3197 | } else { |
3198 | vldmd(IA_W, SP, firstv, lastv - firstv + 1); |
3199 | } |
3200 | } |
3201 | |
3202 | // Restore volatile CPU registers. |
3203 | LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 << LR)); |
3204 | } |
3205 | |
3206 | void Assembler::CallRuntime(const RuntimeEntry& entry, |
3207 | intptr_t argument_count) { |
3208 | entry.Call(this, argument_count); |
3209 | } |
3210 | |
3211 | void Assembler::EnterDartFrame(intptr_t frame_size, bool load_pool_pointer) { |
3212 | ASSERT(!constant_pool_allowed()); |
3213 | |
  // Registers are pushed from highest to lowest, so in memory, from lowest
  // address up: PP (R5) | CODE_REG (R6) | FP (R7/R11) | LR (R14).
3215 | COMPILE_ASSERT(PP < CODE_REG); |
3216 | COMPILE_ASSERT(CODE_REG < FP); |
3217 | COMPILE_ASSERT(FP < LR); |
3218 | |
3219 | if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) { |
3220 | EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0); |
3221 | |
    // Set up the pool pointer for this Dart function.
3223 | if (load_pool_pointer) LoadPoolPointer(); |
3224 | } else { |
3225 | EnterFrame((1 << FP) | (1 << LR), 0); |
3226 | } |
3227 | set_constant_pool_allowed(true); |
3228 | |
3229 | // Reserve space for locals. |
3230 | AddImmediate(SP, -frame_size); |
3231 | } |
3232 | |
// On entry to a function compiled for OSR, the caller's frame pointer, the
// stack locals, and any copied parameters are already in place. The PC marker
// is not correct for the optimized function, and there may be extra space for
// spill slots to allocate. We must also set up the pool pointer for the
// function.
3238 | void Assembler::EnterOsrFrame(intptr_t extra_size) { |
3239 | ASSERT(!constant_pool_allowed()); |
3240 | Comment("EnterOsrFrame" ); |
3241 | RestoreCodePointer(); |
3242 | LoadPoolPointer(); |
3243 | |
3244 | AddImmediate(SP, -extra_size); |
3245 | } |
3246 | |
3247 | void Assembler::LeaveDartFrame() { |
3248 | if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) { |
3249 | ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp * |
3250 | target::kWordSize)); |
3251 | } |
3252 | set_constant_pool_allowed(false); |
3253 | |
  // Restoring SP from FP first implicitly drops the saved PP and PC marker.
3256 | LeaveFrame((1 << FP) | (1 << LR)); |
3257 | } |
3258 | |
3259 | void Assembler::LeaveDartFrameAndReturn() { |
3260 | if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) { |
3261 | ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp * |
3262 | target::kWordSize)); |
3263 | } |
3264 | set_constant_pool_allowed(false); |
3265 | |
  // Restoring SP from FP first implicitly drops the saved PP and PC marker.
3268 | LeaveFrame((1 << FP) | (1 << PC), /*allow_pop_pc=*/true); |
3269 | } |
3270 | |
3271 | void Assembler::EnterStubFrame() { |
3272 | EnterDartFrame(0); |
3273 | } |
3274 | |
3275 | void Assembler::LeaveStubFrame() { |
3276 | LeaveDartFrame(); |
3277 | } |
3278 | |
3279 | void Assembler::EnterCFrame(intptr_t frame_space) { |
3280 | EnterFrame(1 << FP, 0); |
3281 | ReserveAlignedFrameSpace(frame_space); |
3282 | } |
3283 | |
3284 | void Assembler::LeaveCFrame() { |
3285 | LeaveFrame(1 << FP); |
3286 | } |
3287 | |
// R0: receiver. R9: ICData entries array.
// Preserves R4 (ARGS_DESC_REG); not required today, but it may be later.
3290 | void Assembler::MonomorphicCheckedEntryJIT() { |
3291 | has_monomorphic_entry_ = true; |
3292 | #if defined(TESTING) || defined(DEBUG) |
3293 | bool saved_use_far_branches = use_far_branches(); |
3294 | set_use_far_branches(false); |
3295 | #endif |
3296 | intptr_t start = CodeSize(); |
3297 | |
3298 | Comment("MonomorphicCheckedEntry" ); |
3299 | ASSERT_EQUAL(CodeSize() - start, |
3300 | target::Instructions::kMonomorphicEntryOffsetJIT); |
3301 | |
3302 | const intptr_t cid_offset = target::Array::element_offset(0); |
3303 | const intptr_t count_offset = target::Array::element_offset(1); |
3304 | |
3305 | // Sadly this cannot use ldm because ldm takes no offset. |
3306 | ldr(R1, FieldAddress(R9, cid_offset)); |
3307 | ldr(R2, FieldAddress(R9, count_offset)); |
3308 | LoadClassIdMayBeSmi(IP, R0); |
3309 | add(R2, R2, Operand(target::ToRawSmi(1))); |
3310 | cmp(R1, Operand(IP, LSL, 1)); |
3311 | Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE); |
3312 | str(R2, FieldAddress(R9, count_offset)); |
3313 | LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction. |
3314 | |
3315 | // Fall through to unchecked entry. |
3316 | ASSERT_EQUAL(CodeSize() - start, |
3317 | target::Instructions::kPolymorphicEntryOffsetJIT); |
3318 | |
3319 | #if defined(TESTING) || defined(DEBUG) |
3320 | set_use_far_branches(saved_use_far_branches); |
3321 | #endif |
3322 | } |
3323 | |
// R0: receiver. R9: guarded cid as a Smi.
// Preserves R4 (ARGS_DESC_REG); not required today, but it may be later.
3326 | void Assembler::MonomorphicCheckedEntryAOT() { |
3327 | has_monomorphic_entry_ = true; |
3328 | #if defined(TESTING) || defined(DEBUG) |
3329 | bool saved_use_far_branches = use_far_branches(); |
3330 | set_use_far_branches(false); |
3331 | #endif |
3332 | intptr_t start = CodeSize(); |
3333 | |
3334 | Comment("MonomorphicCheckedEntry" ); |
3335 | ASSERT_EQUAL(CodeSize() - start, |
3336 | target::Instructions::kMonomorphicEntryOffsetAOT); |
3337 | |
3338 | LoadClassId(IP, R0); |
3339 | cmp(R9, Operand(IP, LSL, 1)); |
3340 | Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE); |
3341 | |
3342 | // Fall through to unchecked entry. |
3343 | ASSERT_EQUAL(CodeSize() - start, |
3344 | target::Instructions::kPolymorphicEntryOffsetAOT); |
3345 | |
3346 | #if defined(TESTING) || defined(DEBUG) |
3347 | set_use_far_branches(saved_use_far_branches); |
3348 | #endif |
3349 | } |
3350 | |
3351 | void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) { |
3352 | has_monomorphic_entry_ = true; |
3353 | while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) { |
3354 | bkpt(0); |
3355 | } |
3356 | b(label); |
3357 | while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) { |
3358 | bkpt(0); |
3359 | } |
3360 | } |
3361 | |
3362 | #ifndef PRODUCT |
3363 | void Assembler::MaybeTraceAllocation(Register stats_addr_reg, Label* trace) { |
3364 | ASSERT(stats_addr_reg != kNoRegister); |
3365 | ASSERT(stats_addr_reg != TMP); |
3366 | ldrb(TMP, Address(stats_addr_reg, 0)); |
3367 | cmp(TMP, Operand(0)); |
3368 | b(trace, NE); |
3369 | } |
3370 | |
3371 | void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid) { |
3372 | ASSERT(dest != kNoRegister); |
3373 | ASSERT(dest != TMP); |
3374 | ASSERT(cid > 0); |
3375 | |
3376 | const intptr_t shared_table_offset = |
3377 | target::Isolate::shared_class_table_offset(); |
3378 | const intptr_t table_offset = |
3379 | target::SharedClassTable::class_heap_stats_table_offset(); |
3380 | const intptr_t class_offset = target::ClassTable::ClassOffsetFor(cid); |
3381 | |
3382 | LoadIsolate(dest); |
3383 | ldr(dest, Address(dest, shared_table_offset)); |
3384 | ldr(dest, Address(dest, table_offset)); |
3385 | AddImmediate(dest, class_offset); |
3386 | } |
3387 | #endif // !PRODUCT |
3388 | |
3389 | void Assembler::TryAllocate(const Class& cls, |
3390 | Label* failure, |
3391 | Register instance_reg, |
3392 | Register temp_reg) { |
3393 | ASSERT(failure != NULL); |
3394 | const intptr_t instance_size = target::Class::GetInstanceSize(cls); |
3395 | if (FLAG_inline_alloc && |
3396 | target::Heap::IsAllocatableInNewSpace(instance_size)) { |
3397 | const classid_t cid = target::Class::GetId(cls); |
3398 | ASSERT(instance_reg != temp_reg); |
3399 | ASSERT(temp_reg != IP); |
3400 | ASSERT(instance_size != 0); |
3401 | NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp_reg, cid)); |
3402 | ldr(instance_reg, Address(THR, target::Thread::top_offset())); |
3403 | // TODO(koda): Protect against unsigned overflow here. |
3404 | AddImmediateSetFlags(instance_reg, instance_reg, instance_size); |
3405 | |
3406 | // instance_reg: potential next object start. |
3407 | ldr(IP, Address(THR, target::Thread::end_offset())); |
3408 | cmp(IP, Operand(instance_reg)); |
    // Fail if the heap end is unsigned-less-than-or-equal-to instance_reg.
3410 | b(failure, LS); |
3411 | |
    // If this allocation is traced, the program will jump to the failure
    // path (i.e. the allocation stub), which will allocate the object and
    // trace the allocation call site.
3415 | NOT_IN_PRODUCT(MaybeTraceAllocation(temp_reg, failure)); |
3416 | |
    // Successfully allocated the object; now update top to point to the next
    // object start and initialize the object's tag word (which encodes cid).
3419 | str(instance_reg, Address(THR, target::Thread::top_offset())); |
3420 | |
3421 | ASSERT(instance_size >= kHeapObjectTag); |
3422 | AddImmediate(instance_reg, -instance_size + kHeapObjectTag); |
3423 | |
3424 | const uint32_t tags = |
3425 | target::MakeTagWordForNewSpaceObject(cid, instance_size); |
3426 | LoadImmediate(IP, tags); |
3427 | str(IP, FieldAddress(instance_reg, target::Object::tags_offset())); |
3428 | } else { |
3429 | b(failure); |
3430 | } |
3431 | } |
3432 | |
3433 | void Assembler::TryAllocateArray(intptr_t cid, |
3434 | intptr_t instance_size, |
3435 | Label* failure, |
3436 | Register instance, |
3437 | Register end_address, |
3438 | Register temp1, |
3439 | Register temp2) { |
3440 | if (FLAG_inline_alloc && |
3441 | target::Heap::IsAllocatableInNewSpace(instance_size)) { |
3442 | NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp1, cid)); |
3443 | // Potential new object start. |
3444 | ldr(instance, Address(THR, target::Thread::top_offset())); |
3445 | AddImmediateSetFlags(end_address, instance, instance_size); |
3446 | b(failure, CS); // Branch if unsigned overflow. |
3447 | |
3448 | // Check if the allocation fits into the remaining space. |
3449 | // instance: potential new object start. |
3450 | // end_address: potential next object start. |
3451 | ldr(temp2, Address(THR, target::Thread::end_offset())); |
3452 | cmp(end_address, Operand(temp2)); |
3453 | b(failure, CS); |
3454 | |
    // If this allocation is traced, the program will jump to the failure
    // path (i.e. the allocation stub), which will allocate the object and
    // trace the allocation call site.
3458 | NOT_IN_PRODUCT(MaybeTraceAllocation(temp1, failure)); |
3459 | |
    // Successfully allocated the object(s); now update top to point to the
    // next object start and initialize the object.
3462 | str(end_address, Address(THR, target::Thread::top_offset())); |
3463 | add(instance, instance, Operand(kHeapObjectTag)); |
3464 | |
3465 | // Initialize the tags. |
3466 | // instance: new object start as a tagged pointer. |
3467 | const uint32_t tags = |
3468 | target::MakeTagWordForNewSpaceObject(cid, instance_size); |
3469 | LoadImmediate(temp2, tags); |
3470 | str(temp2, |
3471 | FieldAddress(instance, target::Object::tags_offset())); // Store tags. |
3472 | } else { |
3473 | b(failure); |
3474 | } |
3475 | } |
3476 | |
3477 | void Assembler::GenerateUnRelocatedPcRelativeCall(Condition cond, |
3478 | intptr_t offset_into_target) { |
3479 | // Emit "blr.cond <offset>". |
3480 | EmitType5(cond, 0x686868, /*link=*/true); |
3481 | |
3482 | PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() - |
3483 | PcRelativeCallPattern::kLengthInBytes); |
3484 | pattern.set_distance(offset_into_target); |
3485 | } |
3486 | |
3487 | void Assembler::GenerateUnRelocatedPcRelativeTailCall( |
3488 | Condition cond, |
3489 | intptr_t offset_into_target) { |
3490 | // Emit "b <offset>". |
3491 | EmitType5(cond, 0x686868, /*link=*/false); |
3492 | |
3493 | PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() - |
3494 | PcRelativeTailCallPattern::kLengthInBytes); |
3495 | pattern.set_distance(offset_into_target); |
3496 | } |
3497 | |
3498 | Address Assembler::ElementAddressForIntIndex(bool is_load, |
3499 | bool is_external, |
3500 | intptr_t cid, |
3501 | intptr_t index_scale, |
3502 | Register array, |
3503 | intptr_t index, |
3504 | Register temp) { |
3505 | const int64_t offset_base = |
3506 | (is_external ? 0 |
3507 | : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
3508 | const int64_t offset = |
3509 | offset_base + static_cast<int64_t>(index) * index_scale; |
3510 | ASSERT(Utils::IsInt(32, offset)); |
3511 | |
3512 | if (Address::CanHoldImmediateOffset(is_load, cid, offset)) { |
3513 | return Address(array, static_cast<int32_t>(offset)); |
3514 | } else { |
3515 | ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)); |
3516 | AddImmediate(temp, array, static_cast<int32_t>(offset_base)); |
3517 | return Address(temp, static_cast<int32_t>(offset - offset_base)); |
3518 | } |
3519 | } |
3520 | |
3521 | void Assembler::LoadElementAddressForIntIndex(Register address, |
3522 | bool is_load, |
3523 | bool is_external, |
3524 | intptr_t cid, |
3525 | intptr_t index_scale, |
3526 | Register array, |
3527 | intptr_t index) { |
3528 | const int64_t offset_base = |
3529 | (is_external ? 0 |
3530 | : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
3531 | const int64_t offset = |
3532 | offset_base + static_cast<int64_t>(index) * index_scale; |
3533 | ASSERT(Utils::IsInt(32, offset)); |
3534 | AddImmediate(address, array, offset); |
3535 | } |
3536 | |
3537 | Address Assembler::ElementAddressForRegIndex(bool is_load, |
3538 | bool is_external, |
3539 | intptr_t cid, |
3540 | intptr_t index_scale, |
3541 | bool index_unboxed, |
3542 | Register array, |
3543 | Register index) { |
  // Unless the index is unboxed, it is expected to be smi-tagged (i.e.
  // shifted left by 1) for all arrays; the tag is undone via boxing_shift.
3545 | const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift; |
3546 | const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift; |
3547 | int32_t offset = |
3548 | is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag); |
3549 | const OperandSize size = Address::OperandSizeFor(cid); |
3550 | ASSERT(array != IP); |
3551 | ASSERT(index != IP); |
3552 | const Register base = is_load ? IP : index; |
3553 | if ((offset != 0) || (is_load && (size == kByte)) || (size == kHalfword) || |
3554 | (size == kUnsignedHalfword) || (size == kSWord) || (size == kDWord) || |
3555 | (size == kRegList)) { |
3556 | if (shift < 0) { |
3557 | ASSERT(shift == -1); |
3558 | add(base, array, Operand(index, ASR, 1)); |
3559 | } else { |
3560 | add(base, array, Operand(index, LSL, shift)); |
3561 | } |
3562 | } else { |
3563 | if (shift < 0) { |
3564 | ASSERT(shift == -1); |
3565 | return Address(array, index, ASR, 1); |
3566 | } else { |
3567 | return Address(array, index, LSL, shift); |
3568 | } |
3569 | } |
3570 | int32_t offset_mask = 0; |
3571 | if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) || |
3572 | (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) { |
3573 | AddImmediate(base, offset & ~offset_mask); |
3574 | offset = offset & offset_mask; |
3575 | } |
3576 | return Address(base, offset); |
3577 | } |
3578 | |
3579 | void Assembler::LoadElementAddressForRegIndex(Register address, |
3580 | bool is_load, |
3581 | bool is_external, |
3582 | intptr_t cid, |
3583 | intptr_t index_scale, |
3584 | bool index_unboxed, |
3585 | Register array, |
3586 | Register index) { |
  // Unless the index is unboxed, it is expected to be smi-tagged (i.e.
  // shifted left by 1) for all arrays; the tag is undone via boxing_shift.
3588 | const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift; |
3589 | const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift; |
3590 | int32_t offset = |
3591 | is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag); |
3592 | if (shift < 0) { |
3593 | ASSERT(shift == -1); |
3594 | add(address, array, Operand(index, ASR, 1)); |
3595 | } else { |
3596 | add(address, array, Operand(index, LSL, shift)); |
3597 | } |
3598 | if (offset != 0) { |
3599 | AddImmediate(address, offset); |
3600 | } |
3601 | } |
3602 | |
3603 | void Assembler::LoadFieldAddressForRegOffset(Register address, |
3604 | Register instance, |
3605 | Register offset_in_words_as_smi) { |
3606 | add(address, instance, |
3607 | Operand(offset_in_words_as_smi, LSL, |
3608 | target::kWordSizeLog2 - kSmiTagShift)); |
3609 | AddImmediate(address, -kHeapObjectTag); |
3610 | } |
3611 | |
3612 | void Assembler::LoadHalfWordUnaligned(Register dst, |
3613 | Register addr, |
3614 | Register tmp) { |
3615 | ASSERT(dst != addr); |
3616 | ldrb(dst, Address(addr, 0)); |
3617 | ldrsb(tmp, Address(addr, 1)); |
3618 | orr(dst, dst, Operand(tmp, LSL, 8)); |
3619 | } |
3620 | |
3621 | void Assembler::LoadHalfWordUnsignedUnaligned(Register dst, |
3622 | Register addr, |
3623 | Register tmp) { |
3624 | ASSERT(dst != addr); |
3625 | ldrb(dst, Address(addr, 0)); |
3626 | ldrb(tmp, Address(addr, 1)); |
3627 | orr(dst, dst, Operand(tmp, LSL, 8)); |
3628 | } |
3629 | |
3630 | void Assembler::StoreHalfWordUnaligned(Register src, |
3631 | Register addr, |
3632 | Register tmp) { |
3633 | strb(src, Address(addr, 0)); |
3634 | Lsr(tmp, src, Operand(8)); |
3635 | strb(tmp, Address(addr, 1)); |
3636 | } |
3637 | |
3638 | void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) { |
3639 | ASSERT(dst != addr); |
3640 | ldrb(dst, Address(addr, 0)); |
3641 | ldrb(tmp, Address(addr, 1)); |
3642 | orr(dst, dst, Operand(tmp, LSL, 8)); |
3643 | ldrb(tmp, Address(addr, 2)); |
3644 | orr(dst, dst, Operand(tmp, LSL, 16)); |
3645 | ldrb(tmp, Address(addr, 3)); |
3646 | orr(dst, dst, Operand(tmp, LSL, 24)); |
3647 | } |
3648 | |
3649 | void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) { |
3650 | strb(src, Address(addr, 0)); |
3651 | Lsr(tmp, src, Operand(8)); |
3652 | strb(tmp, Address(addr, 1)); |
3653 | Lsr(tmp, src, Operand(16)); |
3654 | strb(tmp, Address(addr, 2)); |
3655 | Lsr(tmp, src, Operand(24)); |
3656 | strb(tmp, Address(addr, 3)); |
3657 | } |
3658 | |
3659 | } // namespace compiler |
3660 | } // namespace dart |
3661 | |
3662 | #endif // defined(TARGET_ARCH_ARM) |
3663 | |