// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM64.
#if defined(TARGET_ARCH_ARM64)

#include "vm/instructions.h"
#include "vm/instructions_arm64.h"

#include "vm/constants.h"
#include "vm/cpu.h"
#include "vm/object.h"
#include "vm/reverse_pc_lookup_cache.h"

namespace dart {
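
// Decodes a call site that ends with `blr ip0` and loads its target Code
// object from the object pool; the pool-load sequence ends two instructions
// before the return address `pc`.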
CallPattern::CallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr ip0.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f0200);

  Register reg;
  InstructionPattern::DecodeLoadWordFromPool(pc - 2 * Instr::kInstrSize, &reg,
                                             &target_code_pool_index_);
  ASSERT(reg == CODE_REG);
}
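
// Decodes an IC call site: the call ends with `blr lr`, and the preceding ldp
// loads the call data and the target Code object from two consecutive
// object-pool entries into R5 and CODE_REG.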
ICCallPattern::ICCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_pool_index_(-1),
      data_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);

  Register data_reg, code_reg;
  intptr_t pool_index;
  InstructionPattern::DecodeLoadDoubleWordFromPool(
      pc - 2 * Instr::kInstrSize, &data_reg, &code_reg, &pool_index);
  ASSERT(data_reg == R5);
  ASSERT(code_reg == CODE_REG);

  data_pool_index_ = pool_index;
  target_pool_index_ = pool_index + 1;
}
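
// Decodes a native call site ending with `blr ip0`. Walking backwards, the
// pool load closest to the call provides the target Code object (CODE_REG),
// and the load before that provides the native function pointer (R5).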
NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      end_(pc),
      native_function_pool_index_(-1),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr ip0.
  ASSERT(*(reinterpret_cast<uint32_t*>(end_) - 1) == 0xd63f0200);

  Register reg;
  uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
      end_ - 2 * Instr::kInstrSize, &reg, &target_code_pool_index_);
  ASSERT(reg == CODE_REG);
  InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
                                             &native_function_pool_index_);
  ASSERT(reg == R5);
}

CodePtr NativeCallPattern::target() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}

void NativeCallPattern::set_target(const Code& target) const {
  object_pool_.SetObjectAt(target_code_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

NativeFunction NativeCallPattern::native_function() const {
  return reinterpret_cast<NativeFunction>(
      object_pool_.RawValueAt(native_function_pool_index_));
}

void NativeCallPattern::set_native_function(NativeFunction func) const {
  object_pool_.SetRawValueAt(native_function_pool_index_,
                             reinterpret_cast<uword>(func));
}

// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded object in the output parameters 'reg' and 'obj'
// respectively.
uword InstructionPattern::DecodeLoadObject(uword end,
                                           const ObjectPool& object_pool,
                                           Register* reg,
                                           Object* obj) {
  // 1. LoadWordFromPool
  // or
  // 2. LoadDecodableImmediate
  uword start = 0;
  Instr* instr = Instr::At(end - Instr::kInstrSize);
  if (instr->IsLoadStoreRegOp()) {
    // Case 1.
    intptr_t index = 0;
    start = DecodeLoadWordFromPool(end, reg, &index);
    *obj = object_pool.ObjectAt(index);
  } else {
    // Case 2.
    intptr_t value = 0;
    start = DecodeLoadWordImmediate(end, reg, &value);
    *obj = static_cast<ObjectPtr>(value);
  }
  return start;
}

// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded immediate value in the output parameters 'reg' and 'value'
// respectively.
uword InstructionPattern::DecodeLoadWordImmediate(uword end,
                                                  Register* reg,
                                                  intptr_t* value) {
  // 1. LoadWordFromPool
  // or
  // 2. LoadWordFromPool
  //    orri
  // or
  // 3. LoadPatchableImmediate
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  bool odd = false;

  // Case 2.
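  // A trailing logical-immediate op (orr) sets the low bit of the loaded
  // value; remember that and decode the pool load that precedes it.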
  if (instr->IsLogicalImmOp()) {
    ASSERT(instr->Bit(29) == 1);
    odd = true;
    // end points at orri so that we can pass it to DecodeLoadWordFromPool.
    end = start;
    start -= Instr::kInstrSize;
    instr = Instr::At(start);
    // Case 2 falls through to case 1.
  }

  // Case 1.
  if (instr->IsLoadStoreRegOp()) {
    start = DecodeLoadWordFromPool(end, reg, value);
    if (odd) {
      *value |= 1;
    }
    return start;
  }

  // Case 3.
  // movk dst, imm3, 3; movk dst, imm2, 2; movk dst, imm1, 1; movz dst, imm0, 0
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 3);  // movk dst, imm3, 3
  *reg = instr->RdField();
  *value = static_cast<int64_t>(instr->Imm16Field()) << 48;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 2);  // movk dst, imm2, 2
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field()) << 32;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 1);  // movk dst, imm1, 1
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field()) << 16;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 2);
  ASSERT(instr->HWField() == 0);  // movz dst, imm0, 0
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field());

  return start;
}

// See comment in instructions_arm64.h
uword InstructionPattern::DecodeLoadWordFromPool(uword end,
                                                 Register* reg,
                                                 intptr_t* index) {
  // 1. ldr dst, [pp, offset]
  // or
  // 2. add dst, pp, #offset_hi12
  //    ldr dst, [dst, #offset_lo12]
  // or
  // 3. movz dst, low_offset, 0
  //    movk dst, hi_offset, 1 (optional)
  //    ldr dst, [pp, dst]
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  intptr_t offset = 0;

  // Last instruction is always an ldr into a 64-bit X register.
  ASSERT(instr->IsLoadStoreRegOp() && (instr->Bit(22) == 1) &&
         (instr->Bits(30, 2) == 3));

  // Grab the destination register from the ldr instruction.
  *reg = instr->RtField();

  if (instr->Bit(24) == 1) {
    // base + scaled unsigned 12-bit immediate offset.
    // Case 1.
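    // The unsigned 12-bit immediate of a 64-bit ldr is scaled by the access
    // size (8 bytes), hence the shift by 3.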
    offset |= (instr->Imm12Field() << 3);
    if (instr->RnField() == *reg) {
      start -= Instr::kInstrSize;
      instr = Instr::At(start);
      ASSERT(instr->IsAddSubImmOp());
      ASSERT(instr->RnField() == PP);
      ASSERT(instr->RdField() == *reg);
      offset |= (instr->Imm12Field() << 12);
    }
  } else {
    ASSERT(instr->Bits(10, 2) == 2);
    // We have to look at the preceding one or two instructions to find the
    // offset.

    start -= Instr::kInstrSize;
    instr = Instr::At(start);
    ASSERT(instr->IsMoveWideOp());
    ASSERT(instr->RdField() == *reg);
    if (instr->Bits(29, 2) == 2) {  // movz dst, low_offset, 0
      ASSERT(instr->HWField() == 0);
      offset = instr->Imm16Field();
      // no high offset.
    } else {
      ASSERT(instr->Bits(29, 2) == 3);  // movk dst, high_offset, 1
      ASSERT(instr->HWField() == 1);
      offset = instr->Imm16Field() << 16;

      start -= Instr::kInstrSize;
      instr = Instr::At(start);
      ASSERT(instr->IsMoveWideOp());
      ASSERT(instr->RdField() == *reg);
      ASSERT(instr->Bits(29, 2) == 2);  // movz dst, low_offset, 0
      ASSERT(instr->HWField() == 0);
      offset |= instr->Imm16Field();
    }
  }
  // PP is untagged on ARM64.
  ASSERT(Utils::IsAligned(offset, 8));
  *index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
  return start;
}

// See comment in instructions_arm64.h
uword InstructionPattern::DecodeLoadDoubleWordFromPool(uword end,
                                                       Register* reg1,
                                                       Register* reg2,
                                                       intptr_t* index) {
  // Cases:
  //
  //   1. ldp reg1, reg2, [pp, offset]
  //
  //   2. add tmp, pp, #upper12
  //      ldp reg1, reg2, [tmp, #lower12]
  //
  //   3. add tmp, pp, #upper12
  //      add tmp, tmp, #lower12
  //      ldp reg1, reg2, [tmp, 0]
  //
  // Note that the pp register is untagged!
  //
  uword start = end - Instr::kInstrSize;
  Instr* ldr_instr = Instr::At(start);

  // Last instruction is always an ldp into two 64-bit X registers.
  ASSERT(ldr_instr->IsLoadStoreRegPairOp() && (ldr_instr->Bit(22) == 1));

  // Grab the destination registers from the ldp instruction.
  *reg1 = ldr_instr->RtField();
  *reg2 = ldr_instr->Rt2Field();

  Register base_reg = ldr_instr->RnField();
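  // The signed 7-bit immediate of an ldp of X registers is scaled by 8.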
  const int base_offset = 8 * ldr_instr->Imm7Field();

  intptr_t pool_offset = 0;
  if (base_reg == PP) {
    // Case 1.
    pool_offset = base_offset;
  } else {
    // Cases 2 & 3.
    ASSERT(base_reg == TMP);

    pool_offset = base_offset;

    start -= Instr::kInstrSize;
    Instr* add_instr = Instr::At(start);
    ASSERT(add_instr->IsAddSubImmOp());
    ASSERT(add_instr->RdField() == TMP);

    const auto shift = add_instr->Imm12ShiftField();
    ASSERT(shift == 0 || shift == 1);
    pool_offset += (add_instr->Imm12Field() << (shift == 1 ? 12 : 0));

    if (add_instr->RnField() == TMP) {
      start -= Instr::kInstrSize;
      Instr* prev_add_instr = Instr::At(start);
      ASSERT(prev_add_instr->IsAddSubImmOp());
      ASSERT(prev_add_instr->RnField() == PP);

      const auto shift = prev_add_instr->Imm12ShiftField();
      ASSERT(shift == 0 || shift == 1);
      pool_offset += (prev_add_instr->Imm12Field() << (shift == 1 ? 12 : 0));
    } else {
      ASSERT(add_instr->RnField() == PP);
    }
  }
  *index = ObjectPool::IndexFromOffset(pool_offset - kHeapObjectTag);
  return start;
}
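
// Recognizes a single ldr at 'pc' that loads either from the object pool
// (base register PP) or from the Thread (base register THR) and, when
// possible, returns the loaded object through 'obj'.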
bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
  ASSERT(code.ContainsInstructionAt(pc));

  Instr* instr = Instr::At(pc);
  if (instr->IsLoadStoreRegOp() && (instr->Bit(22) == 1) &&
      (instr->Bits(30, 2) == 3) && (instr->Bit(24) == 1)) {
    intptr_t offset = (instr->Imm12Field() << 3);
    if (instr->RnField() == PP) {
      // PP is untagged on ARM64.
      ASSERT(Utils::IsAligned(offset, 8));
      // A code object may have an object pool attached in bare instructions
      // mode if the v8 snapshot profile writer is active, but this pool cannot
      // be used for object loading.
      if (FLAG_use_bare_instructions) return false;
      intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
      const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
      if (!pool.IsNull()) {
        if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
          *obj = pool.ObjectAt(index);
          return true;
        }
      }
    } else if (instr->RnField() == THR) {
      return Thread::ObjectAtOffset(offset, obj);
    }
  }
  // TODO(rmacnak): Loads with offsets beyond 12 bits.

  return false;
}

// Encodes a load sequence ending at 'end'. Encodes a fixed-length,
// two-instruction load from the pool pointer in PP, using the load's
// destination register as a temporary for the base address.
// Assumes that the location has already been validated for patching.
void InstructionPattern::EncodeLoadWordFromPoolFixed(uword end,
                                                     int32_t offset) {
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  const int32_t upper12 = offset & 0x00fff000;
  const int32_t lower12 = offset & 0x00000fff;
  ASSERT((offset & 0xff000000) == 0);        // Can't encode > 24 bits.
  ASSERT(((lower12 >> 3) << 3) == lower12);  // 8-byte aligned.
  instr->SetImm12Bits(instr->InstructionBits(), lower12 >> 3);

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  instr->SetImm12Bits(instr->InstructionBits(), upper12 >> 12);
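  // B22 is the low bit of the add-immediate shift field; setting it marks the
  // encoded 12-bit immediate as shifted left by 12.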
  instr->SetInstructionBits(instr->InstructionBits() | B22);
}

CodePtr CallPattern::TargetCode() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}

void CallPattern::SetTargetCode(const Code& target) const {
  object_pool_.SetObjectAt(target_code_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

ObjectPtr ICCallPattern::Data() const {
  return object_pool_.ObjectAt(data_pool_index_);
}

void ICCallPattern::SetData(const Object& data) const {
  ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
  object_pool_.SetObjectAt(data_pool_index_, data);
}

CodePtr ICCallPattern::TargetCode() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_pool_index_));
}

void ICCallPattern::SetTargetCode(const Code& target) const {
  object_pool_.SetObjectAt(target_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

SwitchableCallPatternBase::SwitchableCallPatternBase(const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      data_pool_index_(-1),
      target_pool_index_(-1) {}

ObjectPtr SwitchableCallPatternBase::data() const {
  return object_pool_.ObjectAt(data_pool_index_);
}

void SwitchableCallPatternBase::SetData(const Object& data) const {
  ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode());
  object_pool_.SetObjectAt(data_pool_index_, data);
}

SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
    : SwitchableCallPatternBase(code) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);

  Register ic_data_reg, code_reg;
  intptr_t pool_index;
  InstructionPattern::DecodeLoadDoubleWordFromPool(
      pc - 2 * Instr::kInstrSize, &ic_data_reg, &code_reg, &pool_index);
  ASSERT(ic_data_reg == R5);
  ASSERT(code_reg == CODE_REG);

  data_pool_index_ = pool_index;
  target_pool_index_ = pool_index + 1;
}

CodePtr SwitchableCallPattern::target() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_pool_index_));
}

void SwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode());
  object_pool_.SetObjectAt(target_pool_index_, target);
}
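
// In bare-instructions mode the object pool stores the raw monomorphic entry
// point of the target (see SetTarget below), so target() maps that PC back to
// a Code object via the reverse-PC lookup cache.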
BareSwitchableCallPattern::BareSwitchableCallPattern(uword pc, const Code& code)
    : SwitchableCallPatternBase(code) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);

  Register ic_data_reg, code_reg;
  intptr_t pool_index;
  InstructionPattern::DecodeLoadDoubleWordFromPool(
      pc - Instr::kInstrSize, &ic_data_reg, &code_reg, &pool_index);
  ASSERT(ic_data_reg == R5);
  ASSERT(code_reg == LR);

  data_pool_index_ = pool_index;
  target_pool_index_ = pool_index + 1;
}

CodePtr BareSwitchableCallPattern::target() const {
  const uword pc = object_pool_.RawValueAt(target_pool_index_);
  CodePtr result = ReversePc::Lookup(IsolateGroup::Current(), pc);
  if (result != Code::null()) {
    return result;
  }
  result = ReversePc::Lookup(Dart::vm_isolate()->group(), pc);
  if (result != Code::null()) {
    return result;
  }
  UNREACHABLE();
}

void BareSwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(object_pool_.TypeAt(target_pool_index_) ==
         ObjectPool::EntryType::kImmediate);
  object_pool_.SetRawValueAt(target_pool_index_,
                             target.MonomorphicEntryPoint());
}

ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {}

bool ReturnPattern::IsValid() const {
  Instr* bx_lr = Instr::At(pc_);
  const Register crn = ConcreteRegister(LR);
  const int32_t instruction = RET | (static_cast<int32_t>(crn) << kRnShift);
  return bx_lr->InstructionBits() == instruction;
}

bool PcRelativeCallPattern::IsValid() const {
  // bl <offset>
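  // The opcode of an unconditional branch-and-link occupies bits 31:26 and is
  // 0b100101 (0x25).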
  const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
  const uint32_t branch_link = 0x25;
  return (word >> 26) == branch_link;
}

bool PcRelativeTailCallPattern::IsValid() const {
  // b <offset>
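  // A plain unconditional branch has opcode 0b000101 (0x5) in bits 31:26.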
  const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
  const uint32_t branch_nolink = 0x5;
  return (word >> 26) == branch_nolink;
}

void PcRelativeTrampolineJumpPattern::Initialize() {
#if !defined(DART_PRECOMPILED_RUNTIME)
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
  pattern[0] = kAdrEncoding;
  pattern[1] = kMovzEncoding;
  pattern[2] = kAddTmpTmp2;
  pattern[3] = kJumpEncoding;
  set_distance(0);
#else
  UNREACHABLE();
#endif
}

int32_t PcRelativeTrampolineJumpPattern::distance() {
#if !defined(DART_PRECOMPILED_RUNTIME)
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
  const uint32_t adr = pattern[0];
  const uint32_t movz = pattern[1];
  const uint32_t lower16 =
      (((adr >> 5) & ((1 << 19) - 1)) << 2) | ((adr >> 29) & 0x3);
  const uint32_t higher16 = (movz >> kImm16Shift) & 0xffff;
  return (higher16 << 16) | lower16;
#else
  UNREACHABLE();
  return 0;
#endif
}

void PcRelativeTrampolineJumpPattern::set_distance(int32_t distance) {
#if !defined(DART_PRECOMPILED_RUNTIME)
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
  uint32_t low16 = distance & 0xffff;
  uint32_t high16 = (distance >> 16) & 0xffff;
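  // adr splits its 21-bit immediate into immlo (bits 30:29) and immhi
  // (bits 23:5); the low 16 bits of the distance are stored there, and the
  // high 16 bits go into the movz immediate field.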
  pattern[0] = kAdrEncoding | ((low16 & 0x3) << 29) | ((low16 >> 2) << 5);
  pattern[1] = kMovzEncoding | (high16 << kImm16Shift);
  ASSERT(IsValid());
#else
  UNREACHABLE();
#endif
}

bool PcRelativeTrampolineJumpPattern::IsValid() const {
#if !defined(DART_PRECOMPILED_RUNTIME)
  const uint32_t adr_mask = (3 << 29) | (((1 << 19) - 1) << 5);
  const uint32_t movz_mask = 0xffff << 5;
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
  return ((pattern[0] & ~adr_mask) == kAdrEncoding) &&
         ((pattern[1] & ~movz_mask) == kMovzEncoding) &&
         (pattern[2] == kAddTmpTmp2) && (pattern[3] == kJumpEncoding);
#else
  UNREACHABLE();
  return false;
#endif
}

intptr_t TypeTestingStubCallPattern::GetSubtypeTestCachePoolIndex() {
  // Calls to the type testing stubs look like:
  //   ldr R9, ...
  //   ldr R3, [PP+idx]
  //   blr R9
  // or
  //   ldr R3, [PP+idx]
  //   blr pc+<offset>

  // Ensure the caller of the type testing stub (whose return address is [pc_])
  // branched via `blr R9` or a pc-relative call.
  uword pc = pc_ - Instr::kInstrSize;
  const uword blr_r9 = 0xd63f0120;
  if (*reinterpret_cast<uint32_t*>(pc) != blr_r9) {
    PcRelativeCallPattern pattern(pc);
    RELEASE_ASSERT(pattern.IsValid());
  }

  const uword load_instr_end = pc;

  Register reg;
  intptr_t pool_index = -1;
  InstructionPattern::DecodeLoadWordFromPool(load_instr_end, &reg, &pool_index);
  ASSERT(reg == R3);
  return pool_index;
}

}  // namespace dart

#endif  // defined TARGET_ARCH_ARM64