1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_ |
6 | #define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_ |
7 | |
8 | #if defined(DART_PRECOMPILED_RUNTIME) |
9 | #error "AOT runtime should not use compiler sources (including header files)" |
10 | #endif // defined(DART_PRECOMPILED_RUNTIME) |
11 | |
12 | #ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_ |
13 | #error Do not include assembler_x64.h directly; use assembler.h instead. |
14 | #endif |
15 | |
16 | #include <functional> |
17 | |
18 | #include "platform/assert.h" |
19 | #include "platform/utils.h" |
20 | #include "vm/compiler/assembler/assembler_base.h" |
21 | #include "vm/constants.h" |
22 | #include "vm/constants_x86.h" |
23 | #include "vm/hash_map.h" |
24 | #include "vm/pointer_tagging.h" |
25 | |
26 | namespace dart { |
27 | |
28 | // Forward declarations. |
29 | class FlowGraphCompiler; |
30 | |
31 | namespace compiler { |
32 | |
33 | class Immediate : public ValueObject { |
34 | public: |
35 | explicit Immediate(int64_t value) : value_(value) {} |
36 | |
37 | Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {} |
38 | |
39 | int64_t value() const { return value_; } |
40 | |
41 | bool is_int8() const { return Utils::IsInt(8, value_); } |
42 | bool is_uint8() const { return Utils::IsUint(8, value_); } |
43 | bool is_int16() const { return Utils::IsInt(16, value_); } |
44 | bool is_uint16() const { return Utils::IsUint(16, value_); } |
45 | bool is_int32() const { return Utils::IsInt(32, value_); } |
46 | bool is_uint32() const { return Utils::IsUint(32, value_); } |
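
// Example (illustrative): Immediate(0x80).is_int8() is false (0x80 does not
// fit in a signed byte), while Immediate(0x80).is_uint8() is true.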
47 | |
48 | private: |
49 | const int64_t value_; |
50 | |
// TODO(5411081): Add DISALLOW_COPY_AND_ASSIGN(Immediate) once the mac
// build issue is resolved, and remove the then-unnecessary copy
// constructor.
54 | }; |
55 | |
56 | class Operand : public ValueObject { |
57 | public: |
58 | uint8_t rex() const { return rex_; } |
59 | |
60 | uint8_t mod() const { return (encoding_at(0) >> 6) & 3; } |
61 | |
62 | Register rm() const { |
63 | int rm_rex = (rex_ & REX_B) << 3; |
64 | return static_cast<Register>(rm_rex + (encoding_at(0) & 7)); |
65 | } |
66 | |
67 | ScaleFactor scale() const { |
68 | return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3); |
69 | } |
70 | |
71 | Register index() const { |
72 | int index_rex = (rex_ & REX_X) << 2; |
73 | return static_cast<Register>(index_rex + ((encoding_at(1) >> 3) & 7)); |
74 | } |
75 | |
76 | Register base() const { |
77 | int base_rex = (rex_ & REX_B) << 3; |
78 | return static_cast<Register>(base_rex + (encoding_at(1) & 7)); |
79 | } |
80 | |
81 | int8_t disp8() const { |
82 | ASSERT(length_ >= 2); |
83 | return static_cast<int8_t>(encoding_[length_ - 1]); |
84 | } |
85 | |
86 | int32_t disp32() const { |
87 | ASSERT(length_ >= 5); |
88 | return bit_copy<int32_t>(encoding_[length_ - 4]); |
89 | } |
90 | |
91 | Operand(const Operand& other) |
92 | : ValueObject(), length_(other.length_), rex_(other.rex_) { |
93 | memmove(&encoding_[0], &other.encoding_[0], other.length_); |
94 | } |
95 | |
96 | Operand& operator=(const Operand& other) { |
97 | length_ = other.length_; |
98 | rex_ = other.rex_; |
99 | memmove(&encoding_[0], &other.encoding_[0], other.length_); |
100 | return *this; |
101 | } |
102 | |
103 | bool Equals(const Operand& other) const { |
104 | if (length_ != other.length_) return false; |
105 | if (rex_ != other.rex_) return false; |
106 | for (uint8_t i = 0; i < length_; i++) { |
107 | if (encoding_[i] != other.encoding_[i]) return false; |
108 | } |
109 | return true; |
110 | } |
111 | |
112 | protected: |
113 | Operand() : length_(0), rex_(REX_NONE) {} // Needed by subclass Address. |
114 | |
115 | void SetModRM(int mod, Register rm) { |
116 | ASSERT((mod & ~3) == 0); |
117 | if ((rm > 7) && !((rm == R12) && (mod != 3))) { |
118 | rex_ |= REX_B; |
119 | } |
120 | encoding_[0] = (mod << 6) | (rm & 7); |
121 | length_ = 1; |
122 | } |
123 | |
124 | void SetSIB(ScaleFactor scale, Register index, Register base) { |
125 | ASSERT(length_ == 1); |
126 | ASSERT((scale & ~3) == 0); |
127 | if (base > 7) { |
128 | ASSERT((rex_ & REX_B) == 0); // Must not have REX.B already set. |
129 | rex_ |= REX_B; |
130 | } |
131 | if (index > 7) rex_ |= REX_X; |
132 | encoding_[1] = (scale << 6) | ((index & 7) << 3) | (base & 7); |
133 | length_ = 2; |
134 | } |
135 | |
136 | void SetDisp8(int8_t disp) { |
137 | ASSERT(length_ == 1 || length_ == 2); |
138 | encoding_[length_++] = static_cast<uint8_t>(disp); |
139 | } |
140 | |
141 | void SetDisp32(int32_t disp) { |
142 | ASSERT(length_ == 1 || length_ == 2); |
143 | memmove(&encoding_[length_], &disp, sizeof(disp)); |
144 | length_ += sizeof(disp); |
145 | } |
146 | |
147 | private: |
148 | uint8_t length_; |
149 | uint8_t rex_; |
150 | uint8_t encoding_[6]; |
151 | |
152 | explicit Operand(Register reg) : rex_(REX_NONE) { SetModRM(3, reg); } |
153 | |
154 | // Get the operand encoding byte at the given index. |
155 | uint8_t encoding_at(intptr_t index) const { |
156 | ASSERT(index >= 0 && index < length_); |
157 | return encoding_[index]; |
158 | } |
159 | |
160 | // Returns whether or not this operand is really the given register in |
161 | // disguise. Used from the assembler to generate better encodings. |
162 | bool IsRegister(Register reg) const { |
163 | return ((reg > 7 ? 1 : 0) == (rex_ & REX_B)) // REX.B match. |
164 | && ((encoding_at(0) & 0xF8) == 0xC0) // Addressing mode is register. |
165 | && ((encoding_at(0) & 0x07) == reg); // Register codes match. |
166 | } |
167 | |
168 | friend class Assembler; |
169 | }; |
170 | |
171 | class Address : public Operand { |
172 | public: |
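// Usage (illustrative):
//   Address(RBX, 8)                 ~ [rbx + 8]          (mod = 1, disp8)
//   Address(RCX, TIMES_8, 16)       ~ [rcx*8 + 16]       (SIB + disp32)
//   Address(RAX, RDX, TIMES_4, -4)  ~ [rax + rdx*4 - 4]
//   Address::AddressRIPRelative(8)  ~ [rip + 8]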
173 | Address(Register base, int32_t disp) { |
174 | if ((disp == 0) && ((base & 7) != RBP)) { |
175 | SetModRM(0, base); |
176 | if ((base & 7) == RSP) { |
177 | SetSIB(TIMES_1, RSP, base); |
178 | } |
179 | } else if (Utils::IsInt(8, disp)) { |
180 | SetModRM(1, base); |
181 | if ((base & 7) == RSP) { |
182 | SetSIB(TIMES_1, RSP, base); |
183 | } |
184 | SetDisp8(disp); |
185 | } else { |
186 | SetModRM(2, base); |
187 | if ((base & 7) == RSP) { |
188 | SetSIB(TIMES_1, RSP, base); |
189 | } |
190 | SetDisp32(disp); |
191 | } |
192 | } |
193 | |
194 | // This addressing mode does not exist. |
195 | Address(Register base, Register r); |
196 | |
197 | Address(Register index, ScaleFactor scale, int32_t disp) { |
198 | ASSERT(index != RSP); // Illegal addressing mode. |
199 | ASSERT(scale != TIMES_16); // Unsupported scale factor. |
200 | SetModRM(0, RSP); |
201 | SetSIB(scale, index, RBP); |
202 | SetDisp32(disp); |
203 | } |
204 | |
205 | // This addressing mode does not exist. |
206 | Address(Register index, ScaleFactor scale, Register r); |
207 | |
208 | Address(Register base, Register index, ScaleFactor scale, int32_t disp) { |
209 | ASSERT(index != RSP); // Illegal addressing mode. |
210 | ASSERT(scale != TIMES_16); // Unsupported scale factor. |
211 | if ((disp == 0) && ((base & 7) != RBP)) { |
212 | SetModRM(0, RSP); |
213 | SetSIB(scale, index, base); |
214 | } else if (Utils::IsInt(8, disp)) { |
215 | SetModRM(1, RSP); |
216 | SetSIB(scale, index, base); |
217 | SetDisp8(disp); |
218 | } else { |
219 | SetModRM(2, RSP); |
220 | SetSIB(scale, index, base); |
221 | SetDisp32(disp); |
222 | } |
223 | } |
224 | |
225 | // This addressing mode does not exist. |
226 | Address(Register base, Register index, ScaleFactor scale, Register r); |
227 | |
228 | Address(const Address& other) : Operand(other) {} |
229 | |
230 | Address& operator=(const Address& other) { |
231 | Operand::operator=(other); |
232 | return *this; |
233 | } |
234 | |
235 | static Address AddressRIPRelative(int32_t disp) { |
236 | return Address(RIPRelativeDisp(disp)); |
237 | } |
238 | static Address AddressBaseImm32(Register base, int32_t disp) { |
239 | return Address(base, disp, true); |
240 | } |
241 | |
242 | // This addressing mode does not exist. |
243 | static Address AddressBaseImm32(Register base, Register r); |
244 | |
245 | private: |
246 | Address(Register base, int32_t disp, bool fixed) { |
247 | ASSERT(fixed); |
248 | SetModRM(2, base); |
249 | if ((base & 7) == RSP) { |
250 | SetSIB(TIMES_1, RSP, base); |
251 | } |
252 | SetDisp32(disp); |
253 | } |
254 | |
255 | struct RIPRelativeDisp { |
256 | explicit RIPRelativeDisp(int32_t disp) : disp_(disp) {} |
257 | const int32_t disp_; |
258 | }; |
259 | |
260 | explicit Address(const RIPRelativeDisp& disp) { |
261 | SetModRM(0, static_cast<Register>(0x5)); |
262 | SetDisp32(disp.disp_); |
263 | } |
264 | }; |
265 | |
266 | class FieldAddress : public Address { |
267 | public: |
268 | FieldAddress(Register base, int32_t disp) |
269 | : Address(base, disp - kHeapObjectTag) {} |
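
// Example (illustrative, assuming kHeapObjectTag == 1 as on this target):
// FieldAddress(RAX, 8) is Address(RAX, 7); the adjusted displacement
// compensates for the tag bit in the tagged base pointer.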
270 | |
271 | // This addressing mode does not exist. |
272 | FieldAddress(Register base, Register r); |
273 | |
274 | FieldAddress(Register base, Register index, ScaleFactor scale, int32_t disp) |
275 | : Address(base, index, scale, disp - kHeapObjectTag) {} |
276 | |
277 | // This addressing mode does not exist. |
278 | FieldAddress(Register base, Register index, ScaleFactor scale, Register r); |
279 | |
280 | FieldAddress(const FieldAddress& other) : Address(other) {} |
281 | |
282 | FieldAddress& operator=(const FieldAddress& other) { |
283 | Address::operator=(other); |
284 | return *this; |
285 | } |
286 | }; |
287 | |
288 | class Assembler : public AssemblerBase { |
289 | public: |
290 | explicit Assembler(ObjectPoolBuilder* object_pool_builder, |
291 | bool use_far_branches = false); |
292 | |
293 | ~Assembler() {} |
294 | |
295 | static const bool kNearJump = true; |
296 | static const bool kFarJump = false; |
297 | |
298 | /* |
299 | * Emit Machine Instructions. |
300 | */ |
301 | void call(Register reg) { EmitUnaryL(reg, 0xFF, 2); } |
302 | void call(const Address& address) { EmitUnaryL(address, 0xFF, 2); } |
303 | void call(Label* label); |
304 | void call(const ExternalLabel* label); |
305 | |
306 | void pushq(Register reg); |
307 | void pushq(const Address& address) { EmitUnaryL(address, 0xFF, 6); } |
308 | void pushq(const Immediate& imm); |
309 | void PushImmediate(const Immediate& imm); |
310 | |
311 | void popq(Register reg); |
312 | void popq(const Address& address) { EmitUnaryL(address, 0x8F, 0); } |
313 | |
314 | void setcc(Condition condition, ByteRegister dst); |
315 | |
316 | void EnterSafepoint(); |
317 | void LeaveSafepoint(); |
318 | void TransitionGeneratedToNative(Register destination_address, |
319 | Register new_exit_frame, |
320 | Register new_exit_through_ffi, |
321 | bool enter_safepoint); |
322 | void TransitionNativeToGenerated(bool leave_safepoint); |
323 | |
324 | // Register-register, register-address and address-register instructions. |
325 | #define RR(width, name, ...) \ |
326 | void name(Register dst, Register src) { Emit##width(dst, src, __VA_ARGS__); } |
327 | #define RA(width, name, ...) \ |
328 | void name(Register dst, const Address& src) { \ |
329 | Emit##width(dst, src, __VA_ARGS__); \ |
330 | } |
331 | #define AR(width, name, ...) \ |
332 | void name(const Address& dst, Register src) { \ |
333 | Emit##width(src, dst, __VA_ARGS__); \ |
334 | } |
335 | #define REGULAR_INSTRUCTION(name, ...) \ |
336 | RA(W, name##w, __VA_ARGS__) \ |
337 | RA(L, name##l, __VA_ARGS__) \ |
338 | RA(Q, name##q, __VA_ARGS__) \ |
339 | RR(W, name##w, __VA_ARGS__) \ |
340 | RR(L, name##l, __VA_ARGS__) \ |
341 | RR(Q, name##q, __VA_ARGS__) |
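// For example, REGULAR_INSTRUCTION(test, 0x85) expands into testw/testl/
// testq overloads for both register-register and register-address operands.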
342 | REGULAR_INSTRUCTION(test, 0x85) |
343 | REGULAR_INSTRUCTION(xchg, 0x87) |
344 | REGULAR_INSTRUCTION(imul, 0xAF, 0x0F) |
345 | REGULAR_INSTRUCTION(bsf, 0xBC, 0x0F) |
346 | REGULAR_INSTRUCTION(bsr, 0xBD, 0x0F) |
347 | REGULAR_INSTRUCTION(popcnt, 0xB8, 0x0F, 0xF3) |
348 | REGULAR_INSTRUCTION(lzcnt, 0xBD, 0x0F, 0xF3) |
349 | #undef REGULAR_INSTRUCTION |
350 | RA(Q, movsxd, 0x63) |
351 | RR(Q, movsxd, 0x63) |
352 | AR(L, movb, 0x88) |
353 | AR(L, movl, 0x89) |
354 | AR(Q, movq, 0x89) |
355 | AR(W, movw, 0x89) |
356 | RA(L, movb, 0x8A) |
357 | RA(L, movl, 0x8B) |
358 | RA(Q, movq, 0x8B) |
359 | RR(L, movl, 0x8B) |
360 | RA(Q, leaq, 0x8D) |
361 | RA(L, leal, 0x8D) |
362 | AR(L, cmpxchgl, 0xB1, 0x0F) |
363 | AR(Q, cmpxchgq, 0xB1, 0x0F) |
364 | RA(L, cmpxchgl, 0xB1, 0x0F) |
365 | RA(Q, cmpxchgq, 0xB1, 0x0F) |
366 | RR(L, cmpxchgl, 0xB1, 0x0F) |
367 | RR(Q, cmpxchgq, 0xB1, 0x0F) |
368 | RA(Q, movzxb, 0xB6, 0x0F) |
369 | RR(Q, movzxb, 0xB6, 0x0F) |
370 | RA(Q, movzxw, 0xB7, 0x0F) |
371 | RR(Q, movzxw, 0xB7, 0x0F) |
372 | RA(Q, movsxb, 0xBE, 0x0F) |
373 | RR(Q, movsxb, 0xBE, 0x0F) |
374 | RA(Q, movsxw, 0xBF, 0x0F) |
375 | RR(Q, movsxw, 0xBF, 0x0F) |
376 | #define DECLARE_CMOV(name, code) \ |
377 | RR(Q, cmov##name##q, 0x40 + code, 0x0F) \ |
378 | RR(L, cmov##name##l, 0x40 + code, 0x0F) \ |
379 | RA(Q, cmov##name##q, 0x40 + code, 0x0F) \ |
380 | RA(L, cmov##name##l, 0x40 + code, 0x0F) |
381 | X86_CONDITIONAL_SUFFIXES(DECLARE_CMOV) |
382 | #undef DECLARE_CMOV |
#undef RR
384 | #undef RA |
385 | #undef AR |
386 | |
387 | #define SIMPLE(name, ...) \ |
388 | void name() { EmitSimple(__VA_ARGS__); } |
389 | SIMPLE(cpuid, 0x0F, 0xA2) |
390 | SIMPLE(fcos, 0xD9, 0xFF) |
391 | SIMPLE(fincstp, 0xD9, 0xF7) |
392 | SIMPLE(fsin, 0xD9, 0xFE) |
393 | SIMPLE(lock, 0xF0) |
394 | SIMPLE(rep_movsb, 0xF3, 0xA4) |
395 | SIMPLE(rep_movsw, 0xF3, 0x66, 0xA5) |
396 | SIMPLE(rep_movsl, 0xF3, 0xA5) |
397 | SIMPLE(rep_movsq, 0xF3, 0x48, 0xA5) |
398 | #undef SIMPLE |
399 | // XmmRegister operations with another register or an address. |
400 | #define XX(width, name, ...) \ |
401 | void name(XmmRegister dst, XmmRegister src) { \ |
402 | Emit##width(dst, src, __VA_ARGS__); \ |
403 | } |
404 | #define XA(width, name, ...) \ |
405 | void name(XmmRegister dst, const Address& src) { \ |
406 | Emit##width(dst, src, __VA_ARGS__); \ |
407 | } |
408 | #define AX(width, name, ...) \ |
409 | void name(const Address& dst, XmmRegister src) { \ |
410 | Emit##width(src, dst, __VA_ARGS__); \ |
411 | } |
412 | // We could add movupd here, but movups does the same and is shorter. |
XA(L, movups, 0x10, 0x0F)
414 | XA(L, movsd, 0x10, 0x0F, 0xF2) |
415 | XA(L, movss, 0x10, 0x0F, 0xF3) |
AX(L, movups, 0x11, 0x0F)
417 | AX(L, movsd, 0x11, 0x0F, 0xF2) |
418 | AX(L, movss, 0x11, 0x0F, 0xF3) |
419 | XX(L, movhlps, 0x12, 0x0F) |
420 | XX(L, unpcklps, 0x14, 0x0F) |
421 | XX(L, unpcklpd, 0x14, 0x0F, 0x66) |
422 | XX(L, unpckhps, 0x15, 0x0F) |
423 | XX(L, unpckhpd, 0x15, 0x0F, 0x66) |
424 | XX(L, movlhps, 0x16, 0x0F) |
425 | XX(L, movaps, 0x28, 0x0F) |
426 | XX(L, comisd, 0x2F, 0x0F, 0x66) |
427 | #define DECLARE_XMM(name, code) \ |
428 | XX(L, name##ps, 0x50 + code, 0x0F) \ |
429 | XA(L, name##ps, 0x50 + code, 0x0F) \ |
430 | AX(L, name##ps, 0x50 + code, 0x0F) \ |
431 | XX(L, name##pd, 0x50 + code, 0x0F, 0x66) \ |
432 | XA(L, name##pd, 0x50 + code, 0x0F, 0x66) \ |
433 | AX(L, name##pd, 0x50 + code, 0x0F, 0x66) \ |
434 | XX(L, name##sd, 0x50 + code, 0x0F, 0xF2) \ |
435 | XA(L, name##sd, 0x50 + code, 0x0F, 0xF2) \ |
436 | AX(L, name##sd, 0x50 + code, 0x0F, 0xF2) \ |
437 | XX(L, name##ss, 0x50 + code, 0x0F, 0xF3) \ |
438 | XA(L, name##ss, 0x50 + code, 0x0F, 0xF3) \ |
439 | AX(L, name##ss, 0x50 + code, 0x0F, 0xF3) |
440 | XMM_ALU_CODES(DECLARE_XMM) |
441 | #undef DECLARE_XMM |
442 | XX(L, cvtps2pd, 0x5A, 0x0F) |
443 | XX(L, cvtpd2ps, 0x5A, 0x0F, 0x66) |
444 | XX(L, cvtsd2ss, 0x5A, 0x0F, 0xF2) |
445 | XX(L, cvtss2sd, 0x5A, 0x0F, 0xF3) |
446 | XX(L, pxor, 0xEF, 0x0F, 0x66) |
447 | XX(L, subpl, 0xFA, 0x0F, 0x66) |
448 | XX(L, addpl, 0xFE, 0x0F, 0x66) |
449 | #undef XX |
450 | #undef AX |
451 | #undef XA |
452 | |
453 | #define DECLARE_CMPPS(name, code) \ |
454 | void cmpps##name(XmmRegister dst, XmmRegister src) { \ |
455 | EmitL(dst, src, 0xC2, 0x0F); \ |
456 | AssemblerBuffer::EnsureCapacity ensured(&buffer_); \ |
457 | EmitUint8(code); \ |
458 | } |
459 | XMM_CONDITIONAL_CODES(DECLARE_CMPPS) |
460 | #undef DECLARE_CMPPS |
461 | |
462 | #define DECLARE_SIMPLE(name, opcode) \ |
463 | void name() { EmitSimple(opcode); } |
464 | X86_ZERO_OPERAND_1_BYTE_INSTRUCTIONS(DECLARE_SIMPLE) |
465 | #undef DECLARE_SIMPLE |
466 | |
467 | void movl(Register dst, const Immediate& imm); |
468 | void movl(const Address& dst, const Immediate& imm); |
469 | |
470 | void movb(const Address& dst, const Immediate& imm); |
471 | |
472 | void movw(Register dst, const Address& src); |
473 | void movw(const Address& dst, const Immediate& imm); |
474 | |
475 | void movq(Register dst, const Immediate& imm); |
476 | void movq(const Address& dst, const Immediate& imm); |
477 | |
// Destination and source are reversed: these use the store-form opcodes
// (0x7E, 0x11), which encode the XMM source in the ModRM reg field.
479 | void movq(Register dst, XmmRegister src) { |
480 | EmitQ(src, dst, 0x7E, 0x0F, 0x66); |
481 | } |
482 | void movl(Register dst, XmmRegister src) { |
483 | EmitL(src, dst, 0x7E, 0x0F, 0x66); |
484 | } |
485 | void movss(XmmRegister dst, XmmRegister src) { |
486 | EmitL(src, dst, 0x11, 0x0F, 0xF3); |
487 | } |
488 | void movsd(XmmRegister dst, XmmRegister src) { |
489 | EmitL(src, dst, 0x11, 0x0F, 0xF2); |
490 | } |
491 | |
// Use the reversed operand order and the 0x89 opcode instead of the obvious
// 0x8B encoding here, because it is expected by gdb64 older than 7.3.1-gg5
// when disassembling a function's prologue (movq rbp, rsp) for proper
// unwinding of Dart frames (use --generate_gdb_symbols and -O0).
496 | void movq(Register dst, Register src) { EmitQ(src, dst, 0x89); } |
497 | |
498 | void movq(XmmRegister dst, Register src) { |
499 | EmitQ(dst, src, 0x6E, 0x0F, 0x66); |
500 | } |
501 | |
502 | void movd(XmmRegister dst, Register src) { |
503 | EmitL(dst, src, 0x6E, 0x0F, 0x66); |
504 | } |
505 | void cvtsi2sdq(XmmRegister dst, Register src) { |
506 | EmitQ(dst, src, 0x2A, 0x0F, 0xF2); |
507 | } |
508 | void cvtsi2sdl(XmmRegister dst, Register src) { |
509 | EmitL(dst, src, 0x2A, 0x0F, 0xF2); |
510 | } |
511 | void cvttsd2siq(Register dst, XmmRegister src) { |
512 | EmitQ(dst, src, 0x2C, 0x0F, 0xF2); |
513 | } |
514 | void cvttsd2sil(Register dst, XmmRegister src) { |
515 | EmitL(dst, src, 0x2C, 0x0F, 0xF2); |
516 | } |
517 | void movmskpd(Register dst, XmmRegister src) { |
518 | EmitL(dst, src, 0x50, 0x0F, 0x66); |
519 | } |
520 | void movmskps(Register dst, XmmRegister src) { EmitL(dst, src, 0x50, 0x0F); } |
521 | void pmovmskb(Register dst, XmmRegister src) { |
522 | EmitL(dst, src, 0xD7, 0x0F, 0x66); |
523 | } |
524 | |
525 | void btl(Register dst, Register src) { EmitL(src, dst, 0xA3, 0x0F); } |
526 | void btq(Register dst, Register src) { EmitQ(src, dst, 0xA3, 0x0F); } |
527 | |
528 | void notps(XmmRegister dst, XmmRegister src); |
529 | void negateps(XmmRegister dst, XmmRegister src); |
530 | void absps(XmmRegister dst, XmmRegister src); |
531 | void zerowps(XmmRegister dst, XmmRegister src); |
532 | |
533 | void set1ps(XmmRegister dst, Register tmp, const Immediate& imm); |
534 | void shufps(XmmRegister dst, XmmRegister src, const Immediate& mask); |
535 | |
536 | void negatepd(XmmRegister dst, XmmRegister src); |
537 | void abspd(XmmRegister dst, XmmRegister src); |
538 | void shufpd(XmmRegister dst, XmmRegister src, const Immediate& mask); |
539 | |
540 | enum RoundingMode { |
541 | kRoundToNearest = 0x0, |
542 | kRoundDown = 0x1, |
543 | kRoundUp = 0x2, |
544 | kRoundToZero = 0x3 |
545 | }; |
546 | void roundsd(XmmRegister dst, XmmRegister src, RoundingMode mode); |
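// Example (illustrative): roundsd(XMM0, XMM1, kRoundToZero) truncates the
// double in XMM1 toward zero (C-style cast semantics); kRoundToNearest
// selects IEEE round-to-nearest-even.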
547 | |
548 | void CompareImmediate(Register reg, const Immediate& imm); |
549 | void CompareImmediate(const Address& address, const Immediate& imm); |
550 | void CompareImmediate(Register reg, int32_t immediate) { |
551 | return CompareImmediate(reg, Immediate(immediate)); |
552 | } |
553 | |
554 | void testl(Register reg, const Immediate& imm) { testq(reg, imm); } |
555 | void testb(const Address& address, const Immediate& imm); |
556 | void testb(const Address& address, Register reg); |
557 | |
558 | void testq(Register reg, const Immediate& imm); |
559 | void TestImmediate(Register dst, const Immediate& imm); |
560 | |
561 | void AndImmediate(Register dst, const Immediate& imm); |
562 | void OrImmediate(Register dst, const Immediate& imm); |
563 | void XorImmediate(Register dst, const Immediate& imm); |
564 | |
565 | void shldq(Register dst, Register src, Register shifter) { |
566 | ASSERT(shifter == RCX); |
567 | EmitQ(src, dst, 0xA5, 0x0F); |
568 | } |
569 | void shrdq(Register dst, Register src, Register shifter) { |
570 | ASSERT(shifter == RCX); |
571 | EmitQ(src, dst, 0xAD, 0x0F); |
572 | } |
573 | |
574 | #define DECLARE_ALU(op, c) \ |
575 | void op##w(Register dst, Register src) { EmitW(dst, src, c * 8 + 3); } \ |
576 | void op##l(Register dst, Register src) { EmitL(dst, src, c * 8 + 3); } \ |
577 | void op##q(Register dst, Register src) { EmitQ(dst, src, c * 8 + 3); } \ |
578 | void op##w(Register dst, const Address& src) { EmitW(dst, src, c * 8 + 3); } \ |
579 | void op##l(Register dst, const Address& src) { EmitL(dst, src, c * 8 + 3); } \ |
580 | void op##q(Register dst, const Address& src) { EmitQ(dst, src, c * 8 + 3); } \ |
581 | void op##w(const Address& dst, Register src) { EmitW(src, dst, c * 8 + 1); } \ |
582 | void op##l(const Address& dst, Register src) { EmitL(src, dst, c * 8 + 1); } \ |
583 | void op##q(const Address& dst, Register src) { EmitQ(src, dst, c * 8 + 1); } \ |
584 | void op##l(Register dst, const Immediate& imm) { AluL(c, dst, imm); } \ |
585 | void op##q(Register dst, const Immediate& imm) { \ |
586 | AluQ(c, c * 8 + 3, dst, imm); \ |
587 | } \ |
588 | void op##b(const Address& dst, const Immediate& imm) { AluB(c, dst, imm); } \ |
589 | void op##w(const Address& dst, const Immediate& imm) { AluW(c, dst, imm); } \ |
590 | void op##l(const Address& dst, const Immediate& imm) { AluL(c, dst, imm); } \ |
591 | void op##q(const Address& dst, const Immediate& imm) { \ |
592 | AluQ(c, c * 8 + 3, dst, imm); \ |
593 | } |
594 | |
595 | X86_ALU_CODES(DECLARE_ALU) |
596 | |
597 | #undef DECLARE_ALU |
598 | #undef ALU_OPS |
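
// Opcode arithmetic above (illustrative): each ALU group occupies eight
// opcodes starting at c * 8; for add (c = 0) the reg <- r/m form is 0x03
// and the r/m <- reg form is 0x01, while for cmp (c = 7) they are 0x3B
// and 0x39.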
599 | |
600 | void cqo(); |
601 | |
602 | #define REGULAR_UNARY(name, opcode, modrm) \ |
603 | void name##q(Register reg) { EmitUnaryQ(reg, opcode, modrm); } \ |
604 | void name##l(Register reg) { EmitUnaryL(reg, opcode, modrm); } \ |
605 | void name##q(const Address& address) { EmitUnaryQ(address, opcode, modrm); } \ |
606 | void name##l(const Address& address) { EmitUnaryL(address, opcode, modrm); } |
607 | REGULAR_UNARY(not, 0xF7, 2) |
608 | REGULAR_UNARY(neg, 0xF7, 3) |
609 | REGULAR_UNARY(mul, 0xF7, 4) |
610 | REGULAR_UNARY(imul, 0xF7, 5) |
611 | REGULAR_UNARY(div, 0xF7, 6) |
612 | REGULAR_UNARY(idiv, 0xF7, 7) |
613 | REGULAR_UNARY(inc, 0xFF, 0) |
614 | REGULAR_UNARY(dec, 0xFF, 1) |
615 | #undef REGULAR_UNARY |
616 | |
617 | // We could use kWord, kDoubleWord, and kQuadWord here, but it is rather |
618 | // confusing since the same sizes mean something different on ARM. |
619 | enum OperandWidth { k32Bit, k64Bit }; |
620 | |
621 | void imull(Register reg, const Immediate& imm); |
622 | |
623 | void imulq(Register dst, const Immediate& imm); |
624 | void MulImmediate(Register reg, |
625 | const Immediate& imm, |
626 | OperandWidth width = k64Bit); |
627 | |
628 | void shll(Register reg, const Immediate& imm); |
629 | void shll(Register operand, Register shifter); |
630 | void shrl(Register reg, const Immediate& imm); |
631 | void shrl(Register operand, Register shifter); |
632 | void sarl(Register reg, const Immediate& imm); |
633 | void sarl(Register operand, Register shifter); |
634 | void shldl(Register dst, Register src, const Immediate& imm); |
635 | |
636 | void shlq(Register reg, const Immediate& imm); |
637 | void shlq(Register operand, Register shifter); |
638 | void shrq(Register reg, const Immediate& imm); |
639 | void shrq(Register operand, Register shifter); |
640 | void sarq(Register reg, const Immediate& imm); |
641 | void sarq(Register operand, Register shifter); |
642 | void shldq(Register dst, Register src, const Immediate& imm); |
643 | |
644 | void btq(Register base, int bit); |
645 | |
646 | void enter(const Immediate& imm); |
647 | |
648 | void fldl(const Address& src); |
649 | void fstpl(const Address& dst); |
650 | |
651 | void ffree(intptr_t value); |
652 | |
653 | // 'size' indicates size in bytes and must be in the range 1..8. |
654 | void nop(int size = 1); |
655 | |
656 | void j(Condition condition, Label* label, bool near = kFarJump); |
657 | void jmp(Register reg) { EmitUnaryL(reg, 0xFF, 4); } |
658 | void jmp(const Address& address) { EmitUnaryL(address, 0xFF, 4); } |
659 | void jmp(Label* label, bool near = kFarJump); |
660 | void jmp(const ExternalLabel* label); |
661 | void jmp(const Code& code); |
662 | |
// Issues a memory-to-memory move through the TMP register.
664 | // TODO(koda): Assert that these are not used for heap objects. |
665 | void MoveMemoryToMemory(const Address& dst, const Address& src) { |
666 | movq(TMP, src); |
667 | movq(dst, TMP); |
668 | } |
669 | |
670 | void Exchange(Register reg, const Address& mem) { |
671 | movq(TMP, mem); |
672 | movq(mem, reg); |
673 | movq(reg, TMP); |
674 | } |
675 | |
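// Swaps two memory locations using the classic XOR swap, so only the
// single scratch register TMP is needed.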
676 | void Exchange(const Address& mem1, const Address& mem2) { |
677 | movq(TMP, mem1); |
678 | xorq(TMP, mem2); |
679 | xorq(mem1, TMP); |
680 | xorq(mem2, TMP); |
681 | } |
682 | |
// Methods for high-level operations, implemented on all architectures.
684 | void Ret() { ret(); } |
685 | void CompareRegisters(Register a, Register b); |
686 | void BranchIf(Condition condition, Label* label) { j(condition, label); } |
687 | |
688 | // Issues a move instruction if 'to' is not the same as 'from'. |
689 | void MoveRegister(Register to, Register from); |
690 | void PushRegister(Register r); |
691 | void PopRegister(Register r); |
692 | |
693 | void PushRegisterPair(Register r0, Register r1) { |
694 | PushRegister(r1); |
695 | PushRegister(r0); |
696 | } |
697 | void PopRegisterPair(Register r0, Register r1) { |
698 | PopRegister(r0); |
699 | PopRegister(r1); |
700 | } |
701 | |
702 | // Methods for adding/subtracting an immediate value that may be loaded from |
703 | // the constant pool. |
704 | // TODO(koda): Assert that these are not used for heap objects. |
705 | void AddImmediate(Register reg, |
706 | const Immediate& imm, |
707 | OperandWidth width = k64Bit); |
708 | void AddImmediate(const Address& address, const Immediate& imm); |
709 | void SubImmediate(Register reg, |
710 | const Immediate& imm, |
711 | OperandWidth width = k64Bit); |
712 | void SubImmediate(const Address& address, const Immediate& imm); |
713 | |
714 | void Drop(intptr_t stack_elements, Register tmp = TMP); |
715 | |
716 | bool constant_pool_allowed() const { return constant_pool_allowed_; } |
717 | void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; } |
718 | |
719 | // Unlike movq this can affect the flags or use the constant pool. |
720 | void LoadImmediate(Register reg, const Immediate& imm); |
721 | void LoadImmediate(Register reg, int32_t immediate) { |
722 | LoadImmediate(reg, Immediate(immediate)); |
723 | } |
724 | |
725 | void LoadIsolate(Register dst); |
726 | void LoadDispatchTable(Register dst); |
727 | void LoadObject(Register dst, const Object& obj); |
728 | void LoadUniqueObject(Register dst, const Object& obj); |
729 | void LoadNativeEntry(Register dst, |
730 | const ExternalLabel* label, |
731 | ObjectPoolBuilderEntry::Patchability patchable); |
732 | void JmpPatchable(const Code& code, Register pp); |
733 | void Jmp(const Code& code, Register pp = PP); |
734 | void J(Condition condition, const Code& code, Register pp); |
735 | void CallPatchable(const Code& code, |
736 | CodeEntryKind entry_kind = CodeEntryKind::kNormal); |
737 | void Call(const Code& stub_entry); |
738 | void CallToRuntime(); |
739 | |
740 | // Emit a call that shares its object pool entries with other calls |
741 | // that have the same equivalence marker. |
742 | void CallWithEquivalence(const Code& code, |
743 | const Object& equivalence, |
744 | CodeEntryKind entry_kind = CodeEntryKind::kNormal); |
745 | |
746 | void Call(Address target) { call(target); } |
747 | |
748 | // Unaware of write barrier (use StoreInto* methods for storing to objects). |
749 | // TODO(koda): Add StackAddress/HeapAddress types to prevent misuse. |
750 | void StoreObject(const Address& dst, const Object& obj); |
751 | void PushObject(const Object& object); |
752 | void CompareObject(Register reg, const Object& object); |
753 | |
754 | enum CanBeSmi { |
755 | kValueIsNotSmi, |
756 | kValueCanBeSmi, |
757 | }; |
758 | |
759 | // Store into a heap object and apply the generational and incremental write |
760 | // barriers. All stores into heap objects must pass through this function or, |
761 | // if the value can be proven either Smi or old-and-premarked, its NoBarrier |
762 | // variants. |
763 | // Preserves object and value registers. |
764 | void StoreIntoObject(Register object, // Object we are storing into. |
765 | const Address& dest, // Where we are storing into. |
766 | Register value, // Value we are storing. |
767 | CanBeSmi can_be_smi = kValueCanBeSmi); |
768 | void StoreIntoArray(Register object, // Object we are storing into. |
769 | Register slot, // Where we are storing into. |
770 | Register value, // Value we are storing. |
771 | CanBeSmi can_be_smi = kValueCanBeSmi); |
772 | |
773 | void StoreIntoObjectNoBarrier(Register object, |
774 | const Address& dest, |
775 | Register value); |
776 | void StoreIntoObjectNoBarrier(Register object, |
777 | const Address& dest, |
778 | const Object& value); |
779 | |
780 | // Stores a non-tagged value into a heap object. |
781 | void StoreInternalPointer(Register object, |
782 | const Address& dest, |
783 | Register value); |
784 | |
785 | // Stores a Smi value into a heap object field that always contains a Smi. |
786 | void StoreIntoSmiField(const Address& dest, Register value); |
787 | void ZeroInitSmiField(const Address& dest); |
788 | // Increments a Smi field. Leaves flags in same state as an 'addq'. |
789 | void IncrementSmiField(const Address& dest, int64_t increment); |
790 | |
791 | void DoubleNegate(XmmRegister dst, XmmRegister src); |
792 | void DoubleAbs(XmmRegister dst, XmmRegister src); |
793 | |
794 | void LockCmpxchgq(const Address& address, Register reg) { |
795 | lock(); |
796 | cmpxchgq(address, reg); |
797 | } |
798 | |
799 | void LockCmpxchgl(const Address& address, Register reg) { |
800 | lock(); |
801 | cmpxchgl(address, reg); |
802 | } |
803 | |
804 | void PushRegisters(intptr_t cpu_register_set, intptr_t xmm_register_set); |
805 | void PopRegisters(intptr_t cpu_register_set, intptr_t xmm_register_set); |
806 | |
807 | void CheckCodePointer(); |
808 | |
809 | void EnterFrame(intptr_t frame_space); |
810 | void LeaveFrame(); |
811 | void ReserveAlignedFrameSpace(intptr_t frame_space); |
812 | |
813 | // In debug mode, generates code to verify that: |
814 | // FP + kExitLinkSlotFromFp == SP |
815 | // |
816 | // Triggers breakpoint otherwise. |
817 | // Clobbers RAX. |
818 | void EmitEntryFrameVerification(); |
819 | |
820 | // Create a frame for calling into runtime that preserves all volatile |
821 | // registers. Frame's RSP is guaranteed to be correctly aligned and |
822 | // frame_space bytes are reserved under it. |
823 | void EnterCallRuntimeFrame(intptr_t frame_space); |
824 | void LeaveCallRuntimeFrame(); |
825 | |
826 | void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count); |
827 | |
828 | // Call runtime function. Reserves shadow space on the stack before calling |
829 | // if platform ABI requires that. Does not restore RSP after the call itself. |
830 | void CallCFunction(Register reg); |
831 | void CallCFunction(Address address); |
832 | |
void ExtractClassIdFromTags(Register result, Register tags);
void ExtractInstanceSizeFromTags(Register result, Register tags);
835 | |
836 | // Loading and comparing classes of objects. |
837 | void LoadClassId(Register result, Register object); |
838 | void LoadClassById(Register result, Register class_id); |
839 | |
840 | void CompareClassId(Register object, |
841 | intptr_t class_id, |
842 | Register scratch = kNoRegister); |
843 | |
844 | void LoadClassIdMayBeSmi(Register result, Register object); |
845 | void LoadTaggedClassIdMayBeSmi(Register result, Register object); |
846 | |
847 | // CheckClassIs fused with optimistic SmiUntag. |
848 | // Value in the register object is untagged optimistically. |
849 | void SmiUntagOrCheckClass(Register object, intptr_t class_id, Label* smi); |
850 | |
851 | // Misc. functionality. |
852 | void SmiTag(Register reg) { addq(reg, reg); } |
853 | |
854 | void SmiUntag(Register reg) { sarq(reg, Immediate(kSmiTagSize)); } |
855 | |
856 | void BranchIfNotSmi(Register reg, Label* label) { |
857 | testq(reg, Immediate(kSmiTagMask)); |
858 | j(NOT_ZERO, label); |
859 | } |
860 | |
861 | void BranchIfSmi(Register reg, Label* label) { |
862 | testq(reg, Immediate(kSmiTagMask)); |
863 | j(ZERO, label); |
864 | } |
865 | |
866 | void Align(int alignment, intptr_t offset); |
867 | void Bind(Label* label); |
868 | void Jump(Label* label) { jmp(label); } |
869 | |
870 | void LoadField(Register dst, FieldAddress address) { movq(dst, address); } |
871 | void LoadMemoryValue(Register dst, Register base, int32_t offset) { |
872 | movq(dst, Address(base, offset)); |
873 | } |
874 | void StoreMemoryValue(Register src, Register base, int32_t offset) { |
875 | movq(Address(base, offset), src); |
876 | } |
877 | void LoadAcquire(Register dst, Register address, int32_t offset = 0) { |
// On x86, loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
880 | movq(dst, Address(address, offset)); |
881 | } |
882 | void StoreRelease(Register src, Register address, int32_t offset = 0) { |
// On x86, stores have store-release behavior (i.e. stores are not
// re-ordered with other stores).
885 | movq(Address(address, offset), src); |
886 | } |
887 | |
888 | void CompareWithFieldValue(Register value, FieldAddress address) { |
889 | cmpq(value, address); |
890 | } |
891 | |
892 | void CompareTypeNullabilityWith(Register type, int8_t value) { |
893 | cmpb(FieldAddress(type, compiler::target::Type::nullability_offset()), |
894 | Immediate(value)); |
895 | } |
896 | |
897 | void RestoreCodePointer(); |
898 | void LoadPoolPointer(Register pp = PP); |
899 | |
900 | // Set up a Dart frame on entry with a frame pointer and PC information to |
901 | // enable easy access to the RawInstruction object of code corresponding |
902 | // to this frame. |
903 | // The dart frame layout is as follows: |
904 | // .... |
905 | // locals space <=== RSP |
906 | // saved PP |
907 | // code object (used to derive the RawInstruction Object of the dart code) |
908 | // saved RBP <=== RBP |
909 | // ret PC |
910 | // ..... |
911 | // This code sets this up with the sequence: |
912 | // pushq rbp |
913 | // movq rbp, rsp |
914 | // call L |
915 | // L: <code to adjust saved pc if there is any intrinsification code> |
916 | // ... |
917 | // pushq r15 |
918 | // ..... |
919 | void EnterDartFrame(intptr_t frame_size, Register new_pp = kNoRegister); |
920 | void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP); |
921 | |
922 | // Set up a Dart frame for a function compiled for on-stack replacement. |
923 | // The frame layout is a normal Dart frame, but the frame is partially set |
924 | // up on entry (it is the frame of the unoptimized code). |
void EnterOsrFrame(intptr_t extra_size);
926 | |
927 | // Set up a stub frame so that the stack traversal code can easily identify |
928 | // a stub frame. |
929 | // The stub frame layout is as follows: |
930 | // .... <=== RSP |
931 | // pc (used to derive the RawInstruction Object of the stub) |
932 | // saved RBP <=== RBP |
933 | // ret PC |
934 | // ..... |
935 | // This code sets this up with the sequence: |
936 | // pushq rbp |
937 | // movq rbp, rsp |
938 | // pushq immediate(0) |
939 | // ..... |
940 | void EnterStubFrame(); |
941 | void LeaveStubFrame(); |
942 | |
943 | // Set up a frame for calling a C function. |
944 | // Automatically save the pinned registers in Dart which are not callee- |
945 | // saved in the native calling convention. |
946 | // Use together with CallCFunction. |
947 | void EnterCFrame(intptr_t frame_space); |
948 | void LeaveCFrame(); |
949 | |
950 | void MonomorphicCheckedEntryJIT(); |
951 | void MonomorphicCheckedEntryAOT(); |
952 | void BranchOnMonomorphicCheckedEntryJIT(Label* label); |
953 | |
// If allocation tracing for |cid| is enabled, jumps to the |trace| label,
// which should allocate in the runtime (where tracing occurs).
957 | |
958 | // Inlined allocation of an instance of class 'cls', code has no runtime |
959 | // calls. Jump to 'failure' if the instance cannot be allocated here. |
960 | // Allocated instance is returned in 'instance_reg'. |
961 | // Only the tags field of the object is initialized. |
962 | void TryAllocate(const Class& cls, |
963 | Label* failure, |
964 | bool near_jump, |
965 | Register instance_reg, |
966 | Register temp); |
967 | |
968 | void TryAllocateArray(intptr_t cid, |
969 | intptr_t instance_size, |
970 | Label* failure, |
971 | bool near_jump, |
972 | Register instance, |
973 | Register end_address, |
974 | Register temp); |
975 | |
// This emits a PC-relative call of the form "callq *[rip+<offset>]". The
// offset is not yet known and therefore needs relocation to the right place
// before the code can be used.
//
// The necessary information for the "linker" (i.e. the relocation
// information) is stored in [CodeLayout::static_calls_target_table_]: an
982 | // entry of the form |
983 | // |
984 | // (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>) |
985 | // |
986 | // will be used during relocation to fix the offset. |
987 | // |
988 | // The provided [offset_into_target] will be added to calculate the final |
989 | // destination. It can be used e.g. for calling into the middle of a |
990 | // function. |
991 | void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target = 0); |
992 | |
// This emits a PC-relative tail call of the form "jmp *[rip+<offset>]".
994 | // |
995 | // See also above for the pc-relative call. |
996 | void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target = 0); |
997 | |
998 | // Debugging and bringup support. |
999 | void Breakpoint() override { int3(); } |
1000 | |
1001 | static Address ElementAddressForIntIndex(bool is_external, |
1002 | intptr_t cid, |
1003 | intptr_t index_scale, |
1004 | Register array, |
1005 | intptr_t index); |
1006 | static Address ElementAddressForRegIndex(bool is_external, |
1007 | intptr_t cid, |
1008 | intptr_t index_scale, |
1009 | bool index_unboxed, |
1010 | Register array, |
1011 | Register index); |
1012 | |
1013 | void LoadFieldAddressForRegOffset(Register address, |
1014 | Register instance, |
1015 | Register offset_in_words_as_smi) { |
static_assert(kSmiTagShift == 1, "adjust scale factor");
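// offset_in_words_as_smi is the word offset shifted left by the Smi tag
// (x2), so scaling by TIMES_4 yields words * 8, i.e. the byte offset on
// x64; FieldAddress then compensates for the object tag.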
1017 | leaq(address, FieldAddress(instance, offset_in_words_as_smi, TIMES_4, 0)); |
1018 | } |
1019 | |
1020 | static Address VMTagAddress(); |
1021 | |
1022 | // On some other platforms, we draw a distinction between safe and unsafe |
1023 | // smis. |
1024 | static bool IsSafe(const Object& object) { return true; } |
1025 | static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); } |
1026 | |
1027 | private: |
1028 | bool constant_pool_allowed_; |
1029 | |
1030 | intptr_t FindImmediate(int64_t imm); |
1031 | bool CanLoadFromObjectPool(const Object& object) const; |
1032 | void LoadObjectHelper(Register dst, const Object& obj, bool is_unique); |
1033 | void LoadWordFromPoolOffset(Register dst, int32_t offset); |
1034 | |
1035 | void AluL(uint8_t modrm_opcode, Register dst, const Immediate& imm); |
1036 | void AluB(uint8_t modrm_opcode, const Address& dst, const Immediate& imm); |
1037 | void AluW(uint8_t modrm_opcode, const Address& dst, const Immediate& imm); |
1038 | void AluL(uint8_t modrm_opcode, const Address& dst, const Immediate& imm); |
1039 | void AluQ(uint8_t modrm_opcode, |
1040 | uint8_t opcode, |
1041 | Register dst, |
1042 | const Immediate& imm); |
1043 | void AluQ(uint8_t modrm_opcode, |
1044 | uint8_t opcode, |
1045 | const Address& dst, |
1046 | const Immediate& imm); |
1047 | |
1048 | void EmitSimple(int opcode, int opcode2 = -1, int opcode3 = -1); |
1049 | void EmitUnaryQ(Register reg, int opcode, int modrm_code); |
1050 | void EmitUnaryL(Register reg, int opcode, int modrm_code); |
1051 | void EmitUnaryQ(const Address& address, int opcode, int modrm_code); |
1052 | void EmitUnaryL(const Address& address, int opcode, int modrm_code); |
1053 | // The prefixes are in reverse order due to the rules of default arguments in |
1054 | // C++. |
1055 | void EmitQ(int reg, |
1056 | const Address& address, |
1057 | int opcode, |
1058 | int prefix2 = -1, |
1059 | int prefix1 = -1); |
1060 | void EmitL(int reg, |
1061 | const Address& address, |
1062 | int opcode, |
1063 | int prefix2 = -1, |
1064 | int prefix1 = -1); |
1065 | void EmitW(Register reg, |
1066 | const Address& address, |
1067 | int opcode, |
1068 | int prefix2 = -1, |
1069 | int prefix1 = -1); |
1070 | void EmitQ(int dst, int src, int opcode, int prefix2 = -1, int prefix1 = -1); |
1071 | void EmitL(int dst, int src, int opcode, int prefix2 = -1, int prefix1 = -1); |
1072 | void EmitW(Register dst, |
1073 | Register src, |
1074 | int opcode, |
1075 | int prefix2 = -1, |
1076 | int prefix1 = -1); |
1077 | void CmpPS(XmmRegister dst, XmmRegister src, int condition); |
1078 | |
1079 | inline void EmitUint8(uint8_t value); |
1080 | inline void EmitInt32(int32_t value); |
1081 | inline void EmitUInt32(uint32_t value); |
1082 | inline void EmitInt64(int64_t value); |
1083 | |
1084 | inline void EmitRegisterREX(Register reg, |
1085 | uint8_t rex, |
1086 | bool force_emit = false); |
1087 | inline void EmitOperandREX(int rm, const Operand& operand, uint8_t rex); |
1088 | inline void EmitRegisterOperand(int rm, int reg); |
1089 | inline void EmitFixup(AssemblerFixup* fixup); |
1090 | inline void EmitOperandSizeOverride(); |
1091 | inline void EmitRegRegRex(int reg, int base, uint8_t rex = REX_NONE); |
1092 | void EmitOperand(int rm, const Operand& operand); |
1093 | void EmitImmediate(const Immediate& imm); |
1094 | void EmitComplex(int rm, const Operand& operand, const Immediate& immediate); |
1095 | void EmitSignExtendedInt8(int rm, |
1096 | const Operand& operand, |
1097 | const Immediate& immediate); |
1098 | void EmitLabel(Label* label, intptr_t instruction_size); |
1099 | void EmitLabelLink(Label* label); |
1100 | void EmitNearLabelLink(Label* label); |
1101 | |
1102 | void EmitGenericShift(bool wide, int rm, Register reg, const Immediate& imm); |
1103 | void EmitGenericShift(bool wide, int rm, Register operand, Register shifter); |
1104 | |
1105 | enum BarrierFilterMode { |
// Filter falls through into the barrier update code. Target label
// is an "after-store" label.
1108 | kJumpToNoUpdate, |
1109 | |
1110 | // Filter falls through to the "after-store" code. Target label |
1111 | // is barrier update code label. |
1112 | kJumpToBarrier, |
1113 | }; |
1114 | |
1115 | void StoreIntoObjectFilter(Register object, |
1116 | Register value, |
1117 | Label* label, |
1118 | CanBeSmi can_be_smi, |
1119 | BarrierFilterMode barrier_filter_mode); |
1120 | |
1121 | // Unaware of write barrier (use StoreInto* methods for storing to objects). |
1122 | void MoveImmediate(const Address& dst, const Immediate& imm); |
1123 | |
1124 | friend class dart::FlowGraphCompiler; |
1125 | std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_; |
1126 | std::function<void()> generate_invoke_array_write_barrier_; |
1127 | |
1128 | DISALLOW_ALLOCATION(); |
1129 | DISALLOW_COPY_AND_ASSIGN(Assembler); |
1130 | }; |
1131 | |
1132 | inline void Assembler::EmitUint8(uint8_t value) { |
1133 | buffer_.Emit<uint8_t>(value); |
1134 | } |
1135 | |
1136 | inline void Assembler::EmitInt32(int32_t value) { |
1137 | buffer_.Emit<int32_t>(value); |
1138 | } |
1139 | |
1140 | inline void Assembler::EmitUInt32(uint32_t value) { |
1141 | buffer_.Emit<uint32_t>(value); |
1142 | } |
1143 | |
1144 | inline void Assembler::EmitInt64(int64_t value) { |
1145 | buffer_.Emit<int64_t>(value); |
1146 | } |
1147 | |
1148 | inline void Assembler::EmitRegisterREX(Register reg, uint8_t rex, bool force) { |
1149 | ASSERT(reg != kNoRegister && reg <= R15); |
1150 | ASSERT(rex == REX_NONE || rex == REX_W); |
1151 | rex |= (reg > 7 ? REX_B : REX_NONE); |
1152 | if (rex != REX_NONE || force) EmitUint8(REX_PREFIX | rex); |
1153 | } |
1154 | |
1155 | inline void Assembler::EmitOperandREX(int rm, |
1156 | const Operand& operand, |
1157 | uint8_t rex) { |
1158 | rex |= (rm > 7 ? REX_R : REX_NONE) | operand.rex(); |
1159 | if (rex != REX_NONE) EmitUint8(REX_PREFIX | rex); |
1160 | } |
1161 | |
1162 | inline void Assembler::EmitRegRegRex(int reg, int base, uint8_t rex) { |
1163 | ASSERT(reg != kNoRegister && reg <= R15); |
1164 | ASSERT(base != kNoRegister && base <= R15); |
1165 | ASSERT(rex == REX_NONE || rex == REX_W); |
1166 | if (reg > 7) rex |= REX_R; |
1167 | if (base > 7) rex |= REX_B; |
1168 | if (rex != REX_NONE) EmitUint8(REX_PREFIX | rex); |
1169 | } |
1170 | |
1171 | inline void Assembler::EmitFixup(AssemblerFixup* fixup) { |
1172 | buffer_.EmitFixup(fixup); |
1173 | } |
1174 | |
1175 | inline void Assembler::EmitOperandSizeOverride() { |
1176 | EmitUint8(0x66); |
1177 | } |
1178 | |
1179 | } // namespace compiler |
1180 | } // namespace dart |
1181 | |
1182 | #endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_ |
1183 | |