// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // NOLINT
#if defined(TARGET_ARCH_X64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/instructions.h"

namespace dart {

DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, precompiled_mode);
DECLARE_FLAG(bool, use_slow_path);

namespace compiler {

Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
                     bool use_far_branches)
    : AssemblerBase(object_pool_builder), constant_pool_allowed_(false) {
  // Far branching mode is only needed and implemented for ARM.
  ASSERT(!use_far_branches);

  generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
    call(Address(THR,
                 target::Thread::write_barrier_wrappers_thread_offset(reg)));
  };
  generate_invoke_array_write_barrier_ = [&]() {
    call(
        Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
  };
}

void Assembler::call(Label* label) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  static const int kSize = 5;
  EmitUint8(0xE8);
  EmitLabel(label, kSize);
}

void Assembler::LoadNativeEntry(
    Register dst,
    const ExternalLabel* label,
    ObjectPoolBuilderEntry::Patchability patchable) {
  const int32_t offset = target::ObjectPool::element_offset(
      object_pool_builder().FindNativeFunction(label, patchable));
  LoadWordFromPoolOffset(dst, offset - kHeapObjectTag);
}

void Assembler::call(const ExternalLabel* label) {
  {  // Encode movq(TMP, Immediate(label->address())), but always as imm64.
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitRegisterREX(TMP, REX_W);
    EmitUint8(0xB8 | (TMP & 7));
    EmitInt64(label->address());
  }
  call(TMP);
}

void Assembler::CallPatchable(const Code& target, CodeEntryKind entry_kind) {
  ASSERT(constant_pool_allowed());
  const intptr_t idx = object_pool_builder().AddObject(
      ToObject(target), ObjectPoolBuilderEntry::kPatchable);
  const int32_t offset = target::ObjectPool::element_offset(idx);
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
  call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
}

void Assembler::CallWithEquivalence(const Code& target,
                                    const Object& equivalence,
                                    CodeEntryKind entry_kind) {
  ASSERT(constant_pool_allowed());
  const intptr_t idx =
      object_pool_builder().FindObject(ToObject(target), equivalence);
  const int32_t offset = target::ObjectPool::element_offset(idx);
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
  call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
}

void Assembler::Call(const Code& target) {
  ASSERT(constant_pool_allowed());
  const intptr_t idx = object_pool_builder().FindObject(
      ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
  const int32_t offset = target::ObjectPool::element_offset(idx);
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
  call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}

void Assembler::CallToRuntime() {
  call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}

void Assembler::pushq(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitRegisterREX(reg, REX_NONE);
  EmitUint8(0x50 | (reg & 7));
}

void Assembler::pushq(const Immediate& imm) {
  if (imm.is_int8()) {
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitUint8(0x6A);
    EmitUint8(imm.value() & 0xFF);
  } else if (imm.is_int32()) {
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitUint8(0x68);
    EmitImmediate(imm);
  } else {
    movq(TMP, imm);
    pushq(TMP);
  }
}

void Assembler::PushImmediate(const Immediate& imm) {
  if (imm.is_int32()) {
    pushq(imm);
  } else {
    LoadImmediate(TMP, imm);
    pushq(TMP);
  }
}

void Assembler::popq(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitRegisterREX(reg, REX_NONE);
  EmitUint8(0x58 | (reg & 7));
}

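// SETcc writes only the low byte of the destination register. Byte registers
// R8B-R15B are only reachable with a REX.B prefix, which is emitted below when
// needed.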
void Assembler::setcc(Condition condition, ByteRegister dst) {
  ASSERT(dst != kNoByteRegister);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (dst >= 8) {
    EmitUint8(REX_PREFIX | (((dst & 0x08) != 0) ? REX_B : REX_NONE));
  }
  EmitUint8(0x0F);
  EmitUint8(0x90 + condition);
  EmitUint8(0xC0 + (dst & 0x07));
}

void Assembler::EnterSafepoint() {
  // We generate the same number of instructions whether or not the slow-path is
  // forced, to simplify GenerateJitCallbackTrampolines.
  Label done, slow_path;
  if (FLAG_use_slow_path) {
    jmp(&slow_path);
  }

  // Compare and swap the value at Thread::safepoint_state from unacquired to
  // acquired. If the CAS fails, go to a slow-path stub.
  pushq(RAX);
  movq(RAX, Immediate(target::Thread::safepoint_state_unacquired()));
  movq(TMP, Immediate(target::Thread::safepoint_state_acquired()));
  LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
  movq(TMP, RAX);
  popq(RAX);
  cmpq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));

  if (!FLAG_use_slow_path) {
    j(EQUAL, &done);
  }

  Bind(&slow_path);
  movq(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
  movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));

  // Use call instead of CallCFunction to avoid having to clean up shadow space
  // afterwards. This is possible because the safepoint stub does not use the
  // shadow space as scratch and has no arguments.
  call(TMP);

  Bind(&done);
}

void Assembler::TransitionGeneratedToNative(Register destination_address,
                                            Register new_exit_frame,
                                            Register new_exit_through_ffi,
                                            bool enter_safepoint) {
  // Save exit frame information to enable stack walking.
  movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
       new_exit_frame);

  movq(compiler::Address(THR,
                         compiler::target::Thread::exit_through_ffi_offset()),
       new_exit_through_ffi);

  movq(Assembler::VMTagAddress(), destination_address);
  movq(Address(THR, target::Thread::execution_state_offset()),
       Immediate(target::Thread::native_execution_state()));

  if (enter_safepoint) {
    EnterSafepoint();
  }
}

void Assembler::LeaveSafepoint() {
  // We generate the same number of instructions whether or not the slow-path is
  // forced, for consistency with EnterSafepoint.
  Label done, slow_path;
  if (FLAG_use_slow_path) {
    jmp(&slow_path);
  }

  // Compare and swap the value at Thread::safepoint_state from acquired to
  // unacquired. If the CAS succeeds, jump to 'done'; otherwise fall through to
  // the slow-path stub.

  pushq(RAX);
  movq(RAX, Immediate(target::Thread::safepoint_state_acquired()));
  movq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));
  LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
  movq(TMP, RAX);
  popq(RAX);
  cmpq(TMP, Immediate(target::Thread::safepoint_state_acquired()));

  if (!FLAG_use_slow_path) {
    j(EQUAL, &done);
  }

  Bind(&slow_path);
  movq(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
  movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));

  // Use call instead of CallCFunction to avoid having to clean up shadow space
  // afterwards. This is possible because the safepoint stub does not use the
  // shadow space as scratch and has no arguments.
  call(TMP);

  Bind(&done);
}

void Assembler::TransitionNativeToGenerated(bool leave_safepoint) {
  if (leave_safepoint) {
    LeaveSafepoint();
  } else {
#if defined(DEBUG)
    // Ensure we've already left the safepoint.
    movq(TMP, Address(THR, target::Thread::safepoint_state_offset()));
    andq(TMP, Immediate((1 << target::Thread::safepoint_state_inside_bit())));
    Label ok;
    j(ZERO, &ok);
    Breakpoint();
    Bind(&ok);
#endif
  }

  movq(Assembler::VMTagAddress(),
       Immediate(target::Thread::vm_tag_compiled_id()));
  movq(Address(THR, target::Thread::execution_state_offset()),
       Immediate(target::Thread::generated_execution_state()));

  // Reset exit frame information in Isolate's mutator thread structure.
  movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
       Immediate(0));
  movq(compiler::Address(THR,
                         compiler::target::Thread::exit_through_ffi_offset()),
       compiler::Immediate(0));
}

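// EmitQ, EmitL, and EmitW emit an opcode with a memory operand in 64-bit
// (REX.W), 32-bit, or 16-bit (operand-size override) form respectively. An
// optional prefix1 is emitted before the REX byte and an optional prefix2
// after it.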
void Assembler::EmitQ(int reg,
                      const Address& address,
                      int opcode,
                      int prefix2,
                      int prefix1) {
  ASSERT(reg <= XMM15);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (prefix1 >= 0) {
    EmitUint8(prefix1);
  }
  EmitOperandREX(reg, address, REX_W);
  if (prefix2 >= 0) {
    EmitUint8(prefix2);
  }
  EmitUint8(opcode);
  EmitOperand(reg & 7, address);
}

void Assembler::EmitL(int reg,
                      const Address& address,
                      int opcode,
                      int prefix2,
                      int prefix1) {
  ASSERT(reg <= XMM15);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (prefix1 >= 0) {
    EmitUint8(prefix1);
  }
  EmitOperandREX(reg, address, REX_NONE);
  if (prefix2 >= 0) {
    EmitUint8(prefix2);
  }
  EmitUint8(opcode);
  EmitOperand(reg & 7, address);
}

void Assembler::EmitW(Register reg,
                      const Address& address,
                      int opcode,
                      int prefix2,
                      int prefix1) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (prefix1 >= 0) {
    EmitUint8(prefix1);
  }
  EmitOperandSizeOverride();
  EmitOperandREX(reg, address, REX_NONE);
  if (prefix2 >= 0) {
    EmitUint8(prefix2);
  }
  EmitUint8(opcode);
  EmitOperand(reg & 7, address);
}

void Assembler::movl(Register dst, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  Operand operand(dst);
  EmitOperandREX(0, operand, REX_NONE);
  EmitUint8(0xC7);
  EmitOperand(0, operand);
  ASSERT(imm.is_int32());
  EmitImmediate(imm);
}

void Assembler::movl(const Address& dst, const Immediate& imm) {
  movl(TMP, imm);
  movl(dst, TMP);
}

void Assembler::movb(const Address& dst, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitOperandREX(0, dst, REX_NONE);
  EmitUint8(0xC6);
  EmitOperand(0, dst);
  ASSERT(imm.is_int8());
  EmitUint8(imm.value() & 0xFF);
}

void Assembler::movw(Register dst, const Address& src) {
  // This would leave 16 bits above the 2 byte value undefined.
  // If we ever want to purposefully have those undefined, remove this.
  // TODO(40210): Allow this.
  FATAL("Use movzxw or movsxw instead.");
}

void Assembler::movw(const Address& dst, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitOperandSizeOverride();
  EmitOperandREX(0, dst, REX_NONE);
  EmitUint8(0xC7);
  EmitOperand(0, dst);
  EmitUint8(imm.value() & 0xFF);
  EmitUint8((imm.value() >> 8) & 0xFF);
}

void Assembler::movq(Register dst, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (imm.is_uint32()) {
    // Pick single byte B8 encoding if possible. If dst < 8 then we also omit
    // the Rex byte.
    EmitRegisterREX(dst, REX_NONE);
    EmitUint8(0xB8 | (dst & 7));
    EmitUInt32(imm.value());
  } else if (imm.is_int32()) {
    // Sign extended C7 Cx encoding if we have a negative input.
    Operand operand(dst);
    EmitOperandREX(0, operand, REX_W);
    EmitUint8(0xC7);
    EmitOperand(0, operand);
    EmitImmediate(imm);
  } else {
    // Full 64 bit immediate encoding.
    EmitRegisterREX(dst, REX_W);
    EmitUint8(0xB8 | (dst & 7));
    EmitImmediate(imm);
  }
}

void Assembler::movq(const Address& dst, const Immediate& imm) {
  if (imm.is_int32()) {
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitOperandREX(0, dst, REX_W);
    EmitUint8(0xC7);
    EmitOperand(0, dst);
    EmitImmediate(imm);
  } else {
    movq(TMP, imm);
    movq(dst, TMP);
  }
}

void Assembler::EmitSimple(int opcode, int opcode2, int opcode3) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(opcode);
  if (opcode2 != -1) {
    EmitUint8(opcode2);
    if (opcode3 != -1) {
      EmitUint8(opcode3);
    }
  }
}

void Assembler::EmitQ(int dst, int src, int opcode, int prefix2, int prefix1) {
  ASSERT(src <= XMM15);
  ASSERT(dst <= XMM15);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (prefix1 >= 0) {
    EmitUint8(prefix1);
  }
  EmitRegRegRex(dst, src, REX_W);
  if (prefix2 >= 0) {
    EmitUint8(prefix2);
  }
  EmitUint8(opcode);
  EmitRegisterOperand(dst & 7, src);
}

void Assembler::EmitL(int dst, int src, int opcode, int prefix2, int prefix1) {
  ASSERT(src <= XMM15);
  ASSERT(dst <= XMM15);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (prefix1 >= 0) {
    EmitUint8(prefix1);
  }
  EmitRegRegRex(dst, src);
  if (prefix2 >= 0) {
    EmitUint8(prefix2);
  }
  EmitUint8(opcode);
  EmitRegisterOperand(dst & 7, src);
}

void Assembler::EmitW(Register dst,
                      Register src,
                      int opcode,
                      int prefix2,
                      int prefix1) {
  ASSERT(src <= R15);
  ASSERT(dst <= R15);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (prefix1 >= 0) {
    EmitUint8(prefix1);
  }
  EmitOperandSizeOverride();
  EmitRegRegRex(dst, src);
  if (prefix2 >= 0) {
    EmitUint8(prefix2);
  }
  EmitUint8(opcode);
  EmitRegisterOperand(dst & 7, src);
}

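// Defines a unary XMM operation whose second operand is a 16-byte constant
// held in a Thread field. When dst == src the constant is used directly as a
// memory operand; otherwise it is first materialized into dst and then
// combined with src.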
#define UNARY_XMM_WITH_CONSTANT(name, constant, op) \
  void Assembler::name(XmmRegister dst, XmmRegister src) { \
    movq(TMP, Address(THR, target::Thread::constant##_address_offset())); \
    if (dst == src) { \
      op(dst, Address(TMP, 0)); \
    } else { \
      movups(dst, Address(TMP, 0)); \
      op(dst, src); \
    } \
  }

// TODO(erikcorry): For the case where dst != src, we could construct these
// with pcmpeqw xmm0,xmm0 followed by left and right shifts. This would avoid
// memory traffic.
// { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
UNARY_XMM_WITH_CONSTANT(notps, float_not, xorps)
// { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }
UNARY_XMM_WITH_CONSTANT(negateps, float_negate, xorps)
// { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF }
UNARY_XMM_WITH_CONSTANT(absps, float_absolute, andps)
// { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 }
UNARY_XMM_WITH_CONSTANT(zerowps, float_zerow, andps)
// { 0x8000000000000000LL, 0x8000000000000000LL }
UNARY_XMM_WITH_CONSTANT(negatepd, double_negate, xorpd)
// { 0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL }
UNARY_XMM_WITH_CONSTANT(abspd, double_abs, andpd)
// {0x8000000000000000LL, 0x8000000000000000LL}
UNARY_XMM_WITH_CONSTANT(DoubleNegate, double_negate, xorpd)
// {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL}
UNARY_XMM_WITH_CONSTANT(DoubleAbs, double_abs, andpd)

#undef UNARY_XMM_WITH_CONSTANT

void Assembler::CmpPS(XmmRegister dst, XmmRegister src, int condition) {
  EmitL(dst, src, 0xC2, 0x0F);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(condition);
}

void Assembler::set1ps(XmmRegister dst, Register tmp1, const Immediate& imm) {
  // Load 32-bit immediate value into tmp1.
  movl(tmp1, imm);
  // Move value from tmp1 into dst.
  movd(dst, tmp1);
  // Broadcast low lane into other three lanes.
  shufps(dst, dst, Immediate(0x0));
}

void Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) {
  EmitL(dst, src, 0xC6, 0x0F);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  ASSERT(imm.is_uint8());
  EmitUint8(imm.value());
}

void Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
  EmitL(dst, src, 0xC6, 0x0F, 0x66);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  ASSERT(imm.is_uint8());
  EmitUint8(imm.value());
}

void Assembler::roundsd(XmmRegister dst, XmmRegister src, RoundingMode mode) {
  ASSERT(src <= XMM15);
  ASSERT(dst <= XMM15);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x66);
  EmitRegRegRex(dst, src);
  EmitUint8(0x0F);
  EmitUint8(0x3A);
  EmitUint8(0x0B);
  EmitRegisterOperand(dst & 7, src);
  // Mask the precision exception.
  EmitUint8(static_cast<uint8_t>(mode) | 0x8);
}

void Assembler::fldl(const Address& src) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDD);
  EmitOperand(0, src);
}

void Assembler::fstpl(const Address& dst) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xDD);
  EmitOperand(3, dst);
}

void Assembler::ffree(intptr_t value) {
  ASSERT(value < 7);
  EmitSimple(0xDD, 0xC0 + value);
}

void Assembler::CompareImmediate(Register reg, const Immediate& imm) {
  if (imm.is_int32()) {
    cmpq(reg, imm);
  } else {
    ASSERT(reg != TMP);
    LoadImmediate(TMP, imm);
    cmpq(reg, TMP);
  }
}

void Assembler::CompareImmediate(const Address& address, const Immediate& imm) {
  if (imm.is_int32()) {
    cmpq(address, imm);
  } else {
    LoadImmediate(TMP, imm);
    cmpq(address, TMP);
  }
}

void Assembler::testb(const Address& address, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitOperandREX(0, address, REX_NONE);
  EmitUint8(0xF6);
  EmitOperand(0, address);
  ASSERT(imm.is_int8());
  EmitUint8(imm.value() & 0xFF);
}

void Assembler::testb(const Address& address, Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitOperandREX(reg, address, REX_NONE);
  EmitUint8(0x84);
  EmitOperand(reg & 7, address);
}

void Assembler::testq(Register reg, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (imm.is_uint8()) {
    // Use zero-extended 8-bit immediate.
    if (reg >= 4) {
      // We need the Rex byte to give access to the SIL and DIL registers (the
      // low bytes of RSI and RDI).
      EmitRegisterREX(reg, REX_NONE, /* force = */ true);
    }
    if (reg == RAX) {
      EmitUint8(0xA8);
    } else {
      EmitUint8(0xF6);
      EmitUint8(0xC0 + (reg & 7));
    }
    EmitUint8(imm.value() & 0xFF);
  } else if (imm.is_uint32()) {
    if (reg == RAX) {
      EmitUint8(0xA9);
    } else {
      EmitRegisterREX(reg, REX_NONE);
      EmitUint8(0xF7);
      EmitUint8(0xC0 | (reg & 7));
    }
    EmitUInt32(imm.value());
  } else {
    // Sign extended version of 32 bit test.
    ASSERT(imm.is_int32());
    EmitRegisterREX(reg, REX_W);
    if (reg == RAX) {
      EmitUint8(0xA9);
    } else {
      EmitUint8(0xF7);
      EmitUint8(0xC0 | (reg & 7));
    }
    EmitImmediate(imm);
  }
}

void Assembler::TestImmediate(Register dst, const Immediate& imm) {
  if (imm.is_int32() || imm.is_uint32()) {
    testq(dst, imm);
  } else {
    ASSERT(dst != TMP);
    LoadImmediate(TMP, imm);
    testq(dst, TMP);
  }
}

void Assembler::AluL(uint8_t modrm_opcode, Register dst, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitRegisterREX(dst, REX_NONE);
  EmitComplex(modrm_opcode, Operand(dst), imm);
}

void Assembler::AluB(uint8_t modrm_opcode,
                     const Address& dst,
                     const Immediate& imm) {
  ASSERT(imm.is_uint8() || imm.is_int8());
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitOperandREX(modrm_opcode, dst, REX_NONE);
  EmitUint8(0x80);
  EmitOperand(modrm_opcode, dst);
  EmitUint8(imm.value() & 0xFF);
}

void Assembler::AluW(uint8_t modrm_opcode,
                     const Address& dst,
                     const Immediate& imm) {
  ASSERT(imm.is_int16() || imm.is_uint16());
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitOperandSizeOverride();
  EmitOperandREX(modrm_opcode, dst, REX_NONE);
  if (imm.is_int8()) {
    EmitSignExtendedInt8(modrm_opcode, dst, imm);
  } else {
    EmitUint8(0x81);
    EmitOperand(modrm_opcode, dst);
    EmitUint8(imm.value() & 0xFF);
    EmitUint8((imm.value() >> 8) & 0xFF);
  }
}

void Assembler::AluL(uint8_t modrm_opcode,
                     const Address& dst,
                     const Immediate& imm) {
  ASSERT(imm.is_int32());
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitOperandREX(modrm_opcode, dst, REX_NONE);
  EmitComplex(modrm_opcode, dst, imm);
}

void Assembler::AluQ(uint8_t modrm_opcode,
                     uint8_t opcode,
                     Register dst,
                     const Immediate& imm) {
  Operand operand(dst);
  if (modrm_opcode == 4 && imm.is_uint32()) {
    // We can use andl for andq.
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitRegisterREX(dst, REX_NONE);
    // Would like to use EmitComplex here, but it doesn't like uint32
    // immediates.
    if (imm.is_int8()) {
      EmitSignExtendedInt8(modrm_opcode, operand, imm);
    } else {
      if (dst == RAX) {
        EmitUint8(0x25);
      } else {
        EmitUint8(0x81);
        EmitOperand(modrm_opcode, operand);
      }
      EmitUInt32(imm.value());
    }
  } else if (imm.is_int32()) {
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitRegisterREX(dst, REX_W);
    EmitComplex(modrm_opcode, operand, imm);
  } else {
    ASSERT(dst != TMP);
    movq(TMP, imm);
    EmitQ(dst, TMP, opcode);
  }
}

void Assembler::AluQ(uint8_t modrm_opcode,
                     uint8_t opcode,
                     const Address& dst,
                     const Immediate& imm) {
  if (imm.is_int32()) {
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitOperandREX(modrm_opcode, dst, REX_W);
    EmitComplex(modrm_opcode, dst, imm);
  } else {
    movq(TMP, imm);
    EmitQ(TMP, dst, opcode);
  }
}

void Assembler::AndImmediate(Register dst, const Immediate& imm) {
  if (imm.is_int32() || imm.is_uint32()) {
    andq(dst, imm);
  } else {
    ASSERT(dst != TMP);
    LoadImmediate(TMP, imm);
    andq(dst, TMP);
  }
}

void Assembler::OrImmediate(Register dst, const Immediate& imm) {
  if (imm.is_int32()) {
    orq(dst, imm);
  } else {
    ASSERT(dst != TMP);
    LoadImmediate(TMP, imm);
    orq(dst, TMP);
  }
}

void Assembler::XorImmediate(Register dst, const Immediate& imm) {
  if (imm.is_int32()) {
    xorq(dst, imm);
  } else {
    ASSERT(dst != TMP);
    LoadImmediate(TMP, imm);
    xorq(dst, TMP);
  }
}

void Assembler::cqo() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitRegisterREX(RAX, REX_W);
  EmitUint8(0x99);
}

void Assembler::EmitUnaryQ(Register reg, int opcode, int modrm_code) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitRegisterREX(reg, REX_W);
  EmitUint8(opcode);
  EmitOperand(modrm_code, Operand(reg));
}

void Assembler::EmitUnaryL(Register reg, int opcode, int modrm_code) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitRegisterREX(reg, REX_NONE);
  EmitUint8(opcode);
  EmitOperand(modrm_code, Operand(reg));
}

void Assembler::EmitUnaryQ(const Address& address, int opcode, int modrm_code) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  Operand operand(address);
  EmitOperandREX(modrm_code, operand, REX_W);
  EmitUint8(opcode);
  EmitOperand(modrm_code, operand);
}

void Assembler::EmitUnaryL(const Address& address, int opcode, int modrm_code) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  Operand operand(address);
  EmitOperandREX(modrm_code, operand, REX_NONE);
  EmitUint8(opcode);
  EmitOperand(modrm_code, operand);
}

void Assembler::imull(Register reg, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  Operand operand(reg);
  EmitOperandREX(reg, operand, REX_NONE);
  EmitUint8(0x69);
  EmitOperand(reg & 7, Operand(reg));
  EmitImmediate(imm);
}

void Assembler::imulq(Register reg, const Immediate& imm) {
  if (imm.is_int32()) {
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    Operand operand(reg);
    EmitOperandREX(reg, operand, REX_W);
    EmitUint8(0x69);
    EmitOperand(reg & 7, Operand(reg));
    EmitImmediate(imm);
  } else {
    ASSERT(reg != TMP);
    movq(TMP, imm);
    imulq(reg, TMP);
  }
}

void Assembler::MulImmediate(Register reg,
                             const Immediate& imm,
                             OperandWidth width) {
  if (imm.is_int32()) {
    if (width == k32Bit) {
      imull(reg, imm);
    } else {
      imulq(reg, imm);
    }
  } else {
    ASSERT(reg != TMP);
    ASSERT(width != k32Bit);
    movq(TMP, imm);
    imulq(reg, TMP);
  }
}

void Assembler::shll(Register reg, const Immediate& imm) {
  EmitGenericShift(false, 4, reg, imm);
}

void Assembler::shll(Register operand, Register shifter) {
  EmitGenericShift(false, 4, operand, shifter);
}

void Assembler::shrl(Register reg, const Immediate& imm) {
  EmitGenericShift(false, 5, reg, imm);
}

void Assembler::shrl(Register operand, Register shifter) {
  EmitGenericShift(false, 5, operand, shifter);
}

void Assembler::sarl(Register reg, const Immediate& imm) {
  EmitGenericShift(false, 7, reg, imm);
}

void Assembler::sarl(Register operand, Register shifter) {
  EmitGenericShift(false, 7, operand, shifter);
}

void Assembler::shldl(Register dst, Register src, const Immediate& imm) {
  EmitL(src, dst, 0xA4, 0x0F);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  ASSERT(imm.is_int8());
  EmitUint8(imm.value() & 0xFF);
}

void Assembler::shlq(Register reg, const Immediate& imm) {
  EmitGenericShift(true, 4, reg, imm);
}

void Assembler::shlq(Register operand, Register shifter) {
  EmitGenericShift(true, 4, operand, shifter);
}

void Assembler::shrq(Register reg, const Immediate& imm) {
  EmitGenericShift(true, 5, reg, imm);
}

void Assembler::shrq(Register operand, Register shifter) {
  EmitGenericShift(true, 5, operand, shifter);
}

void Assembler::sarq(Register reg, const Immediate& imm) {
  EmitGenericShift(true, 7, reg, imm);
}

void Assembler::sarq(Register operand, Register shifter) {
  EmitGenericShift(true, 7, operand, shifter);
}

void Assembler::shldq(Register dst, Register src, const Immediate& imm) {
  EmitQ(src, dst, 0xA4, 0x0F);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  ASSERT(imm.is_int8());
  EmitUint8(imm.value() & 0xFF);
}

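// btq: test a single bit of |base| using BT with an 8-bit immediate bit index
// (0x0F 0xBA /4). REX.W is only emitted when the bit index is >= 32.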
void Assembler::btq(Register base, int bit) {
  ASSERT(bit >= 0 && bit < 64);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  Operand operand(base);
  EmitOperandREX(4, operand, bit >= 32 ? REX_W : REX_NONE);
  EmitUint8(0x0F);
  EmitUint8(0xBA);
  EmitOperand(4, operand);
  EmitUint8(bit);
}

void Assembler::enter(const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC8);
  ASSERT(imm.is_uint16());
  EmitUint8(imm.value() & 0xFF);
  EmitUint8((imm.value() >> 8) & 0xFF);
  EmitUint8(0x00);
}

void Assembler::nop(int size) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  // There are nops up to size 15, but for now just provide up to size 8.
  ASSERT(0 < size && size <= MAX_NOP_SIZE);
  switch (size) {
    case 1:
      EmitUint8(0x90);
      break;
    case 2:
      EmitUint8(0x66);
      EmitUint8(0x90);
      break;
    case 3:
      EmitUint8(0x0F);
      EmitUint8(0x1F);
      EmitUint8(0x00);
      break;
    case 4:
      EmitUint8(0x0F);
      EmitUint8(0x1F);
      EmitUint8(0x40);
      EmitUint8(0x00);
      break;
    case 5:
      EmitUint8(0x0F);
      EmitUint8(0x1F);
      EmitUint8(0x44);
      EmitUint8(0x00);
      EmitUint8(0x00);
      break;
    case 6:
      EmitUint8(0x66);
      EmitUint8(0x0F);
      EmitUint8(0x1F);
      EmitUint8(0x44);
      EmitUint8(0x00);
      EmitUint8(0x00);
      break;
    case 7:
      EmitUint8(0x0F);
      EmitUint8(0x1F);
      EmitUint8(0x80);
      EmitUint8(0x00);
      EmitUint8(0x00);
      EmitUint8(0x00);
      EmitUint8(0x00);
      break;
    case 8:
      EmitUint8(0x0F);
      EmitUint8(0x1F);
      EmitUint8(0x84);
      EmitUint8(0x00);
      EmitUint8(0x00);
      EmitUint8(0x00);
      EmitUint8(0x00);
      EmitUint8(0x00);
      break;
    default:
      UNIMPLEMENTED();
  }
}

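// Conditional jump to a label. A bound label uses the short 2-byte form when
// the backward displacement fits in 8 bits and the 6-byte 0x0F 0x8x form
// otherwise; an unbound label is linked (near or far) for later patching in
// Bind.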
void Assembler::j(Condition condition, Label* label, bool near) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (label->IsBound()) {
    static const int kShortSize = 2;
    static const int kLongSize = 6;
    intptr_t offset = label->Position() - buffer_.Size();
    ASSERT(offset <= 0);
    if (Utils::IsInt(8, offset - kShortSize)) {
      EmitUint8(0x70 + condition);
      EmitUint8((offset - kShortSize) & 0xFF);
    } else {
      EmitUint8(0x0F);
      EmitUint8(0x80 + condition);
      EmitInt32(offset - kLongSize);
    }
  } else if (near) {
    EmitUint8(0x70 + condition);
    EmitNearLabelLink(label);
  } else {
    EmitUint8(0x0F);
    EmitUint8(0x80 + condition);
    EmitLabelLink(label);
  }
}

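// J: conditional jump to a Code target loaded through the object pool,
// implemented by jumping over the unconditional Jmp with the negated
// condition.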
void Assembler::J(Condition condition, const Code& target, Register pp) {
  Label no_jump;
  // Negate condition.
  j(static_cast<Condition>(condition ^ 1), &no_jump, kNearJump);
  Jmp(target, pp);
  Bind(&no_jump);
}

void Assembler::jmp(Label* label, bool near) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (label->IsBound()) {
    static const int kShortSize = 2;
    static const int kLongSize = 5;
    intptr_t offset = label->Position() - buffer_.Size();
    ASSERT(offset <= 0);
    if (Utils::IsInt(8, offset - kShortSize)) {
      EmitUint8(0xEB);
      EmitUint8((offset - kShortSize) & 0xFF);
    } else {
      EmitUint8(0xE9);
      EmitInt32(offset - kLongSize);
    }
  } else if (near) {
    EmitUint8(0xEB);
    EmitNearLabelLink(label);
  } else {
    EmitUint8(0xE9);
    EmitLabelLink(label);
  }
}

void Assembler::jmp(const ExternalLabel* label) {
  {  // Encode movq(TMP, Immediate(label->address())), but always as imm64.
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    EmitRegisterREX(TMP, REX_W);
    EmitUint8(0xB8 | (TMP & 7));
    EmitInt64(label->address());
  }
  jmp(TMP);
}

void Assembler::JmpPatchable(const Code& target, Register pp) {
  ASSERT((pp != PP) || constant_pool_allowed());
  const intptr_t idx = object_pool_builder().AddObject(
      ToObject(target), ObjectPoolBuilderEntry::kPatchable);
  const int32_t offset = target::ObjectPool::element_offset(idx);
  movq(CODE_REG, Address(pp, offset - kHeapObjectTag));
  movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
  jmp(TMP);
}

void Assembler::Jmp(const Code& target, Register pp) {
  ASSERT((pp != PP) || constant_pool_allowed());
  const intptr_t idx = object_pool_builder().FindObject(
      ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
  const int32_t offset = target::ObjectPool::element_offset(idx);
  movq(CODE_REG, FieldAddress(pp, offset));
  jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}

void Assembler::CompareRegisters(Register a, Register b) {
  cmpq(a, b);
}

void Assembler::MoveRegister(Register to, Register from) {
  if (to != from) {
    movq(to, from);
  }
}

void Assembler::PushRegister(Register r) {
  pushq(r);
}

void Assembler::PopRegister(Register r) {
  popq(r);
}

void Assembler::AddImmediate(Register reg,
                             const Immediate& imm,
                             OperandWidth width) {
  const int64_t value = imm.value();
  if (value == 0) {
    return;
  }
  if ((value > 0) || (value == kMinInt64)) {
    if (value == 1) {
      if (width == k32Bit) {
        incl(reg);
      } else {
        incq(reg);
      }
    } else {
      if (imm.is_int32() || (width == k32Bit && imm.is_uint32())) {
        if (width == k32Bit) {
          addl(reg, imm);
        } else {
          addq(reg, imm);
        }
      } else {
        ASSERT(reg != TMP);
        ASSERT(width != k32Bit);
        LoadImmediate(TMP, imm);
        addq(reg, TMP);
      }
    }
  } else {
    SubImmediate(reg, Immediate(-value), width);
  }
}

void Assembler::AddImmediate(const Address& address, const Immediate& imm) {
  const int64_t value = imm.value();
  if (value == 0) {
    return;
  }
  if ((value > 0) || (value == kMinInt64)) {
    if (value == 1) {
      incq(address);
    } else {
      if (imm.is_int32()) {
        addq(address, imm);
      } else {
        LoadImmediate(TMP, imm);
        addq(address, TMP);
      }
    }
  } else {
    SubImmediate(address, Immediate(-value));
  }
}

void Assembler::SubImmediate(Register reg,
                             const Immediate& imm,
                             OperandWidth width) {
  const int64_t value = imm.value();
  if (value == 0) {
    return;
  }
  if ((value > 0) || (value == kMinInt64) ||
      (value == kMinInt32 && width == k32Bit)) {
    if (value == 1) {
      if (width == k32Bit) {
        decl(reg);
      } else {
        decq(reg);
      }
    } else {
      if (imm.is_int32()) {
        if (width == k32Bit) {
          subl(reg, imm);
        } else {
          subq(reg, imm);
        }
      } else {
        ASSERT(reg != TMP);
        ASSERT(width != k32Bit);
        LoadImmediate(TMP, imm);
        subq(reg, TMP);
      }
    }
  } else {
    AddImmediate(reg, Immediate(-value), width);
  }
}

void Assembler::SubImmediate(const Address& address, const Immediate& imm) {
  const int64_t value = imm.value();
  if (value == 0) {
    return;
  }
  if ((value > 0) || (value == kMinInt64)) {
    if (value == 1) {
      decq(address);
    } else {
      if (imm.is_int32()) {
        subq(address, imm);
      } else {
        LoadImmediate(TMP, imm);
        subq(address, TMP);
      }
    }
  } else {
    AddImmediate(address, Immediate(-value));
  }
}

void Assembler::Drop(intptr_t stack_elements, Register tmp) {
  ASSERT(stack_elements >= 0);
  if (stack_elements <= 4) {
    for (intptr_t i = 0; i < stack_elements; i++) {
      popq(tmp);
    }
    return;
  }
  addq(RSP, Immediate(stack_elements * target::kWordSize));
}

bool Assembler::CanLoadFromObjectPool(const Object& object) const {
  ASSERT(IsOriginalObject(object));
  if (!constant_pool_allowed()) {
    return false;
  }

  if (target::IsSmi(object)) {
    // If the raw smi does not fit into a 32-bit signed int, then we'll keep
    // the raw value in the object pool.
    return !Utils::IsInt(32, target::ToRawSmi(object));
  }
  ASSERT(IsNotTemporaryScopedHandle(object));
  ASSERT(IsInOldSpace(object));
  return true;
}

void Assembler::LoadWordFromPoolOffset(Register dst, int32_t offset) {
  ASSERT(constant_pool_allowed());
  ASSERT(dst != PP);
  // This sequence must be decodable by code_patcher_x64.cc.
  movq(dst, Address(PP, offset));
}

void Assembler::LoadIsolate(Register dst) {
  movq(dst, Address(THR, target::Thread::isolate_offset()));
}

void Assembler::LoadDispatchTable(Register dst) {
  movq(dst, Address(THR, target::Thread::dispatch_table_array_offset()));
}

void Assembler::LoadObjectHelper(Register dst,
                                 const Object& object,
                                 bool is_unique) {
  ASSERT(IsOriginalObject(object));

  // `is_unique == true` effectively means object has to be patchable.
  if (!is_unique) {
    intptr_t offset;
    if (target::CanLoadFromThread(object, &offset)) {
      movq(dst, Address(THR, offset));
      return;
    }
  }
  if (CanLoadFromObjectPool(object)) {
    const int32_t offset = target::ObjectPool::element_offset(
        is_unique ? object_pool_builder().AddObject(object)
                  : object_pool_builder().FindObject(object));
    LoadWordFromPoolOffset(dst, offset - kHeapObjectTag);
    return;
  }
  ASSERT(target::IsSmi(object));
  LoadImmediate(dst, Immediate(target::ToRawSmi(object)));
}

void Assembler::LoadObject(Register dst, const Object& object) {
  LoadObjectHelper(dst, object, false);
}

void Assembler::LoadUniqueObject(Register dst, const Object& object) {
  LoadObjectHelper(dst, object, true);
}

void Assembler::StoreObject(const Address& dst, const Object& object) {
  ASSERT(IsOriginalObject(object));

  intptr_t offset_from_thread;
  if (target::CanLoadFromThread(object, &offset_from_thread)) {
    movq(TMP, Address(THR, offset_from_thread));
    movq(dst, TMP);
  } else if (CanLoadFromObjectPool(object)) {
    LoadObject(TMP, object);
    movq(dst, TMP);
  } else {
    ASSERT(target::IsSmi(object));
    MoveImmediate(dst, Immediate(target::ToRawSmi(object)));
  }
}

void Assembler::PushObject(const Object& object) {
  ASSERT(IsOriginalObject(object));

  intptr_t offset_from_thread;
  if (target::CanLoadFromThread(object, &offset_from_thread)) {
    pushq(Address(THR, offset_from_thread));
  } else if (CanLoadFromObjectPool(object)) {
    LoadObject(TMP, object);
    pushq(TMP);
  } else {
    ASSERT(target::IsSmi(object));
    PushImmediate(Immediate(target::ToRawSmi(object)));
  }
}

void Assembler::CompareObject(Register reg, const Object& object) {
  ASSERT(IsOriginalObject(object));

  intptr_t offset_from_thread;
  if (target::CanLoadFromThread(object, &offset_from_thread)) {
    cmpq(reg, Address(THR, offset_from_thread));
  } else if (CanLoadFromObjectPool(object)) {
    const intptr_t idx = object_pool_builder().FindObject(
        object, ObjectPoolBuilderEntry::kNotPatchable);
    const int32_t offset = target::ObjectPool::element_offset(idx);
    cmpq(reg, Address(PP, offset - kHeapObjectTag));
  } else {
    ASSERT(target::IsSmi(object));
    CompareImmediate(reg, Immediate(target::ToRawSmi(object)));
  }
}

intptr_t Assembler::FindImmediate(int64_t imm) {
  return object_pool_builder().FindImmediate(imm);
}

void Assembler::LoadImmediate(Register reg, const Immediate& imm) {
  if (imm.value() == 0) {
    xorl(reg, reg);
  } else if (imm.is_int32() || !constant_pool_allowed()) {
    movq(reg, imm);
  } else {
    int32_t offset =
        target::ObjectPool::element_offset(FindImmediate(imm.value()));
    LoadWordFromPoolOffset(reg, offset - kHeapObjectTag);
  }
}

void Assembler::MoveImmediate(const Address& dst, const Immediate& imm) {
  if (imm.is_int32()) {
    movq(dst, imm);
  } else {
    LoadImmediate(TMP, imm);
    movq(dst, TMP);
  }
}

// Destroys the value register.
void Assembler::StoreIntoObjectFilter(Register object,
                                      Register value,
                                      Label* label,
                                      CanBeSmi can_be_smi,
                                      BarrierFilterMode how_to_jump) {
  COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
                  target::kWordSize) &&
                 (target::ObjectAlignment::kOldObjectAlignmentOffset == 0));

  if (can_be_smi == kValueIsNotSmi) {
#if defined(DEBUG)
    Label okay;
    BranchIfNotSmi(value, &okay);
    Stop("Unexpected Smi!");
    Bind(&okay);
#endif
    // The write barrier triggers if the value is in new space (bit set) and
    // the object is in old space (bit cleared). We could compute
    // value & ~object and skip the write barrier if the bit is not set, but
    // computing that in place would destroy the object register, which must be
    // preserved. Instead we compute the negated expression ~value | object
    // (destroying only value) and skip the write barrier if the bit is set.
    notl(value);
    orl(value, object);
    testl(value, Immediate(target::ObjectAlignment::kNewObjectAlignmentOffset));
  } else {
    ASSERT(kHeapObjectTag == 1);
    // Detect value being ...1001 and object being ...0001.
    andl(value, Immediate(0xf));
    leal(value, Address(value, object, TIMES_2, 0x15));
    testl(value, Immediate(0x1f));
  }
  Condition condition = how_to_jump == kJumpToNoUpdate ? NOT_ZERO : ZERO;
  bool distance = how_to_jump == kJumpToNoUpdate ? kNearJump : kFarJump;
  j(condition, label, distance);
}

void Assembler::StoreIntoObject(Register object,
                                const Address& dest,
                                Register value,
                                CanBeSmi can_be_smi) {
  // x.slot = x. The barrier should have been removed at the IL level.
  ASSERT(object != value);
  ASSERT(object != TMP);
  ASSERT(value != TMP);

  movq(dest, value);

  // In parallel, test whether
  //  - object is old and not remembered and value is new, or
  //  - object is old and value is old and not marked and concurrent marking is
  //    in progress
  // If so, call the WriteBarrier stub, which will either add object to the
  // store buffer (case 1) or add value to the marking stack (case 2).
  // Compare ObjectLayout::StorePointer.
  Label done;
  if (can_be_smi == kValueCanBeSmi) {
    testq(value, Immediate(kSmiTagMask));
    j(ZERO, &done, kNearJump);
  }
  movb(TMP, FieldAddress(object, target::Object::tags_offset()));
  shrl(TMP, Immediate(target::ObjectLayout::kBarrierOverlapShift));
  andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
  testb(FieldAddress(value, target::Object::tags_offset()), TMP);
  j(ZERO, &done, kNearJump);

  Register object_for_call = object;
  if (value != kWriteBarrierValueReg) {
    // Unlikely. Only non-graph intrinsics.
    // TODO(rmacnak): Shuffle registers in intrinsics.
    pushq(kWriteBarrierValueReg);
    if (object == kWriteBarrierValueReg) {
      COMPILE_ASSERT(RBX != kWriteBarrierValueReg);
      COMPILE_ASSERT(RCX != kWriteBarrierValueReg);
      object_for_call = (value == RBX) ? RCX : RBX;
      pushq(object_for_call);
      movq(object_for_call, object);
    }
    movq(kWriteBarrierValueReg, value);
  }
  generate_invoke_write_barrier_wrapper_(object_for_call);
  if (value != kWriteBarrierValueReg) {
    if (object == kWriteBarrierValueReg) {
      popq(object_for_call);
    }
    popq(kWriteBarrierValueReg);
  }
  Bind(&done);
}

void Assembler::StoreIntoArray(Register object,
                               Register slot,
                               Register value,
                               CanBeSmi can_be_smi) {
  ASSERT(object != TMP);
  ASSERT(value != TMP);
  ASSERT(slot != TMP);

  movq(Address(slot, 0), value);

  // In parallel, test whether
  //  - object is old and not remembered and value is new, or
  //  - object is old and value is old and not marked and concurrent marking is
  //    in progress
  // If so, call the WriteBarrier stub, which will either add object to the
  // store buffer (case 1) or add value to the marking stack (case 2).
  // Compare ObjectLayout::StorePointer.
  Label done;
  if (can_be_smi == kValueCanBeSmi) {
    testq(value, Immediate(kSmiTagMask));
    j(ZERO, &done, kNearJump);
  }
  movb(TMP, FieldAddress(object, target::Object::tags_offset()));
  shrl(TMP, Immediate(target::ObjectLayout::kBarrierOverlapShift));
  andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
  testb(FieldAddress(value, target::Object::tags_offset()), TMP);
  j(ZERO, &done, kNearJump);

  if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
      (slot != kWriteBarrierSlotReg)) {
    // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
    // from StoreIndexInstr, which gets these exact registers from the register
    // allocator.
    UNIMPLEMENTED();
  }

  generate_invoke_array_write_barrier_();

  Bind(&done);
}

void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         Register value) {
  movq(dest, value);
#if defined(DEBUG)
  Label done;
  pushq(value);
  StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);

  testb(FieldAddress(object, target::Object::tags_offset()),
        Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
  j(ZERO, &done, Assembler::kNearJump);

  Stop("Store buffer update is required");
  Bind(&done);
  popq(value);
#endif  // defined(DEBUG)
  // No store buffer update.
}

void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         const Object& value) {
  StoreObject(dest, value);
}

void Assembler::StoreInternalPointer(Register object,
                                     const Address& dest,
                                     Register value) {
  movq(dest, value);
}

void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
#if defined(DEBUG)
  Label done;
  testq(value, Immediate(kHeapObjectTag));
  j(ZERO, &done);
  Stop("New value must be Smi.");
  Bind(&done);
#endif  // defined(DEBUG)
  movq(dest, value);
}

void Assembler::ZeroInitSmiField(const Address& dest) {
  Immediate zero(target::ToRawSmi(0));
  movq(dest, zero);
}

void Assembler::IncrementSmiField(const Address& dest, int64_t increment) {
  // Note: FlowGraphCompiler::EdgeCounterIncrementSizeInBytes depends on
  // the length of this instruction sequence.
  Immediate inc_imm(target::ToRawSmi(increment));
  addq(dest, inc_imm);
}

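// Bind resolves all forward references to |label|: each far link is patched
// with a 32-bit displacement measured from the end of its 4-byte field to the
// bound position, and each near link with the corresponding 8-bit
// displacement.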
void Assembler::Bind(Label* label) {
  intptr_t bound = buffer_.Size();
  ASSERT(!label->IsBound());  // Labels can only be bound once.
  while (label->IsLinked()) {
    intptr_t position = label->LinkPosition();
    intptr_t next = buffer_.Load<int32_t>(position);
    buffer_.Store<int32_t>(position, bound - (position + 4));
    label->position_ = next;
  }
  while (label->HasNear()) {
    intptr_t position = label->NearPosition();
    intptr_t offset = bound - (position + 1);
    ASSERT(Utils::IsInt(8, offset));
    buffer_.Store<int8_t>(position, offset);
  }
  label->BindTo(bound);
}

void Assembler::EnterFrame(intptr_t frame_size) {
  if (prologue_offset_ == -1) {
    prologue_offset_ = CodeSize();
    Comment("PrologueOffset = %" Pd "", CodeSize());
  }
#ifdef DEBUG
  intptr_t check_offset = CodeSize();
#endif
  pushq(RBP);
  movq(RBP, RSP);
#ifdef DEBUG
  ProloguePattern pp(CodeAddress(check_offset));
  ASSERT(pp.IsValid());
#endif
  if (frame_size != 0) {
    Immediate frame_space(frame_size);
    subq(RSP, frame_space);
  }
}

void Assembler::LeaveFrame() {
  movq(RSP, RBP);
  popq(RBP);
}

void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  // Reserve space for arguments and align frame before entering
  // the C++ world.
  if (frame_space != 0) {
    subq(RSP, Immediate(frame_space));
  }
  if (OS::ActivationFrameAlignment() > 1) {
    andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }
}

void Assembler::EmitEntryFrameVerification() {
#if defined(DEBUG)
  Label ok;
  leaq(RAX, Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
                             target::kWordSize));
  cmpq(RAX, RSP);
  j(EQUAL, &ok);
  Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
  Bind(&ok);
#endif
}

void Assembler::PushRegisters(intptr_t cpu_register_set,
                              intptr_t xmm_register_set) {
  const intptr_t xmm_regs_count = RegisterSet::RegisterCount(xmm_register_set);
  if (xmm_regs_count > 0) {
    AddImmediate(RSP, Immediate(-xmm_regs_count * kFpuRegisterSize));
    // Store XMM registers with the lowest register number at the lowest
    // address.
    intptr_t offset = 0;
    for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
      XmmRegister xmm_reg = static_cast<XmmRegister>(i);
      if (RegisterSet::Contains(xmm_register_set, xmm_reg)) {
        movups(Address(RSP, offset), xmm_reg);
        offset += kFpuRegisterSize;
      }
    }
    ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
  }

  // The order in which the registers are pushed must match the order
  // in which the registers are encoded in the safe point's stack map.
  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
    Register reg = static_cast<Register>(i);
    if (RegisterSet::Contains(cpu_register_set, reg)) {
      pushq(reg);
    }
  }
}

void Assembler::PopRegisters(intptr_t cpu_register_set,
                             intptr_t xmm_register_set) {
  for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    Register reg = static_cast<Register>(i);
    if (RegisterSet::Contains(cpu_register_set, reg)) {
      popq(reg);
    }
  }

  const intptr_t xmm_regs_count = RegisterSet::RegisterCount(xmm_register_set);
  if (xmm_regs_count > 0) {
    // XMM registers have the lowest register number at the lowest address.
    intptr_t offset = 0;
    for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
      XmmRegister xmm_reg = static_cast<XmmRegister>(i);
      if (RegisterSet::Contains(xmm_register_set, xmm_reg)) {
        movups(xmm_reg, Address(RSP, offset));
        offset += kFpuRegisterSize;
      }
    }
    ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
    AddImmediate(RSP, Immediate(offset));
  }
}

void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
  Comment("EnterCallRuntimeFrame");
  EnterFrame(0);
  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
    pushq(CODE_REG);
    pushq(PP);
  }

  // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
  PushRegisters(CallingConventions::kVolatileCpuRegisters,
                CallingConventions::kVolatileXmmRegisters);

  ReserveAlignedFrameSpace(frame_space);
}

void Assembler::LeaveCallRuntimeFrame() {
  // RSP might have been modified to reserve space for arguments
  // and ensure proper alignment of the stack frame.
  // We need to restore it before restoring registers.
  const intptr_t kPushedCpuRegistersCount =
      RegisterSet::RegisterCount(CallingConventions::kVolatileCpuRegisters);
  const intptr_t kPushedXmmRegistersCount =
      RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
  const intptr_t kPushedRegistersSize =
      kPushedCpuRegistersCount * target::kWordSize +
      kPushedXmmRegistersCount * kFpuRegisterSize +
      (target::frame_layout.dart_fixed_frame_size - 2) *
          target::kWordSize;  // From EnterStubFrame (excluding PC / FP)

  leaq(RSP, Address(RBP, -kPushedRegistersSize));

  // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
  PopRegisters(CallingConventions::kVolatileCpuRegisters,
               CallingConventions::kVolatileXmmRegisters);

  LeaveStubFrame();
}

void Assembler::CallCFunction(Register reg) {
  // Reserve shadow space for outgoing arguments.
  if (CallingConventions::kShadowSpaceBytes != 0) {
    subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
  }
  call(reg);
}
void Assembler::CallCFunction(Address address) {
  // Reserve shadow space for outgoing arguments.
  if (CallingConventions::kShadowSpaceBytes != 0) {
    subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
  }
  call(address);
}

void Assembler::CallRuntime(const RuntimeEntry& entry,
                            intptr_t argument_count) {
  entry.Call(this, argument_count);
}

void Assembler::RestoreCodePointer() {
  movq(CODE_REG,
       Address(RBP, target::frame_layout.code_from_fp * target::kWordSize));
}

void Assembler::LoadPoolPointer(Register pp) {
  // Load new pool pointer.
  CheckCodePointer();
  movq(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
  set_constant_pool_allowed(pp == PP);
}

void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
  ASSERT(!constant_pool_allowed());
  EnterFrame(0);
  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
    pushq(CODE_REG);
    pushq(PP);
    if (new_pp == kNoRegister) {
      LoadPoolPointer(PP);
    } else {
      movq(PP, new_pp);
    }
  }
  set_constant_pool_allowed(true);
  if (frame_size != 0) {
    subq(RSP, Immediate(frame_size));
  }
}

void Assembler::LeaveDartFrame(RestorePP restore_pp) {
  // Restore caller's PP register that was pushed in EnterDartFrame.
  if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
    if (restore_pp == kRestoreCallerPP) {
      movq(PP, Address(RBP, (target::frame_layout.saved_caller_pp_from_fp *
                             target::kWordSize)));
    }
  }
  set_constant_pool_allowed(false);
  LeaveFrame();
}

void Assembler::CheckCodePointer() {
#ifdef DEBUG
  if (!FLAG_check_code_pointer) {
    return;
  }
  Comment("CheckCodePointer");
  Label cid_ok, instructions_ok;
  pushq(RAX);
  LoadClassId(RAX, CODE_REG);
  cmpq(RAX, Immediate(kCodeCid));
  j(EQUAL, &cid_ok);
  int3();
  Bind(&cid_ok);
  {
    const intptr_t kRIPRelativeLeaqSize = 7;
    const intptr_t header_to_entry_offset =
        (target::Instructions::HeaderSize() - kHeapObjectTag);
    const intptr_t header_to_rip_offset =
        CodeSize() + kRIPRelativeLeaqSize + header_to_entry_offset;
    leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
    ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
  }
  cmpq(RAX, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
  j(EQUAL, &instructions_ok);
  int3();
  Bind(&instructions_ok);
  popq(RAX);
#endif
}

// On entry to a function compiled for OSR, the caller's frame pointer, the
// stack locals, and any copied parameters are already in place. The frame
// pointer is already set up. The PC marker is not correct for the
// optimized function and there may be extra space for spill slots to
// allocate.
void Assembler::EnterOsrFrame(intptr_t extra_size) {
  ASSERT(!constant_pool_allowed());
  if (prologue_offset_ == -1) {
    Comment("PrologueOffset = %" Pd "", CodeSize());
    prologue_offset_ = CodeSize();
  }
  RestoreCodePointer();
  LoadPoolPointer();

  if (extra_size != 0) {
    subq(RSP, Immediate(extra_size));
  }
}

void Assembler::EnterStubFrame() {
  EnterDartFrame(0, kNoRegister);
}

void Assembler::LeaveStubFrame() {
  LeaveDartFrame();
}

void Assembler::EnterCFrame(intptr_t frame_space) {
  EnterFrame(0);
  ReserveAlignedFrameSpace(frame_space);
}

void Assembler::LeaveCFrame() {
  LeaveFrame();
}

// RDX receiver, RBX ICData entries array
// Preserve R10 (ARGS_DESC_REG), not required today, but maybe later.
void Assembler::MonomorphicCheckedEntryJIT() {
  has_monomorphic_entry_ = true;
  intptr_t start = CodeSize();
  Label have_cid, miss;
  Bind(&miss);
  jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));

  // Ensure the monomorphic entry is 2-byte aligned (so the GC can see it if we
  // store it in ICData / MegamorphicCache arrays).
  nop(1);

  Comment("MonomorphicCheckedEntry");
  ASSERT_EQUAL(CodeSize() - start,
               target::Instructions::kMonomorphicEntryOffsetJIT);
  ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);

  const intptr_t cid_offset = target::Array::element_offset(0);
  const intptr_t count_offset = target::Array::element_offset(1);

  LoadTaggedClassIdMayBeSmi(RAX, RDX);

  cmpq(RAX, FieldAddress(RBX, cid_offset));
  j(NOT_EQUAL, &miss, Assembler::kNearJump);
  addl(FieldAddress(RBX, count_offset), Immediate(target::ToRawSmi(1)));
  xorq(R10, R10);  // GC-safe for OptimizeInvokedFunction.
  nop(1);

  // Fall through to unchecked entry.
  ASSERT_EQUAL(CodeSize() - start,
               target::Instructions::kPolymorphicEntryOffsetJIT);
  ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
}

// RBX - input: class id smi
// RDX - input: receiver object
void Assembler::MonomorphicCheckedEntryAOT() {
  has_monomorphic_entry_ = true;
  intptr_t start = CodeSize();
  Label have_cid, miss;
  Bind(&miss);
  jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));

  // Ensure the monomorphic entry is 2-byte aligned (so the GC can recognize
  // these entry points if we store them in ICData / MegamorphicCache arrays).
  nop(1);

  Comment("MonomorphicCheckedEntry");
  ASSERT_EQUAL(CodeSize() - start,
               target::Instructions::kMonomorphicEntryOffsetAOT);
  ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);

  SmiUntag(RBX);
  LoadClassId(RAX, RDX);
  cmpq(RAX, RBX);
  j(NOT_EQUAL, &miss, Assembler::kNearJump);

  // Ensure the unchecked entry is 2-byte aligned (so the GC can recognize
  // these entry points if we store them in ICData / MegamorphicCache arrays).
  nop(1);

  // Fall through to unchecked entry.
  ASSERT_EQUAL(CodeSize() - start,
               target::Instructions::kPolymorphicEntryOffsetAOT);
  ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
}

void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
  has_monomorphic_entry_ = true;
  while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
    int3();
  }
  jmp(label);
  while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
    int3();
  }
}

#ifndef PRODUCT
void Assembler::MaybeTraceAllocation(intptr_t cid,
                                     Label* trace,
                                     bool near_jump) {
  ASSERT(cid > 0);
  const intptr_t shared_table_offset =
      target::Isolate::shared_class_table_offset();
  const intptr_t table_offset =
      target::SharedClassTable::class_heap_stats_table_offset();
  const intptr_t class_offset = target::ClassTable::ClassOffsetFor(cid);

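  // The shared class table keeps a per-class trace flag; a non-zero byte at
  // this class's offset means allocations of this cid are being traced.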
  Register temp_reg = TMP;
  LoadIsolate(temp_reg);
  movq(temp_reg, Address(temp_reg, shared_table_offset));
  movq(temp_reg, Address(temp_reg, table_offset));
  cmpb(Address(temp_reg, class_offset), Immediate(0));
  // If tracing is enabled for this class, jump to the trace label, which will
  // use the allocation stub.
  j(NOT_ZERO, trace, near_jump);
}
#endif  // !PRODUCT

void Assembler::TryAllocate(const Class& cls,
                            Label* failure,
                            bool near_jump,
                            Register instance_reg,
                            Register temp) {
  ASSERT(failure != NULL);
  const intptr_t instance_size = target::Class::GetInstanceSize(cls);
  if (FLAG_inline_alloc &&
      target::Heap::IsAllocatableInNewSpace(instance_size)) {
    const classid_t cid = target::Class::GetId(cls);
    // If this allocation is traced, the program will jump to the failure path
    // (i.e. the allocation stub), which will allocate the object and trace
    // the allocation call site.
    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
    movq(instance_reg, Address(THR, target::Thread::top_offset()));
    addq(instance_reg, Immediate(instance_size));
    // instance_reg: potential next object start.
    cmpq(instance_reg, Address(THR, target::Thread::end_offset()));
    j(ABOVE_EQUAL, failure, near_jump);
    // Successfully allocated the object, now update top to point to
    // next object start and store the class in the class field of object.
    movq(Address(THR, target::Thread::top_offset()), instance_reg);
    ASSERT(instance_size >= kHeapObjectTag);
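    // Rewind instance_reg from the new top back to the start of the freshly
    // allocated object and apply the heap-object tag in a single addition.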
    AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size));
    const uint32_t tags =
        target::MakeTagWordForNewSpaceObject(cid, instance_size);
    // Extend the 32-bit tags with zeros; the zero upper half is the
    // uninitialized hash code.
    MoveImmediate(FieldAddress(instance_reg, target::Object::tags_offset()),
                  Immediate(tags));
  } else {
    jmp(failure);
  }
}

void Assembler::TryAllocateArray(intptr_t cid,
                                 intptr_t instance_size,
                                 Label* failure,
                                 bool near_jump,
                                 Register instance,
                                 Register end_address,
                                 Register temp) {
  ASSERT(failure != NULL);
  if (FLAG_inline_alloc &&
      target::Heap::IsAllocatableInNewSpace(instance_size)) {
    // If this allocation is traced, the program will jump to the failure path
    // (i.e. the allocation stub), which will allocate the object and trace
    // the allocation call site.
    NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
    movq(instance, Address(THR, target::Thread::top_offset()));
    movq(end_address, instance);

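    // For very large instance sizes the addition below can overflow; the
    // carry flag then signals that we must take the slow path.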
    addq(end_address, Immediate(instance_size));
    j(CARRY, failure);

    // Check if the allocation fits into the remaining space.
    // instance: potential new object start.
    // end_address: potential next object start.
    cmpq(end_address, Address(THR, target::Thread::end_offset()));
    j(ABOVE_EQUAL, failure);

    // Successfully allocated the object(s), now update top to point to
    // next object start and initialize the object.
    movq(Address(THR, target::Thread::top_offset()), end_address);
    addq(instance, Immediate(kHeapObjectTag));

    // Initialize the tags.
    // instance: new object start as a tagged pointer.
    const uint32_t tags =
        target::MakeTagWordForNewSpaceObject(cid, instance_size);
    // Extend the 32-bit tags with zeros; the zero upper half is the
    // uninitialized hash code.
    movq(FieldAddress(instance, target::Object::tags_offset()),
         Immediate(tags));
  } else {
    jmp(failure);
  }
}

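// Emits a `call rel32` (0xE8) with a zero displacement, then records the
// intended distance in the instruction via PcRelativeCallPattern so that a
// later relocation pass can turn it into the final PC-relative call. The
// tail-call variant below does the same with `jmp rel32` (0xE9).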
void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<uint8_t>(0xe8);
  buffer_.Emit<int32_t>(0);

  PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
                                PcRelativeCallPattern::kLengthInBytes);
  pattern.set_distance(offset_into_target);
}

void Assembler::GenerateUnRelocatedPcRelativeTailCall(
    intptr_t offset_into_target) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<uint8_t>(0xe9);
  buffer_.Emit<int32_t>(0);

  PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
                                PcRelativeCallPattern::kLengthInBytes);
  pattern.set_distance(offset_into_target);
}

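// Pads with multi-byte NOPs until (offset + current position) is a multiple
// of alignment. For example, at offset 0 and position 13 with alignment 16,
// three bytes of NOP padding are emitted.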
void Assembler::Align(int alignment, intptr_t offset) {
  ASSERT(Utils::IsPowerOfTwo(alignment));
  intptr_t pos = offset + buffer_.GetPosition();
  int mod = pos & (alignment - 1);
  if (mod == 0) {
    return;
  }
  intptr_t bytes_needed = alignment - mod;
  while (bytes_needed > MAX_NOP_SIZE) {
    nop(MAX_NOP_SIZE);
    bytes_needed -= MAX_NOP_SIZE;
  }
  if (bytes_needed != 0) {
    nop(bytes_needed);
  }
  ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
}

void Assembler::EmitOperand(int rm, const Operand& operand) {
  ASSERT(rm >= 0 && rm < 8);
  const intptr_t length = operand.length_;
  ASSERT(length > 0);
  // Emit the ModRM byte updated with the given RM value.
  ASSERT((operand.encoding_[0] & 0x38) == 0);
  EmitUint8(operand.encoding_[0] + (rm << 3));
  // Emit the rest of the encoded operand.
  for (intptr_t i = 1; i < length; i++) {
    EmitUint8(operand.encoding_[i]);
  }
}

void Assembler::EmitRegisterOperand(int rm, int reg) {
  Operand operand;
  operand.SetModRM(3, static_cast<Register>(reg));
  EmitOperand(rm, operand);
}

void Assembler::EmitImmediate(const Immediate& imm) {
  if (imm.is_int32()) {
    EmitInt32(static_cast<int32_t>(imm.value()));
  } else {
    EmitInt64(imm.value());
  }
}

void Assembler::EmitSignExtendedInt8(int rm,
                                     const Operand& operand,
                                     const Immediate& immediate) {
  EmitUint8(0x83);
  EmitOperand(rm, operand);
  EmitUint8(immediate.value() & 0xFF);
}

void Assembler::EmitComplex(int rm,
                            const Operand& operand,
                            const Immediate& immediate) {
  ASSERT(rm >= 0 && rm < 8);
  ASSERT(immediate.is_int32());
  if (immediate.is_int8()) {
    EmitSignExtendedInt8(rm, operand, immediate);
  } else if (operand.IsRegister(RAX)) {
    // Use short form if the destination is rax.
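    // 0x05 + (rm << 3) selects the rAX short-form opcode of the ALU group
    // (0x05 add, 0x0D or, 0x15 adc, 0x1D sbb, 0x25 and, 0x2D sub, 0x35 xor,
    // 0x3D cmp).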
    EmitUint8(0x05 + (rm << 3));
    EmitImmediate(immediate);
  } else {
    EmitUint8(0x81);
    EmitOperand(rm, operand);
    EmitImmediate(immediate);
  }
}

void Assembler::EmitLabel(Label* label, intptr_t instruction_size) {
  if (label->IsBound()) {
    intptr_t offset = label->Position() - buffer_.Size();
    ASSERT(offset <= 0);
    EmitInt32(offset - instruction_size);
  } else {
    EmitLabelLink(label);
  }
}

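// For a label that is not yet bound, the 32-bit displacement slot emitted
// here temporarily stores the position of the previous unresolved use,
// forming a chain through the code that is walked and patched once the label
// is finally bound.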
void Assembler::EmitLabelLink(Label* label) {
  ASSERT(!label->IsBound());
  intptr_t position = buffer_.Size();
  EmitInt32(label->position_);
  label->LinkTo(position);
}

void Assembler::EmitNearLabelLink(Label* label) {
  ASSERT(!label->IsBound());
  intptr_t position = buffer_.Size();
  EmitUint8(0);
  label->NearLinkTo(position);
}

void Assembler::EmitGenericShift(bool wide,
                                 int rm,
                                 Register reg,
                                 const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  ASSERT(imm.is_int8());
  if (wide) {
    EmitRegisterREX(reg, REX_W);
  } else {
    EmitRegisterREX(reg, REX_NONE);
  }
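  // Opcode 0xD1 encodes a shift by one; 0xC1 encodes a shift by an 8-bit
  // immediate (and 0xD3, used in the register variant below, shifts by CL).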
  if (imm.value() == 1) {
    EmitUint8(0xD1);
    EmitOperand(rm, Operand(reg));
  } else {
    EmitUint8(0xC1);
    EmitOperand(rm, Operand(reg));
    EmitUint8(imm.value() & 0xFF);
  }
}

void Assembler::EmitGenericShift(bool wide,
                                 int rm,
                                 Register operand,
                                 Register shifter) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  ASSERT(shifter == RCX);
  EmitRegisterREX(operand, wide ? REX_W : REX_NONE);
  EmitUint8(0xD3);
  EmitOperand(rm, Operand(operand));
}

void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
  ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
  ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
  movl(result, tags);
  shrl(result, Immediate(target::ObjectLayout::kClassIdTagPos));
}

void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
  ASSERT(target::ObjectLayout::kSizeTagPos == 8);
  ASSERT(target::ObjectLayout::kSizeTagSize == 8);
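  // The size tag stores the instance size in allocation units, so the result
  // is ((tags >> kSizeTagPos) & 0xFF) << kObjectAlignmentLog2, i.e. the
  // instance size in bytes.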
  movzxw(result, tags);
  shrl(result, Immediate(target::ObjectLayout::kSizeTagPos -
                         target::ObjectAlignment::kObjectAlignmentLog2));
  AndImmediate(result,
               Immediate(Utils::NBitMask(target::ObjectLayout::kSizeTagSize)
                         << target::ObjectAlignment::kObjectAlignmentLog2));
}

void Assembler::LoadClassId(Register result, Register object) {
  ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
  ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
  const intptr_t class_id_offset =
      target::Object::tags_offset() +
      target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
  movzxw(result, FieldAddress(object, class_id_offset));
}

void Assembler::LoadClassById(Register result, Register class_id) {
  ASSERT(result != class_id);
  const intptr_t table_offset =
      target::Isolate::cached_class_table_table_offset();

  LoadIsolate(result);
  movq(result, Address(result, table_offset));
  movq(result, Address(result, class_id, TIMES_8, 0));
}

void Assembler::CompareClassId(Register object,
                               intptr_t class_id,
                               Register scratch) {
  ASSERT(scratch == kNoRegister);
  LoadClassId(TMP, object);
  cmpl(TMP, Immediate(class_id));
}

void Assembler::SmiUntagOrCheckClass(Register object,
                                     intptr_t class_id,
                                     Label* is_smi) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
  ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
  const intptr_t class_id_offset =
      target::Object::tags_offset() +
      target::ObjectLayout::kClassIdTagPos / kBitsPerByte;

  // Untag optimistically. Tag bit is shifted into the CARRY.
  SmiUntag(object);
  j(NOT_CARRY, is_smi, kNearJump);
  // Load the cid: we can't use LoadClassId because the object pointer has
  // just been smi-untagged (shifted right by one); use a TIMES_2 scale factor
  // in the addressing mode to compensate.
  movzxw(TMP, Address(object, TIMES_2, class_id_offset));
  cmpl(TMP, Immediate(class_id));
}

void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
  Label smi;

  if (result == object) {
    Label join;

    testq(object, Immediate(kSmiTagMask));
    j(EQUAL, &smi, Assembler::kNearJump);
    LoadClassId(result, object);
    jmp(&join, Assembler::kNearJump);

    Bind(&smi);
    movq(result, Immediate(kSmiCid));

    Bind(&join);
  } else {
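    // movq does not affect the flags set by testq above, so kSmiCid can be
    // preloaded into result before the branch.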
    testq(object, Immediate(kSmiTagMask));
    movq(result, Immediate(kSmiCid));
    j(EQUAL, &smi, Assembler::kNearJump);
    LoadClassId(result, object);

    Bind(&smi);
  }
}

void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
  Label smi;

  if (result == object) {
    Label join;

    testq(object, Immediate(kSmiTagMask));
    j(EQUAL, &smi, Assembler::kNearJump);
    LoadClassId(result, object);
    SmiTag(result);
    jmp(&join, Assembler::kNearJump);

    Bind(&smi);
    movq(result, Immediate(target::ToRawSmi(kSmiCid)));

    Bind(&join);
  } else {
    testq(object, Immediate(kSmiTagMask));
    movq(result, Immediate(target::ToRawSmi(kSmiCid)));
    j(EQUAL, &smi, Assembler::kNearJump);
    LoadClassId(result, object);
    SmiTag(result);

    Bind(&smi);
  }
}

Address Assembler::VMTagAddress() {
  return Address(THR, target::Thread::vm_tag_offset());
}

Address Assembler::ElementAddressForIntIndex(bool is_external,
                                             intptr_t cid,
                                             intptr_t index_scale,
                                             Register array,
                                             intptr_t index) {
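  // With external (out-of-heap) data the array register already points at the
  // backing store, so the address is formed directly; internal objects
  // additionally need the payload offset for the cid and tagged (field)
  // addressing.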
  if (is_external) {
    return Address(array, index * index_scale);
  } else {
    const int64_t disp = static_cast<int64_t>(index) * index_scale +
                         target::Instance::DataOffsetFor(cid);
    ASSERT(Utils::IsInt(32, disp));
    return FieldAddress(array, static_cast<int32_t>(disp));
  }
}

static ScaleFactor ToScaleFactor(intptr_t index_scale, bool index_unboxed) {
  if (index_unboxed) {
    switch (index_scale) {
      case 1:
        return TIMES_1;
      case 2:
        return TIMES_2;
      case 4:
        return TIMES_4;
      case 8:
        return TIMES_8;
      case 16:
        return TIMES_16;
      default:
        UNREACHABLE();
        return TIMES_1;
    }
  } else {
    // Note that the index is expected to be smi-tagged (i.e. multiplied by 2)
    // for all arrays with an index scale factor > 1. E.g., for Uint8Array and
    // OneByteString the index is expected to be untagged before accessing.
    ASSERT(kSmiTagShift == 1);
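    // The smi tag already multiplies the index by 2, so each scale factor is
    // halved; e.g. a Float64List access (index_scale == 8) uses TIMES_4 on
    // the tagged index.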
    switch (index_scale) {
      case 1:
        return TIMES_1;
      case 2:
        return TIMES_1;
      case 4:
        return TIMES_2;
      case 8:
        return TIMES_4;
      case 16:
        return TIMES_8;
      default:
        UNREACHABLE();
        return TIMES_1;
    }
  }
}

Address Assembler::ElementAddressForRegIndex(bool is_external,
                                             intptr_t cid,
                                             intptr_t index_scale,
                                             bool index_unboxed,
                                             Register array,
                                             Register index) {
  if (is_external) {
    return Address(array, index, ToScaleFactor(index_scale, index_unboxed), 0);
  } else {
    return FieldAddress(array, index, ToScaleFactor(index_scale, index_unboxed),
                        target::Instance::DataOffsetFor(cid));
  }
}

}  // namespace compiler
}  // namespace dart

#endif  // defined(TARGET_ARCH_X64)