/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_ASSEMBLER_X86_HPP
#define CPU_X86_ASSEMBLER_X86_HPP

#include "asm/register.hpp"
#include "vm_version_x86.hpp"

class BiasedLockingCounters;

// Contains all the definitions needed for x86 assembly code generation.

// Calling convention
class Argument {
 public:
  enum {
#ifdef _LP64
#ifdef _WIN64
    n_int_register_parameters_c   = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ...)
#else
    n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ...)
#endif // _WIN64
    n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
    n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
#else
    n_register_parameters = 0          // 0 registers used to pass arguments
#endif // _LP64
  };
};
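
// Note (illustrative): arguments beyond these register counts are passed on
// the stack; on win64, for example, a fifth integer C argument goes on the
// stack because n_int_register_parameters_c == 4 there.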

#ifdef _LP64
// Symbolically name the register arguments used by the c calling convention.
// Windows is different from linux/solaris. So much for standards...

#ifdef _WIN64

REGISTER_DECLARATION(Register, c_rarg0, rcx);
REGISTER_DECLARATION(Register, c_rarg1, rdx);
REGISTER_DECLARATION(Register, c_rarg2, r8);
REGISTER_DECLARATION(Register, c_rarg3, r9);

REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);

#else

REGISTER_DECLARATION(Register, c_rarg0, rdi);
REGISTER_DECLARATION(Register, c_rarg1, rsi);
REGISTER_DECLARATION(Register, c_rarg2, rdx);
REGISTER_DECLARATION(Register, c_rarg3, rcx);
REGISTER_DECLARATION(Register, c_rarg4, r8);
REGISTER_DECLARATION(Register, c_rarg5, r9);

REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);

#endif // _WIN64

// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do any shuffling. A suitable jni method is non-static and takes a
// small number of arguments (two fewer args on windows).
//
// |-------------------------------------------------------|
// | c_rarg0  c_rarg1  c_rarg2  c_rarg3  c_rarg4  c_rarg5  |
// |-------------------------------------------------------|
// | rcx      rdx      r8       r9       rdi*     rsi*     | windows (* not a c_rarg)
// | rdi      rsi      rdx      rcx      r8       r9       | solaris/linux
// |-------------------------------------------------------|
// | j_rarg5  j_rarg0  j_rarg1  j_rarg2  j_rarg3  j_rarg4  |
// |-------------------------------------------------------|
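//
// For example (illustrative): for a non-static jni method, the JNIEnv* is
// materialized into c_rarg0 and the receiver arrives in c_rarg1 == j_rarg0,
// so the remaining java arguments are already sitting in the right registers.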

REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
// Windows runs out of register args here
#ifdef _WIN64
REGISTER_DECLARATION(Register, j_rarg3, rdi);
REGISTER_DECLARATION(Register, j_rarg4, rsi);
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);

REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);

REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile

REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15);   // callee-saved

#else
// rscratch1 will appear in 32bit code that is dead but of course must compile.
// Using noreg ensures that if the dead code is incorrectly live and executed it
// will cause an assertion failure
#define rscratch1 noreg
#define rscratch2 noreg

#endif // _LP64

// JSR 292
// On x86, the SP does not have to be saved when invoking method handle intrinsics
// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);

// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//
// Note: A register location is represented via a Register, not
//       via an address for efficiency & simplicity reasons.

class ArrayAddress;

class Address {
 public:
  enum ScaleFactor {
    no_scale = -1,
    times_1  =  0,
    times_2  =  1,
    times_4  =  2,
    times_8  =  3,
    times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
  };
  static ScaleFactor times(int size) {
    assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
    if (size == 8)  return times_8;
    if (size == 4)  return times_4;
    if (size == 2)  return times_2;
    return times_1;
  }
  static int scale_size(ScaleFactor scale) {
    assert(scale != no_scale, "");
    assert(((1 << (int)times_1) == 1 &&
            (1 << (int)times_2) == 2 &&
            (1 << (int)times_4) == 4 &&
            (1 << (int)times_8) == 8), "");
    return (1 << (int)scale);
  }

 private:
  Register         _base;
  Register         _index;
  XMMRegister      _xmmindex;
  ScaleFactor      _scale;
  int              _disp;
  bool             _isxmmindex;
  RelocationHolder _rspec;

  // Easily misused constructors; make them private.
  // %%% can we make these go away?
  NOT_LP64(Address(address loc, RelocationHolder spec);)
  Address(int disp, address loc, relocInfo::relocType rtype);
  Address(int disp, address loc, RelocationHolder spec);

 public:

  int disp() { return _disp; }
  // creation
  Address()
    : _base(noreg),
      _index(noreg),
      _xmmindex(xnoreg),
      _scale(no_scale),
      _disp(0),
      _isxmmindex(false) {
  }

  // No default displacement, otherwise a Register can be implicitly
  // converted to 0(Register), which is quite a different animal.

  Address(Register base, int disp)
    : _base(base),
      _index(noreg),
      _xmmindex(xnoreg),
      _scale(no_scale),
      _disp(disp),
      _isxmmindex(false) {
  }

  Address(Register base, Register index, ScaleFactor scale, int disp = 0)
    : _base (base),
      _index(index),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp (disp),
      _isxmmindex(false) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
    : _base (base),
      _index(index.register_or_noreg()),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp (disp + (index.constant_or_zero() * scale_size(scale))),
      _isxmmindex(false) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

  Address(Register base, XMMRegister index, ScaleFactor scale, int disp = 0)
    : _base (base),
      _index(noreg),
      _xmmindex(index),
      _scale(scale),
      _disp(disp),
      _isxmmindex(true) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }
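
  // Examples (illustrative):
  //   Address(rsp, 0)                          // [rsp]
  //   Address(rbx, rcx, Address::times_8, 16)  // [rbx + rcx*8 + 16]
  //   Address(rax, xmm1, Address::times_4)     // xmm-index form used for gather/scatter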

  Address plus_disp(int disp) const {
    Address a = (*this);
    a._disp += disp;
    return a;
  }
  Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
    Address a = (*this);
    a._disp += disp.constant_or_zero() * scale_size(scale);
    if (disp.is_register()) {
      assert(!a.index()->is_valid(), "competing indexes");
      a._index = disp.as_register();
      a._scale = scale;
    }
    return a;
  }
  bool is_same_address(Address a) const {
    // disregard _rspec
    return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
  }

  // The following two overloads are used in connection with the
  // ByteSize type (see sizes.hpp). They simplify the use of
  // ByteSize'd arguments in assembly code. Note that their equivalents
  // for the optimized build are the member functions with int disp
  // arguments above, since ByteSize is mapped to an int type in that case.
  //
  // Note: DO NOT introduce similar overloaded functions for WordSize
  // arguments as in the optimized mode, both ByteSize and WordSize
  // are mapped to the same type and thus the compiler cannot make a
  // distinction anymore (=> compiler errors).
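  //
  // For example (illustrative): Address(rdx, Method::const_offset()) accepts
  // the ByteSize returned by the offset function directly in debug builds.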

#ifdef ASSERT
  Address(Register base, ByteSize disp)
    : _base(base),
      _index(noreg),
      _xmmindex(xnoreg),
      _scale(no_scale),
      _disp(in_bytes(disp)),
      _isxmmindex(false) {
  }

  Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
    : _base(base),
      _index(index),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp(in_bytes(disp)),
      _isxmmindex(false) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }
  Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
    : _base (base),
      _index(index.register_or_noreg()),
      _xmmindex(xnoreg),
      _scale(scale),
      _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))),
      _isxmmindex(false) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }

#endif // ASSERT

  // accessors
  bool        uses(Register reg) const { return _base == reg || _index == reg; }
  Register    base()             const { return _base; }
  Register    index()            const { return _index; }
  XMMRegister xmmindex()         const { return _xmmindex; }
  ScaleFactor scale()            const { return _scale; }
  int         disp()             const { return _disp; }
  bool        isxmmindex()       const { return _isxmmindex; }

  // Convert the raw encoding form into the form expected by the constructor for
  // Address. An index of 4 (rsp) corresponds to having no index, so convert
  // that to noreg for the Address constructor.
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);

  static Address make_array(ArrayAddress);

 private:
  bool base_needs_rex() const {
    return _base != noreg && _base->encoding() >= 8;
  }

  bool index_needs_rex() const {
    return _index != noreg && _index->encoding() >= 8;
  }

  bool xmmindex_needs_rex() const {
    return _xmmindex != xnoreg && _xmmindex->encoding() >= 8;
  }

  relocInfo::relocType reloc() const { return _rspec.type(); }

  friend class Assembler;
  friend class MacroAssembler;
  friend class LIR_Assembler; // base/index/scale/disp
};

//
// AddressLiteral has been split out from Address because operands of this type
// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
// the few instructions that need to deal with address literals are unique and the
// MacroAssembler does not have to implement every instruction in the Assembler
// in order to search for address literals that may need special handling depending
// on the instruction and the platform. A small step on the way to merging the
// i486/amd64 directories.
//
class AddressLiteral {
  friend class ArrayAddress;
  RelocationHolder _rspec;
  // Typically when we use an AddressLiteral we want its rval.
  // However in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  // (See the example following this class.)
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to
  // a register to reach it. Otherwise if near we can do rip
  // relative addressing.

  address          _target;

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:


  AddressLiteral(address target, relocInfo::relocType rtype);

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }


 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
};
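
// For example (an illustrative MacroAssembler-level sketch; p is a hypothetical
// VM-global address):
//   __ movptr(rax, ExternalAddress(p));   // rval: load the value stored at p
//   __ lea(rax, ExternalAddress(p));      // lval: load the address p itself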

// Convenience classes
class RuntimeAddress: public AddressLiteral {

  public:

  RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}

};

class ExternalAddress: public AddressLiteral {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:

  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}

};

class InternalAddress: public AddressLiteral {

  public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};
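
// Typical uses (an illustrative MacroAssembler-level sketch; stub_entry and
// vm_global are hypothetical addresses):
//   __ call(RuntimeAddress(stub_entry));   // call through a runtime entry point
//   __ jump(ExternalAddress(vm_global));   // jump to an address outside the code cache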

// x86 can do array addressing as a single operation since disp can be an absolute
// address; amd64 can't. We create a class that expresses the concept but does extra
// magic on amd64 to get the final result.

class ArrayAddress {
 private:

  AddressLiteral _base;
  Address        _index;

 public:

  ArrayAddress() {}
  ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}
  AddressLiteral base() { return _base; }
  Address index() { return _index; }

};
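
// For example (illustrative; table is a hypothetical C-heap table):
//   ArrayAddress(ExternalAddress(table), Address(noreg, rbx, Address::times_4))
// denotes the location table + rbx*4; Address::make_array() above performs
// the conversion.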

class InstructionAttr;

// The FPU state size. On 64-bit this reflects the fxsave size, which is 512 bytes,
// plus the new xsave area used with EVEX, which is another 2176 bytes.
// See the fxsave and xsave (EVEX enabled) documentation for the layout.
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);

// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
// is what you get. The Assembler is generating code into a CodeBuffer.

class Assembler : public AbstractAssembler  {
  friend class AbstractAssembler; // for the non-virtual hack
  friend class LIR_Assembler;     // as_Address()
  friend class StubGenerator;

 public:
  enum Condition {                     // The x86 condition codes used for conditional jumps/moves.
    zero          = 0x4,
    notZero       = 0x5,
    equal         = 0x4,
    notEqual      = 0x5,
    less          = 0xc,
    lessEqual     = 0xe,
    greater       = 0xf,
    greaterEqual  = 0xd,
    below         = 0x2,
    belowEqual    = 0x6,
    above         = 0x7,
    aboveEqual    = 0x3,
    overflow      = 0x0,
    noOverflow    = 0x1,
    carrySet      = 0x2,
    carryClear    = 0x3,
    negative      = 0x8,
    positive      = 0x9,
    parity        = 0xa,
    noParity      = 0xb
  };
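
  // For example (illustrative), after cmpl(rax, rbx):
  //   jcc(less, L);    // taken iff rax < rbx as signed 32-bit values
  //   jcc(below, L);   // taken iff rax < rbx as unsigned 32-bit values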

  enum Prefix {
    // segment overrides
    CS_segment = 0x2e,
    SS_segment = 0x36,
    DS_segment = 0x3e,
    ES_segment = 0x26,
    FS_segment = 0x64,
    GS_segment = 0x65,

    REX        = 0x40,

    REX_B      = 0x41,
    REX_X      = 0x42,
    REX_XB     = 0x43,
    REX_R      = 0x44,
    REX_RB     = 0x45,
    REX_RX     = 0x46,
    REX_RXB    = 0x47,

    REX_W      = 0x48,

    REX_WB     = 0x49,
    REX_WX     = 0x4A,
    REX_WXB    = 0x4B,
    REX_WR     = 0x4C,
    REX_WRB    = 0x4D,
    REX_WRX    = 0x4E,
    REX_WRXB   = 0x4F,

    VEX_3bytes = 0xC4,
    VEX_2bytes = 0xC5,
    EVEX_4bytes = 0x62,
    Prefix_EMPTY = 0x0
  };
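
  // A REX prefix has the form 0b0100WRXB, so the values above are simply
  // REX (0x40) with the relevant bits or'ed in, e.g.
  // REX_WB == 0x49 == REX | 0x08 (W) | 0x01 (B).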

  enum VexPrefix {
    VEX_B = 0x20,
    VEX_X = 0x40,
    VEX_R = 0x80,
    VEX_W = 0x80
  };

  enum ExexPrefix {
    EVEX_F  = 0x04,
    EVEX_V  = 0x08,
    EVEX_Rb = 0x10,
    EVEX_X  = 0x40,
    EVEX_Z  = 0x80
  };

  enum VexSimdPrefix {
    VEX_SIMD_NONE = 0x0,
    VEX_SIMD_66   = 0x1,
    VEX_SIMD_F3   = 0x2,
    VEX_SIMD_F2   = 0x3
  };

  enum VexOpcode {
    VEX_OPCODE_NONE  = 0x0,
    VEX_OPCODE_0F    = 0x1,
    VEX_OPCODE_0F_38 = 0x2,
    VEX_OPCODE_0F_3A = 0x3,
    VEX_OPCODE_MASK  = 0x1F
  };

  enum AvxVectorLen {
    AVX_128bit = 0x0,
    AVX_256bit = 0x1,
    AVX_512bit = 0x2,
    AVX_NoVec  = 0x4
  };
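
  // These are the vector_len arguments accepted by the AVX/EVEX instructions
  // declared below, e.g. (illustrative):
  //   vpabsd(xmm0, xmm1, AVX_256bit);   // 256-bit (ymm) operation
  //   vpabsd(xmm0, xmm1, AVX_512bit);   // 512-bit (zmm) operation, EVEX-encoded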

  enum EvexTupleType {
    EVEX_FV   = 0,
    EVEX_HV   = 4,
    EVEX_FVM  = 6,
    EVEX_T1S  = 7,
    EVEX_T1F  = 11,
    EVEX_T2   = 13,
    EVEX_T4   = 15,
    EVEX_T8   = 17,
    EVEX_HVM  = 18,
    EVEX_QVM  = 19,
    EVEX_OVM  = 20,
    EVEX_M128 = 21,
    EVEX_DUP  = 22,
    EVEX_ETUP = 23
  };

  enum EvexInputSizeInBits {
    EVEX_8bit  = 0,
    EVEX_16bit = 1,
    EVEX_32bit = 2,
    EVEX_64bit = 3,
    EVEX_NObit = 4
  };

  enum WhichOperand {
    // input to locate_operand, and format code for relocations
    imm_operand    = 0,          // embedded 32-bit|64-bit immediate operand
    disp32_operand = 1,          // embedded 32-bit displacement or address
    call32_operand = 2,          // embedded 32-bit self-relative displacement
#ifndef _LP64
    _WhichOperand_limit = 3
#else
    narrow_oop_operand  = 3,     // embedded 32-bit immediate narrow oop
    _WhichOperand_limit = 4
#endif
  };

  enum ComparisonPredicate {
    eq     = 0,
    lt     = 1,
    le     = 2,
    _false = 3,
    neq    = 4,
    nlt    = 5,
    nle    = 6,
    _true  = 7
  };
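
  // These are the vcc arguments of the EVEX unsigned compares below,
  // e.g. (illustrative):
  //   evpcmpuw(k1, xmm0, xmm1, lt, AVX_512bit);  // k1[i] = (xmm0 word i < xmm1 word i)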

  //---<  calculate length of instruction  >---
  // As instruction size can't be found out easily on x86/x64,
  // we just use '4' for len and maxlen.
  // instruction must start at passed address
  static unsigned int instr_len(unsigned char *instr) { return 4; }

  //---<  longest instructions  >---
  // Max instruction length is not specified in architecture documentation.
  // We could use a "safe enough" estimate (15), but just default to
  // instruction length guess from above.
  static unsigned int instr_maxlen() { return 4; }

  // NOTE: The general philosophy of the declarations here is that 64bit versions
  // of instructions are freely declared without the need for wrapping them in an ifdef.
  // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
  // In the .cpp file the implementations are wrapped so that they are dropped out
  // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
  // to the size it was prior to merging up the 32bit and 64bit assemblers.
  //
  // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
  // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.

private:

  bool _legacy_mode_bw;
  bool _legacy_mode_dq;
  bool _legacy_mode_vl;
  bool _legacy_mode_vlbw;
  bool _is_managed;
  bool _vector_masking;    // For stub code use only

  class InstructionAttr *_attributes;

  // 64bit prefixes
  int prefix_and_encode(int reg_enc, bool byteinst = false);
  int prefixq_and_encode(int reg_enc);

  int prefix_and_encode(int dst_enc, int src_enc) {
    return prefix_and_encode(dst_enc, false, src_enc, false);
  }
  int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
  int prefixq_and_encode(int dst_enc, int src_enc);

  void prefix(Register reg);
  void prefix(Register dst, Register src, Prefix p);
  void prefix(Register dst, Address adr, Prefix p);
  void prefix(Address adr);
  void prefixq(Address adr);

  void prefix(Address adr, Register reg,  bool byteinst = false);
  void prefix(Address adr, XMMRegister reg);
  void prefixq(Address adr, Register reg);
  void prefixq(Address adr, XMMRegister reg);

  void prefetch_prefix(Address src);

  void rex_prefix(Address adr, XMMRegister xreg,
                  VexSimdPrefix pre, VexOpcode opc, bool rex_w);
  int  rex_prefix_and_encode(int dst_enc, int src_enc,
                             VexSimdPrefix pre, VexOpcode opc, bool rex_w);

  void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);

  void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
                   int nds_enc, VexSimdPrefix pre, VexOpcode opc);

  void vex_prefix(Address adr, int nds_enc, int xreg_enc,
                  VexSimdPrefix pre, VexOpcode opc,
                  InstructionAttr *attributes);

  int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                             VexSimdPrefix pre, VexOpcode opc,
                             InstructionAttr *attributes);

  void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                   VexOpcode opc, InstructionAttr *attributes);

  int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                             VexOpcode opc, InstructionAttr *attributes);

  // Helper functions for groups of instructions
  void emit_arith_b(int op1, int op2, Register dst, int imm8);

  void emit_arith(int op1, int op2, Register dst, int32_t imm32);
  // Force generation of a 4 byte immediate value even if it fits into 8 bits
  void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
  void emit_arith(int op1, int op2, Register dst, Register src);

  bool emit_compressed_disp_byte(int &disp);

  void emit_operand(Register reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec,
                    int rip_relative_correction = 0);

  void emit_operand(XMMRegister reg, Register base, XMMRegister index,
                    Address::ScaleFactor scale,
                    int disp, RelocationHolder const& rspec);

  void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);

  // operands that only take the original 32bit registers
  void emit_operand32(Register reg, Address adr);

  void emit_operand(XMMRegister reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec);

  void emit_operand(XMMRegister reg, Address adr);

  void emit_operand(MMXRegister reg, Address adr);

  // workaround for a gcc (3.2.1-7) bug
  void emit_operand(Address adr, MMXRegister reg);


  // Immediate-to-memory forms
  void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);

  void emit_farith(int b1, int b2, int i);


 protected:
#ifdef ASSERT
  void check_relocation(RelocationHolder const& rspec, int format);
#endif

  void emit_data(jint data, relocInfo::relocType rtype, int format);
  void emit_data(jint data, RelocationHolder const& rspec, int format);
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);

  bool reachable(AddressLiteral adr) NOT_LP64({ return true;});

  // These are all easily abused and hence protected

  // 32BIT ONLY SECTION
#ifndef _LP64
  // Make these disappear in 64bit mode since they would never be correct
  void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
  void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY

  void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
  void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY

  void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
#else
  // 64BIT ONLY SECTION
  void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY

  void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
  void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);

  void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
  void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
#endif // _LP64

  // These are unique in that we are ensured by the caller that the 32bit
  // relative displacement in these instructions will always be able to reach
  // the potentially 64bit address described by entry. Since they can take a
  // 64bit address they don't have the 32 suffix like the other instructions
  // in this class.

  void call_literal(address entry, RelocationHolder const& rspec);
  void jmp_literal(address entry, RelocationHolder const& rspec);

  // Avoid-using-directly section.
  // Instructions in this section are actually usable by anyone without danger
  // of failure, but they have performance issues that are addressed by enhanced
  // instructions which will do the proper thing based on the particular cpu.
  // We protect them because we don't trust you...

  // Don't use the inc() and dec() methods below directly. INC & DEC instructions
  // can cause a partial flag stall since they don't set the CF flag.
  // Use the MacroAssembler::decrement() & MacroAssembler::increment() methods
  // which call inc() & dec() or add() & sub() in accordance with
  // the product flag UseIncDec value.
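  //
  // For example (illustrative): MacroAssembler::incrementl(rax) expands to
  // incl(rax) when UseIncDec is on, and to addl(rax, 1) otherwise.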

  void decl(Register dst);
  void decl(Address dst);
  void decq(Register dst);
  void decq(Address dst);

  void incl(Register dst);
  void incl(Address dst);
  void incq(Register dst);
  void incq(Address dst);

  // New cpus require use of movsd and movss to avoid partial register stall
  // when loading from memory. But for old Opteron use movlpd instead of movsd.
  // The selection is done in MacroAssembler::movdbl() and movflt().
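  //
  // For example (illustrative): MacroAssembler::movdbl(xmm0, src) picks
  // movsd or movlpd for loads based on UseXmmLoadAndClearUpper, so callers
  // never choose between them directly.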

  // Move Scalar Single-Precision Floating-Point Values
  void movss(XMMRegister dst, Address src);
  void movss(XMMRegister dst, XMMRegister src);
  void movss(Address dst, XMMRegister src);

  // Move Scalar Double-Precision Floating-Point Values
  void movsd(XMMRegister dst, Address src);
  void movsd(XMMRegister dst, XMMRegister src);
  void movsd(Address dst, XMMRegister src);
  void movlpd(XMMRegister dst, Address src);

  // New cpus require use of movaps and movapd to avoid partial register stall
  // when moving between registers.
  void movaps(XMMRegister dst, XMMRegister src);
  void movapd(XMMRegister dst, XMMRegister src);

  // End avoid using directly


  // Instruction prefixes
  void prefix(Prefix p);

  public:

  // Creation
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
    init_attributes();
  }

  // Decoding
  static address locate_operand(address inst, WhichOperand which);
  static address locate_next_instruction(address inst);

  // Utilities
  static bool is_polling_page_far() NOT_LP64({ return false;});
  static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                         int cur_tuple_type, int in_size_in_bits, int cur_encoding);

  // Generic instructions
  // Does 32bit or 64bit as needed for the platform. In some sense these
  // belong in the macro assembler, but there is no need for both varieties to exist.

  void init_attributes(void) {
    _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
    _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
    _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
    _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
    _is_managed = false;
    _vector_masking = false;
    _attributes = NULL;
  }

  void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
  void clear_attributes(void) { _attributes = NULL; }

  void set_managed(void) { _is_managed = true; }
  void clear_managed(void) { _is_managed = false; }
  bool is_managed(void) { return _is_managed; }

  void lea(Register dst, Address src);

  void mov(Register dst, Register src);

  void pusha();
  void popa();

  void pushf();
  void popf();

  void push(int32_t imm32);

  void push(Register src);

  void pop(Register dst);

  // These are dummies to prevent surprise implicit conversions to Register
  void push(void* v);
  void pop(void* v);

  // These do register sized moves/scans
  void rep_mov();
  void rep_stos();
  void rep_stosb();
  void repne_scan();
#ifdef _LP64
  void repne_scanl();
#endif

  // Vanilla instructions in lexical order

  void adcl(Address dst, int32_t imm32);
  void adcl(Address dst, Register src);
  void adcl(Register dst, int32_t imm32);
  void adcl(Register dst, Address src);
  void adcl(Register dst, Register src);

  void adcq(Register dst, int32_t imm32);
  void adcq(Register dst, Address src);
  void adcq(Register dst, Register src);

  void addb(Address dst, int imm8);
  void addw(Address dst, int imm16);

  void addl(Address dst, int32_t imm32);
  void addl(Address dst, Register src);
  void addl(Register dst, int32_t imm32);
  void addl(Register dst, Address src);
  void addl(Register dst, Register src);

  void addq(Address dst, int32_t imm32);
  void addq(Address dst, Register src);
  void addq(Register dst, int32_t imm32);
  void addq(Register dst, Address src);
  void addq(Register dst, Register src);

#ifdef _LP64
  // Add Unsigned Integers with Carry Flag
  void adcxq(Register dst, Register src);

  // Add Unsigned Integers with Overflow Flag
  void adoxq(Register dst, Register src);
#endif

  void addr_nop_4();
  void addr_nop_5();
  void addr_nop_7();
  void addr_nop_8();

  // Add Scalar Double-Precision Floating-Point Values
  void addsd(XMMRegister dst, Address src);
  void addsd(XMMRegister dst, XMMRegister src);

  // Add Scalar Single-Precision Floating-Point Values
  void addss(XMMRegister dst, Address src);
  void addss(XMMRegister dst, XMMRegister src);

  // AES instructions
  void aesdec(XMMRegister dst, Address src);
  void aesdec(XMMRegister dst, XMMRegister src);
  void aesdeclast(XMMRegister dst, Address src);
  void aesdeclast(XMMRegister dst, XMMRegister src);
  void aesenc(XMMRegister dst, Address src);
  void aesenc(XMMRegister dst, XMMRegister src);
  void aesenclast(XMMRegister dst, Address src);
  void aesenclast(XMMRegister dst, XMMRegister src);
  void vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void andl(Address dst, int32_t imm32);
  void andl(Register dst, int32_t imm32);
  void andl(Register dst, Address src);
  void andl(Register dst, Register src);

  void andq(Address dst, int32_t imm32);
  void andq(Register dst, int32_t imm32);
  void andq(Register dst, Address src);
  void andq(Register dst, Register src);

  // BMI instructions
  void andnl(Register dst, Register src1, Register src2);
  void andnl(Register dst, Register src1, Address src2);
  void andnq(Register dst, Register src1, Register src2);
  void andnq(Register dst, Register src1, Address src2);

  void blsil(Register dst, Register src);
  void blsil(Register dst, Address src);
  void blsiq(Register dst, Register src);
  void blsiq(Register dst, Address src);

  void blsmskl(Register dst, Register src);
  void blsmskl(Register dst, Address src);
  void blsmskq(Register dst, Register src);
  void blsmskq(Register dst, Address src);

  void blsrl(Register dst, Register src);
  void blsrl(Register dst, Address src);
  void blsrq(Register dst, Register src);
  void blsrq(Register dst, Address src);

  void bsfl(Register dst, Register src);
  void bsrl(Register dst, Register src);

#ifdef _LP64
  void bsfq(Register dst, Register src);
  void bsrq(Register dst, Register src);
#endif

  void bswapl(Register reg);

  void bswapq(Register reg);

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register reg);  // push pc; pc <- reg
  void call(Address adr);   // push pc; pc <- adr

  void cdql();

  void cdqq();

  void cld();

  void clflush(Address adr);

  void cmovl(Condition cc, Register dst, Register src);
  void cmovl(Condition cc, Register dst, Address src);

  void cmovq(Condition cc, Register dst, Register src);
  void cmovq(Condition cc, Register dst, Address src);


  void cmpb(Address dst, int imm8);

  void cmpl(Address dst, int32_t imm32);

  void cmpl(Register dst, int32_t imm32);
  void cmpl(Register dst, Register src);
  void cmpl(Register dst, Address src);

  void cmpq(Address dst, int32_t imm32);
  void cmpq(Address dst, Register src);

  void cmpq(Register dst, int32_t imm32);
  void cmpq(Register dst, Register src);
  void cmpq(Register dst, Address src);

  // These are dummies used to catch attempts to convert NULL to a Register
  void cmpl(Register dst, void* junk); // dummy
  void cmpq(Register dst, void* junk); // dummy

  void cmpw(Address dst, int imm16);

  void cmpxchg8 (Address adr);

  void cmpxchgb(Register reg, Address adr);
  void cmpxchgl(Register reg, Address adr);

  void cmpxchgq(Register reg, Address adr);

  // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void comisd(XMMRegister dst, Address src);
  void comisd(XMMRegister dst, XMMRegister src);

  // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void comiss(XMMRegister dst, Address src);
  void comiss(XMMRegister dst, XMMRegister src);

  // Identify processor type and features
  void cpuid();

  // CRC32C
  void crc32(Register crc, Register v, int8_t sizeInBytes);
  void crc32(Register crc, Address adr, int8_t sizeInBytes);

  // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
  void cvtsd2ss(XMMRegister dst, XMMRegister src);
  void cvtsd2ss(XMMRegister dst, Address src);

  // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
  void cvtsi2sdl(XMMRegister dst, Register src);
  void cvtsi2sdl(XMMRegister dst, Address src);
  void cvtsi2sdq(XMMRegister dst, Register src);
  void cvtsi2sdq(XMMRegister dst, Address src);

  // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
  void cvtsi2ssl(XMMRegister dst, Register src);
  void cvtsi2ssl(XMMRegister dst, Address src);
  void cvtsi2ssq(XMMRegister dst, Register src);
  void cvtsi2ssq(XMMRegister dst, Address src);

  // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Values
  void cvtdq2pd(XMMRegister dst, XMMRegister src);

  // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Values
  void cvtdq2ps(XMMRegister dst, XMMRegister src);

  // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
  void cvtss2sd(XMMRegister dst, XMMRegister src);
  void cvtss2sd(XMMRegister dst, Address src);

  // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
  void cvttsd2sil(Register dst, Address src);
  void cvttsd2sil(Register dst, XMMRegister src);
  void cvttsd2siq(Register dst, XMMRegister src);

  // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
  void cvttss2sil(Register dst, XMMRegister src);
  void cvttss2siq(Register dst, XMMRegister src);

  void cvttpd2dq(XMMRegister dst, XMMRegister src);

  // Absolute value of packed integer values
  void pabsb(XMMRegister dst, XMMRegister src);
  void pabsw(XMMRegister dst, XMMRegister src);
  void pabsd(XMMRegister dst, XMMRegister src);
  void vpabsb(XMMRegister dst, XMMRegister src, int vector_len);
  void vpabsw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpabsd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpabsq(XMMRegister dst, XMMRegister src, int vector_len);

  // Divide Scalar Double-Precision Floating-Point Values
  void divsd(XMMRegister dst, Address src);
  void divsd(XMMRegister dst, XMMRegister src);

  // Divide Scalar Single-Precision Floating-Point Values
  void divss(XMMRegister dst, Address src);
  void divss(XMMRegister dst, XMMRegister src);

  void emms();

  void fabs();

  void fadd(int i);

  void fadd_d(Address src);
  void fadd_s(Address src);

  // "Alternate" versions of x87 instructions place result down in FPU
  // stack instead of on TOS

  void fadda(int i); // "alternate" fadd
  void faddp(int i = 1);

  void fchs();

  void fcom(int i);

  void fcomp(int i = 1);
  void fcomp_d(Address src);
  void fcomp_s(Address src);

  void fcompp();

  void fcos();

  void fdecstp();

  void fdiv(int i);
  void fdiv_d(Address src);
  void fdivr_s(Address src);
  void fdiva(int i);  // "alternate" fdiv
  void fdivp(int i = 1);

  void fdivr(int i);
  void fdivr_d(Address src);
  void fdiv_s(Address src);

  void fdivra(int i); // "alternate" reversed fdiv

  void fdivrp(int i = 1);

  void ffree(int i = 0);

  void fild_d(Address adr);
  void fild_s(Address adr);

  void fincstp();

  void finit();

  void fist_s (Address adr);
  void fistp_d(Address adr);
  void fistp_s(Address adr);

  void fld1();

  void fld_d(Address adr);
  void fld_s(Address adr);
  void fld_s(int index);
  void fld_x(Address adr);  // extended-precision (80-bit) format

  void fldcw(Address src);

  void fldenv(Address src);

  void fldlg2();

  void fldln2();

  void fldz();

  void flog();
  void flog10();

  void fmul(int i);

  void fmul_d(Address src);
  void fmul_s(Address src);

  void fmula(int i);  // "alternate" fmul

  void fmulp(int i = 1);

  void fnsave(Address dst);

  void fnstcw(Address src);

  void fnstsw_ax();

  void fprem();
  void fprem1();

  void frstor(Address src);

  void fsin();

  void fsqrt();

  void fst_d(Address adr);
  void fst_s(Address adr);

  void fstp_d(Address adr);
  void fstp_d(int index);
  void fstp_s(Address adr);
  void fstp_x(Address adr); // extended-precision (80-bit) format

  void fsub(int i);
  void fsub_d(Address src);
  void fsub_s(Address src);

  void fsuba(int i);  // "alternate" fsub

  void fsubp(int i = 1);

  void fsubr(int i);
  void fsubr_d(Address src);
  void fsubr_s(Address src);

  void fsubra(int i); // "alternate" reversed fsub

  void fsubrp(int i = 1);

  void ftan();

  void ftst();

  void fucomi(int i = 1);
  void fucomip(int i = 1);

  void fwait();

  void fxch(int i = 1);

  void fxrstor(Address src);
  void xrstor(Address src);

  void fxsave(Address dst);
  void xsave(Address dst);

  void fyl2x();
  void frndint();
  void f2xm1();
  void fldl2e();

  void hlt();

  void idivl(Register src);
  void divl(Register src); // Unsigned division

#ifdef _LP64
  void idivq(Register src);
#endif

  void imull(Register src);
  void imull(Register dst, Register src);
  void imull(Register dst, Register src, int value);
  void imull(Register dst, Address src);

#ifdef _LP64
  void imulq(Register dst, Register src);
  void imulq(Register dst, Register src, int value);
  void imulq(Register dst, Address src);
#endif

  // jcc is the generic conditional branch generator, used both for branches
  // to run-time routines and for branches to labels. jcc takes a branch
  // opcode (cc) and a label (L) and generates either a backward branch or a
  // forward branch and links it to the label fixup chain. Usage:
  //
  // Label L;      // unbound label
  // jcc(cc, L);   // forward branch to unbound label
  // bind(L);      // bind label to the current pc
  // jcc(cc, L);   // backward branch to bound label
  // bind(L);      // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.

  void jcc(Condition cc, Label& L, bool maybe_short = true);

  // Conditional jump with an 8-bit offset to L.
  // WARNING: be very careful using this for forward jumps. If the label is
  // not bound within an 8-bit offset of this instruction, a run-time error
  // will occur.

  // Use macro to record file and line number.
  #define jccb(cc, L) jccb_0(cc, L, __FILE__, __LINE__)

  void jccb_0(Condition cc, Label& L, const char* file, int line);

  void jmp(Address entry);    // pc <- entry

  // Label operations & relative jumps (PPUM Appendix D)
  void jmp(Label& L, bool maybe_short = true);   // unconditional jump to L

  void jmp(Register entry); // pc <- entry

  // Unconditional 8-bit offset jump to L.
  // WARNING: be very careful using this for forward jumps. If the label is
  // not bound within an 8-bit offset of this instruction, a run-time error
  // will occur.

  // Use macro to record file and line number.
  #define jmpb(L) jmpb_0(L, __FILE__, __LINE__)

  void jmpb_0(Label& L, const char* file, int line);

  void ldmxcsr(Address src);

  void leal(Register dst, Address src);

  void leaq(Register dst, Address src);

  void lfence();

  void lock();

  void lzcntl(Register dst, Register src);

#ifdef _LP64
  void lzcntq(Register dst, Register src);
#endif

  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };

  // Serializes memory and blows flags
  void membar(Membar_mask_bits order_constraint) {
    // We only have to handle StoreLoad
    if (order_constraint & StoreLoad) {
      // All usable chips support "locked" instructions which suffice
      // as barriers, and are much faster than the alternative of
      // using the cpuid instruction. Here we use a locked add [esp-C], 0.
      // This is conveniently otherwise a no-op except for blowing
      // flags, and introducing a false dependency on target memory
      // location. We can't do anything with flags, but we can avoid
      // memory dependencies in the current method by locked-adding
      // somewhere else on the stack. Doing [esp+C] will collide with
      // something on the stack in the current method, hence we go for [esp-C].
      // It is convenient since it is almost always in data cache, for
      // any small C. We need to step back from SP to avoid data
      // dependencies with other things on below SP (callee-saves, for
      // example). Without a clear way to figure out the minimal safe
      // distance from SP, it makes sense to step back the complete
      // cache line, as this will also avoid possible second-order effects
      // with locked ops against the cache line. Our choice of offset
      // is bounded by x86 operand encoding, which should stay within
      // [-128; +127] to have the 8-bit displacement encoding.
      //
      // Any change to this code may need to revisit other places in
      // the code where this idiom is used, in particular the
      // orderAccess code.

      int offset = -VM_Version::L1_line_size();
      if (offset < -128) {
        offset = -128;
      }

      lock();
      addl(Address(rsp, offset), 0); // Assert the lock# signal here
    }
  }
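
  // For example (illustrative):
  //   membar(Membar_mask_bits(LoadLoad | LoadStore)); // emits nothing: x86 TSO already orders these
  //   membar(StoreLoad);                              // emits the locked addl above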

  void mfence();

  // Moves

  void mov64(Register dst, int64_t imm64);

  void movb(Address dst, Register src);
  void movb(Address dst, int imm8);
  void movb(Register dst, Address src);

  void movddup(XMMRegister dst, XMMRegister src);

  void kmovbl(KRegister dst, Register src);
  void kmovbl(Register dst, KRegister src);
  void kmovwl(KRegister dst, Register src);
  void kmovwl(KRegister dst, Address src);
  void kmovwl(Register dst, KRegister src);
  void kmovdl(KRegister dst, Register src);
  void kmovdl(Register dst, KRegister src);
  void kmovql(KRegister dst, KRegister src);
  void kmovql(Address dst, KRegister src);
  void kmovql(KRegister dst, Address src);
  void kmovql(KRegister dst, Register src);
  void kmovql(Register dst, KRegister src);

  void knotwl(KRegister dst, KRegister src);

  void kortestbl(KRegister dst, KRegister src);
  void kortestwl(KRegister dst, KRegister src);
  void kortestdl(KRegister dst, KRegister src);
  void kortestql(KRegister dst, KRegister src);

  void ktestq(KRegister src1, KRegister src2);
  void ktestd(KRegister src1, KRegister src2);

  void ktestql(KRegister dst, KRegister src);

  void movdl(XMMRegister dst, Register src);
  void movdl(Register dst, XMMRegister src);
  void movdl(XMMRegister dst, Address src);
  void movdl(Address dst, XMMRegister src);

  // Move Double Quadword
  void movdq(XMMRegister dst, Register src);
  void movdq(Register dst, XMMRegister src);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src);
  void movdqa(XMMRegister dst, Address src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);

  // Move Unaligned 256bit Vector
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);

  // Move Unaligned 512bit Vector
  void evmovdqub(Address dst, XMMRegister src, int vector_len);
  void evmovdqub(XMMRegister dst, Address src, int vector_len);
  void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len);
  void evmovdquw(Address dst, XMMRegister src, int vector_len);
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len);
  void evmovdquw(XMMRegister dst, Address src, int vector_len);
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len);
  void evmovdqul(Address dst, XMMRegister src, int vector_len);
  void evmovdqul(XMMRegister dst, Address src, int vector_len);
  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
  void evmovdquq(Address dst, XMMRegister src, int vector_len);
  void evmovdquq(XMMRegister dst, Address src, int vector_len);
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);

  // Move lower 64bit to high 64bit in 128bit register
  void movlhps(XMMRegister dst, XMMRegister src);

  void movl(Register dst, int32_t imm32);
  void movl(Address dst, int32_t imm32);
  void movl(Register dst, Register src);
  void movl(Register dst, Address src);
  void movl(Address dst, Register src);

  // These dummies prevent using movl from converting a zero (like NULL) into a Register
  // by giving the compiler two choices it can't resolve

  void movl(Address dst, void* junk);
  void movl(Register dst, void* junk);

#ifdef _LP64
  void movq(Register dst, Register src);
  void movq(Register dst, Address src);
  void movq(Address dst, Register src);
#endif

  void movq(Address dst, MMXRegister src);
  void movq(MMXRegister dst, Address src);

#ifdef _LP64
  // These dummies prevent using movq from converting a zero (like NULL) into a Register
  // by giving the compiler two choices it can't resolve

  void movq(Address dst, void* dummy);
  void movq(Register dst, void* dummy);
#endif

  // Move Quadword
  void movq(Address dst, XMMRegister src);
  void movq(XMMRegister dst, Address src);

  void movsbl(Register dst, Address src);
  void movsbl(Register dst, Register src);

#ifdef _LP64
  void movsbq(Register dst, Address src);
  void movsbq(Register dst, Register src);

  // Move signed 32bit immediate to 64bit, extending the sign
  void movslq(Address dst, int32_t imm64);
  void movslq(Register dst, int32_t imm64);

  void movslq(Register dst, Address src);
  void movslq(Register dst, Register src);
  void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
#endif

  void movswl(Register dst, Address src);
  void movswl(Register dst, Register src);

#ifdef _LP64
  void movswq(Register dst, Address src);
  void movswq(Register dst, Register src);
#endif

  void movw(Address dst, int imm16);
  void movw(Register dst, Address src);
  void movw(Address dst, Register src);

  void movzbl(Register dst, Address src);
  void movzbl(Register dst, Register src);

#ifdef _LP64
  void movzbq(Register dst, Address src);
  void movzbq(Register dst, Register src);
#endif

  void movzwl(Register dst, Address src);
  void movzwl(Register dst, Register src);

#ifdef _LP64
  void movzwq(Register dst, Address src);
  void movzwq(Register dst, Register src);
#endif

  // Unsigned multiply with RAX destination register
  void mull(Address src);
  void mull(Register src);

#ifdef _LP64
  void mulq(Address src);
  void mulq(Register src);
  void mulxq(Register dst1, Register dst2, Register src);
#endif

  // Multiply Scalar Double-Precision Floating-Point Values
  void mulsd(XMMRegister dst, Address src);
  void mulsd(XMMRegister dst, XMMRegister src);

  // Multiply Scalar Single-Precision Floating-Point Values
  void mulss(XMMRegister dst, Address src);
  void mulss(XMMRegister dst, XMMRegister src);

  void negl(Register dst);

#ifdef _LP64
  void negq(Register dst);
#endif

  void nop(int i = 1);

  void notl(Register dst);

#ifdef _LP64
  void notq(Register dst);
#endif

  void orl(Address dst, int32_t imm32);
  void orl(Register dst, int32_t imm32);
  void orl(Register dst, Address src);
  void orl(Register dst, Register src);
  void orl(Address dst, Register src);

  void orb(Address dst, int imm8);

  void orq(Address dst, int32_t imm32);
  void orq(Register dst, int32_t imm32);
  void orq(Register dst, Address src);
  void orq(Register dst, Register src);

  // Pack with unsigned saturation
  void packuswb(XMMRegister dst, XMMRegister src);
  void packuswb(XMMRegister dst, Address src);
  void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Permutation of 64bit words
  void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
  void vpermq(XMMRegister dst, XMMRegister src, int imm8);
  void vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
  void vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
  void evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void pause();

  // Undefined Instruction
  void ud2();

  // SSE4.2 string instructions
  void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  void pcmpestri(XMMRegister xmm1, Address src, int imm8);

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
  void evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);

  void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
  void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);

  void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
  void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
  void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);

  void pcmpeqw(XMMRegister dst, XMMRegister src);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void pcmpeqd(XMMRegister dst, XMMRegister src);
  void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void pcmpeqq(XMMRegister dst, XMMRegister src);
  void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);

  void pmovmskb(Register dst, XMMRegister src);
  void vpmovmskb(Register dst, XMMRegister src);

  // SSE 4.1 extract
  void pextrd(Register dst, XMMRegister src, int imm8);
  void pextrq(Register dst, XMMRegister src, int imm8);
  void pextrd(Address dst, XMMRegister src, int imm8);
  void pextrq(Address dst, XMMRegister src, int imm8);
  void pextrb(Address dst, XMMRegister src, int imm8);
  // SSE 2 extract
  void pextrw(Register dst, XMMRegister src, int imm8);
  void pextrw(Address dst, XMMRegister src, int imm8);

  // SSE 4.1 insert
  void pinsrd(XMMRegister dst, Register src, int imm8);
  void pinsrq(XMMRegister dst, Register src, int imm8);
  void pinsrd(XMMRegister dst, Address src, int imm8);
  void pinsrq(XMMRegister dst, Address src, int imm8);
  void pinsrb(XMMRegister dst, Address src, int imm8);
  // SSE 2 insert
  void pinsrw(XMMRegister dst, Register src, int imm8);
  void pinsrw(XMMRegister dst, Address src, int imm8);

  // SSE4.1 packed move
  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len);
  void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len);

  void evpmovwb(Address dst, XMMRegister src, int vector_len);
  void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len);

  void vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len);

  void evpmovdb(Address dst, XMMRegister src, int vector_len);

  // Sign extend moves
  void pmovsxbw(XMMRegister dst, XMMRegister src);
  void vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len);

  // Multiply add
  void pmaddwd(XMMRegister dst, XMMRegister src);
  void vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  // Multiply add accumulate
  void evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

#ifndef _LP64 // no 32bit push/pop on amd64
  void popl(Address dst);
#endif

#ifdef _LP64
  void popq(Address dst);
#endif

  void popcntl(Register dst, Address src);
  void popcntl(Register dst, Register src);

  void vpopcntd(XMMRegister dst, XMMRegister src, int vector_len);

#ifdef _LP64
  void popcntq(Register dst, Address src);
  void popcntq(Register dst, Register src);
#endif

  // Prefetches (SSE, SSE2, 3DNOW only)

  void prefetchnta(Address src);
  void prefetchr(Address src);
  void prefetcht0(Address src);
  void prefetcht1(Address src);
  void prefetcht2(Address src);
  void prefetchw(Address src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src);
  void pshufb(XMMRegister dst, Address src);
  void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Shuffle Packed Doublewords
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src, int mode);
  void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len);

  // Shuffle Packed Low Words
  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode);

  // Shuffle packed values at 128 bit granularity
  void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);

  // Shift Right by bytes Logical DoubleQuadword Immediate
  void psrldq(XMMRegister dst, int shift);
  // Shift Left by bytes Logical DoubleQuadword Immediate
  void pslldq(XMMRegister dst, int shift);

  // Logical Compare 128bit
  void ptest(XMMRegister dst, XMMRegister src);
  void ptest(XMMRegister dst, Address src);
  // Logical Compare 256bit
  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, Address src);

  // Interleave Low Bytes
  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src);

  // Interleave Low Doublewords
  void punpckldq(XMMRegister dst, XMMRegister src);
  void punpckldq(XMMRegister dst, Address src);
1765
1766 // Interleave Low Quadwords
1767 void punpcklqdq(XMMRegister dst, XMMRegister src);
1768
1769#ifndef _LP64 // no 32bit push/pop on amd64
1770 void pushl(Address src);
1771#endif
1772
1773 void pushq(Address src);
1774
1775 void rcll(Register dst, int imm8);
1776
1777 void rclq(Register dst, int imm8);
1778
1779 void rcrq(Register dst, int imm8);
1780
1781 void rcpps(XMMRegister dst, XMMRegister src);
1782
1783 void rcpss(XMMRegister dst, XMMRegister src);
1784
1785 void rdtsc();
1786
1787 void ret(int imm16);
1788
1789#ifdef _LP64
1790 void rorq(Register dst, int imm8);
1791 void rorxq(Register dst, Register src, int imm8);
1792 void rorxd(Register dst, Register src, int imm8);
1793#endif
1794
1795 void sahf();
1796
1797 void sarl(Register dst, int imm8);
1798 void sarl(Register dst);
1799
1800 void sarq(Register dst, int imm8);
1801 void sarq(Register dst);
1802
1803 void sbbl(Address dst, int32_t imm32);
1804 void sbbl(Register dst, int32_t imm32);
1805 void sbbl(Register dst, Address src);
1806 void sbbl(Register dst, Register src);
1807
1808 void sbbq(Address dst, int32_t imm32);
1809 void sbbq(Register dst, int32_t imm32);
1810 void sbbq(Register dst, Address src);
1811 void sbbq(Register dst, Register src);
1812
1813 void setb(Condition cc, Register dst);
1814
1815 void palignr(XMMRegister dst, XMMRegister src, int imm8);
1816 void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);
1817 void evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
1818
1819 void pblendw(XMMRegister dst, XMMRegister src, int imm8);
1820
1821 void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
1822 void sha1nexte(XMMRegister dst, XMMRegister src);
1823 void sha1msg1(XMMRegister dst, XMMRegister src);
1824 void sha1msg2(XMMRegister dst, XMMRegister src);
1825 // xmm0 is implicit additional source to the following instruction.
1826 void sha256rnds2(XMMRegister dst, XMMRegister src);
1827 void sha256msg1(XMMRegister dst, XMMRegister src);
1828 void sha256msg2(XMMRegister dst, XMMRegister src);
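
  // A minimal sketch of the implicit-operand contract above (register choices
  // are illustrative, not taken from a specific call site): xmm0 must hold the
  // combined message words and round constants before sha256rnds2 is emitted.
  //   movdqu(xmm0, Address(rbx, 0));  // wk: message schedule + K constants
  //   sha256rnds2(xmm1, xmm2);        // two rounds; reads xmm0 implicitly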

  void shldl(Register dst, Register src);
  void shldl(Register dst, Register src, int8_t imm8);

  void shll(Register dst, int imm8);
  void shll(Register dst);

  void shlq(Register dst, int imm8);
  void shlq(Register dst);

  void shrdl(Register dst, Register src);

  void shrl(Register dst, int imm8);
  void shrl(Register dst);

  void shrq(Register dst, int imm8);
  void shrq(Register dst);

  void smovl(); // QQQ generic?

  // Compute Square Root of Scalar Double-Precision Floating-Point Value
  void sqrtsd(XMMRegister dst, Address src);
  void sqrtsd(XMMRegister dst, XMMRegister src);

  // Compute Square Root of Scalar Single-Precision Floating-Point Value
  void sqrtss(XMMRegister dst, Address src);
  void sqrtss(XMMRegister dst, XMMRegister src);

  void std();

  void stmxcsr(Address dst);

  void subl(Address dst, int32_t imm32);
  void subl(Address dst, Register src);
  void subl(Register dst, int32_t imm32);
  void subl(Register dst, Address src);
  void subl(Register dst, Register src);

  void subq(Address dst, int32_t imm32);
  void subq(Address dst, Register src);
  void subq(Register dst, int32_t imm32);
  void subq(Register dst, Address src);
  void subq(Register dst, Register src);

  // Force generation of a 4-byte immediate value even if it fits into 8 bits
  void subl_imm32(Register dst, int32_t imm32);
  void subq_imm32(Register dst, int32_t imm32);
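
  // Why force the wide form - a sketch under an assumption (the call site and
  // frame_size below are hypothetical): a fixed 4-byte immediate keeps the
  // encoded instruction length constant, so the emitted code can be sized or
  // patched uniformly regardless of the immediate value.
  //   subq_imm32(rsp, frame_size);  // same encoding length for any frame_size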

  // Subtract Scalar Double-Precision Floating-Point Values
  void subsd(XMMRegister dst, Address src);
  void subsd(XMMRegister dst, XMMRegister src);

  // Subtract Scalar Single-Precision Floating-Point Values
  void subss(XMMRegister dst, Address src);
  void subss(XMMRegister dst, XMMRegister src);

  void testb(Register dst, int imm8);
  void testb(Address dst, int imm8);

  void testl(Register dst, int32_t imm32);
  void testl(Register dst, Register src);
  void testl(Register dst, Address src);

  void testq(Register dst, int32_t imm32);
  void testq(Register dst, Register src);
  void testq(Register dst, Address src);

  // BMI - count trailing zeros
  void tzcntl(Register dst, Register src);
  void tzcntq(Register dst, Register src);

  // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void ucomisd(XMMRegister dst, Address src);
  void ucomisd(XMMRegister dst, XMMRegister src);

  // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void ucomiss(XMMRegister dst, Address src);
  void ucomiss(XMMRegister dst, XMMRegister src);

  void xabort(int8_t imm8);

  void xaddb(Address dst, Register src);
  void xaddw(Address dst, Register src);
  void xaddl(Address dst, Register src);
  void xaddq(Address dst, Register src);

  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);

  void xchgb(Register reg, Address adr);
  void xchgw(Register reg, Address adr);
  void xchgl(Register reg, Address adr);
  void xchgl(Register dst, Register src);

  void xchgq(Register reg, Address adr);
  void xchgq(Register dst, Register src);

  void xend();

  // Get Value of Extended Control Register
  void xgetbv();

  void xorl(Register dst, int32_t imm32);
  void xorl(Register dst, Address src);
  void xorl(Register dst, Register src);

  void xorb(Register dst, Address src);

  void xorq(Register dst, Address src);
  void xorq(Register dst, Register src);

  void set_byte_if_not_zero(Register dst); // sets dst to 1 if not zero, otherwise 0

  // AVX 3-operand scalar instructions (encoded with VEX prefix)

  void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vaddss(XMMRegister dst, XMMRegister nds, Address src);
  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivss(XMMRegister dst, XMMRegister nds, Address src);
  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vfmadd231sd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vfmadd231ss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulss(XMMRegister dst, XMMRegister nds, Address src);
  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubss(XMMRegister dst, XMMRegister nds, Address src);
  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);

  void vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vminss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src);

  void shlxl(Register dst, Register src1, Register src2);
  void shlxq(Register dst, Register src1, Register src2);

  //====================VECTOR ARITHMETIC=====================================

  // Add Packed Floating-Point Values
  void addpd(XMMRegister dst, XMMRegister src);
  void addpd(XMMRegister dst, Address src);
  void addps(XMMRegister dst, XMMRegister src);
  void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Subtract Packed Floating-Point Values
  void subpd(XMMRegister dst, XMMRegister src);
  void subps(XMMRegister dst, XMMRegister src);
  void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply Packed Floating-Point Values
  void mulpd(XMMRegister dst, XMMRegister src);
  void mulpd(XMMRegister dst, Address src);
  void mulps(XMMRegister dst, XMMRegister src);
  void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vfmadd231pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vfmadd231ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vfmadd231pd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vfmadd231ps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Divide Packed Floating-Point Values
  void divpd(XMMRegister dst, XMMRegister src);
  void divps(XMMRegister dst, XMMRegister src);
  void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Sqrt Packed Floating-Point Values
  void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len);
  void vsqrtpd(XMMRegister dst, Address src, int vector_len);
  void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
  void vsqrtps(XMMRegister dst, Address src, int vector_len);

  // Bitwise Logical AND of Packed Floating-Point Values
  void andpd(XMMRegister dst, XMMRegister src);
  void andps(XMMRegister dst, XMMRegister src);
  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void unpckhpd(XMMRegister dst, XMMRegister src);
  void unpcklpd(XMMRegister dst, XMMRegister src);

  // Bitwise Logical XOR of Packed Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, XMMRegister src);
  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Add horizontal packed integers
  void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void phaddw(XMMRegister dst, XMMRegister src);
  void phaddd(XMMRegister dst, XMMRegister src);

  // Add packed integers
  void paddb(XMMRegister dst, XMMRegister src);
  void paddw(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, Address src);
  void paddq(XMMRegister dst, XMMRegister src);
  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Sub packed integers
  void psubb(XMMRegister dst, XMMRegister src);
  void psubw(XMMRegister dst, XMMRegister src);
  void psubd(XMMRegister dst, XMMRegister src);
  void psubq(XMMRegister dst, XMMRegister src);
  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Multiply packed integers (shorts and ints; the quadword form requires AVX-512DQ)
  void pmullw(XMMRegister dst, XMMRegister src);
  void pmulld(XMMRegister dst, XMMRegister src);
  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // Shift left packed integers
  void psllw(XMMRegister dst, int shift);
  void pslld(XMMRegister dst, int shift);
  void psllq(XMMRegister dst, int shift);
  void psllw(XMMRegister dst, XMMRegister shift);
  void pslld(XMMRegister dst, XMMRegister shift);
  void psllq(XMMRegister dst, XMMRegister shift);
  void vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len);

  // Logical shift right packed integers
  void psrlw(XMMRegister dst, int shift);
  void psrld(XMMRegister dst, int shift);
  void psrlq(XMMRegister dst, int shift);
  void psrlw(XMMRegister dst, XMMRegister shift);
  void psrld(XMMRegister dst, XMMRegister shift);
  void psrlq(XMMRegister dst, XMMRegister shift);
  void vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Arithmetic shift right packed integers (shorts and ints; the quadword form requires AVX-512)
  void psraw(XMMRegister dst, int shift);
  void psrad(XMMRegister dst, int shift);
  void psraw(XMMRegister dst, XMMRegister shift);
  void psrad(XMMRegister dst, XMMRegister shift);
  void vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);

  // And packed integers
  void pand(XMMRegister dst, XMMRegister src);
  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Andn packed integers
  void pandn(XMMRegister dst, XMMRegister src);
  void vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Or packed integers
  void por(XMMRegister dst, XMMRegister src);
  void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  // Xor packed integers
  void pxor(XMMRegister dst, XMMRegister src);
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);


  // vinserti forms
  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);

  // vinsertf forms
  void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);

  // vextracti forms
  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti64x4(Address dst, XMMRegister src, uint8_t imm8);

  // vextractf forms
  void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);

  // xmm/mem sourced byte/word/dword/qword replicate
  void vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastb(XMMRegister dst, Address src, int vector_len);
  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len);
  void vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastd(XMMRegister dst, Address src, int vector_len);
  void vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastq(XMMRegister dst, Address src, int vector_len);

  void evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len);
  void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len);

  // scalar single/double precision replicate
  void vpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastss(XMMRegister dst, Address src, int vector_len);
  void vpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastsd(XMMRegister dst, Address src, int vector_len);

  // gpr sourced byte/word/dword/qword replicate
  void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Register src, int vector_len);

  void evpgatherdd(XMMRegister dst, KRegister k1, Address src, int vector_len);

  // Carry-Less Multiplication Quadword
  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
  void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
  void evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len);
  // AVX instruction used to clear the upper 128 bits of YMM registers and
  // to avoid the transition penalty between AVX and SSE states. There is no
  // penalty if legacy SSE instructions are encoded using a VEX prefix because
  // they always clear the upper 128 bits. It should be used before calling
  // runtime code and native libraries.
  void vzeroupper();
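
  // Typical use, as a sketch (the target register is illustrative): emit
  // vzeroupper before leaving AVX code for SSE-encoded runtime or native code.
  //   vzeroupper();
  //   call(rax);  // rax assumed to hold the runtime/native entry point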

  // AVX support for vectorized conditional move (float/double). The following
  // cmp/blend pairs are only used together; see the sketch after this group.
  void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
  void blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
  void cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
  void blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
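
  // Coupled-use sketch (registers and the predicate value are illustrative):
  // the compare writes an all-ones/all-zeros mask per lane, which the blend
  // then uses to select lanes from its two sources.
  //   cmppd(xmm0, xmm1, xmm2, 0x1, vector_len);     // per-lane mask into xmm0
  //   blendvpd(xmm3, xmm1, xmm2, xmm0, vector_len); // lane-wise select by mask
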
  void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);

 protected:
  // The next instructions require 16-byte aligned memory operands in SSE mode.
  // They should be called only from the corresponding MacroAssembler instructions.
  void andpd(XMMRegister dst, Address src);
  void andps(XMMRegister dst, Address src);
  void xorpd(XMMRegister dst, Address src);
  void xorps(XMMRegister dst, Address src);

};

// The Intel x86/AMD64 Assembler attributes: all fields enclosed here guide encoding-level decisions.
// The specific setter functions are for specialized use; otherwise the defaults, or whatever was
// supplied at object construction, are applied.
class InstructionAttr {
public:
  InstructionAttr(
    int vector_len,     // The vector length applied in the encoding - for both AVX and EVEX
    bool rex_vex_w,     // Width of data: false for 32 bits or less, true for 64 bits or specially defined
    bool legacy_mode,   // If true, the instruction is encoded as AVX or earlier; otherwise EVEX may be chosen
    bool no_reg_mask,   // If true, k0 is used when EVEX encoding is chosen; otherwise embedded_opmask_register_specifier is used
    bool uses_vl)       // The instruction may have legacy constraints based on vector length for EVEX
    :
    _avx_vector_len(vector_len),
    _rex_vex_w(rex_vex_w),
    _rex_vex_w_reverted(false),
    _legacy_mode(legacy_mode),
    _no_reg_mask(no_reg_mask),
    _uses_vl(uses_vl),
    _tuple_type(Assembler::EVEX_ETUP),
    _input_size_in_bits(Assembler::EVEX_NObit),
    _is_evex_instruction(false),
    _evex_encoding(0),
    _is_clear_context(true),
    _is_extended_context(false),
    _embedded_opmask_register_specifier(0), // hard code k0
    _current_assembler(NULL) {
    if (UseAVX < 3) _legacy_mode = true;
  }
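
  // A representative construction at an instruction-emission site (the
  // argument values below are illustrative only):
  //   InstructionAttr attributes(vector_len, /* rex_vex_w */ false,
  //                              /* legacy_mode */ false, /* no_reg_mask */ true,
  //                              /* uses_vl */ true);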

  ~InstructionAttr() {
    if (_current_assembler != NULL) {
      _current_assembler->clear_attributes();
    }
    _current_assembler = NULL;
  }

private:
  int _avx_vector_len;
  bool _rex_vex_w;
  bool _rex_vex_w_reverted;
  bool _legacy_mode;
  bool _no_reg_mask;
  bool _uses_vl;
  int _tuple_type;
  int _input_size_in_bits;
  bool _is_evex_instruction;
  int _evex_encoding;
  bool _is_clear_context;
  bool _is_extended_context;
  int _embedded_opmask_register_specifier;

  Assembler *_current_assembler;

public:
  // query functions for field accessors
  int get_vector_len(void) const { return _avx_vector_len; }
  bool is_rex_vex_w(void) const { return _rex_vex_w; }
  bool is_rex_vex_w_reverted(void) const { return _rex_vex_w_reverted; }
  bool is_legacy_mode(void) const { return _legacy_mode; }
  bool is_no_reg_mask(void) const { return _no_reg_mask; }
  bool uses_vl(void) const { return _uses_vl; }
  int get_tuple_type(void) const { return _tuple_type; }
  int get_input_size(void) const { return _input_size_in_bits; }
  bool is_evex_instruction(void) const { return _is_evex_instruction; }
  int get_evex_encoding(void) const { return _evex_encoding; }
  bool is_clear_context(void) const { return _is_clear_context; }
  bool is_extended_context(void) const { return _is_extended_context; }
  int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }

  // Set the vector length manually
  void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }

  // Mark rex_vex_w as reverted for AVX encoding
  void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }

  // Set rex_vex_w based on state
  void set_rex_vex_w(bool state) { _rex_vex_w = state; }

  // Set the instruction to be encoded in AVX mode
  void set_is_legacy_mode(void) { _legacy_mode = true; }

  // Set the current instruction to be encoded as an EVEX instruction
  void set_is_evex_instruction(void) { _is_evex_instruction = true; }

  // Internal encoding data used in compressed immediate offset programming
  void set_evex_encoding(int value) { _evex_encoding = value; }

  // Reset the EVEX.Z field so that masked-off XMM/YMM/ZMM destination elements
  // are merged rather than cleared
  void reset_is_clear_context(void) { _is_clear_context = false; }

  // Map back to the current assembler so that we can manage object-level association
  void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }

  // Address modifiers used for compressed displacement calculation
  void set_address_attributes(int tuple_type, int input_size_in_bits) {
    if (VM_Version::supports_evex()) {
      _tuple_type = tuple_type;
      _input_size_in_bits = input_size_in_bits;
    }
  }
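
  // Sketch of the EVEX compressed-displacement (disp8*N) setup for a
  // full-vector memory operand (the enum choices here are illustrative):
  //   attributes.set_address_attributes(/* tuple_type */ Assembler::EVEX_FV,
  //                                     /* input_size_in_bits */ Assembler::EVEX_32bit);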

  // Set embedded opmask register specifier.
  void set_embedded_opmask_register_specifier(KRegister mask) {
    _embedded_opmask_register_specifier = (*mask).encoding() & 0x7;
  }
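
  // Masked-EVEX sketch (the mask register is illustrative): attach an opmask
  // and switch from zeroing to merging before emitting a masked instruction.
  //   attributes.set_embedded_opmask_register_specifier(mask);
  //   attributes.reset_is_clear_context();  // merge untouched lanes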

};

#endif // CPU_X86_ASSEMBLER_X86_HPP