1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4
5/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7XX XX
8XX Instruction XX
9XX XX
10XX The interface to generate a machine-instruction. XX
11XX XX
12XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14*/
15
16#include "jitpch.h"
17#ifdef _MSC_VER
18#pragma hdrstop
19#endif
20
21#include "codegen.h"
22#include "instr.h"
23#include "emit.h"
24
25/*****************************************************************************/
26#ifdef DEBUG
27
28/*****************************************************************************
29 *
30 * Returns the string representation of the given CPU instruction.
31 */
32
// Returns the printable name of 'ins'. The table is generated from instrs.h:
// each INSTn macro expands to just the name ('nm') column, so the array is
// indexed directly by the instruction enum value for the current target.
const char* CodeGen::genInsName(instruction ins)
{
    // clang-format off
    static
    const char * const insNames[] =
    {
#if defined(_TARGET_XARCH_)
        #define INST0(id, nm, um, mr, flags) nm,
        #define INST1(id, nm, um, mr, flags) nm,
        #define INST2(id, nm, um, mr, mi, flags) nm,
        #define INST3(id, nm, um, mr, mi, rm, flags) nm,
        #define INST4(id, nm, um, mr, mi, rm, a4, flags) nm,
        #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) nm,
        #include "instrs.h"

#elif defined(_TARGET_ARM_)
        #define INST1(id, nm, fp, ldst, fmt, e1 ) nm,
        #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) nm,
        #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) nm,
        #define INST4(id, nm, fp, ldst, fmt, e1, e2, e3, e4 ) nm,
        #define INST5(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5 ) nm,
        #define INST6(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6 ) nm,
        #define INST8(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) nm,
        #define INST9(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) nm,
        #include "instrs.h"

#elif defined(_TARGET_ARM64_)
        #define INST1(id, nm, fp, ldst, fmt, e1 ) nm,
        #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) nm,
        #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) nm,
        #define INST4(id, nm, fp, ldst, fmt, e1, e2, e3, e4 ) nm,
        #define INST5(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5 ) nm,
        #define INST6(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6 ) nm,
        #define INST9(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) nm,
        #include "instrs.h"

#else
#error "Unknown _TARGET_"
#endif
    };
    // clang-format on

    // 'ins' must be a valid instruction for this target and have a name entry.
    assert((unsigned)ins < _countof(insNames));
    assert(insNames[ins] != nullptr);

    return insNames[ins];
}
80
// Debug-only: prints the mnemonic of 'ins' followed by an optional
// printf-style formatted operand string ('fmt' may be null), terminated by a
// newline unless 'noNL' is true. Output is produced only when instruction
// display (compiler->opts.dspCode) is enabled.
void __cdecl CodeGen::instDisp(instruction ins, bool noNL, const char* fmt, ...)
{
    if (compiler->opts.dspCode)
    {
        /* Display the instruction offset within the emit block */

        // printf("[%08X:%04X]", getEmitter().emitCodeCurBlock(), getEmitter().emitCodeOffsInBlock());

        /* Display the FP stack depth (before the instruction is executed) */

        // printf("[FP=%02u] ", genGetFPstkLevel());

        /* Display the instruction mnemonic */
        printf(" ");

        printf(" %-8s", genInsName(ins));

        if (fmt)
        {
            // Forward the caller's varargs to vprintf for the operand text.
            va_list args;
            va_start(args, fmt);
            vprintf(fmt, args);
            va_end(args);
        }

        if (!noNL)
        {
            printf("\n");
        }
    }
}
112
113/*****************************************************************************/
114#endif // DEBUG
115/*****************************************************************************/
116
// Per-compilation initialization hook for the instruction module.
// Intentionally empty; kept so callers have a stable initialization point.
void CodeGen::instInit()
{
}
120
121/*****************************************************************************
122 *
123 * Return the size string (e.g. "word ptr") appropriate for the given size.
124 */
125
126#ifdef DEBUG
127
128const char* CodeGen::genSizeStr(emitAttr attr)
129{
130 // clang-format off
131 static
132 const char * const sizes[] =
133 {
134 "",
135 "byte ptr ",
136 "word ptr ",
137 nullptr,
138 "dword ptr ",
139 nullptr,
140 nullptr,
141 nullptr,
142 "qword ptr ",
143 nullptr,
144 nullptr,
145 nullptr,
146 nullptr,
147 nullptr,
148 nullptr,
149 nullptr,
150 "xmmword ptr ",
151 nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
152 nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
153 "ymmword ptr"
154 };
155 // clang-format on
156
157 unsigned size = EA_SIZE(attr);
158
159 assert(size == 0 || size == 1 || size == 2 || size == 4 || size == 8 || size == 16 || size == 32);
160
161 if (EA_ATTR(size) == attr)
162 {
163 return sizes[size];
164 }
165 else if (attr == EA_GCREF)
166 {
167 return "gword ptr ";
168 }
169 else if (attr == EA_BYREF)
170 {
171 return "bword ptr ";
172 }
173 else if (EA_IS_DSP_RELOC(attr))
174 {
175 return "rword ptr ";
176 }
177 else
178 {
179 assert(!"Unexpected");
180 return "unknw ptr ";
181 }
182}
183
184#endif
185
186/*****************************************************************************
187 *
188 * Generate an instruction.
189 */
190
// Emits a no-operand instruction. On xarch, a 'lock' prefix delays any
// pending scheduled nop so the nop is not inserted between the prefix and
// the instruction it modifies.
void CodeGen::instGen(instruction ins)
{

    getEmitter()->emitIns(ins);

#ifdef _TARGET_XARCH_
    // A workaround necessitated by limitations of emitter
    // if we are scheduled to insert a nop here, we have to delay it
    // hopefully we have not missed any other prefix instructions or places
    // they could be inserted
    if (ins == INS_lock && getEmitter()->emitNextNop == 0)
    {
        getEmitter()->emitNextNop = 1;
    }
#endif
}
207
208/*****************************************************************************
209 *
210 * Returns non-zero if the given CPU instruction is a floating-point ins.
211 */
212
213// static inline
// static inline
// Returns true if 'ins' is a floating-point instruction, by testing the
// per-instruction flags table: the x87 flag on xarch, INST_FP elsewhere.
bool CodeGenInterface::instIsFP(instruction ins)
{
    assert((unsigned)ins < _countof(instInfo));

#ifdef _TARGET_XARCH_
    return (instInfo[ins] & INS_FLAGS_x87Instr) != 0;
#else
    return (instInfo[ins] & INST_FP) != 0;
#endif
}
224
225#ifdef _TARGET_XARCH_
226/*****************************************************************************
227 *
228 * Generate a multi-byte NOP instruction.
229 */
230
// Emits a multi-byte NOP of 'size' bytes (at most 15, the emitter's limit).
void CodeGen::instNop(unsigned size)
{
    assert(size <= 15);
    getEmitter()->emitIns_Nop(size);
}
236#endif
237
238/*****************************************************************************
239 *
240 * Generate a jump instruction.
241 */
242
// Emits a jump of kind 'jmp' to 'tgtBlock', first verifying (x86 only) that
// the recorded stack depth at the target matches the current stack level.
void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock)
{
#if !FEATURE_FIXED_OUT_ARGS
    // On the x86 we are pushing (and changing the stack level), but on x64 and other archs we have
    // a fixed outgoing args area that we store into and we never change the stack level when calling methods.
    //
    // Thus only on x86 do we need to assert that the stack level at the target block matches the current stack level.
    //
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef UNIX_X86_ABI
    // bbTgtStkDepth is a (pure) argument count (stack alignment padding should be excluded).
    assert((tgtBlock->bbTgtStkDepth * sizeof(int) == (genStackLevel - curNestedAlignment)) || isFramePointerUsed());
#else
    assert((tgtBlock->bbTgtStkDepth * sizeof(int) == genStackLevel) || isFramePointerUsed());
#endif
#endif // !FEATURE_FIXED_OUT_ARGS

    // Translate the abstract jump kind into the target's jump instruction.
    getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmp), tgtBlock);
}
263
264/*****************************************************************************
265 *
266 * Generate a set instruction.
267 */
268
// Materializes a condition into 'reg': on xarch via the SETcc family
// (writes only the low byte, so 'reg' must be byte-addressable), on ARM64
// via 'cset' (writes the full 8-byte register). Unhandled targets NYI.
void CodeGen::inst_SET(emitJumpKind condition, regNumber reg)
{
#ifdef _TARGET_XARCH_
    instruction ins;

    /* Convert the condition to an instruction opcode */

    switch (condition)
    {
        case EJ_js:
            ins = INS_sets;
            break;
        case EJ_jns:
            ins = INS_setns;
            break;
        case EJ_je:
            ins = INS_sete;
            break;
        case EJ_jne:
            ins = INS_setne;
            break;

        case EJ_jl:
            ins = INS_setl;
            break;
        case EJ_jle:
            ins = INS_setle;
            break;
        case EJ_jge:
            ins = INS_setge;
            break;
        case EJ_jg:
            ins = INS_setg;
            break;

        case EJ_jb:
            ins = INS_setb;
            break;
        case EJ_jbe:
            ins = INS_setbe;
            break;
        case EJ_jae:
            ins = INS_setae;
            break;
        case EJ_ja:
            ins = INS_seta;
            break;

        case EJ_jpe:
            ins = INS_setpe;
            break;
        case EJ_jpo:
            ins = INS_setpo;
            break;

        default:
            NO_WAY("unexpected condition type");
            return;
    }

    // SETcc encodes only byte registers; the register allocator must have
    // given us one.
    assert(genRegMask(reg) & RBM_BYTE_REGS);

    // These instructions only write the low byte of 'reg'
    getEmitter()->emitIns_R(ins, EA_1BYTE, reg);
#elif defined(_TARGET_ARM64_)
    insCond cond;
    /* Convert the condition to an insCond value */
    switch (condition)
    {
        case EJ_eq:
            cond = INS_COND_EQ;
            break;
        case EJ_ne:
            cond = INS_COND_NE;
            break;
        case EJ_hs:
            cond = INS_COND_HS;
            break;
        case EJ_lo:
            cond = INS_COND_LO;
            break;

        case EJ_mi:
            cond = INS_COND_MI;
            break;
        case EJ_pl:
            cond = INS_COND_PL;
            break;
        case EJ_vs:
            cond = INS_COND_VS;
            break;
        case EJ_vc:
            cond = INS_COND_VC;
            break;

        case EJ_hi:
            cond = INS_COND_HI;
            break;
        case EJ_ls:
            cond = INS_COND_LS;
            break;
        case EJ_ge:
            cond = INS_COND_GE;
            break;
        case EJ_lt:
            cond = INS_COND_LT;
            break;

        case EJ_gt:
            cond = INS_COND_GT;
            break;
        case EJ_le:
            cond = INS_COND_LE;
            break;

        default:
            NO_WAY("unexpected condition type");
            return;
    }
    getEmitter()->emitIns_R_COND(INS_cset, EA_8BYTE, reg, cond);
#else
    NYI("inst_SET");
#endif
}
393
394/*****************************************************************************
395 *
396 * Generate a "op reg" instruction.
397 */
398
399void CodeGen::inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size)
400{
401 if (size == EA_UNKNOWN)
402 {
403 size = emitActualTypeSize(type);
404 }
405
406 getEmitter()->emitIns_R(ins, size, reg);
407}
408
409/*****************************************************************************
410 *
411 * Generate a "op reg1, reg2" instruction.
412 */
413
414void CodeGen::inst_RV_RV(instruction ins,
415 regNumber reg1,
416 regNumber reg2,
417 var_types type,
418 emitAttr size,
419 insFlags flags /* = INS_FLAGS_DONT_CARE */)
420{
421 if (size == EA_UNKNOWN)
422 {
423 size = emitActualTypeSize(type);
424 }
425
426#ifdef _TARGET_ARM_
427 getEmitter()->emitIns_R_R(ins, size, reg1, reg2, flags);
428#else
429 getEmitter()->emitIns_R_R(ins, size, reg1, reg2);
430#endif
431}
432
433/*****************************************************************************
434 *
435 * Generate a "op reg1, reg2, reg3" instruction.
436 */
437
// Emits "ins reg1, reg2, reg3". ARM additionally passes 'flags'; xarch
// ignores it; other targets are not yet implemented.
void CodeGen::inst_RV_RV_RV(instruction ins,
                            regNumber   reg1,
                            regNumber   reg2,
                            regNumber   reg3,
                            emitAttr    size,
                            insFlags    flags /* = INS_FLAGS_DONT_CARE */)
{
#ifdef _TARGET_ARM_
    getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3, flags);
#elif defined(_TARGET_XARCH_)
    getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3);
#else
    NYI("inst_RV_RV_RV");
#endif
}
453/*****************************************************************************
454 *
455 * Generate a "op icon" instruction.
456 */
457
// Emits "ins icon" with a pointer-sized, non-relocatable immediate.
void CodeGen::inst_IV(instruction ins, int val)
{
    getEmitter()->emitIns_I(ins, EA_PTRSIZE, val);
}
462
463/*****************************************************************************
464 *
465 * Generate a "op icon" instruction where icon is a handle of type specified
466 * by 'flags'
467 */
468
// Emits "ins icon" where the immediate is a handle that may need a
// relocation entry (EA_HANDLE_CNS_RELOC).
void CodeGen::inst_IV_handle(instruction ins, int val)
{
    getEmitter()->emitIns_I(ins, EA_HANDLE_CNS_RELOC, val);
}
473
474/*****************************************************************************
475 *
476 * Display a stack frame reference.
477 */
478
// Debug-only: records the IL offset of the local-variable reference in
// 'tree' on the emitter, so the disassembly can annotate the frame access.
// No-op in release builds.
void CodeGen::inst_set_SV_var(GenTree* tree)
{
#ifdef DEBUG
    assert(tree && (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_VAR_ADDR || tree->gtOper == GT_STORE_LCL_VAR));
    assert(tree->gtLclVarCommon.gtLclNum < compiler->lvaCount);

    getEmitter()->emitVarRefOffs = tree->gtLclVar.gtLclILoffs;

#endif // DEBUG
}
489
490/*****************************************************************************
491 *
492 * Generate a "op reg, icon" instruction.
493 */
494
// Emits "ins reg, icon", handling per-target immediate-encoding limits:
// ARM falls back to instGen_Set_Reg_To_Imm for unencodable mov immediates,
// ARM64 uses the three-operand reg,reg,imm form, and AMD64 shrinks 8-byte
// zero-extended mov immediates to 4 bytes.
void CodeGen::inst_RV_IV(
    instruction ins, regNumber reg, target_ssize_t val, emitAttr size, insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
#if !defined(_TARGET_64BIT_)
    // 8-byte immediates can't be expressed on a 32-bit target.
    assert(size != EA_8BYTE);
#endif

#ifdef _TARGET_ARM_
    if (arm_Valid_Imm_For_Instr(ins, val, flags))
    {
        getEmitter()->emitIns_R_I(ins, size, reg, val, flags);
    }
    else if (ins == INS_mov)
    {
        // The immediate can't be encoded directly; build it in the register.
        instGen_Set_Reg_To_Imm(size, reg, val);
    }
    else
    {
        // TODO-Cleanup: Add a comment about why this is unreached() for RyuJIT backend.
        unreached();
    }
#elif defined(_TARGET_ARM64_)
    // TODO-Arm64-Bug: handle large constants!
    // Probably need something like the ARM case above: if (arm_Valid_Imm_For_Instr(ins, val)) ...
    assert(ins != INS_cmp);
    assert(ins != INS_tst);
    assert(ins != INS_mov);
    getEmitter()->emitIns_R_R_I(ins, size, reg, reg, val);
#else // !_TARGET_ARM_
#ifdef _TARGET_AMD64_
    // Instead of an 8-byte immediate load, a 4-byte immediate will do fine
    // as the high 4 bytes will be zero anyway.
    if (size == EA_8BYTE && ins == INS_mov && ((val & 0xFFFFFFFF00000000LL) == 0))
    {
        size = EA_4BYTE;
        getEmitter()->emitIns_R_I(ins, size, reg, val);
    }
    else if (EA_SIZE(size) == EA_8BYTE && ins != INS_mov && (((int)val != val) || EA_IS_CNS_RELOC(size)))
    {
        assert(!"Invalid immediate for inst_RV_IV");
    }
    else
#endif // _TARGET_AMD64_
    {
        getEmitter()->emitIns_R_I(ins, size, reg, val);
    }
#endif // !_TARGET_ARM_
}
543
544/*****************************************************************************
545 *
546 * Generate an instruction that has one operand given by a tree (which has
547 * been made addressable).
548 */
549
// Emits an instruction whose single operand is given by an addressable tree
// (a local, a local field, a class static, or — on x86 — an int constant).
// 'offs' is added to the operand's offset; 'shfv', when non-zero, is emitted
// as an extra immediate (used for shift-by-constant forms). When 'size' is
// EA_UNKNOWN it is inferred from the tree's type.
void CodeGen::inst_TT(instruction ins, GenTree* tree, unsigned offs, int shfv, emitAttr size)
{
    // NOTE(review): 'sizeInferred' is set but never read in this function.
    bool sizeInferred = false;

    if (size == EA_UNKNOWN)
    {
        sizeInferred = true;
        if (instIsFP(ins))
        {
            size = EA_ATTR(genTypeSize(tree->TypeGet()));
        }
        else
        {
            size = emitTypeSize(tree->TypeGet());
        }
    }

AGAIN:

    /* Is this a spilled value? */

    if (tree->gtFlags & GTF_SPILLED)
    {
        assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
    }

    switch (tree->gtOper)
    {
        unsigned varNum;

        case GT_LCL_VAR:

            inst_set_SV_var(tree);
            goto LCL;

        case GT_LCL_FLD:

            offs += tree->gtLclFld.gtLclOffs;
            goto LCL;

        LCL:
            varNum = tree->gtLclVarCommon.gtLclNum;
            assert(varNum < compiler->lvaCount);

            if (shfv)
            {
                getEmitter()->emitIns_S_I(ins, size, varNum, offs, shfv);
            }
            else
            {
                getEmitter()->emitIns_S(ins, size, varNum, offs);
            }

            return;

        case GT_CLS_VAR:
            // Make sure FP instruction size matches the operand size
            // (We optimized constant doubles to floats when we can, just want to
            // make sure that we don't mistakenly use 8 bytes when the
            // constant is a float).
            assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));

            if (shfv)
            {
                getEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, shfv);
            }
            else
            {
                getEmitter()->emitIns_C(ins, size, tree->gtClsVar.gtClsVarHnd, offs);
            }
            return;

        case GT_IND:
        case GT_NULLCHECK:
        case GT_ARR_ELEM:
        {
            assert(!"inst_TT not supported for GT_IND, GT_NULLCHECK or GT_ARR_ELEM");
        }
        break;

#ifdef _TARGET_X86_
        case GT_CNS_INT:
            // We will get here for GT_MKREFANY from CodeGen::genPushArgList
            assert(offs == 0);
            assert(!shfv);
            if (tree->IsIconHandle())
                inst_IV_handle(ins, tree->gtIntCon.gtIconVal);
            else
                inst_IV(ins, tree->gtIntCon.gtIconVal);
            break;
#endif

        case GT_COMMA:
            // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
            tree = tree->gtOp.gtOp2;
            goto AGAIN;

        default:
            assert(!"invalid address");
    }
}
651
652/*****************************************************************************
653 *
654 * Generate an instruction that has one operand given by a tree (which has
655 * been made addressable) and another that is a register.
656 */
657
// Emits an instruction whose destination is an addressable tree (local,
// local field, or class static) and whose source is register 'reg'.
// On load/store architectures a non-store 'ins' is expanded into a
// load / op / store sequence through the tree's temp register.
void CodeGen::inst_TT_RV(instruction ins, GenTree* tree, regNumber reg, unsigned offs, emitAttr size, insFlags flags)
{
    assert(reg != REG_STK);

AGAIN:

    /* Is this a spilled value? */

    if (tree->gtFlags & GTF_SPILLED)
    {
        assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
    }

    if (size == EA_UNKNOWN)
    {
        // Infer the operand size from the tree's type.
        if (instIsFP(ins))
        {
            size = EA_ATTR(genTypeSize(tree->TypeGet()));
        }
        else
        {
            size = emitTypeSize(tree->TypeGet());
        }
    }

    switch (tree->gtOper)
    {
        unsigned varNum;

        case GT_LCL_VAR:

            inst_set_SV_var(tree);
            goto LCL;

        case GT_LCL_FLD:
        case GT_STORE_LCL_FLD:
            offs += tree->gtLclFld.gtLclOffs;
            goto LCL;

        LCL:

            varNum = tree->gtLclVarCommon.gtLclNum;
            assert(varNum < compiler->lvaCount);

#if CPU_LOAD_STORE_ARCH
            if (!getEmitter()->emitInsIsStore(ins))
            {
                // TODO-LdStArch-Bug: Should regTmp be a dst on the node or an internal reg?
                // Either way, it is not currently being handled by Lowering.
                regNumber regTmp = tree->gtRegNum;
                assert(regTmp != REG_NA);
                // Read-modify-write through a temp: load, apply 'ins', store back.
                getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
                getEmitter()->emitIns_R_R(ins, size, regTmp, reg, flags);
                getEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);

                regSet.verifyRegUsed(regTmp);
            }
            else
#endif
            {
                // ins is a Store instruction
                //
                getEmitter()->emitIns_S_R(ins, size, reg, varNum, offs);
#ifdef _TARGET_ARM_
                // If we need to set the flags then add an extra movs reg,reg instruction
                if (flags == INS_FLAGS_SET)
                    getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
#endif
            }
            return;

        case GT_CLS_VAR:
            // Make sure FP instruction size matches the operand size
            // (We optimized constant doubles to floats when we can, just want to
            // make sure that we don't mistakenly use 8 bytes when the
            // constant).
            assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));

#if CPU_LOAD_STORE_ARCH
            if (!getEmitter()->emitInsIsStore(ins))
            {
                NYI("Store of GT_CLS_VAR not supported for ARM");
            }
            else
#endif // CPU_LOAD_STORE_ARCH
            {
                getEmitter()->emitIns_C_R(ins, size, tree->gtClsVar.gtClsVarHnd, reg, offs);
            }
            return;

        case GT_IND:
        case GT_NULLCHECK:
        case GT_ARR_ELEM:
        {
            assert(!"inst_TT_RV not supported for GT_IND, GT_NULLCHECK or GT_ARR_ELEM");
        }
        break;

        case GT_COMMA:
            // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
            tree = tree->gtOp.gtOp2;
            goto AGAIN;

        default:
            assert(!"invalid address");
    }
}
765
766/*****************************************************************************
767 *
768 * Generate an instruction that has one operand given by a register and the
769 * other one by a tree (which has been made addressable).
770 */
771
772void CodeGen::inst_RV_TT(instruction ins,
773 regNumber reg,
774 GenTree* tree,
775 unsigned offs,
776 emitAttr size,
777 insFlags flags /* = INS_FLAGS_DONT_CARE */)
778{
779 assert(reg != REG_STK);
780
781 if (size == EA_UNKNOWN)
782 {
783 if (!instIsFP(ins))
784 {
785 size = emitTypeSize(tree->TypeGet());
786 }
787 else
788 {
789 size = EA_ATTR(genTypeSize(tree->TypeGet()));
790 }
791 }
792
793#ifdef _TARGET_XARCH_
794#ifdef DEBUG
795 // If it is a GC type and the result is not, then either
796 // 1) it is an LEA
797 // 2) optOptimizeBools() optimized if (ref != 0 && ref != 0) to if (ref & ref)
798 // 3) optOptimizeBools() optimized if (ref == 0 || ref == 0) to if (ref | ref)
799 // 4) byref - byref = int
800 if (tree->gtType == TYP_REF && !EA_IS_GCREF(size))
801 {
802 assert((EA_IS_BYREF(size) && ins == INS_add) || (ins == INS_lea || ins == INS_and || ins == INS_or));
803 }
804 if (tree->gtType == TYP_BYREF && !EA_IS_BYREF(size))
805 {
806 assert(ins == INS_lea || ins == INS_and || ins == INS_or || ins == INS_sub);
807 }
808#endif
809#endif
810
811#if CPU_LOAD_STORE_ARCH
812 if (ins == INS_mov)
813 {
814#if defined(_TARGET_ARM64_) || defined(_TARGET_ARM64_)
815 ins = ins_Move_Extend(tree->TypeGet(), false);
816#else
817 NYI("CodeGen::inst_RV_TT with INS_mov");
818#endif
819 }
820#endif // CPU_LOAD_STORE_ARCH
821
822AGAIN:
823
824 /* Is this a spilled value? */
825
826 if (tree->gtFlags & GTF_SPILLED)
827 {
828 assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
829 }
830
831 switch (tree->gtOper)
832 {
833 unsigned varNum;
834
835 case GT_LCL_VAR:
836 case GT_LCL_VAR_ADDR:
837
838 inst_set_SV_var(tree);
839 goto LCL;
840
841 case GT_LCL_FLD_ADDR:
842 case GT_LCL_FLD:
843 offs += tree->gtLclFld.gtLclOffs;
844 goto LCL;
845
846 LCL:
847 varNum = tree->gtLclVarCommon.gtLclNum;
848 assert(varNum < compiler->lvaCount);
849
850#ifdef _TARGET_ARM_
851 switch (ins)
852 {
853 case INS_mov:
854 ins = ins_Load(tree->TypeGet());
855 __fallthrough;
856
857 case INS_lea:
858 case INS_ldr:
859 case INS_ldrh:
860 case INS_ldrb:
861 case INS_ldrsh:
862 case INS_ldrsb:
863 case INS_vldr:
864 assert(flags != INS_FLAGS_SET);
865 getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
866 return;
867
868 default:
869 regNumber regTmp;
870 regTmp = tree->gtRegNum;
871
872 getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
873 getEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
874
875 regSet.verifyRegUsed(regTmp);
876 return;
877 }
878#else // !_TARGET_ARM_
879 getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
880 return;
881#endif // !_TARGET_ARM_
882
883 case GT_CLS_VAR:
884 // Make sure FP instruction size matches the operand size
885 // (We optimized constant doubles to floats when we can, just want to
886 // make sure that we don't mistakenly use 8 bytes when the
887 // constant.
888 assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
889
890#if CPU_LOAD_STORE_ARCH
891 assert(!"GT_CLS_VAR not supported in ARM backend");
892#else // CPU_LOAD_STORE_ARCH
893 getEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
894#endif // CPU_LOAD_STORE_ARCH
895 return;
896
897 case GT_IND:
898 case GT_NULLCHECK:
899 case GT_ARR_ELEM:
900 case GT_LEA:
901 {
902 assert(!"inst_RV_TT not supported for GT_IND, GT_NULLCHECK, GT_ARR_ELEM or GT_LEA");
903 }
904 break;
905
906 case GT_CNS_INT:
907
908 assert(offs == 0);
909
910 // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t type.
911 inst_RV_IV(ins, reg, (target_ssize_t)tree->gtIntCon.gtIconVal, emitActualTypeSize(tree->TypeGet()), flags);
912 break;
913
914 case GT_CNS_LNG:
915
916 assert(size == EA_4BYTE || size == EA_8BYTE);
917
918#ifdef _TARGET_AMD64_
919 assert(offs == 0);
920#endif // _TARGET_AMD64_
921
922 target_ssize_t constVal;
923 emitAttr size;
924 if (offs == 0)
925 {
926 constVal = (target_ssize_t)(tree->gtLngCon.gtLconVal);
927 size = EA_PTRSIZE;
928 }
929 else
930 {
931 constVal = (target_ssize_t)(tree->gtLngCon.gtLconVal >> 32);
932 size = EA_4BYTE;
933 }
934
935 inst_RV_IV(ins, reg, constVal, size, flags);
936 break;
937
938 case GT_COMMA:
939 tree = tree->gtOp.gtOp2;
940 goto AGAIN;
941
942 default:
943 assert(!"invalid address");
944 }
945}
946
947/*****************************************************************************
948 *
949 * Generate a "shift reg, icon" instruction.
950 */
951
// Emits "shift reg, icon". ARM masks the count to 0..31; xarch maps the
// instruction to its shift-by-constant form and uses the implicit-1 encoding
// when the count is 1.
void CodeGen::inst_RV_SH(
    instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
#if defined(_TARGET_ARM_)

    // ARM shift counts are modulo 32.
    if (val >= 32)
        val &= 0x1f;

    getEmitter()->emitIns_R_I(ins, size, reg, val, flags);

#elif defined(_TARGET_XARCH_)

#ifdef _TARGET_AMD64_
    // X64 JB BE ensures only encodable values make it here.
    // x86 can encode 8 bits, though it masks down to 5 or 6
    // depending on 32-bit or 64-bit registers are used.
    // Here we will allow anything that is encodable.
    assert(val < 256);
#endif

    ins = genMapShiftInsToShiftByConstantIns(ins, val);

    if (val == 1)
    {
        // Shift-by-1 has its own shorter encoding with no immediate.
        getEmitter()->emitIns_R(ins, size, reg);
    }
    else
    {
        getEmitter()->emitIns_R_I(ins, size, reg, val);
    }

#else
    NYI("inst_RV_SH - unknown target");
#endif // _TARGET_*
}
987
988/*****************************************************************************
989 *
990 * Generate a "shift [r/m], icon" instruction.
991 */
992
// Emits "shift [r/m], icon" against an addressable tree. On xarch a zero
// count emits nothing, and a count of 1 uses the implicit-1 encoding.
void CodeGen::inst_TT_SH(instruction ins, GenTree* tree, unsigned val, unsigned offs)
{
#ifdef _TARGET_XARCH_
    if (val == 0)
    {
        // Shift by 0 - why are you wasting our precious time????
        return;
    }

    ins = genMapShiftInsToShiftByConstantIns(ins, val);
    if (val == 1)
    {
        // shfv == 0 selects the no-immediate (shift-by-1) form in inst_TT.
        inst_TT(ins, tree, offs, 0, emitTypeSize(tree->TypeGet()));
    }
    else
    {
        inst_TT(ins, tree, offs, val, emitTypeSize(tree->TypeGet()));
    }
#endif // _TARGET_XARCH_

#ifdef _TARGET_ARM_
    inst_TT(ins, tree, offs, val, emitTypeSize(tree->TypeGet()));
#endif
}
1017
1018/*****************************************************************************
1019 *
1020 * Generate a "shift [addr], cl" instruction.
1021 */
1022
// Emits "shift [addr], cl": the variable-count shift form, whose count is
// implicit, so inst_TT is called with no extra immediate.
void CodeGen::inst_TT_CL(instruction ins, GenTree* tree, unsigned offs)
{
    inst_TT(ins, tree, offs, 0, emitTypeSize(tree->TypeGet()));
}
1027
1028/*****************************************************************************
1029 *
1030 * Generate an instruction of the form "op reg1, reg2, icon".
1031 */
1032
1033#if defined(_TARGET_XARCH_)
// Emits "ins reg1, reg2, icon" for the xarch instructions that take an
// immediate third operand (shld/shrd and various SSE shuffle/compare/round
// forms, as asserted below).
void CodeGen::inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival)
{
    assert(ins == INS_shld || ins == INS_shrd || ins == INS_shufps || ins == INS_shufpd || ins == INS_pshufd ||
           ins == INS_cmpps || ins == INS_cmppd || ins == INS_dppd || ins == INS_dpps || ins == INS_insertps ||
           ins == INS_roundps || ins == INS_roundss || ins == INS_roundpd || ins == INS_roundsd);

    getEmitter()->emitIns_R_R_I(ins, size, reg1, reg2, ival);
}
1042#endif
1043
1044/*****************************************************************************
1045 *
1046 * Generate an instruction with two registers, the second one being a byte
1047 * or word register (i.e. this is something like "movzx eax, cl").
1048 */
1049
// Emits a two-register instruction whose second operand is a byte or word
// register (e.g. "movzx eax, cl"); on xarch only movsx/movzx are allowed
// and a 1-byte source must be a byte-addressable register.
void CodeGen::inst_RV_RR(instruction ins, emitAttr size, regNumber reg1, regNumber reg2)
{
    assert(size == EA_1BYTE || size == EA_2BYTE);
#ifdef _TARGET_XARCH_
    assert(ins == INS_movsx || ins == INS_movzx);
    assert(size != EA_1BYTE || (genRegMask(reg2) & RBM_BYTE_REGS));
#endif

    getEmitter()->emitIns_R_R(ins, size, reg1, reg2);
}
1060
1061/*****************************************************************************
1062 *
1063 * The following should all end up inline in compiler.hpp at some point.
1064 */
1065
// Emits "ins [tmp+ofs], reg": stores 'reg' into a spill-temp slot.
void CodeGen::inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type)
{
    getEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, tmp->tdTempNum(), ofs);
}
1070
// Emits "ins [tmp+ofs], icon": stores an immediate into a spill-temp slot.
void CodeGen::inst_ST_IV(instruction ins, TempDsc* tmp, unsigned ofs, int val, var_types type)
{
    getEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), tmp->tdTempNum(), ofs, val);
}
1075
1076#if FEATURE_FIXED_OUT_ARGS
1077/*****************************************************************************
1078 *
1079 * Generate an instruction that references the outgoing argument space
1080 * like "str r3, [sp+0x04]"
1081 */
1082
// Emits a register store into the outgoing-argument area at offset 'ofs',
// e.g. "str r3, [sp+0x04]"; 'ofs' must lie within the reserved space.
void CodeGen::inst_SA_RV(instruction ins, unsigned ofs, regNumber reg, var_types type)
{
    assert(ofs < compiler->lvaOutgoingArgSpaceSize);

    getEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, compiler->lvaOutgoingArgSpaceVar, ofs);
}
1089
// Emits an immediate store into the outgoing-argument area at offset 'ofs';
// 'ofs' must lie within the reserved space.
void CodeGen::inst_SA_IV(instruction ins, unsigned ofs, int val, var_types type)
{
    assert(ofs < compiler->lvaOutgoingArgSpaceSize);

    getEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), compiler->lvaOutgoingArgSpaceVar, ofs, val);
}
1096#endif // FEATURE_FIXED_OUT_ARGS
1097
1098/*****************************************************************************
1099 *
1100 * Generate an instruction with one register and one operand that is byte
1101 * or short (e.g. something like "movzx eax, byte ptr [edx]").
1102 */
1103
// Emits a register op with a byte- or word-sized tree operand
// (e.g. "movzx eax, byte ptr [edx]"); thin wrapper over inst_RV_TT.
void CodeGen::inst_RV_ST(instruction ins, emitAttr size, regNumber reg, GenTree* tree)
{
    assert(size == EA_1BYTE || size == EA_2BYTE);

    inst_RV_TT(ins, reg, tree, 0, size);
}
1110
// Emits "ins reg, [tmp+ofs]" against a spill-temp slot. When 'size' is
// EA_UNKNOWN it is derived from 'type'. On ARM only the load-style
// instructions listed below can address a frame slot; INS_mov must be
// translated by the caller via ins_Load.
void CodeGen::inst_RV_ST(instruction ins, regNumber reg, TempDsc* tmp, unsigned ofs, var_types type, emitAttr size)
{
    if (size == EA_UNKNOWN)
    {
        size = emitActualTypeSize(type);
    }

#ifdef _TARGET_ARM_
    switch (ins)
    {
        case INS_mov:
            assert(!"Please call ins_Load(type) to get the load instruction");
            break;

        case INS_add:
        case INS_ldr:
        case INS_ldrh:
        case INS_ldrb:
        case INS_ldrsh:
        case INS_ldrsb:
        case INS_lea:
        case INS_vldr:
            getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
            break;

        default:
            assert(!"Default inst_RV_ST case not supported for Arm");
            break;
    }
#else  // !_TARGET_ARM_
    getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
#endif // !_TARGET_ARM_
}
1144
// Loads the value of 'tree' into 'reg', choosing a widening movsx/movzx-style
// load for sub-4-byte operands and a plain move/extend otherwise.
void CodeGen::inst_mov_RV_ST(regNumber reg, GenTree* tree)
{
    /* Figure out the size of the value being loaded */

    emitAttr    size    = EA_ATTR(genTypeSize(tree->gtType));
    instruction loadIns = ins_Move_Extend(tree->TypeGet(), false);

    if (size < EA_4BYTE)
    {
        /* Generate the "movsx/movzx" opcode */

        inst_RV_ST(loadIns, size, reg, tree);
    }
    else
    {
        /* Compute op1 into the target register */

        inst_RV_TT(loadIns, reg, tree);
    }
}
1165#ifdef _TARGET_XARCH_
// Emits "ins [tmp+ofs]" — a single-operand instruction against a spill-temp
// slot (xarch only).
void CodeGen::inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs)
{
    getEmitter()->emitIns_S(ins, size, tmp->tdTempNum(), ofs);
}
1170#endif
1171
1172#ifdef _TARGET_ARM_
1173bool CodeGenInterface::validImmForInstr(instruction ins, target_ssize_t imm, insFlags flags)
1174{
1175 if (getEmitter()->emitInsIsLoadOrStore(ins) && !instIsFP(ins))
1176 {
1177 return validDispForLdSt(imm, TYP_INT);
1178 }
1179
1180 bool result = false;
1181 switch (ins)
1182 {
1183 case INS_cmp:
1184 case INS_cmn:
1185 if (validImmForAlu(imm) || validImmForAlu(-imm))
1186 result = true;
1187 break;
1188
1189 case INS_and:
1190 case INS_bic:
1191 case INS_orr:
1192 case INS_orn:
1193 case INS_mvn:
1194 if (validImmForAlu(imm) || validImmForAlu(~imm))
1195 result = true;
1196 break;
1197
1198 case INS_mov:
1199 if (validImmForMov(imm))
1200 result = true;
1201 break;
1202
1203 case INS_addw:
1204 case INS_subw:
1205 if ((unsigned_abs(imm) <= 0x00000fff) && (flags != INS_FLAGS_SET)) // 12-bit immediate
1206 result = true;
1207 break;
1208
1209 case INS_add:
1210 case INS_sub:
1211 if (validImmForAdd(imm, flags))
1212 result = true;
1213 break;
1214
1215 case INS_tst:
1216 case INS_eor:
1217 case INS_teq:
1218 case INS_adc:
1219 case INS_sbc:
1220 case INS_rsb:
1221 if (validImmForAlu(imm))
1222 result = true;
1223 break;
1224
1225 case INS_asr:
1226 case INS_lsl:
1227 case INS_lsr:
1228 case INS_ror:
1229 if (imm > 0 && imm <= 32)
1230 result = true;
1231 break;
1232
1233 case INS_vstr:
1234 case INS_vldr:
1235 if ((imm & 0x3FC) == imm)
1236 result = true;
1237 break;
1238
1239 default:
1240 break;
1241 }
1242 return result;
1243}
// arm_Valid_Imm_For_Instr: CodeGen-level wrapper; see validImmForInstr.
bool CodeGen::arm_Valid_Imm_For_Instr(instruction ins, target_ssize_t imm, insFlags flags)
{
    return validImmForInstr(ins, imm, flags);
}
1248
1249bool CodeGenInterface::validDispForLdSt(target_ssize_t disp, var_types type)
1250{
1251 if (varTypeIsFloating(type))
1252 {
1253 if ((disp & 0x3FC) == disp)
1254 return true;
1255 else
1256 return false;
1257 }
1258 else
1259 {
1260 if ((disp >= -0x00ff) && (disp <= 0x0fff))
1261 return true;
1262 else
1263 return false;
1264 }
1265}
// arm_Valid_Disp_For_LdSt: CodeGen-level wrapper; see validDispForLdSt.
bool CodeGen::arm_Valid_Disp_For_LdSt(target_ssize_t disp, var_types type)
{
    return validDispForLdSt(disp, type);
}
1270
// validImmForAlu: true if 'imm' is encodable as an ALU (data-processing)
// immediate; defers to the emitter's encoder check.
bool CodeGenInterface::validImmForAlu(target_ssize_t imm)
{
    return emitter::emitIns_valid_imm_for_alu(imm);
}
// arm_Valid_Imm_For_Alu: CodeGen-level wrapper; see validImmForAlu.
bool CodeGen::arm_Valid_Imm_For_Alu(target_ssize_t imm)
{
    return validImmForAlu(imm);
}
1279
// validImmForMov: true if 'imm' is encodable as a "mov" immediate; defers to
// the emitter's encoder check.
bool CodeGenInterface::validImmForMov(target_ssize_t imm)
{
    return emitter::emitIns_valid_imm_for_mov(imm);
}
// arm_Valid_Imm_For_Mov: CodeGen-level wrapper; see validImmForMov.
bool CodeGen::arm_Valid_Imm_For_Mov(target_ssize_t imm)
{
    return validImmForMov(imm);
}
1288
// arm_Valid_Imm_For_Small_Mov: true if 'imm' can be moved into 'reg' with a
// small (16-bit) mov encoding; defers to the emitter's check. The target
// register matters because the narrow encoding only reaches certain registers.
bool CodeGen::arm_Valid_Imm_For_Small_Mov(regNumber reg, target_ssize_t imm, insFlags flags)
{
    return emitter::emitIns_valid_imm_for_small_mov(reg, imm, flags);
}
1293
// validImmForAdd: true if 'imm' is encodable as an add/sub immediate (given
// the required flag-setting behavior); defers to the emitter's encoder check.
bool CodeGenInterface::validImmForAdd(target_ssize_t imm, insFlags flags)
{
    return emitter::emitIns_valid_imm_for_add(imm, flags);
}
1298bool CodeGen::arm_Valid_Imm_For_Add(target_ssize_t imm, insFlags flags)
1299{
1300 return emitter::emitIns_valid_imm_for_add(imm, flags);
1301}
1302
// Check "add Rd,SP,i10": true if 'imm' is encodable as the immediate of an
// add-from-SP instruction; defers to the emitter's encoder check.
bool CodeGen::arm_Valid_Imm_For_Add_SP(target_ssize_t imm)
{
    return emitter::emitIns_valid_imm_for_add_sp(imm);
}
1308
// validImmForBL: Can a call to 'addr' be emitted as a direct "BL"
// (branch-with-link) instruction?
bool CodeGenInterface::validImmForBL(ssize_t addr)
{
    return
        // If we are running the altjit for NGEN, then assume we can use the "BL" instruction.
        // This matches the usual behavior for NGEN, since we normally do generate "BL".
        (!compiler->info.compMatchedVM && compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) ||
        // Otherwise, ask the VM whether the target is reachable by a Thumb BL branch.
        (compiler->eeGetRelocTypeHint((void*)addr) == IMAGE_REL_BASED_THUMB_BRANCH24);
}
// arm_Valid_Imm_For_BL: CodeGen-level wrapper; see validImmForBL.
bool CodeGen::arm_Valid_Imm_For_BL(ssize_t addr)
{
    return validImmForBL(addr);
}
1321
1322// Returns true if this instruction writes to a destination register
1323//
1324bool CodeGen::ins_Writes_Dest(instruction ins)
1325{
1326 switch (ins)
1327 {
1328
1329 case INS_cmp:
1330 case INS_cmn:
1331 case INS_tst:
1332 case INS_teq:
1333 return false;
1334
1335 default:
1336 return true;
1337 }
1338}
1339#endif // _TARGET_ARM_
1340
1341#if defined(_TARGET_ARM64_)
// validImmForBL: Can a call to 'addr' be emitted as a direct "bl"? Always
// true on arm64; out-of-range targets are patched via jump stubs (see below).
bool CodeGenInterface::validImmForBL(ssize_t addr)
{
    // On arm64, we always assume a call target is in range and generate a 28-bit relative
    // 'bl' instruction. If this isn't sufficient range, the VM will generate a jump stub when
    // we call recordRelocation(). See the IMAGE_REL_ARM64_BRANCH26 case in jitinterface.cpp
    // (for JIT) or zapinfo.cpp (for NGEN). If we cannot allocate a jump stub, it is fatal.
    return true;
}
1350#endif // _TARGET_ARM64_
1351
1352/*****************************************************************************
1353 *
1354 * Get the machine dependent instruction for performing sign/zero extension.
1355 *
1356 * Parameters
1357 * srcType - source type
1358 * srcInReg - whether source is in a register
1359 */
instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg)
{
    instruction ins = INS_invalid;

    // SIMD sources are handled up front; the supported targets return directly.
    if (varTypeIsSIMD(srcType))
    {
#if defined(_TARGET_XARCH_)
        // SSE2/AVX requires destination to be a reg always.
        // If src is in reg means, it is a reg-reg move.
        //
        // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
        // former doesn't require 66h prefix and one byte smaller than the
        // latter.
        //
        // TODO-CQ: based on whether src type is aligned use movaps instead

        return (srcInReg) ? INS_movaps : INS_movups;
#elif defined(_TARGET_ARM64_)
        return (srcInReg) ? INS_mov : ins_Load(srcType);
#else // !defined(_TARGET_ARM64_) && !defined(_TARGET_XARCH_)
        assert(!"unhandled SIMD type");
#endif // !defined(_TARGET_ARM64_) && !defined(_TARGET_XARCH_)
    }

    // Floating-point sources.
#if defined(_TARGET_XARCH_)
    if (varTypeIsFloating(srcType))
    {
        if (srcType == TYP_DOUBLE)
        {
            return (srcInReg) ? INS_movaps : INS_movsdsse2;
        }
        else if (srcType == TYP_FLOAT)
        {
            return (srcInReg) ? INS_movaps : INS_movss;
        }
        else
        {
            assert(!"unhandled floating type");
        }
    }
#elif defined(_TARGET_ARM_)
    if (varTypeIsFloating(srcType))
        return INS_vmov;
#else
    assert(!varTypeIsFloating(srcType));
#endif

    // Integer sources: pick a plain move for full-width types, or the
    // appropriate sign/zero-extending form for small types.
#if defined(_TARGET_XARCH_)
    if (!varTypeIsSmall(srcType))
    {
        ins = INS_mov;
    }
    else if (varTypeIsUnsigned(srcType))
    {
        ins = INS_movzx;
    }
    else
    {
        ins = INS_movsx;
    }
#elif defined(_TARGET_ARM_)
    //
    // Register to Register zero/sign extend operation
    //
    if (srcInReg)
    {
        if (!varTypeIsSmall(srcType))
        {
            ins = INS_mov;
        }
        else if (varTypeIsUnsigned(srcType))
        {
            if (varTypeIsByte(srcType))
                ins = INS_uxtb;
            else
                ins = INS_uxth;
        }
        else
        {
            if (varTypeIsByte(srcType))
                ins = INS_sxtb;
            else
                ins = INS_sxth;
        }
    }
    else
    {
        // Source is in memory: the extending load does the widening.
        ins = ins_Load(srcType);
    }
#elif defined(_TARGET_ARM64_)
    //
    // Register to Register zero/sign extend operation
    //
    if (srcInReg)
    {
        if (varTypeIsUnsigned(srcType))
        {
            if (varTypeIsByte(srcType))
            {
                ins = INS_uxtb;
            }
            else if (varTypeIsShort(srcType))
            {
                ins = INS_uxth;
            }
            else
            {
                // A mov Rd, Rm instruction performs the zero extend
                // for the upper 32 bits when the size is EA_4BYTE

                ins = INS_mov;
            }
        }
        else
        {
            if (varTypeIsByte(srcType))
            {
                ins = INS_sxtb;
            }
            else if (varTypeIsShort(srcType))
            {
                ins = INS_sxth;
            }
            else
            {
                if (srcType == TYP_INT)
                {
                    ins = INS_sxtw;
                }
                else
                {
                    ins = INS_mov;
                }
            }
        }
    }
    else
    {
        // Source is in memory: the extending load does the widening.
        ins = ins_Load(srcType);
    }
#else
    NYI("ins_Move_Extend");
#endif
    assert(ins != INS_invalid);
    return ins;
}
1506
1507/*****************************************************************************
1508 *
1509 * Get the machine dependent instruction for performing a load for srcType
1510 *
1511 * Parameters
1512 * srcType - source type
1513 * aligned - whether source is properly aligned if srcType is a SIMD type
1514 */
instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*/)
{
    instruction ins = INS_invalid;

    // SIMD loads: the supported targets return directly.
    if (varTypeIsSIMD(srcType))
    {
#if defined(_TARGET_XARCH_)
#ifdef FEATURE_SIMD
        if (srcType == TYP_SIMD8)
        {
            return INS_movsdsse2;
        }
        else
#endif // FEATURE_SIMD
            if (compiler->canUseVexEncoding())
        {
            return (aligned) ? INS_movapd : INS_movupd;
        }
        else
        {
            // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
            // former doesn't require 66h prefix and one byte smaller than the
            // latter.
            return (aligned) ? INS_movaps : INS_movups;
        }
#elif defined(_TARGET_ARM64_)
        return INS_ldr;
#else
        assert(!"ins_Load with SIMD type");
#endif
    }

    // Floating-point loads.
    if (varTypeIsFloating(srcType))
    {
#if defined(_TARGET_XARCH_)
        if (srcType == TYP_DOUBLE)
        {
            return INS_movsdsse2;
        }
        else if (srcType == TYP_FLOAT)
        {
            return INS_movss;
        }
        else
        {
            assert(!"unhandled floating type");
        }
#elif defined(_TARGET_ARM64_)
        return INS_ldr;
#elif defined(_TARGET_ARM_)
        return INS_vldr;
#else
        assert(!varTypeIsFloating(srcType));
#endif
    }

    // Integer loads: full-width types use a plain load; small types use the
    // width- and signedness-appropriate extending load.
#if defined(_TARGET_XARCH_)
    if (!varTypeIsSmall(srcType))
    {
        ins = INS_mov;
    }
    else if (varTypeIsUnsigned(srcType))
    {
        ins = INS_movzx;
    }
    else
    {
        ins = INS_movsx;
    }

#elif defined(_TARGET_ARMARCH_)
    if (!varTypeIsSmall(srcType))
    {
#if defined(_TARGET_ARM64_)
        // Signed 32-bit values on arm64 need a sign-extending word load.
        if (!varTypeIsI(srcType) && !varTypeIsUnsigned(srcType))
        {
            ins = INS_ldrsw;
        }
        else
#endif // defined(_TARGET_ARM64_)
        {
            ins = INS_ldr;
        }
    }
    else if (varTypeIsByte(srcType))
    {
        if (varTypeIsUnsigned(srcType))
            ins = INS_ldrb;
        else
            ins = INS_ldrsb;
    }
    else if (varTypeIsShort(srcType))
    {
        if (varTypeIsUnsigned(srcType))
            ins = INS_ldrh;
        else
            ins = INS_ldrsh;
    }
#else
    NYI("ins_Load");
#endif

    assert(ins != INS_invalid);
    return ins;
}
1620
1621/*****************************************************************************
1622 *
1623 * Get the machine dependent instruction for performing a reg-reg copy for dstType
1624 *
1625 * Parameters
1626 * dstType - destination type
1627 */
1628instruction CodeGen::ins_Copy(var_types dstType)
1629{
1630#if defined(_TARGET_XARCH_)
1631 if (varTypeIsSIMD(dstType))
1632 {
1633 return INS_movaps;
1634 }
1635 else if (varTypeIsFloating(dstType))
1636 {
1637 // Both float and double copy can use movaps
1638 return INS_movaps;
1639 }
1640 else
1641 {
1642 return INS_mov;
1643 }
1644#elif defined(_TARGET_ARM64_)
1645 if (varTypeIsFloating(dstType))
1646 {
1647 return INS_fmov;
1648 }
1649 else
1650 {
1651 return INS_mov;
1652 }
1653#elif defined(_TARGET_ARM_)
1654 assert(!varTypeIsSIMD(dstType));
1655 if (varTypeIsFloating(dstType))
1656 {
1657 return INS_vmov;
1658 }
1659 else
1660 {
1661 return INS_mov;
1662 }
1663#elif defined(_TARGET_X86_)
1664 assert(!varTypeIsSIMD(dstType));
1665 assert(!varTypeIsFloating(dstType));
1666 return INS_mov;
1667#else // _TARGET_*
1668#error "Unknown _TARGET_"
1669#endif
1670}
1671
1672/*****************************************************************************
1673 *
1674 * Get the machine dependent instruction for performing a store for dstType
1675 *
1676 * Parameters
1677 * dstType - destination type
1678 * aligned - whether destination is properly aligned if dstType is a SIMD type
1679 */
instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false*/)
{
    instruction ins = INS_invalid;

    // SIMD and floating-point stores: supported targets return directly.
#if defined(_TARGET_XARCH_)
    if (varTypeIsSIMD(dstType))
    {
#ifdef FEATURE_SIMD
        if (dstType == TYP_SIMD8)
        {
            return INS_movsdsse2;
        }
        else
#endif // FEATURE_SIMD
            if (compiler->canUseVexEncoding())
        {
            return (aligned) ? INS_movapd : INS_movupd;
        }
        else
        {
            // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
            // former doesn't require 66h prefix and one byte smaller than the
            // latter.
            return (aligned) ? INS_movaps : INS_movups;
        }
    }
    else if (varTypeIsFloating(dstType))
    {
        if (dstType == TYP_DOUBLE)
        {
            return INS_movsdsse2;
        }
        else if (dstType == TYP_FLOAT)
        {
            return INS_movss;
        }
        else
        {
            assert(!"unhandled floating type");
        }
    }
#elif defined(_TARGET_ARM64_)
    if (varTypeIsSIMD(dstType) || varTypeIsFloating(dstType))
    {
        // All sizes of SIMD and FP instructions use INS_str
        return INS_str;
    }
#elif defined(_TARGET_ARM_)
    assert(!varTypeIsSIMD(dstType));
    if (varTypeIsFloating(dstType))
    {
        return INS_vstr;
    }
#else
    assert(!varTypeIsSIMD(dstType));
    assert(!varTypeIsFloating(dstType));
#endif

    // Integer stores: stores never widen, so only the operand width matters.
#if defined(_TARGET_XARCH_)
    ins = INS_mov;
#elif defined(_TARGET_ARMARCH_)
    if (!varTypeIsSmall(dstType))
        ins = INS_str;
    else if (varTypeIsByte(dstType))
        ins = INS_strb;
    else if (varTypeIsShort(dstType))
        ins = INS_strh;
#else
    NYI("ins_Store");
#endif

    assert(ins != INS_invalid);
    return ins;
}
1754
1755#if defined(_TARGET_XARCH_)
1756
// isMoveIns: true if 'ins' is a plain register move on xarch.
bool CodeGen::isMoveIns(instruction ins)
{
    return (ins == INS_mov);
}
1761
// ins_FloatLoad: intentionally unreachable on xarch.
instruction CodeGenInterface::ins_FloatLoad(var_types type)
{
    // Do Not use this routine in RyuJIT backend. Instead use ins_Load()/ins_Store()
    unreached();
}
1767
// everything is just an addressing mode variation on x64
// ins_FloatStore: intentionally unreachable on xarch.
instruction CodeGen::ins_FloatStore(var_types type)
{
    // Do Not use this routine in RyuJIT backend. Instead use ins_Store()
    unreached();
}
1774
// ins_FloatCopy: intentionally unreachable on xarch.
instruction CodeGen::ins_FloatCopy(var_types type)
{
    // Do Not use this routine in RyuJIT backend. Instead use ins_Copy().
    unreached();
}
1780
1781instruction CodeGen::ins_FloatCompare(var_types type)
1782{
1783 return (type == TYP_FLOAT) ? INS_ucomiss : INS_ucomisd;
1784}
1785
// ins_CopyIntToFloat: instruction that moves an integer register's bits into
// an XMM register. The same opcode handles 32- and 64-bit sources; the emit
// attribute selects the operand size.
instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
{
    // On SSE2/AVX - the same instruction is used for moving double/quad word to XMM/YMM register.
    assert((srcType == TYP_INT) || (srcType == TYP_UINT) || (srcType == TYP_LONG) || (srcType == TYP_ULONG));

#if !defined(_TARGET_64BIT_)
    // No 64-bit registers on x86.
    assert((srcType != TYP_LONG) && (srcType != TYP_ULONG));
#endif // !defined(_TARGET_64BIT_)

    return INS_mov_i2xmm;
}
1798
// ins_CopyFloatToInt: instruction that moves an XMM register's bits into an
// integer register. The same opcode handles 32- and 64-bit destinations; the
// emit attribute selects the operand size.
instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
{
    // On SSE2/AVX - the same instruction is used for moving double/quad word of XMM/YMM to an integer register.
    assert((dstType == TYP_INT) || (dstType == TYP_UINT) || (dstType == TYP_LONG) || (dstType == TYP_ULONG));

#if !defined(_TARGET_64BIT_)
    // No 64-bit registers on x86.
    assert((dstType != TYP_LONG) && (dstType != TYP_ULONG));
#endif // !defined(_TARGET_64BIT_)

    return INS_mov_xmm2i;
}
1811
1812instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
1813{
1814 switch (oper)
1815 {
1816 case GT_ADD:
1817 return type == TYP_DOUBLE ? INS_addsd : INS_addss;
1818 case GT_SUB:
1819 return type == TYP_DOUBLE ? INS_subsd : INS_subss;
1820 case GT_MUL:
1821 return type == TYP_DOUBLE ? INS_mulsd : INS_mulss;
1822 case GT_DIV:
1823 return type == TYP_DOUBLE ? INS_divsd : INS_divss;
1824 default:
1825 unreached();
1826 }
1827}
1828
1829instruction CodeGen::ins_FloatSqrt(var_types type)
1830{
1831 instruction ins = INS_invalid;
1832
1833 if (type == TYP_DOUBLE)
1834 {
1835 ins = INS_sqrtsd;
1836 }
1837 else if (type == TYP_FLOAT)
1838 {
1839 ins = INS_sqrtss;
1840 }
1841 else
1842 {
1843 assert(!"ins_FloatSqrt: Unsupported type");
1844 unreached();
1845 }
1846
1847 return ins;
1848}
1849
// Conversions to or from floating point values
// ins_FloatConv: instruction converting 'from' to 'to'. Identity conversions
// fall back to ins_Move_Extend. Integer-source cases share an opcode for
// int/long; the emit attribute distinguishes the operand size.
instruction CodeGen::ins_FloatConv(var_types to, var_types from)
{
    // AVX: For now we support only conversion from Int/Long -> float

    switch (from)
    {
        // int/long -> float/double use the same instruction but type size would be different.
        case TYP_INT:
        case TYP_LONG:
            switch (to)
            {
                case TYP_FLOAT:
                    return INS_cvtsi2ss;
                case TYP_DOUBLE:
                    return INS_cvtsi2sd;
                default:
                    unreached();
            }
            break;

        case TYP_FLOAT:
            switch (to)
            {
                case TYP_INT:
                    return INS_cvttss2si;
                case TYP_LONG:
                    // Same opcode as the TYP_INT case; operand size differs.
                    return INS_cvttss2si;
                case TYP_FLOAT:
                    return ins_Move_Extend(TYP_FLOAT, false);
                case TYP_DOUBLE:
                    return INS_cvtss2sd;
                default:
                    unreached();
            }
            break;

        case TYP_DOUBLE:
            switch (to)
            {
                case TYP_INT:
                    return INS_cvttsd2si;
                case TYP_LONG:
                    // Same opcode as the TYP_INT case; operand size differs.
                    return INS_cvttsd2si;
                case TYP_FLOAT:
                    return INS_cvtsd2ss;
                case TYP_DOUBLE:
                    return ins_Move_Extend(TYP_DOUBLE, false);
                default:
                    unreached();
            }
            break;

        default:
            unreached();
    }
}
1907
1908#elif defined(_TARGET_ARM_)
1909
// isMoveIns: true if 'ins' is a register move (integer or VFP) on ARM.
bool CodeGen::isMoveIns(instruction ins)
{
    return (ins == INS_vmov) || (ins == INS_mov);
}
1914
// ins_FloatLoad: the VFP load instruction for float/double.
instruction CodeGenInterface::ins_FloatLoad(var_types type)
{
    assert(type == TYP_DOUBLE || type == TYP_FLOAT);
    return INS_vldr;
}
// ins_FloatStore: the VFP store instruction for float/double.
instruction CodeGen::ins_FloatStore(var_types type)
{
    assert(type == TYP_DOUBLE || type == TYP_FLOAT);
    return INS_vstr;
}
// ins_FloatCopy: the VFP reg-reg copy instruction for float/double.
instruction CodeGen::ins_FloatCopy(var_types type)
{
    assert(type == TYP_DOUBLE || type == TYP_FLOAT);
    return INS_vmov;
}
1930
1931instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
1932{
1933 assert((dstType == TYP_FLOAT) || (dstType == TYP_DOUBLE));
1934 assert((srcType == TYP_INT) || (srcType == TYP_UINT) || (srcType == TYP_LONG) || (srcType == TYP_ULONG));
1935
1936 if ((srcType == TYP_LONG) || (srcType == TYP_ULONG))
1937 {
1938 return INS_vmov_i2d;
1939 }
1940 else
1941 {
1942 return INS_vmov_i2f;
1943 }
1944}
1945
1946instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
1947{
1948 assert((srcType == TYP_FLOAT) || (srcType == TYP_DOUBLE));
1949 assert((dstType == TYP_INT) || (dstType == TYP_UINT) || (dstType == TYP_LONG) || (dstType == TYP_ULONG));
1950
1951 if ((dstType == TYP_LONG) || (dstType == TYP_ULONG))
1952 {
1953 return INS_vmov_d2i;
1954 }
1955 else
1956 {
1957 return INS_vmov_f2i;
1958 }
1959}
1960
// ins_FloatCompare: intentionally unreachable on ARM.
instruction CodeGen::ins_FloatCompare(var_types type)
{
    // Not used and not implemented
    unreached();
}
1966
// ins_FloatSqrt: intentionally unreachable on ARM.
instruction CodeGen::ins_FloatSqrt(var_types type)
{
    // Not used and not implemented
    unreached();
}
1972
1973instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
1974{
1975 switch (oper)
1976 {
1977 case GT_ADD:
1978 return INS_vadd;
1979 case GT_SUB:
1980 return INS_vsub;
1981 case GT_MUL:
1982 return INS_vmul;
1983 case GT_DIV:
1984 return INS_vdiv;
1985 case GT_NEG:
1986 return INS_vneg;
1987 default:
1988 unreached();
1989 }
1990}
1991
// ins_FloatConv: the VFP instruction converting 'from' to 'to'. Identity
// float/double conversions use vmov; long<->FP conversions are not yet
// implemented on ARM (NYI halts compilation of the method).
instruction CodeGen::ins_FloatConv(var_types to, var_types from)
{
    switch (from)
    {
        case TYP_INT:
            switch (to)
            {
                case TYP_FLOAT:
                    return INS_vcvt_i2f;
                case TYP_DOUBLE:
                    return INS_vcvt_i2d;
                default:
                    unreached();
            }
            break;
        case TYP_UINT:
            switch (to)
            {
                case TYP_FLOAT:
                    return INS_vcvt_u2f;
                case TYP_DOUBLE:
                    return INS_vcvt_u2d;
                default:
                    unreached();
            }
            break;
        case TYP_LONG:
            switch (to)
            {
                case TYP_FLOAT:
                    NYI("long to float");
                case TYP_DOUBLE:
                    NYI("long to double");
                default:
                    unreached();
            }
            break;
        case TYP_FLOAT:
            switch (to)
            {
                case TYP_INT:
                    return INS_vcvt_f2i;
                case TYP_UINT:
                    return INS_vcvt_f2u;
                case TYP_LONG:
                    NYI("float to long");
                case TYP_DOUBLE:
                    return INS_vcvt_f2d;
                case TYP_FLOAT:
                    return INS_vmov;
                default:
                    unreached();
            }
            break;
        case TYP_DOUBLE:
            switch (to)
            {
                case TYP_INT:
                    return INS_vcvt_d2i;
                case TYP_UINT:
                    return INS_vcvt_d2u;
                case TYP_LONG:
                    NYI("double to long");
                case TYP_FLOAT:
                    return INS_vcvt_d2f;
                case TYP_DOUBLE:
                    return INS_vmov;
                default:
                    unreached();
            }
            break;
        default:
            unreached();
    }
}
2067
2068#endif // #elif defined(_TARGET_ARM_)
2069
2070/*****************************************************************************
2071 *
2072 * Machine independent way to return
2073 */
void CodeGen::instGen_Return(unsigned stkArgSize)
{
#if defined(_TARGET_XARCH_)
    if (stkArgSize == 0)
    {
        instGen(INS_ret);
    }
    else
    {
        // "ret N": the callee pops N bytes of stack arguments on return.
        inst_IV(INS_ret, stkArgSize);
    }
#elif defined(_TARGET_ARM_)
//
// The return on ARM is folded into the pop multiple instruction
// and as we do not know the exact set of registers that we will
// need to restore (pop) when we first call instGen_Return we will
// instead just not emit anything for this method on the ARM
// The return will be part of the pop multiple and that will be
// part of the epilog that is generated by genFnEpilog()
#elif defined(_TARGET_ARM64_)
    // This function shouldn't be used on ARM64.
    unreached();
#else
    NYI("instGen_Return");
#endif
}
2100
2101/*****************************************************************************
2102 *
2103 * Emit a MemoryBarrier instruction
2104 *
2105 * Note: all MemoryBarriers instructions can be removed by
2106 * SET COMPlus_JitNoMemoryBarriers=1
2107 */
#ifdef _TARGET_ARM64_
// On ARM64 the caller chooses the barrier strength via 'barrierType'.
void CodeGen::instGen_MemoryBarrier(insBarrier barrierType)
#else
void CodeGen::instGen_MemoryBarrier()
#endif
{
#ifdef DEBUG
    // Allow barriers to be suppressed for debugging (COMPlus_JitNoMemoryBarriers=1).
    if (JitConfig.JitNoMemoryBarriers() == 1)
    {
        return;
    }
#endif // DEBUG

#if defined(_TARGET_XARCH_)
    // "lock or dword ptr [esp], 0": a locked RMW on the stack acts as a full barrier.
    instGen(INS_lock);
    getEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
#elif defined(_TARGET_ARM_)
    // "dmb 0xf": option 0xf encodes SY (full-system) — see the ARM ARM.
    getEmitter()->emitIns_I(INS_dmb, EA_4BYTE, 0xf);
#elif defined(_TARGET_ARM64_)
    getEmitter()->emitIns_BARR(INS_dmb, barrierType);
#else
#error "Unknown _TARGET_"
#endif
}
2132
2133/*****************************************************************************
2134 *
2135 * Machine independent way to move a Zero value into a register
2136 */
void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags)
{
#if defined(_TARGET_XARCH_)
    // "xor reg, reg" is the canonical (shortest) way to zero a register on xarch.
    getEmitter()->emitIns_R_R(INS_xor, size, reg, reg);
#elif defined(_TARGET_ARMARCH_)
    getEmitter()->emitIns_R_I(INS_mov, size, reg, 0 ARM_ARG(flags));
#else
#error "Unknown _TARGET_"
#endif
    regSet.verifyRegUsed(reg);
}
2148
2149/*****************************************************************************
2150 *
2151 * Machine independent way to set the flags based on
2152 * comparing a register with zero
2153 */
void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size, regNumber reg)
{
#if defined(_TARGET_XARCH_)
    // "test reg, reg" sets the flags from the register without an immediate.
    getEmitter()->emitIns_R_R(INS_test, size, reg, reg);
#elif defined(_TARGET_ARMARCH_)
    getEmitter()->emitIns_R_I(INS_cmp, size, reg, 0);
#else
#error "Unknown _TARGET_"
#endif
}
2164
2165/*****************************************************************************
2166 *
2167 * Machine independent way to set the flags based upon
2168 * comparing a register with another register
2169 */
void CodeGen::instGen_Compare_Reg_To_Reg(emitAttr size, regNumber reg1, regNumber reg2)
{
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
    // All supported targets have a direct reg-reg compare.
    getEmitter()->emitIns_R_R(INS_cmp, size, reg1, reg2);
#else
#error "Unknown _TARGET_"
#endif
}
2178
2179/*****************************************************************************
2180 *
2181 * Machine independent way to set the flags based upon
2182 * comparing a register with an immediate
2183 */
void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size, regNumber reg, target_ssize_t imm)
{
    if (imm == 0)
    {
        // Use the cheaper/shorter compare-to-zero form.
        instGen_Compare_Reg_To_Zero(size, reg);
    }
    else
    {
#if defined(_TARGET_XARCH_)
#if defined(_TARGET_AMD64_)
        // 64-bit cmp only takes a sign-extended 32-bit immediate; wider (or
        // relocatable) immediates would need a scratch register, which this
        // helper does not have.
        if ((EA_SIZE(size) == EA_8BYTE) && (((int)imm != (ssize_t)imm) || EA_IS_CNS_RELOC(size)))
        {
            assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
        }
        else
#endif // _TARGET_AMD64_
        {
            getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
        }
#elif defined(_TARGET_ARM_)
        // -imm also works because the encoder may flip cmp<->cmn.
        if (arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
        {
            getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
        }
        else // We need a scratch register
        {
            assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
        }
#elif defined(_TARGET_ARM64_)
        if (true) // TODO-ARM64-NYI: arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
        {
            getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
        }
        else // We need a scratch register
        {
            assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
        }
#else
#error "Unknown _TARGET_"
#endif
    }
}
2226
2227/*****************************************************************************
2228 *
2229 * Machine independent way to move a stack based local variable into a register
2230 */
2231void CodeGen::instGen_Load_Reg_From_Lcl(var_types srcType, regNumber dstReg, int varNum, int offs)
2232{
2233 emitAttr size = emitTypeSize(srcType);
2234
2235 getEmitter()->emitIns_R_S(ins_Load(srcType), size, dstReg, varNum, offs);
2236}
2237
2238/*****************************************************************************
2239 *
2240 * Machine independent way to move a register into a stack based local variable
2241 */
2242void CodeGen::instGen_Store_Reg_Into_Lcl(var_types dstType, regNumber srcReg, int varNum, int offs)
2243{
2244 emitAttr size = emitTypeSize(dstType);
2245
2246 getEmitter()->emitIns_S_R(ins_Store(dstType), size, srcReg, varNum, offs);
2247}
2248
2249/*****************************************************************************
2250 *
2251 * Machine independent way to move an immediate into a stack based local variable
2252 */
void CodeGen::instGen_Store_Imm_Into_Lcl(
    var_types dstType, emitAttr sizeAttr, ssize_t imm, int varNum, int offs, regNumber regToUse)
{
#ifdef _TARGET_XARCH_
#ifdef _TARGET_AMD64_
    // 64-bit store-immediate only takes a sign-extended 32-bit immediate;
    // wider (or relocatable) immediates are rejected here.
    if ((EA_SIZE(sizeAttr) == EA_8BYTE) && (((int)imm != (ssize_t)imm) || EA_IS_CNS_RELOC(sizeAttr)))
    {
        assert(!"Invalid immediate for instGen_Store_Imm_Into_Lcl");
    }
    else
#endif // _TARGET_AMD64_
    {
        getEmitter()->emitIns_S_I(ins_Store(dstType), sizeAttr, varNum, offs, (int)imm);
    }
#elif defined(_TARGET_ARMARCH_)
    // ARM has no store-immediate: load imm into the caller-provided scratch
    // register, then store that register.
    // Load imm into a register
    regNumber immReg = regToUse;
    assert(regToUse != REG_NA);
    instGen_Set_Reg_To_Imm(sizeAttr, immReg, (ssize_t)imm);
    instGen_Store_Reg_Into_Lcl(dstType, immReg, varNum, offs);
    if (EA_IS_RELOC(sizeAttr))
    {
        regSet.verifyRegUsed(immReg);
    }
#else  // _TARGET_*
#error "Unknown _TARGET_"
#endif // _TARGET_*
}
2281
2282/*****************************************************************************/
2283/*****************************************************************************/
2284/*****************************************************************************/
2285