/*
** IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_asm_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#endif
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_ircall.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_target.h"

#ifdef LUA_USE_ASSERT
#include <stdio.h>
#endif

/* -- Assembler state and common macros ----------------------------------- */

/* Assembler state. */
typedef struct ASMState {
  RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */

  MCode *mcp;  /* Current MCode pointer (grows down). */
  MCode *mclim;  /* Lower limit for MCode memory + red zone. */
#ifdef LUA_USE_ASSERT
  MCode *mcp_prev;  /* Red zone overflow check. */
#endif

  IRIns *ir;  /* Copy of pointer to IR instructions/constants. */
  jit_State *J;  /* JIT compiler state. */

#if LJ_TARGET_X86ORX64
  x86ModRM mrm;  /* Fused x86 address operand. */
#endif

  RegSet freeset;  /* Set of free registers. */
  RegSet modset;  /* Set of registers modified inside the loop. */
  RegSet weakset;  /* Set of weakly referenced registers. */
  RegSet phiset;  /* Set of PHI registers. */

  uint32_t flags;  /* Copy of JIT compiler flags. */
  int loopinv;  /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */

  int32_t evenspill;  /* Next even spill slot. */
  int32_t oddspill;  /* Next odd spill slot (or 0). */

  IRRef curins;  /* Reference of current instruction. */
  IRRef stopins;  /* Stop assembly before hitting this instruction. */
  IRRef orignins;  /* Original T->nins. */

  IRRef snapref;  /* Current snapshot is active after this reference. */
  IRRef snaprename;  /* Rename highwater mark for snapshot check. */
  SnapNo snapno;  /* Current snapshot number. */
  SnapNo loopsnapno;  /* Loop snapshot number. */
  int snapalloc;  /* Current snapshot needs allocation. */
  BloomFilter snapfilt1, snapfilt2;  /* Filled with snapshot refs. */

  IRRef fuseref;  /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
  IRRef sectref;  /* Section base reference (loopref or 0). */
  IRRef loopref;  /* Reference of LOOP instruction (or 0). */

  BCReg topslot;  /* Number of slots for stack check (unless 0). */
  int32_t gcsteps;  /* Accumulated number of GC steps (per section). */

  GCtrace *T;  /* Trace to assemble. */
  GCtrace *parent;  /* Parent trace (or NULL). */

  MCode *mcbot;  /* Bottom of reserved MCode. */
  MCode *mctop;  /* Top of generated MCode. */
  MCode *mctoporig;  /* Original top of generated MCode. */
  MCode *mcloop;  /* Pointer to loop MCode (or NULL). */
  MCode *invmcp;  /* Points to invertible loop branch (or NULL). */
  MCode *flagmcp;  /* Pending opportunity to merge flag setting ins. */
  MCode *realign;  /* Realign loop if not NULL. */

#ifdef RID_NUM_KREF
  intptr_t krefk[RID_NUM_KREF];  /* Constants held in KREF scratch registers. */
#endif
  IRRef1 phireg[RID_MAX];  /* PHI register references. */
  uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent instruction to RegSP map. */
} ASMState;

#ifdef LUA_USE_ASSERT
#define lj_assertA(c, ...) lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
#else
#define lj_assertA(c, ...) ((void)as)
#endif

#define IR(ref) (&as->ir[(ref)])

#define ASMREF_TMP1 REF_TRUE  /* Temp. register. */
#define ASMREF_TMP2 REF_FALSE  /* Temp. register. */
#define ASMREF_L REF_NIL  /* Stores register for L. */

/* Check for variant-to-invariant references. */
#define iscrossref(as, ref) ((ref) < as->sectref)

/* Inhibit memory op fusion from variant to invariant references. */
#define FUSE_DISABLED (~(IRRef)0)
#define mayfuse(as, ref) ((ref) > as->fuseref)
#define neverfuse(as) (as->fuseref == FUSE_DISABLED)
#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t))
#define opisfusableload(o) \
  ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
   (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)

/* Sparse limit checks using a red zone before the actual limit. */
#define MCLIM_REDZONE 64

static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
{
  lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
}

static LJ_AINLINE void checkmclim(ASMState *as)
{
#ifdef LUA_USE_ASSERT
  if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
    IRIns *ir = IR(as->curins+1);
    lj_assertA(0, "red zone overflow: %p IR %04d %02d %04d %04d\n", as->mcp,
               as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
  }
#endif
  if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
#ifdef LUA_USE_ASSERT
  as->mcp_prev = as->mcp;
#endif
}
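
/* Note: the red zone enables sparse limit checking. checkmclim() is only
** called between emitting whole instructions, which is safe as long as no
** single IR instruction emits more than MCLIM_REDZONE bytes of machine
** code; the assert above verifies this invariant in debug builds.
*/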

#ifdef RID_NUM_KREF
#define ra_iskref(ref) ((ref) < RID_NUM_KREF)
#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref)))
#define ra_krefk(as, ref) (as->krefk[(ref)])

static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
{
  IRRef ref = (IRRef)(r - RID_MIN_KREF);
  as->krefk[ref] = k;
  as->cost[r] = REGCOST(ref, ref);
}

#else
#define ra_iskref(ref) 0
#define ra_krefreg(ref) RID_MIN_GPR
#define ra_krefk(as, ref) 0
#endif

/* Arch-specific field offsets. */
static const uint8_t field_ofs[IRFL__MAX+1] = {
#define FLOFS(name, ofs) (uint8_t)(ofs),
IRFLDEF(FLOFS)
#undef FLOFS
  0
};
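
/* The FLOFS X-macro turns each (name, offset) pair from IRFLDEF in lj_ir.h
** into a byte offset, so field_ofs[] can be indexed directly with the
** IRFL_* constant of an FLOAD. The trailing 0 fills the IRFL__MAX slot.
*/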

/* -- Target-specific instruction emitter --------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"
#elif LJ_TARGET_ARM
#include "lj_emit_arm.h"
#elif LJ_TARGET_ARM64
#include "lj_emit_arm64.h"
#elif LJ_TARGET_PPC
#include "lj_emit_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_emit_mips.h"
#else
#error "Missing instruction emitter for target CPU"
#endif

/* Generic load/store of register from/to stack slot. */
#define emit_spload(as, ir, r, ofs) \
  emit_loadofs(as, ir, (r), RID_SP, (ofs))
#define emit_spstore(as, ir, r, ofs) \
  emit_storeofs(as, ir, (r), RID_SP, (ofs))

/* -- Register allocator debugging ---------------------------------------- */

/* #define LUAJIT_DEBUG_RA */

#ifdef LUAJIT_DEBUG_RA

#include <stdio.h>
#include <stdarg.h>

#define RIDNAME(name) #name,
static const char *const ra_regname[] = {
  GPRDEF(RIDNAME)
  FPRDEF(RIDNAME)
  VRIDDEF(RIDNAME)
  NULL
};
#undef RIDNAME

static char ra_dbg_buf[65536];
static char *ra_dbg_p;
static char *ra_dbg_merge;
static MCode *ra_dbg_mcp;

static void ra_dstart(void)
{
  ra_dbg_p = ra_dbg_buf;
  ra_dbg_merge = NULL;
  ra_dbg_mcp = NULL;
}

static void ra_dflush(void)
{
  fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  ra_dstart();
}

static void ra_dprintf(ASMState *as, const char *fmt, ...)
{
  char *p;
  va_list argp;
  va_start(argp, fmt);
  p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  ra_dbg_mcp = NULL;
  p += sprintf(p, "%08x \e[36m%04d ", (unsigned int)(uintptr_t)as->mcp,
               as->curins-REF_BIAS);
  for (;;) {
    const char *e = strchr(fmt, '$');
    if (e == NULL) break;
    memcpy(p, fmt, (size_t)(e-fmt));
    p += e-fmt;
    if (e[1] == 'r') {
      Reg r = va_arg(argp, Reg) & RID_MASK;
      if (r <= RID_MAX) {
        const char *q;
        for (q = ra_regname[r]; *q; q++)
          *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
      } else {
        *p++ = '?';
        lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt);
      }
    } else if (e[1] == 'f' || e[1] == 'i') {
      IRRef ref;
      if (e[1] == 'f')
        ref = va_arg(argp, IRRef);
      else
        ref = va_arg(argp, IRIns *) - as->ir;
      if (ref >= REF_BIAS)
        p += sprintf(p, "%04d", ref - REF_BIAS);
      else
        p += sprintf(p, "K%03d", REF_BIAS - ref);
    } else if (e[1] == 's') {
      uint32_t slot = va_arg(argp, uint32_t);
      p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
    } else if (e[1] == 'x') {
      p += sprintf(p, "%08x", va_arg(argp, int32_t));
    } else {
      lj_assertA(0, "bad debug format code");
    }
    fmt = e+2;
  }
  va_end(argp);
  while (*fmt)
    *p++ = *fmt++;
  *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
    fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
    p = ra_dbg_buf;
  }
  ra_dbg_p = p;
}

#define RA_DBG_START() ra_dstart()
#define RA_DBG_FLUSH() ra_dflush()
#define RA_DBG_REF() \
  do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
       ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
#define RA_DBGX(x) ra_dprintf x

#else
#define RA_DBG_START() ((void)0)
#define RA_DBG_FLUSH() ((void)0)
#define RA_DBG_REF() ((void)0)
#define RA_DBGX(x) ((void)0)
#endif

/* -- Register allocator -------------------------------------------------- */

#define ra_free(as, r) rset_set(as->freeset, (r))
#define ra_modified(as, r) rset_set(as->modset, (r))
#define ra_weak(as, r) rset_set(as->weakset, (r))
#define ra_noweak(as, r) rset_clear(as->weakset, (r))

#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))

/* Set up the register allocator. */
static void ra_setup(ASMState *as)
{
  Reg r;
  /* Initially all regs (except the stack pointer) are free for use. */
  as->freeset = RSET_INIT;
  as->modset = RSET_EMPTY;
  as->weakset = RSET_EMPTY;
  as->phiset = RSET_EMPTY;
  memset(as->phireg, 0, sizeof(as->phireg));
  for (r = RID_MIN_GPR; r < RID_MAX; r++)
    as->cost[r] = REGCOST(~0u, 0u);
}
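
/* Illustration of the cost encoding (cf. REGCOST() and regcost_ref() in
** lj_target.h): a RegCost packs the owning IR reference into its low half
** and a blended eviction cost into its high half. Constant references get
** cost == ref (see ra_setkref() above), so low-numbered constants are the
** cheapest registers to evict, since they can simply be rematerialized.
*/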

/* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
{
  IRIns *ir;
  Reg r;
  if (ra_iskref(ref)) {
    r = ra_krefreg(ref);
    lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r);
    ra_free(as, r);
    ra_modified(as, r);
#if LJ_64
    emit_loadu64(as, r, ra_krefk(as, ref));
#else
    emit_loadi(as, r, ra_krefk(as, ref));
#endif
    return r;
  }
  ir = IR(ref);
  r = ir->r;
  lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref);
  lj_assertA(!ra_hasspill(ir->s),
             "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s);
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;  /* Do not keep any hint. */
  RA_DBGX((as, "remat $i $r", ir, r));
#if !LJ_SOFTFP32
  if (ir->o == IR_KNUM) {
    emit_loadk64(as, r, ir);
  } else
#endif
  if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
    ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
    emit_getgl(as, r, jit_base);
  } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
    /* REF_NIL stores the ASMREF_L register. */
    lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L");
    emit_getgl(as, r, cur_L);
#if LJ_64
  } else if (ir->o == IR_KINT64) {
    emit_loadu64(as, r, ir_kint64(ir)->u64);
#if LJ_GC64
  } else if (ir->o == IR_KGC) {
    emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
  } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
#endif
#endif
  } else {
    lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
               ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
               "rematk of bad IR op %d", ir->o);
    emit_loadi(as, r, ir->i);
  }
  return r;
}

/* Force a spill. Allocate a new spill slot if needed. */
static int32_t ra_spill(ASMState *as, IRIns *ir)
{
  int32_t slot = ir->s;
  lj_assertA(ir >= as->ir + REF_TRUE,
             "spill of K%03d", REF_BIAS - (int)(ir - as->ir));
  if (!ra_hasspill(slot)) {
    if (irt_is64(ir->t)) {
      slot = as->evenspill;
      as->evenspill += 2;
    } else if (as->oddspill) {
      slot = as->oddspill;
      as->oddspill = 0;
    } else {
      slot = as->evenspill;
      as->oddspill = slot+1;
      as->evenspill += 2;
    }
    if (as->evenspill > 256)
      lj_trace_err(as->J, LJ_TRERR_SPILLOV);
    ir->s = (uint8_t)slot;
  }
  return sps_scale(slot);
}
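
/* Worked example of the slot pairing in ra_spill(): spill slots are 32-bit
** units and 64-bit values must start at an even slot. Suppose evenspill is
** 2 and oddspill is 0. A 64-bit spill takes slots 2-3 (evenspill -> 4); a
** following 32-bit spill takes slot 4 and parks slot 5 in oddspill, so the
** next 32-bit spill reuses slot 5 instead of wasting another pair.
*/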

/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  Reg r = ir->r;
  lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1);
  lj_assertA(!ra_hasspill(ir->s),
             "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s);
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;
  return r;
}

/* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
{
  if (emit_canremat(ref)) {
    return ra_rematk(as, ref);
  } else {
    IRIns *ir = IR(ref);
    int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
    Reg r = ir->r;
    lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS);
    ra_sethint(ir->r, r);  /* Keep hint. */
    ra_free(as, r);
    if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
      ra_modified(as, r);
      RA_DBGX((as, "restore $i $r", ir, r));
      emit_spload(as, ir, r, ofs);
    }
    return r;
  }
}

/* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
{
  RA_DBGX((as, "save $i $r", ir, r));
  emit_spstore(as, ir, r, sps_scale(ir->s));
}

#define MINCOST(name) \
  if (rset_test(RSET_ALL, RID_##name) && \
      LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
    cost = as->cost[RID_##name];

/* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
{
  IRRef ref;
  RegCost cost = ~(RegCost)0;
  lj_assertA(allow != RSET_EMPTY, "evict from empty set");
  if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
    GPRDEF(MINCOST)
  } else {
    FPRDEF(MINCOST)
  }
  ref = regcost_ref(cost);
  lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins),
             "evict of out-of-range IR %04d", ref - REF_BIAS);
  /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  if (!irref_isk(ref) && (as->weakset & allow)) {
    IRIns *ir = IR(ref);
    if (!rset_test(as->weakset, ir->r))
      ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  }
  return ra_restore(as, ref);
}

/* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
{
  RegSet pick = as->freeset & allow;
  if (!pick)
    return ra_evict(as, allow);
  else
    return rset_picktop(pick);
}

/* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
{
  Reg r = ra_pick(as, allow);
  ra_modified(as, r);
  RA_DBGX((as, "scratch $r", r));
  return r;
}

/* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
{
  RegSet work;
  as->modset |= drop;
#if !LJ_SOFTFP
  work = (drop & ~as->freeset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = (drop & ~as->freeset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
}

/* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
{
  RegSet work;
#if !LJ_SOFTFP
  work = ~as->freeset & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
#endif
  work = ~as->freeset & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}

#ifdef RID_NUM_KREF
/* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
{
  /* First try to find a register which already holds the same constant. */
  RegSet pick, work = ~as->freeset & RSET_GPR;
  Reg r;
  while (work) {
    IRRef ref;
    r = rset_pickbot(work);
    ref = regcost_ref(as->cost[r]);
#if LJ_64
    if (ref < ASMREF_L) {
      if (ra_iskref(ref)) {
        if (k == ra_krefk(as, ref))
          return r;
      } else {
        IRIns *ir = IR(ref);
        if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
#if LJ_GC64
            (ir->o == IR_KINT && k == ir->i) ||
            (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
            ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
             k == (intptr_t)ir_kptr(ir))
#else
            (ir->o != IR_KINT64 && k == ir->i)
#endif
           )
          return r;
      }
    }
#else
    if (ref < ASMREF_L &&
        k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
      return r;
#endif
    rset_clear(work, r);
  }
  pick = as->freeset & allow;
  if (pick) {
    /* Constants should preferably get unmodified registers. */
    if ((pick & ~as->modset))
      pick &= ~as->modset;
    r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  } else {
    r = ra_evict(as, allow);
  }
  RA_DBGX((as, "allock $x $r", k, r));
  ra_setkref(as, r, k);
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  return r;
}

/* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
{
  Reg kr = ra_allock(as, k, RID2RSET(r));
  if (kr != r) {
    IRIns irdummy;
    irdummy.t.irt = IRT_INT;
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, &irdummy, r, kr);
  }
}
#else
#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k))
#endif

/* Allocate a register for ref from the allowed set of registers.
** Note: this function assumes the ref does NOT have a register yet!
** Picks an optimal register, sets the cost and marks the register as non-free.
*/
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  RegSet pick = as->freeset & allow;
  Reg r;
  lj_assertA(ra_noreg(ir->r),
             "IR %04d already has reg %d", ref - REF_BIAS, ir->r);
  if (pick) {
    /* First check register hint from propagation or PHI. */
    if (ra_hashint(ir->r)) {
      r = ra_gethint(ir->r);
      if (rset_test(pick, r))  /* Use hint register if possible. */
        goto found;
      /* Rematerialization is cheaper than missing a hint. */
      if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
        ra_rematk(as, regcost_ref(as->cost[r]));
        goto found;
      }
      RA_DBGX((as, "hintmiss $f $r", ref, r));
    }
    /* Invariants should preferably get unmodified registers. */
    if (ref < as->loopref && !irt_isphi(ir->t)) {
      if ((pick & ~as->modset))
        pick &= ~as->modset;
      r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
    } else {
      /* We've got plenty of regs, so get callee-save regs if possible. */
      if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
        pick &= ~RSET_SCRATCH;
      r = rset_picktop(pick);
    }
  } else {
    r = ra_evict(as, allow);
  }
found:
  RA_DBGX((as, "alloc $f $r", ref, r));
  ir->r = (uint8_t)r;
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  return r;
}
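
/* Design note on "inverse allocation": constants and invariants are picked
** from the bottom of the free set (rset_pickbot), variant instructions from
** the top (rset_picktop). The two allocation streams grow towards each
** other and only start to conflict when the register file is nearly full.
*/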

/* Allocate a register on-demand. */
static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  /* Note: allow is ignored if the register is already allocated. */
  if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
  ra_noweak(as, r);
  return r;
}

/* Add a register rename to the IR. */
static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
{
  IRRef ren;
  lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
  ren = tref_ref(lj_ir_emit(as->J));
  as->J->cur.ir[ren].r = (uint8_t)down;
  as->J->cur.ir[ren].s = SPS_NONE;
}

/* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
{
  IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
  IRIns *ir = IR(ref);
  ir->r = (uint8_t)up;
  as->cost[down] = 0;
  lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR),
             "rename between GPR/FPR %d and %d", down, up);
  lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down);
  lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up);
  ra_free(as, down);  /* 'down' is free ... */
  ra_modified(as, down);
  rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  ra_noweak(as, up);
  RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
  emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
    ra_addrename(as, down, ref, as->snapno);
  }
}
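
/* Note on direction: machine code is generated backwards, from the last IR
** instruction towards the first. After a rename from 'down' to 'up', the
** code assembled earlier (which *executes later*) expects the value in
** 'down', while the code still to be assembled keeps it in 'up'; hence the
** emitted move copies 'up' into 'down'. The RENAME IR record tells the
** snapshot restore code which register is live on either side.
*/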

/* Pick a destination register (marked as free).
** Caveat: allow is ignored if there's already a destination register.
** Use ra_destreg() to get a specific register.
*/
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
{
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
  } else {
    if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
      dest = ra_gethint(dest);
      ra_modified(as, dest);
      RA_DBGX((as, "dest $r", dest));
    } else {
      dest = ra_scratch(as, allow);
    }
    ir->r = dest;
  }
  if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  return dest;
}

/* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
{
  Reg dest = ra_dest(as, ir, RID2RSET(r));
  if (dest != r) {
    lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r);
    ra_modified(as, r);
    emit_movrr(as, ir, dest, r);
  }
}

#if LJ_TARGET_X86ORX64
/* Propagate dest register to left reference. Emit moves as needed.
** This is a required fixup step for all 2-operand machine instructions.
*/
static void ra_left(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    if (irref_isk(lref)) {
      if (ir->o == IR_KNUM) {
        /* FP remat needs a load, except for +0. Still better than eviction. */
        if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
          emit_loadk64(as, dest, ir);
          return;
        }
#if LJ_64
      } else if (ir->o == IR_KINT64) {
        emit_loadk64(as, dest, ir);
        return;
#if LJ_GC64
      } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
        emit_loadk64(as, dest, ir);
        return;
#endif
#endif
      } else if (ir->o != IR_KPRI) {
        lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
                   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
                   "K%03d has bad IR op %d", REF_BIAS - lref, ir->o);
        emit_loadi(as, dest, ir->i);
        return;
      }
    }
    if (!ra_hashint(left) && !iscrossref(as, lref))
      ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#else
/* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref,
                       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#endif

#if !LJ_64
/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
{
  Reg destlo = ir->r, desthi = (ir+1)->r;
  /* First spill unrelated refs blocking the destination registers. */
  if (!rset_test(as->freeset, RID_RETLO) &&
      destlo != RID_RETLO && desthi != RID_RETLO)
    ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  if (!rset_test(as->freeset, RID_RETHI) &&
      destlo != RID_RETHI && desthi != RID_RETHI)
    ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  /* Next free the destination registers (if any). */
  if (ra_hasreg(destlo)) {
    ra_free(as, destlo);
    ra_modified(as, destlo);
  } else {
    destlo = RID_RETLO;
  }
  if (ra_hasreg(desthi)) {
    ra_free(as, desthi);
    ra_modified(as, desthi);
  } else {
    desthi = RID_RETHI;
  }
  /* Check for conflicts and shuffle the registers as needed. */
  if (destlo == RID_RETHI) {
    if (desthi == RID_RETLO) {
#if LJ_TARGET_X86
      *--as->mcp = XI_XCHGa + RID_RETHI;
#else
      emit_movrr(as, ir, RID_RETHI, RID_TMP);
      emit_movrr(as, ir, RID_RETLO, RID_RETHI);
      emit_movrr(as, ir, RID_TMP, RID_RETLO);
#endif
    } else {
      emit_movrr(as, ir, RID_RETHI, RID_RETLO);
      if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    }
  } else if (desthi == RID_RETLO) {
    emit_movrr(as, ir, RID_RETLO, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  } else {
    if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  }
  /* Restore spill slots (if any). */
  if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
}
#endif

/* -- Snapshot handling --------------------------------------------------- */

/* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
{
  Reg r;
  for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
    if (irref_isk(regcost_ref(as->cost[r])))
      return 1;
  return 0;
}

/* Check whether a sunk store corresponds to an allocation. */
static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
{
  if (irs->s == 255) {
    if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
        irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
      IRIns *irk = IR(irs->op1);
      if (irk->o == IR_AREF || irk->o == IR_HREFK)
        irk = IR(irk->op1);
      return (IR(irk->op1) == ira);
    }
    return 0;
  } else {
    return (ira + irs->s == irs);  /* Quick check. */
  }
}
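
/* Note on the quick check above: for sunk stores the otherwise-unused spill
** field holds the distance from the allocation to the store, so a single
** pointer addition verifies the pairing. The value 255 flags stores where
** this delta doesn't fit; the slow path then re-derives the allocation from
** the store's key operand chain instead.
*/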

/* Allocate a register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (!irref_isk(ref) && ir->r != RID_SUNK) {
    bloomset(as->snapfilt1, ref);
    bloomset(as->snapfilt2, hashrot(ref, ref + HASH_BIAS));
    if (ra_used(ir)) return;
    if (ir->r == RID_SINK) {
      ir->r = RID_SUNK;
#if LJ_HASFFI
      if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
        asm_snap_alloc1(as, ir->op2);
        if (LJ_32 && (ir+1)->o == IR_HIOP)
          asm_snap_alloc1(as, (ir+1)->op2);
      } else
#endif
      {  /* Allocate stored values for TNEW, TDUP and CNEW. */
        IRIns *irs;
        lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW,
                   "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o);
        for (irs = IR(as->snapref-1); irs > ir; irs--)
          if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
            lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
                       irs->o == IR_FSTORE || irs->o == IR_XSTORE,
                       "sunk store IR %04d has bad op %d",
                       (int)(irs - as->ir) - REF_BIAS, irs->o);
            asm_snap_alloc1(as, irs->op2);
            if (LJ_32 && (irs+1)->o == IR_HIOP)
              asm_snap_alloc1(as, (irs+1)->op2);
          }
      }
    } else {
      RegSet allow;
      if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
        IRIns *irc;
        for (irc = IR(as->curins); irc > ir; irc--)
          if ((irc->op1 == ref || irc->op2 == ref) &&
              !(irc->r == RID_SINK || irc->r == RID_SUNK))
            goto nosink;  /* Don't sink conversion if result is used. */
        asm_snap_alloc1(as, ir->op1);
        return;
      }
    nosink:
      allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
      if ((as->freeset & allow) ||
          (allow == RSET_FPR && asm_snap_canremat(as))) {
        /* Get a weak register if we have a free one or can rematerialize. */
        Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
        if (!irt_isphi(ir->t))
          ra_weak(as, r);  /* But mark it as weakly referenced. */
        checkmclim(as);
        RA_DBGX((as, "snapreg $f $r", ref, ir->r));
      } else {
        ra_spill(as, ir);  /* Otherwise force a spill slot. */
        RA_DBGX((as, "snapspill $f $s", ref, ir->s));
      }
    }
  }
}

/* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as, int snapno)
{
  SnapShot *snap = &as->T->snap[snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  as->snapfilt1 = as->snapfilt2 = 0;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (!irref_isk(ref)) {
      asm_snap_alloc1(as, ref);
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
        lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP,
                   "snap %d[%d] points to bad SOFTFP IR %04d",
                   snapno, n, ref - REF_BIAS);
        asm_snap_alloc1(as, ref+1);
      }
    }
  }
}

/* All guards for a snapshot use the same exitno. This is currently the
** same as the snapshot number. Since the exact origin of the exit cannot
** be determined, all guards for the same snapshot must exit with the same
** RegSP mapping.
** A renamed ref which has been used in a prior guard for the same snapshot
** would cause an inconsistency. The easy way out is to force a spill slot.
*/
static int asm_snap_checkrename(ASMState *as, IRRef ren)
{
  if (bloomtest(as->snapfilt1, ren) &&
      bloomtest(as->snapfilt2, hashrot(ren, ren + HASH_BIAS))) {
    IRIns *ir = IR(ren);
    ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
    RA_DBGX((as, "snaprensp $f $s", ren, ir->s));
    return 1;  /* Found. */
  }
  return 0;  /* Not found. */
}

/* Prepare snapshot for next guard or throwing instruction. */
static void asm_snap_prep(ASMState *as)
{
  if (as->snapalloc) {
    /* Alloc on first invocation for each snapshot. */
    as->snapalloc = 0;
    asm_snap_alloc(as, as->snapno);
    as->snaprename = as->T->nins;
  } else {
    /* Check any renames above the highwater mark. */
    for (; as->snaprename < as->T->nins; as->snaprename++) {
      IRIns *ir = &as->T->ir[as->snaprename];
      if (asm_snap_checkrename(as, ir->op1))
        ir->op2 = REF_BIAS-1;  /* Kill rename. */
    }
  }
}
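
/* Note: the two Bloom filters keep asm_snap_checkrename() cheap. Every ref
** escaping to the current snapshot is hashed into snapfilt1/snapfilt2 by
** asm_snap_alloc1(); only a hit in both filters forces the spill treatment.
** A rename killed above (op2 set to REF_BIAS-1) is thereby disarmed: its
** snapshot number no longer matches any snapshot the exit handling processes.
*/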

/* Move to the previous snapshot when we cross the current snapshot ref. */
static void asm_snap_prev(ASMState *as)
{
  if (as->curins < as->snapref) {
    ptrdiff_t ofs = as->mctoporig - as->mcp;
    if (ofs >= 0x10000) lj_trace_err(as->J, LJ_TRERR_MCODEOV);
    do {
      if (as->snapno == 0) return;
      as->snapno--;
      as->snapref = as->T->snap[as->snapno].ref;
      as->T->snap[as->snapno].mcofs = (uint16_t)ofs;  /* Remember mcode offset. */
    } while (as->curins < as->snapref);  /* May have no ins in between. */
    as->snapalloc = 1;
  }
}

/* Fixup snapshot mcode offsets. */
static void asm_snap_fixup_mcofs(ASMState *as)
{
  uint32_t sz = (uint32_t)(as->mctoporig - as->mcp);
  SnapShot *snap = as->T->snap;
  SnapNo i;
  for (i = as->T->nsnap-1; i > 0; i--) {
    /* Compute offset from mcode start and store in correct snapshot. */
    snap[i].mcofs = (uint16_t)(sz - snap[i-1].mcofs);
  }
  snap[0].mcofs = 0;
}
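
/* Worked example: code is emitted backwards, so during assembly
** snap[i].mcofs temporarily holds the distance from the *end* of the
** machine code at the boundary where snapshot i+1's region begins (stored
** at index i by asm_snap_prev() above). With the final size sz, the fixup
** converts each boundary into a conventional offset from the *start* of
** the trace mcode, shifting it up by one index so that snap[i].mcofs marks
** where snapshot i's code begins; snap[0] is anchored at offset 0.
*/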

/* -- Miscellaneous helpers ----------------------------------------------- */

/* Calculate the stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
{
  if (as->evenspill <= SPS_FIXED)
    return 0;
  return sps_scale(sps_align(as->evenspill));
}
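
/* Example (values are per-target, see SPS_FIXED in lj_target_*.h): if the
** frame reserves 4 fixed slots and evenspill ended up at 7, the adjustment
** is sps_scale(sps_align(7)), i.e. the slot count rounded up to the stack
** alignment and scaled from 32-bit slots to bytes. Traces that never spill
** beyond the fixed slots need no adjustment at all.
*/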

/* Must match hash*() in lj_tab.c. */
static uint32_t ir_khash(ASMState *as, IRIns *ir)
{
  uint32_t lo, hi;
  UNUSED(as);
  if (irt_isstr(ir->t)) {
    return ir_kstr(ir)->sid;
  } else if (irt_isnum(ir->t)) {
    lo = ir_knum(ir)->u32.lo;
    hi = ir_knum(ir)->u32.hi << 1;
  } else if (irt_ispri(ir->t)) {
    lj_assertA(!irt_isnil(ir->t), "hash of nil key");
    return irt_type(ir->t)-IRT_FALSE;
  } else {
    lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t));
    lo = u32ptr(ir_kgc(ir));
#if LJ_GC64
    hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15);
#else
    hi = lo + HASH_BIAS;
#endif
  }
  return hashrot(lo, hi);
}

/* -- Allocations --------------------------------------------------------- */

static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);

static void asm_snew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  IRRef args[3];
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;  /* const char *str */
  args[2] = ir->op2;  /* size_t len */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

static void asm_tnew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
  ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
}
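
/* Note on the packing above: lj_tab_new1() takes a single uint32_t with
** the array size hint in the low bits (ir->op1) and the hash size, as a
** log2 count, shifted into the top byte (ir->op2 << 24). One constant
** register therefore covers both TNEW operands.
*/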

static void asm_tdup(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;  /* const GCtab *kt */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
}

static void asm_gc_check(ASMState *as);

/* Explicit GC step. */
static void asm_gcstep(ASMState *as, IRIns *ir)
{
  IRIns *ira;
  for (ira = IR(as->stopins+1); ira < ir; ira++)
    if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
         (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
        ra_used(ira))
      as->gcsteps++;
  if (as->gcsteps)
    asm_gc_check(as);
  as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
}

/* -- Buffer operations --------------------------------------------------- */

static void asm_tvptr(ASMState *as, Reg dest, IRRef ref);

static void asm_bufhdr(ASMState *as, IRIns *ir)
{
  Reg sb = ra_dest(as, ir, RSET_GPR);
  if ((ir->op2 & IRBUFHDR_APPEND)) {
    /* Rematerialize const buffer pointer instead of likely spill. */
    IRIns *irp = IR(ir->op1);
    if (!(ra_hasreg(irp->r) || irp == ir-1 ||
          (irp == ir-2 && !ra_used(ir-1)))) {
      while (!(irp->o == IR_BUFHDR && !(irp->op2 & IRBUFHDR_APPEND)))
        irp = IR(irp->op1);
      if (irref_isk(irp->op1)) {
        ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
        ir = irp;
      }
    }
  } else {
    Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
    /* Passing ir isn't strictly correct, but it's an IRT_PGC, too. */
    emit_storeofs(as, ir, tmp, sb, offsetof(SBuf, p));
    emit_loadofs(as, ir, tmp, sb, offsetof(SBuf, b));
  }
#if LJ_TARGET_X86ORX64
  ra_left(as, sb, ir->op1);
#else
  ra_leftov(as, sb, ir->op1);
#endif
}

static void asm_bufput(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
  IRRef args[3];
  IRIns *irs;
  int kchar = -129;  /* Sentinel: outside the int8_t range. */
  args[0] = ir->op1;  /* SBuf * */
  args[1] = ir->op2;  /* GCstr * */
  irs = IR(ir->op2);
  lj_assertA(irt_isstr(irs->t),
             "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS);
  if (irs->o == IR_KGC) {
    GCstr *s = ir_kstr(irs);
    if (s->len == 1) {  /* Optimize put of single-char string constant. */
      kchar = (int8_t)strdata(s)[0];  /* Signed! */
      args[1] = ASMREF_TMP1;  /* int, truncated to char */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
    }
  } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
    if (irs->o == IR_TOSTR) {  /* Fuse number-to-string conversions. */
      if (irs->op2 == IRTOSTR_NUM) {
        args[1] = ASMREF_TMP1;  /* TValue * */
        ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
      } else {
        lj_assertA(irt_isinteger(IR(irs->op1)->t),
                   "TOSTR of non-numeric IR %04d", irs->op1 - REF_BIAS);
        args[1] = irs->op1;  /* int */
        if (irs->op2 == IRTOSTR_INT)
          ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
        else
          ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
      }
    } else if (irs->o == IR_SNEW) {  /* Fuse string allocation. */
      args[1] = irs->op1;  /* const void * */
      args[2] = irs->op2;  /* MSize */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
    }
  }
  asm_setupresult(as, ir, ci);  /* SBuf * */
  asm_gencall(as, ci, args);
  if (args[1] == ASMREF_TMP1) {
    Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
    if (kchar == -129)
      asm_tvptr(as, tmp, irs->op1);
    else
      ra_allockreg(as, kchar, tmp);
  }
}

static void asm_bufstr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
  IRRef args[1];
  args[0] = ir->op1;  /* SBuf *sb */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tostr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci;
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (ir->op2 == IRTOSTR_NUM) {
    args[1] = ASMREF_TMP1;  /* cTValue * */
    ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
  } else {
    args[1] = ir->op1;  /* int32_t k */
    if (ir->op2 == IRTOSTR_INT)
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
    else
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
  }
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
  if (ir->op2 == IRTOSTR_NUM)
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
}

#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  IRRef args[2];
  lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP,
             "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS);
  args[LJ_BE] = (ir-1)->op1;
  args[LJ_LE] = ir->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  {
#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
    CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
    cim.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
#else
    const CCallInfo *ci = &lj_ir_callinfo[id];
#endif
    asm_setupresult(as, ir, ci);
    asm_gencall(as, ci, args);
  }
}
#endif
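
/* Note: the call ID arithmetic above relies on the IRCALL_fp64_* entries
** in lj_ircall.h being laid out consecutively (d2l, d2ul, f2l, f2ul and
** l2d, ul2d, l2f, ul2f): "+2" switches between the double and float
** variants and "+(dt - IRT_I64)" selects signed vs. unsigned 64-bit.
*/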

/* -- Memory references --------------------------------------------------- */

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  if (ir->r == RID_SINK)
    return;
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;  /* GCtab *t */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
}

static void asm_lref(ASMState *as, IRIns *ir)
{
  Reg r = ra_dest(as, ir, RSET_GPR);
#if LJ_TARGET_X86ORX64
  ra_left(as, r, ASMREF_L);
#else
  ra_leftov(as, r, ASMREF_L);
#endif
}

/* -- Calls --------------------------------------------------------------- */

/* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
                            const CCallInfo *ci, IRRef *args)
{
  uint32_t n = CCI_XNARGS(ci);  /* Account for split args. */
  lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n);
  if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  while (n-- > 1) {
    ir = IR(ir->op1);
    lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree");
    args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  }
  args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree");
}

/* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
{
  uint32_t nargs = 0;
  if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
    IRIns *ira = IR(ir->op1);
    nargs++;
    while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  }
#if LJ_HASFFI
  if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
    CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
    CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
    nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
#if LJ_TARGET_X86
    nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
#endif
  }
#endif
  return (nargs | (ir->t.irt << CCI_OTSHIFT));
}

static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
                          RegSet allow)
{
  RegSet candidates = blocked & allow;
  if (candidates) {  /* If this register file has candidates. */
    /* Note: the set for ra_pick cannot be empty, since each register file
    ** has some registers never allocated to PHIs.
    */
    Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
    if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
      candidates = candidates & ~blockedby;
    down = rset_picktop(candidates);  /* Pick candidate PHI register. */
    ra_rename(as, down, up);  /* And rename it to the free register. */
  }
}

/* PHI register shuffling.
**
** The allocator tries hard to preserve PHI register assignments across
** the loop body. Most of the time this loop does nothing, since there
** are no register mismatches.
**
** If a register mismatch is detected and ...
** - the register is currently free: rename it.
** - the register is blocked by an invariant: restore/remat and rename it.
** - otherwise the register is used by another PHI, so mark it as blocked.
**
** The renames are order-sensitive, so just retry the loop if a register
** is marked as blocked, but has been freed in the meantime. A cycle is
** detected if all of the blocked registers are allocated. To break the
** cycle rename one of them to a free register and retry.
**
** Note that PHI spill slots are kept in sync and don't need to be shuffled.
*/
static void asm_phi_shuffle(ASMState *as)
{
  RegSet work;

  /* Find and resolve PHI register mismatches. */
  for (;;) {
    RegSet blocked = RSET_EMPTY;
    RegSet blockedby = RSET_EMPTY;
    RegSet phiset = as->phiset;
    while (phiset) {  /* Check all left PHI operand registers. */
      Reg r = rset_pickbot(phiset);
      IRIns *irl = IR(as->phireg[r]);
      Reg left = irl->r;
      if (r != left) {  /* Mismatch? */
        if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
          IRRef ref = regcost_ref(as->cost[r]);
          /* Blocked by other PHI (w/reg)? */
          if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
            rset_set(blocked, r);
            if (ra_hasreg(left))
              rset_set(blockedby, left);
            left = RID_NONE;
          } else {  /* Otherwise grab register from invariant. */
            ra_restore(as, ref);
            checkmclim(as);
          }
        }
        if (ra_hasreg(left)) {
          ra_rename(as, left, r);
          checkmclim(as);
        }
      }
      rset_clear(phiset, r);
    }
    if (!blocked) break;  /* Finished. */
    if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
      asm_phi_break(as, blocked, blockedby, RSET_GPR);
      if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
      checkmclim(as);
    }  /* Else retry some more renames. */
  }

  /* Restore/remat invariants whose registers are modified inside the loop. */
#if !LJ_SOFTFP
  work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = as->modset & ~(as->freeset | as->phiset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }

  /* Allocate and save all unsaved PHI regs and clear marks. */
  work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
      irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
      ra_alloc1(as, lref, RID2RSET(r));
      ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}

/* Copy unsynced left/right PHI spill slots. Rarely needed. */
static void asm_phi_copyspill(ASMState *as)
{
  int need = 0;
  IRIns *ir;
  for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
    if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
      need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
  if ((need & 1)) {  /* Copy integer spill slots. */
#if !LJ_TARGET_X86ORX64
    Reg r = RID_TMP;
#else
    Reg r = RID_RET;
    if ((as->freeset & RSET_GPR))
      r = rset_pickbot((as->freeset & RSET_GPR));
    else
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
        IRIns *irl = IR(ir->op1);
        if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
          emit_spstore(as, irl, r, sps_scale(irl->s));
          emit_spload(as, ir, r, sps_scale(ir->s));
          checkmclim(as);
        }
      }
    }
#if LJ_TARGET_X86ORX64
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
  }
#if !LJ_SOFTFP
  if ((need & 2)) {  /* Copy FP spill slots. */
#if LJ_TARGET_X86
    Reg r = RID_XMM0;
#else
    Reg r = RID_FPRET;
#endif
    if ((as->freeset & RSET_FPR))
      r = rset_pickbot((as->freeset & RSET_FPR));
    if (!rset_test(as->freeset, r))
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
        IRIns *irl = IR(ir->op1);
        if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
          emit_spstore(as, irl, r, sps_scale(irl->s));
          emit_spload(as, ir, r, sps_scale(ir->s));
          checkmclim(as);
        }
      }
    }
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  }
#endif
}

/* Emit renames for left PHIs which are only spilled outside the loop. */
static void asm_phi_fixup(ASMState *as)
{
  RegSet work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (irt_ismarked(ir->t)) {
      irt_clearmark(ir->t);
      /* Left PHI gained a spill slot before the loop? */
      if (ra_hasspill(ir->s)) {
        ra_addrename(as, r, lref, as->loopsnapno);
      }
    }
    rset_clear(work, r);
  }
}

/* Set up the right PHI reference. */
static void asm_phi(ASMState *as, IRIns *ir)
{
  RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
                 ~as->phiset;
  RegSet afree = (as->freeset & allow);
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  if (ir->r == RID_SINK)  /* Sink PHI. */
    return;
  /* Spill slot shuffling is not implemented yet (but rarely needed). */
  if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
    lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
  if ((afree & (afree-1))) {  /* Two or more free registers? */
    Reg r;
    if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
      r = ra_allocref(as, ir->op2, allow);
    } else {  /* Duplicate right PHI, need a copy (rare). */
      r = ra_scratch(as, allow);
      emit_movrr(as, irr, r, irr->r);
    }
    ir->r = (uint8_t)r;
    rset_set(as->phiset, r);
    as->phireg[r] = (IRRef1)ir->op1;
    irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
    if (ra_noreg(irl->r))
      ra_sethint(irl->r, r);  /* Set register hint for left PHI. */
  } else {  /* Otherwise allocate a spill slot. */
    /* This is overly restrictive, but it triggers only on synthetic code. */
    if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
      lj_trace_err(as->J, LJ_TRERR_NYIPHI);
    ra_spill(as, ir);
    irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
  }
}

static void asm_loop_fixup(ASMState *as);

/* Middle part of a loop. */
static void asm_loop(ASMState *as)
{
  MCode *mcspill;
  /* LOOP is a guard, so the snapno is up to date. */
  as->loopsnapno = as->snapno;
  if (as->gcsteps)
    asm_gc_check(as);
  /* LOOP marks the transition from the variant to the invariant part. */
  as->flagmcp = as->invmcp = NULL;
  as->sectref = 0;
  if (!neverfuse(as)) as->fuseref = 0;
  asm_phi_shuffle(as);
  mcspill = as->mcp;
  asm_phi_copyspill(as);
  asm_loop_fixup(as);
  as->mcloop = as->mcp;
  RA_DBGX((as, "===== LOOP ====="));
  if (!as->realign) RA_DBG_FLUSH();
  if (as->mcp != mcspill)
    emit_jmp(as, mcspill);
}
1623
1624/* -- Target-specific assembler ------------------------------------------- */
1625
1626#if LJ_TARGET_X86ORX64
1627#include "lj_asm_x86.h"
1628#elif LJ_TARGET_ARM
1629#include "lj_asm_arm.h"
1630#elif LJ_TARGET_ARM64
1631#include "lj_asm_arm64.h"
1632#elif LJ_TARGET_PPC
1633#include "lj_asm_ppc.h"
1634#elif LJ_TARGET_MIPS
1635#include "lj_asm_mips.h"
1636#else
1637#error "Missing assembler for target CPU"
1638#endif
1639
1640/* -- Common instruction helpers ------------------------------------------ */
1641
1642#if !LJ_SOFTFP32
1643#if !LJ_TARGET_X86ORX64
1644#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)
1645#define asm_fppowi(as, ir) asm_callid(as, ir, IRCALL_lj_vm_powi)
1646#endif
1647
1648static void asm_pow(ASMState *as, IRIns *ir)
1649{
1650#if LJ_64 && LJ_HASFFI
1651 if (!irt_isnum(ir->t))
1652 asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
1653 IRCALL_lj_carith_powu64);
1654 else
1655#endif
1656 if (irt_isnum(IR(ir->op2)->t))
1657 asm_callid(as, ir, IRCALL_pow);
1658 else
1659 asm_fppowi(as, ir);
1660}
1661
1662static void asm_div(ASMState *as, IRIns *ir)
1663{
1664#if LJ_64 && LJ_HASFFI
1665 if (!irt_isnum(ir->t))
1666 asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
1667 IRCALL_lj_carith_divu64);
1668 else
1669#endif
1670 asm_fpdiv(as, ir);
1671}
1672#endif
1673
1674static void asm_mod(ASMState *as, IRIns *ir)
1675{
1676#if LJ_64 && LJ_HASFFI
1677 if (!irt_isint(ir->t))
1678 asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
1679 IRCALL_lj_carith_modu64);
1680 else
1681#endif
1682 asm_callid(as, ir, IRCALL_lj_vm_modi);
1683}
1684
1685static void asm_fuseequal(ASMState *as, IRIns *ir)
1686{
1687 /* Fuse HREF + EQ/NE. */
1688 if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
1689 as->curins--;
1690 asm_href(as, ir-1, (IROp)ir->o);
1691 } else {
1692 asm_equal(as, ir);
1693 }
1694}
1695
static void asm_alen(ASMState *as, IRIns *ir)
{
  asm_callid(as, ir, ir->op2 == REF_NIL ? IRCALL_lj_tab_len :
                                          IRCALL_lj_tab_len_hint);
}

/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR:
    lj_assertA(!ra_used(ir),
               "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS);
    break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;
  case IR_PROF: asm_prof(as, ir); break;

  /* Guarded assertions. */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
    asm_comp(as, ir);
    break;
  case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bnot(as, ir); break;
  case IR_BSWAP: asm_bswap(as, ir); break;
  case IR_BAND: asm_band(as, ir); break;
  case IR_BOR: asm_bor(as, ir); break;
  case IR_BXOR: asm_bxor(as, ir); break;
  case IR_BSHL: asm_bshl(as, ir); break;
  case IR_BSHR: asm_bshr(as, ir); break;
  case IR_BSAR: asm_bsar(as, ir); break;
  case IR_BROL: asm_brol(as, ir); break;
  case IR_BROR: asm_bror(as, ir); break;

  /* Arithmetic ops. */
  case IR_ADD: asm_add(as, ir); break;
  case IR_SUB: asm_sub(as, ir); break;
  case IR_MUL: asm_mul(as, ir); break;
  case IR_MOD: asm_mod(as, ir); break;
  case IR_NEG: asm_neg(as, ir); break;
#if LJ_SOFTFP32
  case IR_DIV: case IR_POW: case IR_ABS:
  case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
    /* Unused for LJ_SOFTFP32. */
    lj_assertA(0, "IR %04d with unused op %d",
               (int)(ir - as->ir) - REF_BIAS, ir->o);
    break;
#else
  case IR_DIV: asm_div(as, ir); break;
  case IR_POW: asm_pow(as, ir); break;
  case IR_ABS: asm_abs(as, ir); break;
  case IR_LDEXP: asm_ldexp(as, ir); break;
  case IR_FPMATH: asm_fpmath(as, ir); break;
  case IR_TOBIT: asm_tobit(as, ir); break;
#endif
  case IR_MIN: asm_min(as, ir); break;
  case IR_MAX: asm_max(as, ir); break;

  /* Overflow-checking arithmetic ops. */
  case IR_ADDOV: asm_addov(as, ir); break;
  case IR_SUBOV: asm_subov(as, ir); break;
  case IR_MULOV: asm_mulov(as, ir); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;
  case IR_LREF: asm_lref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;
  case IR_ALEN: asm_alen(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI:
#if LJ_HASFFI
    asm_cnew(as, ir);
#else
    lj_assertA(0, "IR %04d with unused op %d",
               (int)(ir - as->ir) - REF_BIAS, ir->o);
#endif
    break;

  /* Buffer operations. */
  case IR_BUFHDR: asm_bufhdr(as, ir); break;
  case IR_BUFPUT: asm_bufput(as, ir); break;
  case IR_BUFSTR: asm_bufstr(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLA:
    as->gcsteps++;
    /* fallthrough */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
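    /* Unhandled IR op: store the opcode in errinfo and raise an NYI trace error. */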
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}

/* -- Head of trace ------------------------------------------------------- */

/* Head of a root trace. */
static void asm_head_root(ASMState *as)
{
  int32_t spadj;
  asm_head_root_base(as);
  emit_setvmstate(as, (int32_t)as->T->traceno);
  spadj = asm_stack_adjust(as);
  as->T->spadjust = (uint16_t)spadj;
  emit_spsub(as, spadj);
  /* Root traces assume a checked stack for the starting proto. */
  as->T->topslot = gcref(as->T->startpt)->pt.framesize;
}

/* Head of a side trace.
**
** The current simplistic algorithm requires that all slots inherited
** from the parent are live in a register between pass 2 and pass 3. This
** avoids the complexity of stack slot shuffling. But of course this may
** overflow the register set in some cases and cause the dreaded error:
** "NYI: register coalescing too complex". A refined algorithm is needed.
*/
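/* Illustrative sketch of the cycle problem handled below (hypothetical
** register numbers): the parent may exit with slot #1 in r1 and slot #2
** in r2 while this trace wants slot #1 in r2 and slot #2 in r1. Neither
** move can go first, so the shuffle loop breaks the cycle by renaming
** one target to a free temporary register and then resumes coalescing.
*/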
static void asm_head_side(ASMState *as)
{
  IRRef1 sloadins[RID_MAX];
  RegSet allow = RSET_ALL;  /* Inverse of all coalesced registers. */
  RegSet live = RSET_EMPTY;  /* Live parent registers. */
  IRIns *irp = &as->parent->ir[REF_BASE];  /* Parent base. */
  int32_t spadj, spdelta;
  int pass2 = 0;
  int pass3 = 0;
  IRRef i;

  if (as->snapno && as->topslot > as->parent->topslot) {
    /* Force snap #0 alloc to prevent register overwrite in stack check. */
    asm_snap_alloc(as, 0);
  }
  allow = asm_head_side_base(as, irp, allow);

  /* Scan all parent SLOADs and collect register dependencies. */
  for (i = as->stopins; i > REF_BASE; i--) {
    IRIns *ir = IR(i);
    RegSP rs;
    lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
               (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL,
               "IR %04d has bad parent op %d",
               (int)(ir - as->ir) - REF_BIAS, ir->o);
    rs = as->parentmap[i - REF_FIRST];
    if (ra_hasreg(ir->r)) {
      rset_clear(allow, ir->r);
      if (ra_hasspill(ir->s)) {
        ra_save(as, ir, ir->r);
        checkmclim(as);
      }
    } else if (ra_hasspill(ir->s)) {
      irt_setmark(ir->t);
      pass2 = 1;
    }
    if (ir->r == rs) {  /* Coalesce matching registers right now. */
      ra_free(as, ir->r);
    } else if (ra_hasspill(regsp_spill(rs))) {
      if (ra_hasreg(ir->r))
        pass3 = 1;
    } else if (ra_used(ir)) {
      sloadins[rs] = (IRRef1)i;
      rset_set(live, rs);  /* Block live parent register. */
    }
  }

  /* Calculate stack frame adjustment. */
  spadj = asm_stack_adjust(as);
  spdelta = spadj - (int32_t)as->parent->spadjust;
  if (spdelta < 0) {  /* Don't shrink the stack frame. */
    spadj = (int32_t)as->parent->spadjust;
    spdelta = 0;
  }
  as->T->spadjust = (uint16_t)spadj;

  /* Reload spilled target registers. */
  if (pass2) {
    for (i = as->stopins; i > REF_BASE; i--) {
      IRIns *ir = IR(i);
      if (irt_ismarked(ir->t)) {
        RegSet mask;
        Reg r;
        RegSP rs;
        irt_clearmark(ir->t);
        rs = as->parentmap[i - REF_FIRST];
        if (!ra_hasspill(regsp_spill(rs)))
          ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
        else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
          continue;  /* Same spill slot, do nothing. */
        mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
        if (mask == RSET_EMPTY)
          lj_trace_err(as->J, LJ_TRERR_NYICOAL);
        r = ra_allocref(as, i, mask);
        ra_save(as, ir, r);
        rset_clear(allow, r);
        if (r == rs) {  /* Coalesce matching registers right now. */
          ra_free(as, r);
          rset_clear(live, r);
        } else if (ra_hasspill(regsp_spill(rs))) {
          pass3 = 1;
        }
        checkmclim(as);
      }
    }
  }

  /* Store trace number and adjust stack frame relative to the parent. */
  emit_setvmstate(as, (int32_t)as->T->traceno);
  emit_spsub(as, spdelta);

#if !LJ_TARGET_X86ORX64
  /* Restore BASE register from parent spill slot. */
  if (ra_hasspill(irp->s))
    emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
#endif

  /* Restore target registers from parent spill slots. */
  if (pass3) {
    RegSet work = ~as->freeset & RSET_ALL;
    while (work) {
      Reg r = rset_pickbot(work);
      IRRef ref = regcost_ref(as->cost[r]);
      RegSP rs = as->parentmap[ref - REF_FIRST];
      rset_clear(work, r);
      if (ra_hasspill(regsp_spill(rs))) {
        int32_t ofs = sps_scale(regsp_spill(rs));
        ra_free(as, r);
        emit_spload(as, IR(ref), r, ofs);
        checkmclim(as);
      }
    }
  }

  /* Shuffle registers to match up target regs with parent regs. */
  for (;;) {
    RegSet work;

    /* Repeatedly coalesce free live registers by moving to their target. */
    while ((work = as->freeset & live) != RSET_EMPTY) {
      Reg rp = rset_pickbot(work);
      IRIns *ir = IR(sloadins[rp]);
      rset_clear(live, rp);
      rset_clear(allow, rp);
      ra_free(as, ir->r);
      emit_movrr(as, ir, ir->r, rp);
      checkmclim(as);
    }

    /* We're done if no live registers remain. */
    if (live == RSET_EMPTY)
      break;

    /* Break cycles by renaming one target to a temp. register. */
    if (live & RSET_GPR) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
      if (tmpset == RSET_EMPTY)
        lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
    }
    if (!LJ_SOFTFP && (live & RSET_FPR)) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
      if (tmpset == RSET_EMPTY)
        lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
    }
    checkmclim(as);
    /* Continue with coalescing to fix up the broken cycle(s). */
  }

  /* Inherit top stack slot already checked by parent trace. */
  as->T->topslot = as->parent->topslot;
  if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
#ifdef EXITSTATE_CHECKEXIT
    /* Highest exit + 1 indicates stack check. */
    ExitNo exitno = as->T->nsnap;
#else
    /* Reuse the parent exit in the context of the parent trace. */
    ExitNo exitno = as->J->exitno;
#endif
    as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
    asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
  }
}

/* -- Tail of trace ------------------------------------------------------- */

/* Get base slot for a snapshot. */
static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n;
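  /* Entries are ordered by slot, so a backwards scan finds the topmost frame. */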
  for (n = snap->nent; n > 0; n--) {
    SnapEntry sn = map[n-1];
    if ((sn & SNAP_FRAME)) {
      *gotframe = 1;
      return snap_slot(sn) - LJ_FR2;
    }
  }
  return 0;
}

/* Link to another trace. */
static void asm_tail_link(ASMState *as)
{
  SnapNo snapno = as->T->nsnap-1;  /* Last snapshot. */
  SnapShot *snap = &as->T->snap[snapno];
  int gotframe = 0;
  BCReg baseslot = asm_baseslot(as, snap, &gotframe);

  as->topslot = snap->topslot;
  checkmclim(as);
  ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));

  if (as->T->link == 0) {
    /* Setup fixed registers for exit to interpreter. */
    const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
    int32_t mres;
    if (bc_op(*pc) == BC_JLOOP) {  /* NYI: find a better way to do this. */
      BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
      if (bc_isret(bc_op(*retpc)))
        pc = retpc;
    }
#if LJ_GC64
    emit_loadu64(as, RID_LPC, u64ptr(pc));
#else
    ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
    ra_allockreg(as, i32ptr(pc), RID_LPC);
#endif
    mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
    switch (bc_op(*pc)) {
    case BC_CALLM: case BC_CALLMT:
      mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
    case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
    case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
    default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
    }
    ra_allockreg(as, mres, RID_RET);  /* Return MULTRES or 0. */
  } else if (baseslot) {
    /* Save modified BASE for linking to trace with higher start frame. */
    emit_setgl(as, RID_BASE, jit_base);
  }
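  /* Rebase BASE to the base of the topmost frame (8 bytes per stack slot). */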
  emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);

  if (as->J->ktrace) {  /* Patch ktrace slot with the final GCtrace pointer. */
    setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
    IR(as->J->ktrace)->o = IR_KGC;
  }

  /* Sync the interpreter state with the on-trace state. */
  asm_stack_restore(as, snap);

  /* Root traces that add frames need to check the stack at the end. */
  if (!as->parent && gotframe)
    asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
}

/* -- Trace setup --------------------------------------------------------- */

/* Clear reg/sp for all instructions and add register hints. */
static void asm_setup_regsp(ASMState *as)
{
  GCtrace *T = as->T;
  int sink = T->sinktags;
  IRRef nins = T->nins;
  IRIns *ir, *lastir;
  int inloop;
#if LJ_TARGET_ARM
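  /* Rotating word of GPR hints for load results: each hinted load below
  ** consumes the low nibble, then the word is rotated by 4 bits. */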
  uint32_t rload = 0xa6402a64;
#endif

  ra_setup(as);

  /* Clear reg/sp for constants. */
  for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
    ir->prev = REGSP_INIT;
    if (irt_is64(ir->t) && ir->o != IR_KNULL) {
#if LJ_GC64
      /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
      ir->i = 0;  /* Will become non-zero only for RIP-relative addresses. */
#else
      /* Make life easier for backends by putting address of constant in i. */
      ir->i = (int32_t)(intptr_t)(ir+1);
#endif
      ir++;
    }
  }

  /* REF_BASE is used for implicit references to the BASE register. */
  lastir->prev = REGSP_HINT(RID_BASE);

  as->snaprename = nins;
  as->snapref = nins;
  as->snapno = T->nsnap;
  as->snapalloc = 0;

  as->stopins = REF_BASE;
  as->orignins = nins;
  as->curins = nins;

  /* Setup register hints for parent link instructions. */
  ir = IR(REF_FIRST);
  if (as->parent) {
    uint16_t *p;
    lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir);
    if (lastir - ir > LJ_MAX_JSLOTS)
      lj_trace_err(as->J, LJ_TRERR_NYICOAL);
    as->stopins = (IRRef)((lastir-1) - as->ir);
    for (p = as->parentmap; ir < lastir; ir++) {
      RegSP rs = ir->prev;
      *p++ = (uint16_t)rs;  /* Copy original parent RegSP to parentmap. */
      if (!ra_hasspill(regsp_spill(rs)))
        ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
      else
        ir->prev = REGSP_INIT;
    }
  }

  inloop = 0;
  as->evenspill = SPS_FIRST;
  for (lastir = IR(nins); ir < lastir; ir++) {
    if (sink) {
      if (ir->r == RID_SINK)
        continue;
      if (ir->r == RID_SUNK) {  /* Revert after ASM restart. */
        ir->r = RID_SINK;
        continue;
      }
    }
    switch (ir->o) {
    case IR_LOOP:
      inloop = 1;
      break;
#if LJ_TARGET_ARM
    case IR_SLOAD:
      if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
        break;
      /* fallthrough */
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
      ir->prev = (uint16_t)REGSP_HINT((rload & 15));
      rload = lj_ror(rload, 4);
      continue;
#endif
    case IR_CALLXS: {
      CCallInfo ci;
      ci.flags = asm_callx_flags(as, ir);
      ir->prev = asm_setup_call_slots(as, ir, &ci);
      if (inloop)
        as->modset |= RSET_SCRATCH;
      continue;
    }
    case IR_CALLN: case IR_CALLA: case IR_CALLL: case IR_CALLS: {
      const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
      ir->prev = asm_setup_call_slots(as, ir, ci);
      if (inloop)
        as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
                      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
      continue;
    }
#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
    case IR_HIOP:
      switch ((ir-1)->o) {
#if LJ_SOFTFP && LJ_TARGET_ARM
      case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
        if (ra_hashint((ir-1)->r)) {
          ir->prev = (ir-1)->prev + 1;
          continue;
        }
        break;
#endif
#if !LJ_SOFTFP && LJ_NEED_FP64
      case IR_CONV:
        if (irt_isfp((ir-1)->t)) {
          ir->prev = REGSP_HINT(RID_FPRET);
          continue;
        }
#endif
        /* fallthrough */
      case IR_CALLN: case IR_CALLXS:
#if LJ_SOFTFP
      case IR_MIN: case IR_MAX:
#endif
        (ir-1)->prev = REGSP_HINT(RID_RETLO);
        ir->prev = REGSP_HINT(RID_RETHI);
        continue;
      default:
        break;
      }
      break;
#endif
#if LJ_SOFTFP
    case IR_MIN: case IR_MAX:
      if ((ir+1)->o != IR_HIOP) break;
#endif
    /* fallthrough */
    /* C calls evict all scratch regs and return results in RID_RET. */
    case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
      if (REGARG_NUMGPR < 3 && as->evenspill < 3)
        as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
#if LJ_TARGET_X86 && LJ_HASFFI
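      /* The 'if (0)' body below is entered only via its case label;
      ** the fallthrough path from the cases above skips it entirely. */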
      if (0) {
      case IR_CNEW:
        if (ir->op2 != REF_NIL && as->evenspill < 4)
          as->evenspill = 4;  /* lj_cdata_newv needs 4 args. */
      }
      /* fallthrough */
#else
      /* fallthrough */
    case IR_CNEW:
#endif
      /* fallthrough */
    case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
    case IR_BUFSTR:
      ir->prev = REGSP_HINT(RID_RET);
      if (inloop)
        as->modset = RSET_SCRATCH;
      continue;
    case IR_STRTO: case IR_OBAR:
      if (inloop)
        as->modset = RSET_SCRATCH;
      break;
#if !LJ_SOFTFP
#if !LJ_TARGET_X86ORX64
    case IR_LDEXP:
#endif
#endif
      /* fallthrough */
    case IR_POW:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) {
        if (inloop)
          as->modset |= RSET_SCRATCH;
#if LJ_TARGET_X86
        if (irt_isnum(IR(ir->op2)->t)) {
          if (as->evenspill < 4)  /* Leave room to call pow(). */
            as->evenspill = 4;
        }
        break;
#else
        ir->prev = REGSP_HINT(RID_FPRET);
        continue;
#endif
      }
      /* fallthrough */ /* for integer POW */
    case IR_DIV: case IR_MOD:
      if (!irt_isnum(ir->t)) {
        ir->prev = REGSP_HINT(RID_RET);
        if (inloop)
          as->modset |= (RSET_SCRATCH & RSET_GPR);
        continue;
      }
      break;
    case IR_FPMATH:
#if LJ_TARGET_X86ORX64
      if (ir->op2 <= IRFPM_TRUNC) {
        if (!(as->flags & JIT_F_SSE4_1)) {
          ir->prev = REGSP_HINT(RID_XMM0);
          if (inloop)
            as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
          continue;
        }
        break;
      }
#endif
      if (inloop)
        as->modset |= RSET_SCRATCH;
#if LJ_TARGET_X86
      break;
#else
      ir->prev = REGSP_HINT(RID_FPRET);
      continue;
#endif
#if LJ_TARGET_X86ORX64
    /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
    case IR_BSHL: case IR_BSHR: case IR_BSAR:
      if ((as->flags & JIT_F_BMI2))  /* Except if BMI2 is available. */
        break;
      /* fallthrough */
    case IR_BROL: case IR_BROR:
      if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
        IR(ir->op2)->r = REGSP_HINT(RID_ECX);
        if (inloop)
          rset_set(as->modset, RID_ECX);
      }
      break;
#endif
    /* Do not propagate hints across type conversions or loads. */
    case IR_TOBIT:
    case IR_XLOAD:
#if !LJ_TARGET_ARM
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
#endif
      break;
    case IR_CONV:
      if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
          (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
        break;
      /* fallthrough */
    default:
      /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
      if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
          ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
        ir->prev = IR(ir->op1)->prev;
        continue;
      }
      break;
    }
    ir->prev = REGSP_INIT;
  }
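  /* Keep the even spill counter even: a leftover odd slot is parked in oddspill. */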
  if ((as->evenspill & 1))
    as->oddspill = as->evenspill++;
  else
    as->oddspill = 0;
}

/* -- Assembler core ------------------------------------------------------ */

/* Assemble a trace. */
void lj_asm_trace(jit_State *J, GCtrace *T)
{
  ASMState as_;
  ASMState *as = &as_;

  /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
  {
    IRRef nins = T->nins;
    IRIns *ir = &T->ir[nins-1];
    if (ir->o == IR_NOP || ir->o == IR_RENAME) {
      do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
      T->nins = nins;
    }
  }

  /* Ensure an initialized instruction beyond the last one for HIOP checks. */
  /* This also allows one RENAME to be added without reallocating curfinal. */
  as->orignins = lj_ir_nextins(J);
  lj_ir_nop(&J->cur.ir[as->orignins]);

  /* Setup initial state. Copy some fields to reduce indirections. */
  as->J = J;
  as->T = T;
  J->curfinal = lj_trace_alloc(J->L, T);  /* This copies the IR, too. */
  as->flags = J->flags;
  as->loopref = J->loopref;
  as->realign = NULL;
  as->loopinv = 0;
  as->parent = J->parent ? traceref(J, J->parent) : NULL;

  /* Reserve MCode memory. */
  as->mctop = as->mctoporig = lj_mcode_reserve(J, &as->mcbot);
  as->mcp = as->mctop;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  asm_setup_target(as);

  /*
  ** This is a loop, because the MCode may have to be (re-)assembled
  ** multiple times:
  **
  ** 1. as->realign is set (and the assembly aborted), if the arch-specific
  **    backend wants the MCode to be aligned differently.
  **
  **    This is currently only the case on x86/x64, where small loops get
  **    an aligned loop body plus a short branch. Not much effort is wasted,
  **    because the abort happens very quickly and only once.
  **
  ** 2. The IR is immovable, since the MCode embeds pointers to various
  **    constants inside the IR. But RENAMEs may need to be added to the IR
  **    during assembly, which might grow and reallocate the IR. We check
  **    at the end if the IR (in J->cur.ir) has actually grown, resize the
  **    copy (in J->curfinal.ir) and try again.
  **
  **    95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
  **    2 RENAMEs and only 0.5% have more than that. That's why we opt to
  **    always have one spare slot in the IR (see above), which means we
  **    have to redo the assembly for only ~2% of all traces.
  **
  **    Very, very rarely, this needs to be done repeatedly, since the
  **    location of constants inside the IR (actually, reachability from
  **    a global pointer) may affect register allocation and thus the
  **    number of RENAMEs.
  */
  for (;;) {
    as->mcp = as->mctop;
#ifdef LUA_USE_ASSERT
    as->mcp_prev = as->mcp;
#endif
    as->ir = J->curfinal->ir;  /* Use the copied IR. */
    as->curins = J->cur.nins = as->orignins;

    RA_DBG_START();
    RA_DBGX((as, "===== STOP ====="));

    /* General trace setup. Emit tail of trace. */
    asm_tail_prep(as);
    as->mcloop = NULL;
    as->flagmcp = NULL;
    as->topslot = 0;
    as->gcsteps = 0;
    as->sectref = as->loopref;
    as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
    asm_setup_regsp(as);
    if (!as->loopref)
      asm_tail_link(as);

    /* Assemble a trace in linear backwards order. */
    for (as->curins--; as->curins > as->stopins; as->curins--) {
      IRIns *ir = IR(as->curins);
      /* 64 bit types handled by SPLIT for 32 bit archs. */
      lj_assertA(!(LJ_32 && irt_isint64(ir->t)),
                 "IR %04d has unsplit 64 bit type",
                 (int)(ir - as->ir) - REF_BIAS);
      asm_snap_prev(as);
      if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
        continue;  /* Dead-code elimination can be soooo easy. */
      if (irt_isguard(ir->t))
        asm_snap_prep(as);
      RA_DBG_REF();
      checkmclim(as);
      asm_ir(as, ir);
    }

    if (as->realign && J->curfinal->nins >= T->nins)
      continue;  /* Retry in case only the MCode needs to be realigned. */

    /* Emit head of trace. */
    RA_DBG_REF();
    checkmclim(as);
    if (as->gcsteps > 0) {
      as->curins = as->T->snap[0].ref;
      asm_snap_prep(as);  /* The GC check is a guard. */
      asm_gc_check(as);
      as->curins = as->stopins;
    }
    ra_evictk(as);
    if (as->parent)
      asm_head_side(as);
    else
      asm_head_root(as);
    asm_phi_fixup(as);

    if (J->curfinal->nins >= T->nins) {  /* IR didn't grow? */
      lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth");
      memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
             (T->nins - as->orignins) * sizeof(IRIns));  /* Copy RENAMEs. */
      T->nins = J->curfinal->nins;
      /* Fill mcofs of any unprocessed snapshots. */
      as->curins = REF_FIRST;
      asm_snap_prev(as);
      break;  /* Done. */
    }

    /* Otherwise try again with a bigger IR. */
    lj_trace_free(J2G(J), J->curfinal);
    J->curfinal = NULL;  /* In case lj_trace_alloc() OOMs. */
    J->curfinal = lj_trace_alloc(J->L, T);
    as->realign = NULL;
  }

  RA_DBGX((as, "===== START ===="));
  RA_DBG_FLUSH();
  if (as->freeset != RSET_ALL)
    lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */

  /* Set trace entry point before fixing up tail to allow link to self. */
  T->mcode = as->mcp;
  T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
  if (!as->loopref)
    asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
  T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
  asm_snap_fixup_mcofs(as);
#if LJ_TARGET_MCODE_FIXUP
  asm_mcode_fixup(T->mcode, T->szmcode);
#endif
  lj_mcode_sync(T->mcode, as->mctoporig);
}

#undef IR

#endif