| 1 | /* |
| 2 | ** SSA IR (Intermediate Representation) emitter. |
| 3 | ** Copyright (C) 2005-2014 Mike Pall. See Copyright Notice in luajit.h |
| 4 | */ |
| 5 | |
| 6 | #define lj_ir_c |
| 7 | #define LUA_CORE |
| 8 | |
| 9 | /* For pointers to libc/libm functions. */ |
| 10 | #include <stdio.h> |
| 11 | #include <math.h> |
| 12 | |
| 13 | #include "lj_obj.h" |
| 14 | |
| 15 | #if LJ_HASJIT |
| 16 | |
| 17 | #include "lj_gc.h" |
| 18 | #include "lj_str.h" |
| 19 | #include "lj_tab.h" |
| 20 | #include "lj_ir.h" |
| 21 | #include "lj_jit.h" |
| 22 | #include "lj_ircall.h" |
| 23 | #include "lj_iropt.h" |
| 24 | #include "lj_trace.h" |
| 25 | #if LJ_HASFFI |
| 26 | #include "lj_ctype.h" |
| 27 | #include "lj_cdata.h" |
| 28 | #include "lj_carith.h" |
| 29 | #endif |
| 30 | #include "lj_vm.h" |
| 31 | #include "lj_strscan.h" |
| 32 | #include "lj_lib.h" |
| 33 | |
/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref) (&J->cur.ir[(ref)])  /* IR instruction for reference. Assumes a local jit_State *J. */
#define fins (&J->fold.ins)  /* Staged instruction for the fold engine. */

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
| 40 | |
| 41 | /* -- IR tables ----------------------------------------------------------- */ |
| 42 | |
| 43 | /* IR instruction modes. */ |
/* IR instruction modes.
** One mode byte per opcode, produced by expanding the IRDEF x-macro list
** with the IRMODE expander. The trailing 0 is a sentinel for IR__MAX.
*/
LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = {
IRDEF(IRMODE)
  0
};
| 48 | |
| 49 | /* IR type sizes. */ |
/* IR type sizes.
** One size byte per IR type, generated from the IRTDEF x-macro list.
** The trailing 0 is a sentinel for IRT__MAX.
*/
LJ_DATADEF const uint8_t lj_ir_type_size[IRT__MAX+1] = {
#define IRTSIZE(name, size) size,
IRTDEF(IRTSIZE)
#undef IRTSIZE
  0
};
| 56 | |
| 57 | /* C call info for CALL* instructions. */ |
/* C call info for CALL* instructions.
** Each entry pairs a (possibly conditionally-compiled) function pointer with
** a flags word packing the argument count, call kind, result IRType
** (shifted by CCI_OTSHIFT) and extra flags, as visible in the IRCALLCI
** expansion below. Terminated by a { NULL, 0 } sentinel.
*/
LJ_DATADEF const CCallInfo lj_ir_callinfo[] = {
#define IRCALLCI(cond, name, nargs, kind, type, flags) \
  { (ASMFunction)IRCALLCOND_##cond(name), \
    (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) },
IRCALLDEF(IRCALLCI)
#undef IRCALLCI
  { NULL, 0 }  /* Sentinel terminator. */
};
| 66 | |
| 67 | /* -- IR emitter ---------------------------------------------------------- */ |
| 68 | |
| 69 | /* Grow IR buffer at the top. */ |
/* Grow IR buffer at the top.
** Note: J->irbuf is a biased pointer (baseir - irbotlim), so that
** J->irbuf[ref] works directly for any ref >= irbotlim. The real
** allocation starts at J->irbuf + J->irbotlim.
*/
void LJ_FASTCALL lj_ir_growtop(jit_State *J)
{
  IRIns *baseir = J->irbuf + J->irbotlim;  /* Unbiased allocation start. */
  MSize szins = J->irtoplim - J->irbotlim;  /* Current capacity in IRIns. */
  if (szins) {
    /* Double the existing buffer in place. */
    baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns),
                                     2*szins*sizeof(IRIns));
    J->irtoplim = J->irbotlim + 2*szins;
  } else {
    /* First allocation: LJ_MIN_IRSZ slots, with a quarter of the space
    ** below REF_BASE reserved for the downward-growing constants.
    */
    baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns));
    J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4;
    J->irtoplim = J->irbotlim + LJ_MIN_IRSZ;
  }
  J->cur.ir = J->irbuf = baseir - J->irbotlim;  /* Re-bias the pointer. */
}
| 85 | |
| 86 | /* Grow IR buffer at the bottom or shift it up. */ |
/* Grow IR buffer at the bottom or shift it up.
** Called when the constant area (which grows downwards) runs out of room,
** i.e. J->cur.nk has reached J->irbotlim.
*/
static void lj_ir_growbot(jit_State *J)
{
  IRIns *baseir = J->irbuf + J->irbotlim;  /* Unbiased allocation start. */
  MSize szins = J->irtoplim - J->irbotlim;  /* Current capacity in IRIns. */
  lua_assert(szins != 0);
  lua_assert(J->cur.nk == J->irbotlim);  /* Bottom must really be exhausted. */
  if (J->cur.nins + (szins >> 1) < J->irtoplim) {
    /* More than half of the buffer is free on top: shift up by a quarter. */
    MSize ofs = szins >> 2;
    /* Move the used part [irbotlim, nins) up by ofs slots (regions overlap). */
    memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
    J->irbotlim -= ofs;  /* Window slides down; capacity is unchanged. */
    J->irtoplim -= ofs;
    J->cur.ir = J->irbuf = baseir - J->irbotlim;  /* Re-bias the pointer. */
  } else {
    /* Double the buffer size, but split the growth amongst top/bottom. */
    IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns);
    MSize ofs = szins >= 256 ? 128 : (szins >> 1);  /* Limit bottom growth. */
    memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
    lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns));
    J->irbotlim -= ofs;  /* ofs new slots at the bottom, rest at the top. */
    J->irtoplim = J->irbotlim + 2*szins;
    J->cur.ir = J->irbuf = newbase - J->irbotlim;  /* Re-bias the pointer. */
  }
}
| 111 | |
| 112 | /* Emit IR without any optimizations. */ |
| 113 | TRef LJ_FASTCALL lj_ir_emit(jit_State *J) |
| 114 | { |
| 115 | IRRef ref = lj_ir_nextins(J); |
| 116 | IRIns *ir = IR(ref); |
| 117 | IROp op = fins->o; |
| 118 | ir->prev = J->chain[op]; |
| 119 | J->chain[op] = (IRRef1)ref; |
| 120 | ir->o = op; |
| 121 | ir->op1 = fins->op1; |
| 122 | ir->op2 = fins->op2; |
| 123 | J->guardemit.irt |= fins->t.irt; |
| 124 | return TREF(ref, irt_t((ir->t = fins->t))); |
| 125 | } |
| 126 | |
| 127 | /* Emit call to a C function. */ |
/* Emit call to a C function.
** Variadic IRRef arguments follow the call ID. Multiple arguments are
** folded into a left-leaning chain of IR_CARG instructions, whose head is
** passed as op1 of the final CALL* instruction (op2 holds the call ID).
*/
TRef lj_ir_call(jit_State *J, IRCallID id, ...)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  uint32_t n = CCI_NARGS(ci);
  TRef tr = TREF_NIL;
  va_list argp;
  va_start(argp, id);
  /* CCI_L calls get one implicit argument that is not passed here --
  ** presumably the lua_State *; confirm against lj_ircall.h.
  */
  if ((ci->flags & CCI_L)) n--;
  if (n > 0)
    tr = va_arg(argp, IRRef);
  while (n-- > 1)  /* Chain remaining args via CARG instructions. */
    tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef));
  va_end(argp);
  if (CCI_OP(ci) == IR_CALLS)
    J->needsnap = 1;  /* Need snapshot after call with side effect. */
  return emitir(CCI_OPTYPE(ci), tr, id);
}
| 145 | |
| 146 | /* -- Interning of constants ---------------------------------------------- */ |
| 147 | |
| 148 | /* |
** IR instructions for constants are kept in the range J->cur.nk <= ref < REF_BIAS.
| 150 | ** They are chained like all other instructions, but grow downwards. |
** They are interned (like strings in the VM) to facilitate reference
| 152 | ** comparisons. The same constant must get the same reference. |
| 153 | */ |
| 154 | |
| 155 | /* Get ref of next IR constant and optionally grow IR. |
| 156 | ** Note: this may invalidate all IRIns *! |
| 157 | */ |
| 158 | static LJ_AINLINE IRRef ir_nextk(jit_State *J) |
| 159 | { |
| 160 | IRRef ref = J->cur.nk; |
| 161 | if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J); |
| 162 | J->cur.nk = --ref; |
| 163 | return ref; |
| 164 | } |
| 165 | |
| 166 | /* Intern int32_t constant. */ |
| 167 | TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k) |
| 168 | { |
| 169 | IRIns *ir, *cir = J->cur.ir; |
| 170 | IRRef ref; |
| 171 | for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev) |
| 172 | if (cir[ref].i == k) |
| 173 | goto found; |
| 174 | ref = ir_nextk(J); |
| 175 | ir = IR(ref); |
| 176 | ir->i = k; |
| 177 | ir->t.irt = IRT_INT; |
| 178 | ir->o = IR_KINT; |
| 179 | ir->prev = J->chain[IR_KINT]; |
| 180 | J->chain[IR_KINT] = (IRRef1)ref; |
| 181 | found: |
| 182 | return TREF(ref, IRT_INT); |
| 183 | } |
| 184 | |
| 185 | /* The MRef inside the KNUM/KINT64 IR instructions holds the address of the |
| 186 | ** 64 bit constant. The constants themselves are stored in a chained array |
| 187 | ** and shared across traces. |
| 188 | ** |
| 189 | ** Rationale for choosing this data structure: |
| 190 | ** - The address of the constants is embedded in the generated machine code |
| 191 | ** and must never move. A resizable array or hash table wouldn't work. |
| 192 | ** - Most apps need very few non-32 bit integer constants (less than a dozen). |
| 193 | ** - Linear search is hard to beat in terms of speed and low complexity. |
| 194 | */ |
/* One link of the chained constant array (see rationale above). */
typedef struct K64Array {
  MRef next;  /* Pointer to next list. */
  MSize numk;  /* Number of used elements in this array. */
  TValue k[LJ_MIN_K64SZ];  /* Array of constants. */
} K64Array;
| 200 | |
| 201 | /* Free all chained arrays. */ |
| 202 | void lj_ir_k64_freeall(jit_State *J) |
| 203 | { |
| 204 | K64Array *k; |
| 205 | for (k = mref(J->k64, K64Array); k; ) { |
| 206 | K64Array *next = mref(k->next, K64Array); |
| 207 | lj_mem_free(J2G(J), k, sizeof(K64Array)); |
| 208 | k = next; |
| 209 | } |
| 210 | } |
| 211 | |
| 212 | /* Find 64 bit constant in chained array or add it. */ |
/* Find 64 bit constant in chained array or add it.
** Returns a stable address (entries are never moved; machine code embeds
** these addresses -- see rationale above). Matching compares the raw 64 bit
** pattern, so +0/-0, NaN payloads and the FP abs-mask stay distinct.
*/
cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
{
  K64Array *k, *kp = NULL;
  TValue *ntv;
  MSize idx;
  /* Search for the constant in the whole chain of arrays. */
  for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) {
    kp = k;  /* Remember previous element in list. */
    for (idx = 0; idx < k->numk; idx++) {  /* Search one array. */
      TValue *tv = &k->k[idx];
      if (tv->u64 == u64)  /* Needed for +-0/NaN/absmask. */
	return tv;
    }
  }
  /* Constant was not found, need to add it.
  ** kp now points at the last array in the chain (or is NULL if none).
  */
  if (!(kp && kp->numk < LJ_MIN_K64SZ)) {  /* Allocate a new array. */
    K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
    setmref(kn->next, NULL);
    kn->numk = 0;
    if (kp)
      setmref(kp->next, kn);  /* Chain to the end of the list. */
    else
      setmref(J->k64, kn);  /* Link first array. */
    kp = kn;
  }
  ntv = &kp->k[kp->numk++];  /* Add to current array. */
  ntv->u64 = u64;
  return ntv;
}
| 242 | |
| 243 | /* Intern 64 bit constant, given by its address. */ |
| 244 | TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv) |
| 245 | { |
| 246 | IRIns *ir, *cir = J->cur.ir; |
| 247 | IRRef ref; |
| 248 | IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64; |
| 249 | for (ref = J->chain[op]; ref; ref = cir[ref].prev) |
| 250 | if (ir_k64(&cir[ref]) == tv) |
| 251 | goto found; |
| 252 | ref = ir_nextk(J); |
| 253 | ir = IR(ref); |
| 254 | lua_assert(checkptr32(tv)); |
| 255 | setmref(ir->ptr, tv); |
| 256 | ir->t.irt = t; |
| 257 | ir->o = op; |
| 258 | ir->prev = J->chain[op]; |
| 259 | J->chain[op] = (IRRef1)ref; |
| 260 | found: |
| 261 | return TREF(ref, t); |
| 262 | } |
| 263 | |
| 264 | /* Intern FP constant, given by its 64 bit pattern. */ |
| 265 | TRef lj_ir_knum_u64(jit_State *J, uint64_t u64) |
| 266 | { |
| 267 | return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64)); |
| 268 | } |
| 269 | |
| 270 | /* Intern 64 bit integer constant. */ |
| 271 | TRef lj_ir_kint64(jit_State *J, uint64_t u64) |
| 272 | { |
| 273 | return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64)); |
| 274 | } |
| 275 | |
| 276 | /* Check whether a number is int and return it. -0 is NOT considered an int. */ |
| 277 | static int numistrueint(lua_Number n, int32_t *kp) |
| 278 | { |
| 279 | int32_t k = lj_num2int(n); |
| 280 | if (n == (lua_Number)k) { |
| 281 | if (kp) *kp = k; |
| 282 | if (k == 0) { /* Special check for -0. */ |
| 283 | TValue tv; |
| 284 | setnumV(&tv, n); |
| 285 | if (tv.u32.hi != 0) |
| 286 | return 0; |
| 287 | } |
| 288 | return 1; |
| 289 | } |
| 290 | return 0; |
| 291 | } |
| 292 | |
| 293 | /* Intern number as int32_t constant if possible, otherwise as FP constant. */ |
| 294 | TRef lj_ir_knumint(jit_State *J, lua_Number n) |
| 295 | { |
| 296 | int32_t k; |
| 297 | if (numistrueint(n, &k)) |
| 298 | return lj_ir_kint(J, k); |
| 299 | else |
| 300 | return lj_ir_knum(J, n); |
| 301 | } |
| 302 | |
| 303 | /* Intern GC object "constant". */ |
| 304 | TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t) |
| 305 | { |
| 306 | IRIns *ir, *cir = J->cur.ir; |
| 307 | IRRef ref; |
| 308 | lua_assert(!isdead(J2G(J), o)); |
| 309 | for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev) |
| 310 | if (ir_kgc(&cir[ref]) == o) |
| 311 | goto found; |
| 312 | ref = ir_nextk(J); |
| 313 | ir = IR(ref); |
| 314 | /* NOBARRIER: Current trace is a GC root. */ |
| 315 | setgcref(ir->gcr, o); |
| 316 | ir->t.irt = (uint8_t)t; |
| 317 | ir->o = IR_KGC; |
| 318 | ir->prev = J->chain[IR_KGC]; |
| 319 | J->chain[IR_KGC] = (IRRef1)ref; |
| 320 | found: |
| 321 | return TREF(ref, t); |
| 322 | } |
| 323 | |
| 324 | /* Intern 32 bit pointer constant. */ |
| 325 | TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr) |
| 326 | { |
| 327 | IRIns *ir, *cir = J->cur.ir; |
| 328 | IRRef ref; |
| 329 | lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr); |
| 330 | for (ref = J->chain[op]; ref; ref = cir[ref].prev) |
| 331 | if (mref(cir[ref].ptr, void) == ptr) |
| 332 | goto found; |
| 333 | ref = ir_nextk(J); |
| 334 | ir = IR(ref); |
| 335 | setmref(ir->ptr, ptr); |
| 336 | ir->t.irt = IRT_P32; |
| 337 | ir->o = op; |
| 338 | ir->prev = J->chain[op]; |
| 339 | J->chain[op] = (IRRef1)ref; |
| 340 | found: |
| 341 | return TREF(ref, IRT_P32); |
| 342 | } |
| 343 | |
| 344 | /* Intern typed NULL constant. */ |
| 345 | TRef lj_ir_knull(jit_State *J, IRType t) |
| 346 | { |
| 347 | IRIns *ir, *cir = J->cur.ir; |
| 348 | IRRef ref; |
| 349 | for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev) |
| 350 | if (irt_t(cir[ref].t) == t) |
| 351 | goto found; |
| 352 | ref = ir_nextk(J); |
| 353 | ir = IR(ref); |
| 354 | ir->i = 0; |
| 355 | ir->t.irt = (uint8_t)t; |
| 356 | ir->o = IR_KNULL; |
| 357 | ir->prev = J->chain[IR_KNULL]; |
| 358 | J->chain[IR_KNULL] = (IRRef1)ref; |
| 359 | found: |
| 360 | return TREF(ref, t); |
| 361 | } |
| 362 | |
| 363 | /* Intern key slot. */ |
/* Intern key slot.
** A KSLOT constant packs a (constant) key reference and a slot index into
** op12; interning matches on the combined op12 value. The key must itself
** be an interned constant and the slot must fit into an IRRef1.
*/
TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot)
{
  IRIns *ir, *cir = J->cur.ir;
  IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot);
  IRRef ref;
  /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */
  lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot);
  for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev)
    if (cir[ref].op12 == op12)
      goto found;
  ref = ir_nextk(J);
  ir = IR(ref);
  ir->op12 = op12;
  ir->t.irt = IRT_P32;
  ir->o = IR_KSLOT;
  ir->prev = J->chain[IR_KSLOT];
  J->chain[IR_KSLOT] = (IRRef1)ref;
found:
  return TREF(ref, IRT_P32);
}
| 384 | |
| 385 | /* -- Access to IR constants ---------------------------------------------- */ |
| 386 | |
| 387 | /* Copy value of IR constant. */ |
/* Copy value of IR constant.
** Materializes the constant held by an IR instruction into the TValue *tv.
** Only valid for constant opcodes; KSLOT must be resolved by the caller.
*/
void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
{
  UNUSED(L);  /* Only used by some cases below, depending on build config. */
  lua_assert(ir->o != IR_KSLOT);  /* Common mistake. */
  switch (ir->o) {
  case IR_KPRI: setitype(tv, irt_toitype(ir->t)); break;  /* nil/false/true. */
  case IR_KINT: setintV(tv, ir->i); break;
  case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break;
  case IR_KPTR: case IR_KKPTR: case IR_KNULL:
    /* Pointer constants are exposed as light userdata. */
    setlightudV(tv, mref(ir->ptr, void));
    break;
  case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break;
#if LJ_HASFFI
  case IR_KINT64: {
    /* 64 bit integers are boxed as int64_t cdata. */
    GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8);
    *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64;
    setcdataV(L, tv, cd);
    break;
  }
#endif
  default: lua_assert(0); break;
  }
}
| 411 | |
| 412 | /* -- Convert IR operand types -------------------------------------------- */ |
| 413 | |
| 414 | /* Convert from string to number. */ |
| 415 | TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr) |
| 416 | { |
| 417 | if (!tref_isnumber(tr)) { |
| 418 | if (tref_isstr(tr)) |
| 419 | tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); |
| 420 | else |
| 421 | lj_trace_err(J, LJ_TRERR_BADTYPE); |
| 422 | } |
| 423 | return tr; |
| 424 | } |
| 425 | |
| 426 | /* Convert from integer or string to number. */ |
| 427 | TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr) |
| 428 | { |
| 429 | if (!tref_isnum(tr)) { |
| 430 | if (tref_isinteger(tr)) |
| 431 | tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT); |
| 432 | else if (tref_isstr(tr)) |
| 433 | tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0); |
| 434 | else |
| 435 | lj_trace_err(J, LJ_TRERR_BADTYPE); |
| 436 | } |
| 437 | return tr; |
| 438 | } |
| 439 | |
| 440 | /* Convert from integer or number to string. */ |
| 441 | TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr) |
| 442 | { |
| 443 | if (!tref_isstr(tr)) { |
| 444 | if (!tref_isnumber(tr)) |
| 445 | lj_trace_err(J, LJ_TRERR_BADTYPE); |
| 446 | tr = emitir(IRT(IR_TOSTR, IRT_STR), tr, 0); |
| 447 | } |
| 448 | return tr; |
| 449 | } |
| 450 | |
| 451 | /* -- Miscellaneous IR ops ------------------------------------------------ */ |
| 452 | |
| 453 | /* Evaluate numeric comparison. */ |
/* Evaluate numeric comparison.
** The IR_U* variants are written as negations of the opposite ordered
** comparison: in C, any ordered comparison involving a NaN is false, so
** the negated form yields true for unordered operands.
*/
int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op)
{
  switch (op) {
  case IR_EQ: return (a == b);
  case IR_NE: return (a != b);
  case IR_LT: return (a < b);
  case IR_GE: return (a >= b);
  case IR_LE: return (a <= b);
  case IR_GT: return (a > b);
  case IR_ULT: return !(a >= b);  /* Unordered or less than. */
  case IR_UGE: return !(a < b);  /* Unordered or greater or equal. */
  case IR_ULE: return !(a > b);  /* Unordered or less or equal. */
  case IR_UGT: return !(a <= b);  /* Unordered or greater than. */
  default: lua_assert(0); return 0;
  }
}
| 470 | |
| 471 | /* Evaluate string comparison. */ |
/* Evaluate string comparison.
** Maps lj_str_cmp's three-way result onto the ordered comparison ops.
** Only the four ordering ops are handled here; equality on strings is
** presumably decided by identity elsewhere -- only LT/GE/LE/GT are valid.
*/
int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op)
{
  int res = lj_str_cmp(a, b);  /* <0, 0 or >0, like strcmp. */
  switch (op) {
  case IR_LT: return (res < 0);
  case IR_GE: return (res >= 0);
  case IR_LE: return (res <= 0);
  case IR_GT: return (res > 0);
  default: lua_assert(0); return 0;
  }
}
| 483 | |
| 484 | /* Rollback IR to previous state. */ |
| 485 | void lj_ir_rollback(jit_State *J, IRRef ref) |
| 486 | { |
| 487 | IRRef nins = J->cur.nins; |
| 488 | while (nins > ref) { |
| 489 | IRIns *ir; |
| 490 | nins--; |
| 491 | ir = IR(nins); |
| 492 | J->chain[ir->o] = ir->prev; |
| 493 | } |
| 494 | J->cur.nins = nins; |
| 495 | } |
| 496 | |
| 497 | #undef IR |
| 498 | #undef fins |
| 499 | #undef emitir |
| 500 | |
| 501 | #endif |
| 502 | |