/*
** Garbage collector.
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/

#define lj_gc_c
#define LUA_CORE

#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_buf.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_func.h"
#include "lj_udata.h"
#include "lj_meta.h"
#include "lj_state.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#include "lj_cdata.h"
#endif
#include "lj_trace.h"
#include "lj_dispatch.h"
#include "lj_vm.h"

#define GCSTEPSIZE	1024u
#define GCSWEEPMAX	40
#define GCSWEEPCOST	10
#define GCFINALIZECOST	100
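
/* These constants tune the incremental stepper below: GCSTEPSIZE is roughly
** the step granularity in bytes (scaled by gc.stepmul in lj_gc_step),
** GCSWEEPMAX bounds the number of objects swept per GCSsweep step, and
** GCSWEEPCOST/GCFINALIZECOST are the per-object cost units charged against
** the step budget.
*/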

/* Macros to set GCobj colors and flags. */
#define white2gray(x)	((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)	((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)	((u)->marked & LJ_GC_FINALIZED)
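
/* A note on the coloring scheme used throughout this file: objects start out
** white, gc_mark() turns reachable objects gray (and queues most of them on
** the gray list), and propagatemark() turns gray objects black once their
** children have been marked. Two distinct white bits are used so that
** objects allocated after the white flip (carrying the current white) can be
** told apart from unreached objects still carrying the other white; only the
** latter are freed by the sweep.
*/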

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
  { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
               "TValue and GC type mismatch"); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. */
#define gc_mark_str(s)	((s)->marked &= (uint8_t)~LJ_GC_WHITES)

/* Mark a white GCobj. */
static void gc_mark(global_State *g, GCobj *o)
{
  int gct = o->gch.gct;
  lj_assertG(iswhite(o), "mark of non-white object");
  lj_assertG(!isdead(g, o), "mark of dead object");
  white2gray(o);
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
    lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
               "bad GC type %d", gct);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Mark GC roots. */
static void gc_mark_gcroot(global_State *g)
{
  ptrdiff_t i;
  for (i = 0; i < GCROOT_MAX; i++)
    if (gcref(g->gcroot[i]) != NULL)
      gc_markobj(g, gcref(g->gcroot[i]));
}

/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  gc_markobj(g, mainthread(g));
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);
  gc_mark_gcroot(g);
  g->gc.state = GCSpropagate;
}

/* Mark open upvalues. */
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
               "broken upvalue chain");
    if (isgray(obj2gco(uv)))
      gc_marktv(g, uvval(uv));
  }
}

/* Mark userdata in mmudata list. */
static void gc_mark_mmudata(global_State *g)
{
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    do {
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);
    } while (u != root);
  }
}

/* Separate userdata objects to be finalized to mmudata list. */
size_t lj_gc_separateudata(global_State *g, int all)
{
  size_t m = 0;
  GCRef *p = &mainthread(g)->nextgc;
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
      p = &o->gch.nextgc;  /* Nothing to do. */
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
      m += sizeudata(gco2ud(o));
      markfinalized(o);
      *p = o->gch.nextgc;
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
        GCobj *root = gcref(g->gc.mmudata);
        setgcrefr(o->gch.nextgc, root->gch.nextgc);
        setgcref(root->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      } else {  /* Create circular list. */
        setgcref(o->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      }
    }
  }
  return m;
}
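
/* Note on the resulting list shape: gc.mmudata is a circular, singly-linked
** list threaded through nextgc. The g->gc.mmudata reference points at the
** most recently appended object, whose nextgc wraps around to the oldest
** entry, so gc_finalize() below pops objects in insertion order.
*/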

/* -- Propagation phase --------------------------------------------------- */

/* Traverse a table. */
static int gc_traverse_tab(global_State *g, GCtab *t)
{
  int weak = 0;
  cTValue *mode;
  GCtab *mt = tabref(t->metatable);
  if (mt)
    gc_markobj(g, mt);
  mode = lj_meta_fastg(g, mt, MM_mode);
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    const char *modestr = strVdata(mode);
    int c;
    while ((c = *modestr++)) {
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
    }
    if (weak) {  /* Weak tables are cleared in the atomic phase. */
#if LJ_HASFFI
      CTState *cts = ctype_ctsG(g);
      if (cts && cts->finalizer == t) {
        weak = (int)(~0u & ~LJ_GC_WEAKVAL);
      } else
#endif
      {
        t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
        setgcrefr(t->gclist, g->gc.weak);
        setgcref(g->gc.weak, obj2gco(t));
      }
    }
  }
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    return 1;
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    MSize i, asize = t->asize;
    for (i = 0; i < asize; i++)
      gc_marktv(g, arrayslot(t, i));
  }
  if (t->hmask > 0) {  /* Mark hash part. */
    Node *node = noderef(t->node);
    MSize i, hmask = t->hmask;
    for (i = 0; i <= hmask; i++) {
      Node *n = &node[i];
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
        lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
        if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
        if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
      }
    }
  }
  return weak;
}
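
/* The return value is nonzero iff the table is weak in some way, in which
** case propagatemark() keeps it gray and clearing is deferred to
** gc_clearweak() in the atomic phase. For illustration, a table created
** from Lua with
**   setmetatable(t, { __mode = "v" })
** gets LJ_GC_WEAKVAL set here, so its values (including the array part) are
** left unmarked. The FFI finalizer table is special-cased: its keys stay
** unmarked but its values are marked, and it is not put on the weak list,
** so gc_clearweak() never touches it.
*/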

/* Traverse a function. */
static void gc_traverse_func(global_State *g, GCfunc *fn)
{
  gc_markobj(g, tabref(fn->c.env));
  if (isluafunc(fn)) {
    uint32_t i;
    lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
               "function upvalues out of range");
    gc_markobj(g, funcproto(fn));
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
  } else {
    uint32_t i;
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
      gc_marktv(g, &fn->c.upvalue[i]);
  }
}

#if LJ_HASJIT
/* Mark a trace. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Traverse a trace. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
    if (irt_is64(ir->t) && ir->o != IR_KNULL)
      ref++;
  }
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}

/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g)	UNUSED(g)
#endif

/* Traverse a prototype. */
static void gc_traverse_proto(global_State *g, GCproto *pt)
{
  ptrdiff_t i;
  gc_mark_str(proto_chunkname(pt));
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    gc_markobj(g, proto_kgc(pt, i));
#if LJ_HASJIT
  if (pt->trace) gc_marktrace(g, pt->trace);
#endif
}

/* Traverse the frame structure of a stack. */
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;
    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}

/* Traverse a thread object. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}

/* Propagate one gray object. Traverse it and turn it black. */
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  int gct = o->gch.gct;
  lj_assertG(isgray(o), "propagation of non-gray object");
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
           (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
                           sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    setgcrefr(th->gclist, g->gc.grayagain);
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
           T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lj_assertG(0, "bad GC type %d", gct);
    return 0;
#endif
  }
}
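
/* The value returned by propagatemark() is roughly the number of bytes
** traversed for this object. gc_onestep() and lj_gc_step() treat it as the
** cost of the step, so big tables and deep stacks consume more of the
** incremental step budget than small objects do.
*/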

/* Propagate all gray objects. */
static size_t gc_propagate_gray(global_State *g)
{
  size_t m = 0;
  while (gcref(g->gc.gray) != NULL)
    m += propagatemark(g);
  return m;
}

/* -- Sweep phase --------------------------------------------------------- */

/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,
  (GCFreeFunc)lj_func_freeuv,
  (GCFreeFunc)lj_state_free,
  (GCFreeFunc)lj_func_freeproto,
  (GCFreeFunc)lj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)lj_udata_free
};

/* Full sweep of a GC list. */
#define gc_fullsweep(g, p)	gc_sweep(g, (p), ~(uint32_t)0)

/* Partial sweep of a GC list. */
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
                 "sweep of undead object");
      makewhite(g, o);  /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
                 "sweep of unlive object");
      setgcrefr(*p, o->gch.nextgc);
      if (o == gcref(g->gc.root))
        setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
    }
  }
  return p;
}
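
/* How the liveness test works: ow = otherwhite(g) contains the "old" white
** bit plus LJ_GC_FIXED (or only LJ_GC_SFIXED during shutdown, see
** lj_gc_freeall). (marked ^ LJ_GC_WHITES) & ow is nonzero for black objects,
** objects already carrying the current white and fixed objects; it is zero
** only for unreached objects that still carry just the old white, and those
** are the ones freed here.
*/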

/* Sweep one string interning table chain. Preserves hashalg bit. */
static void gc_sweepstr(global_State *g, GCRef *chain)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  uintptr_t u = gcrefu(*chain);
  GCRef q;
  GCRef *p = &q;
  GCobj *o;
  setgcrefp(q, (u & ~(uintptr_t)1));
  while ((o = gcref(*p)) != NULL) {
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
                 "sweep of undead string");
      makewhite(g, o);  /* String is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise string is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
                 "sweep of unlive string");
      setgcrefr(*p, o->gch.nextgc);
      lj_str_free(g, gco2str(o));
    }
  }
  setgcrefp(*chain, (gcrefu(q) | (u & 1)));
}
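
/* The low bit of the chain anchor is not a pointer bit: it carries the
** string table's hash-algorithm flag for this chain (see lj_str.c). It is
** masked off before walking the chain and OR'ed back in at the end, which is
** what "preserves hashalg bit" above refers to.
*/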

/* Check whether we can clear a key or a value slot from a table. */
static int gc_mayclear(cTValue *o, int val)
{
  if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
    if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
      gc_mark_str(strV(o));  /* And need to be marked. */
      return 0;
    }
    if (iswhite(gcV(o)))
      return 1;  /* Object is about to be collected. */
    if (tvisudata(o) && val && isfinalized(udataV(o)))
      return 1;  /* Finalized userdata is dropped only from values. */
  }
  return 0;  /* Cannot clear. */
}

/* Clear collected entries from weak tables. */
static void gc_clearweak(global_State *g, GCobj *o)
{
  UNUSED(g);
  while (o) {
    GCtab *t = gco2tab(o);
    lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
        /* Clear array slot when value is about to be collected. */
        TValue *tv = arrayslot(t, i);
        if (gc_mayclear(tv, 1))
          setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
        Node *n = &node[i];
        /* Clear hash slot when key or value is about to be collected. */
        if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
                                  gc_mayclear(&n->val, 1)))
          setnilV(&n->val);
      }
    }
    o = gcref(t->gclist);
  }
}

/* Call a userdata or cdata finalizer. */
static void gc_call_finalizer(global_State *g, lua_State *L,
                              cTValue *mo, GCobj *o)
{
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  GCSize oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
  top = L->top;
  copyTV(L, top++, mo);
  if (LJ_FR2) setnilV(top++);
  setgcV(L, top, o, ~o->gch.gct);
  L->top = top+1;
  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = oldt;  /* Restore GC threshold. */
  if (errcode)
    lj_err_throw(L, errcode);  /* Propagate errors. */
}
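
/* Stack setup for the call above: the metamethod is pushed first, followed
** by an extra nil frame slot in LJ_FR2 mode, then the object itself as the
** single argument. The call is made protected via lj_vm_pcall() (the 1+0
** argument encodes zero expected results), and any error is rethrown only
** after the hook and GC threshold state has been restored.
*/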

/* Finalize one userdata or cdata object from the mmudata list. */
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));
  cTValue *mo;
  lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0;
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}

/* Finalize all userdata objects from mmudata list. */
void lj_gc_finalize_udata(lua_State *L)
{
  while (gcref(G(L)->gc.mmudata) != NULL)
    gc_finalize(L);
}

#if LJ_HASFFI
/* Finalize all cdata objects from finalizer table. */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
        GCobj *o = gcV(&node[i].key);
        TValue tmp;
        makewhite(g, o);
        o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
        copyTV(L, &tmp, &node[i].val);
        setnilV(&node[i].val);
        gc_call_finalizer(g, L, &tmp, o);
      }
  }
}
#endif

/* Free all remaining GC objects. */
void lj_gc_freeall(global_State *g)
{
  MSize i, strmask;
  /* Free everything, except super-fixed objects (the main thread). */
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
  gc_fullsweep(g, &g->gc.root);
  strmask = g->str.mask;
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
    gc_sweepstr(g, &g->str.tab[i]);
}

/* -- Collector ----------------------------------------------------------- */

/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;

  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */

  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */

  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */

  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */

  /* All marking done, clear weak tables. */
  gc_clearweak(g, gcref(g->gc.weak));

  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */

  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
}
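
/* Note the white flip at the end: once currentwhite has been flipped, every
** object allocated from here on carries the new white and therefore passes
** the sweep test, even though it was never marked. Only objects still
** carrying the old white, i.e. unreached before the flip, are freed during
** GCSsweepstring/GCSsweep.
*/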

/* GC state machine. Returns a cost estimate for each step performed. */
static size_t gc_onestep(lua_State *L)
{
  global_State *g = G(L);
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (gcref(g->gc.gray) != NULL)
      return propagatemark(g);  /* Propagate one gray object. */
    g->gc.state = GCSatomic;  /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;
    atomic(g, L);
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    g->gc.sweepstr = 0;
    return 0;
  case GCSsweepstring: {
    GCSize old = g->gc.total;
    gc_sweepstr(g, &g->str.tab[g->gc.sweepstr++]);  /* Sweep one chain. */
    if (g->gc.sweepstr > g->str.mask)
      g->gc.state = GCSsweep;  /* All string hash chains swept. */
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    return GCSWEEPCOST;
  }
  case GCSsweep: {
    GCSize old = g->gc.total;
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
      if (g->str.num <= (g->str.mask >> 2) && g->str.mask > LJ_MIN_STRTAB*2-1)
        lj_str_resize(L, g->str.mask >> 1);  /* Shrink string table. */
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
        g->gc.state = GCSfinalize;
#if LJ_HASFFI
        g->gc.nocdatafin = 1;
#endif
      } else {  /* Otherwise skip this phase to help the JIT. */
        g->gc.state = GCSpause;  /* End of GC cycle. */
        g->gc.debt = 0;
      }
    }
    return GCSWEEPMAX*GCSWEEPCOST;
  }
  case GCSfinalize:
    if (gcref(g->gc.mmudata) != NULL) {
      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
        return LJ_MAX_MEM;
      gc_finalize(L);  /* Finalize one userdata object. */
      if (g->gc.estimate > GCFINALIZECOST)
        g->gc.estimate -= GCFINALIZECOST;
      return GCFINALIZECOST;
    }
#if LJ_HASFFI
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
#endif
    g->gc.state = GCSpause;  /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lj_assertG(0, "bad GC state");
    return 0;
  }
}
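
/* Returning LJ_MAX_MEM from the atomic and finalize cases deliberately
** overflows the step budget: lj_gc_step() subtracts the returned cost from
** its limit, so a huge cost ends the step loop immediately. This is how the
** GC avoids running the atomic phase or finalizers while JIT-compiled code
** is active (g->jit_base is set).
*/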

/* Perform a limited amount of incremental GC steps. */
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  GCSize lim;
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (lim == 0)
    lim = LJ_MAX_MEM;
  if (g->gc.total > g->gc.threshold)
    g->gc.debt += g->gc.total - g->gc.threshold;
  do {
    lim -= (GCSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
    g->vmstate = ostate;
    return -1;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;
    g->vmstate = ostate;
    return 0;
  }
}
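
/* Return value summary: 1 means a full GC cycle was completed in this call,
** -1 means the accumulated debt is small and the threshold was pushed ahead
** by GCSTEPSIZE, and 0 means there is still debt left, so the caller (see
** lj_gc_step_jit below) may want to keep stepping.
*/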

/* Ditto, but fix the stack top first. */
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
{
  if (curr_funcisL(L)) L->top = curr_topL(L);
  lj_gc_step(L);
}

#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif

/* Perform a full GC cycle. */
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
             "bad GC state");
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}

/* -- Write barriers ------------------------------------------------------ */

/* Move the GC propagation frontier forward. */
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
             "bad object states for forward barrier");
  lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
             "bad GC state");
  lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}
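
/* This is the "forward" barrier: while the mark phase is running, storing a
** white object v into an already-black object o would hide v from the
** collector, so v is marked immediately. Outside of propagation the
** invariant does not matter and o is simply made white again, which also
** suppresses further barriers on it until the next cycle.
*/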

/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}

/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
        lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
                 "bad GC state");
    }
  }
}

#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif

/* -- Allocator ----------------------------------------------------------- */

/* Call pluggable memory allocator to allocate or resize a fragment. */
void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
{
  global_State *g = G(L);
  lj_assertG((osz == 0) == (p == NULL), "realloc API violation");
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
  lj_assertG(checkptrGC(p),
             "allocated memory address %p outside required range", p);
  g->gc.total = (g->gc.total - osz) + nsz;
  return p;
}

/* Allocate new GC object and link it to the root set. */
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
{
  global_State *g = G(L);
  GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);
  lj_assertG(checkptrGC(o),
             "allocated memory address %p outside required range", o);
  g->gc.total += size;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  newwhite(g, o);
  return o;
}

/* Resize growable vector. */
void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
{
  MSize sz = (*szp) << 1;
  if (sz < LJ_MIN_VECSZ)
    sz = LJ_MIN_VECSZ;
  if (sz > lim)
    sz = lim;
  p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
  *szp = sz;
  return p;
}