aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/libraries/luajit-2.0/src/lj_gc.c
diff options
context:
space:
mode:
Diffstat (limited to 'libraries/luajit-2.0/src/lj_gc.c')
-rw-r--r--libraries/luajit-2.0/src/lj_gc.c838
1 files changed, 0 insertions, 838 deletions
diff --git a/libraries/luajit-2.0/src/lj_gc.c b/libraries/luajit-2.0/src/lj_gc.c
deleted file mode 100644
index 1985abc..0000000
--- a/libraries/luajit-2.0/src/lj_gc.c
+++ /dev/null
@@ -1,838 +0,0 @@
1/*
2** Garbage collector.
3** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h
4**
5** Major portions taken verbatim or adapted from the Lua interpreter.
6** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
7*/
8
9#define lj_gc_c
10#define LUA_CORE
11
12#include "lj_obj.h"
13#include "lj_gc.h"
14#include "lj_err.h"
15#include "lj_str.h"
16#include "lj_tab.h"
17#include "lj_func.h"
18#include "lj_udata.h"
19#include "lj_meta.h"
20#include "lj_state.h"
21#include "lj_frame.h"
22#if LJ_HASFFI
23#include "lj_ctype.h"
24#include "lj_cdata.h"
25#endif
26#include "lj_trace.h"
27#include "lj_vm.h"
28
/* Tuning constants for the incremental collector. */
#define GCSTEPSIZE 1024u  /* Basic unit of GC work/debt (in bytes). */
#define GCSWEEPMAX 40  /* Max. number of objects swept per sweep step. */
#define GCSWEEPCOST 10  /* Cost charged per swept object. */
#define GCFINALIZECOST 100  /* Cost charged per finalized object. */

/* Macros to set GCobj colors and flags. */
#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
#define markfinalized(u) ((u)->marked |= LJ_GC_FINALIZED)

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). Asserts the tag matches the object type. */
#define gc_marktv(g, tv) \
  { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. Strings hold no references, so go straight to black. */
#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
/* Mark a white GCobj: turn it gray, and either blacken it immediately
** (objects without a traversal phase) or queue it on the gray list.
*/
static void gc_mark(global_State *g, GCobj *o)
{
  lua_assert(iswhite(o) && !isdead(g, o));
  white2gray(o);
  if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
  } else if (LJ_UNLIKELY(o->gch.gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (o->gch.gct != ~LJ_TSTR && o->gch.gct != ~LJ_TCDATA) {
    /* Remaining types (func/tab/thread/proto/trace) need traversal: queue. */
    lua_assert(o->gch.gct == ~LJ_TFUNC || o->gch.gct == ~LJ_TTAB ||
	       o->gch.gct == ~LJ_TTHREAD || o->gch.gct == ~LJ_TPROTO);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}
76
77/* Mark GC roots. */
78static void gc_mark_gcroot(global_State *g)
79{
80 ptrdiff_t i;
81 for (i = 0; i < GCROOT_MAX; i++)
82 if (gcref(g->gcroot[i]) != NULL)
83 gc_markobj(g, gcref(g->gcroot[i]));
84}
85
/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  /* Reset all propagation lists left over from any previous cycle. */
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  gc_markobj(g, mainthread(g));
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);
  gc_mark_gcroot(g);  /* Fixed roots in g->gcroot[]. */
  g->gc.state = GCSpropagate;
}
98
/* Mark values of open upvalues. Walks the doubly-linked list anchored
** at g->uvhead; only gray (i.e. reachable) upvalues get their value marked.
*/
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    /* Sanity-check list linkage in both directions. */
    lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
    if (isgray(obj2gco(uv)))
      gc_marktv(g, uvval(uv));
  }
}
109
/* Mark userdata in mmudata list (objects queued for finalization). */
static void gc_mark_mmudata(global_State *g)
{
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    do {  /* The mmudata list is circular: iterate until back at the anchor. */
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);
    } while (u != root);
  }
}
123
/* Separate userdata which needs finalization to the mmudata list.
** Returns the total size of the userdata moved (added to the GC estimate).
** If 'all' is set, every non-finalized userdata is separated (shutdown).
*/
size_t lj_gc_separateudata(global_State *g, int all)
{
  size_t m = 0;
  GCRef *p = &mainthread(g)->nextgc;  /* Userdata are chained after the main thread (see gc_finalize). */
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
      p = &o->gch.nextgc;  /* Nothing to do. */
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
      markfinalized(gco2ud(o));  /* Done, as there's no __gc metamethod. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
      m += sizeudata(gco2ud(o));
      markfinalized(gco2ud(o));
      *p = o->gch.nextgc;  /* Unchain from the regular list; p stays put. */
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
	GCobj *root = gcref(g->gc.mmudata);
	setgcrefr(o->gch.nextgc, root->gch.nextgc);
	setgcref(root->gch.nextgc, o);
	setgcref(g->gc.mmudata, o);
      } else {  /* Create circular list. */
	setgcref(o->gch.nextgc, o);
	setgcref(g->gc.mmudata, o);
      }
    }
  }
  return m;
}
153
154/* -- Propagation phase --------------------------------------------------- */
155
/* Traverse a table. Returns non-zero if the table is weak (stays gray).
** Weak tables are linked onto g->gc.weak for clearing in the atomic phase.
*/
static int gc_traverse_tab(global_State *g, GCtab *t)
{
  int weak = 0;
  cTValue *mode;
  GCtab *mt = tabref(t->metatable);
  if (mt)
    gc_markobj(g, mt);
  mode = lj_meta_fastg(g, mt, MM_mode);
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    const char *modestr = strVdata(mode);
    int c;
    while ((c = *modestr++)) {
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
      /* 'K': all bits except WEAKVAL set, i.e. weak keys in a special mode. */
      else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
    }
    if (weak > 0) {  /* Weak tables are cleared in the atomic phase. */
      t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
      setgcrefr(t->gclist, g->gc.weak);
      setgcref(g->gc.weak, obj2gco(t));
    }
  }
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    return 1;
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    MSize i, asize = t->asize;
    for (i = 0; i < asize; i++)
      gc_marktv(g, arrayslot(t, i));
  }
  if (t->hmask > 0) {  /* Mark hash part. */
    Node *node = noderef(t->node);
    MSize i, hmask = t->hmask;
    for (i = 0; i <= hmask; i++) {
      Node *n = &node[i];
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
	lua_assert(!tvisnil(&n->key));
	/* Keys/values are skipped if the corresponding weakness bit is set. */
	if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
	if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
      }
    }
  }
  return weak;
}
200
201/* Traverse a function. */
202static void gc_traverse_func(global_State *g, GCfunc *fn)
203{
204 gc_markobj(g, tabref(fn->c.env));
205 if (isluafunc(fn)) {
206 uint32_t i;
207 lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
208 gc_markobj(g, funcproto(fn));
209 for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
210 gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
211 } else {
212 uint32_t i;
213 for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
214 gc_marktv(g, &fn->c.upvalue[i]);
215 }
216}
217
218#if LJ_HASJIT
/* Mark a trace by number and queue it on the gray list. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lua_assert(traceno != G2J(g)->cur.traceno);  /* Current trace is not anchored yet. */
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}
230
/* Traverse a trace: GC constants in the IR, linked traces and start proto. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;  /* Ignore the not-yet-assigned trace. */
  /* IR constants live below REF_TRUE; only IR_KGC holds a GC reference. */
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
  }
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}
246
247/* The current trace is a GC root while not anchored in the prototype (yet). */
248#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
249#else
250#define gc_traverse_curtrace(g) UNUSED(g)
251#endif
252
253/* Traverse a prototype. */
254static void gc_traverse_proto(global_State *g, GCproto *pt)
255{
256 ptrdiff_t i;
257 gc_mark_str(proto_chunkname(pt));
258 for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
259 gc_markobj(g, proto_kgc(pt, i));
260#if LJ_HASJIT
261 if (pt->trace) gc_marktrace(g, pt->trace);
262#endif
263}
264
/* Traverse the frame structure of a stack.
** Marks every frame's function and returns the minimum needed stack size
** (used by the caller to shrink the stack).
*/
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;  /* Track the highest slot any frame may use. */
    gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}
281
/* Traverse a thread object: mark live stack slots, then shrink the stack. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1; o < top; o++)  /* Slot 0 is skipped. */
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}
296
/* Propagate one gray object. Traverse it and turn it black.
** Returns an approximation of the traversed object's size as the work done.
*/
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  lua_assert(isgray(o));
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(o->gch.gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
			   sizeof(Node) * (t->hmask + 1);
  } else if (LJ_LIKELY(o->gch.gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
			   sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(o->gch.gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(o->gch.gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    /* Threads go on the grayagain list: their stacks mutate freely, so
    ** they must be re-traversed in the atomic phase.
    */
    setgcrefr(th->gclist, g->gc.grayagain);
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
	   T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lua_assert(0);  /* Only traces can reach here, and only with the JIT. */
    return 0;
#endif
  }
}
338
339/* Propagate all gray objects. */
340static size_t gc_propagate_gray(global_State *g)
341{
342 size_t m = 0;
343 while (gcref(g->gc.gray) != NULL)
344 m += propagatemark(g);
345 return m;
346}
347
348/* -- Sweep phase --------------------------------------------------------- */
349
350/* Try to shrink some common data structures. */
351static void gc_shrink(global_State *g, lua_State *L)
352{
353 if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
354 lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */
355 if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
356 lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1); /* Shrink temp buf. */
357}
358
/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T
** Indexed by (gct - ~LJ_TSTR) in gc_sweep, so the order here must exactly
** match the LJ_T* tag order. Slots for disabled features stay as NULL
** placeholders to keep the indexing intact.
*/
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,
  (GCFreeFunc)lj_func_freeuv,
  (GCFreeFunc)lj_state_free,
  (GCFreeFunc)lj_func_freeproto,
  (GCFreeFunc)lj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)lj_udata_free
};
382
/* Full sweep of a GC list. */
#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM)

/* Partial sweep of a GC list. Frees at most 'lim' dead objects and
** returns a pointer to the position where sweeping should resume.
*/
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
      makewhite(g, o);  /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
      setgcrefr(*p, o->gch.nextgc);  /* Unchain; p itself stays put. */
      if (o == gcref(g->gc.root))
	setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);  /* Dispatch by type tag. */
    }
  }
  return p;
}
409
410/* Check whether we can clear a key or a value slot from a table. */
411static int gc_mayclear(cTValue *o, int val)
412{
413 if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
414 if (tvisstr(o)) { /* But strings cannot be used as weak references. */
415 gc_mark_str(strV(o)); /* And need to be marked. */
416 return 0;
417 }
418 if (iswhite(gcV(o)))
419 return 1; /* Object is about to be collected. */
420 if (tvisudata(o) && val && isfinalized(udataV(o)))
421 return 1; /* Finalized userdata is dropped only from values. */
422 }
423 return 0; /* Cannot clear. */
424}
425
/* Clear collected entries from weak tables.
** 'o' is the head of the weak-table list built by gc_traverse_tab;
** tables are chained through their gclist field.
*/
static void gc_clearweak(GCobj *o)
{
  while (o) {
    GCtab *t = gco2tab(o);
    lua_assert((t->marked & LJ_GC_WEAK));
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
	/* Clear array slot when value is about to be collected. */
	TValue *tv = arrayslot(t, i);
	if (gc_mayclear(tv, 1))
	  setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
	Node *n = &node[i];
	/* Clear hash slot when key or value is about to be collected. */
	if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
				  gc_mayclear(&n->val, 1)))
	  setnilV(&n->val);  /* Key is kept; slot is dead once value is nil. */
      }
    }
    o = gcref(t->gclist);  /* Advance to next weak table. */
  }
}
455
/* Call a userdata or cdata finalizer.
** 'mo' is the resolved __gc metamethod, 'o' the object to finalize.
** Errors from the finalizer are re-thrown after state is restored.
*/
static void gc_call_finalizer(global_State *g, lua_State *L,
			      cTValue *mo, GCobj *o)
{
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  MSize oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  top = L->top;
  L->top = top+2;  /* Reserve two stack slots: metamethod + object. */
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
  copyTV(L, top, mo);
  setgcV(L, top+1, o, ~o->gch.gct);
  errcode = lj_vm_pcall(L, top+1, 1+0, -1);  /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  g->gc.threshold = oldt;  /* Restore GC threshold. */
  if (errcode)
    lj_err_throw(L, errcode);  /* Propagate errors. */
}
478
/* Finalize one userdata or cdata object from the mmudata list.
** Unchains the object, resurrects it onto the appropriate live list
** (made white again), then runs its finalizer if one is registered.
*/
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));  /* mmudata points at list tail. */
  cTValue *mo;
  lua_assert(gcref(g->jit_L) == NULL);  /* Must not be called on trace. */
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);  /* Last element: list becomes empty. */
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer from the FFI finalizer table, keyed by the cdata. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}
519
520/* Finalize all userdata objects from mmudata list. */
521void lj_gc_finalize_udata(lua_State *L)
522{
523 while (gcref(G(L)->gc.mmudata) != NULL)
524 gc_finalize(L);
525}
526
527#if LJ_HASFFI
/* Finalize all cdata objects from the FFI finalizer table (shutdown path). */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {  /* FFI state may never have been created. */
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    /* Iterate backwards so concurrent clears don't disturb lower slots. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
	GCobj *o = gcV(&node[i].key);
	TValue tmp;
	makewhite(g, o);
	o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
	copyTV(L, &tmp, &node[i].val);  /* Save finalizer before clearing. */
	setnilV(&node[i].val);
	gc_call_finalizer(g, L, &tmp, o);
      }
  }
}
550#endif
551
552/* Free all remaining GC objects. */
553void lj_gc_freeall(global_State *g)
554{
555 MSize i, strmask;
556 /* Free everything, except super-fixed objects (the main thread). */
557 g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
558 gc_fullsweep(g, &g->gc.root);
559 strmask = g->strmask;
560 for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
561 gc_fullsweep(g, &g->strhash[i]);
562}
563
564/* -- Collector ----------------------------------------------------------- */
565
/* Atomic part of the GC cycle, transitioning from mark to sweep phase.
** Must run to completion without mutator interference; the ordering of
** the re-mark steps below is significant.
*/
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;

  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */

  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lua_assert(!iswhite(obj2gco(mainthread(g))));
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */

  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */

  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */

  /* All marking done, clear weak tables. */
  gc_clearweak(gcref(g->gc.weak));

  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;  /* Keep the empty string alive. */
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.estimate = g->gc.total - (MSize)udsize;  /* Initial estimate. */
}
599
/* GC state machine. Returns a cost estimate for each step performed.
** States advance: pause -> propagate -> atomic -> sweepstring -> sweep
** -> (finalize ->) pause.
*/
static size_t gc_onestep(lua_State *L)
{
  global_State *g = G(L);
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (gcref(g->gc.gray) != NULL)
      return propagatemark(g);  /* Propagate one gray object. */
    g->gc.state = GCSatomic;  /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (gcref(g->jit_L))  /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;  /* Huge cost forces the step loop to stop. */
    atomic(g, L);
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    g->gc.sweepstr = 0;
    return 0;
  case GCSsweepstring: {
    MSize old = g->gc.total;
    gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
    if (g->gc.sweepstr > g->strmask)
      g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
    lua_assert(old >= g->gc.total);
    g->gc.estimate -= old - g->gc.total;  /* Deduct freed memory. */
    return GCSWEEPCOST;
  }
  case GCSsweep: {
    MSize old = g->gc.total;
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {  /* Reached list end? */
      gc_shrink(g, L);
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
	g->gc.state = GCSfinalize;
      } else {  /* Otherwise skip this phase to help the JIT. */
	g->gc.state = GCSpause;  /* End of GC cycle. */
	g->gc.debt = 0;
      }
    }
    lua_assert(old >= g->gc.total);
    g->gc.estimate -= old - g->gc.total;  /* Deduct freed memory. */
    return GCSWEEPMAX*GCSWEEPCOST;
  }
  case GCSfinalize:
    if (gcref(g->gc.mmudata) != NULL) {
      if (gcref(g->jit_L))  /* Don't call finalizers on trace. */
	return LJ_MAX_MEM;
      gc_finalize(L);  /* Finalize one userdata object. */
      if (g->gc.estimate > GCFINALIZECOST)
	g->gc.estimate -= GCFINALIZECOST;
      return GCFINALIZECOST;
    }
    g->gc.state = GCSpause;  /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lua_assert(0);
    return 0;
  }
}
662
/* Perform a limited amount of incremental GC steps.
** Returns 1 if a full GC cycle was finished, 0 otherwise.
*/
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  MSize lim;
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;  /* stepmul is a percentage. */
  if (lim == 0)
    lim = LJ_MAX_MEM;  /* stepmul == 0 means: run to completion. */
  g->gc.debt += g->gc.total - g->gc.threshold;
  do {
    lim -= (MSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      /* Set next threshold relative to the estimate (pause is a percentage). */
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  } while ((int32_t)lim > 0);  /* Signed compare: lim may have underflowed. */
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;  /* Still in debt: step again soon. */
  }
  g->vmstate = ostate;
  return 0;
}
691
692/* Ditto, but fix the stack top first. */
693void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
694{
695 if (curr_funcisL(L)) L->top = curr_topL(L);
696 lj_gc_step(L);
697}
698
699#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code.
** Returns 1 to force a trace exit (atomic/finalize can't run on trace).
*/
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->jit_L));  /* Thread currently on trace. */
  L->base = mref(G(L)->jit_base, TValue);
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
711#endif
712
/* Perform a full GC cycle: finish any cycle in progress, then run a
** complete mark-and-sweep cycle to the end.
*/
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}
736
737/* -- Write barriers ------------------------------------------------------ */
738
/* Move the GC propagation frontier forward.
** Called when a black object 'o' is made to reference a white object 'v'.
** Tables use a backward barrier elsewhere, hence the assertion below.
*/
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
  lua_assert(o->gch.gct != ~LJ_TTAB);
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}
751
/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
/* Recover the upvalue's marked byte from the address of its embedded tv. */
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));  /* Move frontier forward. */
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}
763
/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  /* Chain the now-closed upvalue into the regular GC root list. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
	lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
    }
  }
}
785
786#if LJ_HASJIT
787/* Mark a trace if it's saved during the propagation phase. */
788void lj_gc_barriertrace(global_State *g, uint32_t traceno)
789{
790 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
791 gc_marktrace(g, traceno);
792}
793#endif
794
795/* -- Allocator ----------------------------------------------------------- */
796
/* Call pluggable memory allocator to allocate or resize a fragment.
** osz == 0 allocates, nsz == 0 frees, otherwise resizes.
** Throws a memory error on allocation failure; updates gc.total.
*/
void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
{
  global_State *g = G(L);
  lua_assert((osz == 0) == (p == NULL));
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lua_assert((nsz == 0) == (p == NULL));
  lua_assert(checkptr32(p));  /* GC references are 32 bit. */
  g->gc.total = (g->gc.total - osz) + nsz;  /* Keep memory accounting exact. */
  return p;
}
810
811/* Allocate new GC object and link it to the root set. */
812void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
813{
814 global_State *g = G(L);
815 GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
816 if (o == NULL)
817 lj_err_mem(L);
818 lua_assert(checkptr32(o));
819 g->gc.total += size;
820 setgcrefr(o->gch.nextgc, g->gc.root);
821 setgcref(g->gc.root, o);
822 newwhite(g, o);
823 return o;
824}
825
826/* Resize growable vector. */
827void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
828{
829 MSize sz = (*szp) << 1;
830 if (sz < LJ_MIN_VECSZ)
831 sz = LJ_MIN_VECSZ;
832 if (sz > lim)
833 sz = lim;
834 p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
835 *szp = sz;
836 return p;
837}
838