aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/libraries/LuaJIT-1.1.7/src/ljit_mem.c
diff options
context:
space:
mode:
authorDavid Walter Seikel2013-01-13 17:32:23 +1000
committerDavid Walter Seikel2013-01-13 17:32:23 +1000
commit2f933e86b41b1112e6697f95cba7f541f029af8b (patch)
tree271b16e41805a3a5a49583fe6e675ceb3291020e /libraries/LuaJIT-1.1.7/src/ljit_mem.c
parentRemove EFL, since it's been released now. (diff)
downloadSledjHamr-2f933e86b41b1112e6697f95cba7f541f029af8b.zip
SledjHamr-2f933e86b41b1112e6697f95cba7f541f029af8b.tar.gz
SledjHamr-2f933e86b41b1112e6697f95cba7f541f029af8b.tar.bz2
SledjHamr-2f933e86b41b1112e6697f95cba7f541f029af8b.tar.xz
Remove unused LuaJIT 1.1.7, since the 2.0 version works fine.
Diffstat (limited to 'libraries/LuaJIT-1.1.7/src/ljit_mem.c')
-rw-r--r--libraries/LuaJIT-1.1.7/src/ljit_mem.c405
1 files changed, 0 insertions, 405 deletions
diff --git a/libraries/LuaJIT-1.1.7/src/ljit_mem.c b/libraries/LuaJIT-1.1.7/src/ljit_mem.c
deleted file mode 100644
index 73ade7f..0000000
--- a/libraries/LuaJIT-1.1.7/src/ljit_mem.c
+++ /dev/null
@@ -1,405 +0,0 @@
1/*
2** Memory management for machine code.
3** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h
4*/
5
6#define ljit_mem_c
7#define LUA_CORE
8
9#include <string.h>
10
11#include "lua.h"
12
13#include "lmem.h"
14#include "ldo.h"
15#include "ljit.h"
16#include "ljit_dasm.h"
17
18
19/*
20** Define this if you want to run LuaJIT with valgrind. You will get random
21** errors if you don't. And these errors are usually not caught by valgrind!
22**
23** This macro evaluates to a no-op if not run with valgrind. I.e. you can
24** use the same binary for regular runs, too (without a performance loss).
25*/
26#ifdef USE_VALGRIND
27#include <valgrind/valgrind.h>
28#define MCH_INVALIDATE(ptr, addr) VALGRIND_DISCARD_TRANSLATIONS(ptr, addr)
29#else
30#define MCH_INVALIDATE(ptr, addr) ((void)0)
31#endif
32
33
34/* ------------------------------------------------------------------------ */
35
36#if defined(_WIN32) && !defined(LUAJIT_MCH_USE_MALLOC)
37
38/* Use a private heap with executable memory for Windows. */
39#include <windows.h>
40
41/* No need for serialization. There's already a lock per Lua universe. */
42#ifdef HEAP_CREATE_ENABLE_EXECUTE
43#define MCH_HCFLAGS (HEAP_NO_SERIALIZE|HEAP_CREATE_ENABLE_EXECUTE)
44#else
45#define MCH_HCFLAGS (HEAP_NO_SERIALIZE|0x00040000)
46#endif
47
48/* Free the whole mcode heap. */
49void luaJIT_freemcodeheap(jit_State *J)
50{
51 if (J->mcodeheap) HeapDestroy((HANDLE)J->mcodeheap);
52}
53
54/* Allocate a code block from the mcode heap. */
55static void *mcode_alloc(jit_State *J, size_t sz)
56{
57 void *ptr;
58 if (J->mcodeheap == NULL) {
59 J->mcodeheap = (void *)HeapCreate(MCH_HCFLAGS, 0, 0);
60 if (J->mcodeheap == NULL) luaD_throw(J->L, LUA_ERRMEM);
61 }
62 ptr = HeapAlloc(J->mcodeheap, 0, (sz));
63 if (ptr == NULL) luaD_throw(J->L, LUA_ERRMEM);
64 return ptr;
65}
66
67#define mcode_free(L, J, p, sz) HeapFree(J->mcodeheap, 0, (p))
68
69/* ------------------------------------------------------------------------ */
70
71#elif defined(LUA_USE_POSIX) && !defined(LUAJIT_MCH_USE_MALLOC)
72
73/*
74** Allocate EXECUTABLE memory with mmap() on POSIX systems.
75**
76** There is no standard way to reuse malloc(). So this is a very small,
77** but also very naive memory allocator. This should be ok, because:
78**
79** 1. Most apps only allocate mcode while running and free all on exit.
80**
81** 2. Some apps regularly load/unload a bunch of modules ("stages").
82** Allocs/frees come in groups, so coalescing should work fine.
83**
84** If your app differs, then please elaborate and/or supply code.
85** And no -- including a full blown malloc is NOT an option.
86**
87** Caveat: the mmap()'ed heaps are not freed until exit.
88** This shouldn't be too difficult to add, but I didn't bother.
89*/
90
91#include <sys/types.h>
92#include <sys/mman.h>
93
94/* TODO: move this to luaconf.h */
95#define LUAJIT_MCH_CHUNKSIZE (1<<17) /* 128K */
96
97#if defined(MAP_ANONYMOUS)
98#define MCH_MMFLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
99#elif defined(MAP_ANON)
100#define MCH_MMFLAGS (MAP_PRIVATE|MAP_ANON)
101#else
102/* I'm too lazy to add /dev/zero support for ancient systems. */
103#error "Your OS has no (easy) support for anonymous mmap(). Please upgrade."
104#endif
105
/*
** Chunk header. Serves double duty:
** - header of a free chunk on the free list, and
** - header of a whole mmap()'ed heap (the "1st head" case below).
** Heaps are chained through prev; the free list starts at the first
** heap's next pointer (see mcode_newheap / luaJIT_freemcodeheap).
*/
typedef struct MCodeHead {
  struct MCodeHead *next;  /* Next free chunk / 1st head: first free chunk. */
  struct MCodeHead *prev;  /* Prev free chunk / 1st head: next heap in chain. */
  size_t size;             /* Size of free chunk / Size of heap. */
  size_t dummy;            /* May or may not overlap with trailer. */
} MCodeHead;
113
114/* Allocation granularity. Assumes sizeof(void *) >= sizeof(size_t). */
115#define MCH_GRANULARITY (4*sizeof(void *))
116#define MCH_ROUNDSIZE(x) (((x) + MCH_GRANULARITY-1) & -MCH_GRANULARITY)
117#define MCH_ROUNDHEAP(x) (((x) + 4095) & -4096)
118#define MCH_HEADERSIZE MCH_ROUNDSIZE(sizeof(MCodeHead))
119
120/* Trailer flags. */
121#define MCH_USED 1 /* Next chunk is in use. */
122#define MCH_LAST 2 /* Next chunk is the last one. */
123#define MCH_FIRST 4 /* Next chunk is the first one. */
124/* Note: the last chunk of each heap doesn't have a trailer. */
125
126/* Trailer macros. */
127#define MCH_PREVTRAILER(mh) ((size_t *)(mh) - 1)
128#define MCH_TRAILER(mh, sz) ((size_t *)((char *)(mh) + (sz)) - 1)
129#define MCH_TRFLAGS(tr) ((tr) & (MCH_USED|MCH_LAST))
130#define MCH_TRSIZE(tr) ((tr) & ~(MCH_USED|MCH_LAST))
131
132/* Debugging memory allocators is ... oh well. */
133#ifdef MCH_DEBUG
134#include <stdio.h>
135#define MCH_DBGF stderr
136#define MCH_DBG(x) fprintf x
137#else
138#define MCH_DBG(x) ((void)0)
139#endif
140
141/* Free the whole list of mcode heaps. */
142void luaJIT_freemcodeheap(jit_State *J)
143{
144 MCodeHead *mh = (MCodeHead *)J->mcodeheap;
145 while (mh) {
146 MCodeHead *prev = mh->prev; /* Heaps are in the prev chain. */
147#ifdef MCH_DEBUG
148 munmap((void *)mh, mh->size+4096);
149#else
150 munmap((void *)mh, mh->size);
151#endif
152 mh = prev;
153 }
154 J->mcodeheap = NULL;
155}
156
157/* Allocate a new heap of at least the given size. */
158static void mcode_newheap(jit_State *J, size_t sz)
159{
160 MCodeHead *mh, *mhn, *fh;
161 void *ptr;
162
163 /* Ensure minimum size or round up. */
164 if (sz + MCH_HEADERSIZE <= LUAJIT_MCH_CHUNKSIZE)
165 sz = LUAJIT_MCH_CHUNKSIZE;
166 else
167 sz = MCH_ROUNDHEAP(sz + MCH_HEADERSIZE);
168
169#ifdef MCH_DEBUG
170 /* Allocate a new heap plus a guard page. */
171 ptr = mmap(NULL, sz+4096, PROT_READ|PROT_WRITE|PROT_EXEC, MCH_MMFLAGS, -1, 0);
172 if (ptr == MAP_FAILED) luaD_throw(J->L, LUA_ERRMEM);
173 mprotect((char *)ptr+sz, 4096, PROT_NONE);
174#else
175 /* Allocate a new heap. */
176 ptr = mmap(NULL, sz, PROT_READ|PROT_WRITE|PROT_EXEC, MCH_MMFLAGS, -1, 0);
177 if (ptr == MAP_FAILED) luaD_throw(J->L, LUA_ERRMEM);
178#endif
179
180 /* Initialize free chunk. */
181 fh = (MCodeHead *)((char *)ptr + MCH_HEADERSIZE);
182 fh->size = sz - MCH_HEADERSIZE;
183 *MCH_PREVTRAILER(fh) = MCH_LAST | MCH_FIRST; /* Zero size, no coalesce. */
184
185 /* Initialize new heap and make it the first heap. */
186 mh = (MCodeHead *)J->mcodeheap;
187 J->mcodeheap = ptr;
188 mhn = (MCodeHead *)ptr;
189 mhn->prev = mh; /* Heaps are in the prev. chain. */
190 mhn->size = sz;
191 mhn->next = fh; /* Start of free list is always in the first heap. */
192 fh->prev = mhn;
193 if (mh) {
194 fh->next = mh->next; /* Old start of free list. */
195 mh->next = NULL; /* Just in case. */
196 } else {
197 fh->next = NULL; /* No other free chunks yet. */
198 }
199 MCH_DBG((MCH_DBGF, "HEAP %p %5x\n", mhn, sz));
200}
201
/* Allocate a code block. */
/*
** First-fit search of the free list, with sz rounded up to include the
** size_t trailer. If no free chunk fits (or no heap exists yet), a new
** heap is mapped and the outer loop retries; the retry is guaranteed
** to succeed because mcode_newheap() sizes the heap for sz.
*/
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Reserve room for the trailer and round to allocation granularity. */
  sz = MCH_ROUNDSIZE(sz + sizeof(size_t));
  for ( ; ; ) {
    MCodeHead *mh = (MCodeHead *)J->mcodeheap;
    if (mh) {  /* Got at least one heap so search free list. */
#ifdef MCH_DEBUG
      int slen = 0;  /* Search length, for debug output only. */
      for (mh = mh->next; mh ; mh = mh->next, slen++)
#else
      for (mh = mh->next; mh ; mh = mh->next)
#endif
        if (mh->size >= sz) {  /* Very naive first fit. */
          size_t *trailer = MCH_TRAILER(mh, sz);      /* Our own trailer. */
          size_t *ptrailer = MCH_PREVTRAILER(mh);     /* Previous chunk's trailer. */
          if (mh->size == sz) {  /* Exact match: just unchain chunk. */
            mh->prev->next = mh->next;
            if (mh->next)
              mh->next->prev = mh->prev;
            *ptrailer |= MCH_USED;  /* Mark this chunk in use. */
            MCH_DBG((MCH_DBGF, "NEW %p %5x FIT #%d%s\n",
                     mh, sz, slen, (*ptrailer & MCH_LAST) ? " LAST" : ""));
          } else {  /* Chunk is larger: rechain remainder chunk. */
            MCodeHead *fh = (MCodeHead *)((char *)mh + sz);  /* Remainder. */
            size_t tr;
            fh->size = mh->size - sz;
            /* Splice the remainder into the free list in mh's place. */
            (fh->prev = mh->prev)->next = fh;
            if ((fh->next = mh->next) != NULL)
              fh->next->prev = fh;
            tr = *ptrailer;
            if (tr & MCH_LAST) {
              /* mh was the heap's last chunk; the remainder inherits LAST
              ** via our trailer, and the heap-last bit moves off ptrailer. */
              *ptrailer = (tr & ~MCH_LAST) | MCH_USED;
              *trailer = sz | MCH_LAST;
              MCH_DBG((MCH_DBGF, "NEW %p %5x REST %p %5x #%d LAST\n",
                       mh, sz, fh, fh->size, slen));
            } else {
              /* Update the remainder's trailer with its new size. */
              size_t *ftrailer = MCH_TRAILER(fh, fh->size);
              *ftrailer = MCH_TRFLAGS(*ftrailer) | fh->size;
              *ptrailer = tr | MCH_USED;
              *trailer = sz;
              MCH_DBG((MCH_DBGF, "NEW %p %5x REST %p %5x #%d\n",
                       mh, sz, fh, fh->size, slen));
            }
          }
          return (void *)mh;
        }
    }
    /* No luck. Allocate a new heap. Next loop iteration will succeed. */
    mcode_newheap(J, sz);
  }
}
254
/* Free a code block. */
/*
** sz is the rounded size including the trailer (see the mcode_free macro).
** The chunk is coalesced with a free previous and/or next neighbor where
** possible; otherwise it is pushed onto the front of the free list, whose
** head lives in the first heap's header.
*/
static void mcode_free_(jit_State *J, void *ptr, size_t sz)
{
  MCodeHead *mh = (MCodeHead *)ptr;
  size_t *trailer = MCH_TRAILER(mh, sz);    /* Our trailer (state of next chunk). */
  size_t *ptrailer = MCH_PREVTRAILER(mh);   /* Previous chunk's trailer. */
  size_t tr = *ptrailer;

#ifdef MCH_DEBUG
  /* Double free / free of an unused chunk: report but carry on. */
  if (!(tr & MCH_USED)) MCH_DBG((MCH_DBGF, "**unused %p %5x\n", ptr, sz));
#endif

  if (!(tr & MCH_FIRST)) {  /* There is a previous chunk in this heap. */
    MCodeHead *ph = (MCodeHead *)((char *)mh - MCH_TRSIZE(tr));
    size_t *pptrailer = MCH_PREVTRAILER(ph);
    if (!(*pptrailer & MCH_USED)) {  /* Prev free? */
      if (!(tr & MCH_LAST) && !(*trailer & MCH_USED)) {  /* Next free? */
        /* Coalesce with previous and next chunk. */
        MCodeHead *nh = (MCodeHead *)((char *)mh + sz);
        MCH_DBG((MCH_DBGF, "free %p %5x PN %p %5x %p %5x%s\n",
                 mh, sz, ph, ph->size, nh, nh->size,
                 (*trailer & MCH_LAST) ? " last" : ""));
        /* Unchain nh; ph stays on the free list and absorbs both. */
        if ((nh->prev->next = nh->next) != NULL)
          nh->next->prev = nh->prev;
        ph->size += sz + nh->size;
        if (*trailer & MCH_LAST) {
          *pptrailer |= MCH_LAST;  /* Merged chunk is now the heap's last. */
        } else {
          /* Record the merged size in what was nh's trailer. */
          trailer = MCH_TRAILER(nh, nh->size);
          *trailer = MCH_TRFLAGS(*trailer) | ph->size;
        }
        return;
      }
      /* Coalesce with the previous chunk only. */
      MCH_DBG((MCH_DBGF, "free %p %5x P- %p %5x%s\n",
               mh, sz, ph, ph->size,
               (tr & MCH_LAST) ? " last" : ""));
      ph->size += sz;
      if (tr & MCH_LAST)
        *pptrailer |= MCH_LAST;  /* Merged chunk is now the heap's last. */
      else
        *trailer = MCH_TRFLAGS(*trailer) | ph->size;
      return;
    }
  }

  if (!(tr & MCH_LAST) && !(*trailer & MCH_USED)) {  /* Next free? */
    /* Coalesce with next chunk: mh takes nh's place on the free list. */
    MCodeHead *nh = (MCodeHead *)((char *)mh + sz);
    MCH_DBG((MCH_DBGF, "free %p %5x -N %p %5x%s\n",
             mh, sz, nh, nh->size, (*trailer & MCH_LAST) ? " last" : ""));
    (mh->prev = nh->prev)->next = mh;
    if ((mh->next = nh->next))
      mh->next->prev = mh;
    mh->size = nh->size + sz;
    if (*trailer & MCH_LAST) {
      *ptrailer = (tr & ~MCH_USED) | MCH_LAST;  /* Now the heap's last chunk. */
    } else {
      /* Record the merged size in what was nh's trailer. */
      trailer = MCH_TRAILER(mh, mh->size);
      *trailer = MCH_TRFLAGS(*trailer) | mh->size;
      *ptrailer = tr & ~MCH_USED;
    }
  } else {
    /* No coalesce possible, just add to free list. */
    MCodeHead *fh = (MCodeHead *)J->mcodeheap;  /* Free list head (first heap). */
    MCH_DBG((MCH_DBGF, "free %p %5x --%s\n",
             mh, sz, (tr & MCH_LAST) ? " last" : ""));
    if ((mh->next = fh->next))
      mh->next->prev = mh;
    fh->next = mh;
    mh->prev = fh;
    mh->size = sz;
    *ptrailer = tr & ~MCH_USED;
  }
}
329
330#define mcode_free(L, J, p, sz) \
331 mcode_free_(J, (p), MCH_ROUNDSIZE((sz) + sizeof(size_t)))
332
333/* ------------------------------------------------------------------------ */
334
335#else
336
337/*
338** Fallback to Lua's alloc, i.e. probably malloc().
339**
340** Note: the returned memory is usually not marked executable!
341** Running the code will crash if the CPU/OS checks for this.
342** E.g. on x86 CPUs that support the NX (No eXecute) bit.
343*/
344
345/* There's no heap to free, but the JSUB mcode is. */
346void luaJIT_freemcodeheap(jit_State *J)
347{
348 if (J->jsubmcode) luaM_freemem(J->L, J->jsubmcode, J->szjsubmcode);
349}
350
351#define mcode_alloc(J, sz) luaM_realloc_(J->L, NULL, 0, (sz))
352#define mcode_free(L, J, p, sz) luaM_freemem(L, p, sz)
353
354#endif
355
356/* ------------------------------------------------------------------------ */
357
358/* Free mcode. */
359void luaJIT_freemcode(jit_State *J, void *mcode, size_t sz)
360{
361 mcode_free(J->L, J, mcode, sz);
362}
363
364/* Free JIT structures in function prototype. */
365void luaJIT_freeproto(lua_State *L, Proto *pt)
366{
367 char *mcode = (char *)pt->jit_mcode;
368 size_t sz = pt->jit_szmcode;
369 pt->jit_mcode = NULL;
370 pt->jit_szmcode = 0;
371 while (sz != 0) { /* Free whole chain of mcode blocks for this proto. */
372 jit_MCTrailer next;
373 memcpy((void *)&next, JIT_MCTRAILER(mcode, sz), sizeof(jit_MCTrailer));
374 MCH_INVALIDATE(mcode, sz);
375 mcode_free(L, G(L)->jit_state, mcode, sz);
376 mcode = next.mcode;
377 sz = next.sz;
378 }
379}
380
381/* Link generated code. Return mcode address, size and status. */
382int luaJIT_link(jit_State *J, void **mcodep, size_t *szp)
383{
384 size_t sz;
385 void *mcode;
386
387 /* Pass 2: link sections. */
388 if ((J->dasmstatus = dasm_link(Dst, &sz))) return JIT_S_DASM_ERROR;
389
390 /* Check for hardcoded limit on mcode size. */
391 if (sz > LUAJIT_LIM_MCODE) return JIT_S_TOOLARGE;
392
393 /* TODO: mark mcode readonly when we're done. */
394 mcode = mcode_alloc(J, sz);
395
396 /* Pass 3: encode sections. */
397 if ((J->dasmstatus = dasm_encode(Dst, mcode)) != 0) {
398 mcode_free(J->L, J, mcode, sz);
399 return JIT_S_DASM_ERROR;
400 }
401 *mcodep = mcode;
402 *szp = sz;
403 return JIT_S_OK;
404}
405