diff options
Diffstat (limited to '')
-rw-r--r-- | libraries/luajit-2.0/src/lj_mcode.c | 364 |
1 files changed, 0 insertions, 364 deletions
diff --git a/libraries/luajit-2.0/src/lj_mcode.c b/libraries/luajit-2.0/src/lj_mcode.c deleted file mode 100644 index 90ac34f..0000000 --- a/libraries/luajit-2.0/src/lj_mcode.c +++ /dev/null | |||
@@ -1,364 +0,0 @@ | |||
1 | /* | ||
2 | ** Machine code management. | ||
3 | ** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h | ||
4 | */ | ||
5 | |||
6 | #define lj_mcode_c | ||
7 | #define LUA_CORE | ||
8 | |||
9 | #include "lj_obj.h" | ||
10 | #if LJ_HASJIT | ||
11 | #include "lj_gc.h" | ||
12 | #include "lj_jit.h" | ||
13 | #include "lj_mcode.h" | ||
14 | #include "lj_trace.h" | ||
15 | #include "lj_dispatch.h" | ||
16 | #include "lj_vm.h" | ||
17 | #endif | ||
18 | |||
19 | /* -- OS-specific functions ----------------------------------------------- */ | ||
20 | |||
21 | #if LJ_HASJIT || LJ_HASFFI | ||
22 | |||
23 | /* Define this if you want to run LuaJIT with Valgrind. */ | ||
24 | #ifdef LUAJIT_USE_VALGRIND | ||
25 | #include <valgrind/valgrind.h> | ||
26 | #endif | ||
27 | |||
28 | #if !LJ_TARGET_X86ORX64 && LJ_TARGET_OSX | ||
29 | void sys_icache_invalidate(void *start, size_t len); | ||
30 | #endif | ||
31 | |||
#if LJ_TARGET_LINUX && LJ_TARGET_PPC
#include <dlfcn.h>
/* Cache-sync helper, resolved lazily from the 32 bit VDSO in lj_mcode_sync().
** NULL until the first sync attempt.
*/
static void (*mcode_sync_ppc)(void *start, void *end);
/* No-op fallback used when the VDSO helper cannot be resolved. */
static void mcode_sync_dummy(void *start, void *end)
{
  UNUSED(start); UNUSED(end);
}
#endif
40 | |||
/* Synchronize data/instruction cache.
** Must be called after machine code has been written or patched, so the
** CPU does not execute stale instructions. [start, end) is the byte range.
*/
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  /* Tell Valgrind to discard its cached translations for the range. */
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);  /* x86/x64: instruction cache is coherent. */
#elif LJ_TARGET_OSX
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_LINUX && LJ_TARGET_PPC
  /* Resolve the kernel's dcache/icache sync routine from the VDSO once. */
  if (!mcode_sync_ppc) {
    void *vdso = dlopen("linux-vdso32.so.1", RTLD_LAZY);
    if (!vdso || !(mcode_sync_ppc = dlsym(vdso, "__kernel_sync_dicache")))
      mcode_sync_ppc = mcode_sync_dummy;  /* Fall back to a no-op. */
  }
  mcode_sync_ppc(start, end);
#elif defined(__GNUC__) && !LJ_TARGET_PPC
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}
64 | |||
65 | #endif | ||
66 | |||
67 | #if LJ_HASJIT | ||
68 | |||
69 | #if LJ_TARGET_WINDOWS | ||
70 | |||
71 | #define WIN32_LEAN_AND_MEAN | ||
72 | #include <windows.h> | ||
73 | |||
74 | #define MCPROT_RW PAGE_READWRITE | ||
75 | #define MCPROT_RX PAGE_EXECUTE_READ | ||
76 | #define MCPROT_RWX PAGE_EXECUTE_READWRITE | ||
77 | |||
/* Reserve and commit a block of memory, optionally at a hinted address.
** Returns NULL when a hinted allocation fails; aborts the trace via
** lj_trace_err() only for an unhinted (must-succeed) allocation.
*/
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *mem = VirtualAlloc((void *)hint, sz,
			   MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (mem == NULL && hint == 0)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return mem;
}
86 | |||
/* Release an area obtained from mcode_alloc_at().
** Windows frees the whole reservation by base address, so sz is unused.
*/
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(sz); UNUSED(J);
  VirtualFree(p, 0, MEM_RELEASE);
}
92 | |||
/* Change page protection of an area; the previous protection is discarded. */
static void mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oldprot;
  VirtualProtect(p, sz, prot, &oldprot);
}
98 | |||
99 | #elif LJ_TARGET_POSIX | ||
100 | |||
101 | #include <sys/mman.h> | ||
102 | |||
103 | #ifndef MAP_ANONYMOUS | ||
104 | #define MAP_ANONYMOUS MAP_ANON | ||
105 | #endif | ||
106 | |||
107 | #define MCPROT_RW (PROT_READ|PROT_WRITE) | ||
108 | #define MCPROT_RX (PROT_READ|PROT_EXEC) | ||
109 | #define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC) | ||
110 | |||
111 | static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) | ||
112 | { | ||
113 | void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | ||
114 | if (p == MAP_FAILED && !hint) | ||
115 | lj_trace_err(J, LJ_TRERR_MCODEAL); | ||
116 | return p; | ||
117 | } | ||
118 | |||
119 | static void mcode_free(jit_State *J, void *p, size_t sz) | ||
120 | { | ||
121 | UNUSED(J); | ||
122 | munmap(p, sz); | ||
123 | } | ||
124 | |||
/* Change page protection of an area. NOTE(review): the mprotect return
** value is ignored here, matching the original behavior.
*/
static void mcode_setprot(void *p, size_t sz, int prot)
{
  mprotect(p, sz, prot);
}
129 | |||
130 | #elif LJ_64 | ||
131 | |||
132 | #error "Missing OS support for explicit placement of executable memory" | ||
133 | |||
134 | #else | ||
135 | |||
136 | /* Fallback allocator. This will fail if memory is not executable by default. */ | ||
137 | #define LUAJIT_UNPROTECT_MCODE | ||
138 | #define MCPROT_RW 0 | ||
139 | #define MCPROT_RX 0 | ||
140 | #define MCPROT_RWX 0 | ||
141 | |||
142 | static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) | ||
143 | { | ||
144 | UNUSED(hint); UNUSED(prot); | ||
145 | return lj_mem_new(J->L, sz); | ||
146 | } | ||
147 | |||
148 | static void mcode_free(jit_State *J, void *p, size_t sz) | ||
149 | { | ||
150 | lj_mem_free(J2G(J), p, sz); | ||
151 | } | ||
152 | |||
153 | #define mcode_setprot(p, sz, prot) UNUSED(p) | ||
154 | |||
155 | #endif | ||
156 | |||
157 | /* -- MCode area protection ----------------------------------------------- */ | ||
158 | |||
159 | /* Define this ONLY if the page protection twiddling becomes a bottleneck. */ | ||
160 | #ifdef LUAJIT_UNPROTECT_MCODE | ||
161 | |||
162 | /* It's generally considered to be a potential security risk to have | ||
163 | ** pages with simultaneous write *and* execute access in a process. | ||
164 | ** | ||
165 | ** Do not even think about using this mode for server processes or | ||
166 | ** apps handling untrusted external data (such as a browser). | ||
167 | ** | ||
168 | ** The security risk is not in LuaJIT itself -- but if an adversary finds | ||
169 | ** any *other* flaw in your C application logic, then any RWX memory page | ||
170 | ** simplifies writing an exploit considerably. | ||
171 | */ | ||
172 | #define MCPROT_GEN MCPROT_RWX | ||
173 | #define MCPROT_RUN MCPROT_RWX | ||
174 | |||
/* No-op: in unprotected mode all pages stay RWX for their whole lifetime. */
static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}
179 | |||
180 | #else | ||
181 | |||
182 | /* This is the default behaviour and much safer: | ||
183 | ** | ||
184 | ** Most of the time the memory pages holding machine code are executable, | ||
185 | ** but NONE of them is writable. | ||
186 | ** | ||
187 | ** The current memory area is marked read-write (but NOT executable) only | ||
188 | ** during the short time window while the assembler generates machine code. | ||
189 | */ | ||
190 | #define MCPROT_GEN MCPROT_RW | ||
191 | #define MCPROT_RUN MCPROT_RX | ||
192 | |||
193 | /* Change protection of MCode area. */ | ||
194 | static void mcode_protect(jit_State *J, int prot) | ||
195 | { | ||
196 | if (J->mcprot != prot) { | ||
197 | mcode_setprot(J->mcarea, J->szmcarea, prot); | ||
198 | J->mcprot = prot; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | #endif | ||
203 | |||
204 | /* -- MCode area allocation ----------------------------------------------- */ | ||
205 | |||
/* Check that an address is non-NULL and within the usable address range. */
#if LJ_TARGET_X64
/* x64: keep below 2^47 (canonical user address space limit). */
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
#else
/* 32 bit: keep below 0xffff0000. */
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < 0xffff0000)
#endif
211 | |||
212 | #ifdef LJ_TARGET_JUMPRANGE | ||
213 | |||
/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  */
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
  const uintptr_t range = (1u << LJ_TARGET_JUMPRANGE) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p)) {
	/* Accept only if the whole area lies within jump range of target.
	** Both comparisons rely on unsigned wraparound of the subtraction.
	*/
	if ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range)
	  return p;
	mcode_free(J, p, sz);  /* Free badly placed area. */
      }
    }
    /* Next try probing pseudo-random addresses. */
    do {
      hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16;  /* 64K aligned. */
    } while (!(hint + sz < range));
    hint = target + hint - (range>>1);
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}
244 | |||
245 | #else | ||
246 | |||
247 | /* All memory addresses are reachable by relative jumps. */ | ||
248 | #define mcode_alloc(J, sz) mcode_alloc_at((J), 0, (sz), MCPROT_GEN) | ||
249 | |||
250 | #endif | ||
251 | |||
252 | /* -- MCode area management ----------------------------------------------- */ | ||
253 | |||
/* Linked list of MCode areas. The link header is stored in-place at the
** bottom of each area (see mcode_allocarea()).
*/
typedef struct MCLink {
  MCode *next;		/* Next (older) area, or NULL. */
  size_t size;		/* Size of current area in bytes. */
} MCLink;
259 | |||
260 | /* Allocate a new MCode area. */ | ||
261 | static void mcode_allocarea(jit_State *J) | ||
262 | { | ||
263 | MCode *oldarea = J->mcarea; | ||
264 | size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10; | ||
265 | sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); | ||
266 | J->mcarea = (MCode *)mcode_alloc(J, sz); | ||
267 | J->szmcarea = sz; | ||
268 | J->mcprot = MCPROT_GEN; | ||
269 | J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea); | ||
270 | J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink)); | ||
271 | ((MCLink *)J->mcarea)->next = oldarea; | ||
272 | ((MCLink *)J->mcarea)->size = sz; | ||
273 | J->szallmcarea += sz; | ||
274 | } | ||
275 | |||
276 | /* Free all MCode areas. */ | ||
277 | void lj_mcode_free(jit_State *J) | ||
278 | { | ||
279 | MCode *mc = J->mcarea; | ||
280 | J->mcarea = NULL; | ||
281 | J->szallmcarea = 0; | ||
282 | while (mc) { | ||
283 | MCode *next = ((MCLink *)mc)->next; | ||
284 | mcode_free(J, mc, ((MCLink *)mc)->size); | ||
285 | mc = next; | ||
286 | } | ||
287 | } | ||
288 | |||
289 | /* -- MCode transactions -------------------------------------------------- */ | ||
290 | |||
291 | /* Reserve the remainder of the current MCode area. */ | ||
292 | MCode *lj_mcode_reserve(jit_State *J, MCode **lim) | ||
293 | { | ||
294 | if (!J->mcarea) | ||
295 | mcode_allocarea(J); | ||
296 | else | ||
297 | mcode_protect(J, MCPROT_GEN); | ||
298 | *lim = J->mcbot; | ||
299 | return J->mctop; | ||
300 | } | ||
301 | |||
/* Commit the top part of the current MCode area.
** top is the new lower bound of generated code within the area.
*/
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);  /* Back to executable, non-writable. */
}
308 | |||
/* Abort the reservation: restore run protection; mctop stays unchanged,
** so nothing assembled since lj_mcode_reserve() is kept.
*/
void lj_mcode_abort(jit_State *J)
{
  mcode_protect(J, MCPROT_RUN);
}
314 | |||
/* Set/reset protection to allow patching of MCode areas.
** finish == 0: make the area containing ptr writable and return its base
** (to be passed back later). finish != 0: ptr is that base; restore run
** protection and return NULL.
*/
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);  /* Pages are permanently RWX. */
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);  /* Current area: goes via mcprot cache. */
    else
      mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);  /* ptr must lie inside some MCode area. */
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
	mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN);
	return mc;
      }
    }
  }
#endif
}
347 | |||
348 | /* Limit of MCode reservation reached. */ | ||
349 | void lj_mcode_limiterr(jit_State *J, size_t need) | ||
350 | { | ||
351 | size_t sizemcode, maxmcode; | ||
352 | lj_mcode_abort(J); | ||
353 | sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10; | ||
354 | sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); | ||
355 | maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10; | ||
356 | if ((size_t)need > sizemcode) | ||
357 | lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */ | ||
358 | if (J->szallmcarea + sizemcode > maxmcode) | ||
359 | lj_trace_err(J, LJ_TRERR_MCODEAL); | ||
360 | mcode_allocarea(J); | ||
361 | lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. */ | ||
362 | } | ||
363 | |||
364 | #endif | ||