/*
** Machine code management.
** Copyright (C) 2005-2014 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_mcode_c
#define LUA_CORE

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#endif
#if LJ_HASJIT || LJ_HASFFI
#include "lj_vm.h"
#endif

/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#if LJ_TARGET_IOS
void sys_icache_invalidate(void *start, size_t len);
#endif

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}
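
/* On x86/x64 the data and instruction caches are coherent, so the sync is
** a no-op there. All other targets must flush the icache after machine
** code has been written or patched, before it gets executed.
*/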

#endif

#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW PAGE_READWRITE
#define MCPROT_RX PAGE_EXECUTE_READ
#define MCPROT_RWX PAGE_EXECUTE_READWRITE

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = VirtualAlloc((void *)hint, sz,
                         MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}

static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !VirtualProtect(p, sz, prot, &oprot);
}

#elif LJ_TARGET_POSIX

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#define MCPROT_RW (PROT_READ|PROT_WRITE)
#define MCPROT_RX (PROT_READ|PROT_EXEC)
#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC)

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
    p = NULL;
  }
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}

static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}

#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
#define MCPROT_RW 0
#define MCPROT_RX 0
#define MCPROT_RWX 0

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#endif

/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if the page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN MCPROT_RWX
#define MCPROT_RUN MCPROT_RWX

static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN MCPROT_RW
#define MCPROT_RUN MCPROT_RX
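
/* The resulting lifecycle of the current area (see the transaction
** functions further below):
**
**   lj_mcode_reserve()  ->  mcode_protect(J, MCPROT_GEN)   (RW, not X)
**   ... assembler writes machine code ...
**   lj_mcode_commit()   ->  mcode_protect(J, MCPROT_RUN)   (RX, not W)
*/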

/* Protection twiddling failed. Probably due to kernel security. */
static LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
}

/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
      mcode_protfail(J);
    J->mcprot = prot;
  }
}

#endif

/* -- MCode area allocation ----------------------------------------------- */

#if LJ_TARGET_X64
#define mcode_validptr(p) ((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
#else
#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000)
#endif
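
/* The x64 bound matches the canonical 47 bit user address space. The 32 bit
** bound rejects the topmost 64KB, where a pointer could collide with error
** values such as MAP_FAILED ((void *)-1).
*/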

#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
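  /* Worked example (illustrative; assumes the x64 setting of
  ** LJ_TARGET_JUMPRANGE = 31): range = (1u<<30) - (1u<<21) = 1GB - 2MB.
  ** Two areas placed at target-range and target+range are less than 2GB
  ** apart, so +-2GB rel32 branches can still reach from one to the other.
  */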
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
                     0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p) &&
          ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
        return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing pseudo-random addresses. */
    do {
      hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16;  /* 64K aligned. */
    } while (!(hint + sz < range));
    hint = target + hint - (range>>1);
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}

#else

/* All memory addresses are reachable by relative jumps. */
#define mcode_alloc(J, sz) mcode_alloc_at((J), 0, (sz), MCPROT_GEN)

#endif

/* -- MCode area management ----------------------------------------------- */

/* Linked list of MCode areas. */
typedef struct MCLink {
  MCode *next;   /* Next area. */
  size_t size;   /* Size of current area. */
} MCLink;
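
/* The link lives in-band: the first sizeof(MCLink) bytes of every area are
** reserved for it, which is why mcode_allocarea() starts mcbot behind it.
*/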

/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
}

/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    mcode_free(J, mc, ((MCLink *)mc)->size);
    mc = next;
  }
}

/* -- MCode transactions -------------------------------------------------- */

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}

/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}

/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}
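
/* A sketch of the transaction protocol implemented by the three functions
** above (the actual caller is the assembler; the names below are
** illustrative only):
**
**   MCode *lim, *top = lj_mcode_reserve(J, &lim);
**   ... generate code downwards from top, but never below lim ...
**   lj_mcode_commit(J, lowest_emitted_address);   -- on success
**   lj_mcode_abort(J);                            -- or: error/retry path
*/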

/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
          mcode_protfail(J);
        return mc;
      }
    }
  }
#endif
}
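
/* Sketch of the intended call pattern (illustrative, not verbatim from the
** callers): pass 0 to unprotect the area containing p, then pass the
** returned area pointer back with 1 to reprotect it:
**
**   MCode *mcarea = lj_mcode_patch(J, p, 0);   -- make area writable
**   ... patch instructions around p ...
**   lj_mcode_patch(J, mcarea, 1);              -- restore MCPROT_RUN
*/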

/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}
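
/* Both parameters are given in KBytes, hence the << 10: JIT_P_sizemcode
** bounds a single area, JIT_P_maxmcode the total of all areas. The final
** LJ_TRERR_MCODELM is caught by the trace compiler, which retries the
** assembly inside the freshly allocated area.
*/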

#endif