//
//  m3_core.c
//
//  Created by Steven Massey on 4/15/19.
//  Copyright © 2019 Steven Massey. All rights reserved.
//

#define M3_IMPLEMENT_ERROR_STRINGS
#include "m3_config.h"
#include "wasm3.h"

#include "m3_core.h"
#include "m3_env.h"

void m3_Abort(const char* message) {
#ifdef DEBUG
    fprintf(stderr, "Error: %s\n", message);
#endif
    abort();
}

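// Default cooperative-yield hook. M3_WEAK gives this definition weak linkage, so an embedder
// can supply its own m3_Yield (for example, to service a watchdog or pump an event loop while
// long-running Wasm code executes).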
M3_WEAK
M3Result m3_Yield ()
{
    return m3Err_none;
}

#if d_m3LogTimestamps

#include <time.h>

#define SEC_TO_US(sec) ((sec)*1000000)
#define NS_TO_US(ns) ((ns)/1000)

static uint64_t initial_ts = -1;    // -1 (i.e. UINT64_MAX) marks "baseline not captured yet"

uint64_t m3_GetTimestamp()
{
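    // Lazily capture a baseline on the first call: initial_ts is temporarily set to 0 so the
    // recursive call below returns an absolute timestamp, which then becomes the baseline.
    // Every later call returns microseconds elapsed since that first call.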
    if (initial_ts == -1) {
        initial_ts = 0;
        initial_ts = m3_GetTimestamp();
    }
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    uint64_t us = SEC_TO_US((uint64_t)ts.tv_sec) + NS_TO_US((uint64_t)ts.tv_nsec);
    return us - initial_ts;
}

#endif

#if d_m3FixedHeap
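
// Minimal bump allocator over a statically sized buffer: memory is handed out sequentially
// from fixedHeap, and only the most recent allocation (tracked in fixedHeapLast) can be freed
// or resized in place. Freeing anything else is a no-op.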

static u8 fixedHeap[d_m3FixedHeap];
static u8* fixedHeapPtr = fixedHeap;
static u8* const fixedHeapEnd = fixedHeap + d_m3FixedHeap;
static u8* fixedHeapLast = NULL;

#if d_m3FixedHeapAlign > 1
#   define HEAP_ALIGN_PTR(P) P = (u8*)(((size_t)(P)+(d_m3FixedHeapAlign-1)) & ~ (d_m3FixedHeapAlign-1));
#else
#   define HEAP_ALIGN_PTR(P)
#endif

void * m3_Malloc_Impl (size_t i_size)
{
    u8 * ptr = fixedHeapPtr;

    fixedHeapPtr += i_size;
    HEAP_ALIGN_PTR(fixedHeapPtr);

    if (fixedHeapPtr >= fixedHeapEnd)
    {
        return NULL;
    }

    memset (ptr, 0x0, i_size);
    fixedHeapLast = ptr;

    return ptr;
}

void m3_Free_Impl (void * i_ptr)
{
    // Handle the last chunk
    if (i_ptr && i_ptr == fixedHeapLast) {
        fixedHeapPtr = fixedHeapLast;
        fixedHeapLast = NULL;
    } else {
        //printf("== free %p [failed]\n", i_ptr);
    }
}

void * m3_Realloc_Impl (void * i_ptr, size_t i_newSize, size_t i_oldSize)
{
    if (M3_UNLIKELY(i_newSize == i_oldSize)) return i_ptr;

    void * newPtr;

    // Handle the last chunk
    if (i_ptr && i_ptr == fixedHeapLast) {
        fixedHeapPtr = fixedHeapLast + i_newSize;
        HEAP_ALIGN_PTR(fixedHeapPtr);
        if (fixedHeapPtr >= fixedHeapEnd)
        {
            return NULL;
        }
        newPtr = i_ptr;
    } else {
        newPtr = m3_Malloc_Impl(i_newSize);
        if (!newPtr) {
            return NULL;
        }
        if (i_ptr) {
            memcpy(newPtr, i_ptr, i_oldSize);
        }
    }

    if (i_newSize > i_oldSize) {
        memset ((u8 *) newPtr + i_oldSize, 0x0, i_newSize - i_oldSize);
    }

    return newPtr;
}

#else

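// Default allocator, backed by the C library: allocations are zero-initialized (calloc), and
// m3_Realloc_Impl clears any newly grown region so both heap back-ends behave the same way.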
void * m3_Malloc_Impl (size_t i_size)
{
    return calloc (i_size, 1);
}

void m3_Free_Impl (void * io_ptr)
{
    free (io_ptr);
}

void * m3_Realloc_Impl (void * i_ptr, size_t i_newSize, size_t i_oldSize)
{
    if (M3_UNLIKELY(i_newSize == i_oldSize)) return i_ptr;

    void * newPtr = realloc (i_ptr, i_newSize);

    if (M3_LIKELY(newPtr))
    {
        if (i_newSize > i_oldSize) {
            memset ((u8 *) newPtr + i_oldSize, 0x0, i_newSize - i_oldSize);
        }
        return newPtr;
    }
    return NULL;
}

#endif

void * m3_CopyMem (const void * i_from, size_t i_size)
{
    void * ptr = m3_Malloc("CopyMem", i_size);
    if (ptr) {
        memcpy (ptr, i_from, i_size);
    }
    return ptr;
}

//--------------------------------------------------------------------------------------------

#if d_m3LogNativeStack

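// Rough native-stack usage tracking: the address of a local variable is sampled once at startup
// (m3StackCheckInit) and again at checkpoints (m3StackCheck); assuming a downward-growing stack,
// the difference reported by m3StackGetMax approximates the deepest native stack use observed.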
static size_t stack_start;
static size_t stack_end;

void m3StackCheckInit ()
{
    char stack;
    stack_end = stack_start = (size_t)&stack;
}

void m3StackCheck ()
{
    char stack;
    size_t addr = (size_t)&stack;

    size_t stackEnd = stack_end;
    stack_end = M3_MIN (stack_end, addr);
    (void) stackEnd;    // only referenced by the commented-out diagnostic below

//  if (stackEnd != stack_end)
//      printf ("maxStack: %ld\n", m3StackGetMax ());
}

int m3StackGetMax ()
{
    return stack_start - stack_end;
}

#endif

//--------------------------------------------------------------------------------------------

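// The Wasm binary format encodes value types as negative signed-LEB values: i32 is 0x7F (-1),
// i64 is 0x7E (-2), f32 is 0x7D (-3), f64 is 0x7C (-4), and the empty block type is 0x40 (-64).
// Negating the decoded i8 therefore maps them directly onto the c_m3Type_* constants, with the
// 0x40 case translated to c_m3Type_none.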
M3Result NormalizeType (u8 * o_type, i8 i_convolutedWasmType)
{
    M3Result result = m3Err_none;

    u8 type = -i_convolutedWasmType;

    if (type == 0x40)
        type = c_m3Type_none;
    else if (type < c_m3Type_i32 or type > c_m3Type_f64)
        result = m3Err_invalidTypeId;

    * o_type = type;

    return result;
}


bool IsFpType (u8 i_m3Type)
{
    return (i_m3Type == c_m3Type_f32 or i_m3Type == c_m3Type_f64);
}


bool IsIntType (u8 i_m3Type)
{
    return (i_m3Type == c_m3Type_i32 or i_m3Type == c_m3Type_i64);
}


bool Is64BitType (u8 i_m3Type)
{
    if (i_m3Type == c_m3Type_i64 or i_m3Type == c_m3Type_f64)
        return true;
    else if (i_m3Type == c_m3Type_i32 or i_m3Type == c_m3Type_f32 or i_m3Type == c_m3Type_none)
        return false;
    else
        return (sizeof (voidptr_t) == 8);   // all other cases are pointers
}

u32 SizeOfType (u8 i_m3Type)
{
    if (i_m3Type == c_m3Type_i32 or i_m3Type == c_m3Type_f32)
        return sizeof (i32);

    return sizeof (i64);
}


//-- Binary Wasm parsing utils ------------------------------------------------------------------------------------------

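// Fixed-width readers: each one checks that sizeof(T) bytes are available before i_end, copies
// them out with memcpy (the input may be unaligned), byte-swaps on big-endian hosts via the
// M3_BSWAP_* macros, and advances *io_bytes only on success.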
M3Result Read_u64 (u64 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    const u8 * ptr = * io_bytes;
    ptr += sizeof (u64);

    if (ptr <= i_end)
    {
        memcpy(o_value, * io_bytes, sizeof(u64));
        M3_BSWAP_u64(*o_value);
        * io_bytes = ptr;
        return m3Err_none;
    }
    else return m3Err_wasmUnderrun;
}


M3Result Read_u32 (u32 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    const u8 * ptr = * io_bytes;
    ptr += sizeof (u32);

    if (ptr <= i_end)
    {
        memcpy(o_value, * io_bytes, sizeof(u32));
        M3_BSWAP_u32(*o_value);
        * io_bytes = ptr;
        return m3Err_none;
    }
    else return m3Err_wasmUnderrun;
}

#if d_m3ImplementFloat

M3Result Read_f64 (f64 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    const u8 * ptr = * io_bytes;
    ptr += sizeof (f64);

    if (ptr <= i_end)
    {
        memcpy(o_value, * io_bytes, sizeof(f64));
        M3_BSWAP_f64(*o_value);
        * io_bytes = ptr;
        return m3Err_none;
    }
    else return m3Err_wasmUnderrun;
}


M3Result Read_f32 (f32 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    const u8 * ptr = * io_bytes;
    ptr += sizeof (f32);

    if (ptr <= i_end)
    {
        memcpy(o_value, * io_bytes, sizeof(f32));
        M3_BSWAP_f32(*o_value);
        * io_bytes = ptr;
        return m3Err_none;
    }
    else return m3Err_wasmUnderrun;
}

#endif

M3Result Read_u8 (u8 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    const u8 * ptr = * io_bytes;

    if (ptr < i_end)
    {
        * o_value = * ptr;
        * io_bytes = ptr + 1;

        return m3Err_none;
    }
    else return m3Err_wasmUnderrun;
}

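// Reads a single opcode. When cascaded opcodes are disabled, a prefixed (extended) opcode is
// folded together with its second byte into one 16-bit m3opcode_t value.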
M3Result Read_opcode (m3opcode_t * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    const u8 * ptr = * io_bytes;

    if (ptr < i_end)
    {
        m3opcode_t opcode = * ptr++;

#if d_m3CascadedOpcodes == 0
        if (M3_UNLIKELY(opcode == c_waOp_extended))
        {
            if (ptr < i_end)
            {
                opcode = (opcode << 8) | (* ptr++);
            }
            else return m3Err_wasmUnderrun;
        }
#endif
        * o_value = opcode;
        * io_bytes = ptr;

        return m3Err_none;
    }
    else return m3Err_wasmUnderrun;
}

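// Unsigned LEB128 decoding: each byte contributes its low 7 bits, least-significant group first,
// and the high bit marks continuation. For example, the bytes 0xE5 0x8E 0x26 decode to 624485.
// i_maxNumBits bounds the shift so over-long encodings are rejected with m3Err_lebOverflow.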
M3Result ReadLebUnsigned (u64 * o_value, u32 i_maxNumBits, bytes_t * io_bytes, cbytes_t i_end)
{
    M3Result result = m3Err_wasmUnderrun;

    u64 value = 0;

    u32 shift = 0;
    const u8 * ptr = * io_bytes;

    while (ptr < i_end)
    {
        u64 byte = * (ptr++);

        value |= ((byte & 0x7f) << shift);
        shift += 7;

        if ((byte & 0x80) == 0)
        {
            result = m3Err_none;
            break;
        }

        if (shift >= i_maxNumBits)
        {
            result = m3Err_lebOverflow;
            break;
        }
    }

    * o_value = value;
    * io_bytes = ptr;

    return result;
}

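// Signed LEB128 works the same way, except that when the sign bit (0x40) of the final byte is
// set, the value is sign-extended to 64 bits. For example, the bytes 0xC0 0xBB 0x78 decode
// to -123456.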
M3Result ReadLebSigned (i64 * o_value, u32 i_maxNumBits, bytes_t * io_bytes, cbytes_t i_end)
{
    M3Result result = m3Err_wasmUnderrun;

    i64 value = 0;

    u32 shift = 0;
    const u8 * ptr = * io_bytes;

    while (ptr < i_end)
    {
        u64 byte = * (ptr++);

        value |= ((byte & 0x7f) << shift);
        shift += 7;

        if ((byte & 0x80) == 0)
        {
            result = m3Err_none;

            if ((byte & 0x40) and (shift < 64))    // do sign extension
            {
                u64 extend = 0;
                value |= (~extend << shift);
            }

            break;
        }

        if (shift >= i_maxNumBits)
        {
            result = m3Err_lebOverflow;
            break;
        }
    }

    * o_value = value;
    * io_bytes = ptr;

    return result;
}


M3Result ReadLEB_u32 (u32 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    u64 value;
    M3Result result = ReadLebUnsigned (& value, 32, io_bytes, i_end);
    * o_value = (u32) value;

    return result;
}


M3Result ReadLEB_u7 (u8 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    u64 value;
    M3Result result = ReadLebUnsigned (& value, 7, io_bytes, i_end);
    * o_value = (u8) value;

    return result;
}


M3Result ReadLEB_i7 (i8 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    i64 value;
    M3Result result = ReadLebSigned (& value, 7, io_bytes, i_end);
    * o_value = (i8) value;

    return result;
}


M3Result ReadLEB_i32 (i32 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    i64 value;
    M3Result result = ReadLebSigned (& value, 32, io_bytes, i_end);
    * o_value = (i32) value;

    return result;
}


M3Result ReadLEB_i64 (i64 * o_value, bytes_t * io_bytes, cbytes_t i_end)
{
    i64 value;
    M3Result result = ReadLebSigned (& value, 64, io_bytes, i_end);
    * o_value = value;

    return result;
}

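// Reads a length-prefixed UTF-8 string and returns it as a NUL-terminated copy allocated with
// m3_Malloc; the caller is responsible for releasing it (m3_Free). On error, *o_utf8 stays NULL.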
M3Result Read_utf8 (cstr_t * o_utf8, bytes_t * io_bytes, cbytes_t i_end)
{
    *o_utf8 = NULL;

    u32 utf8Length;
    M3Result result = ReadLEB_u32 (& utf8Length, io_bytes, i_end);

    if (not result)
    {
        if (utf8Length <= d_m3MaxSaneUtf8Length)
        {
            const u8 * ptr = * io_bytes;
            const u8 * end = ptr + utf8Length;

            if (end <= i_end)
            {
                char * utf8 = (char *)m3_Malloc ("UTF8", utf8Length + 1);

                if (utf8)
                {
                    memcpy (utf8, ptr, utf8Length);
                    utf8 [utf8Length] = 0;
                    * o_utf8 = utf8;
                }

                * io_bytes = end;
            }
            else result = m3Err_wasmUnderrun;
        }
        else result = m3Err_missingUTF8;
    }

    return result;
}

#if d_m3RecordBacktraces
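
// Backtrace recording: frames form a singly linked list owned by the runtime
// (backtrace.frames .. backtrace.lastFrame). If a frame allocation fails, lastFrame is set to
// the sentinel M3_BACKTRACE_TRUNCATED and no further frames are recorded.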
u32 FindModuleOffset (IM3Runtime i_runtime, pc_t i_pc)
{
    // walk the code pages
    IM3CodePage curr = i_runtime->pagesOpen;
    bool pageFound = false;

    while (curr)
    {
        if (ContainsPC (curr, i_pc))
        {
            pageFound = true;
            break;
        }
        curr = curr->info.next;
    }

    if (!pageFound)
    {
        curr = i_runtime->pagesFull;
        while (curr)
        {
            if (ContainsPC (curr, i_pc))
            {
                pageFound = true;
                break;
            }
            curr = curr->info.next;
        }
    }

    if (pageFound)
    {
        u32 result = 0;

        bool pcFound = MapPCToOffset (curr, i_pc, & result);
        d_m3Assert (pcFound);

        return result;
    }
    else return 0;
}


void PushBacktraceFrame (IM3Runtime io_runtime, pc_t i_pc)
{
    // don't try to push any more frames if we've already had an alloc failure
    if (M3_UNLIKELY (io_runtime->backtrace.lastFrame == M3_BACKTRACE_TRUNCATED))
        return;

    M3BacktraceFrame * newFrame = m3_AllocStruct(M3BacktraceFrame);

    if (!newFrame)
    {
        io_runtime->backtrace.lastFrame = M3_BACKTRACE_TRUNCATED;
        return;
    }

    newFrame->moduleOffset = FindModuleOffset (io_runtime, i_pc);

    if (!io_runtime->backtrace.frames || !io_runtime->backtrace.lastFrame)
        io_runtime->backtrace.frames = newFrame;
    else
        io_runtime->backtrace.lastFrame->next = newFrame;
    io_runtime->backtrace.lastFrame = newFrame;
}


void FillBacktraceFunctionInfo (IM3Runtime io_runtime, IM3Function i_function)
{
    // If we've had an alloc failure then the last frame doesn't refer to the
    // frame we want to fill in the function info for.
    if (M3_UNLIKELY (io_runtime->backtrace.lastFrame == M3_BACKTRACE_TRUNCATED))
        return;

    if (!io_runtime->backtrace.lastFrame)
        return;

    io_runtime->backtrace.lastFrame->function = i_function;
}


void ClearBacktrace (IM3Runtime io_runtime)
{
    M3BacktraceFrame * currentFrame = io_runtime->backtrace.frames;
    while (currentFrame)
    {
        M3BacktraceFrame * nextFrame = currentFrame->next;
        m3_Free (currentFrame);
        currentFrame = nextFrame;
    }

    io_runtime->backtrace.frames = NULL;
    io_runtime->backtrace.lastFrame = NULL;
}
#endif // d_m3RecordBacktraces