/* ----------------------------------------------------------------------------
Copyright (c) 2018, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset, memcpy, strlen
#include <stdlib.h>  // malloc, exit

#define MI_IN_ALLOC_C
#include "alloc-override.c"
#undef MI_IN_ALLOC_C

// ------------------------------------------------------
// Allocation
// ------------------------------------------------------

// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
  mi_assert_internal(page->block_size == 0 || page->block_size >= size);
  mi_block_t* block = page->free;
  if (mi_unlikely(block == NULL)) {
    return _mi_malloc_generic(heap, size);  // slow path
  }
  mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
  // pop from the free list
  page->free = mi_block_next(page, block);
  page->used++;
  mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
#if (MI_DEBUG!=0)
  if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
#elif (MI_SECURE!=0)
  block->next = 0;  // don't leak internal data
#endif
#if (MI_STAT>1)
  if (size <= MI_LARGE_OBJ_SIZE_MAX) {
    size_t bin = _mi_bin(size);
    mi_heap_stat_increase(heap, normal[bin], 1);
  }
#endif
  return block;
}

// allocate a small block
extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  mi_assert(size <= MI_SMALL_SIZE_MAX);
  mi_page_t* page = _mi_heap_get_free_small_page(heap, size);
  return _mi_page_malloc(heap, page, size);
}

extern inline mi_decl_allocator void* mi_malloc_small(size_t size) mi_attr_noexcept {
  return mi_heap_malloc_small(mi_get_default_heap(), size);
}
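
// Usage sketch (illustrative only): `mi_malloc_small` bypasses the size
// dispatch in `mi_heap_malloc` and must only be called with
// `size <= MI_SMALL_SIZE_MAX` (checked by the assertion above).
//
//   void* p = mi_malloc_small(16);  // small-object fast path
//   mi_free(p);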


// zero-initialized small block
mi_decl_allocator void* mi_zalloc_small(size_t size) mi_attr_noexcept {
  void* p = mi_malloc_small(size);
  if (p != NULL) { memset(p, 0, size); }
  return p;
}

// The main allocation function
extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  mi_assert(heap != NULL);
  mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());  // heaps are thread local
  void* p;
  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
    p = mi_heap_malloc_small(heap, size);
  }
  else {
    p = _mi_malloc_generic(heap, size);
  }
#if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_good_size(size));  // overestimate for aligned sizes
  }
#endif
  return p;
}

extern inline mi_decl_allocator void* mi_malloc(size_t size) mi_attr_noexcept {
  return mi_heap_malloc(mi_get_default_heap(), size);
}

void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
  // note: we need to initialize the whole block to zero, not just the requested size,
  // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
  UNUSED(size);
  mi_assert_internal(p != NULL);
  mi_assert_internal(size > 0 && page->block_size >= size);
  mi_assert_internal(_mi_ptr_page(p) == page);
  if (page->is_zero) {
    // already zero initialized memory?
    ((mi_block_t*)p)->next = 0;  // clear the free list pointer
    mi_assert_expensive(mi_mem_is_zero(p, page->block_size));
  }
  else {
    // otherwise memset
    memset(p, 0, page->block_size);
  }
}
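
// Why the whole block is zeroed (an illustrative sketch of issue #63, with
// hypothetical sizes): suppose `mi_zalloc(100)` returns a block with
// `block_size == 112`. A later `mi_rezalloc(p, 110)` may keep the same block
// since it still fits, which is only safe if bytes 100..111 were zeroed too;
// zeroing just the requested 100 bytes would expose stale data.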

void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
  void* p = mi_heap_malloc(heap, size);
  if (zero && p != NULL) {
    _mi_block_zero_init(_mi_ptr_page(p), p, size);  // todo: can we avoid getting the page again?
  }
  return p;
}

extern inline mi_decl_allocator void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
  return _mi_heap_malloc_zero(heap, size, true);
}

mi_decl_allocator void* mi_zalloc(size_t size) mi_attr_noexcept {
  return mi_heap_zalloc(mi_get_default_heap(), size);
}
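
// Illustrative usage of the entry points above (a sketch, not part of the
// allocator): `mi_malloc` returns uninitialized memory, `mi_zalloc` returns
// zero-initialized memory, and both are released with `mi_free`.
//
//   int* xs = (int*)mi_malloc(100 * sizeof(int));  // uninitialized
//   int* ys = (int*)mi_zalloc(100 * sizeof(int));  // all bytes zero
//   if (xs != NULL && ys != NULL) { xs[0] = 42; }
//   mi_free(xs);
//   mi_free(ys);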


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This check is somewhat expensive so it is only enabled
// in secure mode 4 (and in debug mode)
// ------------------------------------------------------

#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
// linear search to check whether the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
  while (list != NULL) {
    if (elem == list) return true;
    list = mi_block_next(page, list);
  }
  return false;
}

static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block, const mi_block_t* n) {
  size_t psize;
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
  if (n == NULL || ((uint8_t*)n >= pstart && (uint8_t*)n < (pstart + psize))) {
    // Suspicious: the decoded value is in the same page (or NULL).
    // Walk the free lists to positively verify that it was already freed
    if (mi_list_contains(page, page->free, block) ||
        mi_list_contains(page, page->local_free, block) ||
        mi_list_contains(page, (const mi_block_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &page->thread_free)), block))
    {
      _mi_fatal_error("double free detected of block %p with size %zu\n", block, page->block_size);
      return true;
    }
  }
  return false;
}

static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  mi_block_t* n = mi_block_nextx(page, block, page->cookie);  // pretend it is freed and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1)) == 0 &&    // quick check: aligned pointer?
      (n == NULL || mi_is_in_same_segment(block, n)))  // quick check: in same segment or NULL?
  {
    // Suspicious: the decoded value in the block is in the same segment (or NULL) -- maybe a double free?
    // (continue in a separate function to improve code generation)
    return mi_check_is_double_freex(page, block, n);
  }
  return false;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  UNUSED(page);
  UNUSED(block);
  return false;
}
#endif
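
// For illustration: the kind of error the check above is meant to catch. In
// secure mode 4 (or debug mode) the second free below is detected and reported
// through `_mi_fatal_error` instead of silently corrupting the free list.
//
//   void* p = mi_malloc(32);
//   mi_free(p);
//   mi_free(p);  // double free: `p` is found on one of the page's free lists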


// ------------------------------------------------------
// Free
// ------------------------------------------------------

// multi-threaded free
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
  mi_thread_free_t tfree;
  mi_thread_free_t tfreex;
  bool use_delayed;

  mi_segment_t* segment = _mi_page_segment(page);
  if (segment->page_kind == MI_PAGE_HUGE) {
    // huge page segments are always abandoned and can be freed immediately
    mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id) == 0);
    mi_assert_internal(mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &segment->abandoned_next)) == NULL);
    // claim it and free
    mi_heap_t* heap = mi_get_default_heap();
    // paranoia: if this is the last reference, the CAS should always succeed
    if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) {
      mi_block_set_next(page, block, page->free);
      page->free = block;
      page->used--;
      page->is_zero = false;
      mi_assert(page->used == 0);
      mi_tld_t* tld = heap->tld;
      if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
        _mi_stat_decrease(&tld->stats.giant, page->block_size);
      }
      else {
        _mi_stat_decrease(&tld->stats.huge, page->block_size);
      }
      _mi_segment_page_free(page, true, &tld->segments);
    }
    return;
  }

  do {
    tfree = page->thread_free;
    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE ||
                   (mi_tf_delayed(tfree) == MI_NO_DELAYED_FREE && page->used == mi_atomic_read_relaxed(&page->thread_freed)+1)  // benign data race; it only enables early release of the page
                  );
    if (mi_unlikely(use_delayed)) {
      // unlikely: this only happens on the first concurrent free in a page that is in the full list
      tfreex = mi_tf_set_delayed(tfree, MI_DELAYED_FREEING);
    }
    else {
      // usual: directly add to the page's thread_free list
      mi_block_set_next(page, block, mi_tf_block(tfree));
      tfreex = mi_tf_set_block(tfree, block);
    }
  } while (!mi_atomic_cas_weak(mi_atomic_cast(uintptr_t, &page->thread_free), tfreex, tfree));

  if (mi_likely(!use_delayed)) {
    // increment the thread free count and return
    mi_atomic_increment(&page->thread_freed);
  }
  else {
    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
    mi_heap_t* heap = (mi_heap_t*)mi_atomic_read_ptr(mi_atomic_cast(void*, &page->heap));
    mi_assert_internal(heap != NULL);
    if (heap != NULL) {
      // add to the delayed free list of this heap (do this atomically as the lock only protects heap memory validity)
      mi_block_t* dfree;
      do {
        dfree = (mi_block_t*)heap->thread_delayed_free;
        mi_block_set_nextx(heap, block, dfree, heap->cookie);
      } while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*, &heap->thread_delayed_free), block, dfree));
    }

    // and reset the MI_DELAYED_FREEING flag
    do {
      tfreex = tfree = page->thread_free;
      mi_assert_internal(mi_tf_delayed(tfree) == MI_NEVER_DELAYED_FREE || mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
      if (mi_tf_delayed(tfree) != MI_NEVER_DELAYED_FREE) tfreex = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE);
    } while (!mi_atomic_cas_weak(mi_atomic_cast(uintptr_t, &page->thread_free), tfreex, tfree));
  }
}


// regular free
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
#if (MI_DEBUG)
  memset(block, MI_DEBUG_FREED, page->block_size);
#endif

  // and push it on the free list
  if (mi_likely(local)) {
    // owning thread can free a block directly
    if (mi_check_is_double_free(page, block)) return;
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    page->used--;
    if (mi_unlikely(mi_page_all_free(page))) {
      _mi_page_retire(page);
    }
    else if (mi_unlikely(mi_page_is_in_full(page))) {
      _mi_page_unfull(page);
    }
  }
  else {
    _mi_free_block_mt(page, block);
  }
}


// Adjust a block that was allocated at an aligned offset back to the actual start of the block in the page.
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
  mi_assert_internal(page != NULL && p != NULL);
  size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
  size_t adjust = (diff % page->block_size);
  return (mi_block_t*)((uintptr_t)p - adjust);
}
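
// A worked example of the adjustment above (hypothetical numbers): if the page
// data starts at 0x10000 with `block_size == 1024` and `p == 0x10440` points
// into the second block, then `diff == 0x440`, `adjust == 0x440 % 1024 == 0x40`,
// and the returned block start is 0x10440 - 0x40 == 0x10400.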


static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool local, void* p) {
  mi_block_t* block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
  _mi_free_block(page, local, block);
}

// Free a block
void mi_free(void* p) mi_attr_noexcept
{
#if (MI_DEBUG>0)
  if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
    _mi_error_message("trying to free an invalid (unaligned) pointer: %p\n", p);
    return;
  }
#endif

  const mi_segment_t* const segment = _mi_ptr_segment(p);
  if (mi_unlikely(segment == NULL)) return;  // this also checks for (p == NULL)

#if (MI_DEBUG!=0)
  if (mi_unlikely(!mi_is_in_heap_region(p))) {
    _mi_warning_message("possibly trying to free a pointer that does not point to a valid heap region: 0x%p\n"
                        "(this may still be a valid very large allocation (over 64MiB))\n", p);
    if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
      _mi_warning_message("(yes, the previous pointer 0x%p was valid after all)\n", p);
    }
  }
#endif
#if (MI_DEBUG!=0 || MI_SECURE>=4)
  if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
    _mi_error_message("trying to free a pointer that does not point to a valid heap space: %p\n", p);
    return;
  }
#endif

  const uintptr_t tid = _mi_thread_id();
  mi_page_t* const page = _mi_segment_page_of(segment, p);

#if (MI_STAT>1)
  mi_heap_t* heap = mi_heap_get_default();
  mi_heap_stat_decrease(heap, malloc, mi_usable_size(p));
  if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal[_mi_bin(page->block_size)], 1);
  }
  // huge page stat is accounted for in `_mi_page_retire`
#endif

  if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
    // local, and not full or aligned
    mi_block_t* block = (mi_block_t*)p;
    if (mi_check_is_double_free(page, block)) return;
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    page->used--;
    if (mi_unlikely(mi_page_all_free(page))) { _mi_page_retire(page); }
  }
  else {
    // non-local, aligned blocks, or a full page; use the more generic path
    mi_free_generic(segment, page, tid == segment->thread_id, p);
  }
}

bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  const mi_segment_t* segment = _mi_ptr_segment(block);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(_mi_thread_id() == segment->thread_id);
  mi_page_t* page = _mi_segment_page_of(segment, block);
  if (mi_tf_delayed(page->thread_free) == MI_DELAYED_FREEING) {
    // we might already start delayed freeing while another thread has not yet
    // reset the delayed_freeing flag; in that case don't free it quite yet if
    // this is the last block remaining.
    if (page->used - page->thread_freed == 1) return false;
  }
  _mi_free_block(page, true, block);
  return true;
}

// Bytes available in a block
size_t mi_usable_size(const void* p) mi_attr_noexcept {
  if (p == NULL) return 0;
  const mi_segment_t* segment = _mi_ptr_segment(p);
  const mi_page_t* page = _mi_segment_page_of(segment, p);
  size_t size = page->block_size;
  if (mi_unlikely(mi_page_has_aligned(page))) {
    ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment, page, p);
    mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
    return (size - adjust);
  }
  else {
    return size;
  }
}
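
// Usage sketch: `mi_usable_size` may report more than the requested size since
// blocks are rounded up to their size bin; all reported bytes can be used.
//
//   void* p = mi_malloc(100);
//   if (p != NULL) {
//     size_t n = mi_usable_size(p);  // n >= 100
//     memset(p, 0xFF, n);            // writing up to `n` bytes is fine
//   }
//   mi_free(p);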


// ------------------------------------------------------
// ensure explicit external inline definitions are emitted!
// ------------------------------------------------------

#ifdef __cplusplus
void* _mi_externs[] = {
  (void*)&_mi_page_malloc,
  (void*)&mi_malloc,
  (void*)&mi_malloc_small,
  (void*)&mi_heap_malloc,
  (void*)&mi_heap_zalloc,
  (void*)&mi_heap_malloc_small
};
#endif


// ------------------------------------------------------
// Allocation extensions
// ------------------------------------------------------

void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  UNUSED_RELEASE(size);
  mi_assert(p == NULL || size <= mi_usable_size(p));
  mi_free(p);
}

void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free_size(p, size);
}

void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free(p);
}
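
// Sketch of the sized/aligned free variants above: the extra arguments only
// feed assertions in debug builds; in release builds these behave exactly like
// `mi_free`.
//
//   void* p = mi_malloc(100);
//   mi_free_size(p, 100);  // asserts 100 <= mi_usable_size(p), then frees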

extern inline mi_decl_allocator void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_mul_overflow(count, size, &total)) return NULL;
  return mi_heap_zalloc(heap, total);
}

mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_calloc(mi_get_default_heap(), count, size);
}

// Uninitialized `calloc`
extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_mul_overflow(count, size, &total)) return NULL;
  return mi_heap_malloc(heap, total);
}

mi_decl_allocator void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_mallocn(mi_get_default_heap(), count, size);
}
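
// Overflow sketch for the count/size interfaces above: when `count * size`
// overflows `size_t`, `mi_mul_overflow` reports it and NULL is returned rather
// than allocating a too-small block.
//
//   void* p = mi_calloc(SIZE_MAX / 2, 4);  // count*size overflows: p == NULL
//   void* q = mi_mallocn(1000, 4);         // ok: 4000 uninitialized bytes
//   mi_free(q);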

// Expand in place or fail
mi_decl_allocator void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
  if (p == NULL) return NULL;
  size_t size = mi_usable_size(p);
  if (newsize > size) return NULL;
  return p;  // it fits
}
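
// Sketch of in-place expansion: `mi_expand` never moves or reallocates; it
// only succeeds when `newsize` already fits in the usable size of the block.
//
//   char* p = (char*)mi_malloc(100);
//   char* q = (char*)mi_expand(p, 120);  // NULL unless 120 <= mi_usable_size(p)
//   if (q != NULL) { /* q == p, expanded in place */ }
//   mi_free(p);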

void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) {
  if (p == NULL) return _mi_heap_malloc_zero(heap, newsize, zero);
  size_t size = mi_usable_size(p);
  if (newsize <= size && newsize >= (size / 2)) {
    return p;  // reallocation still fits and is not more than 50% waste
  }
  void* newp = mi_heap_malloc(heap, newsize);
  if (mi_likely(newp != NULL)) {
    if (zero && newsize > size) {
      // also set the last word of the previous allocation to zero to ensure any padding is zero-initialized
      size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
      memset((uint8_t*)newp + start, 0, newsize - start);
    }
    memcpy(newp, p, (newsize > size ? size : newsize));
    mi_free(p);  // only free if successful
  }
  return newp;
}

mi_decl_allocator void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, false);
}

mi_decl_allocator void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_mul_overflow(count, size, &total)) return NULL;
  return mi_heap_realloc(heap, p, total);
}


// Reallocate but free `p` on errors
mi_decl_allocator void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  void* newp = mi_heap_realloc(heap, p, newsize);
  if (newp == NULL && p != NULL) mi_free(p);
  return newp;
}

mi_decl_allocator void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
  return _mi_heap_realloc_zero(heap, p, newsize, true);
}

mi_decl_allocator void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_mul_overflow(count, size, &total)) return NULL;
  return mi_heap_rezalloc(heap, p, total);
}


mi_decl_allocator void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_realloc(mi_get_default_heap(), p, newsize);
}

mi_decl_allocator void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_reallocn(mi_get_default_heap(), p, count, size);
}

// Reallocate but free `p` on errors
mi_decl_allocator void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_reallocf(mi_get_default_heap(), p, newsize);
}
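
// Typical `reallocf` usage (a sketch with a hypothetical `buf`/`capacity`):
// unlike plain `realloc`, the original pointer is freed on failure, so the
// common assignment pattern cannot leak.
//
//   buf = mi_reallocf(buf, 2 * capacity);
//   if (buf == NULL) { /* out of memory; the old buffer was already freed */ }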

mi_decl_allocator void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
  return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}

mi_decl_allocator void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
  return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}



// ------------------------------------------------------
// strdup, strndup, and realpath
// ------------------------------------------------------

// `strdup` using mi_malloc
char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
  if (s == NULL) return NULL;
  size_t n = strlen(s);
  char* t = (char*)mi_heap_malloc(heap, n+1);
  if (t != NULL) memcpy(t, s, n + 1);
  return t;
}

char* mi_strdup(const char* s) mi_attr_noexcept {
  return mi_heap_strdup(mi_get_default_heap(), s);
}

// `strndup` using mi_malloc
char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
  if (s == NULL) return NULL;
  size_t m = strlen(s);
  if (n > m) n = m;
  char* t = (char*)mi_heap_malloc(heap, n+1);
  if (t == NULL) return NULL;
  memcpy(t, s, n);
  t[n] = 0;
  return t;
}

char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
  return mi_heap_strndup(mi_get_default_heap(), s, n);
}
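
// Sketch: `mi_strndup` copies at most `n` characters and always
// NUL-terminates the result.
//
//   char* s = mi_strndup("hello world", 5);  // s == "hello"
//   mi_free(s);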

#ifndef __wasi__
// `realpath` using mi_malloc
#ifdef _WIN32
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
#include <errno.h>
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  // todo: use GetFullPathNameW to allow longer file names
  char buf[PATH_MAX];
  DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
  if (res == 0) {
    errno = GetLastError(); return NULL;
  }
  else if (res > PATH_MAX) {
    errno = EINVAL; return NULL;
  }
  else if (resolved_name != NULL) {
    return resolved_name;
  }
  else {
    return mi_heap_strndup(heap, buf, PATH_MAX);
  }
}
#else
#include <unistd.h>  // pathconf
static size_t mi_path_max(void) {
  static size_t path_max = 0;
  if (path_max == 0) {
    long m = pathconf("/", _PC_PATH_MAX);
    if (m <= 0) path_max = 4096;       // guess
    else if (m < 256) path_max = 256;  // at least 256
    else path_max = m;
  }
  return path_max;
}

char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
  if (resolved_name != NULL) {
    return realpath(fname, resolved_name);
  }
  else {
    size_t n = mi_path_max();
    char* buf = (char*)mi_malloc(n+1);
    if (buf == NULL) return NULL;
    char* rname = realpath(fname, buf);
    char* result = mi_heap_strndup(heap, rname, n);  // ok if `rname == NULL`
    mi_free(buf);
    return result;
  }
}
#endif

char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
  return mi_heap_realpath(mi_get_default_heap(), fname, resolved_name);
}
#endif

/*-------------------------------------------------------
C++ new and new_aligned
The standard requires calling into `get_new_handler` and
throwing a `std::bad_alloc` exception on failure. If we
compile with a C++ compiler we can implement this
precisely. If we use a C compiler we cannot throw a
`bad_alloc` exception and call `exit` instead (i.e. it
does not return).
-------------------------------------------------------*/

#ifdef __cplusplus
#include <new>
static bool mi_try_new_handler(bool nothrow) {
  std::new_handler h = std::get_new_handler();
  if (h == NULL) {
    if (!nothrow) throw std::bad_alloc();
    return false;
  }
  else {
    h();
    return true;
  }
}
#else
#include <errno.h>
#ifndef ENOMEM
#define ENOMEM 12
#endif
typedef void (*std_new_handler_t)(void);

#if (defined(__GNUC__) || defined(__clang__))
std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) {
  return NULL;
}
std_new_handler_t mi_get_new_handler(void) {
  return _ZSt15get_new_handlerv();
}
#else
// note: on Windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`.
std_new_handler_t mi_get_new_handler(void) {
  return NULL;
}
#endif

static bool mi_try_new_handler(bool nothrow) {
  std_new_handler_t h = mi_get_new_handler();
  if (h == NULL) {
    if (!nothrow) exit(ENOMEM);
    return false;
  }
  else {
    h();
    return true;
  }
}
#endif

static mi_decl_noinline void* mi_try_new(size_t n, bool nothrow) {
  void* p = NULL;
  while (p == NULL && mi_try_new_handler(nothrow)) {
    p = mi_malloc(n);
  }
  return p;
}

void* mi_new(size_t n) {
  void* p = mi_malloc(n);
  if (mi_unlikely(p == NULL)) return mi_try_new(n, false);
  return p;
}

void* mi_new_aligned(size_t n, size_t alignment) {
  void* p;
  do { p = mi_malloc_aligned(n, alignment); }
  while (p == NULL && mi_try_new_handler(false));
  return p;
}

void* mi_new_nothrow(size_t n) {
  void* p = mi_malloc(n);
  if (mi_unlikely(p == NULL)) return mi_try_new(n, true);
  return p;
}

void* mi_new_aligned_nothrow(size_t n, size_t alignment) {
  void* p;
  do { p = mi_malloc_aligned(n, alignment); }
  while (p == NULL && mi_try_new_handler(true));
  return p;
}
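
// Sketch of the semantics above (assuming the override layer routes C++
// `operator new` to these functions): `mi_new` either returns memory or, once
// the installed new handler is exhausted, throws `std::bad_alloc` (or exits
// when compiled as C); the `_nothrow` variants return NULL instead.
//
//   void* p = mi_new(64);          // does not return NULL on failure
//   void* q = mi_new_nothrow(64);  // may return NULL
//   mi_free(p);
//   mi_free(q);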