/*----------------------------------------------------------------------------
Copyright (c) 2018, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

/* -----------------------------------------------------------
  The core of the allocator. Every segment contains
  pages of a certain block size. The main function
  exported is `_mi_malloc_generic`.
----------------------------------------------------------- */

#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

/* -----------------------------------------------------------
  Definition of page queues for each block size
----------------------------------------------------------- */

#define MI_IN_PAGE_C
#include "page-queue.c"
#undef MI_IN_PAGE_C


/* -----------------------------------------------------------
  Page helpers
----------------------------------------------------------- */

// Index a block in a page
static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t i) {
  mi_assert_internal(page != NULL);
  mi_assert_internal(i <= page->reserved);
  return (mi_block_t*)((uint8_t*)page_start + (i * page->block_size));
}

static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_stats_t* stats);


#if (MI_DEBUG>1)
static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
  size_t count = 0;
  while (head != NULL) {
    mi_assert_internal(page == _mi_ptr_page(head));
    count++;
    head = mi_block_next(page, head);
  }
  return count;
}

/*
// Start of the page available memory
static inline uint8_t* mi_page_area(const mi_page_t* page) {
  return _mi_page_start(_mi_page_segment(page), page, NULL);
}
*/

static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
  size_t psize;
  uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize);
  mi_block_t* start = (mi_block_t*)page_area;
  mi_block_t* end = (mi_block_t*)(page_area + psize);
  while(p != NULL) {
    if (p < start || p >= end) return false;
    p = mi_block_next(page, p);
  }
  return true;
}

static bool mi_page_is_valid_init(mi_page_t* page) {
  mi_assert_internal(page->block_size > 0);
  mi_assert_internal(page->used <= page->capacity);
  mi_assert_internal(page->capacity <= page->reserved);

  mi_segment_t* segment = _mi_page_segment(page);
  uint8_t* start = _mi_page_start(segment,page,NULL);
  mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL));
  //mi_assert_internal(start + page->capacity*page->block_size == page->top);

  mi_assert_internal(mi_page_list_is_valid(page,page->free));
  mi_assert_internal(mi_page_list_is_valid(page,page->local_free));

  #if MI_DEBUG>3 // generally too expensive to check this
  if (page->flags.is_zero) {
    for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
      mi_assert_expensive(mi_mem_is_zero(block + 1, page->block_size - sizeof(mi_block_t)));
    }
  }
  #endif

  mi_block_t* tfree = mi_tf_block(page->thread_free);
  mi_assert_internal(mi_page_list_is_valid(page, tfree));
  size_t tfree_count = mi_page_list_count(page, tfree);
  mi_assert_internal(tfree_count <= page->thread_freed + 1);

  size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
  mi_assert_internal(page->used + free_count == page->capacity);

  return true;
}

bool _mi_page_is_valid(mi_page_t* page) {
  mi_assert_internal(mi_page_is_valid_init(page));
  #if MI_SECURE
  mi_assert_internal(page->cookie != 0);
  #endif
  if (page->heap!=NULL) {
    mi_segment_t* segment = _mi_page_segment(page);
    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == page->heap->thread_id || segment->thread_id==0);
    if (segment->page_kind != MI_PAGE_HUGE) {
      mi_page_queue_t* pq = mi_page_queue_of(page);
      mi_assert_internal(mi_page_queue_contains(pq, page));
      mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
      mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
    }
  }
  return true;
}
#endif

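// Set the delayed-free state of `page->thread_free` (for example to
// MI_USE_DELAYED_FREE when a page moves to the full queue, or back to
// MI_NO_DELAYED_FREE when it becomes unfull). If another thread is still in
// the MI_DELAYED_FREEING state we yield and retry until it is done; if the
// state is already the requested one (or never-delayed) the CAS is skipped.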
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay) {
  mi_thread_free_t tfree;
  mi_thread_free_t tfreex;

  do {
    tfreex = tfree = page->thread_free;
    if (mi_unlikely(mi_tf_delayed(tfree) < MI_DELAYED_FREEING)) {
      tfreex = mi_tf_set_delayed(tfree,delay);
    }
    else if (mi_unlikely(mi_tf_delayed(tfree) == MI_DELAYED_FREEING)) {
      mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
      continue;          // and try again
    }
  }
  while((mi_tf_delayed(tfreex) != mi_tf_delayed(tfree)) && // avoid atomic operation if already equal
        !mi_atomic_cas_weak(mi_atomic_cast(uintptr_t,&page->thread_free), tfreex, tfree));
}


/* -----------------------------------------------------------
  Page collect the `local_free` and `thread_free` lists
----------------------------------------------------------- */

// Collect the local `thread_free` list using an atomic exchange.
// Note: The exchange must be done atomically as this is used right after
// moving to the full list in `mi_page_collect_ex` and we need to
// ensure that there was no race where the page became unfull just before the move.
static void _mi_page_thread_free_collect(mi_page_t* page)
{
  mi_block_t* head;
  mi_thread_free_t tfree;
  mi_thread_free_t tfreex;
  do {
    tfree = page->thread_free;
    head = mi_tf_block(tfree);
    tfreex = mi_tf_set_block(tfree,NULL);
  } while (!mi_atomic_cas_weak(mi_atomic_cast(uintptr_t,&page->thread_free), tfreex, tfree));

  // return if the list is empty
  if (head == NULL) return;

  // find the tail -- also to get a proper count (without data races)
  uintptr_t max_count = page->capacity; // cannot collect more than capacity
  uintptr_t count = 1;
  mi_block_t* tail = head;
  mi_block_t* next;
  while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
    count++;
    tail = next;
  }
  // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
  if (count > max_count) {
    _mi_fatal_error("corrupted thread-free list\n");
    return; // the thread-free items cannot be freed
  }

  // and append the current local free list
  mi_block_set_next(page,tail, page->local_free);
  page->local_free = head;

  // update counts now
  mi_atomic_subu(&page->thread_freed, count);
  page->used -= count;
}

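// Collect outstanding freed blocks: first the (atomic) `thread_free` list,
// then the thread-local `local_free` list, moving them into `page->free` so
// they become available for allocation again. When `force` is set the local
// free list is appended even if `page->free` is not empty; this is a linear
// operation and is normally only done on shutdown or an explicit collect.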
void _mi_page_free_collect(mi_page_t* page, bool force) {
  mi_assert_internal(page!=NULL);

  // collect the thread free list
  if (force || mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation
    _mi_page_thread_free_collect(page);
  }

  // and the local free list
  if (page->local_free != NULL) {
    if (mi_likely(page->free == NULL)) {
      // usual case
      page->free = page->local_free;
      page->local_free = NULL;
      page->is_zero = false;
    }
    else if (force) {
      // append -- only on shutdown (force) as this is a linear operation
      mi_block_t* tail = page->local_free;
      mi_block_t* next;
      while ((next = mi_block_next(page, tail)) != NULL) {
        tail = next;
      }
      mi_block_set_next(page, tail, page->free);
      page->free = page->local_free;
      page->local_free = NULL;
      page->is_zero = false;
    }
  }

  mi_assert_internal(!force || page->local_free == NULL);
}


/* -----------------------------------------------------------
  Page fresh and retire
----------------------------------------------------------- */

// called from segments when reclaiming abandoned pages
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
  mi_assert_expensive(mi_page_is_valid_init(page));
  mi_assert_internal(page->heap == NULL);
  mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
  _mi_page_free_collect(page,false);
  mi_page_queue_t* pq = mi_page_queue(heap, page->block_size);
  mi_page_queue_push(heap, pq, page);
  mi_assert_expensive(_mi_page_is_valid(page));
}

// allocate a fresh page from a segment
static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
  mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
  mi_page_t* page = _mi_segment_page_alloc(block_size, &heap->tld->segments, &heap->tld->os);
  if (page == NULL) return NULL;
  mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
  mi_page_init(heap, page, block_size, &heap->tld->stats);
  _mi_stat_increase(&heap->tld->stats.pages, 1);
  if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
  mi_assert_expensive(_mi_page_is_valid(page));
  return page;
}

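// Note: before allocating a fresh page we first try to reclaim an abandoned
// segment (from a terminated thread); if that happens to put a page with
// free blocks at the head of our queue we use that page directly.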
// Get a fresh page to use
static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap, pq));

  // try to reclaim an abandoned page first
  mi_page_t* page = pq->first;
  if (!heap->no_reclaim &&
      _mi_segment_try_reclaim_abandoned(heap, false, &heap->tld->segments) &&
      page != pq->first)
  {
    // we reclaimed, and we got lucky with a reclaimed page in our queue
    page = pq->first;
    if (page->free != NULL) return page;
  }
  // otherwise allocate the page
  page = mi_page_fresh_alloc(heap, pq, pq->block_size);
  if (page==NULL) return NULL;
  mi_assert_internal(pq->block_size==page->block_size);
  mi_assert_internal(pq==mi_page_queue(heap,page->block_size));
  return page;
}

/* -----------------------------------------------------------
  Do any delayed frees
  (put there by other threads if they deallocated in a full page)
----------------------------------------------------------- */
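// Free the blocks that other threads pushed onto our `thread_delayed_free`
// list. We atomically take ownership of the entire list first so that other
// threads can keep pushing new delayed frees concurrently while we work
// through it.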
void _mi_heap_delayed_free(mi_heap_t* heap) {
  // take over the list
  mi_block_t* block;
  do {
    block = (mi_block_t*)heap->thread_delayed_free;
  } while (block != NULL && !mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), NULL, block));

  // and free them all
  while(block != NULL) {
    mi_block_t* next = mi_block_nextx(heap, block, heap->cookie);
    // use internal free instead of regular one to keep stats etc correct
    if (!_mi_free_delayed_block(block)) {
      // we might already start delayed freeing while another thread has not yet
      // reset the delayed_freeing flag; in that case delay it further by reinserting.
      mi_block_t* dfree;
      do {
        dfree = (mi_block_t*)heap->thread_delayed_free;
        mi_block_set_nextx(heap, block, dfree, heap->cookie);
      } while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), block, dfree));
    }
    block = next;
  }
}

/* -----------------------------------------------------------
  Unfull, abandon, free and retire
----------------------------------------------------------- */

// Move a page from the full list back to a regular list
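// (The in-full flag is cleared temporarily below so that
//  `mi_heap_page_queue_of` resolves to the regular size-class queue
//  instead of the full queue.)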
void _mi_page_unfull(mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_is_in_full(page));

  _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE);
  if (!mi_page_is_in_full(page)) return;

  mi_heap_t* heap = page->heap;
  mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL];
  mi_page_set_in_full(page, false); // to get the right queue
  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
  mi_page_set_in_full(page, true);
  mi_page_queue_enqueue_from(pq, pqfull, page);
}

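// Move a page to the full queue once it has no more available blocks.
// Delayed freeing is enabled first so that other threads that free a block
// in this page push it onto the heap's `thread_delayed_free` list, which is
// what eventually triggers moving the page out of the full queue again.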
static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(!mi_page_immediate_available(page));
  mi_assert_internal(!mi_page_is_in_full(page));

  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE);
  if (mi_page_is_in_full(page)) return;

  mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page);
  _mi_page_free_collect(page,false);  // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
}


// Abandon a page with used blocks at the end of a thread.
// Note: only call if it is ensured that no references exist from
// the `page->heap->thread_delayed_free` into this page.
// Currently only called through `mi_heap_collect_ex` which ensures this.
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(page->heap != NULL);

#if MI_DEBUG > 1
  mi_heap_t* pheap = (mi_heap_t*)mi_atomic_read_ptr(mi_atomic_cast(void*, &page->heap));
#endif

  // remove from our page list
  mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
  mi_page_queue_remove(pq, page);

  // page is no longer associated with our heap
  mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL);

#if MI_DEBUG>1
  // check there are no references left..
  for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->cookie)) {
    mi_assert_internal(_mi_ptr_page(block) != page);
  }
#endif

  // and abandon it
  mi_assert_internal(page->heap == NULL);
  _mi_segment_page_abandon(page,segments_tld);
}


// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(mi_page_all_free(page));
  #if MI_DEBUG>1
  // check if we can safely free
  mi_thread_free_t free = mi_tf_set_delayed(page->thread_free,MI_NEVER_DELAYED_FREE);
  free = mi_atomic_exchange(&page->thread_free, free);
  mi_assert_internal(mi_tf_delayed(free) != MI_DELAYED_FREEING);
  #endif

  mi_page_set_has_aligned(page, false);

  // account for huge pages here
  // (note: no longer necessary as huge pages are always abandoned)
  if (page->block_size > MI_LARGE_OBJ_SIZE_MAX) {
    if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
      _mi_stat_decrease(&page->heap->tld->stats.giant, page->block_size);
    }
    else {
      _mi_stat_decrease(&page->heap->tld->stats.huge, page->block_size);
    }
  }

  // remove from the page list
  // (no need to do _mi_heap_delayed_free first as all blocks are already free)
  mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
  mi_page_queue_remove(pq, page);

  // and free it
  mi_assert_internal(page->heap == NULL);
  _mi_segment_page_free(page, force, segments_tld);
}

// Retire a page with no more used blocks.
// Important not to retire too quickly though, as new
// allocations might be coming.
// Note: called from `mi_free` and benchmarks often
// trigger this due to freeing everything and then
// allocating again, so be careful when changing this.
void _mi_page_retire(mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_all_free(page));

  mi_page_set_has_aligned(page, false);

  // don't retire too often..
  // (or we end up retiring and re-allocating most of the time)
  // NOTE: refine this more: we should not retire if this
  // is the only page left with free blocks. It is not clear
  // how to check this efficiently though...
  // for now, we don't retire if it is the only page left of this size class.
  mi_page_queue_t* pq = mi_page_queue_of(page);
  if (mi_likely(page->block_size <= (MI_SMALL_SIZE_MAX/4))) {
    // if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
    if (pq->last==page && pq->first==page) {
      mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
      return; // don't retire after all
    }
  }

  _mi_page_free(page, pq, false);
}


/* -----------------------------------------------------------
  Initialize the initial free list in a page.
  In secure mode we initialize a randomized list by
  alternating between slices.
----------------------------------------------------------- */

#define MI_MAX_SLICE_SHIFT  (6)  // at most 64 slices
#define MI_MAX_SLICES       (1UL << MI_MAX_SLICE_SHIFT)
#define MI_MIN_SLICES       (2)
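
// In secure mode the blocks being added are split over up to MI_MAX_SLICES
// contiguous slices, and the free list is threaded by repeatedly picking a
// random slice and taking its next block; this makes the order in which
// blocks are handed out much harder to predict than a sequential list.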
static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t extend, mi_stats_t* const stats) {
  UNUSED(stats);
  #if (MI_SECURE<=2)
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  #endif
  mi_assert_internal(page->capacity + extend <= page->reserved);
  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
  const size_t bsize = page->block_size;

  // initialize a randomized free list
  // set up `slice_count` slices to alternate between
  size_t shift = MI_MAX_SLICE_SHIFT;
  while ((extend >> shift) == 0) {
    shift--;
  }
  const size_t slice_count = (size_t)1U << shift;
  const size_t slice_extend = extend / slice_count;
  mi_assert_internal(slice_extend >= 1);
  mi_block_t* blocks[MI_MAX_SLICES];  // current start of the slice
  size_t counts[MI_MAX_SLICES];       // available objects in the slice
  for (size_t i = 0; i < slice_count; i++) {
    blocks[i] = mi_page_block_at(page, page_area, page->capacity + i*slice_extend);
    counts[i] = slice_extend;
  }
  counts[slice_count-1] += (extend % slice_count);  // final slice holds the modulus too (todo: distribute evenly?)

  // and initialize the free list by randomly threading through them
  // set up first element
  size_t current = _mi_heap_random(heap) % slice_count;
  counts[current]--;
  mi_block_t* const free_start = blocks[current];
  // and iterate through the rest
  uintptr_t rnd = heap->random;
  for (size_t i = 1; i < extend; i++) {
    // call random_shuffle only every INTPTR_SIZE rounds
    const size_t round = i%MI_INTPTR_SIZE;
    if (round == 0) rnd = _mi_random_shuffle(rnd);
    // select a random next slice index
    size_t next = ((rnd >> 8*round) & (slice_count-1));
    while (counts[next]==0) {  // ensure it still has space
      next++;
      if (next==slice_count) next = 0;
    }
    // and link the current block to it
    counts[next]--;
    mi_block_t* const block = blocks[current];
    blocks[current] = (mi_block_t*)((uint8_t*)block + bsize);  // bump to the following block
    mi_block_set_next(page, block, blocks[next]);  // and set next; note: we may have `current == next`
    current = next;
  }
  // prepend to the free list (usually NULL)
  mi_block_set_next(page, blocks[current], page->free);  // end of the list
  page->free = free_start;
  heap->random = _mi_random_shuffle(rnd);
}

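// Extend the free list with a simple sequential run of blocks; used in
// non-secure mode, or when the extension is too small to split into slices.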
static mi_decl_noinline void mi_page_free_list_extend(mi_page_t* const page, const size_t extend, mi_stats_t* const stats)
{
  UNUSED(stats);
  #if (MI_SECURE <= 2)
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  #endif
  mi_assert_internal(page->capacity + extend <= page->reserved);
  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
  const size_t bsize = page->block_size;
  mi_block_t* const start = mi_page_block_at(page, page_area, page->capacity);

  // initialize a sequential free list
  mi_block_t* const last = mi_page_block_at(page, page_area, page->capacity + extend - 1);
  mi_block_t* block = start;
  while(block <= last) {
    mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize);
    mi_block_set_next(page,block,next);
    block = next;
  }
  // prepend to free list (usually `NULL`)
  mi_block_set_next(page, last, page->free);
  page->free = start;
}

/* -----------------------------------------------------------
  Page initialize and extend the capacity
----------------------------------------------------------- */

#define MI_MAX_EXTEND_SIZE  (4*1024)      // heuristic, one OS page seems to work well.
#if (MI_SECURE>0)
#define MI_MIN_EXTEND       (8*MI_SECURE) // extend at least by this many
#else
#define MI_MIN_EXTEND       (1)
#endif

// Extend the capacity (up to reserved) by initializing a free list
// We extend by at most `MI_MAX_EXTEND_SIZE` bytes worth of blocks at a time to avoid touching too much memory
// Note: we also experimented with "bump" allocation on the first
// allocations but this did not speed up any benchmark (due to an
// extra test in malloc? or cache effects?)
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* stats) {
  UNUSED(stats);
  mi_assert_expensive(mi_page_is_valid_init(page));
  #if (MI_SECURE<=2)
  mi_assert(page->free == NULL);
  mi_assert(page->local_free == NULL);
  if (page->free != NULL) return;
  #endif
  if (page->capacity >= page->reserved) return;

  size_t page_size;
  _mi_page_start(_mi_page_segment(page), page, &page_size);
  mi_stat_counter_increase(stats->pages_extended, 1);

  // calculate the extend count
  size_t extend = page->reserved - page->capacity;
  size_t max_extend = (page->block_size >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)page->block_size);
  if (max_extend < MI_MIN_EXTEND) max_extend = MI_MIN_EXTEND;

  if (extend > max_extend) {
    // ensure we don't touch memory beyond the page to reduce page commit.
    // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
    extend = (max_extend==0 ? 1 : max_extend);
  }

  mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
  mi_assert_internal(extend < (1UL<<16));

  // and append the extended blocks to the free list
  if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
    mi_page_free_list_extend(page, extend, stats);
  }
  else {
    mi_page_free_list_extend_secure(heap, page, extend, stats);
  }
  // enable the new free list
  page->capacity += (uint16_t)extend;
  mi_stat_increase(stats->page_committed, extend * page->block_size);

  // extension into zero initialized memory preserves the zero'd free list
  if (!page->is_zero_init) {
    page->is_zero = false;
  }
  mi_assert_expensive(mi_page_is_valid_init(page));
}

// Initialize a fresh page
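// The page is sized for `page->reserved` blocks, but only part of the free
// list is built up front (via `mi_page_extend_free`); the rest is extended
// lazily as allocation demands it so we avoid touching memory that may never
// be used.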
static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_stats_t* stats) {
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert(segment != NULL);
  mi_assert_internal(block_size > 0);
  // set fields
  size_t page_size;
  _mi_segment_page_start(segment, page, block_size, &page_size);
  page->block_size = block_size;
  mi_assert_internal(page_size / block_size < (1L<<16));
  page->reserved = (uint16_t)(page_size / block_size);
  #ifdef MI_ENCODE_FREELIST
  page->cookie = _mi_heap_random(heap) | 1;
  #endif
  page->is_zero = page->is_zero_init;

  mi_assert_internal(page->capacity == 0);
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->used == 0);
  mi_assert_internal(page->thread_free == 0);
  mi_assert_internal(page->thread_freed == 0);
  mi_assert_internal(page->next == NULL);
  mi_assert_internal(page->prev == NULL);
  mi_assert_internal(!mi_page_has_aligned(page));
  #if (MI_ENCODE_FREELIST)
  mi_assert_internal(page->cookie != 0);
  #endif
  mi_assert_expensive(mi_page_is_valid_init(page));

  // initialize an initial free list
  mi_page_extend_free(heap,page,stats);
  mi_assert(mi_page_immediate_available(page));
}


/* -----------------------------------------------------------
  Find pages with free blocks
-------------------------------------------------------------*/

// Find a page with free blocks of `page->block_size`.
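// Pages are visited in "next fit" order: pending frees are collected, pages
// with remaining reserved space are extended, and completely full pages are
// moved to the full queue. A completely free page is kept aside in `rpage`
// as a fallback while the search continues (at most 8 such pages are
// processed per search to bound allocation time); it is freed again if a
// better page is found.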
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq)
{
  // search through the pages in "next fit" order
  mi_page_t* rpage = NULL;
  size_t count = 0;
  size_t page_free_count = 0;
  mi_page_t* page = pq->first;
  while (page != NULL)
  {
    mi_page_t* next = page->next; // remember next
    count++;

    // 0. collect freed blocks by us and other threads
    _mi_page_free_collect(page,false);

    // 1. if the page contains free blocks, we are done
    if (mi_page_immediate_available(page)) {
      // If all blocks are free, we might retire this page instead.
      // do this at most 8 times to bound allocation time.
      // (note: this can happen if a page was earlier not retired due
      //  to having neighbours that were mostly full or due to concurrent frees)
      if (page_free_count < 8 && mi_page_all_free(page)) {
        page_free_count++;
        if (rpage != NULL) _mi_page_free(rpage,pq,false);
        rpage = page;
        page = next;
        continue; // and keep looking
      }
      else {
        break;    // pick this one
      }
    }

    // 2. Try to extend
    if (page->capacity < page->reserved) {
      mi_page_extend_free(heap, page, &heap->tld->stats);
      mi_assert_internal(mi_page_immediate_available(page));
      break;
    }

    // 3. If the page is completely full, move it to the `mi_pages_full`
    // queue so we don't visit long-lived pages too often.
    mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
    mi_page_to_full(page,pq);

    page = next;
  } // for each page

  mi_stat_counter_increase(heap->tld->stats.searches,count);

  if (page == NULL) {
    page = rpage;
    rpage = NULL;
  }
  if (rpage != NULL) {
    _mi_page_free(rpage,pq,false);
  }

  if (page == NULL) {
    page = mi_page_fresh(heap, pq);
  }
  else {
    mi_assert(pq->first == page);
  }
  mi_assert_internal(page == NULL || mi_page_immediate_available(page));
  return page;
}


// Find a page with free blocks of `size`.
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
  mi_page_queue_t* pq = mi_page_queue(heap,size);
  mi_page_t* page = pq->first;
  if (page != NULL) {
    if ((MI_SECURE >= 3) && page->capacity < page->reserved && ((_mi_heap_random(heap) & 1) == 1)) {
      // in secure mode, we extend half the time to increase randomness
      mi_page_extend_free(heap, page, &heap->tld->stats);
      mi_assert_internal(mi_page_immediate_available(page));
    }
    else {
      _mi_page_free_collect(page,false);
    }
    if (mi_page_immediate_available(page)) {
      return page; // fast path
    }
  }
  return mi_page_queue_find_free_ex(heap, pq);
}


/* -----------------------------------------------------------
  Users can register a deferred free function called
  when the `free` list is empty. Since the `local_free`
  is separate this is deterministically called after
  a certain number of allocations.
----------------------------------------------------------- */

static mi_deferred_free_fun* volatile deferred_free = NULL;

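// Invoke the registered deferred-free callback (if any). The `heartbeat`
// counter gives the callback a monotonically increasing notion of allocation
// activity, and the `recurse` flag prevents re-entrancy in case the callback
// itself allocates or frees.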
void _mi_deferred_free(mi_heap_t* heap, bool force) {
  heap->tld->heartbeat++;
  if (deferred_free != NULL && !heap->tld->recurse) {
    heap->tld->recurse = true;
    deferred_free(force, heap->tld->heartbeat);
    heap->tld->recurse = false;
  }
}

void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
  deferred_free = fn;
}


/* -----------------------------------------------------------
  General allocation
----------------------------------------------------------- */

// A huge page is allocated directly without being in a queue.
// Because huge pages contain just one block, and the segment contains
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
  size_t block_size = _mi_os_good_alloc_size(size);
  mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
  mi_page_t* page = mi_page_fresh_alloc(heap, NULL, block_size);
  if (page != NULL) {
    mi_assert_internal(mi_page_immediate_available(page));
    mi_assert_internal(page->block_size == block_size);
    mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
    mi_assert_internal(_mi_page_segment(page)->used==1);
    mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
    mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL);

    if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
      _mi_stat_increase(&heap->tld->stats.giant, block_size);
      _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1);
    }
    else {
      _mi_stat_increase(&heap->tld->stats.huge, block_size);
      _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
    }
  }
  return page;
}


// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
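// It initializes the heap if needed, runs pending deferred and delayed
// frees, and then finds (or allocates) a page with available blocks before
// retrying the allocation through `_mi_page_malloc`.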
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
{
  mi_assert_internal(heap != NULL);

  // initialize if necessary
  if (mi_unlikely(!mi_heap_is_initialized(heap))) {
    mi_thread_init(); // calls `_mi_heap_init` in turn
    heap = mi_get_default_heap();
  }
  mi_assert_internal(mi_heap_is_initialized(heap));

  // call potential deferred free routines
  _mi_deferred_free(heap, false);

  // free delayed frees from other threads
  _mi_heap_delayed_free(heap);

  // huge allocation?
  mi_page_t* page;
  if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) {
    if (mi_unlikely(size > PTRDIFF_MAX)) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
      page = NULL;
    }
    else {
      page = mi_huge_page_alloc(heap,size);
    }
  }
  else {
    // otherwise find a page with free blocks in our size segregated queues
    page = mi_find_free_page(heap,size);
  }
  if (page == NULL) return NULL; // out of memory

  mi_assert_internal(mi_page_immediate_available(page));
  mi_assert_internal(page->block_size >= size);

  // and try again, this time succeeding! (i.e. this should never recurse)
  return _mi_page_malloc(heap, page, size);
}