/* ----------------------------------------------------------------------------
Copyright (c) 2018, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // memset
#include <stdio.h>

#define MI_PAGE_HUGE_ALIGN  (256*1024)

/* -----------------------------------------------------------
  Segment allocation
  We allocate pages inside big OS-allocated "segments"
  (4mb on 64-bit). This is to avoid splitting VMAs on Linux
  and to reduce fragmentation on other OSes. Each thread
  owns its own segments.

  Currently we have:
  - small pages (64kb), 64 in one segment
  - medium pages (512kb), 8 in one segment
  - large pages (4mb), 1 in one segment
  - huge blocks > MI_LARGE_OBJ_SIZE_MAX (512kb) are directly allocated by the OS

  In any case the memory for a segment is virtual and is only
  committed on demand (i.e. we are careful not to touch the memory
  until we actually allocate a block there).

  If a thread ends, it "abandons" pages that still contain used blocks;
  such segments go on an abandoned segment list from which still-running
  threads can reclaim them, much like work-stealing.
----------------------------------------------------------- */
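
// For example, with the sizes above a 4mb segment holds 4mb/64kb = 64 small
// pages, 4mb/512kb = 8 medium pages, or exactly 1 large page (see
// `mi_segment_alloc` below where `capacity = MI_SEGMENT_SIZE / page_size`).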


/* -----------------------------------------------------------
  Queue of segments containing free pages
----------------------------------------------------------- */


#if (MI_DEBUG>1)
static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, mi_segment_t* segment) {
  mi_assert_internal(segment != NULL);
  mi_segment_t* list = queue->first;
  while (list != NULL) {
    if (list == segment) break;
    mi_assert_internal(list->next==NULL || list->next->prev == list);
    mi_assert_internal(list->prev==NULL || list->prev->next == list);
    list = list->next;
  }
  return (list == segment);
}
#endif

static bool mi_segment_queue_is_empty(const mi_segment_queue_t* queue) {
  return (queue->first == NULL);
}

static void mi_segment_queue_remove(mi_segment_queue_t* queue, mi_segment_t* segment) {
  mi_assert_expensive(mi_segment_queue_contains(queue, segment));
  if (segment->prev != NULL) segment->prev->next = segment->next;
  if (segment->next != NULL) segment->next->prev = segment->prev;
  if (segment == queue->first) queue->first = segment->next;
  if (segment == queue->last) queue->last = segment->prev;
  segment->next = NULL;
  segment->prev = NULL;
}

static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment) {
  mi_assert_expensive(!mi_segment_queue_contains(queue, segment));
  segment->next = NULL;
  segment->prev = queue->last;
  if (queue->last != NULL) {
    mi_assert_internal(queue->last->next == NULL);
    queue->last->next = segment;
    queue->last = segment;
  }
  else {
    queue->last = queue->first = segment;
  }
}
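
// Note: `mi_segment_enqueue` appends at the tail while allocation
// (`mi_segment_page_alloc` below) always takes `queue->first`, so segments
// with free pages are reused in FIFO order.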

static mi_segment_queue_t* mi_segment_free_queue_of_kind(mi_page_kind_t kind, mi_segments_tld_t* tld) {
  if (kind == MI_PAGE_SMALL) return &tld->small_free;
  else if (kind == MI_PAGE_MEDIUM) return &tld->medium_free;
  else return NULL;
}

static mi_segment_queue_t* mi_segment_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
  return mi_segment_free_queue_of_kind(segment->page_kind, tld);
}

// remove from free queue if it is in one
static void mi_segment_remove_from_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld); // may be NULL
  bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
  if (in_queue) {
    mi_segment_queue_remove(queue, segment);
  }
}

static void mi_segment_insert_in_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_segment_enqueue(mi_segment_free_queue(segment, tld), segment);
}


/* -----------------------------------------------------------
  Invariant checking
----------------------------------------------------------- */

#if (MI_DEBUG > 1)
static bool mi_segment_is_in_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld);
  bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
  if (in_queue) {
    mi_assert_expensive(mi_segment_queue_contains(queue, segment));
  }
  return in_queue;
}

static size_t mi_segment_pagesize(mi_segment_t* segment) {
  return ((size_t)1 << segment->page_shift);
}

static bool mi_segment_is_valid(mi_segment_t* segment) {
  mi_assert_internal(segment != NULL);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(segment->used <= segment->capacity);
  mi_assert_internal(segment->abandoned <= segment->used);
  size_t nfree = 0;
  for (size_t i = 0; i < segment->capacity; i++) {
    if (!segment->pages[i].segment_in_use) nfree++;
  }
  mi_assert_internal(nfree + segment->used == segment->capacity);
  mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
  mi_assert_internal(segment->page_kind == MI_PAGE_HUGE ||
                     (mi_segment_pagesize(segment) * segment->capacity == segment->segment_size));
  return true;
}
#endif

/* -----------------------------------------------------------
  Segment size calculations
----------------------------------------------------------- */

// Start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size)
{
  size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
  uint8_t* p = (uint8_t*)segment + page->segment_idx*psize;

  if (page->segment_idx == 0) {
    // the first page starts after the segment info (and possible guard page)
    p += segment->segment_info_size;
    psize -= segment->segment_info_size;
    // for small and medium objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
    if (block_size > 0 && segment->page_kind <= MI_PAGE_MEDIUM) {
      size_t adjust = block_size - ((uintptr_t)p % block_size);
      if (adjust < block_size) {
        p += adjust;
        psize -= adjust;
      }
      mi_assert_internal((uintptr_t)p % block_size == 0);
    }
  }

  if (MI_SECURE > 1 || (MI_SECURE == 1 && page->segment_idx == segment->capacity - 1)) {
    // secure == 1: the last page has an os guard page at the end
    // secure >  1: every page has an os guard page
    psize -= _mi_os_page_size();
  }

  if (page_size != NULL) *page_size = psize;
  mi_assert_internal(_mi_ptr_page(p) == page);
  mi_assert_internal(_mi_ptr_segment(p) == segment);
  return p;
}
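
// For example, if `block_size` is 48 and the first page would otherwise start
// 16 bytes past a 48-byte boundary, then `adjust = 48 - 16 = 32` and the page
// start moves up by 32 bytes; if it is already aligned, `adjust` equals
// `block_size` and no adjustment is made.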

static size_t mi_segment_size(size_t capacity, size_t required, size_t* pre_size, size_t* info_size) {
  /*
  if (mi_option_is_enabled(mi_option_secure)) {
    // always reserve maximally so the protection falls on
    // the same address area, as we need to reuse them from the caches interchangeably.
    capacity = MI_SMALL_PAGES_PER_SEGMENT;
  }
  */
  const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
  size_t guardsize = 0;
  size_t isize = 0;

  if (MI_SECURE == 0) {
    // normally no guard pages
    isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
  }
  else {
    // in secure mode, we set up a protected page in between the segment info
    // and the page data (and one at the end of the segment)
    const size_t page_size = _mi_os_page_size();
    isize = _mi_align_up(minsize, page_size);
    guardsize = page_size;
    required = _mi_align_up(required, page_size);
  }

  if (info_size != NULL) *info_size = isize;
  if (pre_size != NULL) *pre_size = isize + guardsize;
  return (required==0 ? MI_SEGMENT_SIZE : _mi_align_up(required + isize + 2*guardsize, MI_PAGE_HUGE_ALIGN));
}
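
// For example, for a regular small/medium/large segment (`required == 0`) this
// simply returns MI_SEGMENT_SIZE; `info_size` covers the segment struct plus the
// per-page meta-data (rounded up for alignment) and `pre_size` additionally
// accounts for a guard page in secure mode. Only for huge segments (`required > 0`)
// is the size derived from `required` itself, rounded up to MI_PAGE_HUGE_ALIGN.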


/* ----------------------------------------------------------------------------
Segment caches
We keep a small segment cache per thread to increase local
reuse and avoid setting/clearing guard pages in secure mode.
------------------------------------------------------------------------------- */

static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
  if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
  else _mi_stat_decrease(&tld->stats->segments,1);
  tld->count += (segment_size >= 0 ? 1 : -1);
  if (tld->count > tld->peak_count) tld->peak_count = tld->count;
  tld->current_size += segment_size;
  if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
}


static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
  segment->thread_id = 0;
  mi_segments_track_size(-((long)segment_size),tld);
  if (MI_SECURE != 0) {
    mi_assert_internal(!segment->mem_is_fixed);
    _mi_mem_unprotect(segment, segment->segment_size); // ensure no more guard pages are set
  }
  _mi_mem_free(segment, segment_size, segment->memid, tld->stats);
}

// The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use.
#define MI_SEGMENT_CACHE_FRACTION (8)
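
// In effect at most min(mi_option_segment_cache, 1 + peak_count/MI_SEGMENT_CACHE_FRACTION)
// segments are cached per thread (see `mi_segment_cache_full` below); the last
// remaining segment of a thread can always be cached.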

// note: returned segment may be partially reset
static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t* tld) {
  if (segment_size != 0 && segment_size != MI_SEGMENT_SIZE) return NULL;
  mi_segment_t* segment = tld->cache;
  if (segment == NULL) return NULL;
  tld->cache_count--;
  tld->cache = segment->next;
  segment->next = NULL;
  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
  _mi_stat_decrease(&tld->stats->segments_cache, 1);
  return segment;
}

static bool mi_segment_cache_full(mi_segments_tld_t* tld)
{
  if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
  size_t max_cache = mi_option_get(mi_option_segment_cache);
  if (tld->cache_count < max_cache
      && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION)) // at least allow a 1 element cache
     ) {
    return false;
  }
  // take the opportunity to reduce the segment cache if it is too large (now)
  // TODO: this never happens as we check against peak usage, should we use current usage instead?
  while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
    mi_segment_t* segment = mi_segment_cache_pop(0,tld);
    mi_assert_internal(segment != NULL);
    if (segment != NULL) mi_segment_os_free(segment, segment->segment_size, tld);
  }
  return true;
}

static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(!mi_segment_is_in_free_queue(segment, tld));
  mi_assert_internal(segment->next == NULL);
  if (segment->segment_size != MI_SEGMENT_SIZE || mi_segment_cache_full(tld)) {
    return false;
  }
  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
  if (!segment->mem_is_fixed && mi_option_is_enabled(mi_option_cache_reset)) {
    _mi_mem_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->stats);
  }
  segment->next = tld->cache;
  tld->cache = segment;
  tld->cache_count++;
  _mi_stat_increase(&tld->stats->segments_cache,1);
  return true;
}

// called by threads that are terminating to free cached segments
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
  mi_segment_t* segment;
  while ((segment = mi_segment_cache_pop(0,tld)) != NULL) {
    mi_segment_os_free(segment, segment->segment_size, tld);
  }
  mi_assert_internal(tld->cache_count == 0);
  mi_assert_internal(tld->cache == NULL);
}


/* -----------------------------------------------------------
  Segment allocation
----------------------------------------------------------- */

// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`.
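// (It first tries to reuse a segment from the thread-local cache, and only
// otherwise allocates fresh aligned memory via `_mi_mem_alloc_aligned`.)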
static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  // calculate needed sizes first
  size_t capacity;
  if (page_kind == MI_PAGE_HUGE) {
    mi_assert_internal(page_shift == MI_SEGMENT_SHIFT && required > 0);
    capacity = 1;
  }
  else {
    mi_assert_internal(required == 0);
    size_t page_size = (size_t)1 << page_shift;
    capacity = MI_SEGMENT_SIZE / page_size;
    mi_assert_internal(MI_SEGMENT_SIZE % page_size == 0);
    mi_assert_internal(capacity >= 1 && capacity <= MI_SMALL_PAGES_PER_SEGMENT);
  }
  size_t info_size;
  size_t pre_size;
  size_t segment_size = mi_segment_size(capacity, required, &pre_size, &info_size);
  mi_assert_internal(segment_size >= required);
  size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);

  // Try to get it from our thread local cache first
  bool eager_delay = (tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
  bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
  bool commit = eager || (page_kind > MI_PAGE_MEDIUM);
  bool protection_still_good = false;
  bool is_zero = false;
  mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
  if (segment != NULL) {
    if (MI_SECURE!=0) {
      mi_assert_internal(!segment->mem_is_fixed);
      if (segment->page_kind != page_kind) {
        _mi_mem_unprotect(segment, segment->segment_size); // reset protection if the page kind differs
      }
      else {
        protection_still_good = true; // otherwise, the guard pages are still in place
      }
    }
    if (!segment->mem_is_committed && page_kind > MI_PAGE_MEDIUM) {
      mi_assert_internal(!segment->mem_is_fixed);
      _mi_mem_commit(segment, segment->segment_size, &is_zero, tld->stats);
      segment->mem_is_committed = true;
    }
    if (!segment->mem_is_fixed &&
        (mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset))) {
      bool reset_zero = false;
      _mi_mem_unreset(segment, segment->segment_size, &reset_zero, tld->stats);
      if (reset_zero) is_zero = true;
    }
  }
  else {
    // Allocate the segment from the OS
    size_t memid;
    bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_zero, &memid, os_tld);
    if (segment == NULL) return NULL;  // failed to allocate
    if (!commit) {
      // ensure the initial info is committed
      bool commit_zero = false;
      _mi_mem_commit(segment, info_size, &commit_zero, tld->stats);
      if (commit_zero) is_zero = true;
    }
    segment->memid = memid;
    segment->mem_is_fixed = mem_large;
    segment->mem_is_committed = commit;
    mi_segments_track_size((long)segment_size, tld);
  }
  mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);

  // zero the segment info (but not the `mem` fields)
  ptrdiff_t ofs = offsetof(mi_segment_t,next);
  memset((uint8_t*)segment + ofs, 0, info_size - ofs);

  // guard pages
  if ((MI_SECURE != 0) && !protection_still_good) {
    // in secure mode, we set up a protected page in between the segment info
    // and the page data
    mi_assert_internal(info_size == pre_size - _mi_os_page_size() && info_size % _mi_os_page_size() == 0);
    _mi_mem_protect((uint8_t*)segment + info_size, (pre_size - info_size));
    size_t os_page_size = _mi_os_page_size();
    if (MI_SECURE <= 1) {
      // and protect the last page too
      _mi_mem_protect((uint8_t*)segment + segment_size - os_page_size, os_page_size);
    }
    else {
      // protect every page
      for (size_t i = 0; i < capacity; i++) {
        _mi_mem_protect((uint8_t*)segment + (i+1)*page_size - os_page_size, os_page_size);
      }
    }
  }

  // initialize
  segment->page_kind = page_kind;
  segment->capacity = capacity;
  segment->page_shift = page_shift;
  segment->segment_size = segment_size;
  segment->segment_info_size = pre_size;
  segment->thread_id = _mi_thread_id();
  segment->cookie = _mi_ptr_cookie(segment);
  for (uint8_t i = 0; i < segment->capacity; i++) {
    segment->pages[i].segment_idx = i;
    segment->pages[i].is_reset = false;
    segment->pages[i].is_committed = commit;
    segment->pages[i].is_zero_init = is_zero;
  }
  _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
  //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
  return segment;
}


static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
  UNUSED(force);
  //fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment);
  mi_assert(segment != NULL);
  mi_segment_remove_from_free_queue(segment,tld);

  mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
  mi_assert_expensive(!mi_segment_queue_contains(&tld->medium_free, segment));
  mi_assert(segment->next == NULL);
  mi_assert(segment->prev == NULL);
  _mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);

  // update reset memory statistics
  /*
  for (uint8_t i = 0; i < segment->capacity; i++) {
    mi_page_t* page = &segment->pages[i];
    if (page->is_reset) {
      page->is_reset = false;
      mi_stat_decrease( tld->stats->reset,mi_page_size(page));
    }
  }
  */

  if (!force && mi_segment_cache_push(segment, tld)) {
    // it is put in our cache
  }
  else {
    // otherwise return it to the OS
    mi_segment_os_free(segment, segment->segment_size, tld);
  }
}

/* -----------------------------------------------------------
  Free page management inside a segment
----------------------------------------------------------- */


static bool mi_segment_has_free(const mi_segment_t* segment) {
  return (segment->used < segment->capacity);
}

static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_stats_t* stats) {
  mi_assert_internal(mi_segment_has_free(segment));
  mi_assert_expensive(mi_segment_is_valid(segment));
  for (size_t i = 0; i < segment->capacity; i++) {
    mi_page_t* page = &segment->pages[i];
    if (!page->segment_in_use) {
      if (page->is_reset || !page->is_committed) {
        size_t psize;
        uint8_t* start = _mi_page_start(segment, page, &psize);
        if (!page->is_committed) {
          mi_assert_internal(!segment->mem_is_fixed);
          page->is_committed = true;
          bool is_zero = false;
          _mi_mem_commit(start,psize,&is_zero,stats);
          if (is_zero) page->is_zero_init = true;
        }
        if (page->is_reset) {
          mi_assert_internal(!segment->mem_is_fixed);
          page->is_reset = false;
          bool is_zero = false;
          _mi_mem_unreset(start, psize, &is_zero, stats);
          if (is_zero) page->is_zero_init = true;
        }
      }
      return page;
    }
  }
  mi_assert(false);
  return NULL;
}


/* -----------------------------------------------------------
  Free
----------------------------------------------------------- */

static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);

static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_stats_t* stats) {
  UNUSED(stats);
  mi_assert_internal(page->segment_in_use);
  mi_assert_internal(mi_page_all_free(page));
  mi_assert_internal(page->is_committed);
  size_t inuse = page->capacity * page->block_size;
  _mi_stat_decrease(&stats->page_committed, inuse);
  _mi_stat_decrease(&stats->pages, 1);

  // reset the page memory to reduce memory pressure?
  if (!segment->mem_is_fixed && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
    size_t psize;
    uint8_t* start = _mi_page_start(segment, page, &psize);
    page->is_reset = true;
    _mi_mem_reset(start, psize, stats);
  }

  // zero the page data, but not the segment fields
  page->is_zero_init = false;
  ptrdiff_t ofs = offsetof(mi_page_t,capacity);
  memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
  page->segment_in_use = false;
  segment->used--;
}

void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
{
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_expensive(mi_segment_is_valid(segment));

  // mark it as free now
  mi_segment_page_clear(segment, page, tld->stats);

  if (segment->used == 0) {
    // no more used pages; remove from the free list and free the segment
    mi_segment_free(segment, force, tld);
  }
  else {
    if (segment->used == segment->abandoned) {
      // only abandoned pages; remove from free list and abandon
      mi_segment_abandon(segment,tld);
    }
    else if (segment->used + 1 == segment->capacity) {
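      // the segment was full (and therefore not in a free queue) until this
      // page was freed, so it needs to be put back on the free queue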
      mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); // for now we only support small and medium pages
      // move back to segments free list
      mi_segment_insert_in_free_queue(segment,tld);
    }
  }
}


/* -----------------------------------------------------------
  Abandonment
----------------------------------------------------------- */

// When threads terminate, they can leave segments with
// live blocks (reached through other threads). Such segments
// are "abandoned" and will be reclaimed by other threads to
// reuse their pages and/or free them eventually
static volatile _Atomic(mi_segment_t*) abandoned; // = NULL;
static volatile _Atomic(uintptr_t) abandoned_count; // = 0;
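// The abandoned list is maintained as a lock-free singly-linked stack (a
// Treiber stack), linked through `abandoned_next` and updated with the
// CAS loops in `mi_segment_abandon` and `_mi_segment_try_reclaim_abandoned`.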

static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(segment->used == segment->abandoned);
  mi_assert_internal(segment->used > 0);
  mi_assert_internal(segment->abandoned_next == NULL);
  mi_assert_expensive(mi_segment_is_valid(segment));

  // remove the segment from the free page queue if needed
  mi_segment_remove_from_free_queue(segment,tld);
  mi_assert_internal(segment->next == NULL && segment->prev == NULL);

  // all pages in the segment are abandoned; add it to the abandoned list
  _mi_stat_increase(&tld->stats->segments_abandoned, 1);
  mi_segments_track_size(-((long)segment->segment_size), tld);
  segment->thread_id = 0;
  mi_segment_t* next;
  do {
    next = (mi_segment_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*,&abandoned));
    mi_atomic_write_ptr(mi_atomic_cast(void*,&segment->abandoned_next), next);
  } while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&abandoned), segment, next));
  mi_atomic_increment(&abandoned_count);
}

void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_expensive(mi_segment_is_valid(segment));
  segment->abandoned++;
  _mi_stat_increase(&tld->stats->pages_abandoned, 1);
  mi_assert_internal(segment->abandoned <= segment->used);
  if (segment->used == segment->abandoned) {
    // all pages are abandoned, abandon the entire segment
    mi_segment_abandon(segment,tld);
  }
}

bool _mi_segment_try_reclaim_abandoned(mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld) {
  uintptr_t reclaimed = 0;
  uintptr_t atmost;
  if (try_all) {
    atmost = abandoned_count+16; // close enough
  }
  else {
    atmost = abandoned_count/8; // at most 1/8th of all outstanding (estimated)
    if (atmost < 8) atmost = 8; // but at least 8
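    // e.g. with 100 outstanding abandoned segments, a regular call visits
    // at most max(100/8, 8) = 12 of them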
  }

  // try to reclaim up to `atmost` abandoned segments
  while (atmost > reclaimed) {
    // try to claim the head of the abandoned segments
    mi_segment_t* segment;
    do {
      segment = (mi_segment_t*)abandoned;
    } while (segment != NULL && !mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&abandoned), (mi_segment_t*)segment->abandoned_next, segment));
    if (segment==NULL) break; // stop early if no more segments available

    // got it.
    mi_atomic_decrement(&abandoned_count);
    segment->thread_id = _mi_thread_id();
    segment->abandoned_next = NULL;
    mi_segments_track_size((long)segment->segment_size,tld);
    mi_assert_internal(segment->next == NULL && segment->prev == NULL);
    mi_assert_expensive(mi_segment_is_valid(segment));
    _mi_stat_decrease(&tld->stats->segments_abandoned,1);

    // add its abandoned pages to the current thread
    mi_assert(segment->abandoned == segment->used);
    for (size_t i = 0; i < segment->capacity; i++) {
      mi_page_t* page = &segment->pages[i];
      if (page->segment_in_use) {
        segment->abandoned--;
        mi_assert(page->next == NULL);
        _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
        if (mi_page_all_free(page)) {
          // if everything free by now, free the page
          mi_segment_page_clear(segment,page,tld->stats);
        }
        else {
          // otherwise reclaim it
          _mi_page_reclaim(heap,page);
        }
      }
    }
    mi_assert(segment->abandoned == 0);
    if (segment->used == 0) { // due to page_clear
      mi_segment_free(segment,false,tld);
    }
    else {
      reclaimed++;
      // add its free pages to the current thread's free small segment queue
      if (segment->page_kind <= MI_PAGE_MEDIUM && mi_segment_has_free(segment)) {
        mi_segment_insert_in_free_queue(segment,tld);
      }
    }
  }
  return (reclaimed>0);
}


/* -----------------------------------------------------------
  Small page allocation
----------------------------------------------------------- */

// Allocate a small page inside a segment.
// Requires that the segment has free pages.
static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(mi_segment_has_free(segment));
  mi_page_t* page = mi_segment_find_free(segment, tld->stats);
  page->segment_in_use = true;
  segment->used++;
  mi_assert_internal(segment->used <= segment->capacity);
  if (segment->used == segment->capacity) {
    // if no more free pages, remove from the queue
    mi_assert_internal(!mi_segment_has_free(segment));
    mi_segment_remove_from_free_queue(segment,tld);
  }
  return page;
}

static mi_page_t* mi_segment_page_alloc(mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_segment_queue_t* free_queue = mi_segment_free_queue_of_kind(kind,tld);
  if (mi_segment_queue_is_empty(free_queue)) {
    mi_segment_t* segment = mi_segment_alloc(0,kind,page_shift,tld,os_tld);
    if (segment == NULL) return NULL;
    mi_segment_enqueue(free_queue, segment);
  }
  mi_assert_internal(free_queue->first != NULL);
  return mi_segment_page_alloc_in(free_queue->first,tld);
}

static mi_page_t* mi_segment_small_page_alloc(mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  return mi_segment_page_alloc(MI_PAGE_SMALL,MI_SMALL_PAGE_SHIFT,tld,os_tld);
}

static mi_page_t* mi_segment_medium_page_alloc(mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  return mi_segment_page_alloc(MI_PAGE_MEDIUM, MI_MEDIUM_PAGE_SHIFT, tld, os_tld);
}

/* -----------------------------------------------------------
  large page allocation
----------------------------------------------------------- */

static mi_page_t* mi_segment_large_page_alloc(mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_segment_t* segment = mi_segment_alloc(0,MI_PAGE_LARGE,MI_LARGE_PAGE_SHIFT,tld,os_tld);
  if (segment == NULL) return NULL;
  segment->used = 1;
  mi_page_t* page = &segment->pages[0];
  page->segment_in_use = true;
  return page;
}

static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT,tld,os_tld);
  if (segment == NULL) return NULL;
  mi_assert_internal(segment->segment_size - segment->segment_info_size >= size);
  segment->used = 1;
  segment->thread_id = 0; // huge pages are immediately abandoned
  mi_page_t* page = &segment->pages[0];
  page->segment_in_use = true;
  return page;
}

/* -----------------------------------------------------------
  Page allocation and free
----------------------------------------------------------- */

mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_page_t* page;
  if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
    page = mi_segment_small_page_alloc(tld,os_tld);
  }
  else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    page = mi_segment_medium_page_alloc(tld, os_tld);
  }
  else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
    page = mi_segment_large_page_alloc(tld, os_tld);
  }
  else {
    page = mi_segment_huge_page_alloc(block_size,tld,os_tld);
  }
  mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page)));
  return page;
}
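
/* A hedged usage sketch (for illustration only; not part of this file): the page
   layer is expected to obtain fresh pages roughly like

     mi_page_t* page = _mi_segment_page_alloc(block_size, &heap->tld->segments, &heap->tld->os);
     if (page == NULL) return NULL;  // out of memory

   where the exact call site and the `tld` field names are assumptions made here
   for illustration. */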