1#define JEMALLOC_EXTENT_C_
2#include "jemalloc/internal/jemalloc_preamble.h"
3#include "jemalloc/internal/jemalloc_internal_includes.h"
4
5#include "jemalloc/internal/assert.h"
6#include "jemalloc/internal/extent_dss.h"
7#include "jemalloc/internal/extent_mmap.h"
8#include "jemalloc/internal/ph.h"
9#include "jemalloc/internal/rtree.h"
10#include "jemalloc/internal/mutex.h"
11#include "jemalloc/internal/mutex_pool.h"
12
13/******************************************************************************/
14/* Data. */
15
16rtree_t extents_rtree;
17/* Keyed by the address of the extent_t being protected. */
18mutex_pool_t extent_mutex_pool;
19
20size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
21
22static const bitmap_info_t extents_bitmap_info =
23 BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
24
25static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26 size_t size, size_t alignment, bool *zero, bool *commit,
27 unsigned arena_ind);
28static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29 size_t size, bool committed, unsigned arena_ind);
30static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31 size_t size, bool committed, unsigned arena_ind);
32static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33 size_t size, size_t offset, size_t length, unsigned arena_ind);
34static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36 size_t length, bool growing_retained);
37static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39#ifdef PAGES_CAN_PURGE_LAZY
40static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41 size_t size, size_t offset, size_t length, unsigned arena_ind);
42#endif
43static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45 size_t length, bool growing_retained);
46#ifdef PAGES_CAN_PURGE_FORCED
47static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
49#endif
50static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52 size_t length, bool growing_retained);
53#ifdef JEMALLOC_MAPS_COALESCE
54static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
55 size_t size, size_t size_a, size_t size_b, bool committed,
56 unsigned arena_ind);
57#endif
58static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
59 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
60 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
61 bool growing_retained);
62#ifdef JEMALLOC_MAPS_COALESCE
63static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
64 size_t size_a, void *addr_b, size_t size_b, bool committed,
65 unsigned arena_ind);
66#endif
67static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
68 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
69 bool growing_retained);
70
71const extent_hooks_t extent_hooks_default = {
72 extent_alloc_default,
73 extent_dalloc_default,
74 extent_destroy_default,
75 extent_commit_default,
76 extent_decommit_default
77#ifdef PAGES_CAN_PURGE_LAZY
78 ,
79 extent_purge_lazy_default
80#else
81 ,
82 NULL
83#endif
84#ifdef PAGES_CAN_PURGE_FORCED
85 ,
86 extent_purge_forced_default
87#else
88 ,
89 NULL
90#endif
91#ifdef JEMALLOC_MAPS_COALESCE
92 ,
93 extent_split_default,
94 extent_merge_default
95#endif
96};
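/*
 * Applications can override these hooks per arena via the
 * "arena.<i>.extent_hooks" mallctl.  A minimal sketch (illustrative only;
 * my_hooks and the my_* callbacks are hypothetical, and the table must
 * outlive the arena):
 *
 *	static extent_hooks_t my_hooks = { my_alloc, my_dalloc, my_destroy,
 *	    my_commit, my_decommit, my_purge_lazy, my_purge_forced, my_split,
 *	    my_merge };
 *	extent_hooks_t *new_hooks = &my_hooks;
 *	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
 *	    sizeof(extent_hooks_t *));
 *
 * See extent_hooks_set() below for how a new table is installed.
 */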
97
98/* Used exclusively for gdump triggering. */
99static atomic_zu_t curpages;
100static atomic_zu_t highpages;
101
102/******************************************************************************/
103/*
104 * Function prototypes for static functions that are referenced prior to
105 * definition.
106 */
107
108static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
110 extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
111 size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
112 bool *zero, bool *commit, bool growing_retained);
113static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
114 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
115 extent_t *extent, bool *coalesced, bool growing_retained);
116static void extent_record(tsdn_t *tsdn, arena_t *arena,
117 extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
118 bool growing_retained);
119
120/******************************************************************************/
121
122#define ATTR_NONE /* does nothing */
123
124ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
125 extent_esnead_comp)
126
127#undef ATTR_NONE
128
129typedef enum {
130 lock_result_success,
131 lock_result_failure,
132 lock_result_no_extent
133} lock_result_t;
134
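/*
 * Read the extent associated with the given rtree leaf element and try to
 * lock it via the extent mutex pool.  The mapping may change concurrently, so
 * the element is re-read under the lock; lock_result_failure tells the caller
 * to retry.
 */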
135static lock_result_t
136extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
137 extent_t **result, bool inactive_only) {
138 extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
139 elm, true);
140
	/*
	 * A slab mapping implies an active extent, which should be skipped
	 * when only inactive extents are of interest.
	 */
142 if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
143 &extents_rtree, elm, true))) {
144 return lock_result_no_extent;
145 }
146
147 /*
148 * It's possible that the extent changed out from under us, and with it
149 * the leaf->extent mapping. We have to recheck while holding the lock.
150 */
151 extent_lock(tsdn, extent1);
152 extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
153 &extents_rtree, elm, true);
154
155 if (extent1 == extent2) {
156 *result = extent1;
157 return lock_result_success;
158 } else {
159 extent_unlock(tsdn, extent1);
160 return lock_result_failure;
161 }
162}
163
164/*
165 * Returns a pool-locked extent_t * if there's one associated with the given
166 * address, and NULL otherwise.
167 */
168static extent_t *
169extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
170 bool inactive_only) {
171 extent_t *ret = NULL;
172 rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
173 rtree_ctx, (uintptr_t)addr, false, false);
174 if (elm == NULL) {
175 return NULL;
176 }
177 lock_result_t lock_result;
178 do {
179 lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
180 inactive_only);
181 } while (lock_result == lock_result_failure);
182 return ret;
183}
184
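/*
 * Obtain an extent_t from the arena's reuse list, falling back to allocating
 * fresh extent metadata from the arena's base allocator.
 */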
185extent_t *
186extent_alloc(tsdn_t *tsdn, arena_t *arena) {
187 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
188 extent_t *extent = extent_avail_first(&arena->extent_avail);
189 if (extent == NULL) {
190 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
191 return base_alloc_extent(tsdn, arena->base);
192 }
193 extent_avail_remove(&arena->extent_avail, extent);
194 atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
195 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
196 return extent;
197}
198
199void
200extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
201 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
202 extent_avail_insert(&arena->extent_avail, extent);
203 atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
204 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
205}
206
207extent_hooks_t *
208extent_hooks_get(arena_t *arena) {
209 return base_extent_hooks_get(arena->base);
210}
211
212extent_hooks_t *
213extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
214 background_thread_info_t *info;
215 if (have_background_thread) {
216 info = arena_background_thread_info_get(arena);
217 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
218 }
219 extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
220 if (have_background_thread) {
221 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
222 }
223
224 return ret;
225}
226
227static void
228extent_hooks_assure_initialized(arena_t *arena,
229 extent_hooks_t **r_extent_hooks) {
230 if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
231 *r_extent_hooks = extent_hooks_get(arena);
232 }
233}
234
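/*
 * Round size down to the nearest page-size-class quantum.  Together with
 * extent_size_quantize_ceil() below, this determines the pszind under which
 * extents are keyed into the per-size-class heaps.
 */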
235#ifndef JEMALLOC_JET
236static
237#endif
238size_t
239extent_size_quantize_floor(size_t size) {
240 size_t ret;
241 pszind_t pind;
242
243 assert(size > 0);
244 assert((size & PAGE_MASK) == 0);
245
246 pind = sz_psz2ind(size - sz_large_pad + 1);
247 if (pind == 0) {
248 /*
249 * Avoid underflow. This short-circuit would also do the right
250 * thing for all sizes in the range for which there are
251 * PAGE-spaced size classes, but it's simplest to just handle
252 * the one case that would cause erroneous results.
253 */
254 return size;
255 }
256 ret = sz_pind2sz(pind - 1) + sz_large_pad;
257 assert(ret <= size);
258 return ret;
259}
260
261#ifndef JEMALLOC_JET
262static
263#endif
264size_t
265extent_size_quantize_ceil(size_t size) {
266 size_t ret;
267
268 assert(size > 0);
269 assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
270 assert((size & PAGE_MASK) == 0);
271
272 ret = extent_size_quantize_floor(size);
273 if (ret < size) {
274 /*
275 * Skip a quantization that may have an adequately large extent,
276 * because under-sized extents may be mixed in. This only
277 * happens when an unusual size is requested, i.e. for aligned
278 * allocation, and is just one of several places where linear
279 * search would potentially find sufficiently aligned available
280 * memory somewhere lower.
281 */
282 ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
283 sz_large_pad;
284 }
285 return ret;
286}
287
288/* Generate pairing heap functions. */
289ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
290
291bool
292extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
293 bool delay_coalesce) {
294 if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
295 malloc_mutex_rank_exclusive)) {
296 return true;
297 }
298 for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
299 extent_heap_new(&extents->heaps[i]);
300 }
301 bitmap_init(extents->bitmap, &extents_bitmap_info, true);
302 extent_list_init(&extents->lru);
303 atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
304 extents->state = state;
305 extents->delay_coalesce = delay_coalesce;
306 return false;
307}
308
309extent_state_t
310extents_state_get(const extents_t *extents) {
311 return extents->state;
312}
313
314size_t
315extents_npages_get(extents_t *extents) {
316 return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
317}
318
319size_t
320extents_nextents_get(extents_t *extents, pszind_t pind) {
321 return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
322}
323
324size_t
325extents_nbytes_get(extents_t *extents, pszind_t pind) {
326 return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
327}
328
static void
extents_stats_add(extents_t *extents, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extents->nextents[pind], cur + 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extents->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}

static void
extents_stats_sub(extents_t *extents, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extents->nextents[pind], cur - 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extents->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}
344
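/*
 * Insert the extent into its size-class heap and the LRU list, clearing the
 * corresponding bitmap bit to mark the heap non-empty and updating the page
 * and stats counters.  The caller must hold extents->mtx.
 */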
345static void
346extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
347 malloc_mutex_assert_owner(tsdn, &extents->mtx);
348 assert(extent_state_get(extent) == extents->state);
349
350 size_t size = extent_size_get(extent);
351 size_t psz = extent_size_quantize_floor(size);
352 pszind_t pind = sz_psz2ind(psz);
353 if (extent_heap_empty(&extents->heaps[pind])) {
354 bitmap_unset(extents->bitmap, &extents_bitmap_info,
355 (size_t)pind);
356 }
357 extent_heap_insert(&extents->heaps[pind], extent);
358
359 if (config_stats) {
360 extents_stats_add(extents, pind, size);
361 }
362
363 extent_list_append(&extents->lru, extent);
364 size_t npages = size >> LG_PAGE;
365 /*
366 * All modifications to npages hold the mutex (as asserted above), so we
367 * don't need an atomic fetch-add; we can get by with a load followed by
368 * a store.
369 */
370 size_t cur_extents_npages =
371 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
372 atomic_store_zu(&extents->npages, cur_extents_npages + npages,
373 ATOMIC_RELAXED);
374}
375
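/* Inverse of extents_insert_locked(); the caller must hold extents->mtx. */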
376static void
377extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
378 malloc_mutex_assert_owner(tsdn, &extents->mtx);
379 assert(extent_state_get(extent) == extents->state);
380
381 size_t size = extent_size_get(extent);
382 size_t psz = extent_size_quantize_floor(size);
383 pszind_t pind = sz_psz2ind(psz);
384 extent_heap_remove(&extents->heaps[pind], extent);
385
386 if (config_stats) {
387 extents_stats_sub(extents, pind, size);
388 }
389
390 if (extent_heap_empty(&extents->heaps[pind])) {
391 bitmap_set(extents->bitmap, &extents_bitmap_info,
392 (size_t)pind);
393 }
394 extent_list_remove(&extents->lru, extent);
395 size_t npages = size >> LG_PAGE;
396 /*
397 * As in extents_insert_locked, we hold extents->mtx and so don't need
398 * atomic operations for updating extents->npages.
399 */
400 size_t cur_extents_npages =
401 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
402 assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages, cur_extents_npages - npages,
	    ATOMIC_RELAXED);
405}
406
407/*
408 * Find an extent with size [min_size, max_size) to satisfy the alignment
409 * requirement. For each size, try only the first extent in the heap.
410 */
411static extent_t *
412extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
413 size_t alignment) {
414 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
415 pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
416
417 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
418 &extents_bitmap_info, (size_t)pind); i < pind_max; i =
419 (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
420 (size_t)i+1)) {
421 assert(i < SC_NPSIZES);
422 assert(!extent_heap_empty(&extents->heaps[i]));
423 extent_t *extent = extent_heap_first(&extents->heaps[i]);
424 uintptr_t base = (uintptr_t)extent_base_get(extent);
425 size_t candidate_size = extent_size_get(extent);
426 assert(candidate_size >= min_size);
427
428 uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
429 PAGE_CEILING(alignment));
430 if (base > next_align || base + candidate_size <= next_align) {
431 /* Overflow or not crossing the next alignment. */
432 continue;
433 }
434
435 size_t leadsize = next_align - base;
436 if (candidate_size - leadsize >= min_size) {
437 return extent;
438 }
439 }
440
441 return NULL;
442}
443
/*
 * Do any-best-fit extent selection, i.e. select any extent from the first
 * (smallest) non-empty size class large enough to satisfy the request.
 */
445static extent_t *
446extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
447 size_t size) {
448 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
449 pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
450 (size_t)pind);
451 if (i < SC_NPSIZES + 1) {
452 /*
453 * In order to reduce fragmentation, avoid reusing and splitting
454 * large extents for much smaller sizes.
455 */
456 if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
457 return NULL;
458 }
459 assert(!extent_heap_empty(&extents->heaps[i]));
460 extent_t *extent = extent_heap_first(&extents->heaps[i]);
461 assert(extent_size_get(extent) >= size);
462 return extent;
463 }
464
465 return NULL;
466}
467
468/*
469 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
470 * large enough.
471 */
472static extent_t *
473extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
474 size_t size) {
475 extent_t *ret = NULL;
476
477 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
478 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
479 &extents_bitmap_info, (size_t)pind);
480 i < SC_NPSIZES + 1;
481 i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
482 (size_t)i+1)) {
483 assert(!extent_heap_empty(&extents->heaps[i]));
484 extent_t *extent = extent_heap_first(&extents->heaps[i]);
485 assert(extent_size_get(extent) >= size);
486 if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
487 ret = extent;
488 }
489 if (i == SC_NPSIZES) {
490 break;
491 }
492 assert(i < SC_NPSIZES);
493 }
494
495 return ret;
496}
497
498/*
499 * Do {best,first}-fit extent selection, where the selection policy choice is
500 * based on extents->delay_coalesce. Best-fit selection requires less
501 * searching, but its layout policy is less stable and may cause higher virtual
502 * memory fragmentation as a side effect.
503 */
504static extent_t *
505extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
506 size_t esize, size_t alignment) {
507 malloc_mutex_assert_owner(tsdn, &extents->mtx);
508
509 size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
510 /* Beware size_t wrap-around. */
511 if (max_size < esize) {
512 return NULL;
513 }
514
515 extent_t *extent = extents->delay_coalesce ?
516 extents_best_fit_locked(tsdn, arena, extents, max_size) :
517 extents_first_fit_locked(tsdn, arena, extents, max_size);
518
519 if (alignment > PAGE && extent == NULL) {
520 /*
521 * max_size guarantees the alignment requirement but is rather
522 * pessimistic. Next we try to satisfy the aligned allocation
523 * with sizes in [esize, max_size).
524 */
525 extent = extents_fit_alignment(extents, esize, max_size,
526 alignment);
527 }
528
529 return extent;
530}
531
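/*
 * Attempt the coalescing that was deferred when the extent was deallocated.
 * Returns true if the extent was not coalesced (the caller keeps it out of
 * the container), false if it was coalesced and reinserted.
 */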
532static bool
533extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
534 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
535 extent_t *extent) {
536 extent_state_set(extent, extent_state_active);
537 bool coalesced;
538 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
539 extents, extent, &coalesced, false);
540 extent_state_set(extent, extents_state_get(extents));
541
542 if (!coalesced) {
543 return true;
544 }
545 extents_insert_locked(tsdn, extents, extent);
546 return false;
547}
548
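/*
 * Allocate from the given extents_t by recycling an existing extent,
 * splitting off any leading/trailing space; returns NULL if nothing suitable
 * is found.
 */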
549extent_t *
550extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
551 extents_t *extents, void *new_addr, size_t size, size_t pad,
552 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
553 assert(size + pad != 0);
554 assert(alignment != 0);
555 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
556 WITNESS_RANK_CORE, 0);
557
558 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
559 new_addr, size, pad, alignment, slab, szind, zero, commit, false);
560 assert(extent == NULL || extent_dumpable_get(extent));
561 return extent;
562}
563
564void
565extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
566 extents_t *extents, extent_t *extent) {
567 assert(extent_base_get(extent) != NULL);
568 assert(extent_size_get(extent) != 0);
569 assert(extent_dumpable_get(extent));
570 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
571 WITNESS_RANK_CORE, 0);
572
573 extent_addr_set(extent, extent_base_get(extent));
574 extent_zeroed_set(extent, false);
575
576 extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
577}
578
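/*
 * Remove and return the LRU extent, but only while the container holds more
 * than npages_min pages.  Retained extents are deregistered, while dirty and
 * muzzy extents are marked active so that concurrent operations leave them
 * alone.
 */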
579extent_t *
580extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
581 extents_t *extents, size_t npages_min) {
582 rtree_ctx_t rtree_ctx_fallback;
583 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
584
585 malloc_mutex_lock(tsdn, &extents->mtx);
586
587 /*
588 * Get the LRU coalesced extent, if any. If coalescing was delayed,
589 * the loop will iterate until the LRU extent is fully coalesced.
590 */
591 extent_t *extent;
592 while (true) {
593 /* Get the LRU extent, if any. */
594 extent = extent_list_first(&extents->lru);
595 if (extent == NULL) {
596 goto label_return;
597 }
598 /* Check the eviction limit. */
599 size_t extents_npages = atomic_load_zu(&extents->npages,
600 ATOMIC_RELAXED);
601 if (extents_npages <= npages_min) {
602 extent = NULL;
603 goto label_return;
604 }
605 extents_remove_locked(tsdn, extents, extent);
606 if (!extents->delay_coalesce) {
607 break;
608 }
609 /* Try to coalesce. */
610 if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
611 rtree_ctx, extents, extent)) {
612 break;
613 }
614 /*
615 * The LRU extent was just coalesced and the result placed in
616 * the LRU at its neighbor's position. Start over.
617 */
618 }
619
620 /*
621 * Either mark the extent active or deregister it to protect against
622 * concurrent operations.
623 */
624 switch (extents_state_get(extents)) {
625 case extent_state_active:
626 not_reached();
627 case extent_state_dirty:
628 case extent_state_muzzy:
629 extent_state_set(extent, extent_state_active);
630 break;
631 case extent_state_retained:
632 extent_deregister(tsdn, extent);
633 break;
634 default:
635 not_reached();
636 }
637
638label_return:
639 malloc_mutex_unlock(tsdn, &extents->mtx);
640 return extent;
641}
642
643static void
644extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
645 extents_t *extents, extent_t *extent, bool growing_retained) {
646 /*
647 * Leak extent after making sure its pages have already been purged, so
648 * that this is only a virtual memory leak.
649 */
650 if (extents_state_get(extents) == extent_state_dirty) {
651 if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
652 extent, 0, extent_size_get(extent), growing_retained)) {
653 extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
654 extent, 0, extent_size_get(extent),
655 growing_retained);
656 }
657 }
658 extent_dalloc(tsdn, arena, extent);
659}
660
661void
662extents_prefork(tsdn_t *tsdn, extents_t *extents) {
663 malloc_mutex_prefork(tsdn, &extents->mtx);
664}
665
666void
667extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
668 malloc_mutex_postfork_parent(tsdn, &extents->mtx);
669}
670
671void
672extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
673 malloc_mutex_postfork_child(tsdn, &extents->mtx);
674}
675
676static void
677extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
678 extent_t *extent) {
679 assert(extent_arena_get(extent) == arena);
680 assert(extent_state_get(extent) == extent_state_active);
681
682 extent_state_set(extent, extents_state_get(extents));
683 extents_insert_locked(tsdn, extents, extent);
684}
685
686static void
687extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
688 extent_t *extent) {
689 malloc_mutex_lock(tsdn, &extents->mtx);
690 extent_deactivate_locked(tsdn, arena, extents, extent);
691 malloc_mutex_unlock(tsdn, &extents->mtx);
692}
693
694static void
695extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
696 extent_t *extent) {
697 assert(extent_arena_get(extent) == arena);
698 assert(extent_state_get(extent) == extents_state_get(extents));
699
700 extents_remove_locked(tsdn, extents, extent);
701 extent_state_set(extent, extent_state_active);
702}
703
704static bool
705extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
706 const extent_t *extent, bool dependent, bool init_missing,
707 rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
708 *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
709 (uintptr_t)extent_base_get(extent), dependent, init_missing);
710 if (!dependent && *r_elm_a == NULL) {
711 return true;
712 }
713 assert(*r_elm_a != NULL);
714
715 *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
716 (uintptr_t)extent_last_get(extent), dependent, init_missing);
717 if (!dependent && *r_elm_b == NULL) {
718 return true;
719 }
720 assert(*r_elm_b != NULL);
721
722 return false;
723}
724
725static void
726extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
727 rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
728 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
729 if (elm_b != NULL) {
730 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
731 slab);
732 }
733}
734
735static void
736extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
737 szind_t szind) {
738 assert(extent_slab_get(extent));
739
740 /* Register interior. */
741 for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
742 rtree_write(tsdn, &extents_rtree, rtree_ctx,
743 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
744 LG_PAGE), extent, szind, true);
745 }
746}
747
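/*
 * Maintain the active-page high water mark (curpages/highpages) and trigger a
 * profiling dump when a new maximum is reached.
 */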
748static void
749extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
750 cassert(config_prof);
751 /* prof_gdump() requirement. */
752 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
753 WITNESS_RANK_CORE, 0);
754
755 if (opt_prof && extent_state_get(extent) == extent_state_active) {
756 size_t nadd = extent_size_get(extent) >> LG_PAGE;
757 size_t cur = atomic_fetch_add_zu(&curpages, nadd,
758 ATOMIC_RELAXED) + nadd;
759 size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
760 while (cur > high && !atomic_compare_exchange_weak_zu(
761 &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
762 /*
763 * Don't refresh cur, because it may have decreased
764 * since this thread lost the highpages update race.
765 * Note that high is updated in case of CAS failure.
766 */
767 }
768 if (cur > high && prof_gdump_get_unlocked()) {
769 prof_gdump(tsdn);
770 }
771 }
772}
773
774static void
775extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
776 cassert(config_prof);
777
778 if (opt_prof && extent_state_get(extent) == extent_state_active) {
779 size_t nsub = extent_size_get(extent) >> LG_PAGE;
780 assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
781 atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
782 }
783}
784
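/*
 * Map the extent's first and last pages (and, for slabs, its interior pages)
 * to the extent in the global rtree so that address-based lookups find it.
 */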
785static bool
786extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
787 rtree_ctx_t rtree_ctx_fallback;
788 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
789 rtree_leaf_elm_t *elm_a, *elm_b;
790
791 /*
792 * We need to hold the lock to protect against a concurrent coalesce
793 * operation that sees us in a partial state.
794 */
795 extent_lock(tsdn, extent);
796
	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		extent_unlock(tsdn, extent);
		return true;
	}
801
802 szind_t szind = extent_szind_get_maybe_invalid(extent);
803 bool slab = extent_slab_get(extent);
804 extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
805 if (slab) {
806 extent_interior_register(tsdn, rtree_ctx, extent, szind);
807 }
808
809 extent_unlock(tsdn, extent);
810
811 if (config_prof && gdump_add) {
812 extent_gdump_add(tsdn, extent);
813 }
814
815 return false;
816}
817
818static bool
819extent_register(tsdn_t *tsdn, extent_t *extent) {
820 return extent_register_impl(tsdn, extent, true);
821}
822
823static bool
824extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
825 return extent_register_impl(tsdn, extent, false);
826}
827
828static void
829extent_reregister(tsdn_t *tsdn, extent_t *extent) {
830 bool err = extent_register(tsdn, extent);
831 assert(!err);
832}
833
834/*
835 * Removes all pointers to the given extent from the global rtree indices for
836 * its interior. This is relevant for slab extents, for which we need to do
837 * metadata lookups at places other than the head of the extent. We deregister
838 * on the interior, then, when an extent moves from being an active slab to an
839 * inactive state.
840 */
841static void
842extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
843 extent_t *extent) {
844 size_t i;
845
846 assert(extent_slab_get(extent));
847
848 for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
849 rtree_clear(tsdn, &extents_rtree, rtree_ctx,
850 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
851 LG_PAGE));
852 }
853}
854
855/*
856 * Removes all pointers to the given extent from the global rtree.
857 */
858static void
859extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
860 rtree_ctx_t rtree_ctx_fallback;
861 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
862 rtree_leaf_elm_t *elm_a, *elm_b;
863 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
864 &elm_a, &elm_b);
865
866 extent_lock(tsdn, extent);
867
868 extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
869 if (extent_slab_get(extent)) {
870 extent_interior_deregister(tsdn, rtree_ctx, extent);
871 extent_slab_set(extent, false);
872 }
873
874 extent_unlock(tsdn, extent);
875
876 if (config_prof && gdump) {
877 extent_gdump_sub(tsdn, extent);
878 }
879}
880
881static void
882extent_deregister(tsdn_t *tsdn, extent_t *extent) {
883 extent_deregister_impl(tsdn, extent, true);
884}
885
886static void
887extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
888 extent_deregister_impl(tsdn, extent, false);
889}
890
891/*
892 * Tries to find and remove an extent from extents that can be used for the
893 * given allocation request.
894 */
895static extent_t *
896extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
897 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
898 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
899 bool growing_retained) {
900 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
901 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
902 assert(alignment > 0);
903 if (config_debug && new_addr != NULL) {
904 /*
905 * Non-NULL new_addr has two use cases:
906 *
907 * 1) Recycle a known-extant extent, e.g. during purging.
908 * 2) Perform in-place expanding reallocation.
909 *
910 * Regardless of use case, new_addr must either refer to a
911 * non-existing extent, or to the base of an extant extent,
912 * since only active slabs support interior lookups (which of
913 * course cannot be recycled).
914 */
915 assert(PAGE_ADDR2BASE(new_addr) == new_addr);
916 assert(pad == 0);
917 assert(alignment <= PAGE);
918 }
919
920 size_t esize = size + pad;
921 malloc_mutex_lock(tsdn, &extents->mtx);
922 extent_hooks_assure_initialized(arena, r_extent_hooks);
923 extent_t *extent;
924 if (new_addr != NULL) {
925 extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
926 false);
927 if (extent != NULL) {
928 /*
929 * We might null-out extent to report an error, but we
930 * still need to unlock the associated mutex after.
931 */
932 extent_t *unlock_extent = extent;
933 assert(extent_base_get(extent) == new_addr);
934 if (extent_arena_get(extent) != arena ||
935 extent_size_get(extent) < esize ||
936 extent_state_get(extent) !=
937 extents_state_get(extents)) {
938 extent = NULL;
939 }
940 extent_unlock(tsdn, unlock_extent);
941 }
942 } else {
943 extent = extents_fit_locked(tsdn, arena, extents, esize,
944 alignment);
945 }
946 if (extent == NULL) {
947 malloc_mutex_unlock(tsdn, &extents->mtx);
948 return NULL;
949 }
950
951 extent_activate_locked(tsdn, arena, extents, extent);
952 malloc_mutex_unlock(tsdn, &extents->mtx);
953
954 return extent;
955}
956
957/*
958 * Given an allocation request and an extent guaranteed to be able to satisfy
959 * it, this splits off lead and trail extents, leaving extent pointing to an
960 * extent satisfying the allocation.
961 * This function doesn't put lead or trail into any extents_t; it's the caller's
962 * job to ensure that they can be reused.
963 */
964typedef enum {
965 /*
966 * Split successfully. lead, extent, and trail, are modified to extents
967 * describing the ranges before, in, and after the given allocation.
968 */
969 extent_split_interior_ok,
970 /*
971 * The extent can't satisfy the given allocation request. None of the
972 * input extent_t *s are touched.
973 */
974 extent_split_interior_cant_alloc,
975 /*
976 * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
977 * and salvage what's still salvageable (if *to_salvage is non-NULL).
978 * None of lead, extent, or trail are valid.
979 */
980 extent_split_interior_error
981} extent_split_interior_result_t;
982
983static extent_split_interior_result_t
984extent_split_interior(tsdn_t *tsdn, arena_t *arena,
985 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
986 /* The result of splitting, in case of success. */
987 extent_t **extent, extent_t **lead, extent_t **trail,
988 /* The mess to clean up, in case of error. */
989 extent_t **to_leak, extent_t **to_salvage,
990 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
991 szind_t szind, bool growing_retained) {
992 size_t esize = size + pad;
993 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
994 PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
995 assert(new_addr == NULL || leadsize == 0);
996 if (extent_size_get(*extent) < leadsize + esize) {
997 return extent_split_interior_cant_alloc;
998 }
999 size_t trailsize = extent_size_get(*extent) - leadsize - esize;
1000
1001 *lead = NULL;
1002 *trail = NULL;
1003 *to_leak = NULL;
1004 *to_salvage = NULL;
1005
1006 /* Split the lead. */
1007 if (leadsize != 0) {
1008 *lead = *extent;
1009 *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
1010 *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
1011 slab, growing_retained);
1012 if (*extent == NULL) {
1013 *to_leak = *lead;
1014 *lead = NULL;
1015 return extent_split_interior_error;
1016 }
1017 }
1018
1019 /* Split the trail. */
1020 if (trailsize != 0) {
1021 *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
1022 esize, szind, slab, trailsize, SC_NSIZES, false,
1023 growing_retained);
1024 if (*trail == NULL) {
1025 *to_leak = *extent;
1026 *to_salvage = *lead;
1027 *lead = NULL;
1028 *extent = NULL;
1029 return extent_split_interior_error;
1030 }
1031 }
1032
1033 if (leadsize == 0 && trailsize == 0) {
1034 /*
1035 * Splitting causes szind to be set as a side effect, but no
1036 * splitting occurred.
1037 */
1038 extent_szind_set(*extent, szind);
1039 if (szind != SC_NSIZES) {
1040 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
1041 (uintptr_t)extent_addr_get(*extent), szind, slab);
1042 if (slab && extent_size_get(*extent) > PAGE) {
1043 rtree_szind_slab_update(tsdn, &extents_rtree,
1044 rtree_ctx,
1045 (uintptr_t)extent_past_get(*extent) -
1046 (uintptr_t)PAGE, szind, slab);
1047 }
1048 }
1049 }
1050
1051 return extent_split_interior_ok;
1052}
1053
1054/*
1055 * This fulfills the indicated allocation request out of the given extent (which
1056 * the caller should have ensured was big enough). If there's any unused space
1057 * before or after the resulting allocation, that space is given its own extent
1058 * and put back into extents.
1059 */
1060static extent_t *
1061extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1062 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1063 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1064 szind_t szind, extent_t *extent, bool growing_retained) {
1065 extent_t *lead;
1066 extent_t *trail;
1067 extent_t *to_leak;
1068 extent_t *to_salvage;
1069
1070 extent_split_interior_result_t result = extent_split_interior(
1071 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1072 &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1073 growing_retained);
1074
1075 if (result == extent_split_interior_ok) {
1076 if (lead != NULL) {
1077 extent_deactivate(tsdn, arena, extents, lead);
1078 }
1079 if (trail != NULL) {
1080 extent_deactivate(tsdn, arena, extents, trail);
1081 }
1082 return extent;
1083 } else {
1084 /*
1085 * We should have picked an extent that was large enough to
1086 * fulfill our allocation request.
1087 */
1088 assert(result == extent_split_interior_error);
1089 if (to_salvage != NULL) {
1090 extent_deregister(tsdn, to_salvage);
1091 }
1092 if (to_leak != NULL) {
1093 void *leak = extent_base_get(to_leak);
1094 extent_deregister_no_gdump_sub(tsdn, to_leak);
1095 extents_leak(tsdn, arena, r_extent_hooks, extents,
1096 to_leak, growing_retained);
1097 assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
1098 false) == NULL);
1099 }
1100 return NULL;
1101 }
1102 unreachable();
1103}
1104
1105/*
1106 * Tries to satisfy the given allocation request by reusing one of the extents
1107 * in the given extents_t.
1108 */
1109static extent_t *
1110extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1111 extents_t *extents, void *new_addr, size_t size, size_t pad,
1112 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1113 bool growing_retained) {
1114 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1115 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1116 assert(new_addr == NULL || !slab);
1117 assert(pad == 0 || !slab);
1118 assert(!*zero || !slab);
1119
1120 rtree_ctx_t rtree_ctx_fallback;
1121 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1122
1123 extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1124 rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1125 growing_retained);
1126 if (extent == NULL) {
1127 return NULL;
1128 }
1129
1130 extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1131 extents, new_addr, size, pad, alignment, slab, szind, extent,
1132 growing_retained);
1133 if (extent == NULL) {
1134 return NULL;
1135 }
1136
1137 if (*commit && !extent_committed_get(extent)) {
1138 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1139 0, extent_size_get(extent), growing_retained)) {
1140 extent_record(tsdn, arena, r_extent_hooks, extents,
1141 extent, growing_retained);
1142 return NULL;
1143 }
1144 extent_zeroed_set(extent, true);
1145 }
1146
1147 if (extent_committed_get(extent)) {
1148 *commit = true;
1149 }
1150 if (extent_zeroed_get(extent)) {
1151 *zero = true;
1152 }
1153
1154 if (pad != 0) {
1155 extent_addr_randomize(tsdn, extent, alignment);
1156 }
1157 assert(extent_state_get(extent) == extent_state_active);
1158 if (slab) {
1159 extent_slab_set(extent, slab);
1160 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1161 }
1162
1163 if (*zero) {
1164 void *addr = extent_base_get(extent);
1165 size_t size = extent_size_get(extent);
1166 if (!extent_zeroed_get(extent)) {
1167 if (pages_purge_forced(addr, size)) {
1168 memset(addr, 0, size);
1169 }
1170 } else if (config_debug) {
1171 size_t *p = (size_t *)(uintptr_t)addr;
1172 for (size_t i = 0; i < size / sizeof(size_t); i++) {
1173 assert(p[i] == 0);
1174 }
1175 }
1176 }
1177 return extent;
1178}
1179
1180/*
1181 * If the caller specifies (!*zero), it is still possible to receive zeroed
1182 * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
1183 * advantage of this to avoid demanding zeroed extents, but taking advantage of
1184 * them if they are returned.
1185 */
1186static void *
1187extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1188 size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1189 void *ret;
1190
1191 assert(size != 0);
1192 assert(alignment != 0);
1193
1194 /* "primary" dss. */
1195 if (have_dss && dss_prec == dss_prec_primary && (ret =
1196 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1197 commit)) != NULL) {
1198 return ret;
1199 }
1200 /* mmap. */
1201 if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1202 != NULL) {
1203 return ret;
1204 }
1205 /* "secondary" dss. */
1206 if (have_dss && dss_prec == dss_prec_secondary && (ret =
1207 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1208 commit)) != NULL) {
1209 return ret;
1210 }
1211
1212 /* All strategies for allocation failed. */
1213 return NULL;
1214}
1215
1216static void *
1217extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1218 size_t size, size_t alignment, bool *zero, bool *commit) {
1219 void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1220 commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1221 ATOMIC_RELAXED));
1222 if (have_madvise_huge && ret) {
1223 pages_set_thp_state(ret, size);
1224 }
1225 return ret;
1226}
1227
1228static void *
1229extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1230 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1231 tsdn_t *tsdn;
1232 arena_t *arena;
1233
1234 tsdn = tsdn_fetch();
1235 arena = arena_get(tsdn, arena_ind, false);
1236 /*
1237 * The arena we're allocating on behalf of must have been initialized
1238 * already.
1239 */
1240 assert(arena != NULL);
1241
1242 return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1243 alignment, zero, commit);
1244}
1245
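/*
 * Calls into user-supplied extent hooks are bracketed by these helpers, which
 * raise and lower the thread's reentrancy level so that any allocation the
 * hooks perform is treated as reentrant; see the a0 special case below.
 */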
1246static void
1247extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1248 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1249 if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1250 /*
1251 * The only legitimate case of customized extent hooks for a0 is
1252 * hooks with no allocation activities. One such example is to
1253 * place metadata on pre-allocated resources such as huge pages.
1254 * In that case, rely on reentrancy_level checks to catch
1255 * infinite recursions.
1256 */
1257 pre_reentrancy(tsd, NULL);
1258 } else {
1259 pre_reentrancy(tsd, arena);
1260 }
1261}
1262
1263static void
1264extent_hook_post_reentrancy(tsdn_t *tsdn) {
1265 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1266 post_reentrancy(tsd);
1267}
1268
1269/*
1270 * If virtual memory is retained, create increasingly larger extents from which
1271 * to split requested extents in order to limit the total number of disjoint
1272 * virtual memory ranges retained by each arena.
1273 */
1274static extent_t *
1275extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1276 extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1277 bool slab, szind_t szind, bool *zero, bool *commit) {
1278 malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1279 assert(pad == 0 || !slab);
1280 assert(!*zero || !slab);
1281
1282 size_t esize = size + pad;
1283 size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1284 /* Beware size_t wrap-around. */
1285 if (alloc_size_min < esize) {
1286 goto label_err;
1287 }
1288 /*
1289 * Find the next extent size in the series that would be large enough to
1290 * satisfy this request.
1291 */
1292 pszind_t egn_skip = 0;
1293 size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1294 while (alloc_size < alloc_size_min) {
1295 egn_skip++;
1296 if (arena->extent_grow_next + egn_skip >=
1297 sz_psz2ind(SC_LARGE_MAXCLASS)) {
1298 /* Outside legal range. */
1299 goto label_err;
1300 }
1301 alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1302 }
1303
1304 extent_t *extent = extent_alloc(tsdn, arena);
1305 if (extent == NULL) {
1306 goto label_err;
1307 }
1308 bool zeroed = false;
1309 bool committed = false;
1310
1311 void *ptr;
1312 if (*r_extent_hooks == &extent_hooks_default) {
1313 ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1314 alloc_size, PAGE, &zeroed, &committed);
1315 } else {
1316 extent_hook_pre_reentrancy(tsdn, arena);
1317 ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1318 alloc_size, PAGE, &zeroed, &committed,
1319 arena_ind_get(arena));
1320 extent_hook_post_reentrancy(tsdn);
1321 }
1322
1323 extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
1324 arena_extent_sn_next(arena), extent_state_active, zeroed,
1325 committed, true);
1326 if (ptr == NULL) {
1327 extent_dalloc(tsdn, arena, extent);
1328 goto label_err;
1329 }
1330
1331 if (extent_register_no_gdump_add(tsdn, extent)) {
1332 extents_leak(tsdn, arena, r_extent_hooks,
1333 &arena->extents_retained, extent, true);
1334 goto label_err;
1335 }
1336
1337 if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1338 *zero = true;
1339 }
1340 if (extent_committed_get(extent)) {
1341 *commit = true;
1342 }
1343
1344 rtree_ctx_t rtree_ctx_fallback;
1345 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1346
1347 extent_t *lead;
1348 extent_t *trail;
1349 extent_t *to_leak;
1350 extent_t *to_salvage;
1351 extent_split_interior_result_t result = extent_split_interior(
1352 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1353 &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1354 true);
1355
1356 if (result == extent_split_interior_ok) {
1357 if (lead != NULL) {
1358 extent_record(tsdn, arena, r_extent_hooks,
1359 &arena->extents_retained, lead, true);
1360 }
1361 if (trail != NULL) {
1362 extent_record(tsdn, arena, r_extent_hooks,
1363 &arena->extents_retained, trail, true);
1364 }
1365 } else {
1366 /*
1367 * We should have allocated a sufficiently large extent; the
1368 * cant_alloc case should not occur.
1369 */
1370 assert(result == extent_split_interior_error);
1371 if (to_salvage != NULL) {
1372 if (config_prof) {
1373 extent_gdump_add(tsdn, to_salvage);
1374 }
1375 extent_record(tsdn, arena, r_extent_hooks,
1376 &arena->extents_retained, to_salvage, true);
1377 }
1378 if (to_leak != NULL) {
1379 extent_deregister_no_gdump_sub(tsdn, to_leak);
1380 extents_leak(tsdn, arena, r_extent_hooks,
1381 &arena->extents_retained, to_leak, true);
1382 }
1383 goto label_err;
1384 }
1385
1386 if (*commit && !extent_committed_get(extent)) {
1387 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1388 extent_size_get(extent), true)) {
1389 extent_record(tsdn, arena, r_extent_hooks,
1390 &arena->extents_retained, extent, true);
1391 goto label_err;
1392 }
1393 extent_zeroed_set(extent, true);
1394 }
1395
1396 /*
1397 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1398 * range.
1399 */
1400 if (arena->extent_grow_next + egn_skip + 1 <=
1401 arena->retain_grow_limit) {
1402 arena->extent_grow_next += egn_skip + 1;
1403 } else {
1404 arena->extent_grow_next = arena->retain_grow_limit;
1405 }
1406 /* All opportunities for failure are past. */
1407 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1408
1409 if (config_prof) {
1410 /* Adjust gdump stats now that extent is final size. */
1411 extent_gdump_add(tsdn, extent);
1412 }
1413 if (pad != 0) {
1414 extent_addr_randomize(tsdn, extent, alignment);
1415 }
1416 if (slab) {
1417 rtree_ctx_t rtree_ctx_fallback;
1418 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1419 &rtree_ctx_fallback);
1420
1421 extent_slab_set(extent, true);
1422 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1423 }
1424 if (*zero && !extent_zeroed_get(extent)) {
1425 void *addr = extent_base_get(extent);
1426 size_t size = extent_size_get(extent);
1427 if (pages_purge_forced(addr, size)) {
1428 memset(addr, 0, size);
1429 }
1430 }
1431
1432 return extent;
1433label_err:
1434 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1435 return NULL;
1436}
1437
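/*
 * Satisfy the allocation from retained virtual memory, growing the retained
 * set via extent_grow_retained() when opt_retain is enabled and no specific
 * address was requested.  extent_grow_mtx is acquired here and is always
 * released before returning.
 */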
1438static extent_t *
1439extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1440 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1441 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1442 assert(size != 0);
1443 assert(alignment != 0);
1444
1445 malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1446
1447 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1448 &arena->extents_retained, new_addr, size, pad, alignment, slab,
1449 szind, zero, commit, true);
1450 if (extent != NULL) {
1451 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1452 if (config_prof) {
1453 extent_gdump_add(tsdn, extent);
1454 }
1455 } else if (opt_retain && new_addr == NULL) {
1456 extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1457 pad, alignment, slab, szind, zero, commit);
1458 /* extent_grow_retained() always releases extent_grow_mtx. */
1459 } else {
1460 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1461 }
1462 malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1463
1464 return extent;
1465}
1466
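/*
 * Allocate fresh virtual memory through the extent hooks; used when recycling
 * retained extents did not produce a usable extent.
 */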
1467static extent_t *
1468extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1469 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1470 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1471 size_t esize = size + pad;
1472 extent_t *extent = extent_alloc(tsdn, arena);
1473 if (extent == NULL) {
1474 return NULL;
1475 }
1476 void *addr;
1477 if (*r_extent_hooks == &extent_hooks_default) {
1478 /* Call directly to propagate tsdn. */
1479 addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1480 alignment, zero, commit);
1481 } else {
1482 extent_hook_pre_reentrancy(tsdn, arena);
1483 addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1484 esize, alignment, zero, commit, arena_ind_get(arena));
1485 extent_hook_post_reentrancy(tsdn);
1486 }
1487 if (addr == NULL) {
1488 extent_dalloc(tsdn, arena, extent);
1489 return NULL;
1490 }
1491 extent_init(extent, arena, addr, esize, slab, szind,
1492 arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1493 true);
1494 if (pad != 0) {
1495 extent_addr_randomize(tsdn, extent, alignment);
1496 }
1497 if (extent_register(tsdn, extent)) {
1498 extents_leak(tsdn, arena, r_extent_hooks,
1499 &arena->extents_retained, extent, false);
1500 return NULL;
1501 }
1502
1503 return extent;
1504}
1505
1506extent_t *
1507extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1508 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1509 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1510 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1511 WITNESS_RANK_CORE, 0);
1512
1513 extent_hooks_assure_initialized(arena, r_extent_hooks);
1514
1515 extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1516 new_addr, size, pad, alignment, slab, szind, zero, commit);
1517 if (extent == NULL) {
1518 if (opt_retain && new_addr != NULL) {
1519 /*
1520 * When retain is enabled and new_addr is set, we do not
1521 * attempt extent_alloc_wrapper_hard which does mmap
1522 * that is very unlikely to succeed (unless it happens
1523 * to be at the end).
1524 */
1525 return NULL;
1526 }
1527 extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1528 new_addr, size, pad, alignment, slab, szind, zero, commit);
1529 }
1530
1531 assert(extent == NULL || extent_dumpable_get(extent));
1532 return extent;
1533}
1534
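/*
 * Two neighboring extents may be merged only if they belong to the same
 * arena, the neighbor is in the container's state, and their commit states
 * match.
 */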
1535static bool
1536extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1537 const extent_t *outer) {
1538 assert(extent_arena_get(inner) == arena);
1539 if (extent_arena_get(outer) != arena) {
1540 return false;
1541 }
1542
1543 assert(extent_state_get(inner) == extent_state_active);
1544 if (extent_state_get(outer) != extents->state) {
1545 return false;
1546 }
1547
1548 if (extent_committed_get(inner) != extent_committed_get(outer)) {
1549 return false;
1550 }
1551
1552 return true;
1553}
1554
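/*
 * Merge inner with its neighbor outer.  extents->mtx is dropped around the
 * hooks-level merge and reacquired afterwards; on failure, outer is returned
 * to the container.
 */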
1555static bool
1556extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1557 extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1558 bool growing_retained) {
1559 assert(extent_can_coalesce(arena, extents, inner, outer));
1560
1561 extent_activate_locked(tsdn, arena, extents, outer);
1562
1563 malloc_mutex_unlock(tsdn, &extents->mtx);
1564 bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1565 forward ? inner : outer, forward ? outer : inner, growing_retained);
1566 malloc_mutex_lock(tsdn, &extents->mtx);
1567
1568 if (err) {
1569 extent_deactivate_locked(tsdn, arena, extents, outer);
1570 }
1571
1572 return err;
1573}
1574
1575static extent_t *
1576extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
1577 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1578 extent_t *extent, bool *coalesced, bool growing_retained,
1579 bool inactive_only) {
	/*
	 * We avoid checking / locking inactive neighbors for large size
	 * classes, since they are eagerly coalesced on deallocation which can
	 * cause lock contention.
	 *
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
1589 bool again;
1590 do {
1591 again = false;
1592
1593 /* Try to coalesce forward. */
1594 extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1595 extent_past_get(extent), inactive_only);
1596 if (next != NULL) {
1597 /*
1598 * extents->mtx only protects against races for
1599 * like-state extents, so call extent_can_coalesce()
1600 * before releasing next's pool lock.
1601 */
1602 bool can_coalesce = extent_can_coalesce(arena, extents,
1603 extent, next);
1604
1605 extent_unlock(tsdn, next);
1606
1607 if (can_coalesce && !extent_coalesce(tsdn, arena,
1608 r_extent_hooks, extents, extent, next, true,
1609 growing_retained)) {
1610 if (extents->delay_coalesce) {
1611 /* Do minimal coalescing. */
1612 *coalesced = true;
1613 return extent;
1614 }
1615 again = true;
1616 }
1617 }
1618
1619 /* Try to coalesce backward. */
1620 extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1621 extent_before_get(extent), inactive_only);
1622 if (prev != NULL) {
1623 bool can_coalesce = extent_can_coalesce(arena, extents,
1624 extent, prev);
1625 extent_unlock(tsdn, prev);
1626
1627 if (can_coalesce && !extent_coalesce(tsdn, arena,
1628 r_extent_hooks, extents, extent, prev, false,
1629 growing_retained)) {
1630 extent = prev;
1631 if (extents->delay_coalesce) {
1632 /* Do minimal coalescing. */
1633 *coalesced = true;
1634 return extent;
1635 }
1636 again = true;
1637 }
1638 }
1639 } while (again);
1640
1641 if (extents->delay_coalesce) {
1642 *coalesced = false;
1643 }
1644 return extent;
1645}
1646
1647static extent_t *
1648extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1649 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1650 extent_t *extent, bool *coalesced, bool growing_retained) {
1651 return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
1652 extents, extent, coalesced, growing_retained, false);
1653}
1654
1655static extent_t *
1656extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
1657 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1658 extent_t *extent, bool *coalesced, bool growing_retained) {
1659 return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
1660 extents, extent, coalesced, growing_retained, true);
1661}
1662
/*
 * Does the metadata management portion of putting an unused extent into the
 * given extents_t: coalescing, deregistering slab interiors, and the heap and
 * LRU operations.
 */
1666 */
1667static void
1668extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1669 extents_t *extents, extent_t *extent, bool growing_retained) {
1670 rtree_ctx_t rtree_ctx_fallback;
1671 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1672
1673 assert((extents_state_get(extents) != extent_state_dirty &&
1674 extents_state_get(extents) != extent_state_muzzy) ||
1675 !extent_zeroed_get(extent));
1676
1677 malloc_mutex_lock(tsdn, &extents->mtx);
1678 extent_hooks_assure_initialized(arena, r_extent_hooks);
1679
1680 extent_szind_set(extent, SC_NSIZES);
1681 if (extent_slab_get(extent)) {
1682 extent_interior_deregister(tsdn, rtree_ctx, extent);
1683 extent_slab_set(extent, false);
1684 }
1685
1686 assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1687 (uintptr_t)extent_base_get(extent), true) == extent);
1688
1689 if (!extents->delay_coalesce) {
1690 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1691 rtree_ctx, extents, extent, NULL, growing_retained);
1692 } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
1693 /* Always coalesce large extents eagerly. */
1694 bool coalesced;
1695 do {
1696 assert(extent_state_get(extent) == extent_state_active);
1697 extent = extent_try_coalesce_large(tsdn, arena,
1698 r_extent_hooks, rtree_ctx, extents, extent,
1699 &coalesced, growing_retained);
1700 } while (coalesced);
1701 }
1702 extent_deactivate_locked(tsdn, arena, extents, extent);
1703
1704 malloc_mutex_unlock(tsdn, &extents->mtx);
1705}
1706
1707void
1708extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1709 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1710
1711 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1712 WITNESS_RANK_CORE, 0);
1713
1714 if (extent_register(tsdn, extent)) {
1715 extents_leak(tsdn, arena, &extent_hooks,
1716 &arena->extents_retained, extent, false);
1717 return;
1718 }
1719 extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1720}
1721
1722static bool
1723extent_may_dalloc(void) {
1724 /* With retain enabled, the default dalloc always fails. */
1725 return !opt_retain;
1726}
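/*
 * Note: when opt.retain is enabled, unused extents are kept in
 * extents_retained rather than returned to the OS, which avoids virtual
 * memory map churn; see extent_dalloc_wrapper() below.
 */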
1727
1728static bool
1729extent_dalloc_default_impl(void *addr, size_t size) {
1730 if (!have_dss || !extent_in_dss(addr)) {
1731 return extent_dalloc_mmap(addr, size);
1732 }
1733 return true;
1734}
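/*
 * Note: as elsewhere in the extent hook API, false means success and true
 * means failure.  DSS (sbrk) memory cannot be unmapped, so the default
 * implementation reports failure for DSS addresses and the extent ends up
 * being retained instead.
 */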
1735
1736static bool
1737extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1738 bool committed, unsigned arena_ind) {
1739 return extent_dalloc_default_impl(addr, size);
1740}
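/*
 * Example (sketch only; my_pool_release() is a hypothetical application
 * function): a custom dalloc hook with the extent_dalloc_t signature could
 * recycle the range into an application-level pool and report success:
 *
 *	static bool
 *	my_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
 *	    bool committed, unsigned arena_ind) {
 *		my_pool_release(addr, size);
 *		return false;	// false indicates success
 *	}
 */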
1741
1742static bool
1743extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1744 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1745 bool err;
1746
1747 assert(extent_base_get(extent) != NULL);
1748 assert(extent_size_get(extent) != 0);
1749 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1750 WITNESS_RANK_CORE, 0);
1751
1752 extent_addr_set(extent, extent_base_get(extent));
1753
1754 extent_hooks_assure_initialized(arena, r_extent_hooks);
1755 /* Try to deallocate. */
1756 if (*r_extent_hooks == &extent_hooks_default) {
1757 /* Call directly to propagate tsdn. */
1758 err = extent_dalloc_default_impl(extent_base_get(extent),
1759 extent_size_get(extent));
1760 } else {
1761 extent_hook_pre_reentrancy(tsdn, arena);
1762 err = ((*r_extent_hooks)->dalloc == NULL ||
1763 (*r_extent_hooks)->dalloc(*r_extent_hooks,
1764 extent_base_get(extent), extent_size_get(extent),
1765 extent_committed_get(extent), arena_ind_get(arena)));
1766 extent_hook_post_reentrancy(tsdn);
1767 }
1768
1769 if (!err) {
1770 extent_dalloc(tsdn, arena, extent);
1771 }
1772
1773 return err;
1774}
1775
1776void
1777extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1778 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1779 assert(extent_dumpable_get(extent));
1780 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1781 WITNESS_RANK_CORE, 0);
1782
1783	/* Avoid calling the default extent_dalloc unless we have to. */
1784 if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
1785 /*
1786 * Deregister first to avoid a race with other allocating
1787 * threads, and reregister if deallocation fails.
1788 */
1789 extent_deregister(tsdn, extent);
1790 if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
1791 extent)) {
1792 return;
1793 }
1794 extent_reregister(tsdn, extent);
1795 }
1796
1797 if (*r_extent_hooks != &extent_hooks_default) {
1798 extent_hook_pre_reentrancy(tsdn, arena);
1799 }
1800 /* Try to decommit; purge if that fails. */
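	/*
	 * Note: the chain below also tracks whether the pages can be treated
	 * as zeroed afterwards.  A successful decommit or forced purge leaves
	 * pages that will read as zero when next used, so zeroed is set; lazy
	 * purging (and the already-muzzy case) leaves contents unspecified, so
	 * zeroed stays false.
	 */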
1801 bool zeroed;
1802 if (!extent_committed_get(extent)) {
1803 zeroed = true;
1804 } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1805 0, extent_size_get(extent))) {
1806 zeroed = true;
1807 } else if ((*r_extent_hooks)->purge_forced != NULL &&
1808 !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
1809 extent_base_get(extent), extent_size_get(extent), 0,
1810 extent_size_get(extent), arena_ind_get(arena))) {
1811 zeroed = true;
1812 } else if (extent_state_get(extent) == extent_state_muzzy ||
1813 ((*r_extent_hooks)->purge_lazy != NULL &&
1814 !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1815 extent_base_get(extent), extent_size_get(extent), 0,
1816 extent_size_get(extent), arena_ind_get(arena)))) {
1817 zeroed = false;
1818 } else {
1819 zeroed = false;
1820 }
1821 if (*r_extent_hooks != &extent_hooks_default) {
1822 extent_hook_post_reentrancy(tsdn);
1823 }
1824 extent_zeroed_set(extent, zeroed);
1825
1826 if (config_prof) {
1827 extent_gdump_sub(tsdn, extent);
1828 }
1829
1830 extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
1831 extent, false);
1832}
1833
1834static void
1835extent_destroy_default_impl(void *addr, size_t size) {
1836 if (!have_dss || !extent_in_dss(addr)) {
1837 pages_unmap(addr, size);
1838 }
1839}
1840
1841static void
1842extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1843 bool committed, unsigned arena_ind) {
1844 extent_destroy_default_impl(addr, size);
1845}
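/*
 * Note: destroy is the unconditional teardown path (e.g. arena destruction),
 * as opposed to dalloc, which is allowed to fail.  The default implementation
 * unmaps everything except DSS memory, which cannot be unmapped and is simply
 * left in place.
 */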
1846
1847void
1848extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1849 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1850 assert(extent_base_get(extent) != NULL);
1851 assert(extent_size_get(extent) != 0);
1852 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1853 WITNESS_RANK_CORE, 0);
1854
1855 /* Deregister first to avoid a race with other allocating threads. */
1856 extent_deregister(tsdn, extent);
1857
1858 extent_addr_set(extent, extent_base_get(extent));
1859
1860 extent_hooks_assure_initialized(arena, r_extent_hooks);
1861 /* Try to destroy; silently fail otherwise. */
1862 if (*r_extent_hooks == &extent_hooks_default) {
1863 /* Call directly to propagate tsdn. */
1864 extent_destroy_default_impl(extent_base_get(extent),
1865 extent_size_get(extent));
1866 } else if ((*r_extent_hooks)->destroy != NULL) {
1867 extent_hook_pre_reentrancy(tsdn, arena);
1868 (*r_extent_hooks)->destroy(*r_extent_hooks,
1869 extent_base_get(extent), extent_size_get(extent),
1870 extent_committed_get(extent), arena_ind_get(arena));
1871 extent_hook_post_reentrancy(tsdn);
1872 }
1873
1874 extent_dalloc(tsdn, arena, extent);
1875}
1876
1877static bool
1878extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1879 size_t offset, size_t length, unsigned arena_ind) {
1880 return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1881 length);
1882}
1883
1884static bool
1885extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1886 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1887 size_t length, bool growing_retained) {
1888 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1889 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1890
1891 extent_hooks_assure_initialized(arena, r_extent_hooks);
1892 if (*r_extent_hooks != &extent_hooks_default) {
1893 extent_hook_pre_reentrancy(tsdn, arena);
1894 }
1895 bool err = ((*r_extent_hooks)->commit == NULL ||
1896 (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1897 extent_size_get(extent), offset, length, arena_ind_get(arena)));
1898 if (*r_extent_hooks != &extent_hooks_default) {
1899 extent_hook_post_reentrancy(tsdn);
1900 }
1901 extent_committed_set(extent, extent_committed_get(extent) || !err);
1902 return err;
1903}
1904
1905bool
1906extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1907 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1908 size_t length) {
1909 return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1910 length, false);
1911}
1912
1913static bool
1914extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1915 size_t offset, size_t length, unsigned arena_ind) {
1916 return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1917 length);
1918}
1919
1920bool
1921extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
1922 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1923 size_t length) {
1924 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1925 WITNESS_RANK_CORE, 0);
1926
1927 extent_hooks_assure_initialized(arena, r_extent_hooks);
1928
1929 if (*r_extent_hooks != &extent_hooks_default) {
1930 extent_hook_pre_reentrancy(tsdn, arena);
1931 }
1932 bool err = ((*r_extent_hooks)->decommit == NULL ||
1933 (*r_extent_hooks)->decommit(*r_extent_hooks,
1934 extent_base_get(extent), extent_size_get(extent), offset, length,
1935 arena_ind_get(arena)));
1936 if (*r_extent_hooks != &extent_hooks_default) {
1937 extent_hook_post_reentrancy(tsdn);
1938 }
1939 extent_committed_set(extent, extent_committed_get(extent) && err);
1940 return err;
1941}
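/*
 * Note: the commit/decommit wrappers above keep the committed flag in sync
 * with the hook results: a successful commit sets it (committed || !err) and a
 * successful decommit clears it (committed && err), with err following the
 * usual false-on-success convention.
 */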
1942
1943#ifdef PAGES_CAN_PURGE_LAZY
1944static bool
1945extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1946 size_t offset, size_t length, unsigned arena_ind) {
1947 assert(addr != NULL);
1948 assert((offset & PAGE_MASK) == 0);
1949 assert(length != 0);
1950 assert((length & PAGE_MASK) == 0);
1951
1952 return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
1953 length);
1954}
1955#endif
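/*
 * Note: lazy purging only advises the OS that it may reclaim the pages at its
 * convenience (e.g. MADV_FREE), so the mapping remains usable but the contents
 * become unspecified; callers must not assume the pages read as zero.
 */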
1956
1957static bool
1958extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
1959 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1960 size_t length, bool growing_retained) {
1961 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1962 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1963
1964 extent_hooks_assure_initialized(arena, r_extent_hooks);
1965
1966 if ((*r_extent_hooks)->purge_lazy == NULL) {
1967 return true;
1968 }
1969 if (*r_extent_hooks != &extent_hooks_default) {
1970 extent_hook_pre_reentrancy(tsdn, arena);
1971 }
1972 bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1973 extent_base_get(extent), extent_size_get(extent), offset, length,
1974 arena_ind_get(arena));
1975 if (*r_extent_hooks != &extent_hooks_default) {
1976 extent_hook_post_reentrancy(tsdn);
1977 }
1978
1979 return err;
1980}
1981
1982bool
1983extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
1984 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1985 size_t length) {
1986 return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
1987 offset, length, false);
1988}
1989
1990#ifdef PAGES_CAN_PURGE_FORCED
1991static bool
1992extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
1993 size_t size, size_t offset, size_t length, unsigned arena_ind) {
1994 assert(addr != NULL);
1995 assert((offset & PAGE_MASK) == 0);
1996 assert(length != 0);
1997 assert((length & PAGE_MASK) == 0);
1998
1999 return pages_purge_forced((void *)((uintptr_t)addr +
2000 (uintptr_t)offset), length);
2001}
2002#endif
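/*
 * Note: forced purging discards the contents immediately (e.g. MADV_DONTNEED),
 * after which the pages read back as zero; this is why extent_dalloc_wrapper()
 * treats a successful forced purge as zeroing.
 */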
2003
2004static bool
2005extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
2006 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2007 size_t length, bool growing_retained) {
2008 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2009 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2010
2011 extent_hooks_assure_initialized(arena, r_extent_hooks);
2012
2013 if ((*r_extent_hooks)->purge_forced == NULL) {
2014 return true;
2015 }
2016 if (*r_extent_hooks != &extent_hooks_default) {
2017 extent_hook_pre_reentrancy(tsdn, arena);
2018 }
2019 bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
2020 extent_base_get(extent), extent_size_get(extent), offset, length,
2021 arena_ind_get(arena));
2022 if (*r_extent_hooks != &extent_hooks_default) {
2023 extent_hook_post_reentrancy(tsdn);
2024 }
2025 return err;
2026}
2027
2028bool
2029extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
2030 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2031 size_t length) {
2032 return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
2033 offset, length, false);
2034}
2035
2036#ifdef JEMALLOC_MAPS_COALESCE
2037static bool
2038extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
2039 size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
2040 return !maps_coalesce;
2041}
2042#endif
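/*
 * Note: the default split performs no system call; it succeeds (returns false)
 * only when the platform allows portions of a mapping to be managed
 * independently and later coalesced (maps_coalesce), since that is all a split
 * requires.
 */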
2043
2044/*
2045 * Accepts the extent to split, and the characteristics of each side of the
2046 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
2047 * extents (the lower-addressed portion of the split), and the 'b' parameters
2048 * go with the trail (the higher-addressed portion).  This resizes 'extent'
2049 * into the lead and returns the newly created trail, or NULL on error.
2050 */
2051static extent_t *
2052extent_split_impl(tsdn_t *tsdn, arena_t *arena,
2053 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2054 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
2055 bool growing_retained) {
2056 assert(extent_size_get(extent) == size_a + size_b);
2057 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2058 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2059
2060 extent_hooks_assure_initialized(arena, r_extent_hooks);
2061
2062 if ((*r_extent_hooks)->split == NULL) {
2063 return NULL;
2064 }
2065
2066 extent_t *trail = extent_alloc(tsdn, arena);
2067 if (trail == NULL) {
2068 goto label_error_a;
2069 }
2070
2071 extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
2072 size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
2073 extent_state_get(extent), extent_zeroed_get(extent),
2074 extent_committed_get(extent), extent_dumpable_get(extent));
2075
2076 rtree_ctx_t rtree_ctx_fallback;
2077 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2078 rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
2079 {
2080 extent_t lead;
2081
2082 extent_init(&lead, arena, extent_addr_get(extent), size_a,
2083 slab_a, szind_a, extent_sn_get(extent),
2084 extent_state_get(extent), extent_zeroed_get(extent),
2085 extent_committed_get(extent), extent_dumpable_get(extent));
2086
2087 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
2088 true, &lead_elm_a, &lead_elm_b);
2089 }
2090 rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
2091 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
2092 &trail_elm_a, &trail_elm_b);
2093
2094 if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
2095 || trail_elm_b == NULL) {
2096 goto label_error_b;
2097 }
2098
2099 extent_lock2(tsdn, extent, trail);
2100
2101 if (*r_extent_hooks != &extent_hooks_default) {
2102 extent_hook_pre_reentrancy(tsdn, arena);
2103 }
2104 bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
2105 size_a + size_b, size_a, size_b, extent_committed_get(extent),
2106 arena_ind_get(arena));
2107 if (*r_extent_hooks != &extent_hooks_default) {
2108 extent_hook_post_reentrancy(tsdn);
2109 }
2110 if (err) {
2111 goto label_error_c;
2112 }
2113
2114 extent_size_set(extent, size_a);
2115 extent_szind_set(extent, szind_a);
2116
2117 extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2118 szind_a, slab_a);
2119 extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
2120 szind_b, slab_b);
2121
2122 extent_unlock2(tsdn, extent, trail);
2123
2124 return trail;
2125label_error_c:
2126 extent_unlock2(tsdn, extent, trail);
2127label_error_b:
2128 extent_dalloc(tsdn, arena, trail);
2129label_error_a:
2130 return NULL;
2131}
2132
2133extent_t *
2134extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2135 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2136 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2137 return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2138 szind_a, slab_a, size_b, szind_b, slab_b, false);
2139}
2140
2141static bool
2142extent_merge_default_impl(void *addr_a, void *addr_b) {
2143 if (!maps_coalesce) {
2144 return true;
2145 }
2146 if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
2147 return true;
2148 }
2149
2150 return false;
2151}
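/*
 * Note: the default merge refuses (returns true) when mappings cannot be
 * coalesced on this platform, or when the two ranges are not mergeable with
 * respect to the DSS (e.g. one lies inside the DSS and the other does not).
 */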
2152
2153#ifdef JEMALLOC_MAPS_COALESCE
2154static bool
2155extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
2156 void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
2157 return extent_merge_default_impl(addr_a, addr_b);
2158}
2159#endif
2160
2161static bool
2162extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
2163 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
2164 bool growing_retained) {
2165 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2166 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2167
2168 extent_hooks_assure_initialized(arena, r_extent_hooks);
2169
2170 if ((*r_extent_hooks)->merge == NULL) {
2171 return true;
2172 }
2173
2174 bool err;
2175 if (*r_extent_hooks == &extent_hooks_default) {
2176 /* Call directly to propagate tsdn. */
2177 err = extent_merge_default_impl(extent_base_get(a),
2178 extent_base_get(b));
2179 } else {
2180 extent_hook_pre_reentrancy(tsdn, arena);
2181 err = (*r_extent_hooks)->merge(*r_extent_hooks,
2182 extent_base_get(a), extent_size_get(a), extent_base_get(b),
2183 extent_size_get(b), extent_committed_get(a),
2184 arena_ind_get(arena));
2185 extent_hook_post_reentrancy(tsdn);
2186 }
2187
2188 if (err) {
2189 return true;
2190 }
2191
2192 /*
2193 * The rtree writes must happen while all the relevant elements are
2194 * owned, so the following code uses decomposed helper functions rather
2195 * than extent_{,de}register() to do things in the right order.
2196 */
2197 rtree_ctx_t rtree_ctx_fallback;
2198 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2199 rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
2200 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
2201 &a_elm_b);
2202 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
2203 &b_elm_b);
2204
2205 extent_lock2(tsdn, a, b);
2206
2207 if (a_elm_b != NULL) {
2208 rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
2209 SC_NSIZES, false);
2210 }
2211 if (b_elm_b != NULL) {
2212 rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
2213 SC_NSIZES, false);
2214 } else {
2215 b_elm_b = b_elm_a;
2216 }
2217
2218 extent_size_set(a, extent_size_get(a) + extent_size_get(b));
2219 extent_szind_set(a, SC_NSIZES);
2220 extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
2221 extent_sn_get(a) : extent_sn_get(b));
2222 extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
2223
2224 extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
2225 false);
2226
2227 extent_unlock2(tsdn, a, b);
2228
2229 extent_dalloc(tsdn, extent_arena_get(b), b);
2230
2231 return false;
2232}
2233
2234bool
2235extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2236 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2237 return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
2238}
2239
2240bool
2241extent_boot(void) {
2242 if (rtree_new(&extents_rtree, true)) {
2243 return true;
2244 }
2245
2246 if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
2247 WITNESS_RANK_EXTENT_POOL)) {
2248 return true;
2249 }
2250
2251 if (have_dss) {
2252 extent_dss_boot();
2253 }
2254
2255 return false;
2256}
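/*
 * Usage sketch (hypothetical application code, not part of this file): custom
 * extent hooks are installed per arena through the documented
 * "arena.<i>.extent_hooks" mallctl, e.g.:
 *
 *	extent_hooks_t *hooks = &my_hooks;	// must outlive the arena
 *	char cmd[64];
 *	snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_ind);
 *	mallctl(cmd, NULL, NULL, (void *)&hooks, sizeof(hooks));
 *
 * Hook members left NULL (where permitted) opt out of the corresponding
 * operation, which the wrappers in this file handle via their NULL checks.
 */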
2257