#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_record(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
    bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

	return (chunk_hooks);
}

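/*
 * Install new chunk hooks for the arena and return the previously installed
 * hooks (this is the backing function for the arena.<i>.chunk_hooks mallctl).
 */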
chunk_hooks_t
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers. There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
#define ATOMIC_COPY_HOOK(n) do { \
	union { \
		chunk_##n##_t **n; \
		void **v; \
	} u; \
	u.n = &arena->chunk_hooks.n; \
	atomic_write_p(u.v, chunk_hooks->n); \
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

	return (old_chunk_hooks);
}

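/*
 * If the caller passed a default-initialized chunk_hooks_t (i.e. it still
 * compares equal to CHUNK_HOOKS_INITIALIZER), replace it with the arena's
 * currently installed hooks.
 */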
static void
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(tsdn, arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}

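/*
 * Record the chunk's extent node in chunks_rtree so the chunk can later be
 * looked up by address, and update the chunk counters that drive prof gdump
 * triggering.  Returns true on error.
 */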
bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump(tsdn);
	}

	return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, false, false);
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}

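/*
 * Attempt to satisfy an allocation by reusing an extent from the given
 * cached/retained trees.  The selected extent is trimmed to the requested
 * alignment and size via the split hook, any leading/trailing space is
 * reinserted as smaller extents, and the result is committed and (if
 * requested) zeroed before being returned.
 */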
static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(tsdn, arena, node);
			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
			chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
			    chunks_ad, cache, ret, size + trailsize, zeroed,
			    committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(tsdn, arena);
			if (node == NULL) {
				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
				chunk_record(tsdn, arena, chunk_hooks,
				    chunks_szad, chunks_ad, cache, ret, size +
				    trailsize, zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, zeroed, committed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
		    cache, ret, size, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(tsdn, arena, node);
	if (*zero) {
		if (!zeroed) {
			if (config_valgrind)
				JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
			memset(ret, 0, size);
		} else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
		if (config_valgrind)
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, while still using them
 * if they are returned.
 */
static void *
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL)
		return (ret);
	/* mmap. */
	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero, commit;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

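/*
 * Try to allocate from the arena's cache of previously freed (dirty) chunks;
 * returns NULL if no suitable cached extent is available.
 */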
void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;
	bool commit;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	commit = true;
	ret = chunk_recycle(tsdn, arena, chunk_hooks,
	    &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
	    new_addr, size, alignment, zero, &commit, dalloc_node);
	if (ret == NULL)
		return (NULL);
	assert(commit);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static arena_t *
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

static void *
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = chunk_arena_get(tsdn, arena_ind);

	return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
	    zero, commit));
}

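/*
 * Try to reuse retained virtual memory, i.e. address ranges that were
 * previously purged/decommitted but never returned to the system.
 */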
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(tsdn, arena, chunk_hooks,
	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
	    new_addr, size, alignment, zero, commit, true);

	if (config_stats && ret != NULL)
		arena->stats.retained -= size;

	return (ret);
}

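/*
 * Allocate a chunk on behalf of the arena: first try retained virtual memory,
 * then fall back to the arena's alloc hook (or directly to
 * chunk_alloc_default_impl() so that tsdn can be propagated).
 */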
void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
		if (chunk_hooks->alloc == chunk_alloc_default) {
			/* Call directly to propagate tsdn. */
			ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
			    size, alignment, zero, commit);
		} else {
			ret = chunk_hooks->alloc(new_addr, size, alignment,
			    zero, commit, arena->ind);
		}

		if (ret == NULL)
			return (NULL);

		if (config_valgrind && chunk_hooks->alloc !=
		    chunk_alloc_default)
			JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	}

	return (ret);
}

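/*
 * Insert the freed chunk into the given cached/retained trees, coalescing it
 * with adjacent extents (via the merge hook) whenever possible.
 */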
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range. This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(tsdn, arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure. Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(tsdn, arena, chunk_hooks,
				    chunk, size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, !unzeroed,
		    committed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range. This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(tsdn, arena, prev);
	}

label_return:
	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}

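/*
 * Return a chunk to the arena's cache of dirty chunks, then give the arena a
 * chance to purge.
 */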
void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
	arena_maybe_purge(tsdn, arena);
}

static bool
chunk_dalloc_default_impl(tsdn_t *tsdn, void *chunk, size_t size)
{

	if (!have_dss || !chunk_in_dss(tsdn, chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();

	return (chunk_dalloc_default_impl(tsdn, chunk, size));
}

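/*
 * Deallocate a chunk via the dalloc hook; if the hook declines to unmap it,
 * decommit or purge the pages and retain the address range for later reuse.
 */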
void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, bool zeroed, bool committed)
{
	bool err;

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
	/* Try to deallocate. */
	if (chunk_hooks->dalloc == chunk_dalloc_default) {
		/* Call directly to propagate tsdn. */
		err = chunk_dalloc_default_impl(tsdn, chunk, size);
	} else
		err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);

	if (!err)
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
		committed = chunk_hooks->decommit(chunk, size, 0, size,
		    arena->ind);
	}
	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);

	if (config_stats)
		arena->stats.retained += size;
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

bool
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

static bool
chunk_merge_default_impl(tsdn_t *tsdn, void *chunk_a, void *chunk_b)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn,
	    chunk_b))
		return (true);

	return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();

	return (chunk_merge_default_impl(tsdn, chunk_a, chunk_b));
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
	    sizeof(rtree_node_elm_t)));
}

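/*
 * One-time initialization: derive chunksize, chunksize_mask, and chunk_npages
 * from opt_lg_chunk, and set up the DSS allocator and chunks_rtree.
 */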
bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify actual page size is equal to or an integral multiple of
	 * configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}

void
chunk_prefork(tsdn_t *tsdn)
{

	chunk_dss_prefork(tsdn);
}

void
chunk_postfork_parent(tsdn_t *tsdn)
{

	chunk_dss_postfork_parent(tsdn);
}

void
chunk_postfork_child(tsdn_t *tsdn)
{

	chunk_dss_postfork_child(tsdn);
}