#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

/******************************************************************************/
/* Data. */

/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

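/*
 * Fixed-point representation of the smoothstep decay curve (generated in
 * smoothstep.h).  Each entry is scaled by 2^SMOOTHSTEP_BFP; the decay code
 * multiplies backlog page counts by these factors and shifts the sum back
 * down, avoiding floating point on the purge path.
 */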
const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
};

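/*
 * Precomputed per-bin division magic for div_compute(), so that translating a
 * pointer offset within a slab into a region index requires no hardware
 * division.  (Initialization, presumably from bin_infos[].reg_size, happens
 * during arena bootstrapping.)
 */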
static div_info_t arena_binind_div_info[SC_NBINS];

size_t opt_huge_threshold = HUGE_THRESHOLD_DEFAULT;
size_t huge_threshold = HUGE_THRESHOLD_DEFAULT;
static unsigned huge_arena_ind;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);

/******************************************************************************/

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats,
    arena_stats_extents_t *estats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	atomic_store_zu(&astats->extent_avail,
	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
	    ATOMIC_RELAXED);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));

	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(SC_NBINS + i));
	}

	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
		    retained_bytes;
		dirty = extents_nextents_get(&arena->extents_dirty, i);
		muzzy = extents_nextents_get(&arena->extents_muzzy, i);
		retained = extents_nextents_get(&arena->extents_retained, i);
		dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
		muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
		retained_bytes =
		    extents_nbytes_get(&arena->extents_retained, i);

		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
		    ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
		    ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
		    ATOMIC_RELAXED);
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		szind_t i = 0;
		for (; i < SC_NBINS; i++) {
			cache_bin_t *tbin = &descriptor->bins_small[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			cache_bin_t *tbin = &descriptor->bins_large[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < SC_NBINS; i++) {
		bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}

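/*
 * Map a pointer within a slab to its region index: conceptually
 * regind = (ptr - slab_base) / reg_size, with the division carried out via
 * the precomputed arena_binind_div_info entry for the bin.
 */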
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}

static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

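/*
 * The muzzy extent pool can only be non-empty when lazy purging (e.g.
 * MADV_FREE) is available and muzzy decay is not configured to purge
 * immediately, so callers use this to skip a pointless lookup.
 */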
static bool
arena_may_have_muzzy(arena_t *arena) {
	return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
}

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL && arena_may_have_muzzy(arena)) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

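/*
 * Deadlines are jittered by up to one extra interval (when decay is enabled),
 * presumably so that many arenas created together do not all reach their
 * purge deadlines in lockstep.
 */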
static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

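/*
 * The backlog limit is a fixed-point weighted sum:
 * npages_limit = (sum_i backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP,
 * i.e. how many unpurged pages are still allowed given how long each batch of
 * pages has been waiting in the backlog.
 */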
static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}

static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, current_npages - npages_limit,
		    is_background_thread);
	}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* arena_decay_try_purge() may drop decay->mtx, so record nunpurged first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

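/*
 * The epoch interval is decay_ms / SMOOTHSTEP_NSTEPS, so the backlog's
 * SMOOTHSTEP_NSTEPS slots together span decay_ms; unused dirty (or muzzy)
 * pages thus decay to zero over approximately decay_ms milliseconds.
 */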
static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}

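/*
 * Valid decay_ms settings: -1 (never purge), 0 (purge immediately), or a
 * positive duration no larger than NSTIME_SEC_MAX seconds.
 */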
static bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}

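/*
 * Advance the decay epoch if its deadline has passed, purging down to the new
 * backlog limit as a side effect.  Returns true iff the epoch advanced;
 * callers use this to decide whether to notify the background thread.
 */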
static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, extents_npages_get(extents),
			    is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purging until a future
	 * epoch, so purging only happens during epoch advances, or when
	 * triggered by a background thread (as a scheduled event).
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    size_t npages_decay_max, extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
	size_t nstashed = 0;
	extent_t *extent;
	while (nstashed < npages_decay_max &&
	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
	}
	return nstashed;
}

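/*
 * Second phase of a purge pass: walk the stashed extents and move each one a
 * step down the dirty -> muzzy -> unmapped ladder.  Dirty extents are lazily
 * purged into the muzzy pool when that is possible and worthwhile; otherwise
 * (and for muzzy extents) they are handed back through
 * extent_dalloc_wrapper(), which unmaps or retains them.
 */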
static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

/*
 * npages_limit: Decay at most npages_decay_max pages without violating the
 * invariant (extents_npages_get(extents) >= npages_limit).  We need an upper
 * bound (npages_decay_max) on the number of pages purged in one run;
 * otherwise, new pages could be added to extents faster than they are purged,
 * and the purging thread would never finish.
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, npages_decay_max, &decay_extents);
	if (npurge != 0) {
		size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    extents_npages_get(extents), is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);

		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
		    npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the extent
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != SC_NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
	for (unsigned i = 0; i < SC_NBINS; i++) {
		extent_t *slab;
		bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			slab = bin->slabcur;
			bin->slabcur = NULL;
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
		    NULL) {
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
		    slab = extent_list_first(&bin->slabs_full)) {
			arena_bin_slabs_full_remove(arena, bin, slab);
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curslabs = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	/*
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents is
	 * leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	 */
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
	    &arena->extents_retained, 0)) != NULL) {
		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
	}
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);

	/*
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain.
	 */
	assert(extents_npages_get(&arena->extents_dirty) == 0);
	assert(extents_npages_get(&arena->extents_muzzy) == 0);

	/* Deallocate retained memory. */
	arena_destroy_retained(tsd_tsdn(tsd), arena);

	/*
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from the
	 * arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so as
	 * long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	 */
	arena_set(base_ind_get(arena->base), NULL);

	/*
	 * Destroy the base allocator, which manages all metadata ever mapped by
	 * this arena.
	 */
	base_delete(tsd_tsdn(tsd), arena->base);
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	zero = false;
	commit = true;
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
	}

	return slab;
}

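/*
 * Allocate a slab for a small size class, preferring reuse over new mappings:
 * first the dirty extent pool, then (when it can be non-empty) the muzzy
 * pool, and only then fresh pages via arena_slab_alloc_hard().
 */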
static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL && arena_may_have_muzzy(arena)) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_set(slab, bin_info->nregs);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	extent_t *slab;
	const bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}
	/* No existing slabs have any space available. */

	bin_info = &bin_infos[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return slab;
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}

	return NULL;
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	const bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &bin_infos[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without the
		 * bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the slab.
				 */
				if (extent_nfree_get(slab) == bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					    bin);
				} else {
					arena_bin_lower_slab(tsdn, arena, slab,
					    bin);
				}
			}
			return ret;
		}

		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL) {
		return NULL;
	}
	bin->slabcur = slab;

	assert(extent_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(slab, bin_info);
}

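/*
 * Refill a thread cache bin from its arena bin.  The fill count is
 * ncached_max >> lg_fill_div, and pointers are written into the nfill slots
 * just below tbin->avail in allocation order, so (as noted below) low regions
 * get handed out first.
 */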
void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill;
	bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
		prof_idump(tsdn);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i++) {
		extent_t *slab;
		void *ptr;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
			ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
		} else {
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		}
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}

void
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < SC_NBINS);
	bin = &arena->bins[binind];
	usize = sz_index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
	}

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return NULL;
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &bin_infos[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SC_SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SC_SMALL_MAXCLASS
	    && (alignment < PAGE
	    || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return ret;
}

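/*
 * Sampled small allocations are backed by SC_LARGE_MINCLASS extents;
 * promotion rewrites the extent's size-class index (and its rtree copy) to
 * the requested usize so isalloc() reports the true size.
 * arena_prof_demote() reverses this before deallocation.
 */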
1446 | void |
1447 | arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { |
1448 | cassert(config_prof); |
1449 | assert(ptr != NULL); |
1450 | assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); |
1451 | assert(usize <= SC_SMALL_MAXCLASS); |
1452 | |
1453 | rtree_ctx_t rtree_ctx_fallback; |
1454 | rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); |
1455 | |
1456 | extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, |
1457 | (uintptr_t)ptr, true); |
1458 | arena_t *arena = extent_arena_get(extent); |
1459 | |
1460 | szind_t szind = sz_size2index(usize); |
1461 | extent_szind_set(extent, szind); |
1462 | rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, |
1463 | szind, false); |
1464 | |
1465 | prof_accum_cancel(tsdn, &arena->prof_accum, usize); |
1466 | |
1467 | assert(isalloc(tsdn, ptr) == usize); |
1468 | } |
1469 | |
1470 | static size_t |
1471 | arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { |
1472 | cassert(config_prof); |
1473 | assert(ptr != NULL); |
1474 | |
1475 | extent_szind_set(extent, SC_NBINS); |
1476 | rtree_ctx_t rtree_ctx_fallback; |
1477 | rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); |
1478 | rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, |
1479 | SC_NBINS, false); |
1480 | |
1481 | assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); |
1482 | |
1483 | return SC_LARGE_MINCLASS; |
1484 | } |
1485 | |
1486 | void |
1487 | arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, |
1488 | bool slow_path) { |
1489 | cassert(config_prof); |
1490 | assert(opt_prof); |
1491 | |
1492 | extent_t *extent = iealloc(tsdn, ptr); |
1493 | size_t usize = arena_prof_demote(tsdn, extent, ptr); |
1494 | if (usize <= tcache_maxclass) { |
1495 | tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, |
1496 | sz_size2index(usize), slow_path); |
1497 | } else { |
1498 | large_dalloc(tsdn, extent); |
1499 | } |
1500 | } |
1501 | |
1502 | static void |
1503 | arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { |
1504 | /* Dissociate slab from bin. */ |
1505 | if (slab == bin->slabcur) { |
1506 | bin->slabcur = NULL; |
1507 | } else { |
1508 | szind_t binind = extent_szind_get(slab); |
1509 | const bin_info_t *bin_info = &bin_infos[binind]; |
1510 | |
1511 | /* |
1512 | * The following block's conditional is necessary because if the |
1513 | * slab only contains one region, then it never gets inserted |
1514 | * into the non-full slabs heap. |
1515 | */ |
1516 | if (bin_info->nregs == 1) { |
1517 | arena_bin_slabs_full_remove(arena, bin, slab); |
1518 | } else { |
1519 | arena_bin_slabs_nonfull_remove(bin, slab); |
1520 | } |
1521 | } |
1522 | } |

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/******************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}
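
/*
 * Note: extent_snad_comp() orders slabs by (serial number, address), so the
 * swap above keeps bin->slabcur pointing at the older/lower slab. E.g., if
 * slabcur was created after the slab being lowered, the two trade places and
 * the newer slab returns to the non-full heap (or the full list if it has no
 * free regions).
 */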

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	bin_t *bin = &arena->bins[binind];
	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}
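
/*
 * Summary of the slab-state transitions above (illustrative):
 *
 *	nfree == nregs            slab became empty -> dissociate and free it
 *	nfree == 1, != slabcur    slab was full     -> move full to non-full
 *	otherwise                 already non-full  -> no list movement needed
 */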

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize) {
	bool ret;
	/* Callers passing non-zero extra must have already clamped it. */
	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);

	extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(size > SC_LARGE_MAXCLASS)) {
		ret = true;
		goto done;
	}

	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
	    <= SC_SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left
		 * the same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SC_SMALL_MAXCLASS
		    || sz_size2index(usize_max) != sz_size2index(oldsize))
		    && (size > oldsize || usize_max < oldsize)) {
			ret = true;
			goto done;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		ret = false;
	} else if (oldsize >= SC_LARGE_MINCLASS
	    && usize_max >= SC_LARGE_MINCLASS) {
		ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	} else {
		ret = true;
	}
done:
	assert(extent == iealloc(tsdn, ptr));
	*newsize = extent_usize_get(extent);

	return ret;
}
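
/*
 * Example of the no-move criteria (assuming the default size classes): an
 * 80-byte small allocation can "grow" in place to any size in (64, 80],
 * because the request still maps to the 80-byte class; growing it to 96
 * bytes returns true (a move is required), since the size class changes.
 */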

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SC_SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		UNUSED size_t newsize;
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
		    &newsize)) {
			hook_invoke_expand(hook_args->is_realloc
			    ? hook_expand_realloc : hook_expand_rallocx,
			    ptr, oldsize, usize, (uintptr_t)ptr,
			    hook_args->args);
			return ptr;
		}
	}

	if (oldsize >= SC_LARGE_MINCLASS
	    && usize >= SC_LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, ptr, usize,
		    alignment, zero, tcache, hook_args);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
	    hook_args->args);
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);

	/*
	 * Junk/zero filling was already done by ipalloct()/arena_malloc().
	 */
	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);

	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}
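
/*
 * The index computation above is a round-down: sz_psz2ind(limit + 1) - 1
 * yields the largest page-size class that does not exceed the requested
 * limit, so a limit that falls between two classes is rounded down rather
 * than up and the arena never grows past the caller's cap.
 */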

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena
		 * reduces the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at
		 * the cost of test repeatability.  For debug builds, instead
		 * use a deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are
	 * in the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are
	 * not in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained,
	    extent_state_retained, false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < SC_NBINS; i++) {
		bool err = bin_init(&arena->bins[i]);
		if (err) {
			goto label_error;
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (test_hooks_arena_new_hook) {
			test_hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

arena_t *
arena_choose_huge(tsd_t *tsd) {
	/* huge_arena_ind can be 0 during init (will use a0). */
	if (huge_arena_ind == 0) {
		assert(!malloc_initialized());
	}

	arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
	if (huge_arena == NULL) {
		/* Create the huge arena on demand. */
		assert(huge_arena_ind != 0);
		huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
		if (huge_arena == NULL) {
			return NULL;
		}
		/*
		 * Purge eagerly for huge allocations, because: 1) the number
		 * of huge allocations is usually small, which means
		 * ticker-based decay is not reliable; and 2) less immediate
		 * reuse is expected for huge allocations.
		 */
		if (arena_dirty_decay_ms_default_get() > 0) {
			arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
		}
		if (arena_muzzy_decay_ms_default_get() > 0) {
			arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
		}
	}

	return huge_arena;
}

bool
arena_init_huge(void) {
	bool huge_enabled;

	/* The threshold should be a large size class. */
	if (opt_huge_threshold > SC_LARGE_MAXCLASS ||
	    opt_huge_threshold < SC_LARGE_MINCLASS) {
		opt_huge_threshold = 0;
		huge_threshold = SC_LARGE_MAXCLASS + PAGE;
		huge_enabled = false;
	} else {
		/* Reserve the index for the huge arena. */
		huge_arena_ind = narenas_total_get();
		huge_threshold = opt_huge_threshold;
		huge_enabled = true;
	}

	return huge_enabled;
}
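
/*
 * Note on the disabled branch above: SC_LARGE_MAXCLASS + PAGE is larger than
 * any size a valid allocation can have, so using it as the threshold makes
 * the "route to the huge arena" comparison unsatisfiable, disabling the
 * feature without requiring a separate flag on the fast path.
 */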

void
arena_boot(sc_data_t *sc_data) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
	for (unsigned i = 0; i < SC_NBINS; i++) {
		sc_t *sc = &sc_data->sc[i];
		div_init(&arena_binind_div_info[i],
		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
	}
}
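
/*
 * The div_init() calls above precompute magic-number divisors so that the
 * hot path can turn an intra-slab offset into a region index without a
 * hardware divide. As an illustrative example, a 48-byte class is encoded
 * as (1 << lg_base) + (ndelta << lg_delta) = 32 + 16 = 48, and its
 * div_info_t reduces offset / 48 to a multiply and a shift.
 */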

void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < SC_NBINS; i++) {
		bin_prefork(tsdn, &arena->bins[i]);
	}
}
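
/*
 * The numbered prefork phases above pin down the order in which arena locks
 * are acquired before fork(): decay mutexes first, then the stats, extent,
 * and base mutexes, and the per-bin locks last. The postfork functions below
 * release (or, in the child, reinitialize) the same locks in roughly the
 * reverse order, consistent with the witness lock ranks.
 */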

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < SC_NBINS; i++) {
		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < SC_NBINS; i++) {
		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}