| 1 | #define JEMALLOC_ARENA_C_ |
| 2 | #include "jemalloc/internal/jemalloc_internal.h" |
| 3 | |
| 4 | /******************************************************************************/ |
| 5 | /* Data. */ |
| 6 | |
| 7 | purge_mode_t opt_purge = PURGE_DEFAULT; |
| 8 | const char *purge_mode_names[] = { |
| 9 | "ratio" , |
| 10 | "decay" , |
| 11 | "N/A" |
| 12 | }; |
| 13 | ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; |
| 14 | static ssize_t lg_dirty_mult_default; |
| 15 | ssize_t opt_decay_time = DECAY_TIME_DEFAULT; |
| 16 | static ssize_t decay_time_default; |
| 17 | |
| 18 | arena_bin_info_t arena_bin_info[NBINS]; |
| 19 | |
| 20 | size_t map_bias; |
| 21 | size_t map_misc_offset; |
| 22 | size_t arena_maxrun; /* Max run size for arenas. */ |
| 23 | size_t large_maxclass; /* Max large size class. */ |
| 24 | size_t run_quantize_max; /* Max run_quantize_*() input. */ |
| 25 | static size_t small_maxrun; /* Max run size for small size classes. */ |
| 26 | static bool *small_run_tab; /* Valid small run page multiples. */ |
| 27 | static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */ |
| 28 | static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */ |
| 29 | unsigned nlclasses; /* Number of large size classes. */ |
| 30 | unsigned nhclasses; /* Number of huge size classes. */ |
| 31 | static szind_t runs_avail_bias; /* Size index for first runs_avail tree. */ |
| 32 | static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */ |
| 33 | |
| 34 | /******************************************************************************/ |
| 35 | /* |
| 36 | * Function prototypes for static functions that are referenced prior to |
| 37 | * definition. |
| 38 | */ |
| 39 | |
| 40 | static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, |
| 41 | size_t ndirty_limit); |
| 42 | static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, |
| 43 | bool dirty, bool cleaned, bool decommitted); |
| 44 | static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, |
| 45 | arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); |
| 46 | static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, |
| 47 | arena_run_t *run, arena_bin_t *bin); |
| 48 | |
| 49 | /******************************************************************************/ |
| 50 | |
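/*
 * Compute the size recorded in the chunk map for the run associated with the
 * given map_misc element.
 */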
| 51 | JEMALLOC_INLINE_C size_t |
| 52 | arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm) |
| 53 | { |
| 54 | arena_chunk_t *chunk; |
| 55 | size_t pageind, mapbits; |
| 56 | |
| 57 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); |
| 58 | pageind = arena_miscelm_to_pageind(miscelm); |
| 59 | mapbits = arena_mapbits_get(chunk, pageind); |
| 60 | return (arena_mapbits_size_decode(mapbits)); |
| 61 | } |
| 62 | |
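/* Address-ordered comparator used by the runs_avail pairing heaps. */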
| 63 | JEMALLOC_INLINE_C int |
| 64 | arena_run_addr_comp(const arena_chunk_map_misc_t *a, |
| 65 | const arena_chunk_map_misc_t *b) |
| 66 | { |
| 67 | uintptr_t a_miscelm = (uintptr_t)a; |
| 68 | uintptr_t b_miscelm = (uintptr_t)b; |
| 69 | |
| 70 | assert(a != NULL); |
| 71 | assert(b != NULL); |
| 72 | |
| 73 | return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); |
| 74 | } |
| 75 | |
| 76 | /* Generate pairing heap functions. */ |
| 77 | ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t, |
| 78 | ph_link, arena_run_addr_comp) |
| 79 | |
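/*
 * Return the largest quantized run size that is no larger than the given
 * page-aligned size, i.e. round down to a size that can actually back a run.
 */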
| 80 | static size_t |
| 81 | run_quantize_floor_compute(size_t size) |
| 82 | { |
| 83 | size_t qsize; |
| 84 | |
| 85 | assert(size != 0); |
| 86 | assert(size == PAGE_CEILING(size)); |
| 87 | |
| 88 | /* Don't change sizes that are valid small run sizes. */ |
| 89 | if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) |
| 90 | return (size); |
| 91 | |
| 92 | /* |
| 93 | * Round down to the nearest run size that can actually be requested |
| 94 | * during normal large allocation. Add large_pad so that cache index |
| 95 | * randomization can offset the allocation from the page boundary. |
| 96 | */ |
| 97 | qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; |
| 98 | if (qsize <= SMALL_MAXCLASS + large_pad) |
| 99 | return (run_quantize_floor_compute(size - large_pad)); |
| 100 | assert(qsize <= size); |
| 101 | return (qsize); |
| 102 | } |
| 103 | |
| 104 | static size_t |
| 105 | run_quantize_ceil_compute_hard(size_t size) |
| 106 | { |
| 107 | size_t large_run_size_next; |
| 108 | |
| 109 | assert(size != 0); |
| 110 | assert(size == PAGE_CEILING(size)); |
| 111 | |
| 112 | /* |
| 113 | * Return the next quantized size greater than the input size. |
| 114 | * Quantized sizes comprise the union of run sizes that back small |
| 115 | * region runs, and run sizes that back large regions with no explicit |
| 116 | * alignment constraints. |
| 117 | */ |
| 118 | |
| 119 | if (size > SMALL_MAXCLASS) { |
| 120 | large_run_size_next = PAGE_CEILING(index2size(size2index(size - |
| 121 | large_pad) + 1) + large_pad); |
| 122 | } else |
| 123 | large_run_size_next = SIZE_T_MAX; |
| 124 | if (size >= small_maxrun) |
| 125 | return (large_run_size_next); |
| 126 | |
| 127 | while (true) { |
| 128 | size += PAGE; |
| 129 | assert(size <= small_maxrun); |
| 130 | if (small_run_tab[size >> LG_PAGE]) { |
| 131 | if (large_run_size_next < size) |
| 132 | return (large_run_size_next); |
| 133 | return (size); |
| 134 | } |
| 135 | } |
| 136 | } |
| 137 | |
| 138 | static size_t |
| 139 | run_quantize_ceil_compute(size_t size) |
| 140 | { |
| 141 | size_t qsize = run_quantize_floor_compute(size); |
| 142 | |
| 143 | if (qsize < size) { |
| 144 | /* |
| 145 | * Skip a quantization that may have an adequately large run, |
| 146 | * because under-sized runs may be mixed in. This only happens |
| 147 | * when an unusual size is requested, i.e. for aligned |
| 148 | * allocation, and is just one of several places where linear |
| 149 | * search would potentially find sufficiently aligned available |
| 150 | * memory somewhere lower. |
| 151 | */ |
| 152 | qsize = run_quantize_ceil_compute_hard(qsize); |
| 153 | } |
| 154 | return (qsize); |
| 155 | } |
| 156 | |
| 157 | #ifdef JEMALLOC_JET |
| 158 | #undef run_quantize_floor |
| 159 | #define run_quantize_floor JEMALLOC_N(n_run_quantize_floor) |
| 160 | #endif |
| 161 | static size_t |
| 162 | run_quantize_floor(size_t size) |
| 163 | { |
| 164 | size_t ret; |
| 165 | |
| 166 | assert(size > 0); |
| 167 | assert(size <= run_quantize_max); |
| 168 | assert((size & PAGE_MASK) == 0); |
| 169 | |
| 170 | ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1]; |
| 171 | assert(ret == run_quantize_floor_compute(size)); |
| 172 | return (ret); |
| 173 | } |
| 174 | #ifdef JEMALLOC_JET |
| 175 | #undef run_quantize_floor |
| 176 | #define run_quantize_floor JEMALLOC_N(run_quantize_floor) |
| 177 | run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor); |
| 178 | #endif |
| 179 | |
| 180 | #ifdef JEMALLOC_JET |
| 181 | #undef run_quantize_ceil |
| 182 | #define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil) |
| 183 | #endif |
| 184 | static size_t |
| 185 | run_quantize_ceil(size_t size) |
| 186 | { |
| 187 | size_t ret; |
| 188 | |
| 189 | assert(size > 0); |
| 190 | assert(size <= run_quantize_max); |
| 191 | assert((size & PAGE_MASK) == 0); |
| 192 | |
| 193 | ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1]; |
| 194 | assert(ret == run_quantize_ceil_compute(size)); |
| 195 | return (ret); |
| 196 | } |
| 197 | #ifdef JEMALLOC_JET |
| 198 | #undef run_quantize_ceil |
| 199 | #define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) |
| 200 | run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil); |
| 201 | #endif |
| 202 | |
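/* Return the runs_avail heap that holds runs of quantized size index ind. */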
| 203 | static arena_run_heap_t * |
| 204 | arena_runs_avail_get(arena_t *arena, szind_t ind) |
| 205 | { |
| 206 | |
| 207 | assert(ind >= runs_avail_bias); |
| 208 | assert(ind - runs_avail_bias < runs_avail_nclasses); |
| 209 | |
| 210 | return (&arena->runs_avail[ind - runs_avail_bias]); |
| 211 | } |
| 212 | |
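/*
 * Track the unallocated run starting at pageind by inserting it into the
 * size-indexed runs_avail heap.
 */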
| 213 | static void |
| 214 | arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, |
| 215 | size_t npages) |
| 216 | { |
| 217 | szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get( |
| 218 | arena_miscelm_get_const(chunk, pageind)))); |
| 219 | assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> |
| 220 | LG_PAGE)); |
| 221 | arena_run_heap_insert(arena_runs_avail_get(arena, ind), |
| 222 | arena_miscelm_get_mutable(chunk, pageind)); |
| 223 | } |
| 224 | |
| 225 | static void |
| 226 | arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, |
| 227 | size_t npages) |
| 228 | { |
| 229 | szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get( |
| 230 | arena_miscelm_get_const(chunk, pageind)))); |
| 231 | assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> |
| 232 | LG_PAGE)); |
| 233 | arena_run_heap_remove(arena_runs_avail_get(arena, ind), |
| 234 | arena_miscelm_get_mutable(chunk, pageind)); |
| 235 | } |
| 236 | |
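/*
 * Link the dirty run starting at pageind into the arena's dirty run ring and
 * update the arena's dirty page count.
 */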
| 237 | static void |
| 238 | arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, |
| 239 | size_t npages) |
| 240 | { |
| 241 | arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, |
| 242 | pageind); |
| 243 | |
| 244 | assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> |
| 245 | LG_PAGE)); |
| 246 | assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); |
| 247 | assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == |
| 248 | CHUNK_MAP_DIRTY); |
| 249 | |
| 250 | qr_new(&miscelm->rd, rd_link); |
| 251 | qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); |
| 252 | arena->ndirty += npages; |
| 253 | } |
| 254 | |
| 255 | static void |
| 256 | arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, |
| 257 | size_t npages) |
| 258 | { |
| 259 | arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, |
| 260 | pageind); |
| 261 | |
| 262 | assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> |
| 263 | LG_PAGE)); |
| 264 | assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); |
| 265 | assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == |
| 266 | CHUNK_MAP_DIRTY); |
| 267 | |
| 268 | qr_remove(&miscelm->rd, rd_link); |
| 269 | assert(arena->ndirty >= npages); |
| 270 | arena->ndirty -= npages; |
| 271 | } |
| 272 | |
| 273 | static size_t |
| 274 | arena_chunk_dirty_npages(const extent_node_t *node) |
| 275 | { |
| 276 | |
| 277 | return (extent_node_size_get(node) >> LG_PAGE); |
| 278 | } |
| 279 | |
| 280 | void |
| 281 | arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) |
| 282 | { |
| 283 | |
| 284 | if (cache) { |
| 285 | extent_node_dirty_linkage_init(node); |
| 286 | extent_node_dirty_insert(node, &arena->runs_dirty, |
| 287 | &arena->chunks_cache); |
| 288 | arena->ndirty += arena_chunk_dirty_npages(node); |
| 289 | } |
| 290 | } |
| 291 | |
| 292 | void |
| 293 | arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) |
| 294 | { |
| 295 | |
| 296 | if (dirty) { |
| 297 | extent_node_dirty_remove(node); |
| 298 | assert(arena->ndirty >= arena_chunk_dirty_npages(node)); |
| 299 | arena->ndirty -= arena_chunk_dirty_npages(node); |
| 300 | } |
| 301 | } |
| 302 | |
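/*
 * Allocate a region from the given small run by claiming the first unset bit
 * in the run's region bitmap.
 */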
| 303 | JEMALLOC_INLINE_C void * |
| 304 | arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) |
| 305 | { |
| 306 | void *ret; |
| 307 | size_t regind; |
| 308 | arena_chunk_map_misc_t *miscelm; |
| 309 | void *rpages; |
| 310 | |
| 311 | assert(run->nfree > 0); |
| 312 | assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); |
| 313 | |
| 314 | regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info); |
| 315 | miscelm = arena_run_to_miscelm(run); |
| 316 | rpages = arena_miscelm_to_rpages(miscelm); |
| 317 | ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + |
| 318 | (uintptr_t)(bin_info->reg_interval * regind)); |
| 319 | run->nfree--; |
| 320 | return (ret); |
| 321 | } |
| 322 | |
| 323 | JEMALLOC_INLINE_C void |
| 324 | arena_run_reg_dalloc(arena_run_t *run, void *ptr) |
| 325 | { |
| 326 | arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); |
| 327 | size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; |
| 328 | size_t mapbits = arena_mapbits_get(chunk, pageind); |
| 329 | szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); |
| 330 | arena_bin_info_t *bin_info = &arena_bin_info[binind]; |
| 331 | size_t regind = arena_run_regind(run, bin_info, ptr); |
| 332 | |
| 333 | assert(run->nfree < bin_info->nregs); |
| 334 | /* Freeing an interior pointer can cause assertion failure. */ |
| 335 | assert(((uintptr_t)ptr - |
| 336 | ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + |
| 337 | (uintptr_t)bin_info->reg0_offset)) % |
| 338 | (uintptr_t)bin_info->reg_interval == 0); |
| 339 | assert((uintptr_t)ptr >= |
| 340 | (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + |
| 341 | (uintptr_t)bin_info->reg0_offset); |
| 342 | /* Freeing an unallocated pointer can cause assertion failure. */ |
| 343 | assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); |
| 344 | |
| 345 | bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); |
| 346 | run->nfree++; |
| 347 | } |
| 348 | |
| 349 | JEMALLOC_INLINE_C void |
| 350 | arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) |
| 351 | { |
| 352 | |
| 353 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + |
| 354 | (run_ind << LG_PAGE)), (npages << LG_PAGE)); |
| 355 | memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, |
| 356 | (npages << LG_PAGE)); |
| 357 | } |
| 358 | |
| 359 | JEMALLOC_INLINE_C void |
| 360 | arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) |
| 361 | { |
| 362 | |
| 363 | JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind |
| 364 | << LG_PAGE)), PAGE); |
| 365 | } |
| 366 | |
| 367 | JEMALLOC_INLINE_C void |
| 368 | arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) |
| 369 | { |
| 370 | size_t i; |
| 371 | UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); |
| 372 | |
| 373 | arena_run_page_mark_zeroed(chunk, run_ind); |
| 374 | for (i = 0; i < PAGE / sizeof(size_t); i++) |
| 375 | assert(p[i] == 0); |
| 376 | } |
| 377 | |
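/*
 * Increase the active page count, updating the cactive statistic at chunk
 * granularity if stats are enabled.
 */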
| 378 | static void |
| 379 | arena_nactive_add(arena_t *arena, size_t add_pages) |
| 380 | { |
| 381 | |
| 382 | if (config_stats) { |
| 383 | size_t cactive_add = CHUNK_CEILING((arena->nactive + |
| 384 | add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << |
| 385 | LG_PAGE); |
| 386 | if (cactive_add != 0) |
| 387 | stats_cactive_add(cactive_add); |
| 388 | } |
| 389 | arena->nactive += add_pages; |
| 390 | } |
| 391 | |
| 392 | static void |
| 393 | arena_nactive_sub(arena_t *arena, size_t sub_pages) |
| 394 | { |
| 395 | |
| 396 | if (config_stats) { |
| 397 | size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) - |
| 398 | CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE); |
| 399 | if (cactive_sub != 0) |
| 400 | stats_cactive_sub(cactive_sub); |
| 401 | } |
| 402 | arena->nactive -= sub_pages; |
| 403 | } |
| 404 | |
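/*
 * Remove the unallocated run starting at run_ind from runs_avail, account for
 * the first need_pages pages as active, and re-track any trailing pages as a
 * smaller unallocated run.
 */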
| 405 | static void |
| 406 | arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, |
| 407 | size_t flag_dirty, size_t flag_decommitted, size_t need_pages) |
| 408 | { |
| 409 | size_t total_pages, rem_pages; |
| 410 | |
| 411 | assert(flag_dirty == 0 || flag_decommitted == 0); |
| 412 | |
| 413 | total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> |
| 414 | LG_PAGE; |
| 415 | assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == |
| 416 | flag_dirty); |
| 417 | assert(need_pages <= total_pages); |
| 418 | rem_pages = total_pages - need_pages; |
| 419 | |
| 420 | arena_avail_remove(arena, chunk, run_ind, total_pages); |
| 421 | if (flag_dirty != 0) |
| 422 | arena_run_dirty_remove(arena, chunk, run_ind, total_pages); |
| 423 | arena_nactive_add(arena, need_pages); |
| 424 | |
| 425 | /* Keep track of trailing unused pages for later use. */ |
| 426 | if (rem_pages > 0) { |
| 427 | size_t flags = flag_dirty | flag_decommitted; |
| 428 | size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED : |
| 429 | 0; |
| 430 | |
| 431 | arena_mapbits_unallocated_set(chunk, run_ind+need_pages, |
| 432 | (rem_pages << LG_PAGE), flags | |
| 433 | (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & |
| 434 | flag_unzeroed_mask)); |
| 435 | arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, |
| 436 | (rem_pages << LG_PAGE), flags | |
| 437 | (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & |
| 438 | flag_unzeroed_mask)); |
| 439 | if (flag_dirty != 0) { |
| 440 | arena_run_dirty_insert(arena, chunk, run_ind+need_pages, |
| 441 | rem_pages); |
| 442 | } |
| 443 | arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); |
| 444 | } |
| 445 | } |
| 446 | |
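/*
 * Turn the first size bytes of an unallocated run into a large run.  If
 * remove is true, the run is taken out of runs_avail; if zero is true, the
 * pages are zeroed on return.  Returns true if committing the pages fails.
 */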
| 447 | static bool |
| 448 | arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, |
| 449 | bool remove, bool zero) |
| 450 | { |
| 451 | arena_chunk_t *chunk; |
| 452 | arena_chunk_map_misc_t *miscelm; |
| 453 | size_t flag_dirty, flag_decommitted, run_ind, need_pages; |
| 454 | size_t flag_unzeroed_mask; |
| 455 | |
| 456 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); |
| 457 | miscelm = arena_run_to_miscelm(run); |
| 458 | run_ind = arena_miscelm_to_pageind(miscelm); |
| 459 | flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); |
| 460 | flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); |
| 461 | need_pages = (size >> LG_PAGE); |
| 462 | assert(need_pages > 0); |
| 463 | |
| 464 | if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, |
| 465 | run_ind << LG_PAGE, size, arena->ind)) |
| 466 | return (true); |
| 467 | |
| 468 | if (remove) { |
| 469 | arena_run_split_remove(arena, chunk, run_ind, flag_dirty, |
| 470 | flag_decommitted, need_pages); |
| 471 | } |
| 472 | |
| 473 | if (zero) { |
| 474 | if (flag_decommitted != 0) { |
| 475 | /* The run is untouched, and therefore zeroed. */ |
| 476 | JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void |
| 477 | *)((uintptr_t)chunk + (run_ind << LG_PAGE)), |
| 478 | (need_pages << LG_PAGE)); |
| 479 | } else if (flag_dirty != 0) { |
| 480 | /* The run is dirty, so all pages must be zeroed. */ |
| 481 | arena_run_zero(chunk, run_ind, need_pages); |
| 482 | } else { |
| 483 | /* |
| 484 | * The run is clean, so some pages may be zeroed (i.e. |
| 485 | * never before touched). |
| 486 | */ |
| 487 | size_t i; |
| 488 | for (i = 0; i < need_pages; i++) { |
| 489 | if (arena_mapbits_unzeroed_get(chunk, run_ind+i) |
| 490 | != 0) |
| 491 | arena_run_zero(chunk, run_ind+i, 1); |
| 492 | else if (config_debug) { |
| 493 | arena_run_page_validate_zeroed(chunk, |
| 494 | run_ind+i); |
| 495 | } else { |
| 496 | arena_run_page_mark_zeroed(chunk, |
| 497 | run_ind+i); |
| 498 | } |
| 499 | } |
| 500 | } |
| 501 | } else { |
| 502 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + |
| 503 | (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); |
| 504 | } |
| 505 | |
| 506 | /* |
| 507 | * Set the last element first, in case the run only contains one page |
| 508 | * (i.e. both statements set the same element). |
| 509 | */ |
| 510 | flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? |
| 511 | CHUNK_MAP_UNZEROED : 0; |
| 512 | arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | |
| 513 | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, |
| 514 | run_ind+need_pages-1))); |
| 515 | arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | |
| 516 | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); |
| 517 | return (false); |
| 518 | } |
| 519 | |
| 520 | static bool |
| 521 | arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) |
| 522 | { |
| 523 | |
| 524 | return (arena_run_split_large_helper(arena, run, size, true, zero)); |
| 525 | } |
| 526 | |
| 527 | static bool |
| 528 | arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) |
| 529 | { |
| 530 | |
| 531 | return (arena_run_split_large_helper(arena, run, size, false, zero)); |
| 532 | } |
| 533 | |
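/*
 * Turn an unallocated run into a small run of the given bin index, setting
 * per-page map bits that record the bin and the page's offset within the run.
 * Returns true if committing the pages fails.
 */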
| 534 | static bool |
| 535 | arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, |
| 536 | szind_t binind) |
| 537 | { |
| 538 | arena_chunk_t *chunk; |
| 539 | arena_chunk_map_misc_t *miscelm; |
| 540 | size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; |
| 541 | |
| 542 | assert(binind != BININD_INVALID); |
| 543 | |
| 544 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); |
| 545 | miscelm = arena_run_to_miscelm(run); |
| 546 | run_ind = arena_miscelm_to_pageind(miscelm); |
| 547 | flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); |
| 548 | flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); |
| 549 | need_pages = (size >> LG_PAGE); |
| 550 | assert(need_pages > 0); |
| 551 | |
| 552 | if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, |
| 553 | run_ind << LG_PAGE, size, arena->ind)) |
| 554 | return (true); |
| 555 | |
| 556 | arena_run_split_remove(arena, chunk, run_ind, flag_dirty, |
| 557 | flag_decommitted, need_pages); |
| 558 | |
| 559 | for (i = 0; i < need_pages; i++) { |
| 560 | size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, |
| 561 | run_ind+i); |
| 562 | arena_mapbits_small_set(chunk, run_ind+i, i, binind, |
| 563 | flag_unzeroed); |
| 564 | if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) |
| 565 | arena_run_page_validate_zeroed(chunk, run_ind+i); |
| 566 | } |
| 567 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + |
| 568 | (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); |
| 569 | return (false); |
| 570 | } |
| 571 | |
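/* Use the cached spare chunk rather than allocating a new one. */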
| 572 | static arena_chunk_t * |
| 573 | arena_chunk_init_spare(arena_t *arena) |
| 574 | { |
| 575 | arena_chunk_t *chunk; |
| 576 | |
| 577 | assert(arena->spare != NULL); |
| 578 | |
| 579 | chunk = arena->spare; |
| 580 | arena->spare = NULL; |
| 581 | |
| 582 | assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); |
| 583 | assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); |
| 584 | assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == |
| 585 | arena_maxrun); |
| 586 | assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == |
| 587 | arena_maxrun); |
| 588 | assert(arena_mapbits_dirty_get(chunk, map_bias) == |
| 589 | arena_mapbits_dirty_get(chunk, chunk_npages-1)); |
| 590 | |
| 591 | return (chunk); |
| 592 | } |
| 593 | |
| 594 | static bool |
| 595 | arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 596 | bool zero) |
| 597 | { |
| 598 | |
| 599 | /* |
| 600 | * The extent node notion of "committed" doesn't directly apply to |
| 601 | * arena chunks. Arbitrarily mark them as committed. The commit state |
| 602 | * of runs is tracked individually, and upon chunk deallocation the |
| 603 | * entire chunk is in a consistent commit state. |
| 604 | */ |
| 605 | extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); |
| 606 | extent_node_achunk_set(&chunk->node, true); |
| 607 | return (chunk_register(tsdn, chunk, &chunk->node)); |
| 608 | } |
| 609 | |
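/*
 * Allocate a chunk via the chunk hooks, dropping arena->lock across the call.
 * The header pages are committed and the chunk is registered; failure at
 * either step causes the chunk to be deallocated and NULL returned.
 */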
| 610 | static arena_chunk_t * |
| 611 | arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, |
| 612 | chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) |
| 613 | { |
| 614 | arena_chunk_t *chunk; |
| 615 | |
| 616 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 617 | |
| 618 | chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, |
| 619 | NULL, chunksize, chunksize, zero, commit); |
| 620 | if (chunk != NULL && !*commit) { |
| 621 | /* Commit header. */ |
| 622 | if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << |
| 623 | LG_PAGE, arena->ind)) { |
| 624 | chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, |
| 625 | (void *)chunk, chunksize, *zero, *commit); |
| 626 | chunk = NULL; |
| 627 | } |
| 628 | } |
| 629 | if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) { |
| 630 | if (!*commit) { |
| 631 | /* Undo commit of header. */ |
| 632 | chunk_hooks->decommit(chunk, chunksize, 0, map_bias << |
| 633 | LG_PAGE, arena->ind); |
| 634 | } |
| 635 | chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk, |
| 636 | chunksize, *zero, *commit); |
| 637 | chunk = NULL; |
| 638 | } |
| 639 | |
| 640 | malloc_mutex_lock(tsdn, &arena->lock); |
| 641 | return (chunk); |
| 642 | } |
| 643 | |
| 644 | static arena_chunk_t * |
| 645 | arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, |
| 646 | bool *commit) |
| 647 | { |
| 648 | arena_chunk_t *chunk; |
| 649 | chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; |
| 650 | |
| 651 | chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, |
| 652 | chunksize, zero, true); |
| 653 | if (chunk != NULL) { |
| 654 | if (arena_chunk_register(tsdn, arena, chunk, *zero)) { |
| 655 | chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, |
| 656 | chunksize, true); |
| 657 | return (NULL); |
| 658 | } |
| 659 | *commit = true; |
| 660 | } |
| 661 | if (chunk == NULL) { |
| 662 | chunk = arena_chunk_alloc_internal_hard(tsdn, arena, |
| 663 | &chunk_hooks, zero, commit); |
| 664 | } |
| 665 | |
| 666 | if (config_stats && chunk != NULL) { |
| 667 | arena->stats.mapped += chunksize; |
| 668 | arena->stats.metadata_mapped += (map_bias << LG_PAGE); |
| 669 | } |
| 670 | |
| 671 | return (chunk); |
| 672 | } |
| 673 | |
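/*
 * Allocate and initialize a fresh arena chunk, setting up its page map so
 * that the chunk contains a single maximal unallocated run.
 */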
| 674 | static arena_chunk_t * |
| 675 | arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena) |
| 676 | { |
| 677 | arena_chunk_t *chunk; |
| 678 | bool zero, commit; |
| 679 | size_t flag_unzeroed, flag_decommitted, i; |
| 680 | |
| 681 | assert(arena->spare == NULL); |
| 682 | |
| 683 | zero = false; |
| 684 | commit = false; |
| 685 | chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit); |
| 686 | if (chunk == NULL) |
| 687 | return (NULL); |
| 688 | |
| 689 | /* |
| 690 | * Initialize the map to contain one maximal free untouched run. Mark |
| 691 | * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed |
| 692 | * or decommitted chunk. |
| 693 | */ |
| 694 | flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; |
| 695 | flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; |
| 696 | arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, |
| 697 | flag_unzeroed | flag_decommitted); |
| 698 | /* |
| 699 | * There is no need to initialize the internal page map entries unless |
| 700 | * the chunk is not zeroed. |
| 701 | */ |
| 702 | if (!zero) { |
| 703 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( |
| 704 | (void *)arena_bitselm_get_const(chunk, map_bias+1), |
| 705 | (size_t)((uintptr_t)arena_bitselm_get_const(chunk, |
| 706 | chunk_npages-1) - |
| 707 | (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); |
| 708 | for (i = map_bias+1; i < chunk_npages-1; i++) |
| 709 | arena_mapbits_internal_set(chunk, i, flag_unzeroed); |
| 710 | } else { |
| 711 | JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void |
| 712 | *)arena_bitselm_get_const(chunk, map_bias+1), |
| 713 | (size_t)((uintptr_t)arena_bitselm_get_const(chunk, |
| 714 | chunk_npages-1) - |
| 715 | (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); |
| 716 | if (config_debug) { |
| 717 | for (i = map_bias+1; i < chunk_npages-1; i++) { |
| 718 | assert(arena_mapbits_unzeroed_get(chunk, i) == |
| 719 | flag_unzeroed); |
| 720 | } |
| 721 | } |
| 722 | } |
| 723 | arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, |
| 724 | flag_unzeroed); |
| 725 | |
| 726 | return (chunk); |
| 727 | } |
| 728 | |
| 729 | static arena_chunk_t * |
| 730 | arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) |
| 731 | { |
| 732 | arena_chunk_t *chunk; |
| 733 | |
| 734 | if (arena->spare != NULL) |
| 735 | chunk = arena_chunk_init_spare(arena); |
| 736 | else { |
| 737 | chunk = arena_chunk_init_hard(tsdn, arena); |
| 738 | if (chunk == NULL) |
| 739 | return (NULL); |
| 740 | } |
| 741 | |
| 742 | ql_elm_new(&chunk->node, ql_link); |
| 743 | ql_tail_insert(&arena->achunks, &chunk->node, ql_link); |
| 744 | arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); |
| 745 | |
| 746 | return (chunk); |
| 747 | } |
| 748 | |
| 749 | static void |
| 750 | arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) |
| 751 | { |
| 752 | bool committed; |
| 753 | chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; |
| 754 | |
| 755 | chunk_deregister(chunk, &chunk->node); |
| 756 | |
| 757 | committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0); |
| 758 | if (!committed) { |
| 759 | /* |
| 760 | * Decommit the header. Mark the chunk as decommitted even if |
| 761 | * header decommit fails, since treating a partially committed |
| 762 | * chunk as committed has a high potential for causing later |
| 763 | * access of decommitted memory. |
| 764 | */ |
| 765 | chunk_hooks = chunk_hooks_get(tsdn, arena); |
| 766 | chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE, |
| 767 | arena->ind); |
| 768 | } |
| 769 | |
| 770 | chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize, |
| 771 | committed); |
| 772 | |
| 773 | if (config_stats) { |
| 774 | arena->stats.mapped -= chunksize; |
| 775 | arena->stats.metadata_mapped -= (map_bias << LG_PAGE); |
| 776 | } |
| 777 | } |
| 778 | |
| 779 | static void |
| 780 | arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare) |
| 781 | { |
| 782 | |
| 783 | assert(arena->spare != spare); |
| 784 | |
| 785 | if (arena_mapbits_dirty_get(spare, map_bias) != 0) { |
| 786 | arena_run_dirty_remove(arena, spare, map_bias, |
| 787 | chunk_npages-map_bias); |
| 788 | } |
| 789 | |
| 790 | arena_chunk_discard(tsdn, arena, spare); |
| 791 | } |
| 792 | |
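/*
 * Retire a completely unallocated chunk.  The chunk becomes the arena's
 * spare; any previous spare is discarded.
 */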
| 793 | static void |
| 794 | arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) |
| 795 | { |
| 796 | arena_chunk_t *spare; |
| 797 | |
| 798 | assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); |
| 799 | assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); |
| 800 | assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == |
| 801 | arena_maxrun); |
| 802 | assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == |
| 803 | arena_maxrun); |
| 804 | assert(arena_mapbits_dirty_get(chunk, map_bias) == |
| 805 | arena_mapbits_dirty_get(chunk, chunk_npages-1)); |
| 806 | assert(arena_mapbits_decommitted_get(chunk, map_bias) == |
| 807 | arena_mapbits_decommitted_get(chunk, chunk_npages-1)); |
| 808 | |
| 809 | /* Remove run from runs_avail, so that the arena does not use it. */ |
| 810 | arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); |
| 811 | |
| 812 | ql_remove(&arena->achunks, &chunk->node, ql_link); |
| 813 | spare = arena->spare; |
| 814 | arena->spare = chunk; |
| 815 | if (spare != NULL) |
| 816 | arena_spare_discard(tsdn, arena, spare); |
| 817 | } |
| 818 | |
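/*
 * Huge size classes are indexed after the small and large classes, hence the
 * "- nlclasses - NBINS" adjustment in the hstats index computations below.
 */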
| 819 | static void |
| 820 | arena_huge_malloc_stats_update(arena_t *arena, size_t usize) |
| 821 | { |
| 822 | szind_t index = size2index(usize) - nlclasses - NBINS; |
| 823 | |
| 824 | cassert(config_stats); |
| 825 | |
| 826 | arena->stats.nmalloc_huge++; |
| 827 | arena->stats.allocated_huge += usize; |
| 828 | arena->stats.hstats[index].nmalloc++; |
| 829 | arena->stats.hstats[index].curhchunks++; |
| 830 | } |
| 831 | |
| 832 | static void |
| 833 | arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) |
| 834 | { |
| 835 | szind_t index = size2index(usize) - nlclasses - NBINS; |
| 836 | |
| 837 | cassert(config_stats); |
| 838 | |
| 839 | arena->stats.nmalloc_huge--; |
| 840 | arena->stats.allocated_huge -= usize; |
| 841 | arena->stats.hstats[index].nmalloc--; |
| 842 | arena->stats.hstats[index].curhchunks--; |
| 843 | } |
| 844 | |
| 845 | static void |
| 846 | arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) |
| 847 | { |
| 848 | szind_t index = size2index(usize) - nlclasses - NBINS; |
| 849 | |
| 850 | cassert(config_stats); |
| 851 | |
| 852 | arena->stats.ndalloc_huge++; |
| 853 | arena->stats.allocated_huge -= usize; |
| 854 | arena->stats.hstats[index].ndalloc++; |
| 855 | arena->stats.hstats[index].curhchunks--; |
| 856 | } |
| 857 | |
| 858 | static void |
| 859 | arena_huge_reset_stats_cancel(arena_t *arena, size_t usize) |
| 860 | { |
| 861 | szind_t index = size2index(usize) - nlclasses - NBINS; |
| 862 | |
| 863 | cassert(config_stats); |
| 864 | |
| 865 | arena->stats.ndalloc_huge++; |
| 866 | arena->stats.hstats[index].ndalloc--; |
| 867 | } |
| 868 | |
| 869 | static void |
| 870 | arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) |
| 871 | { |
| 872 | szind_t index = size2index(usize) - nlclasses - NBINS; |
| 873 | |
| 874 | cassert(config_stats); |
| 875 | |
| 876 | arena->stats.ndalloc_huge--; |
| 877 | arena->stats.allocated_huge += usize; |
| 878 | arena->stats.hstats[index].ndalloc--; |
| 879 | arena->stats.hstats[index].curhchunks++; |
| 880 | } |
| 881 | |
| 882 | static void |
| 883 | arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) |
| 884 | { |
| 885 | |
| 886 | arena_huge_dalloc_stats_update(arena, oldsize); |
| 887 | arena_huge_malloc_stats_update(arena, usize); |
| 888 | } |
| 889 | |
| 890 | static void |
| 891 | arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, |
| 892 | size_t usize) |
| 893 | { |
| 894 | |
| 895 | arena_huge_dalloc_stats_update_undo(arena, oldsize); |
| 896 | arena_huge_malloc_stats_update_undo(arena, usize); |
| 897 | } |
| 898 | |
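/*
 * Extent node cache: reuse a previously retired node if one is available,
 * otherwise fall back to the base allocator.
 */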
| 899 | extent_node_t * |
| 900 | arena_node_alloc(tsdn_t *tsdn, arena_t *arena) |
| 901 | { |
| 902 | extent_node_t *node; |
| 903 | |
| 904 | malloc_mutex_lock(tsdn, &arena->node_cache_mtx); |
| 905 | node = ql_last(&arena->node_cache, ql_link); |
| 906 | if (node == NULL) { |
| 907 | malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); |
| 908 | return (base_alloc(tsdn, sizeof(extent_node_t))); |
| 909 | } |
| 910 | ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); |
| 911 | malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); |
| 912 | return (node); |
| 913 | } |
| 914 | |
| 915 | void |
| 916 | arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) |
| 917 | { |
| 918 | |
| 919 | malloc_mutex_lock(tsdn, &arena->node_cache_mtx); |
| 920 | ql_elm_new(node, ql_link); |
| 921 | ql_tail_insert(&arena->node_cache, node, ql_link); |
| 922 | malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); |
| 923 | } |
| 924 | |
| 925 | static void * |
| 926 | arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, |
| 927 | chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero, |
| 928 | size_t csize) |
| 929 | { |
| 930 | void *ret; |
| 931 | bool commit = true; |
| 932 | |
| 933 | ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, |
| 934 | alignment, zero, &commit); |
| 935 | if (ret == NULL) { |
| 936 | /* Revert optimistic stats updates. */ |
| 937 | malloc_mutex_lock(tsdn, &arena->lock); |
| 938 | if (config_stats) { |
| 939 | arena_huge_malloc_stats_update_undo(arena, usize); |
| 940 | arena->stats.mapped -= usize; |
| 941 | } |
| 942 | arena_nactive_sub(arena, usize >> LG_PAGE); |
| 943 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 944 | } |
| 945 | |
| 946 | return (ret); |
| 947 | } |
| 948 | |
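/*
 * Allocate a chunk-aligned extent for a huge allocation.  Stats and the
 * active page count are updated optimistically before the allocation attempt;
 * arena_chunk_alloc_huge_hard() reverts them on failure.
 */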
| 949 | void * |
| 950 | arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, |
| 951 | size_t alignment, bool *zero) |
| 952 | { |
| 953 | void *ret; |
| 954 | chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; |
| 955 | size_t csize = CHUNK_CEILING(usize); |
| 956 | |
| 957 | malloc_mutex_lock(tsdn, &arena->lock); |
| 958 | |
| 959 | /* Optimistically update stats. */ |
| 960 | if (config_stats) { |
| 961 | arena_huge_malloc_stats_update(arena, usize); |
| 962 | arena->stats.mapped += usize; |
| 963 | } |
| 964 | arena_nactive_add(arena, usize >> LG_PAGE); |
| 965 | |
| 966 | ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, |
| 967 | alignment, zero, true); |
| 968 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 969 | if (ret == NULL) { |
| 970 | ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, |
| 971 | usize, alignment, zero, csize); |
| 972 | } |
| 973 | |
| 974 | return (ret); |
| 975 | } |
| 976 | |
| 977 | void |
| 978 | arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize) |
| 979 | { |
| 980 | chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; |
| 981 | size_t csize; |
| 982 | |
| 983 | csize = CHUNK_CEILING(usize); |
| 984 | malloc_mutex_lock(tsdn, &arena->lock); |
| 985 | if (config_stats) { |
| 986 | arena_huge_dalloc_stats_update(arena, usize); |
| 987 | arena->stats.mapped -= usize; |
| 988 | } |
| 989 | arena_nactive_sub(arena, usize >> LG_PAGE); |
| 990 | |
| 991 | chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true); |
| 992 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 993 | } |
| 994 | |
| 995 | void |
| 996 | arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, |
| 997 | size_t oldsize, size_t usize) |
| 998 | { |
| 999 | |
| 1000 | assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); |
| 1001 | assert(oldsize != usize); |
| 1002 | |
| 1003 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1004 | if (config_stats) |
| 1005 | arena_huge_ralloc_stats_update(arena, oldsize, usize); |
| 1006 | if (oldsize < usize) |
| 1007 | arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); |
| 1008 | else |
| 1009 | arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); |
| 1010 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1011 | } |
| 1012 | |
| 1013 | void |
| 1014 | arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, |
| 1015 | size_t oldsize, size_t usize) |
| 1016 | { |
| 1017 | size_t udiff = oldsize - usize; |
| 1018 | size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); |
| 1019 | |
| 1020 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1021 | if (config_stats) { |
| 1022 | arena_huge_ralloc_stats_update(arena, oldsize, usize); |
| 1023 | if (cdiff != 0) |
| 1024 | arena->stats.mapped -= cdiff; |
| 1025 | } |
| 1026 | arena_nactive_sub(arena, udiff >> LG_PAGE); |
| 1027 | |
| 1028 | if (cdiff != 0) { |
| 1029 | chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; |
| 1030 | void *nchunk = (void *)((uintptr_t)chunk + |
| 1031 | CHUNK_CEILING(usize)); |
| 1032 | |
| 1033 | chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, |
| 1034 | true); |
| 1035 | } |
| 1036 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1037 | } |
| 1038 | |
| 1039 | static bool |
| 1040 | arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, |
| 1041 | chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, |
| 1042 | bool *zero, void *nchunk, size_t udiff, size_t cdiff) |
| 1043 | { |
| 1044 | bool err; |
| 1045 | bool commit = true; |
| 1046 | |
| 1047 | err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, |
| 1048 | chunksize, zero, &commit) == NULL); |
| 1049 | if (err) { |
| 1050 | /* Revert optimistic stats updates. */ |
| 1051 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1052 | if (config_stats) { |
| 1053 | arena_huge_ralloc_stats_update_undo(arena, oldsize, |
| 1054 | usize); |
| 1055 | arena->stats.mapped -= cdiff; |
| 1056 | } |
| 1057 | arena_nactive_sub(arena, udiff >> LG_PAGE); |
| 1058 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1059 | } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, |
| 1060 | cdiff, true, arena->ind)) { |
| 1061 | chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, |
| 1062 | *zero, true); |
| 1063 | err = true; |
| 1064 | } |
| 1065 | return (err); |
| 1066 | } |
| 1067 | |
| 1068 | bool |
| 1069 | arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, |
| 1070 | size_t oldsize, size_t usize, bool *zero) |
| 1071 | { |
| 1072 | bool err; |
| 1073 | chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); |
| 1074 | void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); |
| 1075 | size_t udiff = usize - oldsize; |
| 1076 | size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); |
| 1077 | |
| 1078 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1079 | |
| 1080 | /* Optimistically update stats. */ |
| 1081 | if (config_stats) { |
| 1082 | arena_huge_ralloc_stats_update(arena, oldsize, usize); |
| 1083 | arena->stats.mapped += cdiff; |
| 1084 | } |
| 1085 | arena_nactive_add(arena, udiff >> LG_PAGE); |
| 1086 | |
| 1087 | err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, |
| 1088 | chunksize, zero, true) == NULL); |
| 1089 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1090 | if (err) { |
| 1091 | err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, |
| 1092 | &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff, |
| 1093 | cdiff); |
| 1094 | } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, |
| 1095 | cdiff, true, arena->ind)) { |
| 1096 | chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, |
| 1097 | *zero, true); |
| 1098 | err = true; |
| 1099 | } |
| 1100 | |
| 1101 | return (err); |
| 1102 | } |
| 1103 | |
| 1104 | /* |
| 1105 | * Do first-best-fit run selection, i.e. select the lowest run that best fits. |
| 1106 | * Run sizes are indexed, so not all candidate runs are necessarily exactly the |
| 1107 | * same size. |
| 1108 | */ |
| 1109 | static arena_run_t * |
| 1110 | arena_run_first_best_fit(arena_t *arena, size_t size) |
| 1111 | { |
| 1112 | szind_t ind, i; |
| 1113 | |
| 1114 | ind = size2index(run_quantize_ceil(size)); |
| 1115 | for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) { |
| 1116 | arena_chunk_map_misc_t *miscelm = arena_run_heap_first( |
| 1117 | arena_runs_avail_get(arena, i)); |
| 1118 | if (miscelm != NULL) |
| 1119 | return (&miscelm->run); |
| 1120 | } |
| 1121 | |
| 1122 | return (NULL); |
| 1123 | } |
| 1124 | |
| 1125 | static arena_run_t * |
| 1126 | arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) |
| 1127 | { |
| 1128 | arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); |
| 1129 | if (run != NULL) { |
| 1130 | if (arena_run_split_large(arena, run, size, zero)) |
| 1131 | run = NULL; |
| 1132 | } |
| 1133 | return (run); |
| 1134 | } |
| 1135 | |
| 1136 | static arena_run_t * |
| 1137 | arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) |
| 1138 | { |
| 1139 | arena_chunk_t *chunk; |
| 1140 | arena_run_t *run; |
| 1141 | |
| 1142 | assert(size <= arena_maxrun); |
| 1143 | assert(size == PAGE_CEILING(size)); |
| 1144 | |
| 1145 | /* Search the arena's chunks for the lowest best fit. */ |
| 1146 | run = arena_run_alloc_large_helper(arena, size, zero); |
| 1147 | if (run != NULL) |
| 1148 | return (run); |
| 1149 | |
| 1150 | /* |
| 1151 | * No usable runs. Create a new chunk from which to allocate the run. |
| 1152 | */ |
| 1153 | chunk = arena_chunk_alloc(tsdn, arena); |
| 1154 | if (chunk != NULL) { |
| 1155 | run = &arena_miscelm_get_mutable(chunk, map_bias)->run; |
| 1156 | if (arena_run_split_large(arena, run, size, zero)) |
| 1157 | run = NULL; |
| 1158 | return (run); |
| 1159 | } |
| 1160 | |
| 1161 | /* |
| 1162 | * arena_chunk_alloc() failed, but another thread may have made |
| 1163 | * sufficient memory available while this one dropped arena->lock in |
| 1164 | * arena_chunk_alloc(), so search one more time. |
| 1165 | */ |
| 1166 | return (arena_run_alloc_large_helper(arena, size, zero)); |
| 1167 | } |
| 1168 | |
| 1169 | static arena_run_t * |
| 1170 | arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) |
| 1171 | { |
| 1172 | arena_run_t *run = arena_run_first_best_fit(arena, size); |
| 1173 | if (run != NULL) { |
| 1174 | if (arena_run_split_small(arena, run, size, binind)) |
| 1175 | run = NULL; |
| 1176 | } |
| 1177 | return (run); |
| 1178 | } |
| 1179 | |
| 1180 | static arena_run_t * |
| 1181 | arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) |
| 1182 | { |
| 1183 | arena_chunk_t *chunk; |
| 1184 | arena_run_t *run; |
| 1185 | |
| 1186 | assert(size <= arena_maxrun); |
| 1187 | assert(size == PAGE_CEILING(size)); |
| 1188 | assert(binind != BININD_INVALID); |
| 1189 | |
| 1190 | /* Search the arena's chunks for the lowest best fit. */ |
| 1191 | run = arena_run_alloc_small_helper(arena, size, binind); |
| 1192 | if (run != NULL) |
| 1193 | return (run); |
| 1194 | |
| 1195 | /* |
| 1196 | * No usable runs. Create a new chunk from which to allocate the run. |
| 1197 | */ |
| 1198 | chunk = arena_chunk_alloc(tsdn, arena); |
| 1199 | if (chunk != NULL) { |
| 1200 | run = &arena_miscelm_get_mutable(chunk, map_bias)->run; |
| 1201 | if (arena_run_split_small(arena, run, size, binind)) |
| 1202 | run = NULL; |
| 1203 | return (run); |
| 1204 | } |
| 1205 | |
| 1206 | /* |
| 1207 | * arena_chunk_alloc() failed, but another thread may have made |
| 1208 | * sufficient memory available while this one dropped arena->lock in |
| 1209 | * arena_chunk_alloc(), so search one more time. |
| 1210 | */ |
| 1211 | return (arena_run_alloc_small_helper(arena, size, binind)); |
| 1212 | } |
| 1213 | |
| 1214 | static bool |
| 1215 | arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) |
| 1216 | { |
| 1217 | |
| 1218 | return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) |
| 1219 | << 3)); |
| 1220 | } |
| 1221 | |
| 1222 | ssize_t |
| 1223 | arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena) |
| 1224 | { |
| 1225 | ssize_t lg_dirty_mult; |
| 1226 | |
| 1227 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1228 | lg_dirty_mult = arena->lg_dirty_mult; |
| 1229 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1230 | |
| 1231 | return (lg_dirty_mult); |
| 1232 | } |
| 1233 | |
| 1234 | bool |
| 1235 | arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult) |
| 1236 | { |
| 1237 | |
| 1238 | if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) |
| 1239 | return (true); |
| 1240 | |
| 1241 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1242 | arena->lg_dirty_mult = lg_dirty_mult; |
| 1243 | arena_maybe_purge(tsdn, arena); |
| 1244 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1245 | |
| 1246 | return (false); |
| 1247 | } |
| 1248 | |
| 1249 | static void |
| 1250 | arena_decay_deadline_init(arena_t *arena) |
| 1251 | { |
| 1252 | |
| 1253 | assert(opt_purge == purge_mode_decay); |
| 1254 | |
| 1255 | /* |
| 1256 | * Generate a new deadline that is uniformly random within the next |
| 1257 | * epoch after the current one. |
| 1258 | */ |
| 1259 | nstime_copy(&arena->decay_deadline, &arena->decay_epoch); |
| 1260 | nstime_add(&arena->decay_deadline, &arena->decay_interval); |
| 1261 | if (arena->decay_time > 0) { |
| 1262 | nstime_t jitter; |
| 1263 | |
| 1264 | nstime_init(&jitter, prng_range(&arena->decay_jitter_state, |
| 1265 | nstime_ns(&arena->decay_interval))); |
| 1266 | nstime_add(&arena->decay_deadline, &jitter); |
| 1267 | } |
| 1268 | } |
| 1269 | |
| 1270 | static bool |
| 1271 | arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) |
| 1272 | { |
| 1273 | |
| 1274 | assert(opt_purge == purge_mode_decay); |
| 1275 | |
| 1276 | return (nstime_compare(&arena->decay_deadline, time) <= 0); |
| 1277 | } |
| 1278 | |
| 1279 | static size_t |
| 1280 | arena_decay_backlog_npages_limit(const arena_t *arena) |
| 1281 | { |
| 1282 | static const uint64_t h_steps[] = { |
| 1283 | #define STEP(step, h, x, y) \ |
| 1284 | h, |
| 1285 | SMOOTHSTEP |
| 1286 | #undef STEP |
| 1287 | }; |
| 1288 | uint64_t sum; |
| 1289 | size_t npages_limit_backlog; |
| 1290 | unsigned i; |
| 1291 | |
| 1292 | assert(opt_purge == purge_mode_decay); |
| 1293 | |
| 1294 | /* |
| 1295 | * For each element of decay_backlog, multiply by the corresponding |
| 1296 | * fixed-point smoothstep decay factor. Sum the products, then divide |
| 1297 | * to round down to the nearest whole number of pages. |
| 1298 | */ |
| 1299 | sum = 0; |
| 1300 | for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) |
| 1301 | sum += arena->decay_backlog[i] * h_steps[i]; |
| 1302 | npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); |
| 1303 | |
| 1304 | return (npages_limit_backlog); |
| 1305 | } |
| 1306 | |
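/*
 * Advance the decay epoch by as many whole intervals as have elapsed, shift
 * the dirty page backlog accordingly, and recompute the decay-based page
 * limit.
 */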
| 1307 | static void |
| 1308 | arena_decay_epoch_advance(arena_t *arena, const nstime_t *time) |
| 1309 | { |
| 1310 | uint64_t nadvance_u64; |
| 1311 | nstime_t delta; |
| 1312 | size_t ndirty_delta; |
| 1313 | |
| 1314 | assert(opt_purge == purge_mode_decay); |
| 1315 | assert(arena_decay_deadline_reached(arena, time)); |
| 1316 | |
| 1317 | nstime_copy(&delta, time); |
| 1318 | nstime_subtract(&delta, &arena->decay_epoch); |
| 1319 | nadvance_u64 = nstime_divide(&delta, &arena->decay_interval); |
| 1320 | assert(nadvance_u64 > 0); |
| 1321 | |
| 1322 | /* Add nadvance_u64 decay intervals to epoch. */ |
| 1323 | nstime_copy(&delta, &arena->decay_interval); |
| 1324 | nstime_imultiply(&delta, nadvance_u64); |
| 1325 | nstime_add(&arena->decay_epoch, &delta); |
| 1326 | |
| 1327 | /* Set a new deadline. */ |
| 1328 | arena_decay_deadline_init(arena); |
| 1329 | |
| 1330 | /* Update the backlog. */ |
| 1331 | if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { |
| 1332 | memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) * |
| 1333 | sizeof(size_t)); |
| 1334 | } else { |
| 1335 | size_t nadvance_z = (size_t)nadvance_u64; |
| 1336 | |
| 1337 | assert((uint64_t)nadvance_z == nadvance_u64); |
| 1338 | |
| 1339 | memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z], |
| 1340 | (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); |
| 1341 | if (nadvance_z > 1) { |
| 1342 | memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS - |
| 1343 | nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); |
| 1344 | } |
| 1345 | } |
| 1346 | ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty - |
| 1347 | arena->decay_ndirty : 0; |
| 1348 | arena->decay_ndirty = arena->ndirty; |
| 1349 | arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; |
| 1350 | arena->decay_backlog_npages_limit = |
| 1351 | arena_decay_backlog_npages_limit(arena); |
| 1352 | } |
| 1353 | |
| 1354 | static size_t |
| 1355 | arena_decay_npages_limit(arena_t *arena) |
| 1356 | { |
| 1357 | size_t npages_limit; |
| 1358 | |
| 1359 | assert(opt_purge == purge_mode_decay); |
| 1360 | |
| 1361 | npages_limit = arena->decay_backlog_npages_limit; |
| 1362 | |
| 1363 | /* Add in any dirty pages created during the current epoch. */ |
| 1364 | if (arena->ndirty > arena->decay_ndirty) |
| 1365 | npages_limit += arena->ndirty - arena->decay_ndirty; |
| 1366 | |
| 1367 | return (npages_limit); |
| 1368 | } |
| 1369 | |
| 1370 | static void |
| 1371 | arena_decay_init(arena_t *arena, ssize_t decay_time) |
| 1372 | { |
| 1373 | |
| 1374 | arena->decay_time = decay_time; |
| 1375 | if (decay_time > 0) { |
| 1376 | nstime_init2(&arena->decay_interval, decay_time, 0); |
| 1377 | nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS); |
| 1378 | } |
| 1379 | |
| 1380 | nstime_init(&arena->decay_epoch, 0); |
| 1381 | nstime_update(&arena->decay_epoch); |
| 1382 | arena->decay_jitter_state = (uint64_t)(uintptr_t)arena; |
| 1383 | arena_decay_deadline_init(arena); |
| 1384 | arena->decay_ndirty = arena->ndirty; |
| 1385 | arena->decay_backlog_npages_limit = 0; |
| 1386 | memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); |
| 1387 | } |
| 1388 | |
| 1389 | static bool |
| 1390 | arena_decay_time_valid(ssize_t decay_time) |
| 1391 | { |
| 1392 | |
| 1393 | if (decay_time < -1) |
| 1394 | return (false); |
| 1395 | if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) |
| 1396 | return (true); |
| 1397 | return (false); |
| 1398 | } |
| 1399 | |
| 1400 | ssize_t |
| 1401 | arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) |
| 1402 | { |
| 1403 | ssize_t decay_time; |
| 1404 | |
| 1405 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1406 | decay_time = arena->decay_time; |
| 1407 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1408 | |
| 1409 | return (decay_time); |
| 1410 | } |
| 1411 | |
| 1412 | bool |
| 1413 | arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) |
| 1414 | { |
| 1415 | |
| 1416 | if (!arena_decay_time_valid(decay_time)) |
| 1417 | return (true); |
| 1418 | |
| 1419 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1420 | /* |
| 1421 | * Restart decay backlog from scratch, which may cause many dirty pages |
| 1422 | * to be immediately purged. It would conceptually be possible to map |
| 1423 | * the old backlog onto the new backlog, but there is no justification |
| 1424 | * for such complexity since decay_time changes are intended to be |
| 1425 | * infrequent, either between the {-1, 0, >0} states, or a one-time |
| 1426 | * arbitrary change during initial arena configuration. |
| 1427 | */ |
| 1428 | arena_decay_init(arena, decay_time); |
| 1429 | arena_maybe_purge(tsdn, arena); |
| 1430 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1431 | |
| 1432 | return (false); |
| 1433 | } |
| 1434 | |
| 1435 | static void |
| 1436 | arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena) |
| 1437 | { |
| 1438 | |
| 1439 | assert(opt_purge == purge_mode_ratio); |
| 1440 | |
| 1441 | /* Don't purge if the option is disabled. */ |
| 1442 | if (arena->lg_dirty_mult < 0) |
| 1443 | return; |
| 1444 | |
| 1445 | /* |
| 1446 | * Iterate, since preventing recursive purging could otherwise leave too |
| 1447 | * many dirty pages. |
| 1448 | */ |
| 1449 | while (true) { |
| 1450 | size_t threshold = (arena->nactive >> arena->lg_dirty_mult); |
| 1451 | if (threshold < chunk_npages) |
| 1452 | threshold = chunk_npages; |
| 1453 | /* |
| 1454 | * Don't purge unless the number of purgeable pages exceeds the |
| 1455 | * threshold. |
| 1456 | */ |
| 1457 | if (arena->ndirty <= threshold) |
| 1458 | return; |
| 1459 | arena_purge_to_limit(tsdn, arena, threshold); |
| 1460 | } |
| 1461 | } |
| 1462 | |
| 1463 | static void |
| 1464 | arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) |
| 1465 | { |
| 1466 | nstime_t time; |
| 1467 | size_t ndirty_limit; |
| 1468 | |
| 1469 | assert(opt_purge == purge_mode_decay); |
| 1470 | |
| 1471 | /* Purge all or nothing if the option is disabled. */ |
| 1472 | if (arena->decay_time <= 0) { |
| 1473 | if (arena->decay_time == 0) |
| 1474 | arena_purge_to_limit(tsdn, arena, 0); |
| 1475 | return; |
| 1476 | } |
| 1477 | |
| 1478 | nstime_copy(&time, &arena->decay_epoch); |
| 1479 | if (unlikely(nstime_update(&time))) { |
| 1480 | /* Time went backwards. Force an epoch advance. */ |
| 1481 | nstime_copy(&time, &arena->decay_deadline); |
| 1482 | } |
| 1483 | |
| 1484 | if (arena_decay_deadline_reached(arena, &time)) |
| 1485 | arena_decay_epoch_advance(arena, &time); |
| 1486 | |
| 1487 | ndirty_limit = arena_decay_npages_limit(arena); |
| 1488 | |
| 1489 | /* |
| 1490 | * Don't try to purge unless the number of purgeable pages exceeds the |
| 1491 | * current limit. |
| 1492 | */ |
| 1493 | if (arena->ndirty <= ndirty_limit) |
| 1494 | return; |
| 1495 | arena_purge_to_limit(tsdn, arena, ndirty_limit); |
| 1496 | } |
| 1497 | |
| 1498 | void |
| 1499 | arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) |
| 1500 | { |
| 1501 | |
| 1502 | /* Don't recursively purge. */ |
| 1503 | if (arena->purging) |
| 1504 | return; |
| 1505 | |
| 1506 | if (opt_purge == purge_mode_ratio) |
| 1507 | arena_maybe_purge_ratio(tsdn, arena); |
| 1508 | else |
| 1509 | arena_maybe_purge_decay(tsdn, arena); |
| 1510 | } |
| 1511 | |
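/* Count dirty pages by walking the dirty run and cached chunk rings. */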
| 1512 | static size_t |
| 1513 | arena_dirty_count(arena_t *arena) |
| 1514 | { |
| 1515 | size_t ndirty = 0; |
| 1516 | arena_runs_dirty_link_t *rdelm; |
| 1517 | extent_node_t *chunkselm; |
| 1518 | |
| 1519 | for (rdelm = qr_next(&arena->runs_dirty, rd_link), |
| 1520 | chunkselm = qr_next(&arena->chunks_cache, cc_link); |
| 1521 | rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { |
| 1522 | size_t npages; |
| 1523 | |
| 1524 | if (rdelm == &chunkselm->rd) { |
| 1525 | npages = extent_node_size_get(chunkselm) >> LG_PAGE; |
| 1526 | chunkselm = qr_next(chunkselm, cc_link); |
| 1527 | } else { |
| 1528 | arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( |
| 1529 | rdelm); |
| 1530 | arena_chunk_map_misc_t *miscelm = |
| 1531 | arena_rd_to_miscelm(rdelm); |
| 1532 | size_t pageind = arena_miscelm_to_pageind(miscelm); |
| 1533 | assert(arena_mapbits_allocated_get(chunk, pageind) == |
| 1534 | 0); |
| 1535 | assert(arena_mapbits_large_get(chunk, pageind) == 0); |
| 1536 | assert(arena_mapbits_dirty_get(chunk, pageind) != 0); |
| 1537 | npages = arena_mapbits_unallocated_size_get(chunk, |
| 1538 | pageind) >> LG_PAGE; |
| 1539 | } |
| 1540 | ndirty += npages; |
| 1541 | } |
| 1542 | |
| 1543 | return (ndirty); |
| 1544 | } |
| 1545 | |
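/*
 * Purge phase 1 (stash): transiently allocate the dirty runs/chunks that are
 * about to be purged and link them onto the caller's sentinel rings, so that
 * arena_purge_stashed() can purge them without holding the arena lock while
 * the stashed memory remains protected from concurrent reuse.
 */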
| 1546 | static size_t |
| 1547 | arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, |
| 1548 | size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, |
| 1549 | extent_node_t *purge_chunks_sentinel) |
| 1550 | { |
| 1551 | arena_runs_dirty_link_t *rdelm, *rdelm_next; |
| 1552 | extent_node_t *chunkselm; |
| 1553 | size_t nstashed = 0; |
| 1554 | |
| 1555 | /* Stash runs/chunks according to ndirty_limit. */ |
| 1556 | for (rdelm = qr_next(&arena->runs_dirty, rd_link), |
| 1557 | chunkselm = qr_next(&arena->chunks_cache, cc_link); |
| 1558 | rdelm != &arena->runs_dirty; rdelm = rdelm_next) { |
| 1559 | size_t npages; |
| 1560 | rdelm_next = qr_next(rdelm, rd_link); |
| 1561 | |
| 1562 | if (rdelm == &chunkselm->rd) { |
| 1563 | extent_node_t *chunkselm_next; |
| 1564 | bool zero; |
| 1565 | UNUSED void *chunk; |
| 1566 | |
| 1567 | npages = extent_node_size_get(chunkselm) >> LG_PAGE; |
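			/*
			 * In decay mode, stop stashing before ndirty would
			 * drop below ndirty_limit.
			 */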
| 1568 | if (opt_purge == purge_mode_decay && arena->ndirty - |
| 1569 | (nstashed + npages) < ndirty_limit) |
| 1570 | break; |
| 1571 | |
| 1572 | chunkselm_next = qr_next(chunkselm, cc_link); |
| 1573 | /* |
| 1574 | * Allocate. chunkselm remains valid due to the |
| 1575 | * dalloc_node=false argument to chunk_alloc_cache(). |
| 1576 | */ |
| 1577 | zero = false; |
| 1578 | chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks, |
| 1579 | extent_node_addr_get(chunkselm), |
| 1580 | extent_node_size_get(chunkselm), chunksize, &zero, |
| 1581 | false); |
| 1582 | assert(chunk == extent_node_addr_get(chunkselm)); |
| 1583 | assert(zero == extent_node_zeroed_get(chunkselm)); |
| 1584 | extent_node_dirty_insert(chunkselm, purge_runs_sentinel, |
| 1585 | purge_chunks_sentinel); |
| 1586 | assert(npages == (extent_node_size_get(chunkselm) >> |
| 1587 | LG_PAGE)); |
| 1588 | chunkselm = chunkselm_next; |
| 1589 | } else { |
| 1590 | arena_chunk_t *chunk = |
| 1591 | (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); |
| 1592 | arena_chunk_map_misc_t *miscelm = |
| 1593 | arena_rd_to_miscelm(rdelm); |
| 1594 | size_t pageind = arena_miscelm_to_pageind(miscelm); |
| 1595 | arena_run_t *run = &miscelm->run; |
| 1596 | size_t run_size = |
| 1597 | arena_mapbits_unallocated_size_get(chunk, pageind); |
| 1598 | |
| 1599 | npages = run_size >> LG_PAGE; |
| 1600 | if (opt_purge == purge_mode_decay && arena->ndirty - |
| 1601 | (nstashed + npages) < ndirty_limit) |
| 1602 | break; |
| 1603 | |
| 1604 | assert(pageind + npages <= chunk_npages); |
| 1605 | assert(arena_mapbits_dirty_get(chunk, pageind) == |
| 1606 | arena_mapbits_dirty_get(chunk, pageind+npages-1)); |
| 1607 | |
| 1608 | /* |
| 1609 | * If purging the spare chunk's run, make it available |
| 1610 | * prior to allocation. |
| 1611 | */ |
| 1612 | if (chunk == arena->spare) |
| 1613 | arena_chunk_alloc(tsdn, arena); |
| 1614 | |
| 1615 | /* Temporarily allocate the free dirty run. */ |
| 1616 | arena_run_split_large(arena, run, run_size, false); |
			/*
			 * Stash.  The preceding split already removed rdelm
			 * from runs_dirty and left it as a one-element ring,
			 * so a qr_new() here would be redundant (the
			 * assertions below verify this); rdelm can be melded
			 * directly onto the purge list.
			 */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
| 1625 | } |
| 1626 | |
| 1627 | nstashed += npages; |
| 1628 | if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <= |
| 1629 | ndirty_limit) |
| 1630 | break; |
| 1631 | } |
| 1632 | |
| 1633 | return (nstashed); |
| 1634 | } |
| 1635 | |
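/*
 * Purge phase 2: with the arena lock dropped, decommit or purge each stashed
 * run's pages via the chunk hooks and record the resulting unzeroed/
 * decommitted state in the chunk map.  Cached chunks are not purged here;
 * they are deallocated wholesale in arena_unstash_purged().
 */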
| 1636 | static size_t |
| 1637 | arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, |
| 1638 | arena_runs_dirty_link_t *purge_runs_sentinel, |
| 1639 | extent_node_t *purge_chunks_sentinel) |
| 1640 | { |
| 1641 | size_t npurged, nmadvise; |
| 1642 | arena_runs_dirty_link_t *rdelm; |
| 1643 | extent_node_t *chunkselm; |
| 1644 | |
| 1645 | if (config_stats) |
| 1646 | nmadvise = 0; |
| 1647 | npurged = 0; |
| 1648 | |
| 1649 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1650 | for (rdelm = qr_next(purge_runs_sentinel, rd_link), |
| 1651 | chunkselm = qr_next(purge_chunks_sentinel, cc_link); |
| 1652 | rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { |
| 1653 | size_t npages; |
| 1654 | |
| 1655 | if (rdelm == &chunkselm->rd) { |
| 1656 | /* |
| 1657 | * Don't actually purge the chunk here because 1) |
| 1658 | * chunkselm is embedded in the chunk and must remain |
| 1659 | * valid, and 2) we deallocate the chunk in |
| 1660 | * arena_unstash_purged(), where it is destroyed, |
| 1661 | * decommitted, or purged, depending on chunk |
| 1662 | * deallocation policy. |
| 1663 | */ |
| 1664 | size_t size = extent_node_size_get(chunkselm); |
| 1665 | npages = size >> LG_PAGE; |
| 1666 | chunkselm = qr_next(chunkselm, cc_link); |
| 1667 | } else { |
| 1668 | size_t pageind, run_size, flag_unzeroed, flags, i; |
| 1669 | bool decommitted; |
| 1670 | arena_chunk_t *chunk = |
| 1671 | (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); |
| 1672 | arena_chunk_map_misc_t *miscelm = |
| 1673 | arena_rd_to_miscelm(rdelm); |
| 1674 | pageind = arena_miscelm_to_pageind(miscelm); |
| 1675 | run_size = arena_mapbits_large_size_get(chunk, pageind); |
| 1676 | npages = run_size >> LG_PAGE; |
| 1677 | |
| 1678 | assert(pageind + npages <= chunk_npages); |
| 1679 | assert(!arena_mapbits_decommitted_get(chunk, pageind)); |
| 1680 | assert(!arena_mapbits_decommitted_get(chunk, |
| 1681 | pageind+npages-1)); |
| 1682 | decommitted = !chunk_hooks->decommit(chunk, chunksize, |
| 1683 | pageind << LG_PAGE, npages << LG_PAGE, arena->ind); |
| 1684 | if (decommitted) { |
| 1685 | flag_unzeroed = 0; |
| 1686 | flags = CHUNK_MAP_DECOMMITTED; |
| 1687 | } else { |
| 1688 | flag_unzeroed = chunk_purge_wrapper(tsdn, arena, |
| 1689 | chunk_hooks, chunk, chunksize, pageind << |
| 1690 | LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; |
| 1691 | flags = flag_unzeroed; |
| 1692 | } |
| 1693 | arena_mapbits_large_set(chunk, pageind+npages-1, 0, |
| 1694 | flags); |
| 1695 | arena_mapbits_large_set(chunk, pageind, run_size, |
| 1696 | flags); |
| 1697 | |
| 1698 | /* |
| 1699 | * Set the unzeroed flag for internal pages, now that |
| 1700 | * chunk_purge_wrapper() has returned whether the pages |
| 1701 | * were zeroed as a side effect of purging. This chunk |
| 1702 | * map modification is safe even though the arena mutex |
| 1703 | * isn't currently owned by this thread, because the run |
| 1704 | * is marked as allocated, thus protecting it from being |
| 1705 | * modified by any other thread. As long as these |
| 1706 | * writes don't perturb the first and last elements' |
| 1707 | * CHUNK_MAP_ALLOCATED bits, behavior is well defined. |
| 1708 | */ |
| 1709 | for (i = 1; i < npages-1; i++) { |
| 1710 | arena_mapbits_internal_set(chunk, pageind+i, |
| 1711 | flag_unzeroed); |
| 1712 | } |
| 1713 | } |
| 1714 | |
| 1715 | npurged += npages; |
| 1716 | if (config_stats) |
| 1717 | nmadvise++; |
| 1718 | } |
| 1719 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1720 | |
| 1721 | if (config_stats) { |
| 1722 | arena->stats.nmadvise += nmadvise; |
| 1723 | arena->stats.purged += npurged; |
| 1724 | } |
| 1725 | |
| 1726 | return (npurged); |
| 1727 | } |
| 1728 | |
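/*
 * Purge phase 3 (unstash): deallocate everything that was stashed.  Cached
 * chunks go back through chunk_dalloc_wrapper(), and runs are returned to the
 * arena via arena_run_dalloc().
 */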
| 1729 | static void |
| 1730 | arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, |
| 1731 | arena_runs_dirty_link_t *purge_runs_sentinel, |
| 1732 | extent_node_t *purge_chunks_sentinel) |
| 1733 | { |
| 1734 | arena_runs_dirty_link_t *rdelm, *rdelm_next; |
| 1735 | extent_node_t *chunkselm; |
| 1736 | |
| 1737 | /* Deallocate chunks/runs. */ |
| 1738 | for (rdelm = qr_next(purge_runs_sentinel, rd_link), |
| 1739 | chunkselm = qr_next(purge_chunks_sentinel, cc_link); |
| 1740 | rdelm != purge_runs_sentinel; rdelm = rdelm_next) { |
| 1741 | rdelm_next = qr_next(rdelm, rd_link); |
| 1742 | if (rdelm == &chunkselm->rd) { |
| 1743 | extent_node_t *chunkselm_next = qr_next(chunkselm, |
| 1744 | cc_link); |
| 1745 | void *addr = extent_node_addr_get(chunkselm); |
| 1746 | size_t size = extent_node_size_get(chunkselm); |
| 1747 | bool zeroed = extent_node_zeroed_get(chunkselm); |
| 1748 | bool committed = extent_node_committed_get(chunkselm); |
| 1749 | extent_node_dirty_remove(chunkselm); |
| 1750 | arena_node_dalloc(tsdn, arena, chunkselm); |
| 1751 | chunkselm = chunkselm_next; |
| 1752 | chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, |
| 1753 | size, zeroed, committed); |
| 1754 | } else { |
| 1755 | arena_chunk_t *chunk = |
| 1756 | (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); |
| 1757 | arena_chunk_map_misc_t *miscelm = |
| 1758 | arena_rd_to_miscelm(rdelm); |
| 1759 | size_t pageind = arena_miscelm_to_pageind(miscelm); |
| 1760 | bool decommitted = (arena_mapbits_decommitted_get(chunk, |
| 1761 | pageind) != 0); |
| 1762 | arena_run_t *run = &miscelm->run; |
| 1763 | qr_remove(rdelm, rd_link); |
| 1764 | arena_run_dalloc(tsdn, arena, run, false, true, |
| 1765 | decommitted); |
| 1766 | } |
| 1767 | } |
| 1768 | } |
| 1769 | |
| 1770 | /* |
| 1771 | * NB: ndirty_limit is interpreted differently depending on opt_purge: |
| 1772 | * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the |
| 1773 | * desired state: |
| 1774 | * (arena->ndirty <= ndirty_limit) |
| 1775 | * - purge_mode_decay: Purge as many dirty runs/chunks as possible without |
| 1776 | * violating the invariant: |
| 1777 | * (arena->ndirty >= ndirty_limit) |
| 1778 | */ |
| 1779 | static void |
| 1780 | arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) |
| 1781 | { |
| 1782 | chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); |
| 1783 | size_t npurge, npurged; |
| 1784 | arena_runs_dirty_link_t purge_runs_sentinel; |
| 1785 | extent_node_t purge_chunks_sentinel; |
| 1786 | |
| 1787 | arena->purging = true; |
| 1788 | |
| 1789 | /* |
| 1790 | * Calls to arena_dirty_count() are disabled even for debug builds |
| 1791 | * because overhead grows nonlinearly as memory usage increases. |
| 1792 | */ |
| 1793 | if (false && config_debug) { |
| 1794 | size_t ndirty = arena_dirty_count(arena); |
| 1795 | assert(ndirty == arena->ndirty); |
| 1796 | } |
| 1797 | assert(opt_purge != purge_mode_ratio || (arena->nactive >> |
| 1798 | arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0); |
| 1799 | |
| 1800 | qr_new(&purge_runs_sentinel, rd_link); |
| 1801 | extent_node_dirty_linkage_init(&purge_chunks_sentinel); |
| 1802 | |
| 1803 | npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit, |
| 1804 | &purge_runs_sentinel, &purge_chunks_sentinel); |
| 1805 | if (npurge == 0) |
| 1806 | goto label_return; |
| 1807 | npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks, |
| 1808 | &purge_runs_sentinel, &purge_chunks_sentinel); |
| 1809 | assert(npurged == npurge); |
| 1810 | arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, |
| 1811 | &purge_chunks_sentinel); |
| 1812 | |
| 1813 | if (config_stats) |
| 1814 | arena->stats.npurge++; |
| 1815 | |
| 1816 | label_return: |
| 1817 | arena->purging = false; |
| 1818 | } |
| 1819 | |
| 1820 | void |
| 1821 | arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) |
| 1822 | { |
| 1823 | |
| 1824 | malloc_mutex_lock(tsdn, &arena->lock); |
| 1825 | if (all) |
| 1826 | arena_purge_to_limit(tsdn, arena, 0); |
| 1827 | else |
| 1828 | arena_maybe_purge(tsdn, arena); |
| 1829 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 1830 | } |
| 1831 | |
| 1832 | static void |
| 1833 | arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) |
| 1834 | { |
| 1835 | size_t pageind, npages; |
| 1836 | |
| 1837 | cassert(config_prof); |
| 1838 | assert(opt_prof); |
| 1839 | |
| 1840 | /* |
| 1841 | * Iterate over the allocated runs and remove profiled allocations from |
| 1842 | * the sample set. |
| 1843 | */ |
| 1844 | for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { |
| 1845 | if (arena_mapbits_allocated_get(chunk, pageind) != 0) { |
| 1846 | if (arena_mapbits_large_get(chunk, pageind) != 0) { |
| 1847 | void *ptr = (void *)((uintptr_t)chunk + (pageind |
| 1848 | << LG_PAGE)); |
| 1849 | size_t usize = isalloc(tsd_tsdn(tsd), ptr, |
| 1850 | config_prof); |
| 1851 | |
| 1852 | prof_free(tsd, ptr, usize); |
| 1853 | npages = arena_mapbits_large_size_get(chunk, |
| 1854 | pageind) >> LG_PAGE; |
| 1855 | } else { |
| 1856 | /* Skip small run. */ |
| 1857 | size_t binind = arena_mapbits_binind_get(chunk, |
| 1858 | pageind); |
| 1859 | arena_bin_info_t *bin_info = |
| 1860 | &arena_bin_info[binind]; |
| 1861 | npages = bin_info->run_size >> LG_PAGE; |
| 1862 | } |
| 1863 | } else { |
| 1864 | /* Skip unallocated run. */ |
| 1865 | npages = arena_mapbits_unallocated_size_get(chunk, |
| 1866 | pageind) >> LG_PAGE; |
| 1867 | } |
| 1868 | assert(pageind + npages <= chunk_npages); |
| 1869 | } |
| 1870 | } |
| 1871 | |
| 1872 | void |
| 1873 | arena_reset(tsd_t *tsd, arena_t *arena) |
| 1874 | { |
| 1875 | unsigned i; |
| 1876 | extent_node_t *node; |
| 1877 | |
| 1878 | /* |
| 1879 | * Locking in this function is unintuitive. The caller guarantees that |
| 1880 | * no concurrent operations are happening in this arena, but there are |
| 1881 | * still reasons that some locking is necessary: |
| 1882 | * |
| 1883 | * - Some of the functions in the transitive closure of calls assume |
| 1884 | * appropriate locks are held, and in some cases these locks are |
| 1885 | * temporarily dropped to avoid lock order reversal or deadlock due to |
| 1886 | * reentry. |
| 1887 | * - mallctl("epoch", ...) may concurrently refresh stats. While |
| 1888 | * strictly speaking this is a "concurrent operation", disallowing |
| 1889 | * stats refreshes would impose an inconvenient burden. |
| 1890 | */ |
| 1891 | |
| 1892 | /* Remove large allocations from prof sample set. */ |
| 1893 | if (config_prof && opt_prof) { |
| 1894 | ql_foreach(node, &arena->achunks, ql_link) { |
| 1895 | arena_achunk_prof_reset(tsd, arena, |
| 1896 | extent_node_addr_get(node)); |
| 1897 | } |
| 1898 | } |
| 1899 | |
| 1900 | /* Reset curruns for large size classes. */ |
| 1901 | if (config_stats) { |
| 1902 | for (i = 0; i < nlclasses; i++) |
| 1903 | arena->stats.lstats[i].curruns = 0; |
| 1904 | } |
| 1905 | |
| 1906 | /* Huge allocations. */ |
| 1907 | malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); |
| 1908 | for (node = ql_last(&arena->huge, ql_link); node != NULL; node = |
| 1909 | ql_last(&arena->huge, ql_link)) { |
| 1910 | void *ptr = extent_node_addr_get(node); |
| 1911 | size_t usize; |
| 1912 | |
| 1913 | malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); |
| 1914 | if (config_stats || (config_prof && opt_prof)) |
| 1915 | usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); |
| 1916 | /* Remove huge allocation from prof sample set. */ |
| 1917 | if (config_prof && opt_prof) |
| 1918 | prof_free(tsd, ptr, usize); |
| 1919 | huge_dalloc(tsd_tsdn(tsd), ptr); |
| 1920 | malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); |
| 1921 | /* Cancel out unwanted effects on stats. */ |
| 1922 | if (config_stats) |
| 1923 | arena_huge_reset_stats_cancel(arena, usize); |
| 1924 | } |
| 1925 | malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); |
| 1926 | |
| 1927 | malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); |
| 1928 | |
| 1929 | /* Bins. */ |
| 1930 | for (i = 0; i < NBINS; i++) { |
| 1931 | arena_bin_t *bin = &arena->bins[i]; |
| 1932 | malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); |
| 1933 | bin->runcur = NULL; |
| 1934 | arena_run_heap_new(&bin->runs); |
| 1935 | if (config_stats) { |
| 1936 | bin->stats.curregs = 0; |
| 1937 | bin->stats.curruns = 0; |
| 1938 | } |
| 1939 | malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); |
| 1940 | } |
| 1941 | |
| 1942 | /* |
| 1943 | * Re-initialize runs_dirty such that the chunks_cache and runs_dirty |
| 1944 | * chains directly correspond. |
| 1945 | */ |
| 1946 | qr_new(&arena->runs_dirty, rd_link); |
| 1947 | for (node = qr_next(&arena->chunks_cache, cc_link); |
| 1948 | node != &arena->chunks_cache; node = qr_next(node, cc_link)) { |
| 1949 | qr_new(&node->rd, rd_link); |
| 1950 | qr_meld(&arena->runs_dirty, &node->rd, rd_link); |
| 1951 | } |
| 1952 | |
| 1953 | /* Arena chunks. */ |
| 1954 | for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = |
| 1955 | ql_last(&arena->achunks, ql_link)) { |
| 1956 | ql_remove(&arena->achunks, node, ql_link); |
| 1957 | arena_chunk_discard(tsd_tsdn(tsd), arena, |
| 1958 | extent_node_addr_get(node)); |
| 1959 | } |
| 1960 | |
| 1961 | /* Spare. */ |
| 1962 | if (arena->spare != NULL) { |
| 1963 | arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); |
| 1964 | arena->spare = NULL; |
| 1965 | } |
| 1966 | |
| 1967 | assert(!arena->purging); |
| 1968 | arena->nactive = 0; |
| 1969 | |
	for (i = 0; i < runs_avail_nclasses; i++)
| 1971 | arena_run_heap_new(&arena->runs_avail[i]); |
| 1972 | |
| 1973 | malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); |
| 1974 | } |
| 1975 | |
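/*
 * Coalesce the run described by {*p_size, *p_run_ind, *p_run_pages} with any
 * adjacent unallocated runs that have matching dirty/decommitted flags,
 * removing the neighbors from runs_avail (and runs_dirty, if dirty) and
 * returning the enlarged extent via the out parameters.
 */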
| 1976 | static void |
| 1977 | arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, |
| 1978 | size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, |
| 1979 | size_t flag_decommitted) |
| 1980 | { |
| 1981 | size_t size = *p_size; |
| 1982 | size_t run_ind = *p_run_ind; |
| 1983 | size_t run_pages = *p_run_pages; |
| 1984 | |
| 1985 | /* Try to coalesce forward. */ |
| 1986 | if (run_ind + run_pages < chunk_npages && |
| 1987 | arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && |
| 1988 | arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && |
| 1989 | arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == |
| 1990 | flag_decommitted) { |
| 1991 | size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, |
| 1992 | run_ind+run_pages); |
| 1993 | size_t nrun_pages = nrun_size >> LG_PAGE; |
| 1994 | |
| 1995 | /* |
| 1996 | * Remove successor from runs_avail; the coalesced run is |
| 1997 | * inserted later. |
| 1998 | */ |
| 1999 | assert(arena_mapbits_unallocated_size_get(chunk, |
| 2000 | run_ind+run_pages+nrun_pages-1) == nrun_size); |
| 2001 | assert(arena_mapbits_dirty_get(chunk, |
| 2002 | run_ind+run_pages+nrun_pages-1) == flag_dirty); |
| 2003 | assert(arena_mapbits_decommitted_get(chunk, |
| 2004 | run_ind+run_pages+nrun_pages-1) == flag_decommitted); |
| 2005 | arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); |
| 2006 | |
| 2007 | /* |
| 2008 | * If the successor is dirty, remove it from the set of dirty |
| 2009 | * pages. |
| 2010 | */ |
| 2011 | if (flag_dirty != 0) { |
| 2012 | arena_run_dirty_remove(arena, chunk, run_ind+run_pages, |
| 2013 | nrun_pages); |
| 2014 | } |
| 2015 | |
| 2016 | size += nrun_size; |
| 2017 | run_pages += nrun_pages; |
| 2018 | |
| 2019 | arena_mapbits_unallocated_size_set(chunk, run_ind, size); |
| 2020 | arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, |
| 2021 | size); |
| 2022 | } |
| 2023 | |
| 2024 | /* Try to coalesce backward. */ |
| 2025 | if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, |
| 2026 | run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == |
| 2027 | flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == |
| 2028 | flag_decommitted) { |
| 2029 | size_t prun_size = arena_mapbits_unallocated_size_get(chunk, |
| 2030 | run_ind-1); |
| 2031 | size_t prun_pages = prun_size >> LG_PAGE; |
| 2032 | |
| 2033 | run_ind -= prun_pages; |
| 2034 | |
| 2035 | /* |
| 2036 | * Remove predecessor from runs_avail; the coalesced run is |
| 2037 | * inserted later. |
| 2038 | */ |
| 2039 | assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == |
| 2040 | prun_size); |
| 2041 | assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); |
| 2042 | assert(arena_mapbits_decommitted_get(chunk, run_ind) == |
| 2043 | flag_decommitted); |
| 2044 | arena_avail_remove(arena, chunk, run_ind, prun_pages); |
| 2045 | |
| 2046 | /* |
| 2047 | * If the predecessor is dirty, remove it from the set of dirty |
| 2048 | * pages. |
| 2049 | */ |
| 2050 | if (flag_dirty != 0) { |
| 2051 | arena_run_dirty_remove(arena, chunk, run_ind, |
| 2052 | prun_pages); |
| 2053 | } |
| 2054 | |
| 2055 | size += prun_size; |
| 2056 | run_pages += prun_pages; |
| 2057 | |
| 2058 | arena_mapbits_unallocated_size_set(chunk, run_ind, size); |
| 2059 | arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, |
| 2060 | size); |
| 2061 | } |
| 2062 | |
| 2063 | *p_size = size; |
| 2064 | *p_run_ind = run_ind; |
| 2065 | *p_run_pages = run_pages; |
| 2066 | } |
| 2067 | |
| 2068 | static size_t |
| 2069 | arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, |
| 2070 | size_t run_ind) |
| 2071 | { |
| 2072 | size_t size; |
| 2073 | |
| 2074 | assert(run_ind >= map_bias); |
| 2075 | assert(run_ind < chunk_npages); |
| 2076 | |
| 2077 | if (arena_mapbits_large_get(chunk, run_ind) != 0) { |
| 2078 | size = arena_mapbits_large_size_get(chunk, run_ind); |
| 2079 | assert(size == PAGE || arena_mapbits_large_size_get(chunk, |
| 2080 | run_ind+(size>>LG_PAGE)-1) == 0); |
| 2081 | } else { |
| 2082 | arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; |
| 2083 | size = bin_info->run_size; |
| 2084 | } |
| 2085 | |
| 2086 | return (size); |
| 2087 | } |
| 2088 | |
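/*
 * Return a run to the arena: mark its pages unallocated in the chunk map,
 * coalesce with neighboring free runs, reinsert into runs_avail (and
 * runs_dirty, if dirty), release the chunk entirely if the run now spans
 * arena_maxrun, and finally consider purging.
 */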
| 2089 | static void |
| 2090 | arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, |
| 2091 | bool cleaned, bool decommitted) |
| 2092 | { |
| 2093 | arena_chunk_t *chunk; |
| 2094 | arena_chunk_map_misc_t *miscelm; |
| 2095 | size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; |
| 2096 | |
| 2097 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); |
| 2098 | miscelm = arena_run_to_miscelm(run); |
| 2099 | run_ind = arena_miscelm_to_pageind(miscelm); |
| 2100 | assert(run_ind >= map_bias); |
| 2101 | assert(run_ind < chunk_npages); |
| 2102 | size = arena_run_size_get(arena, chunk, run, run_ind); |
| 2103 | run_pages = (size >> LG_PAGE); |
| 2104 | arena_nactive_sub(arena, run_pages); |
| 2105 | |
| 2106 | /* |
| 2107 | * The run is dirty if the caller claims to have dirtied it, as well as |
| 2108 | * if it was already dirty before being allocated and the caller |
| 2109 | * doesn't claim to have cleaned it. |
| 2110 | */ |
| 2111 | assert(arena_mapbits_dirty_get(chunk, run_ind) == |
| 2112 | arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); |
| 2113 | if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) |
| 2114 | != 0) |
| 2115 | dirty = true; |
| 2116 | flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; |
| 2117 | flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; |
| 2118 | |
| 2119 | /* Mark pages as unallocated in the chunk map. */ |
| 2120 | if (dirty || decommitted) { |
| 2121 | size_t flags = flag_dirty | flag_decommitted; |
| 2122 | arena_mapbits_unallocated_set(chunk, run_ind, size, flags); |
| 2123 | arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, |
| 2124 | flags); |
| 2125 | } else { |
| 2126 | arena_mapbits_unallocated_set(chunk, run_ind, size, |
| 2127 | arena_mapbits_unzeroed_get(chunk, run_ind)); |
| 2128 | arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, |
| 2129 | arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); |
| 2130 | } |
| 2131 | |
| 2132 | arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, |
| 2133 | flag_dirty, flag_decommitted); |
| 2134 | |
| 2135 | /* Insert into runs_avail, now that coalescing is complete. */ |
| 2136 | assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == |
| 2137 | arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); |
| 2138 | assert(arena_mapbits_dirty_get(chunk, run_ind) == |
| 2139 | arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); |
| 2140 | assert(arena_mapbits_decommitted_get(chunk, run_ind) == |
| 2141 | arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); |
| 2142 | arena_avail_insert(arena, chunk, run_ind, run_pages); |
| 2143 | |
| 2144 | if (dirty) |
| 2145 | arena_run_dirty_insert(arena, chunk, run_ind, run_pages); |
| 2146 | |
| 2147 | /* Deallocate chunk if it is now completely unused. */ |
| 2148 | if (size == arena_maxrun) { |
| 2149 | assert(run_ind == map_bias); |
| 2150 | assert(run_pages == (arena_maxrun >> LG_PAGE)); |
| 2151 | arena_chunk_dalloc(tsdn, arena, chunk); |
| 2152 | } |
| 2153 | |
| 2154 | /* |
| 2155 | * It is okay to do dirty page processing here even if the chunk was |
| 2156 | * deallocated above, since in that case it is the spare. Waiting |
| 2157 | * until after possible chunk deallocation to do dirty processing |
| 2158 | * allows for an old spare to be fully deallocated, thus decreasing the |
| 2159 | * chances of spuriously crossing the dirty page purging threshold. |
| 2160 | */ |
| 2161 | if (dirty) |
| 2162 | arena_maybe_purge(tsdn, arena); |
| 2163 | } |
| 2164 | |
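/*
 * Split a large run and release its leading pages, leaving the trailing
 * newsize bytes allocated.  Used (e.g. by arena_palloc_large()) to discard
 * the alignment slack at the head of an over-allocated run.
 */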
| 2165 | static void |
| 2166 | arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 2167 | arena_run_t *run, size_t oldsize, size_t newsize) |
| 2168 | { |
| 2169 | arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); |
| 2170 | size_t pageind = arena_miscelm_to_pageind(miscelm); |
| 2171 | size_t head_npages = (oldsize - newsize) >> LG_PAGE; |
| 2172 | size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); |
| 2173 | size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); |
| 2174 | size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? |
| 2175 | CHUNK_MAP_UNZEROED : 0; |
| 2176 | |
| 2177 | assert(oldsize > newsize); |
| 2178 | |
| 2179 | /* |
| 2180 | * Update the chunk map so that arena_run_dalloc() can treat the |
| 2181 | * leading run as separately allocated. Set the last element of each |
| 2182 | * run first, in case of single-page runs. |
| 2183 | */ |
| 2184 | assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); |
| 2185 | arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | |
| 2186 | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, |
| 2187 | pageind+head_npages-1))); |
| 2188 | arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | |
| 2189 | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); |
| 2190 | |
| 2191 | if (config_debug) { |
| 2192 | UNUSED size_t tail_npages = newsize >> LG_PAGE; |
| 2193 | assert(arena_mapbits_large_size_get(chunk, |
| 2194 | pageind+head_npages+tail_npages-1) == 0); |
| 2195 | assert(arena_mapbits_dirty_get(chunk, |
| 2196 | pageind+head_npages+tail_npages-1) == flag_dirty); |
| 2197 | } |
| 2198 | arena_mapbits_large_set(chunk, pageind+head_npages, newsize, |
| 2199 | flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, |
| 2200 | pageind+head_npages))); |
| 2201 | |
| 2202 | arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != |
| 2203 | 0)); |
| 2204 | } |
| 2205 | |
| 2206 | static void |
| 2207 | arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 2208 | arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) |
| 2209 | { |
| 2210 | arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); |
| 2211 | size_t pageind = arena_miscelm_to_pageind(miscelm); |
| 2212 | size_t head_npages = newsize >> LG_PAGE; |
| 2213 | size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); |
| 2214 | size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); |
| 2215 | size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? |
| 2216 | CHUNK_MAP_UNZEROED : 0; |
| 2217 | arena_chunk_map_misc_t *tail_miscelm; |
| 2218 | arena_run_t *tail_run; |
| 2219 | |
| 2220 | assert(oldsize > newsize); |
| 2221 | |
| 2222 | /* |
| 2223 | * Update the chunk map so that arena_run_dalloc() can treat the |
| 2224 | * trailing run as separately allocated. Set the last element of each |
| 2225 | * run first, in case of single-page runs. |
| 2226 | */ |
| 2227 | assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); |
| 2228 | arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | |
| 2229 | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, |
| 2230 | pageind+head_npages-1))); |
| 2231 | arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | |
| 2232 | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); |
| 2233 | |
| 2234 | if (config_debug) { |
| 2235 | UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; |
| 2236 | assert(arena_mapbits_large_size_get(chunk, |
| 2237 | pageind+head_npages+tail_npages-1) == 0); |
| 2238 | assert(arena_mapbits_dirty_get(chunk, |
| 2239 | pageind+head_npages+tail_npages-1) == flag_dirty); |
| 2240 | } |
| 2241 | arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, |
| 2242 | flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, |
| 2243 | pageind+head_npages))); |
| 2244 | |
| 2245 | tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); |
| 2246 | tail_run = &tail_miscelm->run; |
| 2247 | arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted |
| 2248 | != 0)); |
| 2249 | } |
| 2250 | |
| 2251 | static void |
| 2252 | arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) |
| 2253 | { |
| 2254 | arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); |
| 2255 | |
| 2256 | arena_run_heap_insert(&bin->runs, miscelm); |
| 2257 | } |
| 2258 | |
| 2259 | static arena_run_t * |
| 2260 | arena_bin_nonfull_run_tryget(arena_bin_t *bin) |
| 2261 | { |
| 2262 | arena_chunk_map_misc_t *miscelm; |
| 2263 | |
| 2264 | miscelm = arena_run_heap_remove_first(&bin->runs); |
| 2265 | if (miscelm == NULL) |
| 2266 | return (NULL); |
| 2267 | if (config_stats) |
| 2268 | bin->stats.reruns++; |
| 2269 | |
| 2270 | return (&miscelm->run); |
| 2271 | } |
| 2272 | |
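/*
 * Acquire a non-full run for the bin, allocating a fresh one if necessary.
 * bin->lock is dropped while the new run is allocated under arena->lock, so
 * callers must re-check bin->runcur afterwards (see arena_bin_malloc_hard()).
 */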
| 2273 | static arena_run_t * |
| 2274 | arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) |
| 2275 | { |
| 2276 | arena_run_t *run; |
| 2277 | szind_t binind; |
| 2278 | arena_bin_info_t *bin_info; |
| 2279 | |
| 2280 | /* Look for a usable run. */ |
| 2281 | run = arena_bin_nonfull_run_tryget(bin); |
| 2282 | if (run != NULL) |
| 2283 | return (run); |
| 2284 | /* No existing runs have any space available. */ |
| 2285 | |
| 2286 | binind = arena_bin_index(arena, bin); |
| 2287 | bin_info = &arena_bin_info[binind]; |
| 2288 | |
| 2289 | /* Allocate a new run. */ |
| 2290 | malloc_mutex_unlock(tsdn, &bin->lock); |
| 2291 | /******************************/ |
| 2292 | malloc_mutex_lock(tsdn, &arena->lock); |
| 2293 | run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); |
| 2294 | if (run != NULL) { |
| 2295 | /* Initialize run internals. */ |
| 2296 | run->binind = binind; |
| 2297 | run->nfree = bin_info->nregs; |
| 2298 | bitmap_init(run->bitmap, &bin_info->bitmap_info); |
| 2299 | } |
| 2300 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 2301 | /********************************/ |
| 2302 | malloc_mutex_lock(tsdn, &bin->lock); |
| 2303 | if (run != NULL) { |
| 2304 | if (config_stats) { |
| 2305 | bin->stats.nruns++; |
| 2306 | bin->stats.curruns++; |
| 2307 | } |
| 2308 | return (run); |
| 2309 | } |
| 2310 | |
| 2311 | /* |
| 2312 | * arena_run_alloc_small() failed, but another thread may have made |
| 2313 | * sufficient memory available while this one dropped bin->lock above, |
| 2314 | * so search one more time. |
| 2315 | */ |
| 2316 | run = arena_bin_nonfull_run_tryget(bin); |
| 2317 | if (run != NULL) |
| 2318 | return (run); |
| 2319 | |
| 2320 | return (NULL); |
| 2321 | } |
| 2322 | |
| 2323 | /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ |
| 2324 | static void * |
| 2325 | arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) |
| 2326 | { |
| 2327 | szind_t binind; |
| 2328 | arena_bin_info_t *bin_info; |
| 2329 | arena_run_t *run; |
| 2330 | |
| 2331 | binind = arena_bin_index(arena, bin); |
| 2332 | bin_info = &arena_bin_info[binind]; |
| 2333 | bin->runcur = NULL; |
| 2334 | run = arena_bin_nonfull_run_get(tsdn, arena, bin); |
| 2335 | if (bin->runcur != NULL && bin->runcur->nfree > 0) { |
| 2336 | /* |
| 2337 | * Another thread updated runcur while this one ran without the |
| 2338 | * bin lock in arena_bin_nonfull_run_get(). |
| 2339 | */ |
| 2340 | void *ret; |
| 2341 | assert(bin->runcur->nfree > 0); |
| 2342 | ret = arena_run_reg_alloc(bin->runcur, bin_info); |
| 2343 | if (run != NULL) { |
| 2344 | arena_chunk_t *chunk; |
| 2345 | |
| 2346 | /* |
| 2347 | * arena_run_alloc_small() may have allocated run, or |
			 * it may have pulled run from the bin's run heap.
| 2349 | * Therefore it is unsafe to make any assumptions about |
| 2350 | * how run has previously been used, and |
| 2351 | * arena_bin_lower_run() must be called, as if a region |
| 2352 | * were just deallocated from the run. |
| 2353 | */ |
| 2354 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); |
| 2355 | if (run->nfree == bin_info->nregs) { |
| 2356 | arena_dalloc_bin_run(tsdn, arena, chunk, run, |
| 2357 | bin); |
| 2358 | } else |
| 2359 | arena_bin_lower_run(arena, chunk, run, bin); |
| 2360 | } |
| 2361 | return (ret); |
| 2362 | } |
| 2363 | |
| 2364 | if (run == NULL) |
| 2365 | return (NULL); |
| 2366 | |
| 2367 | bin->runcur = run; |
| 2368 | |
| 2369 | assert(bin->runcur->nfree > 0); |
| 2370 | |
| 2371 | return (arena_run_reg_alloc(bin->runcur, bin_info)); |
| 2372 | } |
| 2373 | |
| 2374 | void |
| 2375 | arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, |
| 2376 | szind_t binind, uint64_t prof_accumbytes) |
| 2377 | { |
| 2378 | unsigned i, nfill; |
| 2379 | arena_bin_t *bin; |
| 2380 | |
| 2381 | assert(tbin->ncached == 0); |
| 2382 | |
| 2383 | if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) |
| 2384 | prof_idump(tsdn); |
| 2385 | bin = &arena->bins[binind]; |
| 2386 | malloc_mutex_lock(tsdn, &bin->lock); |
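	/*
	 * Fill ncached_max >> lg_fill_div regions; e.g. with ncached_max ==
	 * 200 and lg_fill_div == 1 (illustrative numbers only), up to 100
	 * regions are cached per fill.
	 */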
| 2387 | for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> |
| 2388 | tbin->lg_fill_div); i < nfill; i++) { |
| 2389 | arena_run_t *run; |
| 2390 | void *ptr; |
| 2391 | if ((run = bin->runcur) != NULL && run->nfree > 0) |
| 2392 | ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); |
| 2393 | else |
| 2394 | ptr = arena_bin_malloc_hard(tsdn, arena, bin); |
| 2395 | if (ptr == NULL) { |
| 2396 | /* |
| 2397 | * OOM. tbin->avail isn't yet filled down to its first |
| 2398 | * element, so the successful allocations (if any) must |
| 2399 | * be moved just before tbin->avail before bailing out. |
| 2400 | */ |
| 2401 | if (i > 0) { |
| 2402 | memmove(tbin->avail - i, tbin->avail - nfill, |
| 2403 | i * sizeof(void *)); |
| 2404 | } |
| 2405 | break; |
| 2406 | } |
| 2407 | if (config_fill && unlikely(opt_junk_alloc)) { |
| 2408 | arena_alloc_junk_small(ptr, &arena_bin_info[binind], |
| 2409 | true); |
| 2410 | } |
| 2411 | /* Insert such that low regions get used first. */ |
| 2412 | *(tbin->avail - nfill + i) = ptr; |
| 2413 | } |
| 2414 | if (config_stats) { |
| 2415 | bin->stats.nmalloc += i; |
| 2416 | bin->stats.nrequests += tbin->tstats.nrequests; |
| 2417 | bin->stats.curregs += i; |
| 2418 | bin->stats.nfills++; |
| 2419 | tbin->tstats.nrequests = 0; |
| 2420 | } |
| 2421 | malloc_mutex_unlock(tsdn, &bin->lock); |
| 2422 | tbin->ncached = i; |
| 2423 | arena_decay_tick(tsdn, arena); |
| 2424 | } |
| 2425 | |
| 2426 | void |
| 2427 | arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) |
| 2428 | { |
| 2429 | |
| 2430 | size_t redzone_size = bin_info->redzone_size; |
| 2431 | |
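	/*
	 * Each region is laid out as [redzone | region | redzone], where
	 * reg_interval is expected to cover the region plus both redzones.
	 * When the region itself must end up zeroed (zero == true), only the
	 * redzones are junk-filled; otherwise the whole interval is.
	 */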
| 2432 | if (zero) { |
| 2433 | memset((void *)((uintptr_t)ptr - redzone_size), |
| 2434 | JEMALLOC_ALLOC_JUNK, redzone_size); |
| 2435 | memset((void *)((uintptr_t)ptr + bin_info->reg_size), |
| 2436 | JEMALLOC_ALLOC_JUNK, redzone_size); |
| 2437 | } else { |
| 2438 | memset((void *)((uintptr_t)ptr - redzone_size), |
| 2439 | JEMALLOC_ALLOC_JUNK, bin_info->reg_interval); |
| 2440 | } |
| 2441 | } |
| 2442 | |
| 2443 | #ifdef JEMALLOC_JET |
| 2444 | #undef arena_redzone_corruption |
| 2445 | #define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption) |
| 2446 | #endif |
| 2447 | static void |
| 2448 | arena_redzone_corruption(void *ptr, size_t usize, bool after, |
| 2449 | size_t offset, uint8_t byte) |
| 2450 | { |
| 2451 | |
	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
	    after ? "after" : "before", ptr, usize, byte);
| 2455 | } |
| 2456 | #ifdef JEMALLOC_JET |
| 2457 | #undef arena_redzone_corruption |
| 2458 | #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) |
| 2459 | arena_redzone_corruption_t *arena_redzone_corruption = |
| 2460 | JEMALLOC_N(n_arena_redzone_corruption); |
| 2461 | #endif |
| 2462 | |
| 2463 | static void |
| 2464 | arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) |
| 2465 | { |
| 2466 | bool error = false; |
| 2467 | |
| 2468 | if (opt_junk_alloc) { |
| 2469 | size_t size = bin_info->reg_size; |
| 2470 | size_t redzone_size = bin_info->redzone_size; |
| 2471 | size_t i; |
| 2472 | |
| 2473 | for (i = 1; i <= redzone_size; i++) { |
| 2474 | uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); |
| 2475 | if (*byte != JEMALLOC_ALLOC_JUNK) { |
| 2476 | error = true; |
| 2477 | arena_redzone_corruption(ptr, size, false, i, |
| 2478 | *byte); |
| 2479 | if (reset) |
| 2480 | *byte = JEMALLOC_ALLOC_JUNK; |
| 2481 | } |
| 2482 | } |
| 2483 | for (i = 0; i < redzone_size; i++) { |
| 2484 | uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); |
| 2485 | if (*byte != JEMALLOC_ALLOC_JUNK) { |
| 2486 | error = true; |
| 2487 | arena_redzone_corruption(ptr, size, true, i, |
| 2488 | *byte); |
| 2489 | if (reset) |
| 2490 | *byte = JEMALLOC_ALLOC_JUNK; |
| 2491 | } |
| 2492 | } |
| 2493 | } |
| 2494 | |
| 2495 | if (opt_abort && error) |
| 2496 | abort(); |
| 2497 | } |
| 2498 | |
| 2499 | #ifdef JEMALLOC_JET |
| 2500 | #undef arena_dalloc_junk_small |
| 2501 | #define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) |
| 2502 | #endif |
| 2503 | void |
| 2504 | arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) |
| 2505 | { |
| 2506 | size_t redzone_size = bin_info->redzone_size; |
| 2507 | |
| 2508 | arena_redzones_validate(ptr, bin_info, false); |
| 2509 | memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, |
| 2510 | bin_info->reg_interval); |
| 2511 | } |
| 2512 | #ifdef JEMALLOC_JET |
| 2513 | #undef arena_dalloc_junk_small |
| 2514 | #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) |
| 2515 | arena_dalloc_junk_small_t *arena_dalloc_junk_small = |
| 2516 | JEMALLOC_N(n_arena_dalloc_junk_small); |
| 2517 | #endif |
| 2518 | |
| 2519 | void |
| 2520 | arena_quarantine_junk_small(void *ptr, size_t usize) |
| 2521 | { |
| 2522 | szind_t binind; |
| 2523 | arena_bin_info_t *bin_info; |
| 2524 | cassert(config_fill); |
| 2525 | assert(opt_junk_free); |
| 2526 | assert(opt_quarantine); |
| 2527 | assert(usize <= SMALL_MAXCLASS); |
| 2528 | |
| 2529 | binind = size2index(usize); |
| 2530 | bin_info = &arena_bin_info[binind]; |
| 2531 | arena_redzones_validate(ptr, bin_info, true); |
| 2532 | } |
| 2533 | |
| 2534 | static void * |
| 2535 | arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) |
| 2536 | { |
| 2537 | void *ret; |
| 2538 | arena_bin_t *bin; |
| 2539 | size_t usize; |
| 2540 | arena_run_t *run; |
| 2541 | |
| 2542 | assert(binind < NBINS); |
| 2543 | bin = &arena->bins[binind]; |
| 2544 | usize = index2size(binind); |
| 2545 | |
| 2546 | malloc_mutex_lock(tsdn, &bin->lock); |
| 2547 | if ((run = bin->runcur) != NULL && run->nfree > 0) |
| 2548 | ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); |
| 2549 | else |
| 2550 | ret = arena_bin_malloc_hard(tsdn, arena, bin); |
| 2551 | |
| 2552 | if (ret == NULL) { |
| 2553 | malloc_mutex_unlock(tsdn, &bin->lock); |
| 2554 | return (NULL); |
| 2555 | } |
| 2556 | |
| 2557 | if (config_stats) { |
| 2558 | bin->stats.nmalloc++; |
| 2559 | bin->stats.nrequests++; |
| 2560 | bin->stats.curregs++; |
| 2561 | } |
| 2562 | malloc_mutex_unlock(tsdn, &bin->lock); |
| 2563 | if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) |
| 2564 | prof_idump(tsdn); |
| 2565 | |
| 2566 | if (!zero) { |
| 2567 | if (config_fill) { |
| 2568 | if (unlikely(opt_junk_alloc)) { |
| 2569 | arena_alloc_junk_small(ret, |
| 2570 | &arena_bin_info[binind], false); |
| 2571 | } else if (unlikely(opt_zero)) |
| 2572 | memset(ret, 0, usize); |
| 2573 | } |
| 2574 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); |
| 2575 | } else { |
| 2576 | if (config_fill && unlikely(opt_junk_alloc)) { |
| 2577 | arena_alloc_junk_small(ret, &arena_bin_info[binind], |
| 2578 | true); |
| 2579 | } |
| 2580 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); |
| 2581 | memset(ret, 0, usize); |
| 2582 | } |
| 2583 | |
| 2584 | arena_decay_tick(tsdn, arena); |
| 2585 | return (ret); |
| 2586 | } |
| 2587 | |
| 2588 | void * |
| 2589 | arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) |
| 2590 | { |
| 2591 | void *ret; |
| 2592 | size_t usize; |
| 2593 | uintptr_t random_offset; |
| 2594 | arena_run_t *run; |
| 2595 | arena_chunk_map_misc_t *miscelm; |
| 2596 | UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); |
| 2597 | |
| 2598 | /* Large allocation. */ |
| 2599 | usize = index2size(binind); |
| 2600 | malloc_mutex_lock(tsdn, &arena->lock); |
| 2601 | if (config_cache_oblivious) { |
| 2602 | uint64_t r; |
| 2603 | |
| 2604 | /* |
| 2605 | * Compute a uniformly distributed offset within the first page |
| 2606 | * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 |
| 2607 | * for 4 KiB pages and 64-byte cachelines. |
| 2608 | */ |
| 2609 | r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE); |
| 2610 | random_offset = ((uintptr_t)r) << LG_CACHELINE; |
| 2611 | } else |
| 2612 | random_offset = 0; |
| 2613 | run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); |
| 2614 | if (run == NULL) { |
| 2615 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 2616 | return (NULL); |
| 2617 | } |
| 2618 | miscelm = arena_run_to_miscelm(run); |
| 2619 | ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + |
| 2620 | random_offset); |
| 2621 | if (config_stats) { |
| 2622 | szind_t index = binind - NBINS; |
| 2623 | |
| 2624 | arena->stats.nmalloc_large++; |
| 2625 | arena->stats.nrequests_large++; |
| 2626 | arena->stats.allocated_large += usize; |
| 2627 | arena->stats.lstats[index].nmalloc++; |
| 2628 | arena->stats.lstats[index].nrequests++; |
| 2629 | arena->stats.lstats[index].curruns++; |
| 2630 | } |
| 2631 | if (config_prof) |
| 2632 | idump = arena_prof_accum_locked(arena, usize); |
| 2633 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 2634 | if (config_prof && idump) |
| 2635 | prof_idump(tsdn); |
| 2636 | |
| 2637 | if (!zero) { |
| 2638 | if (config_fill) { |
| 2639 | if (unlikely(opt_junk_alloc)) |
| 2640 | memset(ret, JEMALLOC_ALLOC_JUNK, usize); |
| 2641 | else if (unlikely(opt_zero)) |
| 2642 | memset(ret, 0, usize); |
| 2643 | } |
| 2644 | } |
| 2645 | |
| 2646 | arena_decay_tick(tsdn, arena); |
| 2647 | return (ret); |
| 2648 | } |
| 2649 | |
| 2650 | void * |
| 2651 | arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, |
| 2652 | bool zero) |
| 2653 | { |
| 2654 | |
| 2655 | assert(!tsdn_null(tsdn) || arena != NULL); |
| 2656 | |
| 2657 | if (likely(!tsdn_null(tsdn))) |
| 2658 | arena = arena_choose(tsdn_tsd(tsdn), arena); |
| 2659 | if (unlikely(arena == NULL)) |
| 2660 | return (NULL); |
| 2661 | |
| 2662 | if (likely(size <= SMALL_MAXCLASS)) |
| 2663 | return (arena_malloc_small(tsdn, arena, ind, zero)); |
| 2664 | if (likely(size <= large_maxclass)) |
| 2665 | return (arena_malloc_large(tsdn, arena, ind, zero)); |
| 2666 | return (huge_malloc(tsdn, arena, index2size(ind), zero)); |
| 2667 | } |
| 2668 | |
| 2669 | /* Only handles large allocations that require more than page alignment. */ |
| 2670 | static void * |
| 2671 | arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, |
| 2672 | bool zero) |
| 2673 | { |
| 2674 | void *ret; |
| 2675 | size_t alloc_size, leadsize, trailsize; |
| 2676 | arena_run_t *run; |
| 2677 | arena_chunk_t *chunk; |
| 2678 | arena_chunk_map_misc_t *miscelm; |
| 2679 | void *rpages; |
| 2680 | |
| 2681 | assert(!tsdn_null(tsdn) || arena != NULL); |
| 2682 | assert(usize == PAGE_CEILING(usize)); |
| 2683 | |
| 2684 | if (likely(!tsdn_null(tsdn))) |
| 2685 | arena = arena_choose(tsdn_tsd(tsdn), arena); |
| 2686 | if (unlikely(arena == NULL)) |
| 2687 | return (NULL); |
| 2688 | |
| 2689 | alignment = PAGE_CEILING(alignment); |
| 2690 | alloc_size = usize + large_pad + alignment - PAGE; |
| 2691 | |
| 2692 | malloc_mutex_lock(tsdn, &arena->lock); |
| 2693 | run = arena_run_alloc_large(tsdn, arena, alloc_size, false); |
| 2694 | if (run == NULL) { |
| 2695 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 2696 | return (NULL); |
| 2697 | } |
| 2698 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); |
| 2699 | miscelm = arena_run_to_miscelm(run); |
| 2700 | rpages = arena_miscelm_to_rpages(miscelm); |
| 2701 | |
| 2702 | leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - |
| 2703 | (uintptr_t)rpages; |
| 2704 | assert(alloc_size >= leadsize + usize); |
| 2705 | trailsize = alloc_size - leadsize - usize - large_pad; |
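	/*
	 * The over-sized run is carved into [leadsize | usize + large_pad |
	 * trailsize]: the leading slack (if any) is trimmed off to reach an
	 * address with the requested alignment, and the trailing slack is
	 * returned to the arena.
	 */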
| 2706 | if (leadsize != 0) { |
| 2707 | arena_chunk_map_misc_t *head_miscelm = miscelm; |
| 2708 | arena_run_t *head_run = run; |
| 2709 | |
| 2710 | miscelm = arena_miscelm_get_mutable(chunk, |
| 2711 | arena_miscelm_to_pageind(head_miscelm) + (leadsize >> |
| 2712 | LG_PAGE)); |
| 2713 | run = &miscelm->run; |
| 2714 | |
| 2715 | arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, |
| 2716 | alloc_size - leadsize); |
| 2717 | } |
| 2718 | if (trailsize != 0) { |
| 2719 | arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + |
| 2720 | trailsize, usize + large_pad, false); |
| 2721 | } |
| 2722 | if (arena_run_init_large(arena, run, usize + large_pad, zero)) { |
| 2723 | size_t run_ind = |
| 2724 | arena_miscelm_to_pageind(arena_run_to_miscelm(run)); |
| 2725 | bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); |
| 2726 | bool decommitted = (arena_mapbits_decommitted_get(chunk, |
| 2727 | run_ind) != 0); |
| 2728 | |
| 2729 | assert(decommitted); /* Cause of OOM. */ |
| 2730 | arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); |
| 2731 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 2732 | return (NULL); |
| 2733 | } |
| 2734 | ret = arena_miscelm_to_rpages(miscelm); |
| 2735 | |
| 2736 | if (config_stats) { |
| 2737 | szind_t index = size2index(usize) - NBINS; |
| 2738 | |
| 2739 | arena->stats.nmalloc_large++; |
| 2740 | arena->stats.nrequests_large++; |
| 2741 | arena->stats.allocated_large += usize; |
| 2742 | arena->stats.lstats[index].nmalloc++; |
| 2743 | arena->stats.lstats[index].nrequests++; |
| 2744 | arena->stats.lstats[index].curruns++; |
| 2745 | } |
| 2746 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 2747 | |
| 2748 | if (config_fill && !zero) { |
| 2749 | if (unlikely(opt_junk_alloc)) |
| 2750 | memset(ret, JEMALLOC_ALLOC_JUNK, usize); |
| 2751 | else if (unlikely(opt_zero)) |
| 2752 | memset(ret, 0, usize); |
| 2753 | } |
| 2754 | arena_decay_tick(tsdn, arena); |
| 2755 | return (ret); |
| 2756 | } |
| 2757 | |
| 2758 | void * |
| 2759 | arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, |
| 2760 | bool zero, tcache_t *tcache) |
| 2761 | { |
| 2762 | void *ret; |
| 2763 | |
| 2764 | if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE |
| 2765 | && (usize & PAGE_MASK) == 0))) { |
| 2766 | /* Small; alignment doesn't require special run placement. */ |
| 2767 | ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, |
| 2768 | tcache, true); |
| 2769 | } else if (usize <= large_maxclass && alignment <= PAGE) { |
| 2770 | /* |
| 2771 | * Large; alignment doesn't require special run placement. |
| 2772 | * However, the cached pointer may be at a random offset from |
| 2773 | * the base of the run, so do some bit manipulation to retrieve |
| 2774 | * the base. |
| 2775 | */ |
| 2776 | ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, |
| 2777 | tcache, true); |
| 2778 | if (config_cache_oblivious) |
| 2779 | ret = (void *)((uintptr_t)ret & ~PAGE_MASK); |
| 2780 | } else { |
| 2781 | if (likely(usize <= large_maxclass)) { |
| 2782 | ret = arena_palloc_large(tsdn, arena, usize, alignment, |
| 2783 | zero); |
| 2784 | } else if (likely(alignment <= chunksize)) |
| 2785 | ret = huge_malloc(tsdn, arena, usize, zero); |
| 2786 | else { |
| 2787 | ret = huge_palloc(tsdn, arena, usize, alignment, zero); |
| 2788 | } |
| 2789 | } |
| 2790 | return (ret); |
| 2791 | } |
| 2792 | |
| 2793 | void |
| 2794 | arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) |
| 2795 | { |
| 2796 | arena_chunk_t *chunk; |
| 2797 | size_t pageind; |
| 2798 | szind_t binind; |
| 2799 | |
| 2800 | cassert(config_prof); |
| 2801 | assert(ptr != NULL); |
| 2802 | assert(CHUNK_ADDR2BASE(ptr) != ptr); |
| 2803 | assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); |
| 2804 | assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); |
| 2805 | assert(size <= SMALL_MAXCLASS); |
| 2806 | |
| 2807 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
| 2808 | pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; |
| 2809 | binind = size2index(size); |
| 2810 | assert(binind < NBINS); |
| 2811 | arena_mapbits_large_binind_set(chunk, pageind, binind); |
| 2812 | |
| 2813 | assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); |
| 2814 | assert(isalloc(tsdn, ptr, true) == size); |
| 2815 | } |
| 2816 | |
| 2817 | static void |
| 2818 | arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, |
| 2819 | arena_bin_t *bin) |
| 2820 | { |
| 2821 | |
| 2822 | /* Dissociate run from bin. */ |
| 2823 | if (run == bin->runcur) |
| 2824 | bin->runcur = NULL; |
| 2825 | else { |
| 2826 | szind_t binind = arena_bin_index(extent_node_arena_get( |
| 2827 | &chunk->node), bin); |
| 2828 | arena_bin_info_t *bin_info = &arena_bin_info[binind]; |
| 2829 | |
| 2830 | /* |
| 2831 | * The following block's conditional is necessary because if the |
| 2832 | * run only contains one region, then it never gets inserted |
		 * into the non-full runs heap.
| 2834 | */ |
| 2835 | if (bin_info->nregs != 1) { |
| 2836 | arena_chunk_map_misc_t *miscelm = |
| 2837 | arena_run_to_miscelm(run); |
| 2838 | |
| 2839 | arena_run_heap_remove(&bin->runs, miscelm); |
| 2840 | } |
| 2841 | } |
| 2842 | } |
| 2843 | |
| 2844 | static void |
| 2845 | arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 2846 | arena_run_t *run, arena_bin_t *bin) |
| 2847 | { |
| 2848 | |
| 2849 | assert(run != bin->runcur); |
| 2850 | |
| 2851 | malloc_mutex_unlock(tsdn, &bin->lock); |
| 2852 | /******************************/ |
| 2853 | malloc_mutex_lock(tsdn, &arena->lock); |
| 2854 | arena_run_dalloc(tsdn, arena, run, true, false, false); |
| 2855 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 2856 | /****************************/ |
| 2857 | malloc_mutex_lock(tsdn, &bin->lock); |
| 2858 | if (config_stats) |
| 2859 | bin->stats.curruns--; |
| 2860 | } |
| 2861 | |
| 2862 | static void |
| 2863 | arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, |
| 2864 | arena_bin_t *bin) |
| 2865 | { |
| 2866 | |
| 2867 | /* |
| 2868 | * Make sure that if bin->runcur is non-NULL, it refers to the lowest |
| 2869 | * non-full run. It is okay to NULL runcur out rather than proactively |
| 2870 | * keeping it pointing at the lowest non-full run. |
| 2871 | */ |
| 2872 | if ((uintptr_t)run < (uintptr_t)bin->runcur) { |
| 2873 | /* Switch runcur. */ |
| 2874 | if (bin->runcur->nfree > 0) |
| 2875 | arena_bin_runs_insert(bin, bin->runcur); |
| 2876 | bin->runcur = run; |
| 2877 | if (config_stats) |
| 2878 | bin->stats.reruns++; |
| 2879 | } else |
| 2880 | arena_bin_runs_insert(bin, run); |
| 2881 | } |
| 2882 | |
| 2883 | static void |
| 2884 | arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 2885 | void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) |
| 2886 | { |
| 2887 | size_t pageind, rpages_ind; |
| 2888 | arena_run_t *run; |
| 2889 | arena_bin_t *bin; |
| 2890 | arena_bin_info_t *bin_info; |
| 2891 | szind_t binind; |
| 2892 | |
| 2893 | pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; |
| 2894 | rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); |
| 2895 | run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; |
| 2896 | binind = run->binind; |
| 2897 | bin = &arena->bins[binind]; |
| 2898 | bin_info = &arena_bin_info[binind]; |
| 2899 | |
| 2900 | if (!junked && config_fill && unlikely(opt_junk_free)) |
| 2901 | arena_dalloc_junk_small(ptr, bin_info); |
| 2902 | |
| 2903 | arena_run_reg_dalloc(run, ptr); |
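	/*
	 * If the run is now completely free, dissociate it from the bin and
	 * return it to the arena; if it just transitioned from full to
	 * non-full, make it available for future allocations.
	 */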
| 2904 | if (run->nfree == bin_info->nregs) { |
| 2905 | arena_dissociate_bin_run(chunk, run, bin); |
| 2906 | arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); |
| 2907 | } else if (run->nfree == 1 && run != bin->runcur) |
| 2908 | arena_bin_lower_run(arena, chunk, run, bin); |
| 2909 | |
| 2910 | if (config_stats) { |
| 2911 | bin->stats.ndalloc++; |
| 2912 | bin->stats.curregs--; |
| 2913 | } |
| 2914 | } |
| 2915 | |
| 2916 | void |
| 2917 | arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, |
| 2918 | arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) |
| 2919 | { |
| 2920 | |
| 2921 | arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); |
| 2922 | } |
| 2923 | |
| 2924 | void |
| 2925 | arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, |
| 2926 | size_t pageind, arena_chunk_map_bits_t *bitselm) |
| 2927 | { |
| 2928 | arena_run_t *run; |
| 2929 | arena_bin_t *bin; |
| 2930 | size_t rpages_ind; |
| 2931 | |
| 2932 | rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); |
| 2933 | run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; |
| 2934 | bin = &arena->bins[run->binind]; |
| 2935 | malloc_mutex_lock(tsdn, &bin->lock); |
| 2936 | arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); |
| 2937 | malloc_mutex_unlock(tsdn, &bin->lock); |
| 2938 | } |
| 2939 | |
| 2940 | void |
| 2941 | arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 2942 | void *ptr, size_t pageind) |
| 2943 | { |
| 2944 | arena_chunk_map_bits_t *bitselm; |
| 2945 | |
| 2946 | if (config_debug) { |
| 2947 | /* arena_ptr_small_binind_get() does extra sanity checking. */ |
| 2948 | assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, |
| 2949 | pageind)) != BININD_INVALID); |
| 2950 | } |
| 2951 | bitselm = arena_bitselm_get_mutable(chunk, pageind); |
| 2952 | arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); |
| 2953 | |
	/*
	 * AER-5943: Don't recreate TLS data when the dynamic linker free()s
	 * TLS on thread exit, as it would lead to a memory leak.
	 *
	 * We (heuristically) recognize this situation by comparing
	 * tsd_arenas_tdata to NULL.  In this case, we don't count a decay
	 * tick, which would otherwise recreate tsd_arenas_tdata.
	 */
	if (tsdn_null(tsdn) || tsd_arenas_tdata_get(tsdn_tsd(tsdn)) != NULL)
		arena_decay_tick(tsdn, arena);
| 2964 | } |
| 2965 | |
| 2966 | #ifdef JEMALLOC_JET |
| 2967 | #undef arena_dalloc_junk_large |
| 2968 | #define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) |
| 2969 | #endif |
| 2970 | void |
| 2971 | arena_dalloc_junk_large(void *ptr, size_t usize) |
| 2972 | { |
| 2973 | |
| 2974 | if (config_fill && unlikely(opt_junk_free)) |
| 2975 | memset(ptr, JEMALLOC_FREE_JUNK, usize); |
| 2976 | } |
| 2977 | #ifdef JEMALLOC_JET |
| 2978 | #undef arena_dalloc_junk_large |
| 2979 | #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) |
| 2980 | arena_dalloc_junk_large_t *arena_dalloc_junk_large = |
| 2981 | JEMALLOC_N(n_arena_dalloc_junk_large); |
| 2982 | #endif |
| 2983 | |
| 2984 | static void |
| 2985 | arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, |
| 2986 | arena_chunk_t *chunk, void *ptr, bool junked) |
| 2987 | { |
| 2988 | size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; |
| 2989 | arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, |
| 2990 | pageind); |
| 2991 | arena_run_t *run = &miscelm->run; |
| 2992 | |
| 2993 | if (config_fill || config_stats) { |
| 2994 | size_t usize = arena_mapbits_large_size_get(chunk, pageind) - |
| 2995 | large_pad; |
| 2996 | |
| 2997 | if (!junked) |
| 2998 | arena_dalloc_junk_large(ptr, usize); |
| 2999 | if (config_stats) { |
| 3000 | szind_t index = size2index(usize) - NBINS; |
| 3001 | |
| 3002 | arena->stats.ndalloc_large++; |
| 3003 | arena->stats.allocated_large -= usize; |
| 3004 | arena->stats.lstats[index].ndalloc++; |
| 3005 | arena->stats.lstats[index].curruns--; |
| 3006 | } |
| 3007 | } |
| 3008 | |
| 3009 | arena_run_dalloc(tsdn, arena, run, true, false, false); |
| 3010 | } |
| 3011 | |
| 3012 | void |
| 3013 | arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, |
| 3014 | arena_chunk_t *chunk, void *ptr) |
| 3015 | { |
| 3016 | |
| 3017 | arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); |
| 3018 | } |
| 3019 | |
| 3020 | void |
| 3021 | arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 3022 | void *ptr) |
| 3023 | { |
| 3024 | |
| 3025 | malloc_mutex_lock(tsdn, &arena->lock); |
| 3026 | arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); |
| 3027 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3028 | arena_decay_tick(tsdn, arena); |
| 3029 | } |
| 3030 | |
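|      | /*
|      |  * Shrink a large allocation in place by trimming the tail of its run, and
|      |  * move the large stats from the old size class to the new one.
|      |  */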
| 3031 | static void |
| 3032 | arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 3033 | void *ptr, size_t oldsize, size_t size) |
| 3034 | { |
| 3035 | size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; |
| 3036 | arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, |
| 3037 | pageind); |
| 3038 | arena_run_t *run = &miscelm->run; |
| 3039 | |
| 3040 | assert(size < oldsize); |
| 3041 | |
| 3042 | /* |
| 3043 | * Shrink the run, and make trailing pages available for other |
| 3044 | * allocations. |
| 3045 | */ |
| 3046 | malloc_mutex_lock(tsdn, &arena->lock); |
| 3047 | arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + |
| 3048 | large_pad, true); |
| 3049 | if (config_stats) { |
| 3050 | szind_t oldindex = size2index(oldsize) - NBINS; |
| 3051 | szind_t index = size2index(size) - NBINS; |
| 3052 | |
| 3053 | arena->stats.ndalloc_large++; |
| 3054 | arena->stats.allocated_large -= oldsize; |
| 3055 | arena->stats.lstats[oldindex].ndalloc++; |
| 3056 | arena->stats.lstats[oldindex].curruns--; |
| 3057 | |
| 3058 | arena->stats.nmalloc_large++; |
| 3059 | arena->stats.nrequests_large++; |
| 3060 | arena->stats.allocated_large += size; |
| 3061 | arena->stats.lstats[index].nmalloc++; |
| 3062 | arena->stats.lstats[index].nrequests++; |
| 3063 | arena->stats.lstats[index].curruns++; |
| 3064 | } |
| 3065 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3066 | } |
| 3067 | |
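|      | /*
|      |  * Try to grow a large allocation in place by splitting off the leading
|      |  * portion of the immediately following free run.  Returns false on
|      |  * success, true on failure (jemalloc's usual error convention).
|      |  */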
| 3068 | static bool |
| 3069 | arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, |
| 3070 | void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) |
| 3071 | { |
| 3072 | size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; |
| 3073 | size_t npages = (oldsize + large_pad) >> LG_PAGE; |
| 3074 | size_t followsize; |
| 3075 | |
| 3076 | assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - |
| 3077 | large_pad); |
| 3078 | |
| 3079 | /* Try to extend the run. */ |
| 3080 | malloc_mutex_lock(tsdn, &arena->lock); |
| 3081 | if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, |
| 3082 | pageind+npages) != 0) |
| 3083 | goto label_fail; |
| 3084 | followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); |
| 3085 | if (oldsize + followsize >= usize_min) { |
| 3086 | /* |
| 3087 | * The next run is available and sufficiently large. Split the |
| 3088 | * following run, then merge the first part with the existing |
| 3089 | * allocation. |
| 3090 | */ |
| 3091 | arena_run_t *run; |
| 3092 | size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; |
| 3093 | |
| 3094 | usize = usize_max; |
| 3095 | while (oldsize + followsize < usize) |
| 3096 | usize = index2size(size2index(usize)-1); |
| 3097 | assert(usize >= usize_min); |
| 3098 | assert(usize >= oldsize); |
| 3099 | splitsize = usize - oldsize; |
| 3100 | if (splitsize == 0) |
| 3101 | goto label_fail; |
| 3102 | |
| 3103 | run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; |
| 3104 | if (arena_run_split_large(arena, run, splitsize, zero)) |
| 3105 | goto label_fail; |
| 3106 | |
| 3107 | if (config_cache_oblivious && zero) { |
| 3108 | /* |
| 3109 | * Zero the trailing bytes of the original allocation's |
| 3110 | * last page, since they are in an indeterminate state. |
| 3111 | * There will always be trailing bytes, because ptr's |
| 3112 | * offset from the beginning of the run is a multiple of |
| 3113 | * CACHELINE in [0 .. PAGE). |
| 3114 | */ |
| 3115 | void *zbase = (void *)((uintptr_t)ptr + oldsize); |
| 3116 | void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + |
| 3117 | PAGE)); |
| 3118 | size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; |
| 3119 | assert(nzero > 0); |
| 3120 | memset(zbase, 0, nzero); |
| 3121 | } |
| 3122 | |
| 3123 | size = oldsize + splitsize; |
| 3124 | npages = (size + large_pad) >> LG_PAGE; |
| 3125 | |
| 3126 | /* |
| 3127 | * Mark the extended run as dirty if either portion of the run |
| 3128 | * was dirty before allocation. This is rather pedantic, |
| 3129 | * because there's not actually any sequence of events that |
| 3130 | * could cause the resulting run to be passed to |
| 3131 | * arena_run_dalloc() with the dirty argument set to false |
| 3132 | * (which is when dirty flag consistency would really matter). |
| 3133 | */ |
| 3134 | flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | |
| 3135 | arena_mapbits_dirty_get(chunk, pageind+npages-1); |
| 3136 | flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0; |
| 3137 | arena_mapbits_large_set(chunk, pageind, size + large_pad, |
| 3138 | flag_dirty | (flag_unzeroed_mask & |
| 3139 | arena_mapbits_unzeroed_get(chunk, pageind))); |
| 3140 | arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | |
| 3141 | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, |
| 3142 | pageind+npages-1))); |
| 3143 | |
| 3144 | if (config_stats) { |
| 3145 | szind_t oldindex = size2index(oldsize) - NBINS; |
| 3146 | szind_t index = size2index(size) - NBINS; |
| 3147 | |
| 3148 | arena->stats.ndalloc_large++; |
| 3149 | arena->stats.allocated_large -= oldsize; |
| 3150 | arena->stats.lstats[oldindex].ndalloc++; |
| 3151 | arena->stats.lstats[oldindex].curruns--; |
| 3152 | |
| 3153 | arena->stats.nmalloc_large++; |
| 3154 | arena->stats.nrequests_large++; |
| 3155 | arena->stats.allocated_large += size; |
| 3156 | arena->stats.lstats[index].nmalloc++; |
| 3157 | arena->stats.lstats[index].nrequests++; |
| 3158 | arena->stats.lstats[index].curruns++; |
| 3159 | } |
| 3160 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3161 | return (false); |
| 3162 | } |
| 3163 | label_fail: |
| 3164 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3165 | return (true); |
| 3166 | } |
| 3167 | |
| 3168 | #ifdef JEMALLOC_JET |
| 3169 | #undef arena_ralloc_junk_large |
| 3170 | #define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) |
| 3171 | #endif |
| 3172 | static void |
| 3173 | arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) |
| 3174 | { |
| 3175 | |
| 3176 | if (config_fill && unlikely(opt_junk_free)) { |
| 3177 | memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, |
| 3178 | old_usize - usize); |
| 3179 | } |
| 3180 | } |
| 3181 | #ifdef JEMALLOC_JET |
| 3182 | #undef arena_ralloc_junk_large |
| 3183 | #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) |
| 3184 | arena_ralloc_junk_large_t *arena_ralloc_junk_large = |
| 3185 | JEMALLOC_N(n_arena_ralloc_junk_large); |
| 3186 | #endif |
| 3187 | |
| 3188 | /* |
| 3189 |  * Try to resize a large allocation, in order to avoid copying.  This will
| 3190 |  * always fail when growing an object if the following run is already in use.
| 3191 | */ |
| 3192 | static bool |
| 3193 | arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, |
| 3194 | size_t usize_max, bool zero) |
| 3195 | { |
| 3196 | arena_chunk_t *chunk; |
| 3197 | arena_t *arena; |
| 3198 | |
| 3199 | if (oldsize == usize_max) { |
| 3200 | /* Current size class is compatible and maximal. */ |
| 3201 | return (false); |
| 3202 | } |
| 3203 | |
| 3204 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
| 3205 | arena = extent_node_arena_get(&chunk->node); |
| 3206 | |
| 3207 | if (oldsize < usize_max) { |
| 3208 | bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, |
| 3209 | oldsize, usize_min, usize_max, zero); |
| 3210 | if (config_fill && !ret && !zero) { |
| 3211 | if (unlikely(opt_junk_alloc)) { |
| 3212 | memset((void *)((uintptr_t)ptr + oldsize), |
| 3213 | JEMALLOC_ALLOC_JUNK, |
| 3214 | isalloc(tsdn, ptr, config_prof) - oldsize); |
| 3215 | } else if (unlikely(opt_zero)) { |
| 3216 | memset((void *)((uintptr_t)ptr + oldsize), 0, |
| 3217 | isalloc(tsdn, ptr, config_prof) - oldsize); |
| 3218 | } |
| 3219 | } |
| 3220 | return (ret); |
| 3221 | } |
| 3222 | |
| 3223 | assert(oldsize > usize_max); |
| 3224 | /* Fill before shrinking in order to avoid a race. */
| 3225 | arena_ralloc_junk_large(ptr, oldsize, usize_max); |
| 3226 | arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); |
| 3227 | return (false); |
| 3228 | } |
| 3229 | |
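|      | /*
|      |  * Returns false if ptr can satisfy a request in [size, size+extra] without
|      |  * moving, either because the current size class still fits or because the
|      |  * underlying run/chunk was resized in place; returns true if the caller
|      |  * must reallocate and copy.
|      |  */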
| 3230 | bool |
| 3231 | arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, |
| 3232 | size_t extra, bool zero)
| 3233 | { |
| 3234 | size_t usize_min, usize_max; |
| 3235 | |
| 3236 | /* Calls with non-zero extra had to clamp extra. */ |
| 3237 | assert(extra == 0 || size + extra <= HUGE_MAXCLASS); |
| 3238 | |
| 3239 | if (unlikely(size > HUGE_MAXCLASS)) |
| 3240 | return (true); |
| 3241 | |
| 3242 | usize_min = s2u(size); |
| 3243 | usize_max = s2u(size + extra); |
| 3244 | if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { |
| 3245 | arena_chunk_t *chunk; |
| 3246 | |
| 3247 | /* |
| 3248 | * Avoid moving the allocation if the size class can be left the |
| 3249 | * same. |
| 3250 | */ |
| 3251 | if (oldsize <= SMALL_MAXCLASS) { |
| 3252 | assert(arena_bin_info[size2index(oldsize)].reg_size == |
| 3253 | oldsize); |
| 3254 | if ((usize_max > SMALL_MAXCLASS || |
| 3255 | size2index(usize_max) != size2index(oldsize)) && |
| 3256 | (size > oldsize || usize_max < oldsize)) |
| 3257 | return (true); |
| 3258 | } else { |
| 3259 | if (usize_max <= SMALL_MAXCLASS) |
| 3260 | return (true); |
| 3261 | if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, |
| 3262 | usize_max, zero)) |
| 3263 | return (true); |
| 3264 | } |
| 3265 | |
| 3266 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
| 3267 | arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); |
| 3268 | return (false); |
| 3269 | } else { |
| 3270 | return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, |
| 3271 | usize_max, zero)); |
| 3272 | } |
| 3273 | } |
| 3274 | |
| 3275 | static void * |
| 3276 | arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, |
| 3277 | size_t alignment, bool zero, tcache_t *tcache) |
| 3278 | { |
| 3279 | |
| 3280 | if (alignment == 0) |
| 3281 | return (arena_malloc(tsdn, arena, usize, size2index(usize), |
| 3282 | zero, tcache, true)); |
| 3283 | usize = sa2u(usize, alignment); |
| 3284 | if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) |
| 3285 | return (NULL); |
| 3286 | return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); |
| 3287 | } |
| 3288 | |
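|      | /*
|      |  * Reallocation entry point.  Sizes up to large_maxclass first try to
|      |  * resize in place via arena_ralloc_no_move(); failing that, allocate new
|      |  * space, copy min(usize, oldsize) bytes, and free the old allocation.
|      |  * Larger requests are forwarded to huge_ralloc().
|      |  */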
| 3289 | void * |
| 3290 | arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, |
| 3291 | size_t alignment, bool zero, tcache_t *tcache) |
| 3292 | { |
| 3293 | void *ret; |
| 3294 | size_t usize; |
| 3295 | |
| 3296 | usize = s2u(size); |
| 3297 | if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) |
| 3298 | return (NULL); |
| 3299 | |
| 3300 | if (likely(usize <= large_maxclass)) { |
| 3301 | size_t copysize; |
| 3302 | |
| 3303 | /* Try to avoid moving the allocation. */ |
| 3304 | if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, |
| 3305 | zero)) |
| 3306 | return (ptr); |
| 3307 | |
| 3308 | /* |
| 3309 | * size and oldsize are different enough that we need to move |
| 3310 | * the object. In that case, fall back to allocating new space |
| 3311 | * and copying. |
| 3312 | */ |
| 3313 | ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, |
| 3314 | alignment, zero, tcache); |
| 3315 | if (ret == NULL) |
| 3316 | return (NULL); |
| 3317 | |
| 3318 | /* |
| 3319 | * Junk/zero-filling were already done by |
| 3320 | * ipalloc()/arena_malloc(). |
| 3321 | */ |
| 3322 | |
| 3323 | copysize = (usize < oldsize) ? usize : oldsize; |
| 3324 | JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); |
| 3325 | memcpy(ret, ptr, copysize); |
| 3326 | isqalloc(tsd, ptr, oldsize, tcache, true); |
| 3327 | } else { |
| 3328 | ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, |
| 3329 | zero, tcache); |
| 3330 | } |
| 3331 | return (ret); |
| 3332 | } |
| 3333 | |
| 3334 | dss_prec_t |
| 3335 | arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) |
| 3336 | { |
| 3337 | dss_prec_t ret; |
| 3338 | |
| 3339 | malloc_mutex_lock(tsdn, &arena->lock); |
| 3340 | ret = arena->dss_prec; |
| 3341 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3342 | return (ret); |
| 3343 | } |
| 3344 | |
| 3345 | bool |
| 3346 | arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) |
| 3347 | { |
| 3348 | |
| 3349 | if (!have_dss) |
| 3350 | return (dss_prec != dss_prec_disabled); |
| 3351 | malloc_mutex_lock(tsdn, &arena->lock); |
| 3352 | arena->dss_prec = dss_prec; |
| 3353 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3354 | return (false); |
| 3355 | } |
| 3356 | |
| 3357 | ssize_t |
| 3358 | arena_lg_dirty_mult_default_get(void) |
| 3359 | { |
| 3360 | |
| 3361 | return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); |
| 3362 | } |
| 3363 | |
| 3364 | bool |
| 3365 | arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) |
| 3366 | { |
| 3367 | |
| 3368 | if (opt_purge != purge_mode_ratio) |
| 3369 | return (true); |
| 3370 | if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) |
| 3371 | return (true); |
| 3372 | atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); |
| 3373 | return (false); |
| 3374 | } |
| 3375 | |
| 3376 | ssize_t |
| 3377 | arena_decay_time_default_get(void) |
| 3378 | { |
| 3379 | |
| 3380 | return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); |
| 3381 | } |
| 3382 | |
| 3383 | bool |
| 3384 | arena_decay_time_default_set(ssize_t decay_time) |
| 3385 | { |
| 3386 | |
| 3387 | if (opt_purge != purge_mode_decay) |
| 3388 | return (true); |
| 3389 | if (!arena_decay_time_valid(decay_time)) |
| 3390 | return (true); |
| 3391 | atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); |
| 3392 | return (false); |
| 3393 | } |
| 3394 | |
| 3395 | static void |
| 3396 | arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, |
| 3397 | const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, |
| 3398 | size_t *nactive, size_t *ndirty) |
| 3399 | { |
| 3400 | |
| 3401 | *nthreads += arena_nthreads_get(arena, false); |
| 3402 | *dss = dss_prec_names[arena->dss_prec]; |
| 3403 | *lg_dirty_mult = arena->lg_dirty_mult; |
| 3404 | *decay_time = arena->decay_time; |
| 3405 | *nactive += arena->nactive; |
| 3406 | *ndirty += arena->ndirty; |
| 3407 | } |
| 3408 | |
| 3409 | void |
| 3410 | arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, |
| 3411 | const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, |
| 3412 | size_t *nactive, size_t *ndirty) |
| 3413 | { |
| 3414 | |
| 3415 | malloc_mutex_lock(tsdn, &arena->lock); |
| 3416 | arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, |
| 3417 | decay_time, nactive, ndirty); |
| 3418 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3419 | } |
| 3420 | |
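|      | /*
|      |  * Merge this arena's statistics into the caller-provided accumulators:
|      |  * basic, arena-wide, large, and huge stats under arena->lock, then per-bin
|      |  * stats under each bin's lock.
|      |  */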
| 3421 | void |
| 3422 | arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, |
| 3423 | const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, |
| 3424 | size_t *nactive, size_t *ndirty, arena_stats_t *astats, |
| 3425 | malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, |
| 3426 | malloc_huge_stats_t *hstats) |
| 3427 | { |
| 3428 | unsigned i; |
| 3429 | |
| 3430 | cassert(config_stats); |
| 3431 | |
| 3432 | malloc_mutex_lock(tsdn, &arena->lock); |
| 3433 | arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, |
| 3434 | decay_time, nactive, ndirty); |
| 3435 | |
| 3436 | astats->mapped += arena->stats.mapped; |
| 3437 | astats->retained += arena->stats.retained; |
| 3438 | astats->npurge += arena->stats.npurge; |
| 3439 | astats->nmadvise += arena->stats.nmadvise; |
| 3440 | astats->purged += arena->stats.purged; |
| 3441 | astats->metadata_mapped += arena->stats.metadata_mapped; |
| 3442 | astats->metadata_allocated += arena_metadata_allocated_get(arena); |
| 3443 | astats->allocated_large += arena->stats.allocated_large; |
| 3444 | astats->nmalloc_large += arena->stats.nmalloc_large; |
| 3445 | astats->ndalloc_large += arena->stats.ndalloc_large; |
| 3446 | astats->nrequests_large += arena->stats.nrequests_large; |
| 3447 | astats->allocated_huge += arena->stats.allocated_huge; |
| 3448 | astats->nmalloc_huge += arena->stats.nmalloc_huge; |
| 3449 | astats->ndalloc_huge += arena->stats.ndalloc_huge; |
| 3450 | |
| 3451 | for (i = 0; i < nlclasses; i++) { |
| 3452 | lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; |
| 3453 | lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; |
| 3454 | lstats[i].nrequests += arena->stats.lstats[i].nrequests; |
| 3455 | lstats[i].curruns += arena->stats.lstats[i].curruns; |
| 3456 | } |
| 3457 | |
| 3458 | for (i = 0; i < nhclasses; i++) { |
| 3459 | hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; |
| 3460 | hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; |
| 3461 | hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; |
| 3462 | } |
| 3463 | malloc_mutex_unlock(tsdn, &arena->lock); |
| 3464 | |
| 3465 | for (i = 0; i < NBINS; i++) { |
| 3466 | arena_bin_t *bin = &arena->bins[i]; |
| 3467 | |
| 3468 | malloc_mutex_lock(tsdn, &bin->lock); |
| 3469 | bstats[i].nmalloc += bin->stats.nmalloc; |
| 3470 | bstats[i].ndalloc += bin->stats.ndalloc; |
| 3471 | bstats[i].nrequests += bin->stats.nrequests; |
| 3472 | bstats[i].curregs += bin->stats.curregs; |
| 3473 | if (config_tcache) { |
| 3474 | bstats[i].nfills += bin->stats.nfills; |
| 3475 | bstats[i].nflushes += bin->stats.nflushes; |
| 3476 | } |
| 3477 | bstats[i].nruns += bin->stats.nruns; |
| 3478 | bstats[i].reruns += bin->stats.reruns; |
| 3479 | bstats[i].curruns += bin->stats.curruns; |
| 3480 | malloc_mutex_unlock(tsdn, &bin->lock); |
| 3481 | } |
| 3482 | } |
| 3483 | |
| 3484 | unsigned |
| 3485 | arena_nthreads_get(arena_t *arena, bool internal) |
| 3486 | { |
| 3487 | |
| 3488 | return (atomic_read_u(&arena->nthreads[internal])); |
| 3489 | } |
| 3490 | |
| 3491 | void |
| 3492 | arena_nthreads_inc(arena_t *arena, bool internal) |
| 3493 | { |
| 3494 | |
| 3495 | atomic_add_u(&arena->nthreads[internal], 1); |
| 3496 | } |
| 3497 | |
| 3498 | void |
| 3499 | arena_nthreads_dec(arena_t *arena, bool internal) |
| 3500 | { |
| 3501 | |
| 3502 | atomic_sub_u(&arena->nthreads[internal], 1); |
| 3503 | } |
| 3504 | |
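|      | /*
|      |  * Allocate and initialize an arena.  When stats are enabled, the arena
|      |  * header, lstats, and hstats are carved out of a single base allocation;
|      |  * returns NULL if that allocation or any mutex initialization fails.
|      |  */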
| 3505 | arena_t * |
| 3506 | arena_new(tsdn_t *tsdn, unsigned ind) |
| 3507 | { |
| 3508 | arena_t *arena; |
| 3509 | size_t arena_size; |
| 3510 | unsigned i; |
| 3511 | |
| 3512 | /* Compute arena size to incorporate sufficient runs_avail elements. */ |
| 3513 | arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) * |
| 3514 | runs_avail_nclasses); |
| 3515 | /* |
| 3516 | * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly |
| 3517 | * because there is no way to clean up if base_alloc() OOMs. |
| 3518 | */ |
| 3519 | if (config_stats) { |
| 3520 | arena = (arena_t *)base_alloc(tsdn, |
| 3521 | CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
| 3522 | sizeof(malloc_large_stats_t)) + (nhclasses *
| 3523 | sizeof(malloc_huge_stats_t)));
| 3524 | } else |
| 3525 | arena = (arena_t *)base_alloc(tsdn, arena_size); |
| 3526 | if (arena == NULL) |
| 3527 | return (NULL); |
| 3528 | |
| 3529 | arena->ind = ind; |
| 3530 | arena->nthreads[0] = arena->nthreads[1] = 0; |
| 3531 | if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
| 3532 | return (NULL); |
| 3533 | |
| 3534 | if (config_stats) { |
| 3535 | memset(&arena->stats, 0, sizeof(arena_stats_t)); |
| 3536 | arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena |
| 3537 | + CACHELINE_CEILING(arena_size)); |
| 3538 | memset(arena->stats.lstats, 0, nlclasses * |
| 3539 | sizeof(malloc_large_stats_t)); |
| 3540 | arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena |
| 3541 | + CACHELINE_CEILING(arena_size) + |
| 3542 | QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); |
| 3543 | memset(arena->stats.hstats, 0, nhclasses * |
| 3544 | sizeof(malloc_huge_stats_t)); |
| 3545 | if (config_tcache) |
| 3546 | ql_new(&arena->tcache_ql); |
| 3547 | } |
| 3548 | |
| 3549 | if (config_prof) |
| 3550 | arena->prof_accumbytes = 0; |
| 3551 | |
| 3552 | if (config_cache_oblivious) { |
| 3553 | /* |
| 3554 | * A nondeterministic seed based on the address of arena reduces |
| 3555 | * the likelihood of lockstep non-uniform cache index |
| 3556 | * utilization among identical concurrent processes, but at the |
| 3557 | * cost of test repeatability. For debug builds, instead use a |
| 3558 | * deterministic seed. |
| 3559 | */ |
| 3560 | arena->offset_state = config_debug ? ind : |
| 3561 | (uint64_t)(uintptr_t)arena; |
| 3562 | } |
| 3563 | |
| 3564 | arena->dss_prec = chunk_dss_prec_get(tsdn); |
| 3565 | |
| 3566 | ql_new(&arena->achunks); |
| 3567 | |
| 3568 | arena->spare = NULL; |
| 3569 | |
| 3570 | arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); |
| 3571 | arena->purging = false; |
| 3572 | arena->nactive = 0; |
| 3573 | arena->ndirty = 0; |
| 3574 | |
| 3575 | for (i = 0; i < runs_avail_nclasses; i++)
| 3576 | arena_run_heap_new(&arena->runs_avail[i]); |
| 3577 | qr_new(&arena->runs_dirty, rd_link); |
| 3578 | qr_new(&arena->chunks_cache, cc_link); |
| 3579 | |
| 3580 | if (opt_purge == purge_mode_decay) |
| 3581 | arena_decay_init(arena, arena_decay_time_default_get()); |
| 3582 | |
| 3583 | ql_new(&arena->huge); |
| 3584 | if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
| 3585 | WITNESS_RANK_ARENA_HUGE)) |
| 3586 | return (NULL); |
| 3587 | |
| 3588 | extent_tree_szad_new(&arena->chunks_szad_cached); |
| 3589 | extent_tree_ad_new(&arena->chunks_ad_cached); |
| 3590 | extent_tree_szad_new(&arena->chunks_szad_retained); |
| 3591 | extent_tree_ad_new(&arena->chunks_ad_retained); |
| 3592 | if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
| 3593 | WITNESS_RANK_ARENA_CHUNKS)) |
| 3594 | return (NULL); |
| 3595 | ql_new(&arena->node_cache); |
| 3596 | if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
| 3597 | WITNESS_RANK_ARENA_NODE_CACHE)) |
| 3598 | return (NULL); |
| 3599 | |
| 3600 | arena->chunk_hooks = chunk_hooks_default; |
| 3601 | |
| 3602 | /* Initialize bins. */ |
| 3603 | for (i = 0; i < NBINS; i++) { |
| 3604 | arena_bin_t *bin = &arena->bins[i]; |
| 3605 | if (malloc_mutex_init(&bin->lock, "arena_bin",
| 3606 | WITNESS_RANK_ARENA_BIN)) |
| 3607 | return (NULL); |
| 3608 | bin->runcur = NULL; |
| 3609 | arena_run_heap_new(&bin->runs); |
| 3610 | if (config_stats) |
| 3611 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); |
| 3612 | } |
| 3613 | |
| 3614 | return (arena); |
| 3615 | } |
| 3616 | |
| 3617 | /* |
| 3618 | * Calculate bin_info->run_size such that it meets the following constraints: |
| 3619 | * |
| 3620 | * *) bin_info->run_size <= arena_maxrun |
| 3621 | * *) bin_info->nregs <= RUN_MAXREGS |
| 3622 | * |
| 3623 | * bin_info->nregs and bin_info->reg0_offset are also calculated here, since |
| 3624 | * these settings are all interdependent. |
| 3625 | */ |
| 3626 | static void |
| 3627 | bin_info_run_size_calc(arena_bin_info_t *bin_info) |
| 3628 | { |
| 3629 | size_t pad_size; |
| 3630 | size_t try_run_size, perfect_run_size, actual_run_size; |
| 3631 | uint32_t try_nregs, perfect_nregs, actual_nregs; |
| 3632 | |
| 3633 | /* |
| 3634 | * Determine redzone size based on minimum alignment and minimum |
| 3635 | * redzone size. Add padding to the end of the run if it is needed to |
| 3636 | * align the regions. The padding allows each redzone to be half the |
| 3637 | * minimum alignment; without the padding, each redzone would have to |
| 3638 | * be twice as large in order to maintain alignment. |
| 3639 | */ |
| 3640 | if (config_fill && unlikely(opt_redzone)) { |
| 3641 | size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); |
| 3642 | if (align_min <= REDZONE_MINSIZE) { |
| 3643 | bin_info->redzone_size = REDZONE_MINSIZE; |
| 3644 | pad_size = 0; |
| 3645 | } else { |
| 3646 | bin_info->redzone_size = align_min >> 1; |
| 3647 | pad_size = bin_info->redzone_size; |
| 3648 | } |
| 3649 | } else { |
| 3650 | bin_info->redzone_size = 0; |
| 3651 | pad_size = 0; |
| 3652 | } |
| 3653 | bin_info->reg_interval = bin_info->reg_size + |
| 3654 | (bin_info->redzone_size << 1); |
| 3655 | |
| 3656 | /* |
| 3657 | * Compute run size under ideal conditions (no redzones, no limit on run |
| 3658 | * size). |
| 3659 | */ |
| 3660 | try_run_size = PAGE; |
| 3661 | try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); |
| 3662 | do { |
| 3663 | perfect_run_size = try_run_size; |
| 3664 | perfect_nregs = try_nregs; |
| 3665 | |
| 3666 | try_run_size += PAGE; |
| 3667 | try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); |
| 3668 | } while (perfect_run_size != perfect_nregs * bin_info->reg_size); |
| 3669 | assert(perfect_nregs <= RUN_MAXREGS); |
| 3670 | |
| 3671 | actual_run_size = perfect_run_size; |
| 3672 | actual_nregs = (uint32_t)((actual_run_size - pad_size) / |
| 3673 | bin_info->reg_interval); |
| 3674 | |
| 3675 | /* |
| 3676 | * Redzones can require enough padding that not even a single region can |
| 3677 | * fit within the number of pages that would normally be dedicated to a |
| 3678 | * run for this size class. Increase the run size until at least one |
| 3679 | * region fits. |
| 3680 | */ |
| 3681 | while (actual_nregs == 0) { |
| 3682 | assert(config_fill && unlikely(opt_redzone)); |
| 3683 | |
| 3684 | actual_run_size += PAGE; |
| 3685 | actual_nregs = (uint32_t)((actual_run_size - pad_size) / |
| 3686 | bin_info->reg_interval); |
| 3687 | } |
| 3688 | |
| 3689 | /* |
| 3690 | * Make sure that the run will fit within an arena chunk. |
| 3691 | */ |
| 3692 | while (actual_run_size > arena_maxrun) { |
| 3693 | actual_run_size -= PAGE; |
| 3694 | actual_nregs = (uint32_t)((actual_run_size - pad_size) / |
| 3695 | bin_info->reg_interval); |
| 3696 | } |
| 3697 | assert(actual_nregs > 0); |
| 3698 | assert(actual_run_size == s2u(actual_run_size)); |
| 3699 | |
| 3700 | /* Copy final settings. */ |
| 3701 | bin_info->run_size = actual_run_size; |
| 3702 | bin_info->nregs = actual_nregs; |
| 3703 | bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs * |
| 3704 | bin_info->reg_interval) - pad_size + bin_info->redzone_size); |
| 3705 | |
| 3706 | if (actual_run_size > small_maxrun) |
| 3707 | small_maxrun = actual_run_size; |
| 3708 | |
| 3709 | assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs |
| 3710 | * bin_info->reg_interval) + pad_size == bin_info->run_size); |
| 3711 | } |
| 3712 | |
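|      | /*
|      |  * Initialize arena_bin_info for every small size class, using the
|      |  * SIZE_CLASSES x-macro to supply each class's region size.
|      |  */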
| 3713 | static void |
| 3714 | bin_info_init(void) |
| 3715 | { |
| 3716 | arena_bin_info_t *bin_info; |
| 3717 | |
| 3718 | #define BIN_INFO_INIT_bin_yes(index, size) \ |
| 3719 | bin_info = &arena_bin_info[index]; \ |
| 3720 | bin_info->reg_size = size; \ |
| 3721 | bin_info_run_size_calc(bin_info); \ |
| 3722 | bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); |
| 3723 | #define BIN_INFO_INIT_bin_no(index, size) |
| 3724 | #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ |
| 3725 | BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) |
| 3726 | SIZE_CLASSES |
| 3727 | #undef BIN_INFO_INIT_bin_yes |
| 3728 | #undef BIN_INFO_INIT_bin_no |
| 3729 | #undef SC |
| 3730 | } |
| 3731 | |
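|      | /*
|      |  * Build small_run_tab, which records (indexed by page count) whether a
|      |  * given run size is used by some small size class.
|      |  */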
| 3732 | static bool |
| 3733 | small_run_size_init(void) |
| 3734 | { |
| 3735 | |
| 3736 | assert(small_maxrun != 0); |
| 3737 | |
| 3738 | small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >> |
| 3739 | LG_PAGE)); |
| 3740 | if (small_run_tab == NULL) |
| 3741 | return (true); |
| 3742 | |
| 3743 | #define TAB_INIT_bin_yes(index, size) { \ |
| 3744 | arena_bin_info_t *bin_info = &arena_bin_info[index]; \ |
| 3745 | small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ |
| 3746 | } |
| 3747 | #define TAB_INIT_bin_no(index, size) |
| 3748 | #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ |
| 3749 | TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) |
| 3750 | SIZE_CLASSES |
| 3751 | #undef TAB_INIT_bin_yes |
| 3752 | #undef TAB_INIT_bin_no |
| 3753 | #undef SC |
| 3754 | |
| 3755 | return (false); |
| 3756 | } |
| 3757 | |
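|      | /*
|      |  * Precompute memoization tables for run_quantize_floor() and
|      |  * run_quantize_ceil(), with one entry per page multiple up to
|      |  * run_quantize_max.
|      |  */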
| 3758 | static bool |
| 3759 | run_quantize_init(void) |
| 3760 | { |
| 3761 | unsigned i; |
| 3762 | |
| 3763 | run_quantize_max = chunksize + large_pad; |
| 3764 | |
| 3765 | run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) * |
| 3766 | (run_quantize_max >> LG_PAGE)); |
| 3767 | if (run_quantize_floor_tab == NULL) |
| 3768 | return (true); |
| 3769 | |
| 3770 | run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) * |
| 3771 | (run_quantize_max >> LG_PAGE)); |
| 3772 | if (run_quantize_ceil_tab == NULL) |
| 3773 | return (true); |
| 3774 | |
| 3775 | for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) { |
| 3776 | size_t run_size = i << LG_PAGE; |
| 3777 | |
| 3778 | run_quantize_floor_tab[i-1] = |
| 3779 | run_quantize_floor_compute(run_size); |
| 3780 | run_quantize_ceil_tab[i-1] = |
| 3781 | run_quantize_ceil_compute(run_size); |
| 3782 | } |
| 3783 | |
| 3784 | return (false); |
| 3785 | } |
| 3786 | |
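|      | /*
|      |  * One-time global initialization: set purging defaults, compute map_bias,
|      |  * map_misc_offset, arena_maxrun, large_maxclass, and the large/huge class
|      |  * counts, then set up bin info and the run quantization tables.
|      |  */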
| 3787 | bool |
| 3788 | arena_boot(void) |
| 3789 | { |
| 3790 | unsigned i; |
| 3791 | |
| 3792 | arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); |
| 3793 | arena_decay_time_default_set(opt_decay_time); |
| 3794 | |
| 3795 | /* |
| 3796 | * Compute the header size such that it is large enough to contain the |
| 3797 | * page map. The page map is biased to omit entries for the header |
| 3798 | * itself, so some iteration is necessary to compute the map bias. |
| 3799 | * |
| 3800 | * 1) Compute safe header_size and map_bias values that include enough |
| 3801 | * space for an unbiased page map. |
| 3802 | * 2) Refine map_bias based on (1) to omit the header pages in the page |
| 3803 | * map. The resulting map_bias may be one too small. |
| 3804 | * 3) Refine map_bias based on (2). The result will be >= the result |
| 3805 | * from (2), and will always be correct. |
| 3806 | */ |
| 3807 | map_bias = 0; |
| 3808 | for (i = 0; i < 3; i++) { |
| 3809 | size_t header_size = offsetof(arena_chunk_t, map_bits) +
| 3810 | ((sizeof(arena_chunk_map_bits_t) + |
| 3811 | sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); |
| 3812 | map_bias = (header_size + PAGE_MASK) >> LG_PAGE; |
| 3813 | } |
| 3814 | assert(map_bias > 0); |
| 3815 | |
| 3816 | map_misc_offset = offsetof(arena_chunk_t, map_bits) + |
| 3817 | sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); |
| 3818 | |
| 3819 | arena_maxrun = chunksize - (map_bias << LG_PAGE); |
| 3820 | assert(arena_maxrun > 0); |
| 3821 | large_maxclass = index2size(size2index(chunksize)-1); |
| 3822 | if (large_maxclass > arena_maxrun) { |
| 3823 | /* |
| 3824 | * For small chunk sizes it's possible for there to be fewer |
| 3825 | * non-header pages available than are necessary to serve the |
| 3826 | * size classes just below chunksize. |
| 3827 | */ |
| 3828 | large_maxclass = arena_maxrun; |
| 3829 | } |
| 3830 | assert(large_maxclass > 0); |
| 3831 | nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); |
| 3832 | nhclasses = NSIZES - nlclasses - NBINS; |
| 3833 | |
| 3834 | bin_info_init(); |
| 3835 | if (small_run_size_init()) |
| 3836 | return (true); |
| 3837 | if (run_quantize_init()) |
| 3838 | return (true); |
| 3839 | |
| 3840 | runs_avail_bias = size2index(PAGE); |
| 3841 | runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias; |
| 3842 | |
| 3843 | return (false); |
| 3844 | } |
| 3845 | |
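|      | /*
|      |  * Fork support: the prefork functions acquire this arena's mutexes before
|      |  * fork(), and the postfork functions restore them in the parent and child
|      |  * afterwards.
|      |  */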
| 3846 | void |
| 3847 | arena_prefork0(tsdn_t *tsdn, arena_t *arena) |
| 3848 | { |
| 3849 | |
| 3850 | malloc_mutex_prefork(tsdn, &arena->lock); |
| 3851 | } |
| 3852 | |
| 3853 | void |
| 3854 | arena_prefork1(tsdn_t *tsdn, arena_t *arena) |
| 3855 | { |
| 3856 | |
| 3857 | malloc_mutex_prefork(tsdn, &arena->chunks_mtx); |
| 3858 | } |
| 3859 | |
| 3860 | void |
| 3861 | arena_prefork2(tsdn_t *tsdn, arena_t *arena) |
| 3862 | { |
| 3863 | |
| 3864 | malloc_mutex_prefork(tsdn, &arena->node_cache_mtx); |
| 3865 | } |
| 3866 | |
| 3867 | void |
| 3868 | arena_prefork3(tsdn_t *tsdn, arena_t *arena) |
| 3869 | { |
| 3870 | unsigned i; |
| 3871 | |
| 3872 | for (i = 0; i < NBINS; i++) |
| 3873 | malloc_mutex_prefork(tsdn, &arena->bins[i].lock); |
| 3874 | malloc_mutex_prefork(tsdn, &arena->huge_mtx); |
| 3875 | } |
| 3876 | |
| 3877 | void |
| 3878 | arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) |
| 3879 | { |
| 3880 | unsigned i; |
| 3881 | |
| 3882 | malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx); |
| 3883 | for (i = 0; i < NBINS; i++) |
| 3884 | malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); |
| 3885 | malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx); |
| 3886 | malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx); |
| 3887 | malloc_mutex_postfork_parent(tsdn, &arena->lock); |
| 3888 | } |
| 3889 | |
| 3890 | void |
| 3891 | arena_postfork_child(tsdn_t *tsdn, arena_t *arena) |
| 3892 | { |
| 3893 | unsigned i; |
| 3894 | |
| 3895 | malloc_mutex_postfork_child(tsdn, &arena->huge_mtx); |
| 3896 | for (i = 0; i < NBINS; i++) |
| 3897 | malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); |
| 3898 | malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx); |
| 3899 | malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx); |
| 3900 | malloc_mutex_postfork_child(tsdn, &arena->lock); |
| 3901 | } |
| 3902 | |