#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"

/******************************************************************************/
/* Data. */

bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

cache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */

unsigned nhbins;
size_t tcache_maxclass;

tcaches_t *tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t *tcaches_avail;

/* Protects tcaches{,_past,_avail}. */
static malloc_mutex_t tcaches_mtx;

/******************************************************************************/

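/* Return the usable size of the allocation to which ptr points. */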
size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
	return arena_salloc(tsdn, ptr);
}

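/*
 * Perform one incremental GC pass: visit the next cache bin in round-robin
 * order, flush part of what sits below its low water mark (or raise the fill
 * target if the bin went empty), and reset the low water mark.
 */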
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
	szind_t binind = tcache->next_gc_bin;

	cache_bin_t *tbin;
	if (binind < SC_NBINS) {
		tbin = tcache_small_bin_get(tcache, binind);
	} else {
		tbin = tcache_large_bin_get(tcache, binind);
	}
	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
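		/*
		 * For example, low_water == 8 flushes 8 - (8 >> 2) == 6
		 * objects (exactly 3/4); low_water == 5 flushes 5 - 1 == 4
		 * (the ceiling of 3.75).
		 */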
		if (binind < SC_NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
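			/*
			 * The fill count is ncached_max >> lg_fill_div
			 * (computed in arena_tcache_fill_small), so e.g.
			 * ncached_max == 64 with lg_fill_div == 1 fills 32
			 * objects, and 16 once lg_fill_div is bumped to 2.
			 */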
			cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
			if ((tbin_info->ncached_max >>
			    (tcache->lg_fill_div[binind] + 1)) >= 1) {
				tcache->lg_fill_div[binind]++;
			}
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X for small bins.  Make sure
		 * lg_fill_div stays greater than 0.
		 */
		if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
			tcache->lg_fill_div[binind]--;
		}
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins) {
		tcache->next_gc_bin = 0;
	}
}

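/*
 * Slow path for small allocation: the cache bin was empty, so refill it from
 * the arena, then retry the cache allocation.
 */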
void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
	void *ret;

	assert(tcache->arena != NULL);
	arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof) {
		tcache->prof_accumbytes = 0;
	}
	ret = cache_bin_alloc_easy(tbin, tcache_success);

	return ret;
}

void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem) {
	bool merged_stats = false;

	assert(binind < SC_NBINS);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
	/* Look up extent once per item. */
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

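	/*
	 * Flush in passes: each pass locks the arena bin that owns the first
	 * remaining object, frees every object belonging to that bin, and
	 * defers the rest to a later pass.
	 */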
	while (nflush > 0) {
		/* Lock the arena bin associated with the first object. */
		extent_t *extent = item_extent[0];
		arena_t *bin_arena = extent_arena_get(extent);
		bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(tsd_tsdn(tsd), arena,
			    tcache->prof_accumbytes)) {
				prof_idump(tsd_tsdn(tsd));
			}
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (config_stats && bin_arena == arena) {
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == bin_arena) {
				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
				    bin_arena, extent, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache) {
	bool merged_stats = false;

	assert(binind < nhbins);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *tcache_arena = tcache->arena;
	assert(tcache_arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
	/* Look up extent once per item. */
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

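	/*
	 * As in the small-bin flush, proceed in passes keyed on the arena that
	 * owns the first remaining object.  Large deallocation is split into a
	 * prep step taken under large_mtx and a finish step performed after
	 * the mutex has been dropped.
	 */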
	while (nflush > 0) {
		/* Lock the arena associated with the first object. */
		extent_t *extent = item_extent[0];
		arena_t *locked_arena = extent_arena_get(extent);
		bool idump;

		if (config_prof) {
			idump = false;
		}

		bool lock_large = !arena_is_auto(locked_arena);
		if (lock_large) {
			malloc_mutex_lock(tsd_tsdn(tsd),
			    &locked_arena->large_mtx);
		}
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			assert(ptr != NULL);
			extent = item_extent[i];
			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
				    extent);
			}
		}
		if ((config_prof || config_stats) &&
		    (locked_arena == tcache_arena)) {
			if (config_prof) {
				idump = arena_prof_accum(tsd_tsdn(tsd),
				    tcache_arena, tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena_stats_large_nrequests_add(tsd_tsdn(tsd),
				    &tcache_arena->stats, binind,
				    tbin->tstats.nrequests);
				tbin->tstats.nrequests = 0;
			}
		}
		if (lock_large) {
			malloc_mutex_unlock(tsd_tsdn(tsd),
			    &locked_arena->large_mtx);
		}

		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_finish(tsd_tsdn(tsd), extent);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		if (config_prof && idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
		    ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_stats_large_nrequests_add(tsd_tsdn(tsd),
		    &tcache_arena->stats, binind, tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	assert(tcache->arena == NULL);
	tcache->arena = arena;

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);

		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		cache_bin_array_descriptor_init(
		    &tcache->cache_bin_array_descriptor, tcache->bins_small,
		    tcache->bins_large);
		ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);

		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
}

static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		ql_remove(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);
		tcache_stats_merge(tsdn, tcache, arena);
		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
	tcache->arena = NULL;
}

void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	tcache_arena_dissociate(tsdn, tcache);
	tcache_arena_associate(tsdn, tcache, arena);
}

bool
tsd_tcache_enabled_data_init(tsd_t *tsd) {
	/* Called upon tsd initialization. */
	tsd_tcache_enabled_set(tsd, opt_tcache);
	tsd_slow_update(tsd);

	if (opt_tcache) {
		/* Trigger tcache init. */
		tsd_tcache_data_init(tsd);
	}

	return false;
}

/* Initialize a tcache's bins and GC state, given a preallocated avail stack. */
static void
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
	memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
	tcache->prof_accumbytes = 0;
	tcache->next_gc_bin = 0;
	tcache->arena = NULL;

	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

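	/*
	 * Carve avail_stack into per-bin regions; each bin's avail pointer is
	 * set one past the end of its region (see the comment below).
	 */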
	size_t stack_offset = 0;
	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
	memset(tcache->bins_large, 0, sizeof(cache_bin_t) *
	    (nhbins - SC_NBINS));
	unsigned i = 0;
	for (; i < SC_NBINS; i++) {
		tcache->lg_fill_div[i] = 1;
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		/*
		 * avail points past the available space.  Allocations will
		 * access the slots toward higher addresses (for the benefit of
		 * prefetch).
		 */
		tcache_small_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	for (; i < nhbins; i++) {
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		tcache_large_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	assert(stack_offset == stack_nelms * sizeof(void *));
}

/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
	assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
	size_t size = stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
	    NULL, true, arena_get(TSDN_NULL, 0, true));
	if (avail_array == NULL) {
		return true;
	}

	tcache_init(tsd, tcache, avail_array);
	/*
	 * Initialization is a bit tricky here.  After malloc init is done, all
	 * threads can rely on arena_choose and associate tcache accordingly.
	 * However, the thread that does actual malloc bootstrapping relies on
	 * functional tsd, and it can only rely on a0.  In that case, we
	 * associate its tcache to a0 temporarily, and later on
	 * arena_choose_hard() will re-associate properly.
	 */
	tcache->arena = NULL;
	arena_t *arena;
	if (!malloc_initialized()) {
		/* If in initialization, assign to a0. */
		arena = arena_get(tsd_tsdn(tsd), 0, false);
		tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
	} else {
		arena = arena_choose(tsd, NULL);
		/* This may happen if thread.tcache.enabled is used. */
		if (tcache->arena == NULL) {
			tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
		}
	}
	assert(arena == tcache->arena);

	return false;
}

/* Create a manual tcache, for the tcache.create mallctl. */
tcache_t *
tcache_create_explicit(tsd_t *tsd) {
	tcache_t *tcache;
	size_t size, stack_offset;

	size = sizeof(tcache_t);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

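	/* The tcache struct and its avail stacks share a single allocation. */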
	tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
	    arena_get(TSDN_NULL, 0, true));
	if (tcache == NULL) {
		return NULL;
	}

	tcache_init(tsd, tcache,
	    (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
	tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));

	return tcache;
}

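/* Flush every bin of tcache, merging any remaining stats counters. */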
static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
	assert(tcache->arena != NULL);

	for (unsigned i = 0; i < SC_NBINS; i++) {
		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}
	for (unsigned i = SC_NBINS; i < nhbins; i++) {
		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
	    tcache->prof_accumbytes)) {
		prof_idump(tsd_tsdn(tsd));
	}
}

void
tcache_flush(tsd_t *tsd) {
	assert(tcache_available(tsd));
	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
}

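/*
 * Tear down tcache: flush it, dissociate it from its arena, and release its
 * memory.  tsd_tcache is true for the TSD-embedded auto tcache, whose struct
 * lives in TSD so only its separately allocated avail array is freed; manual
 * tcaches free the whole struct.
 */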
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
	tcache_flush_cache(tsd, tcache);
	arena_t *arena = tcache->arena;
	tcache_arena_dissociate(tsd_tsdn(tsd), tcache);

	if (tsd_tcache) {
		/* Release the avail array for the TSD-embedded auto tcache. */
		void *avail_array =
		    (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
		    (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
		idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
	} else {
		/* Release both the tcache struct and avail array. */
		idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
	}

	/*
	 * The deallocation and tcache flush above may not trigger decay since
	 * we are on the tcache shutdown path (potentially with non-nominal
	 * tsd).  Manually trigger decay to avoid pathological cases.  Also
	 * include arena 0 because the tcache array is allocated from it.
	 */
	arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false),
	    false, false);

	unsigned nthreads = arena_nthreads_get(arena, false);
	if (nthreads == 0) {
		/* Force purging when no threads are assigned to the arena. */
		arena_decay(tsd_tsdn(tsd), arena, false, true);
	} else {
		arena_decay(tsd_tsdn(tsd), arena, false, false);
	}
}

/* For auto tcache (embedded in TSD) only. */
void
tcache_cleanup(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get(tsd);
	if (!tcache_available(tsd)) {
		assert(tsd_tcache_enabled_get(tsd) == false);
		if (config_debug) {
			assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
		}
		return;
	}
	assert(tsd_tcache_enabled_get(tsd));
	assert(tcache_small_bin_get(tcache, 0)->avail != NULL);

	tcache_destroy(tsd, tcache, true);
	if (config_debug) {
		tcache_small_bin_get(tcache, 0)->avail = NULL;
	}
}

void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < SC_NBINS; i++) {
		bin_t *bin = &arena->bins[i];
		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		malloc_mutex_lock(tsdn, &bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(tsdn, &bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
		    tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}
}

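/*
 * Ensure that the global tcaches array exists and has capacity for another
 * entry.  Takes and releases tcaches_mtx internally.
 */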
static bool
tcaches_create_prep(tsd_t *tsd) {
	bool err;

	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);

	if (tcaches == NULL) {
		tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
		    * (MALLOCX_TCACHE_MAX+1), CACHELINE);
		if (tcaches == NULL) {
			err = true;
			goto label_return;
		}
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
		err = true;
		goto label_return;
	}

	err = false;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	return err;
}

bool
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);

	bool err;

	if (tcaches_create_prep(tsd)) {
		err = true;
		goto label_return;
	}

	tcache_t *tcache = tcache_create_explicit(tsd);
	if (tcache == NULL) {
		err = true;
		goto label_return;
	}

	tcaches_t *elm;
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcaches_avail != NULL) {
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = (unsigned)(elm - tcaches);
	} else {
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);

	err = false;
label_return:
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
	return err;
}

static tcache_t *
tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);

	if (elm->tcache == NULL) {
		return NULL;
	}
	tcache_t *tcache = elm->tcache;
	elm->tcache = NULL;
	return tcache;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_flush_cache(tsd, tcache);
	}
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcaches_t *elm = &tcaches[ind];
	tcache_t *tcache = tcaches_elm_remove(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_destroy(tsd, tcache, false);
	}
}

bool
tcache_boot(tsdn_t *tsdn) {
	/* If necessary, clamp opt_lg_tcache_max. */
	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
	    SC_SMALL_MAXCLASS) {
		tcache_maxclass = SC_SMALL_MAXCLASS;
	} else {
		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
	}

	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	nhbins = sz_size2index(tcache_maxclass) + 1;

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
	    * sizeof(cache_bin_info_t), CACHELINE);
	if (tcache_bin_info == NULL) {
		return true;
	}
	stack_nelms = 0;
	unsigned i;
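	/*
	 * Small bins cache up to twice the number of regions per slab, clamped
	 * to [TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX].
	 */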
	for (i = 0; i < SC_NBINS; i++) {
		if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((bin_infos[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (bin_infos[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return false;
}

void
tcache_prefork(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_prefork(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_parent(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_child(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
	}
}