1 | #define JEMALLOC_C_ |
2 | #include "jemalloc/internal/jemalloc_internal.h" |
3 | |
4 | /******************************************************************************/ |
5 | /* Data. */ |
6 | |
7 | /* Runtime configuration options. */ |
8 | const char *je_malloc_conf JEMALLOC_ATTR(weak); |
9 | bool opt_abort = |
10 | #ifdef JEMALLOC_DEBUG |
11 | true |
12 | #else |
13 | false |
14 | #endif |
15 | ; |
16 | const char *opt_junk = |
17 | #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) |
18 | "true" |
19 | #else |
20 | "false" |
21 | #endif |
22 | ; |
23 | bool opt_junk_alloc = |
24 | #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) |
25 | true |
26 | #else |
27 | false |
28 | #endif |
29 | ; |
30 | bool opt_junk_free = |
31 | #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) |
32 | true |
33 | #else |
34 | false |
35 | #endif |
36 | ; |
37 | |
38 | size_t opt_quarantine = ZU(0); |
39 | bool opt_redzone = false; |
40 | bool opt_utrace = false; |
41 | bool opt_xmalloc = false; |
42 | bool opt_zero = false; |
43 | unsigned opt_narenas = 0; |
44 | |
45 | /* Initialized to true if the process is running inside Valgrind. */ |
46 | bool in_valgrind; |
47 | |
48 | unsigned ncpus; |
49 | |
50 | /* Protects arenas initialization. */ |
51 | static malloc_mutex_t arenas_lock; |
52 | /* |
53 | * Arenas that are used to service external requests. Not all elements of the |
54 | * arenas array are necessarily used; arenas are created lazily as needed. |
55 | * |
56 | * arenas[0..narenas_auto) are used for automatic multiplexing of threads and |
57 | * arenas. arenas[narenas_auto..narenas_total) are only used if the application |
58 | * takes some action to create them and allocate from them. |
59 | */ |
60 | arena_t **arenas; |
61 | static unsigned narenas_total; /* Use narenas_total_*(). */ |
62 | static arena_t *a0; /* arenas[0]; read-only after initialization. */ |
63 | unsigned narenas_auto; /* Read-only after initialization. */ |
64 | |
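/*
 * Tracks how far global initialization has progressed.  Initialization
 * proceeds from malloc_init_uninitialized down to malloc_init_initialized;
 * the terminal state is deliberately 0 so that the common-case check in
 * malloc_initialized() compiles to a comparison against zero.
 */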
65 | typedef enum { |
66 | malloc_init_uninitialized = 3, |
67 | malloc_init_a0_initialized = 2, |
68 | malloc_init_recursible = 1, |
69 | malloc_init_initialized = 0 /* Common case --> jnz. */ |
70 | } malloc_init_t; |
71 | static malloc_init_t malloc_init_state = malloc_init_uninitialized; |
72 | |
/*
 * Initialized to true so that the slow path runs until initialization
 * completes; after option processing, false is the common case.
 */
static bool malloc_slow = true;

/*
 * When malloc_slow is true, malloc_slow_flags records which of the following
 * conditions caused it, for sanity checking.
 */
77 | enum { |
78 | flag_opt_junk_alloc = (1U), |
79 | flag_opt_junk_free = (1U << 1), |
80 | flag_opt_quarantine = (1U << 2), |
81 | flag_opt_zero = (1U << 3), |
82 | flag_opt_utrace = (1U << 4), |
83 | flag_in_valgrind = (1U << 5), |
84 | flag_opt_xmalloc = (1U << 6) |
85 | }; |
86 | static uint8_t malloc_slow_flags; |
87 | |
88 | /* Last entry for overflow detection only. */ |
89 | JEMALLOC_ALIGNED(CACHELINE) |
90 | const size_t index2size_tab[NSIZES+1] = { |
91 | #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ |
92 | ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), |
93 | SIZE_CLASSES |
94 | #undef SC |
95 | ZU(0) |
96 | }; |
97 | |
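/*
 * Lookup table mapping sizes (quantized to LG_TINY_MIN granularity) to size
 * class indices, consulted by the size-to-index lookup fast path.  Each
 * S2B_lg(i) macro below expands to 2^(lg - LG_TINY_MIN) copies of index i,
 * i.e. one table entry per LG_TINY_MIN-sized quantum covered by that size
 * class; classes with lg_delta_lookup == no (beyond the lookup limit) emit no
 * entries.
 */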
98 | JEMALLOC_ALIGNED(CACHELINE) |
99 | const uint8_t size2index_tab[] = { |
100 | #if LG_TINY_MIN == 0 |
101 | #warning "Dangerous LG_TINY_MIN" |
102 | #define S2B_0(i) i, |
103 | #elif LG_TINY_MIN == 1 |
104 | #warning "Dangerous LG_TINY_MIN" |
105 | #define S2B_1(i) i, |
106 | #elif LG_TINY_MIN == 2 |
107 | #warning "Dangerous LG_TINY_MIN" |
108 | #define S2B_2(i) i, |
109 | #elif LG_TINY_MIN == 3 |
110 | #define S2B_3(i) i, |
111 | #elif LG_TINY_MIN == 4 |
112 | #define S2B_4(i) i, |
113 | #elif LG_TINY_MIN == 5 |
114 | #define S2B_5(i) i, |
115 | #elif LG_TINY_MIN == 6 |
116 | #define S2B_6(i) i, |
117 | #elif LG_TINY_MIN == 7 |
118 | #define S2B_7(i) i, |
119 | #elif LG_TINY_MIN == 8 |
120 | #define S2B_8(i) i, |
121 | #elif LG_TINY_MIN == 9 |
122 | #define S2B_9(i) i, |
123 | #elif LG_TINY_MIN == 10 |
124 | #define S2B_10(i) i, |
125 | #elif LG_TINY_MIN == 11 |
126 | #define S2B_11(i) i, |
127 | #else |
128 | #error "Unsupported LG_TINY_MIN" |
129 | #endif |
130 | #if LG_TINY_MIN < 1 |
131 | #define S2B_1(i) S2B_0(i) S2B_0(i) |
132 | #endif |
133 | #if LG_TINY_MIN < 2 |
134 | #define S2B_2(i) S2B_1(i) S2B_1(i) |
135 | #endif |
136 | #if LG_TINY_MIN < 3 |
137 | #define S2B_3(i) S2B_2(i) S2B_2(i) |
138 | #endif |
139 | #if LG_TINY_MIN < 4 |
140 | #define S2B_4(i) S2B_3(i) S2B_3(i) |
141 | #endif |
142 | #if LG_TINY_MIN < 5 |
143 | #define S2B_5(i) S2B_4(i) S2B_4(i) |
144 | #endif |
145 | #if LG_TINY_MIN < 6 |
146 | #define S2B_6(i) S2B_5(i) S2B_5(i) |
147 | #endif |
148 | #if LG_TINY_MIN < 7 |
149 | #define S2B_7(i) S2B_6(i) S2B_6(i) |
150 | #endif |
151 | #if LG_TINY_MIN < 8 |
152 | #define S2B_8(i) S2B_7(i) S2B_7(i) |
153 | #endif |
154 | #if LG_TINY_MIN < 9 |
155 | #define S2B_9(i) S2B_8(i) S2B_8(i) |
156 | #endif |
157 | #if LG_TINY_MIN < 10 |
158 | #define S2B_10(i) S2B_9(i) S2B_9(i) |
159 | #endif |
160 | #if LG_TINY_MIN < 11 |
161 | #define S2B_11(i) S2B_10(i) S2B_10(i) |
162 | #endif |
163 | #define S2B_no(i) |
164 | #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ |
165 | S2B_##lg_delta_lookup(index) |
166 | SIZE_CLASSES |
167 | #undef S2B_3 |
168 | #undef S2B_4 |
169 | #undef S2B_5 |
170 | #undef S2B_6 |
171 | #undef S2B_7 |
172 | #undef S2B_8 |
173 | #undef S2B_9 |
174 | #undef S2B_10 |
175 | #undef S2B_11 |
176 | #undef S2B_no |
177 | #undef SC |
178 | }; |
179 | |
180 | #ifdef JEMALLOC_THREADED_INIT |
181 | /* Used to let the initializing thread recursively allocate. */ |
182 | # define NO_INITIALIZER ((unsigned long)0) |
183 | # define INITIALIZER pthread_self() |
184 | # define IS_INITIALIZER (malloc_initializer == pthread_self()) |
185 | static pthread_t malloc_initializer = NO_INITIALIZER; |
186 | #else |
187 | # define NO_INITIALIZER false |
188 | # define INITIALIZER true |
189 | # define IS_INITIALIZER malloc_initializer |
190 | static bool malloc_initializer = NO_INITIALIZER; |
191 | #endif |
192 | |
193 | /* Used to avoid initialization races. */ |
194 | #ifdef _WIN32 |
195 | #if _WIN32_WINNT >= 0x0600 |
196 | static malloc_mutex_t init_lock = SRWLOCK_INIT; |
197 | #else |
198 | static malloc_mutex_t init_lock; |
199 | static bool init_lock_initialized = false; |
200 | |
201 | JEMALLOC_ATTR(constructor) |
202 | static void WINAPI |
203 | _init_init_lock(void) |
204 | { |
205 | |
/*
 * If another constructor in the same binary uses mallctl to e.g. set up
 * chunk hooks, it may end up running before this one, and
 * malloc_init_hard would crash trying to lock the uninitialized lock.
 * So we force an initialization of the lock in malloc_init_hard as
 * well.  We don't try to care about atomicity of the accesses to the
 * init_lock_initialized boolean, since it really only matters early in
 * process creation, before any separate thread normally starts doing
 * anything.
 */
if (!init_lock_initialized)
malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
init_lock_initialized = true;
217 | } |
218 | |
219 | #ifdef _MSC_VER |
220 | # pragma section(".CRT$XCU", read) |
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
222 | static const void (WINAPI *init_init_lock)(void) = _init_init_lock; |
223 | #endif |
224 | #endif |
225 | #else |
226 | static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; |
227 | #endif |
228 | |
229 | typedef struct { |
230 | void *p; /* Input pointer (as in realloc(p, s)). */ |
231 | size_t s; /* Request size. */ |
232 | void *r; /* Result pointer. */ |
233 | } malloc_utrace_t; |
234 | |
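/*
 * UTRACE() reports an allocation event (input pointer, request size, result
 * pointer) via utrace(2) when opt_utrace is enabled, preserving errno across
 * the call so that tracing never perturbs caller-visible error state.
 */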
235 | #ifdef JEMALLOC_UTRACE |
236 | # define UTRACE(a, b, c) do { \ |
237 | if (unlikely(opt_utrace)) { \ |
238 | int utrace_serrno = errno; \ |
239 | malloc_utrace_t ut; \ |
240 | ut.p = (a); \ |
241 | ut.s = (b); \ |
242 | ut.r = (c); \ |
243 | utrace(&ut, sizeof(ut)); \ |
244 | errno = utrace_serrno; \ |
245 | } \ |
246 | } while (0) |
247 | #else |
248 | # define UTRACE(a, b, c) |
249 | #endif |
250 | |
251 | /******************************************************************************/ |
252 | /* |
253 | * Function prototypes for static functions that are referenced prior to |
254 | * definition. |
255 | */ |
256 | |
257 | static bool malloc_init_hard_a0(void); |
258 | static bool malloc_init_hard(void); |
259 | |
260 | /******************************************************************************/ |
261 | /* |
262 | * Begin miscellaneous support functions. |
263 | */ |
264 | |
265 | JEMALLOC_ALWAYS_INLINE_C bool |
266 | malloc_initialized(void) |
267 | { |
268 | |
269 | return (malloc_init_state == malloc_init_initialized); |
270 | } |
271 | |
272 | JEMALLOC_ALWAYS_INLINE_C void |
273 | malloc_thread_init(void) |
274 | { |
275 | |
276 | /* |
277 | * TSD initialization can't be safely done as a side effect of |
278 | * deallocation, because it is possible for a thread to do nothing but |
279 | * deallocate its TLS data via free(), in which case writing to TLS |
280 | * would cause write-after-free memory corruption. The quarantine |
281 | * facility *only* gets used as a side effect of deallocation, so make |
282 | * a best effort attempt at initializing its TSD by hooking all |
283 | * allocation events. |
284 | */ |
285 | if (config_fill && unlikely(opt_quarantine)) |
286 | quarantine_alloc_hook(); |
287 | } |
288 | |
289 | JEMALLOC_ALWAYS_INLINE_C bool |
290 | malloc_init_a0(void) |
291 | { |
292 | |
293 | if (unlikely(malloc_init_state == malloc_init_uninitialized)) |
294 | return (malloc_init_hard_a0()); |
295 | return (false); |
296 | } |
297 | |
298 | JEMALLOC_ALWAYS_INLINE_C bool |
299 | malloc_init(void) |
300 | { |
301 | |
302 | if (unlikely(!malloc_initialized()) && malloc_init_hard()) |
303 | return (true); |
304 | malloc_thread_init(); |
305 | |
306 | return (false); |
307 | } |
308 | |
309 | /* |
310 | * The a0*() functions are used instead of i{d,}alloc() in situations that |
311 | * cannot tolerate TLS variable access. |
312 | */ |
313 | |
314 | static void * |
315 | a0ialloc(size_t size, bool zero, bool is_metadata) |
316 | { |
317 | |
318 | if (unlikely(malloc_init_a0())) |
319 | return (NULL); |
320 | |
321 | return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, |
322 | is_metadata, arena_get(TSDN_NULL, 0, true), true)); |
323 | } |
324 | |
325 | static void |
326 | a0idalloc(void *ptr, bool is_metadata) |
327 | { |
328 | |
329 | idalloctm(TSDN_NULL, ptr, false, is_metadata, true); |
330 | } |
331 | |
332 | void * |
333 | a0malloc(size_t size) |
334 | { |
335 | |
336 | return (a0ialloc(size, false, true)); |
337 | } |
338 | |
339 | void |
340 | a0dalloc(void *ptr) |
341 | { |
342 | |
343 | a0idalloc(ptr, true); |
344 | } |
345 | |
346 | /* |
* FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
348 | * situations that cannot tolerate TLS variable access (TLS allocation and very |
349 | * early internal data structure initialization). |
350 | */ |
351 | |
352 | void * |
353 | bootstrap_malloc(size_t size) |
354 | { |
355 | |
356 | if (unlikely(size == 0)) |
357 | size = 1; |
358 | |
359 | return (a0ialloc(size, false, false)); |
360 | } |
361 | |
362 | void * |
363 | bootstrap_calloc(size_t num, size_t size) |
364 | { |
365 | size_t num_size; |
366 | |
367 | num_size = num * size; |
368 | if (unlikely(num_size == 0)) { |
369 | assert(num == 0 || size == 0); |
370 | num_size = 1; |
371 | } |
372 | |
373 | return (a0ialloc(num_size, true, false)); |
374 | } |
375 | |
376 | void |
377 | bootstrap_free(void *ptr) |
378 | { |
379 | |
380 | if (unlikely(ptr == NULL)) |
381 | return; |
382 | |
383 | a0idalloc(ptr, false); |
384 | } |
385 | |
386 | static void |
387 | arena_set(unsigned ind, arena_t *arena) |
388 | { |
389 | |
390 | atomic_write_p((void **)&arenas[ind], arena); |
391 | } |
392 | |
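/*
 * narenas_total is read without holding arenas_lock (e.g. by
 * arena_tdata_get_hard()), so it is only accessed via the following atomic
 * wrappers.
 */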
393 | static void |
394 | narenas_total_set(unsigned narenas) |
395 | { |
396 | |
397 | atomic_write_u(&narenas_total, narenas); |
398 | } |
399 | |
400 | static void |
401 | narenas_total_inc(void) |
402 | { |
403 | |
404 | atomic_add_u(&narenas_total, 1); |
405 | } |
406 | |
407 | unsigned |
408 | narenas_total_get(void) |
409 | { |
410 | |
411 | return (atomic_read_u(&narenas_total)); |
412 | } |
413 | |
414 | /* Create a new arena and insert it into the arenas array at index ind. */ |
415 | static arena_t * |
416 | arena_init_locked(tsdn_t *tsdn, unsigned ind) |
417 | { |
418 | arena_t *arena; |
419 | |
420 | assert(ind <= narenas_total_get()); |
421 | if (ind > MALLOCX_ARENA_MAX) |
422 | return (NULL); |
423 | if (ind == narenas_total_get()) |
424 | narenas_total_inc(); |
425 | |
426 | /* |
427 | * Another thread may have already initialized arenas[ind] if it's an |
428 | * auto arena. |
429 | */ |
430 | arena = arena_get(tsdn, ind, false); |
431 | if (arena != NULL) { |
432 | assert(ind < narenas_auto); |
433 | return (arena); |
434 | } |
435 | |
436 | /* Actually initialize the arena. */ |
437 | arena = arena_new(tsdn, ind); |
438 | arena_set(ind, arena); |
439 | return (arena); |
440 | } |
441 | |
442 | arena_t * |
443 | arena_init(tsdn_t *tsdn, unsigned ind) |
444 | { |
445 | arena_t *arena; |
446 | |
447 | malloc_mutex_lock(tsdn, &arenas_lock); |
448 | arena = arena_init_locked(tsdn, ind); |
449 | malloc_mutex_unlock(tsdn, &arenas_lock); |
450 | return (arena); |
451 | } |
452 | |
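/*
 * Bind the calling thread to arenas[ind]: bump the arena's thread count and,
 * if tsd is nominal, record the binding in tsd.  internal selects the
 * internal-allocation binding (tsd iarena) rather than the application arena
 * binding.
 */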
453 | static void |
454 | arena_bind(tsd_t *tsd, unsigned ind, bool internal) |
455 | { |
456 | arena_t *arena; |
457 | |
458 | arena = arena_get(tsd_tsdn(tsd), ind, false); |
459 | arena_nthreads_inc(arena, internal); |
460 | |
461 | if (tsd_nominal(tsd)) { |
462 | if (internal) |
463 | tsd_iarena_set(tsd, arena); |
464 | else |
465 | tsd_arena_set(tsd, arena); |
466 | } |
467 | } |
468 | |
469 | void |
470 | arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) |
471 | { |
472 | arena_t *oldarena, *newarena; |
473 | |
474 | oldarena = arena_get(tsd_tsdn(tsd), oldind, false); |
475 | newarena = arena_get(tsd_tsdn(tsd), newind, false); |
476 | arena_nthreads_dec(oldarena, false); |
477 | arena_nthreads_inc(newarena, false); |
478 | tsd_arena_set(tsd, newarena); |
479 | } |
480 | |
481 | static void |
482 | arena_unbind(tsd_t *tsd, unsigned ind, bool internal) |
483 | { |
484 | arena_t *arena; |
485 | |
486 | arena = arena_get(tsd_tsdn(tsd), ind, false); |
487 | arena_nthreads_dec(arena, internal); |
488 | if (internal) |
489 | tsd_iarena_set(tsd, NULL); |
490 | else |
491 | tsd_arena_set(tsd, NULL); |
492 | } |
493 | |
494 | arena_tdata_t * |
495 | arena_tdata_get_hard(tsd_t *tsd, unsigned ind) |
496 | { |
497 | arena_tdata_t *tdata, *arenas_tdata_old; |
498 | arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); |
499 | unsigned narenas_tdata_old, i; |
500 | unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); |
501 | unsigned narenas_actual = narenas_total_get(); |
502 | |
503 | /* |
504 | * Dissociate old tdata array (and set up for deallocation upon return) |
505 | * if it's too small. |
506 | */ |
507 | if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { |
508 | arenas_tdata_old = arenas_tdata; |
509 | narenas_tdata_old = narenas_tdata; |
510 | arenas_tdata = NULL; |
511 | narenas_tdata = 0; |
512 | tsd_arenas_tdata_set(tsd, arenas_tdata); |
513 | tsd_narenas_tdata_set(tsd, narenas_tdata); |
514 | } else { |
515 | arenas_tdata_old = NULL; |
516 | narenas_tdata_old = 0; |
517 | } |
518 | |
519 | /* Allocate tdata array if it's missing. */ |
520 | if (arenas_tdata == NULL) { |
521 | bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); |
522 | narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; |
523 | |
524 | if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { |
525 | *arenas_tdata_bypassp = true; |
526 | arenas_tdata = (arena_tdata_t *)a0malloc( |
527 | sizeof(arena_tdata_t) * narenas_tdata); |
528 | *arenas_tdata_bypassp = false; |
529 | } |
530 | if (arenas_tdata == NULL) { |
531 | tdata = NULL; |
532 | goto label_return; |
533 | } |
534 | assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); |
535 | tsd_arenas_tdata_set(tsd, arenas_tdata); |
536 | tsd_narenas_tdata_set(tsd, narenas_tdata); |
537 | } |
538 | |
539 | /* |
540 | * Copy to tdata array. It's possible that the actual number of arenas |
541 | * has increased since narenas_total_get() was called above, but that |
542 | * causes no correctness issues unless two threads concurrently execute |
543 | * the arenas.extend mallctl, which we trust mallctl synchronization to |
544 | * prevent. |
545 | */ |
546 | |
547 | /* Copy/initialize tickers. */ |
548 | for (i = 0; i < narenas_actual; i++) { |
549 | if (i < narenas_tdata_old) { |
550 | ticker_copy(&arenas_tdata[i].decay_ticker, |
551 | &arenas_tdata_old[i].decay_ticker); |
552 | } else { |
553 | ticker_init(&arenas_tdata[i].decay_ticker, |
554 | DECAY_NTICKS_PER_UPDATE); |
555 | } |
556 | } |
557 | if (narenas_tdata > narenas_actual) { |
558 | memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) |
559 | * (narenas_tdata - narenas_actual)); |
560 | } |
561 | |
562 | /* Read the refreshed tdata array. */ |
563 | tdata = &arenas_tdata[ind]; |
564 | label_return: |
565 | if (arenas_tdata_old != NULL) |
566 | a0dalloc(arenas_tdata_old); |
567 | return (tdata); |
568 | } |
569 | |
570 | /* Slow path, called only by arena_choose(). */ |
571 | arena_t * |
572 | arena_choose_hard(tsd_t *tsd, bool internal) |
573 | { |
574 | arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); |
575 | |
576 | if (narenas_auto > 1) { |
577 | unsigned i, j, choose[2], first_null; |
578 | |
579 | /* |
580 | * Determine binding for both non-internal and internal |
581 | * allocation. |
582 | * |
583 | * choose[0]: For application allocation. |
584 | * choose[1]: For internal metadata allocation. |
585 | */ |
586 | |
587 | for (j = 0; j < 2; j++) |
588 | choose[j] = 0; |
589 | |
590 | first_null = narenas_auto; |
591 | malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); |
592 | assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); |
593 | for (i = 1; i < narenas_auto; i++) { |
594 | if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { |
595 | /* |
596 | * Choose the first arena that has the lowest |
597 | * number of threads assigned to it. |
598 | */ |
599 | for (j = 0; j < 2; j++) { |
600 | if (arena_nthreads_get(arena_get( |
601 | tsd_tsdn(tsd), i, false), !!j) < |
602 | arena_nthreads_get(arena_get( |
603 | tsd_tsdn(tsd), choose[j], false), |
604 | !!j)) |
605 | choose[j] = i; |
606 | } |
607 | } else if (first_null == narenas_auto) { |
608 | /* |
609 | * Record the index of the first uninitialized |
610 | * arena, in case all extant arenas are in use. |
611 | * |
612 | * NB: It is possible for there to be |
613 | * discontinuities in terms of initialized |
614 | * versus uninitialized arenas, due to the |
615 | * "thread.arena" mallctl. |
616 | */ |
617 | first_null = i; |
618 | } |
619 | } |
620 | |
621 | for (j = 0; j < 2; j++) { |
622 | if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), |
623 | choose[j], false), !!j) == 0 || first_null == |
624 | narenas_auto) { |
625 | /* |
626 | * Use an unloaded arena, or the least loaded |
627 | * arena if all arenas are already initialized. |
628 | */ |
629 | if (!!j == internal) { |
630 | ret = arena_get(tsd_tsdn(tsd), |
631 | choose[j], false); |
632 | } |
633 | } else { |
634 | arena_t *arena; |
635 | |
636 | /* Initialize a new arena. */ |
637 | choose[j] = first_null; |
638 | arena = arena_init_locked(tsd_tsdn(tsd), |
639 | choose[j]); |
640 | if (arena == NULL) { |
641 | malloc_mutex_unlock(tsd_tsdn(tsd), |
642 | &arenas_lock); |
643 | return (NULL); |
644 | } |
645 | if (!!j == internal) |
646 | ret = arena; |
647 | } |
648 | arena_bind(tsd, choose[j], !!j); |
649 | } |
650 | malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); |
651 | } else { |
652 | ret = arena_get(tsd_tsdn(tsd), 0, false); |
653 | arena_bind(tsd, 0, false); |
654 | arena_bind(tsd, 0, true); |
655 | } |
656 | |
657 | return (ret); |
658 | } |
659 | |
660 | void |
661 | thread_allocated_cleanup(tsd_t *tsd) |
662 | { |
663 | |
664 | /* Do nothing. */ |
665 | } |
666 | |
667 | void |
668 | thread_deallocated_cleanup(tsd_t *tsd) |
669 | { |
670 | |
671 | /* Do nothing. */ |
672 | } |
673 | |
674 | void |
675 | iarena_cleanup(tsd_t *tsd) |
676 | { |
677 | arena_t *iarena; |
678 | |
679 | iarena = tsd_iarena_get(tsd); |
680 | if (iarena != NULL) |
681 | arena_unbind(tsd, iarena->ind, true); |
682 | } |
683 | |
684 | void |
685 | arena_cleanup(tsd_t *tsd) |
686 | { |
687 | arena_t *arena; |
688 | |
689 | arena = tsd_arena_get(tsd); |
690 | if (arena != NULL) |
691 | arena_unbind(tsd, arena->ind, false); |
692 | } |
693 | |
694 | void |
695 | arenas_tdata_cleanup(tsd_t *tsd) |
696 | { |
697 | arena_tdata_t *arenas_tdata; |
698 | |
699 | /* Prevent tsd->arenas_tdata from being (re)created. */ |
700 | *tsd_arenas_tdata_bypassp_get(tsd) = true; |
701 | |
702 | arenas_tdata = tsd_arenas_tdata_get(tsd); |
703 | if (arenas_tdata != NULL) { |
704 | tsd_arenas_tdata_set(tsd, NULL); |
705 | a0dalloc(arenas_tdata); |
706 | } |
707 | } |
708 | |
709 | void |
710 | narenas_tdata_cleanup(tsd_t *tsd) |
711 | { |
712 | |
713 | /* Do nothing. */ |
714 | } |
715 | |
716 | void |
717 | arenas_tdata_bypass_cleanup(tsd_t *tsd) |
718 | { |
719 | |
720 | /* Do nothing. */ |
721 | } |
722 | |
723 | static void |
724 | stats_print_atexit(void) |
725 | { |
726 | |
727 | if (config_tcache && config_stats) { |
728 | tsdn_t *tsdn; |
729 | unsigned narenas, i; |
730 | |
731 | tsdn = tsdn_fetch(); |
732 | |
733 | /* |
734 | * Merge stats from extant threads. This is racy, since |
735 | * individual threads do not lock when recording tcache stats |
736 | * events. As a consequence, the final stats may be slightly |
737 | * out of date by the time they are reported, if other threads |
738 | * continue to allocate. |
739 | */ |
740 | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { |
741 | arena_t *arena = arena_get(tsdn, i, false); |
742 | if (arena != NULL) { |
743 | tcache_t *tcache; |
744 | |
745 | /* |
746 | * tcache_stats_merge() locks bins, so if any |
747 | * code is introduced that acquires both arena |
748 | * and bin locks in the opposite order, |
749 | * deadlocks may result. |
750 | */ |
751 | malloc_mutex_lock(tsdn, &arena->lock); |
752 | ql_foreach(tcache, &arena->tcache_ql, link) { |
753 | tcache_stats_merge(tsdn, tcache, arena); |
754 | } |
755 | malloc_mutex_unlock(tsdn, &arena->lock); |
756 | } |
757 | } |
758 | } |
759 | je_malloc_stats_print(NULL, NULL, NULL); |
760 | } |
761 | |
762 | /* |
763 | * End miscellaneous support functions. |
764 | */ |
765 | /******************************************************************************/ |
766 | /* |
767 | * Begin initialization functions. |
768 | */ |
769 | |
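/*
 * Fallback for systems that lack secure_getenv(3): refuse to read the
 * environment when the process is setuid/setgid (as reported by issetugid(2),
 * when available), so that MALLOC_CONF cannot be used to influence privileged
 * processes.
 */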
770 | #ifndef JEMALLOC_HAVE_SECURE_GETENV |
771 | static char * |
772 | secure_getenv(const char *name) |
773 | { |
774 | |
775 | # ifdef JEMALLOC_HAVE_ISSETUGID |
776 | if (issetugid() != 0) |
777 | return (NULL); |
778 | # endif |
779 | return (getenv(name)); |
780 | } |
781 | #endif |
782 | |
783 | static unsigned |
784 | malloc_ncpus(void) |
785 | { |
786 | long result; |
787 | |
788 | #ifdef _WIN32 |
789 | SYSTEM_INFO si; |
790 | GetSystemInfo(&si); |
791 | result = si.dwNumberOfProcessors; |
792 | #else |
793 | result = sysconf(_SC_NPROCESSORS_ONLN); |
794 | #endif |
795 | return ((result == -1) ? 1 : (unsigned)result); |
796 | } |
797 | |
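/*
 * Extract the next <key>:<value> pair from the comma-separated options string
 * at *opts_p.  Keys are restricted to [A-Za-z0-9_].  On success, *k_p/*klen_p
 * and *v_p/*vlen_p describe the key and value, *opts_p is advanced past the
 * pair, and false is returned; true is returned when the input is exhausted
 * or malformed.
 */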
798 | static bool |
799 | malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, |
800 | char const **v_p, size_t *vlen_p) |
801 | { |
802 | bool accept; |
803 | const char *opts = *opts_p; |
804 | |
805 | *k_p = opts; |
806 | |
807 | for (accept = false; !accept;) { |
808 | switch (*opts) { |
809 | case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': |
810 | case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': |
811 | case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': |
812 | case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': |
813 | case 'Y': case 'Z': |
814 | case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': |
815 | case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': |
816 | case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': |
817 | case 's': case 't': case 'u': case 'v': case 'w': case 'x': |
818 | case 'y': case 'z': |
819 | case '0': case '1': case '2': case '3': case '4': case '5': |
820 | case '6': case '7': case '8': case '9': |
821 | case '_': |
822 | opts++; |
823 | break; |
824 | case ':': |
825 | opts++; |
826 | *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; |
827 | *v_p = opts; |
828 | accept = true; |
829 | break; |
830 | case '\0': |
831 | if (opts != *opts_p) { |
832 | malloc_write("<jemalloc>: Conf string ends " |
"with key\n");
834 | } |
835 | return (true); |
836 | default: |
malloc_write("<jemalloc>: Malformed conf string\n");
838 | return (true); |
839 | } |
840 | } |
841 | |
842 | for (accept = false; !accept;) { |
843 | switch (*opts) { |
844 | case ',': |
845 | opts++; |
846 | /* |
847 | * Look ahead one character here, because the next time |
848 | * this function is called, it will assume that end of |
849 | * input has been cleanly reached if no input remains, |
850 | * but we have optimistically already consumed the |
851 | * comma if one exists. |
852 | */ |
853 | if (*opts == '\0') { |
854 | malloc_write("<jemalloc>: Conf string ends " |
"with comma\n");
856 | } |
857 | *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; |
858 | accept = true; |
859 | break; |
860 | case '\0': |
861 | *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; |
862 | accept = true; |
863 | break; |
864 | default: |
865 | opts++; |
866 | break; |
867 | } |
868 | } |
869 | |
870 | *opts_p = opts; |
871 | return (false); |
872 | } |
873 | |
874 | static void |
875 | malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, |
876 | size_t vlen) |
877 | { |
878 | |
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
880 | (int)vlen, v); |
881 | } |
882 | |
883 | static void |
884 | malloc_slow_flag_init(void) |
885 | { |
886 | /* |
887 | * Combine the runtime options into malloc_slow for fast path. Called |
888 | * after processing all the options. |
889 | */ |
890 | malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) |
891 | | (opt_junk_free ? flag_opt_junk_free : 0) |
892 | | (opt_quarantine ? flag_opt_quarantine : 0) |
893 | | (opt_zero ? flag_opt_zero : 0) |
894 | | (opt_utrace ? flag_opt_utrace : 0) |
895 | | (opt_xmalloc ? flag_opt_xmalloc : 0); |
896 | |
897 | if (config_valgrind) |
898 | malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0); |
899 | |
900 | malloc_slow = (malloc_slow_flags != 0); |
901 | } |
902 | |
903 | static void |
904 | malloc_conf_init(void) |
905 | { |
906 | unsigned i; |
907 | char buf[PATH_MAX + 1]; |
908 | const char *opts, *k, *v; |
909 | size_t klen, vlen; |
910 | |
911 | /* |
912 | * Automatically configure valgrind before processing options. The |
913 | * valgrind option remains in jemalloc 3.x for compatibility reasons. |
914 | */ |
915 | if (config_valgrind) { |
916 | in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; |
917 | if (config_fill && unlikely(in_valgrind)) { |
opt_junk = "false";
919 | opt_junk_alloc = false; |
920 | opt_junk_free = false; |
921 | assert(!opt_zero); |
922 | opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; |
923 | opt_redzone = true; |
924 | } |
925 | if (config_tcache && unlikely(in_valgrind)) |
926 | opt_tcache = false; |
927 | } |
928 | |
929 | for (i = 0; i < 4; i++) { |
930 | /* Get runtime configuration. */ |
931 | switch (i) { |
932 | case 0: |
933 | opts = config_malloc_conf; |
934 | break; |
935 | case 1: |
936 | if (je_malloc_conf != NULL) { |
937 | /* |
938 | * Use options that were compiled into the |
939 | * program. |
940 | */ |
941 | opts = je_malloc_conf; |
942 | } else { |
943 | /* No configuration specified. */ |
944 | buf[0] = '\0'; |
945 | opts = buf; |
946 | } |
947 | break; |
948 | case 2: { |
949 | ssize_t linklen = 0; |
950 | #ifndef _WIN32 |
951 | int saved_errno = errno; |
952 | const char *linkname = |
953 | # ifdef JEMALLOC_PREFIX |
954 | "/etc/" JEMALLOC_PREFIX"malloc.conf" |
955 | # else |
956 | "/etc/malloc.conf" |
957 | # endif |
958 | ; |
959 | |
960 | /* |
961 | * Try to use the contents of the "/etc/malloc.conf" |
962 | * symbolic link's name. |
963 | */ |
964 | linklen = readlink(linkname, buf, sizeof(buf) - 1); |
965 | if (linklen == -1) { |
966 | /* No configuration specified. */ |
967 | linklen = 0; |
968 | /* Restore errno. */ |
969 | set_errno(saved_errno); |
970 | } |
971 | #endif |
972 | buf[linklen] = '\0'; |
973 | opts = buf; |
974 | break; |
975 | } case 3: { |
976 | const char *envname = |
977 | #ifdef JEMALLOC_PREFIX |
978 | JEMALLOC_CPREFIX"MALLOC_CONF" |
979 | #else |
980 | "MALLOC_CONF" |
981 | #endif |
982 | ; |
983 | |
984 | if ((opts = secure_getenv(envname)) != NULL) { |
985 | /* |
986 | * Do nothing; opts is already initialized to |
987 | * the value of the MALLOC_CONF environment |
988 | * variable. |
989 | */ |
990 | } else { |
991 | /* No configuration specified. */ |
992 | buf[0] = '\0'; |
993 | opts = buf; |
994 | } |
995 | break; |
996 | } default: |
997 | not_reached(); |
998 | buf[0] = '\0'; |
999 | opts = buf; |
1000 | } |
1001 | |
1002 | while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, |
1003 | &vlen)) { |
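/*
 * Option-matching helpers: CONF_MATCH()/CONF_MATCH_VALUE() compare the parsed
 * key/value against a string literal, and the CONF_HANDLE_*() macros parse,
 * range-check, and assign a value of the corresponding type to an opt_*
 * variable, consuming the pair with continue (except for CONF_HANDLE_BOOL
 * when cont is false).
 */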
1004 | #define CONF_MATCH(n) \ |
1005 | (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) |
1006 | #define CONF_MATCH_VALUE(n) \ |
1007 | (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) |
1008 | #define CONF_HANDLE_BOOL(o, n, cont) \ |
1009 | if (CONF_MATCH(n)) { \ |
1010 | if (CONF_MATCH_VALUE("true")) \ |
1011 | o = true; \ |
1012 | else if (CONF_MATCH_VALUE("false")) \ |
1013 | o = false; \ |
1014 | else { \ |
1015 | malloc_conf_error( \ |
1016 | "Invalid conf value", \ |
1017 | k, klen, v, vlen); \ |
1018 | } \ |
1019 | if (cont) \ |
1020 | continue; \ |
1021 | } |
1022 | #define CONF_HANDLE_T_U(t, o, n, min, max, clip) \ |
1023 | if (CONF_MATCH(n)) { \ |
1024 | uintmax_t um; \ |
1025 | char *end; \ |
1026 | \ |
1027 | set_errno(0); \ |
1028 | um = malloc_strtoumax(v, &end, 0); \ |
1029 | if (get_errno() != 0 || (uintptr_t)end -\ |
1030 | (uintptr_t)v != vlen) { \ |
1031 | malloc_conf_error( \ |
1032 | "Invalid conf value", \ |
1033 | k, klen, v, vlen); \ |
1034 | } else if (clip) { \ |
1035 | if ((min) != 0 && um < (min)) \ |
1036 | o = (t)(min); \ |
1037 | else if (um > (max)) \ |
1038 | o = (t)(max); \ |
1039 | else \ |
1040 | o = (t)um; \ |
1041 | } else { \ |
1042 | if (((min) != 0 && um < (min)) \ |
1043 | || um > (max)) { \ |
1044 | malloc_conf_error( \ |
1045 | "Out-of-range " \ |
1046 | "conf value", \ |
1047 | k, klen, v, vlen); \ |
1048 | } else \ |
1049 | o = (t)um; \ |
1050 | } \ |
1051 | continue; \ |
1052 | } |
1053 | #define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \ |
1054 | CONF_HANDLE_T_U(unsigned, o, n, min, max, clip) |
1055 | #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ |
1056 | CONF_HANDLE_T_U(size_t, o, n, min, max, clip) |
1057 | #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ |
1058 | if (CONF_MATCH(n)) { \ |
1059 | long l; \ |
1060 | char *end; \ |
1061 | \ |
1062 | set_errno(0); \ |
1063 | l = strtol(v, &end, 0); \ |
1064 | if (get_errno() != 0 || (uintptr_t)end -\ |
1065 | (uintptr_t)v != vlen) { \ |
1066 | malloc_conf_error( \ |
1067 | "Invalid conf value", \ |
1068 | k, klen, v, vlen); \ |
1069 | } else if (l < (ssize_t)(min) || l > \ |
1070 | (ssize_t)(max)) { \ |
1071 | malloc_conf_error( \ |
1072 | "Out-of-range conf value", \ |
1073 | k, klen, v, vlen); \ |
1074 | } else \ |
1075 | o = l; \ |
1076 | continue; \ |
1077 | } |
1078 | #define CONF_HANDLE_CHAR_P(o, n, d) \ |
1079 | if (CONF_MATCH(n)) { \ |
1080 | size_t cpylen = (vlen <= \ |
1081 | sizeof(o)-1) ? vlen : \ |
1082 | sizeof(o)-1; \ |
1083 | strncpy(o, v, cpylen); \ |
1084 | o[cpylen] = '\0'; \ |
1085 | continue; \ |
1086 | } |
1087 | |
CONF_HANDLE_BOOL(opt_abort, "abort", true)
1089 | /* |
1090 | * Chunks always require at least one header page, |
1091 | * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and |
1092 | * possibly an additional page in the presence of |
1093 | * redzones. In order to simplify options processing, |
1094 | * use a conservative bound that accommodates all these |
1095 | * constraints. |
1096 | */ |
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
(sizeof(size_t) << 3) - 1, true)
if (strncmp("dss", k, klen) == 0) {
1101 | int i; |
1102 | bool match = false; |
1103 | for (i = 0; i < dss_prec_limit; i++) { |
1104 | if (strncmp(dss_prec_names[i], v, vlen) |
1105 | == 0) { |
1106 | if (chunk_dss_prec_set(NULL, |
1107 | i)) { |
1108 | malloc_conf_error( |
"Error setting dss",
1110 | k, klen, v, vlen); |
1111 | } else { |
1112 | opt_dss = |
1113 | dss_prec_names[i]; |
1114 | match = true; |
1115 | break; |
1116 | } |
1117 | } |
1118 | } |
1119 | if (!match) { |
malloc_conf_error("Invalid conf value",
1121 | k, klen, v, vlen); |
1122 | } |
1123 | continue; |
1124 | } |
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
UINT_MAX, false)
if (strncmp("purge", k, klen) == 0) {
1128 | int i; |
1129 | bool match = false; |
1130 | for (i = 0; i < purge_mode_limit; i++) { |
1131 | if (strncmp(purge_mode_names[i], v, |
1132 | vlen) == 0) { |
1133 | opt_purge = (purge_mode_t)i; |
1134 | match = true; |
1135 | break; |
1136 | } |
1137 | } |
1138 | if (!match) { |
malloc_conf_error("Invalid conf value",
1140 | k, klen, v, vlen); |
1141 | } |
1142 | continue; |
1143 | } |
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
NSTIME_SEC_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1149 | if (config_fill) { |
if (CONF_MATCH("junk")) {
if (CONF_MATCH_VALUE("true")) {
opt_junk = "true";
opt_junk_alloc = opt_junk_free =
true;
} else if (CONF_MATCH_VALUE("false")) {
opt_junk = "false";
opt_junk_alloc = opt_junk_free =
false;
} else if (CONF_MATCH_VALUE("alloc")) {
opt_junk = "alloc";
opt_junk_alloc = true;
opt_junk_free = false;
} else if (CONF_MATCH_VALUE("free")) {
opt_junk = "free";
opt_junk_alloc = false;
opt_junk_free = true;
} else {
malloc_conf_error(
"Invalid conf value", k,
1170 | klen, v, vlen); |
1171 | } |
1172 | continue; |
1173 | } |
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true)
1178 | } |
if (config_utrace) {
CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
}
if (config_tcache) {
CONF_HANDLE_BOOL(opt_tcache, "tcache",
!config_valgrind || !in_valgrind)
if (CONF_MATCH("tcache")) {
1189 | assert(config_valgrind && in_valgrind); |
1190 | if (opt_tcache) { |
1191 | opt_tcache = false; |
1192 | malloc_conf_error( |
1193 | "tcache cannot be enabled " |
"while running inside Valgrind",
1195 | k, klen, v, vlen); |
1196 | } |
1197 | continue; |
1198 | } |
1199 | CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, |
"lg_tcache_max", -1,
1201 | (sizeof(size_t) << 3) - 1) |
1202 | } |
1203 | if (config_prof) { |
CONF_HANDLE_BOOL(opt_prof, "prof", true)
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
true)
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
"prof_thread_active_init", true)
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0,
(sizeof(uint64_t) << 3) - 1, true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
true)
CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
true)
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
true)
}
malloc_conf_error("Invalid conf pair", k, klen, v,
1227 | vlen); |
1228 | #undef CONF_MATCH |
1229 | #undef CONF_HANDLE_BOOL |
1230 | #undef CONF_HANDLE_SIZE_T |
1231 | #undef CONF_HANDLE_SSIZE_T |
1232 | #undef CONF_HANDLE_CHAR_P |
1233 | } |
1234 | } |
1235 | } |
1236 | |
1237 | static bool |
1238 | malloc_init_hard_needed(void) |
1239 | { |
1240 | |
1241 | if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == |
1242 | malloc_init_recursible)) { |
1243 | /* |
1244 | * Another thread initialized the allocator before this one |
1245 | * acquired init_lock, or this thread is the initializing |
1246 | * thread, and it is recursively allocating. |
1247 | */ |
1248 | return (false); |
1249 | } |
1250 | #ifdef JEMALLOC_THREADED_INIT |
1251 | if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { |
1252 | /* Busy-wait until the initializing thread completes. */ |
1253 | do { |
1254 | malloc_mutex_unlock(NULL, &init_lock); |
1255 | CPU_SPINWAIT; |
1256 | malloc_mutex_lock(NULL, &init_lock); |
1257 | } while (!malloc_initialized()); |
1258 | return (false); |
1259 | } |
1260 | #endif |
1261 | return (true); |
1262 | } |
1263 | |
1264 | static bool |
malloc_init_hard_a0_locked(void)
1266 | { |
1267 | |
1268 | malloc_initializer = INITIALIZER; |
1269 | |
1270 | if (config_prof) |
1271 | prof_boot0(); |
1272 | malloc_conf_init(); |
1273 | if (opt_stats_print) { |
1274 | /* Print statistics at exit. */ |
1275 | if (atexit(stats_print_atexit) != 0) { |
malloc_write("<jemalloc>: Error in atexit()\n");
1277 | if (opt_abort) |
1278 | abort(); |
1279 | } |
1280 | } |
1281 | pages_boot(); |
1282 | if (base_boot()) |
1283 | return (true); |
1284 | if (chunk_boot()) |
1285 | return (true); |
1286 | if (ctl_boot()) |
1287 | return (true); |
1288 | if (config_prof) |
1289 | prof_boot1(); |
1290 | if (arena_boot()) |
1291 | return (true); |
1292 | if (config_tcache && tcache_boot(TSDN_NULL)) |
1293 | return (true); |
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
1295 | return (true); |
1296 | /* |
1297 | * Create enough scaffolding to allow recursive allocation in |
1298 | * malloc_ncpus(). |
1299 | */ |
1300 | narenas_auto = 1; |
1301 | narenas_total_set(narenas_auto); |
1302 | arenas = &a0; |
1303 | memset(arenas, 0, sizeof(arena_t *) * narenas_auto); |
1304 | /* |
1305 | * Initialize one arena here. The rest are lazily created in |
1306 | * arena_choose_hard(). |
1307 | */ |
1308 | if (arena_init(TSDN_NULL, 0) == NULL) |
1309 | return (true); |
1310 | |
1311 | malloc_init_state = malloc_init_a0_initialized; |
1312 | |
1313 | return (false); |
1314 | } |
1315 | |
1316 | static bool |
1317 | malloc_init_hard_a0(void) |
1318 | { |
1319 | bool ret; |
1320 | |
1321 | malloc_mutex_lock(TSDN_NULL, &init_lock); |
1322 | ret = malloc_init_hard_a0_locked(); |
1323 | malloc_mutex_unlock(TSDN_NULL, &init_lock); |
1324 | return (ret); |
1325 | } |
1326 | |
1327 | /* Initialize data structures which may trigger recursive allocation. */ |
1328 | static bool |
1329 | malloc_init_hard_recursible(void) |
1330 | { |
1331 | |
1332 | malloc_init_state = malloc_init_recursible; |
1333 | |
1334 | ncpus = malloc_ncpus(); |
1335 | |
1336 | #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ |
1337 | && !defined(_WIN32) && !defined(__native_client__)) |
1338 | /* LinuxThreads' pthread_atfork() allocates. */ |
1339 | if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, |
1340 | jemalloc_postfork_child) != 0) { |
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1342 | if (opt_abort) |
1343 | abort(); |
1344 | return (true); |
1345 | } |
1346 | #endif |
1347 | |
1348 | return (false); |
1349 | } |
1350 | |
1351 | static bool |
1352 | malloc_init_hard_finish(tsdn_t *tsdn) |
1353 | { |
1354 | |
1355 | if (malloc_mutex_boot()) |
1356 | return (true); |
1357 | |
1358 | if (opt_narenas == 0) { |
1359 | /* |
1360 | * For SMP systems, create more than one arena per CPU by |
1361 | * default. |
1362 | */ |
1363 | if (ncpus > 1) |
1364 | opt_narenas = ncpus << 2; |
1365 | else |
1366 | opt_narenas = 1; |
1367 | } |
1368 | narenas_auto = opt_narenas; |
1369 | /* |
1370 | * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). |
1371 | */ |
1372 | if (narenas_auto > MALLOCX_ARENA_MAX) { |
1373 | narenas_auto = MALLOCX_ARENA_MAX; |
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1375 | narenas_auto); |
1376 | } |
1377 | narenas_total_set(narenas_auto); |
1378 | |
1379 | /* Allocate and initialize arenas. */ |
1380 | arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * |
1381 | (MALLOCX_ARENA_MAX+1)); |
1382 | if (arenas == NULL) |
1383 | return (true); |
1384 | /* Copy the pointer to the one arena that was already initialized. */ |
1385 | arena_set(0, a0); |
1386 | |
1387 | malloc_init_state = malloc_init_initialized; |
1388 | malloc_slow_flag_init(); |
1389 | |
1390 | return (false); |
1391 | } |
1392 | |
1393 | static bool |
1394 | malloc_init_hard(void) |
1395 | { |
1396 | tsd_t *tsd; |
1397 | |
1398 | #if defined(_WIN32) && _WIN32_WINNT < 0x0600 |
1399 | _init_init_lock(); |
1400 | #endif |
1401 | malloc_mutex_lock(TSDN_NULL, &init_lock); |
1402 | if (!malloc_init_hard_needed()) { |
1403 | malloc_mutex_unlock(TSDN_NULL, &init_lock); |
1404 | return (false); |
1405 | } |
1406 | |
1407 | if (malloc_init_state != malloc_init_a0_initialized && |
1408 | malloc_init_hard_a0_locked()) { |
1409 | malloc_mutex_unlock(TSDN_NULL, &init_lock); |
1410 | return (true); |
1411 | } |
1412 | |
1413 | malloc_mutex_unlock(TSDN_NULL, &init_lock); |
1414 | /* Recursive allocation relies on functional tsd. */ |
1415 | tsd = malloc_tsd_boot0(); |
1416 | if (tsd == NULL) |
1417 | return (true); |
1418 | if (malloc_init_hard_recursible()) |
1419 | return (true); |
1420 | malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); |
1421 | |
1422 | if (config_prof && prof_boot2(tsd_tsdn(tsd))) { |
1423 | malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); |
1424 | return (true); |
1425 | } |
1426 | |
1427 | if (malloc_init_hard_finish(tsd_tsdn(tsd))) { |
1428 | malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); |
1429 | return (true); |
1430 | } |
1431 | |
1432 | malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); |
1433 | malloc_tsd_boot1(); |
1434 | return (false); |
1435 | } |
1436 | |
1437 | /* |
1438 | * End initialization functions. |
1439 | */ |
1440 | /******************************************************************************/ |
1441 | /* |
1442 | * Begin malloc(3)-compatible functions. |
1443 | */ |
1444 | |
1445 | static void * |
1446 | ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, |
1447 | prof_tctx_t *tctx, bool slow_path) |
1448 | { |
1449 | void *p; |
1450 | |
1451 | if (tctx == NULL) |
1452 | return (NULL); |
1453 | if (usize <= SMALL_MAXCLASS) { |
1454 | szind_t ind_large = size2index(LARGE_MINCLASS); |
1455 | p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); |
1456 | if (p == NULL) |
1457 | return (NULL); |
1458 | arena_prof_promoted(tsd_tsdn(tsd), p, usize); |
1459 | } else |
1460 | p = ialloc(tsd, usize, ind, zero, slow_path); |
1461 | |
1462 | return (p); |
1463 | } |
1464 | |
1465 | JEMALLOC_ALWAYS_INLINE_C void * |
1466 | ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) |
1467 | { |
1468 | void *p; |
1469 | prof_tctx_t *tctx; |
1470 | |
1471 | tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); |
1472 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) |
1473 | p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); |
1474 | else |
1475 | p = ialloc(tsd, usize, ind, zero, slow_path); |
1476 | if (unlikely(p == NULL)) { |
1477 | prof_alloc_rollback(tsd, tctx, true); |
1478 | return (NULL); |
1479 | } |
1480 | prof_malloc(tsd_tsdn(tsd), p, usize, tctx); |
1481 | |
1482 | return (p); |
1483 | } |
1484 | |
1485 | /* |
1486 | * ialloc_body() is inlined so that fast and slow paths are generated separately |
1487 | * with statically known slow_path. |
1488 | * |
1489 | * This function guarantees that *tsdn is non-NULL on success. |
1490 | */ |
1491 | JEMALLOC_ALWAYS_INLINE_C void * |
1492 | ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, |
1493 | bool slow_path) |
1494 | { |
1495 | tsd_t *tsd; |
1496 | szind_t ind; |
1497 | |
1498 | if (slow_path && unlikely(malloc_init())) { |
1499 | *tsdn = NULL; |
1500 | return (NULL); |
1501 | } |
1502 | |
1503 | tsd = tsd_fetch(); |
1504 | *tsdn = tsd_tsdn(tsd); |
1505 | witness_assert_lockless(tsd_tsdn(tsd)); |
1506 | |
1507 | ind = size2index(size); |
1508 | if (unlikely(ind >= NSIZES)) |
1509 | return (NULL); |
1510 | |
1511 | if (config_stats || (config_prof && opt_prof) || (slow_path && |
1512 | config_valgrind && unlikely(in_valgrind))) { |
1513 | *usize = index2size(ind); |
1514 | assert(*usize > 0 && *usize <= HUGE_MAXCLASS); |
1515 | } |
1516 | |
1517 | if (config_prof && opt_prof) |
1518 | return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); |
1519 | |
1520 | return (ialloc(tsd, size, ind, zero, slow_path)); |
1521 | } |
1522 | |
1523 | JEMALLOC_ALWAYS_INLINE_C void |
1524 | ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func, |
1525 | bool update_errno, bool slow_path) |
1526 | { |
1527 | |
1528 | assert(!tsdn_null(tsdn) || ret == NULL); |
1529 | |
1530 | if (unlikely(ret == NULL)) { |
1531 | if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { |
1532 | malloc_printf("<jemalloc>: Error in %s(): out of " |
"memory\n", func);
1534 | abort(); |
1535 | } |
1536 | if (update_errno) |
1537 | set_errno(ENOMEM); |
1538 | } |
1539 | if (config_stats && likely(ret != NULL)) { |
1540 | assert(usize == isalloc(tsdn, ret, config_prof)); |
1541 | *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; |
1542 | } |
1543 | witness_assert_lockless(tsdn); |
1544 | } |
1545 | |
1546 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
1547 | void JEMALLOC_NOTHROW * |
1548 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) |
1549 | je_malloc(size_t size) |
1550 | { |
1551 | void *ret; |
1552 | tsdn_t *tsdn; |
1553 | size_t usize JEMALLOC_CC_SILENCE_INIT(0); |
1554 | |
1555 | if (size == 0) |
1556 | size = 1; |
1557 | |
1558 | if (likely(!malloc_slow)) { |
1559 | ret = ialloc_body(size, false, &tsdn, &usize, false); |
ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
} else {
ret = ialloc_body(size, false, &tsdn, &usize, true);
ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
1564 | UTRACE(0, size, ret); |
1565 | JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); |
1566 | } |
1567 | |
1568 | return (ret); |
1569 | } |
1570 | |
1571 | static void * |
1572 | imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, |
1573 | prof_tctx_t *tctx) |
1574 | { |
1575 | void *p; |
1576 | |
1577 | if (tctx == NULL) |
1578 | return (NULL); |
1579 | if (usize <= SMALL_MAXCLASS) { |
1580 | assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); |
1581 | p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); |
1582 | if (p == NULL) |
1583 | return (NULL); |
1584 | arena_prof_promoted(tsd_tsdn(tsd), p, usize); |
1585 | } else |
1586 | p = ipalloc(tsd, usize, alignment, false); |
1587 | |
1588 | return (p); |
1589 | } |
1590 | |
1591 | JEMALLOC_ALWAYS_INLINE_C void * |
1592 | imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) |
1593 | { |
1594 | void *p; |
1595 | prof_tctx_t *tctx; |
1596 | |
1597 | tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); |
1598 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) |
1599 | p = imemalign_prof_sample(tsd, alignment, usize, tctx); |
1600 | else |
1601 | p = ipalloc(tsd, usize, alignment, false); |
1602 | if (unlikely(p == NULL)) { |
1603 | prof_alloc_rollback(tsd, tctx, true); |
1604 | return (NULL); |
1605 | } |
1606 | prof_malloc(tsd_tsdn(tsd), p, usize, tctx); |
1607 | |
1608 | return (p); |
1609 | } |
1610 | |
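/*
 * Common implementation for posix_memalign(), aligned_alloc(), and memalign():
 * the result is returned via *memptr, and the return value is 0 on success,
 * EINVAL if alignment is not a power of two at least min_alignment, or ENOMEM
 * on allocation failure.  Callers pass min_alignment == sizeof(void *) for
 * posix_memalign() semantics and 1 otherwise.
 */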
1611 | JEMALLOC_ATTR(nonnull(1)) |
1612 | static int |
1613 | imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) |
1614 | { |
1615 | int ret; |
1616 | tsd_t *tsd; |
1617 | size_t usize; |
1618 | void *result; |
1619 | |
1620 | assert(min_alignment != 0); |
1621 | |
1622 | if (unlikely(malloc_init())) { |
1623 | tsd = NULL; |
1624 | result = NULL; |
1625 | goto label_oom; |
1626 | } |
1627 | tsd = tsd_fetch(); |
1628 | witness_assert_lockless(tsd_tsdn(tsd)); |
1629 | if (size == 0) |
1630 | size = 1; |
1631 | |
1632 | /* Make sure that alignment is a large enough power of 2. */ |
1633 | if (unlikely(((alignment - 1) & alignment) != 0 |
1634 | || (alignment < min_alignment))) { |
1635 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
1636 | malloc_write("<jemalloc>: Error allocating " |
"aligned memory: invalid alignment\n");
1638 | abort(); |
1639 | } |
1640 | result = NULL; |
1641 | ret = EINVAL; |
1642 | goto label_return; |
1643 | } |
1644 | |
1645 | usize = sa2u(size, alignment); |
1646 | if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { |
1647 | result = NULL; |
1648 | goto label_oom; |
1649 | } |
1650 | |
1651 | if (config_prof && opt_prof) |
1652 | result = imemalign_prof(tsd, alignment, usize); |
1653 | else |
1654 | result = ipalloc(tsd, usize, alignment, false); |
1655 | if (unlikely(result == NULL)) |
1656 | goto label_oom; |
1657 | assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); |
1658 | |
1659 | *memptr = result; |
1660 | ret = 0; |
1661 | label_return: |
1662 | if (config_stats && likely(result != NULL)) { |
1663 | assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); |
1664 | *tsd_thread_allocatedp_get(tsd) += usize; |
1665 | } |
1666 | UTRACE(0, size, result); |
1667 | JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, |
1668 | false); |
1669 | witness_assert_lockless(tsd_tsdn(tsd)); |
1670 | return (ret); |
1671 | label_oom: |
1672 | assert(result == NULL); |
1673 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
1674 | malloc_write("<jemalloc>: Error allocating aligned memory: " |
"out of memory\n");
1676 | abort(); |
1677 | } |
1678 | ret = ENOMEM; |
1679 | witness_assert_lockless(tsd_tsdn(tsd)); |
1680 | goto label_return; |
1681 | } |
1682 | |
1683 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW |
1684 | JEMALLOC_ATTR(nonnull(1)) |
1685 | je_posix_memalign(void **memptr, size_t alignment, size_t size) |
1686 | { |
1687 | int ret; |
1688 | |
1689 | ret = imemalign(memptr, alignment, size, sizeof(void *)); |
1690 | |
1691 | return (ret); |
1692 | } |
1693 | |
1694 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
1695 | void JEMALLOC_NOTHROW * |
1696 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) |
1697 | je_aligned_alloc(size_t alignment, size_t size) |
1698 | { |
1699 | void *ret; |
1700 | int err; |
1701 | |
1702 | if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { |
1703 | ret = NULL; |
1704 | set_errno(err); |
1705 | } |
1706 | |
1707 | return (ret); |
1708 | } |
1709 | |
1710 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
1711 | void JEMALLOC_NOTHROW * |
1712 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) |
1713 | je_calloc(size_t num, size_t size) |
1714 | { |
1715 | void *ret; |
1716 | tsdn_t *tsdn; |
1717 | size_t num_size; |
1718 | size_t usize JEMALLOC_CC_SILENCE_INIT(0); |
1719 | |
1720 | num_size = num * size; |
1721 | if (unlikely(num_size == 0)) { |
1722 | if (num == 0 || size == 0) |
1723 | num_size = 1; |
1724 | else |
1725 | num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ |
1726 | /* |
1727 | * Try to avoid division here. We know that it isn't possible to |
1728 | * overflow during multiplication if neither operand uses any of the |
1729 | * most significant half of the bits in a size_t. |
1730 | */ |
1731 | } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << |
1732 | 2))) && (num_size / size != num))) |
1733 | num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */ |
1734 | |
1735 | if (likely(!malloc_slow)) { |
1736 | ret = ialloc_body(num_size, true, &tsdn, &usize, false); |
ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
} else {
ret = ialloc_body(num_size, true, &tsdn, &usize, true);
ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
1741 | UTRACE(0, num_size, ret); |
1742 | JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true); |
1743 | } |
1744 | |
1745 | return (ret); |
1746 | } |
1747 | |
1748 | static void * |
1749 | irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, |
1750 | prof_tctx_t *tctx) |
1751 | { |
1752 | void *p; |
1753 | |
1754 | if (tctx == NULL) |
1755 | return (NULL); |
1756 | if (usize <= SMALL_MAXCLASS) { |
1757 | p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); |
1758 | if (p == NULL) |
1759 | return (NULL); |
1760 | arena_prof_promoted(tsd_tsdn(tsd), p, usize); |
1761 | } else |
1762 | p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); |
1763 | |
1764 | return (p); |
1765 | } |
1766 | |
1767 | JEMALLOC_ALWAYS_INLINE_C void * |
1768 | irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) |
1769 | { |
1770 | void *p; |
1771 | bool prof_active; |
1772 | prof_tctx_t *old_tctx, *tctx; |
1773 | |
1774 | prof_active = prof_active_get_unlocked(); |
1775 | old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); |
1776 | tctx = prof_alloc_prep(tsd, usize, prof_active, true); |
1777 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) |
1778 | p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); |
1779 | else |
1780 | p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); |
1781 | if (unlikely(p == NULL)) { |
1782 | prof_alloc_rollback(tsd, tctx, true); |
1783 | return (NULL); |
1784 | } |
1785 | prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, |
1786 | old_tctx); |
1787 | |
1788 | return (p); |
1789 | } |
1790 | |
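/*
 * ifree() looks up the allocation's usable size itself when stats, profiling,
 * or Valgrind support needs it; isfree() below is the sized variant for paths
 * where the caller already knows usize.
 */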
1791 | JEMALLOC_INLINE_C void |
1792 | ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) |
1793 | { |
1794 | size_t usize; |
1795 | UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); |
1796 | |
1797 | witness_assert_lockless(tsd_tsdn(tsd)); |
1798 | |
1799 | assert(ptr != NULL); |
1800 | assert(malloc_initialized() || IS_INITIALIZER); |
1801 | |
1802 | if (config_prof && opt_prof) { |
1803 | usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); |
1804 | prof_free(tsd, ptr, usize); |
1805 | } else if (config_stats || config_valgrind) |
1806 | usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); |
1807 | if (config_stats) |
1808 | *tsd_thread_deallocatedp_get(tsd) += usize; |
1809 | |
1810 | if (likely(!slow_path)) |
1811 | iqalloc(tsd, ptr, tcache, false); |
1812 | else { |
1813 | if (config_valgrind && unlikely(in_valgrind)) |
1814 | rzsize = p2rz(tsd_tsdn(tsd), ptr); |
1815 | iqalloc(tsd, ptr, tcache, true); |
1816 | JEMALLOC_VALGRIND_FREE(ptr, rzsize); |
1817 | } |
1818 | } |
1819 | |
1820 | JEMALLOC_INLINE_C void |
1821 | isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) |
1822 | { |
1823 | UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); |
1824 | |
1825 | witness_assert_lockless(tsd_tsdn(tsd)); |
1826 | |
1827 | assert(ptr != NULL); |
1828 | assert(malloc_initialized() || IS_INITIALIZER); |
1829 | |
1830 | if (config_prof && opt_prof) |
1831 | prof_free(tsd, ptr, usize); |
1832 | if (config_stats) |
1833 | *tsd_thread_deallocatedp_get(tsd) += usize; |
1834 | if (config_valgrind && unlikely(in_valgrind)) |
1835 | rzsize = p2rz(tsd_tsdn(tsd), ptr); |
1836 | isqalloc(tsd, ptr, usize, tcache, slow_path); |
1837 | JEMALLOC_VALGRIND_FREE(ptr, rzsize); |
1838 | } |
1839 | |
1840 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
1841 | void JEMALLOC_NOTHROW * |
1842 | JEMALLOC_ALLOC_SIZE(2) |
1843 | je_realloc(void *ptr, size_t size) |
1844 | { |
1845 | void *ret; |
1846 | tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); |
1847 | size_t usize JEMALLOC_CC_SILENCE_INIT(0); |
1848 | size_t old_usize = 0; |
1849 | UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); |
1850 | |
1851 | if (unlikely(size == 0)) { |
1852 | if (ptr != NULL) { |
1853 | tsd_t *tsd; |
1854 | |
1855 | /* realloc(ptr, 0) is equivalent to free(ptr). */ |
1856 | UTRACE(ptr, 0, 0); |
1857 | tsd = tsd_fetch(); |
1858 | ifree(tsd, ptr, tcache_get(tsd, false), true); |
1859 | return (NULL); |
1860 | } |
1861 | size = 1; |
1862 | } |
1863 | |
1864 | if (likely(ptr != NULL)) { |
1865 | tsd_t *tsd; |
1866 | |
1867 | assert(malloc_initialized() || IS_INITIALIZER); |
1868 | malloc_thread_init(); |
1869 | tsd = tsd_fetch(); |
1870 | |
1871 | witness_assert_lockless(tsd_tsdn(tsd)); |
1872 | |
1873 | old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); |
1874 | if (config_valgrind && unlikely(in_valgrind)) { |
1875 | old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : |
1876 | u2rz(old_usize); |
1877 | } |
1878 | |
1879 | if (config_prof && opt_prof) { |
1880 | usize = s2u(size); |
1881 | ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? |
1882 | NULL : irealloc_prof(tsd, ptr, old_usize, usize); |
1883 | } else { |
1884 | if (config_stats || (config_valgrind && |
1885 | unlikely(in_valgrind))) |
1886 | usize = s2u(size); |
1887 | ret = iralloc(tsd, ptr, old_usize, size, 0, false); |
1888 | } |
1889 | tsdn = tsd_tsdn(tsd); |
1890 | } else { |
1891 | /* realloc(NULL, size) is equivalent to malloc(size). */ |
1892 | if (likely(!malloc_slow)) |
1893 | ret = ialloc_body(size, false, &tsdn, &usize, false); |
1894 | else |
1895 | ret = ialloc_body(size, false, &tsdn, &usize, true); |
1896 | assert(!tsdn_null(tsdn) || ret == NULL); |
1897 | } |
1898 | |
1899 | if (unlikely(ret == NULL)) { |
1900 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
1901 | malloc_write("<jemalloc>: Error in realloc(): " |
1902 | "out of memory\n" ); |
1903 | abort(); |
1904 | } |
1905 | set_errno(ENOMEM); |
1906 | } |
1907 | if (config_stats && likely(ret != NULL)) { |
1908 | tsd_t *tsd; |
1909 | |
1910 | assert(usize == isalloc(tsdn, ret, config_prof)); |
1911 | tsd = tsdn_tsd(tsdn); |
1912 | *tsd_thread_allocatedp_get(tsd) += usize; |
1913 | *tsd_thread_deallocatedp_get(tsd) += old_usize; |
1914 | } |
1915 | UTRACE(ptr, size, ret); |
1916 | JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize, |
1917 | old_rzsize, true, false); |
1918 | witness_assert_lockless(tsdn); |
1919 | return (ret); |
1920 | } |
1921 | |
1922 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW |
1923 | je_free(void *ptr) |
1924 | { |
1925 | |
1926 | UTRACE(ptr, 0, 0); |
1927 | if (likely(ptr != NULL)) { |
1928 | tsd_t *tsd = tsd_fetch(); |
1929 | witness_assert_lockless(tsd_tsdn(tsd)); |
1930 | if (likely(!malloc_slow)) |
1931 | ifree(tsd, ptr, tcache_get(tsd, false), false); |
1932 | else |
1933 | ifree(tsd, ptr, tcache_get(tsd, false), true); |
1934 | witness_assert_lockless(tsd_tsdn(tsd)); |
1935 | } |
1936 | } |
1937 | |
1938 | /* |
1939 | * End malloc(3)-compatible functions. |
1940 | */ |
1941 | /******************************************************************************/ |
1942 | /* |
1943 | * Begin non-standard override functions. |
1944 | */ |
1945 | |
1946 | #ifdef JEMALLOC_OVERRIDE_MEMALIGN |
1947 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
1948 | void JEMALLOC_NOTHROW * |
1949 | JEMALLOC_ATTR(malloc) |
1950 | je_memalign(size_t alignment, size_t size) |
1951 | { |
1952 | void *ret JEMALLOC_CC_SILENCE_INIT(NULL); |
1953 | if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) |
1954 | ret = NULL; |
1955 | return (ret); |
1956 | } |
1957 | #endif |
1958 | |
1959 | #ifdef JEMALLOC_OVERRIDE_VALLOC |
1960 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
1961 | void JEMALLOC_NOTHROW * |
1962 | JEMALLOC_ATTR(malloc) |
1963 | je_valloc(size_t size) |
1964 | { |
1965 | void *ret JEMALLOC_CC_SILENCE_INIT(NULL); |
1966 | if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) |
1967 | ret = NULL; |
1968 | return (ret); |
1969 | } |
1970 | #endif |
1971 | |
1972 | /* |
1973 | * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has |
1974 | * #define je_malloc malloc |
1975 | */ |
1976 | #define malloc_is_malloc 1 |
1977 | #define is_malloc_(a) malloc_is_ ## a |
1978 | #define is_malloc(a) is_malloc_(a) |
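/*
 * For example, when jemalloc_defs.h contains "#define je_malloc malloc",
 * the macro argument is expanded before pasting, so:
 *
 *   is_malloc(je_malloc) --> is_malloc_(malloc) --> malloc_is_malloc --> 1
 *
 * Without that mapping the expansion stops at malloc_is_je_malloc, which is
 * undefined and therefore evaluates to 0 in the #if below.
 */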
1979 | |
1980 | #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) |
1981 | /* |
1982 | * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible |
1983 | * to inconsistently reference libc's malloc(3)-compatible functions |
1984 | * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). |
1985 | * |
1986 | * These definitions interpose hooks in glibc. The functions are actually |
1987 | * passed an extra argument for the caller return address, which will be |
1988 | * ignored. |
1989 | */ |
1990 | JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; |
1991 | JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; |
1992 | JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; |
1993 | # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK |
1994 | JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = |
1995 | je_memalign; |
1996 | # endif |
1997 | #endif |
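/*
 * Illustrative effect (assuming a glibc that still honors the allocation
 * hooks): a call that ends up bound to glibc's implementation, e.g. from a
 * library loaded with RTLD_DEEPBIND, is still redirected to jemalloc:
 *
 *   void *p = malloc(32);   (binds to je_malloc directly)
 *   free(p);                (even via glibc, __free_hook routes to je_free)
 */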
1998 | |
1999 | /* |
2000 | * End non-standard override functions. |
2001 | */ |
2002 | /******************************************************************************/ |
2003 | /* |
2004 | * Begin non-standard functions. |
2005 | */ |
2006 | |
2007 | JEMALLOC_ALWAYS_INLINE_C bool |
2008 | imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, |
2009 | size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) |
2010 | { |
2011 | |
2012 | if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { |
2013 | *alignment = 0; |
2014 | *usize = s2u(size); |
2015 | } else { |
2016 | *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); |
2017 | *usize = sa2u(size, *alignment); |
2018 | } |
2019 | if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS)) |
2020 | return (true); |
2021 | *zero = MALLOCX_ZERO_GET(flags); |
2022 | if ((flags & MALLOCX_TCACHE_MASK) != 0) { |
2023 | if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) |
2024 | *tcache = NULL; |
2025 | else |
2026 | *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); |
2027 | } else |
2028 | *tcache = tcache_get(tsd, true); |
2029 | if ((flags & MALLOCX_ARENA_MASK) != 0) { |
2030 | unsigned arena_ind = MALLOCX_ARENA_GET(flags); |
2031 | *arena = arena_get(tsd_tsdn(tsd), arena_ind, true); |
2032 | if (unlikely(*arena == NULL)) |
2033 | return (true); |
2034 | } else |
2035 | *arena = NULL; |
2036 | return (false); |
2037 | } |
2038 | |
2039 | JEMALLOC_ALWAYS_INLINE_C void * |
2040 | imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, |
2041 | tcache_t *tcache, arena_t *arena, bool slow_path) |
2042 | { |
2043 | szind_t ind; |
2044 | |
2045 | if (unlikely(alignment != 0)) |
2046 | return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); |
2047 | ind = size2index(usize); |
2048 | assert(ind < NSIZES); |
2049 | return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena, |
2050 | slow_path)); |
2051 | } |
2052 | |
2053 | static void * |
2054 | imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, |
2055 | tcache_t *tcache, arena_t *arena, bool slow_path) |
2056 | { |
2057 | void *p; |
2058 | |
2059 | if (usize <= SMALL_MAXCLASS) { |
2060 | assert(((alignment == 0) ? s2u(LARGE_MINCLASS) : |
2061 | sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); |
2062 | p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero, |
2063 | tcache, arena, slow_path); |
2064 | if (p == NULL) |
2065 | return (NULL); |
2066 | arena_prof_promoted(tsdn, p, usize); |
2067 | } else { |
2068 | p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, |
2069 | slow_path); |
2070 | } |
2071 | |
2072 | return (p); |
2073 | } |
2074 | |
2075 | JEMALLOC_ALWAYS_INLINE_C void * |
2076 | imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) |
2077 | { |
2078 | void *p; |
2079 | size_t alignment; |
2080 | bool zero; |
2081 | tcache_t *tcache; |
2082 | arena_t *arena; |
2083 | prof_tctx_t *tctx; |
2084 | |
2085 | if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, |
2086 | &zero, &tcache, &arena))) |
2087 | return (NULL); |
2088 | tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); |
2089 | if (likely((uintptr_t)tctx == (uintptr_t)1U)) { |
2090 | p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, |
2091 | tcache, arena, slow_path); |
2092 | } else if ((uintptr_t)tctx > (uintptr_t)1U) { |
2093 | p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero, |
2094 | tcache, arena, slow_path); |
2095 | } else |
2096 | p = NULL; |
2097 | if (unlikely(p == NULL)) { |
2098 | prof_alloc_rollback(tsd, tctx, true); |
2099 | return (NULL); |
2100 | } |
2101 | prof_malloc(tsd_tsdn(tsd), p, *usize, tctx); |
2102 | |
2103 | assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); |
2104 | return (p); |
2105 | } |
2106 | |
2107 | JEMALLOC_ALWAYS_INLINE_C void * |
2108 | imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, |
2109 | bool slow_path) |
2110 | { |
2111 | void *p; |
2112 | size_t alignment; |
2113 | bool zero; |
2114 | tcache_t *tcache; |
2115 | arena_t *arena; |
2116 | |
2117 | if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, |
2118 | &zero, &tcache, &arena))) |
2119 | return (NULL); |
2120 | p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache, |
2121 | arena, slow_path); |
2122 | assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); |
2123 | return (p); |
2124 | } |
2125 | |
2126 | /* This function guarantees that *tsdn is non-NULL on success. */ |
2127 | JEMALLOC_ALWAYS_INLINE_C void * |
2128 | imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize, |
2129 | bool slow_path) |
2130 | { |
2131 | tsd_t *tsd; |
2132 | |
2133 | if (slow_path && unlikely(malloc_init())) { |
2134 | *tsdn = NULL; |
2135 | return (NULL); |
2136 | } |
2137 | |
2138 | tsd = tsd_fetch(); |
2139 | *tsdn = tsd_tsdn(tsd); |
2140 | witness_assert_lockless(tsd_tsdn(tsd)); |
2141 | |
2142 | if (likely(flags == 0)) { |
2143 | szind_t ind = size2index(size); |
2144 | if (unlikely(ind >= NSIZES)) |
2145 | return (NULL); |
2146 | if (config_stats || (config_prof && opt_prof) || (slow_path && |
2147 | config_valgrind && unlikely(in_valgrind))) { |
2148 | *usize = index2size(ind); |
2149 | assert(*usize > 0 && *usize <= HUGE_MAXCLASS); |
2150 | } |
2151 | |
2152 | if (config_prof && opt_prof) { |
2153 | return (ialloc_prof(tsd, *usize, ind, false, |
2154 | slow_path)); |
2155 | } |
2156 | |
2157 | return (ialloc(tsd, size, ind, false, slow_path)); |
2158 | } |
2159 | |
2160 | if (config_prof && opt_prof) |
2161 | return (imallocx_prof(tsd, size, flags, usize, slow_path)); |
2162 | |
2163 | return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); |
2164 | } |
2165 | |
2166 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2167 | void JEMALLOC_NOTHROW * |
2168 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) |
2169 | je_mallocx(size_t size, int flags) |
2170 | { |
2171 | tsdn_t *tsdn; |
2172 | void *p; |
2173 | size_t usize; |
2174 | |
2175 | assert(size != 0); |
2176 | |
2177 | if (likely(!malloc_slow)) { |
2178 | p = imallocx_body(size, flags, &tsdn, &usize, false); |
ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
2180 | } else { |
2181 | p = imallocx_body(size, flags, &tsdn, &usize, true); |
ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
2183 | UTRACE(0, size, p); |
2184 | JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize, |
2185 | MALLOCX_ZERO_GET(flags)); |
2186 | } |
2187 | |
2188 | return (p); |
2189 | } |
2190 | |
2191 | static void * |
2192 | irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, |
2193 | size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, |
2194 | prof_tctx_t *tctx) |
2195 | { |
2196 | void *p; |
2197 | |
2198 | if (tctx == NULL) |
2199 | return (NULL); |
2200 | if (usize <= SMALL_MAXCLASS) { |
2201 | p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, |
2202 | zero, tcache, arena); |
2203 | if (p == NULL) |
2204 | return (NULL); |
2205 | arena_prof_promoted(tsd_tsdn(tsd), p, usize); |
2206 | } else { |
2207 | p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, |
2208 | tcache, arena); |
2209 | } |
2210 | |
2211 | return (p); |
2212 | } |
2213 | |
2214 | JEMALLOC_ALWAYS_INLINE_C void * |
2215 | irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, |
2216 | size_t alignment, size_t *usize, bool zero, tcache_t *tcache, |
2217 | arena_t *arena) |
2218 | { |
2219 | void *p; |
2220 | bool prof_active; |
2221 | prof_tctx_t *old_tctx, *tctx; |
2222 | |
2223 | prof_active = prof_active_get_unlocked(); |
2224 | old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); |
2225 | tctx = prof_alloc_prep(tsd, *usize, prof_active, false); |
2226 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { |
2227 | p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, |
2228 | alignment, zero, tcache, arena, tctx); |
2229 | } else { |
2230 | p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero, |
2231 | tcache, arena); |
2232 | } |
2233 | if (unlikely(p == NULL)) { |
2234 | prof_alloc_rollback(tsd, tctx, false); |
2235 | return (NULL); |
2236 | } |
2237 | |
2238 | if (p == old_ptr && alignment != 0) { |
2239 | /* |
2240 | * The allocation did not move, so it is possible that the size |
2241 | * class is smaller than would guarantee the requested |
2242 | * alignment, and that the alignment constraint was |
2243 | * serendipitously satisfied. Additionally, old_usize may not |
2244 | * be the same as the current usize because of in-place large |
2245 | * reallocation. Therefore, query the actual value of usize. |
2246 | */ |
2247 | *usize = isalloc(tsd_tsdn(tsd), p, config_prof); |
2248 | } |
2249 | prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, |
2250 | old_usize, old_tctx); |
2251 | |
2252 | return (p); |
2253 | } |
2254 | |
2255 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2256 | void JEMALLOC_NOTHROW * |
2257 | JEMALLOC_ALLOC_SIZE(2) |
2258 | je_rallocx(void *ptr, size_t size, int flags) |
2259 | { |
2260 | void *p; |
2261 | tsd_t *tsd; |
2262 | size_t usize; |
2263 | size_t old_usize; |
2264 | UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); |
2265 | size_t alignment = MALLOCX_ALIGN_GET(flags); |
2266 | bool zero = flags & MALLOCX_ZERO; |
2267 | arena_t *arena; |
2268 | tcache_t *tcache; |
2269 | |
2270 | assert(ptr != NULL); |
2271 | assert(size != 0); |
2272 | assert(malloc_initialized() || IS_INITIALIZER); |
2273 | malloc_thread_init(); |
2274 | tsd = tsd_fetch(); |
2275 | witness_assert_lockless(tsd_tsdn(tsd)); |
2276 | |
2277 | if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { |
2278 | unsigned arena_ind = MALLOCX_ARENA_GET(flags); |
2279 | arena = arena_get(tsd_tsdn(tsd), arena_ind, true); |
2280 | if (unlikely(arena == NULL)) |
2281 | goto label_oom; |
2282 | } else |
2283 | arena = NULL; |
2284 | |
2285 | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { |
2286 | if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) |
2287 | tcache = NULL; |
2288 | else |
2289 | tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); |
2290 | } else |
2291 | tcache = tcache_get(tsd, true); |
2292 | |
2293 | old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); |
2294 | if (config_valgrind && unlikely(in_valgrind)) |
2295 | old_rzsize = u2rz(old_usize); |
2296 | |
2297 | if (config_prof && opt_prof) { |
2298 | usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); |
2299 | if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) |
2300 | goto label_oom; |
2301 | p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, |
2302 | zero, tcache, arena); |
2303 | if (unlikely(p == NULL)) |
2304 | goto label_oom; |
2305 | } else { |
2306 | p = iralloct(tsd, ptr, old_usize, size, alignment, zero, |
2307 | tcache, arena); |
2308 | if (unlikely(p == NULL)) |
2309 | goto label_oom; |
2310 | if (config_stats || (config_valgrind && unlikely(in_valgrind))) |
2311 | usize = isalloc(tsd_tsdn(tsd), p, config_prof); |
2312 | } |
2313 | assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); |
2314 | |
2315 | if (config_stats) { |
2316 | *tsd_thread_allocatedp_get(tsd) += usize; |
2317 | *tsd_thread_deallocatedp_get(tsd) += old_usize; |
2318 | } |
2319 | UTRACE(ptr, size, p); |
2320 | JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr, |
2321 | old_usize, old_rzsize, false, zero); |
2322 | witness_assert_lockless(tsd_tsdn(tsd)); |
2323 | return (p); |
2324 | label_oom: |
2325 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
2326 | malloc_write("<jemalloc>: Error in rallocx(): out of memory\n" ); |
2327 | abort(); |
2328 | } |
2329 | UTRACE(ptr, size, 0); |
2330 | witness_assert_lockless(tsd_tsdn(tsd)); |
2331 | return (NULL); |
2332 | } |
2333 | |
2334 | JEMALLOC_ALWAYS_INLINE_C size_t |
2335 | ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, |
size_t extra, size_t alignment, bool zero)
2337 | { |
2338 | size_t usize; |
2339 | |
2340 | if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) |
2341 | return (old_usize); |
2342 | usize = isalloc(tsdn, ptr, config_prof); |
2343 | |
2344 | return (usize); |
2345 | } |
2346 | |
2347 | static size_t |
2348 | ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, |
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
2350 | { |
2351 | size_t usize; |
2352 | |
2353 | if (tctx == NULL) |
2354 | return (old_usize); |
2355 | usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, |
2356 | zero); |
2357 | |
2358 | return (usize); |
2359 | } |
2360 | |
2361 | JEMALLOC_ALWAYS_INLINE_C size_t |
2362 | ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, |
size_t extra, size_t alignment, bool zero)
2364 | { |
2365 | size_t usize_max, usize; |
2366 | bool prof_active; |
2367 | prof_tctx_t *old_tctx, *tctx; |
2368 | |
2369 | prof_active = prof_active_get_unlocked(); |
2370 | old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); |
2371 | /* |
2372 | * usize isn't knowable before ixalloc() returns when extra is non-zero. |
2373 | * Therefore, compute its maximum possible value and use that in |
2374 | * prof_alloc_prep() to decide whether to capture a backtrace. |
2375 | * prof_realloc() will use the actual usize to decide whether to sample. |
2376 | */ |
2377 | if (alignment == 0) { |
2378 | usize_max = s2u(size+extra); |
2379 | assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS); |
2380 | } else { |
2381 | usize_max = sa2u(size+extra, alignment); |
2382 | if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) { |
2383 | /* |
2384 | * usize_max is out of range, and chances are that |
2385 | * allocation will fail, but use the maximum possible |
2386 | * value and carry on with prof_alloc_prep(), just in |
2387 | * case allocation succeeds. |
2388 | */ |
2389 | usize_max = HUGE_MAXCLASS; |
2390 | } |
2391 | } |
2392 | tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); |
2393 | |
2394 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { |
2395 | usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, |
2396 | size, extra, alignment, zero, tctx); |
2397 | } else { |
2398 | usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, |
2399 | extra, alignment, zero); |
2400 | } |
2401 | if (usize == old_usize) { |
2402 | prof_alloc_rollback(tsd, tctx, false); |
2403 | return (usize); |
2404 | } |
2405 | prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, |
2406 | old_tctx); |
2407 | |
2408 | return (usize); |
2409 | } |
2410 | |
2411 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW |
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2413 | { |
2414 | tsd_t *tsd; |
2415 | size_t usize, old_usize; |
2416 | UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); |
2417 | size_t alignment = MALLOCX_ALIGN_GET(flags); |
2418 | bool zero = flags & MALLOCX_ZERO; |
2419 | |
2420 | assert(ptr != NULL); |
2421 | assert(size != 0); |
2422 | assert(SIZE_T_MAX - size >= extra); |
2423 | assert(malloc_initialized() || IS_INITIALIZER); |
2424 | malloc_thread_init(); |
2425 | tsd = tsd_fetch(); |
2426 | witness_assert_lockless(tsd_tsdn(tsd)); |
2427 | |
2428 | old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); |
2429 | |
2430 | /* |
2431 | * The API explicitly absolves itself of protecting against (size + |
2432 | * extra) numerical overflow, but we may need to clamp extra to avoid |
2433 | * exceeding HUGE_MAXCLASS. |
2434 | * |
2435 | * Ordinarily, size limit checking is handled deeper down, but here we |
2436 | * have to check as part of (size + extra) clamping, since we need the |
2437 | * clamped value in the above helper functions. |
2438 | */ |
2439 | if (unlikely(size > HUGE_MAXCLASS)) { |
2440 | usize = old_usize; |
2441 | goto label_not_resized; |
2442 | } |
2443 | if (unlikely(HUGE_MAXCLASS - size < extra)) |
2444 | extra = HUGE_MAXCLASS - size; |
2445 | |
2446 | if (config_valgrind && unlikely(in_valgrind)) |
2447 | old_rzsize = u2rz(old_usize); |
2448 | |
2449 | if (config_prof && opt_prof) { |
2450 | usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, |
2451 | alignment, zero); |
2452 | } else { |
2453 | usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, |
2454 | extra, alignment, zero); |
2455 | } |
2456 | if (unlikely(usize == old_usize)) |
2457 | goto label_not_resized; |
2458 | |
2459 | if (config_stats) { |
2460 | *tsd_thread_allocatedp_get(tsd) += usize; |
2461 | *tsd_thread_deallocatedp_get(tsd) += old_usize; |
2462 | } |
2463 | JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr, |
2464 | old_usize, old_rzsize, false, zero); |
2465 | label_not_resized: |
2466 | UTRACE(ptr, size, ptr); |
2467 | witness_assert_lockless(tsd_tsdn(tsd)); |
2468 | return (usize); |
2469 | } |
2470 | |
2471 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW |
2472 | JEMALLOC_ATTR(pure) |
2473 | je_sallocx(const void *ptr, int flags) |
2474 | { |
2475 | size_t usize; |
2476 | tsdn_t *tsdn; |
2477 | |
2478 | assert(malloc_initialized() || IS_INITIALIZER); |
2479 | malloc_thread_init(); |
2480 | |
2481 | tsdn = tsdn_fetch(); |
2482 | witness_assert_lockless(tsdn); |
2483 | |
2484 | if (config_ivsalloc) |
2485 | usize = ivsalloc(tsdn, ptr, config_prof); |
2486 | else |
2487 | usize = isalloc(tsdn, ptr, config_prof); |
2488 | |
2489 | witness_assert_lockless(tsdn); |
2490 | return (usize); |
2491 | } |
2492 | |
2493 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW |
2494 | je_dallocx(void *ptr, int flags) |
2495 | { |
2496 | tsd_t *tsd; |
2497 | tcache_t *tcache; |
2498 | |
2499 | assert(ptr != NULL); |
2500 | assert(malloc_initialized() || IS_INITIALIZER); |
2501 | |
2502 | tsd = tsd_fetch(); |
2503 | witness_assert_lockless(tsd_tsdn(tsd)); |
2504 | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { |
2505 | if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) |
2506 | tcache = NULL; |
2507 | else |
2508 | tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); |
2509 | } else |
2510 | tcache = tcache_get(tsd, false); |
2511 | |
2512 | UTRACE(ptr, 0, 0); |
2513 | if (likely(!malloc_slow)) |
2514 | ifree(tsd, ptr, tcache, false); |
2515 | else |
2516 | ifree(tsd, ptr, tcache, true); |
2517 | witness_assert_lockless(tsd_tsdn(tsd)); |
2518 | } |
2519 | |
2520 | JEMALLOC_ALWAYS_INLINE_C size_t |
2521 | inallocx(tsdn_t *tsdn, size_t size, int flags) |
2522 | { |
2523 | size_t usize; |
2524 | |
2525 | witness_assert_lockless(tsdn); |
2526 | |
2527 | if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) |
2528 | usize = s2u(size); |
2529 | else |
2530 | usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); |
2531 | witness_assert_lockless(tsdn); |
2532 | return (usize); |
2533 | } |
2534 | |
2535 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW |
2536 | je_sdallocx(void *ptr, size_t size, int flags) |
2537 | { |
2538 | tsd_t *tsd; |
2539 | tcache_t *tcache; |
2540 | size_t usize; |
2541 | |
2542 | assert(ptr != NULL); |
2543 | assert(malloc_initialized() || IS_INITIALIZER); |
2544 | tsd = tsd_fetch(); |
2545 | usize = inallocx(tsd_tsdn(tsd), size, flags); |
2546 | assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof)); |
2547 | |
2548 | witness_assert_lockless(tsd_tsdn(tsd)); |
2549 | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { |
2550 | if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) |
2551 | tcache = NULL; |
2552 | else |
2553 | tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); |
2554 | } else |
2555 | tcache = tcache_get(tsd, false); |
2556 | |
2557 | UTRACE(ptr, 0, 0); |
2558 | if (likely(!malloc_slow)) |
2559 | isfree(tsd, ptr, usize, tcache, false); |
2560 | else |
2561 | isfree(tsd, ptr, usize, tcache, true); |
2562 | witness_assert_lockless(tsd_tsdn(tsd)); |
2563 | } |
2564 | |
2565 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW |
2566 | JEMALLOC_ATTR(pure) |
2567 | je_nallocx(size_t size, int flags) |
2568 | { |
2569 | size_t usize; |
2570 | tsdn_t *tsdn; |
2571 | |
2572 | assert(size != 0); |
2573 | |
2574 | if (unlikely(malloc_init())) |
2575 | return (0); |
2576 | |
2577 | tsdn = tsdn_fetch(); |
2578 | witness_assert_lockless(tsdn); |
2579 | |
2580 | usize = inallocx(tsdn, size, flags); |
2581 | if (unlikely(usize > HUGE_MAXCLASS)) |
2582 | return (0); |
2583 | |
2584 | witness_assert_lockless(tsdn); |
2585 | return (usize); |
2586 | } |
2587 | |
2588 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW |
2589 | je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, |
2590 | size_t newlen) |
2591 | { |
2592 | int ret; |
2593 | tsd_t *tsd; |
2594 | |
2595 | if (unlikely(malloc_init())) |
2596 | return (EAGAIN); |
2597 | |
2598 | tsd = tsd_fetch(); |
2599 | witness_assert_lockless(tsd_tsdn(tsd)); |
2600 | ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); |
2601 | witness_assert_lockless(tsd_tsdn(tsd)); |
2602 | return (ret); |
2603 | } |
2604 | |
2605 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW |
2606 | je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) |
2607 | { |
2608 | int ret; |
2609 | tsdn_t *tsdn; |
2610 | |
2611 | if (unlikely(malloc_init())) |
2612 | return (EAGAIN); |
2613 | |
2614 | tsdn = tsdn_fetch(); |
2615 | witness_assert_lockless(tsdn); |
2616 | ret = ctl_nametomib(tsdn, name, mibp, miblenp); |
2617 | witness_assert_lockless(tsdn); |
2618 | return (ret); |
2619 | } |
2620 | |
2621 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW |
2622 | je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, |
2623 | void *newp, size_t newlen) |
2624 | { |
2625 | int ret; |
2626 | tsd_t *tsd; |
2627 | |
2628 | if (unlikely(malloc_init())) |
2629 | return (EAGAIN); |
2630 | |
2631 | tsd = tsd_fetch(); |
2632 | witness_assert_lockless(tsd_tsdn(tsd)); |
2633 | ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); |
2634 | witness_assert_lockless(tsd_tsdn(tsd)); |
2635 | return (ret); |
2636 | } |
2637 | |
2638 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW |
2639 | je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, |
2640 | const char *opts) |
2641 | { |
2642 | tsdn_t *tsdn; |
2643 | |
2644 | tsdn = tsdn_fetch(); |
2645 | witness_assert_lockless(tsdn); |
2646 | stats_print(write_cb, cbopaque, opts); |
2647 | witness_assert_lockless(tsdn); |
2648 | } |
2649 | |
2650 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW |
2651 | je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) |
2652 | { |
2653 | size_t ret; |
2654 | tsdn_t *tsdn; |
2655 | |
2656 | assert(malloc_initialized() || IS_INITIALIZER); |
2657 | malloc_thread_init(); |
2658 | |
2659 | tsdn = tsdn_fetch(); |
2660 | witness_assert_lockless(tsdn); |
2661 | |
2662 | if (config_ivsalloc) |
2663 | ret = ivsalloc(tsdn, ptr, config_prof); |
2664 | else |
2665 | ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof); |
2666 | |
2667 | witness_assert_lockless(tsdn); |
2668 | return (ret); |
2669 | } |
2670 | |
2671 | /* |
2672 | * End non-standard functions. |
2673 | */ |
2674 | /******************************************************************************/ |
2675 | /* |
2676 | * The following functions are used by threading libraries for protection of |
2677 | * malloc during fork(). |
2678 | */ |
2679 | |
2680 | /* |
2681 | * If an application creates a thread before doing any allocation in the main |
2682 | * thread, then calls fork(2) in the main thread followed by memory allocation |
2683 | * in the child process, a race can occur that results in deadlock within the |
2684 | * child: the main thread may have forked while the created thread had |
2685 | * partially initialized the allocator. Ordinarily jemalloc prevents |
2686 | * fork/malloc races via the following functions it registers during |
2687 | * initialization using pthread_atfork(), but of course that does no good if |
2688 | * the allocator isn't fully initialized at fork time. The following library |
2689 | * constructor is a partial solution to this problem. It may still be possible |
2690 | * to trigger the deadlock described above, but doing so would involve forking |
2691 | * via a library constructor that runs before jemalloc's runs. |
2692 | */ |
2693 | #ifndef JEMALLOC_JET |
2694 | JEMALLOC_ATTR(constructor) |
2695 | static void |
2696 | jemalloc_constructor(void) |
2697 | { |
2698 | |
2699 | malloc_init(); |
2700 | } |
2701 | #endif |
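/*
 * Illustrative interleaving of the race described above (T0 is the main
 * thread, T1 a thread it created before performing any allocation itself):
 *
 *   T1: malloc(...)       (begins lazy initialization, holds internal locks)
 *   T0: fork()            (child inherits those locks in their locked state)
 *   child: malloc(...)    (blocks forever on an inherited lock)
 *
 * Calling malloc_init() from the constructor makes it likely that
 * initialization (and pthread_atfork() registration) completes before any
 * application code can fork.
 */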
2702 | |
2703 | #ifndef JEMALLOC_MUTEX_INIT_CB |
2704 | void |
2705 | jemalloc_prefork(void) |
2706 | #else |
2707 | JEMALLOC_EXPORT void |
2708 | _malloc_prefork(void) |
2709 | #endif |
2710 | { |
2711 | tsd_t *tsd; |
2712 | unsigned i, j, narenas; |
2713 | arena_t *arena; |
2714 | |
2715 | #ifdef JEMALLOC_MUTEX_INIT_CB |
2716 | if (!malloc_initialized()) |
2717 | return; |
2718 | #endif |
2719 | assert(malloc_initialized()); |
2720 | |
2721 | tsd = tsd_fetch(); |
2722 | |
2723 | narenas = narenas_total_get(); |
2724 | |
2725 | witness_prefork(tsd); |
2726 | /* Acquire all mutexes in a safe order. */ |
2727 | ctl_prefork(tsd_tsdn(tsd)); |
2728 | malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); |
2729 | prof_prefork0(tsd_tsdn(tsd)); |
2730 | for (i = 0; i < 3; i++) { |
2731 | for (j = 0; j < narenas; j++) { |
2732 | if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != |
2733 | NULL) { |
2734 | switch (i) { |
2735 | case 0: |
2736 | arena_prefork0(tsd_tsdn(tsd), arena); |
2737 | break; |
2738 | case 1: |
2739 | arena_prefork1(tsd_tsdn(tsd), arena); |
2740 | break; |
2741 | case 2: |
2742 | arena_prefork2(tsd_tsdn(tsd), arena); |
2743 | break; |
2744 | default: not_reached(); |
2745 | } |
2746 | } |
2747 | } |
2748 | } |
2749 | base_prefork(tsd_tsdn(tsd)); |
2750 | chunk_prefork(tsd_tsdn(tsd)); |
2751 | for (i = 0; i < narenas; i++) { |
2752 | if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) |
2753 | arena_prefork3(tsd_tsdn(tsd), arena); |
2754 | } |
2755 | prof_prefork1(tsd_tsdn(tsd)); |
2756 | } |
2757 | |
2758 | #ifndef JEMALLOC_MUTEX_INIT_CB |
2759 | void |
2760 | jemalloc_postfork_parent(void) |
2761 | #else |
2762 | JEMALLOC_EXPORT void |
2763 | _malloc_postfork(void) |
2764 | #endif |
2765 | { |
2766 | tsd_t *tsd; |
2767 | unsigned i, narenas; |
2768 | |
2769 | #ifdef JEMALLOC_MUTEX_INIT_CB |
2770 | if (!malloc_initialized()) |
2771 | return; |
2772 | #endif |
2773 | assert(malloc_initialized()); |
2774 | |
2775 | tsd = tsd_fetch(); |
2776 | |
2777 | witness_postfork_parent(tsd); |
2778 | /* Release all mutexes, now that fork() has completed. */ |
2779 | chunk_postfork_parent(tsd_tsdn(tsd)); |
2780 | base_postfork_parent(tsd_tsdn(tsd)); |
2781 | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { |
2782 | arena_t *arena; |
2783 | |
2784 | if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) |
2785 | arena_postfork_parent(tsd_tsdn(tsd), arena); |
2786 | } |
2787 | prof_postfork_parent(tsd_tsdn(tsd)); |
2788 | malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); |
2789 | ctl_postfork_parent(tsd_tsdn(tsd)); |
2790 | } |
2791 | |
2792 | void |
2793 | jemalloc_postfork_child(void) |
2794 | { |
2795 | tsd_t *tsd; |
2796 | unsigned i, narenas; |
2797 | |
2798 | assert(malloc_initialized()); |
2799 | |
2800 | tsd = tsd_fetch(); |
2801 | |
2802 | witness_postfork_child(tsd); |
2803 | /* Release all mutexes, now that fork() has completed. */ |
2804 | chunk_postfork_child(tsd_tsdn(tsd)); |
2805 | base_postfork_child(tsd_tsdn(tsd)); |
2806 | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { |
2807 | arena_t *arena; |
2808 | |
2809 | if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) |
2810 | arena_postfork_child(tsd_tsdn(tsd), arena); |
2811 | } |
2812 | prof_postfork_child(tsd_tsdn(tsd)); |
2813 | malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); |
2814 | ctl_postfork_child(tsd_tsdn(tsd)); |
2815 | } |
2816 | |
2817 | /******************************************************************************/ |
2818 | |