#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char *je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool opt_abort_conf =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;

unsigned ncpus;

/* Protects arenas initialization. */
malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests. Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 *
 * Points to an arena_t.
 */
JEMALLOC_ALIGNED(CACHELINE)
atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
static atomic_u_t narenas_total; /* Use narenas_total_*(). */
/* Below three are read-only after initialization. */
static arena_t *a0; /* arenas[0]. */
unsigned narenas_auto;
unsigned manual_arena_base;

typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;

/* False should be the common case. Set to true to trigger initialization. */
bool malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_zero		= (1U << 2),
	flag_opt_utrace		= (1U << 3),
	flag_opt_xmalloc	= (1U << 4)
};
static uint8_t malloc_slow_flags;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER		((unsigned long)0)
# define INITIALIZER		pthread_self()
# define IS_INITIALIZER		(malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER		false
# define INITIALIZER		true
# define IS_INITIALIZER		malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void) {
	/*
	 * If another constructor in the same binary is using mallctl to e.g.
	 * set up extent hooks, it may end up running before this one, and
	 * malloc_init_hard will crash trying to lock the uninitialized lock.
	 * So we force an initialization of the lock in malloc_init_hard as
	 * well. We don't try to care about atomicity of the accesses to the
	 * init_lock_initialized boolean, since it really only matters early in
	 * the process creation, before any separate thread normally starts
	 * doing anything.
	 */
	if (!init_lock_initialized) {
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
		    malloc_mutex_rank_exclusive);
	}
	init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif
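
/*
 * Usage sketch (illustrative, not compiled here): the public entry points
 * bracket themselves with UTRACE() so that utrace(2)-based tracing sees both
 * the request and the result, e.g. UTRACE(0, size, ret) after a malloc-style
 * allocation and UTRACE(ptr, 0, 0) after a free-style deallocation, mirroring
 * the malloc_utrace_t fields above.
 */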

/* Whether any invalid config options have been encountered. */
static bool had_conf_error = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

bool
malloc_initialized(void) {
	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
		return malloc_init_hard_a0();
	}
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init(void) {
	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
		return true;
	}
	return false;
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_internal) {
	if (unlikely(malloc_init_a0())) {
		return NULL;
	}

	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
	    is_internal, arena_get(TSDN_NULL, 0, true), true);
}

static void
a0idalloc(void *ptr, bool is_internal) {
	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}

void *
a0malloc(size_t size) {
	return a0ialloc(size, false, true);
}

void
a0dalloc(void *ptr) {
	a0idalloc(ptr, true);
}
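
/*
 * Illustrative pairing (hypothetical caller): bootstrap code that runs before
 * TSD is usable can treat these like malloc()/free(), e.g.
 *
 *	void *buf = a0malloc(len);
 *	...
 *	a0dalloc(buf);
 *
 * since both paths pass TSDN_NULL and never touch TLS.
 */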

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size) {
	if (unlikely(size == 0)) {
		size = 1;
	}

	return a0ialloc(size, false, false);
}

void *
bootstrap_calloc(size_t num, size_t size) {
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return a0ialloc(num_size, true, false);
}

void
bootstrap_free(void *ptr) {
	if (unlikely(ptr == NULL)) {
		return;
	}

	a0idalloc(ptr, false);
}

void
arena_set(unsigned ind, arena_t *arena) {
	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}

static void
narenas_total_set(unsigned narenas) {
	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}

static void
narenas_total_inc(void) {
	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}

unsigned
narenas_total_get(void) {
	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}
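
/*
 * Synchronization sketch (informal): the ATOMIC_RELEASE stores in
 * narenas_total_set()/narenas_total_inc() pair with the ATOMIC_ACQUIRE load
 * in narenas_total_get(), so a reader that observes an updated count also
 * observes all writes made before the count was published.
 */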

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind >= MALLOCX_ARENA_LIMIT) {
		return NULL;
	}
	if (ind == narenas_total_get()) {
		narenas_total_inc();
	}

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(arena_is_auto(arena));
		return arena;
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind, extent_hooks);

	return arena;
}

static void
arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
	if (ind == 0) {
		return;
	}
	if (have_background_thread) {
		bool err;
		malloc_mutex_lock(tsdn, &background_thread_lock);
		err = background_thread_create(tsdn_tsd(tsdn), ind);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		if (err) {
			malloc_printf("<jemalloc>: error in background thread "
			    "creation for arena %u. Abort.\n", ind);
			abort();
		}
	}
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind, extent_hooks);
	malloc_mutex_unlock(tsdn, &arenas_lock);

	arena_new_create_background_thread(tsdn, ind);

	return arena;
}

static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, arena);
	} else {
		tsd_arena_set(tsd, arena);
	}
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, NULL);
	} else {
		tsd_arena_set(tsd, NULL);
	}
}

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array. It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.create mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL) {
		a0dalloc(arenas_tdata_old);
	}
	return tdata;
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
		unsigned choose = percpu_arena_choose();
		ret = arena_get(tsd_tsdn(tsd), choose, true);
		assert(ret != NULL);
		arena_bind(tsd, arena_ind_get(ret), false);
		arena_bind(tsd, arena_ind_get(ret), true);

		return ret;
	}

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;
		bool is_new_arena[2];

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++) {
			choose[j] = 0;
			is_new_arena[j] = false;
		}

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j)) {
						choose[j] = i;
					}
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j],
				    (extent_hooks_t *)&extent_hooks_default);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return NULL;
				}
				is_new_arena[j] = true;
				if (!!j == internal) {
					ret = arena;
				}
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);

		for (j = 0; j < 2; j++) {
			if (is_new_arena[j]) {
				assert(choose[j] > 0);
				arena_new_create_background_thread(
				    tsd_tsdn(tsd), choose[j]);
			}
		}
	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return ret;
}

void
iarena_cleanup(tsd_t *tsd) {
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL) {
		arena_unbind(tsd, arena_ind_get(iarena), true);
	}
}

void
arena_cleanup(tsd_t *tsd) {
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL) {
		arena_unbind(tsd, arena_ind_get(arena), false);
	}
}

void
arenas_tdata_cleanup(tsd_t *tsd) {
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

static void
stats_print_atexit(void) {
	if (config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn,
				    &arena->tcache_ql_mtx);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
}

/*
 * Ensure that we don't hold any locks upon entry to or exit from allocator
 * code (in a "broad" sense that doesn't count a reentrant allocation as an
 * entrance or exit).
 */
JEMALLOC_ALWAYS_INLINE void
check_entry_exit_locking(tsdn_t *tsdn) {
	if (!config_debug) {
		return;
	}
	if (tsdn_null(tsdn)) {
		return;
	}
	tsd_t *tsd = tsdn_tsd(tsdn);
	/*
	 * It's possible we hold locks at entry/exit if we're in a nested
	 * allocation.
	 */
	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
	if (reentrancy_level != 0) {
		return;
	}
	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static char *
jemalloc_secure_getenv(const char *name) {
#ifdef JEMALLOC_HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
# ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0) {
		return NULL;
	}
# endif
	return getenv(name);
#endif
}

static unsigned
malloc_ncpus(void) {
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace(). glibc allocates for the first time
	 * *before* setting up the isspace tables. Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

static void
init_opt_stats_print_opts(const char *v, size_t vlen) {
	size_t opts_len = strlen(opt_stats_print_opts);
	assert(opts_len <= stats_print_tot_num_options);

	for (size_t i = 0; i < vlen; i++) {
		switch (v[i]) {
#define OPTION(o, v, d, s) case o: break;
			STATS_PRINT_OPTIONS
#undef OPTION
		default: continue;
		}

		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
			/* Ignore repeated. */
			continue;
		}

		opt_stats_print_opts[opts_len++] = v[i];
		opt_stats_print_opts[opts_len] = '\0';
		assert(opts_len <= stats_print_tot_num_options);
	}
	assert(opts_len == strlen(opt_stats_print_opts));
}

static bool
malloc_conf_slab_sizes_next(const char **slab_size_segment_cur,
    size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *pgs) {
	const char *cur = *slab_size_segment_cur;
	char *end;
	uintmax_t um;

	set_errno(0);

	/* First number, then '-'. */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != '-') {
		return true;
	}
	*slab_start = (size_t)um;
	cur = end + 1;

	/* Second number, then ':'. */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0 || *end != ':') {
		return true;
	}
	*slab_end = (size_t)um;
	cur = end + 1;

	/* Last number. */
	um = malloc_strtoumax(cur, &end, 0);
	if (get_errno() != 0) {
		return true;
	}
	*pgs = (size_t)um;

	/* Consume the separator if there is one. */
	if (*end == '|') {
		end++;
	}

	*vlen_left -= end - *slab_size_segment_cur;
	*slab_size_segment_cur = end;

	return false;
}
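
/*
 * Example conf setting whose value the parser above consumes segment by
 * segment (each segment is "<start>-<end>:<pgs>", separated by '|'):
 *
 *	slab_sizes:1-4096:1|100000-200000:2
 *
 * i.e., size classes in [1, 4096] use 1-page slabs and size classes in
 * [100000, 200000] use 2-page slabs.
 */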

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return true;
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return true;
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return false;
}
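
/*
 * Parsing example: given opts = "abort:true,narenas:4", the first call sets
 * k/klen to "abort" and v/vlen to "true", leaving *opts_p at "narenas:4"; the
 * second call yields the "narenas"/"4" pair and leaves *opts_p at the
 * terminating '\0'.
 */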

static void
malloc_abort_invalid_conf(void) {
	assert(opt_abort_conf);
	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
	    "value (see above).\n");
	abort();
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen) {
	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
	/* If abort_conf is set, error out after processing all options. */
	const char *experimental = "experimental_";
	if (strncmp(k, experimental, strlen(experimental)) == 0) {
		/* However, tolerate experimental features. */
		return;
	}
	had_conf_error = true;
}

static void
malloc_slow_flag_init(void) {
	/*
	 * Combine the runtime options into malloc_slow for fast path. Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	malloc_slow = (malloc_slow_flags != 0);
}
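
/*
 * Worked example: with junk:"alloc" and zero:true (and utrace/xmalloc off),
 * malloc_slow_flags == flag_opt_junk_alloc | flag_opt_zero == 0x5, so
 * malloc_slow becomes true and the fast paths are skipped.
 */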

static void
malloc_conf_init(sc_data_t *sc_data) {
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/" JEMALLOC_PREFIX "malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
#ifndef JEMALLOC_READLINKAT
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
#else
			linklen = readlinkat(AT_FDCWD, linkname, buf,
			    sizeof(buf) - 1);
#endif
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX "MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
#define CONF_MATCH(n)							\
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n)						\
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n)						\
			if (CONF_MATCH(n)) {				\
				if (CONF_MATCH_VALUE("true")) {		\
					o = true;			\
				} else if (CONF_MATCH_VALUE("false")) {	\
					o = false;			\
				} else {				\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
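
/*
 * For example, CONF_HANDLE_BOOL(opt_abort, "abort") expands to a test that,
 * when the current key is "abort", assigns opt_abort from a "true"/"false"
 * value (reporting anything else as an invalid conf value) and then continues
 * with the next key/value pair.
 */
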
/*
 * One of the CONF_MIN macros below expands, in one of the use points,
 * to "unsigned integer < 0", which is always false, triggering the
 * GCC -Wtype-limits warning, which we disable here and re-enable below.
 */
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS

#define CONF_MIN_no(um, min)	false
#define CONF_MIN_yes(um, min)	((um) < (min))
#define CONF_MAX_no(um, max)	false
#define CONF_MAX_yes(um, max)	((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
			if (CONF_MATCH(n)) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min))) {		\
						o = (t)(min);		\
					} else if (			\
					    CONF_MAX_##check_max(um,	\
					    (t)(max))) {		\
						o = (t)(max);		\
					} else {			\
						o = (t)um;		\
					}				\
				} else {				\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min)) ||		\
					    CONF_MAX_##check_max(um,	\
					    (t)(max))) {		\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else {			\
						o = (t)um;		\
					}				\
				}					\
				continue;				\
			}
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
    clip)								\
			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
			    check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
			    check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (CONF_MATCH(n)) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)(min) || l >	\
				    (ssize_t)(max)) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else {				\
					o = l;				\
				}					\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (CONF_MATCH(n)) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
			if (strncmp("metadata_thp", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < metadata_thp_mode_limit; i++) {
					if (strncmp(metadata_thp_mode_names[i],
					    v, vlen) == 0) {
						opt_metadata_thp = i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_BOOL(opt_retain, "retain")
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (extent_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
			    UINT_MAX, yes, no, false)
			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (CONF_MATCH("stats_print_opts")) {
				init_opt_stats_print_opts(v, vlen);
				continue;
			}
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						opt_junk = "true";
						opt_junk_alloc = opt_junk_free =
						    true;
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						opt_junk = "free";
						opt_junk_alloc = false;
						opt_junk_free = true;
					} else {
						malloc_conf_error(
						    "Invalid conf value", k,
						    klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			CONF_HANDLE_BOOL(opt_tcache, "tcache")
			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
			    -1, (sizeof(size_t) << 3) - 1)

			/* Experimental feature. Will be documented later. */
			CONF_HANDLE_SIZE_T(opt_huge_threshold,
			    "experimental_huge_threshold",
			    SC_LARGE_MINCLASS,
			    SC_LARGE_MAXCLASS, yes, yes, false)
			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
			    "lg_extent_max_active_fit", 0,
			    (sizeof(size_t) << 3), yes, yes, false)

			if (strncmp("percpu_arena", k, klen) == 0) {
				bool match = false;
				for (int i = percpu_arena_mode_names_base; i <
				    percpu_arena_mode_names_limit; i++) {
					if (strncmp(percpu_arena_mode_names[i],
					    v, vlen) == 0) {
						if (!have_percpu_arena) {
							malloc_conf_error(
							    "No getcpu support",
							    k, klen, v, vlen);
						}
						opt_percpu_arena = i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_BOOL(opt_background_thread,
			    "background_thread");
			CONF_HANDLE_SIZE_T(opt_max_background_threads,
			    "max_background_threads", 1,
			    opt_max_background_threads, yes, yes,
			    true);
			if (CONF_MATCH("slab_sizes")) {
				bool err;
				const char *slab_size_segment_cur = v;
				size_t vlen_left = vlen;
				do {
					size_t slab_start;
					size_t slab_end;
					size_t pgs;
					err = malloc_conf_slab_sizes_next(
					    &slab_size_segment_cur,
					    &vlen_left, &slab_start, &slab_end,
					    &pgs);
					if (!err) {
						sc_data_update_slab_size(
						    sc_data, slab_start,
						    slab_end, (int)pgs);
					} else {
						malloc_conf_error(
						    "Invalid settings for "
						    "slab_sizes", k, klen, v,
						    vlen);
					}
				} while (!err && vlen_left > 0);
				continue;
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
				    "prof_thread_active_init")
				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
				    - 1, no, yes, true)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
				CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
			}
			if (config_log) {
				if (CONF_MATCH("log")) {
					size_t cpylen = (
					    vlen <= sizeof(log_var_names) ?
					    vlen : sizeof(log_var_names) - 1);
					strncpy(log_var_names, v, cpylen);
					log_var_names[cpylen] = '\0';
					continue;
				}
			}
			if (CONF_MATCH("thp")) {
				bool match = false;
				for (int i = 0; i < thp_mode_names_limit; i++) {
					if (strncmp(thp_mode_names[i], v, vlen)
					    == 0) {
						if (!have_madvise_huge) {
							malloc_conf_error(
							    "No THP support",
							    k, klen, v, vlen);
						}
						opt_thp = i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
#undef CONF_MIN_no
#undef CONF_MIN_yes
#undef CONF_MAX_no
#undef CONF_MAX_yes
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		/* Re-enable diagnostic "-Wtype-limits". */
		JEMALLOC_DIAGNOSTIC_POP
		}
		if (opt_abort_conf && had_conf_error) {
			malloc_abort_invalid_conf();
		}
	}
	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
}
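
/*
 * Usage example (assuming a build without --with-jemalloc-prefix): the same
 * option string may come from any of the four sources scanned above, e.g.
 *
 *	MALLOC_CONF="narenas:4,dirty_decay_ms:1000" ./a.out
 *
 * Later sources win, since each pass simply re-assigns the corresponding
 * opt_* variables.
 */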

static bool
malloc_init_hard_needed(void) {
	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
	    malloc_init_recursible)) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		return false;
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
		/* Busy-wait until the initializing thread completes. */
		spin_t spinner = SPIN_INITIALIZER;
		do {
			malloc_mutex_unlock(TSDN_NULL, &init_lock);
			spin_adaptive(&spinner);
			malloc_mutex_lock(TSDN_NULL, &init_lock);
		} while (!malloc_initialized());
		return false;
	}
#endif
	return true;
}

static bool
malloc_init_hard_a0_locked() {
	malloc_initializer = INITIALIZER;

	JEMALLOC_DIAGNOSTIC_PUSH
	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
	sc_data_t sc_data = {0};
	JEMALLOC_DIAGNOSTIC_POP

	/*
	 * Ordering here is somewhat tricky; we need sc_boot() first, since that
	 * determines what the size classes will be, and then
	 * malloc_conf_init(), since any slab size tweaking will need to be done
	 * before sz_boot and bin_boot, which assume that the values they read
	 * out of sc_data_global are final.
	 */
	sc_boot(&sc_data);
	/*
	 * prof_boot0 only initializes opt_prof_prefix. We need to do it before
	 * we parse malloc_conf options, in case malloc_conf parsing overwrites
	 * it.
	 */
	if (config_prof) {
		prof_boot0();
	}
	malloc_conf_init(&sc_data);
	sz_boot(&sc_data);
	bin_boot(&sc_data);

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}
	}
	if (pages_boot()) {
		return true;
	}
	if (base_boot(TSDN_NULL)) {
		return true;
	}
	if (extent_boot()) {
		return true;
	}
	if (ctl_boot()) {
		return true;
	}
	if (config_prof) {
		prof_boot1();
	}
	arena_boot(&sc_data);
	if (tcache_boot(TSDN_NULL)) {
		return true;
	}
	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	hook_boot();
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_auto = 1;
	manual_arena_base = narenas_auto + 1;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	/*
	 * Initialize one arena here. The rest are lazily created in
	 * arena_choose_hard().
	 */
	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
	    == NULL) {
		return true;
	}
	a0 = arena_get(TSDN_NULL, 0, false);
	malloc_init_state = malloc_init_a0_initialized;

	return false;
}

static bool
malloc_init_hard_a0(void) {
	bool ret;

	malloc_mutex_lock(TSDN_NULL, &init_lock);
	ret = malloc_init_hard_a0_locked();
	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	return ret;
}

/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void) {
	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) &&			\
    !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort) {
			abort();
		}
		return true;
	}
#endif

	if (background_thread_boot0()) {
		return true;
	}

	return false;
}

static unsigned
malloc_narenas_default(void) {
	assert(ncpus > 0);
	/*
	 * For SMP systems, create more than one arena per CPU by
	 * default.
	 */
	if (ncpus > 1) {
		return ncpus << 2;
	} else {
		return 1;
	}
}
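
/*
 * E.g., ncpus == 8 yields 8 << 2 == 32 arenas by default, while a
 * uniprocessor gets a single arena, since contention is not a concern there.
 */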

static percpu_arena_mode_t
percpu_arena_as_initialized(percpu_arena_mode_t mode) {
	assert(!malloc_initialized());
	assert(mode <= percpu_arena_disabled);

	if (mode != percpu_arena_disabled) {
		mode += percpu_arena_mode_enabled_base;
	}

	return mode;
}

static bool
malloc_init_narenas(void) {
	assert(ncpus > 0);

	if (opt_percpu_arena != percpu_arena_disabled) {
		if (!have_percpu_arena || malloc_getcpu() < 0) {
			opt_percpu_arena = percpu_arena_disabled;
			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
			    "available. Setting narenas to %u.\n", opt_narenas ?
			    opt_narenas : malloc_narenas_default());
			if (opt_abort) {
				abort();
			}
		} else {
			if (ncpus >= MALLOCX_ARENA_LIMIT) {
				malloc_printf("<jemalloc>: narenas w/ percpu "
				    "arena beyond limit (%d)\n", ncpus);
				if (opt_abort) {
					abort();
				}
				return true;
			}
			/* NB: opt_percpu_arena isn't fully initialized yet. */
			if (percpu_arena_as_initialized(opt_percpu_arena) ==
			    per_phycpu_arena && ncpus % 2 != 0) {
				malloc_printf("<jemalloc>: invalid "
				    "configuration -- per physical CPU arena "
				    "with odd number (%u) of CPUs (no hyper "
				    "threading?).\n", ncpus);
				if (opt_abort) {
					abort();
				}
			}
			unsigned n = percpu_arena_ind_limit(
			    percpu_arena_as_initialized(opt_percpu_arena));
			if (opt_narenas < n) {
				/*
				 * If narenas is specified with percpu_arena
				 * enabled, actual narenas is set as the greater
				 * of the two. percpu_arena_choose will be free
				 * to use any of the arenas based on CPU
				 * id. This is conservative (at a small cost)
				 * but ensures correctness.
				 *
				 * If for some reason the ncpus determined at
				 * boot is not the actual number (e.g. because
				 * of affinity setting from numactl), reserving
				 * narenas this way provides a workaround for
				 * percpu_arena.
				 */
				opt_narenas = n;
			}
		}
	}
	if (opt_narenas == 0) {
		opt_narenas = malloc_narenas_default();
	}
	assert(opt_narenas > 0);

	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);
	if (arena_init_huge()) {
		narenas_total_inc();
	}
	manual_arena_base = narenas_total_get();

	return false;
}

static void
malloc_init_percpu(void) {
	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
}

static bool
malloc_init_hard_finish(void) {
	if (malloc_mutex_boot()) {
		return true;
	}

	malloc_init_state = malloc_init_initialized;
	malloc_slow_flag_init();

	return false;
}

static void
malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
	malloc_mutex_assert_owner(tsdn, &init_lock);
	malloc_mutex_unlock(tsdn, &init_lock);
	if (reentrancy_set) {
		assert(!tsdn_null(tsdn));
		tsd_t *tsd = tsdn_tsd(tsdn);
		assert(tsd_reentrancy_level_get(tsd) > 0);
		post_reentrancy(tsd);
	}
}

static bool
malloc_init_hard(void) {
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	malloc_mutex_lock(TSDN_NULL, &init_lock);

#define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
	malloc_init_hard_cleanup(tsdn, reentrancy);	\
	return ret;

	if (!malloc_init_hard_needed()) {
		UNLOCK_RETURN(TSDN_NULL, false, false)
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		UNLOCK_RETURN(TSDN_NULL, true, false)
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL) {
		return true;
	}
	if (malloc_init_hard_recursible()) {
		return true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
	/* Set reentrancy level to 1 during init. */
	pre_reentrancy(tsd, NULL);
	/* Initialize narenas before prof_boot2 (for allocation). */
	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}
	if (config_prof && prof_boot2(tsd)) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}

	malloc_init_percpu();

	if (malloc_init_hard_finish()) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}
	post_reentrancy(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);

	witness_assert_lockless(witness_tsd_tsdn(
	    tsd_witness_tsdp_get_unsafe(tsd)));
	malloc_tsd_boot1();
	/* Update TSD after tsd_boot1. */
	tsd = tsd_fetch();
	if (opt_background_thread) {
		assert(have_background_thread);
		/*
		 * Need to finish init & unlock first before creating background
		 * threads (pthread_create depends on malloc). ctl_init (which
		 * sets isthreaded) needs to be called without holding any lock.
		 */
		background_thread_ctl_init(tsd_tsdn(tsd));

		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
		bool err = background_thread_create(tsd, 0);
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
		if (err) {
			return true;
		}
	}
#undef UNLOCK_RETURN
	return false;
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin allocation-path internal functions and data structures.
 */

/*
 * Settings determined by the documented behavior of the allocation functions.
 */
typedef struct static_opts_s static_opts_t;
struct static_opts_s {
	/* Whether or not allocation size may overflow. */
	bool may_overflow;

	/*
	 * Whether to assert that allocations are not of size 0 (after any
	 * bumping).
	 */
	bool assert_nonempty_alloc;

	/*
	 * Whether or not to modify the 'result' argument to malloc in case of
	 * error.
	 */
	bool null_out_result_on_error;
	/* Whether to set errno when we encounter an error condition. */
	bool set_errno_on_error;

	/*
	 * The minimum valid alignment for functions requesting aligned
	 * storage.
	 */
	size_t min_alignment;

	/* The error string to use if we oom. */
	const char *oom_string;
	/* The error string to use if the passed-in alignment is invalid. */
	const char *invalid_alignment_string;

	/*
	 * False if we're configured to skip some time-consuming operations.
	 *
	 * This isn't really a malloc "behavior", but it acts as a useful
	 * summary of several other static (or at least, static after program
	 * initialization) options.
	 */
	bool slow;
	/*
	 * Return size.
	 */
	bool usize;
};

JEMALLOC_ALWAYS_INLINE void
static_opts_init(static_opts_t *static_opts) {
	static_opts->may_overflow = false;
	static_opts->assert_nonempty_alloc = false;
	static_opts->null_out_result_on_error = false;
	static_opts->set_errno_on_error = false;
	static_opts->min_alignment = 0;
	static_opts->oom_string = "";
	static_opts->invalid_alignment_string = "";
	static_opts->slow = false;
	static_opts->usize = false;
}
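
/*
 * Illustrative (hypothetical) caller sketch: an aligned-allocation entry
 * point would override only the fields whose documented behavior differs
 * from these defaults, e.g.
 *
 *	static_opts_t sopts;
 *	static_opts_init(&sopts);
 *	sopts.min_alignment = sizeof(void *);
 *	sopts.oom_string = "<jemalloc>: Error allocating aligned memory\n";
 *
 * leaving every other knob at its initialized value.
 */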

/*
 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
 * should have one constant here per magic value there. Note however that the
 * representations need not be related.
 */
#define TCACHE_IND_NONE		((unsigned)-1)
#define TCACHE_IND_AUTOMATIC	((unsigned)-2)
#define ARENA_IND_AUTOMATIC	((unsigned)-1)

typedef struct dynamic_opts_s dynamic_opts_t;
struct dynamic_opts_s {
	void **result;
	size_t usize;
	size_t num_items;
	size_t item_size;
	size_t alignment;
	bool zero;
	unsigned tcache_ind;
	unsigned arena_ind;
};

JEMALLOC_ALWAYS_INLINE void
dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
	dynamic_opts->result = NULL;
	dynamic_opts->usize = 0;
	dynamic_opts->num_items = 0;
	dynamic_opts->item_size = 0;
	dynamic_opts->alignment = 0;
	dynamic_opts->zero = false;
	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
}
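
/*
 * Illustrative sketch: a mallocx(size, MALLOCX_ALIGN(64) | MALLOCX_ZERO)
 * style request could be described per call as
 *
 *	dynamic_opts_t dopts;
 *	dynamic_opts_init(&dopts);
 *	dopts.result = &ret;
 *	dopts.num_items = 1;
 *	dopts.item_size = size;
 *	dopts.alignment = 64;
 *	dopts.zero = true;
 *
 * with tcache_ind/arena_ind left in their AUTOMATIC states unless explicit
 * MALLOCX_TCACHE()/MALLOCX_ARENA() flags say otherwise.
 */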

/* ind is ignored if dopts->alignment > 0. */
JEMALLOC_ALWAYS_INLINE void *
imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
    size_t size, size_t usize, szind_t ind) {
	tcache_t *tcache;
	arena_t *arena;

	/* Fill in the tcache. */
	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
		if (likely(!sopts->slow)) {
			/* Getting tcache ptr unconditionally. */
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			tcache = tcache_get(tsd);
		}
	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
		tcache = NULL;
	} else {
		tcache = tcaches_get(tsd, dopts->tcache_ind);
	}

	/* Fill in the arena. */
	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
		/*
		 * In case of automatic arena management, we defer arena
		 * computation until as late as we can, hoping to fill the
		 * allocation out of the tcache.
		 */
		arena = NULL;
	} else {
		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
	}

	if (unlikely(dopts->alignment != 0)) {
		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
		    dopts->zero, tcache, arena);
	}

	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
	    arena, sopts->slow);
}

JEMALLOC_ALWAYS_INLINE void *
imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
    size_t usize, szind_t ind) {
	void *ret;

	/*
	 * For small allocations, sampling bumps the usize. If so, we allocate
	 * from the ind_large bucket.
	 */
	szind_t ind_large;
	size_t bumped_usize = usize;

	if (usize <= SC_SMALL_MAXCLASS) {
		assert(((dopts->alignment == 0) ?
		    sz_s2u(SC_LARGE_MINCLASS) :
		    sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
		    == SC_LARGE_MINCLASS);
		ind_large = sz_size2index(SC_LARGE_MINCLASS);
		bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
		    bumped_usize, ind_large);
		if (unlikely(ret == NULL)) {
			return NULL;
		}
		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
	} else {
		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
	}

	return ret;
}
1875 | |
1876 | /* |
1877 | * Returns true if the allocation will overflow, and false otherwise. Sets |
1878 | * *size to the product either way. |
1879 | */ |
1880 | JEMALLOC_ALWAYS_INLINE bool |
1881 | compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, |
1882 | size_t *size) { |
1883 | /* |
1884 | * This function is just num_items * item_size, except that we may have |
1885 | * to check for overflow. |
1886 | */ |
1887 | |
1888 | if (!may_overflow) { |
1889 | assert(dopts->num_items == 1); |
1890 | *size = dopts->item_size; |
1891 | return false; |
1892 | } |
1893 | |
1894 | /* A size_t with its high-half bits all set to 1. */ |
1895 | static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); |
1896 | |
1897 | *size = dopts->item_size * dopts->num_items; |
1898 | |
1899 | if (unlikely(*size == 0)) { |
1900 | return (dopts->num_items != 0 && dopts->item_size != 0); |
1901 | } |
1902 | |
1903 | /* |
1904 | * We got a non-zero size, but we don't know if we overflowed to get |
1905 | * there. To avoid having to do a divide, we'll be clever and note that |
1906 | * if both A and B can be represented in N/2 bits, then their product |
1907 | * can be represented in N bits (without the possibility of overflow). |
1908 | */ |
1909 | if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { |
1910 | return false; |
1911 | } |
1912 | if (likely(*size / dopts->item_size == dopts->num_items)) { |
1913 | return false; |
1914 | } |
1915 | return true; |
1916 | } |
1917 | |
1918 | JEMALLOC_ALWAYS_INLINE int |
1919 | imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { |
1920 | /* Where the actual allocated memory will live. */ |
1921 | void *allocation = NULL; |
1922 | /* Filled in by compute_size_with_overflow below. */ |
1923 | size_t size = 0; |
1924 | /* |
1925 | * For unaligned allocations, we need only ind. For aligned |
1926 | * allocations, or in case of stats or profiling we need usize. |
1927 | * |
1928 | * These are actually dead stores, in that their values are reset before |
1929 | * any branch on their value is taken. Sometimes though, it's |
1930 | * convenient to pass them as arguments before this point. To avoid |
1931 | * undefined behavior then, we initialize them with dummy stores. |
1932 | */ |
1933 | szind_t ind = 0; |
1934 | size_t usize = 0; |
1935 | |
1936 | /* Reentrancy is only checked on slow path. */ |
1937 | int8_t reentrancy_level; |
1938 | |
1939 | /* Compute the amount of memory the user wants. */ |
1940 | if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, |
1941 | &size))) { |
1942 | goto label_oom; |
1943 | } |
1944 | |
1945 | /* Validate the user input. */ |
1946 | if (sopts->assert_nonempty_alloc) { |
		assert(size != 0);
1948 | } |
1949 | |
1950 | if (unlikely(dopts->alignment < sopts->min_alignment |
1951 | || (dopts->alignment & (dopts->alignment - 1)) != 0)) { |
1952 | goto label_invalid_alignment; |
1953 | } |
1954 | |
1955 | /* This is the beginning of the "core" algorithm. */ |
1956 | |
1957 | if (dopts->alignment == 0) { |
1958 | ind = sz_size2index(size); |
1959 | if (unlikely(ind >= SC_NSIZES)) { |
1960 | goto label_oom; |
1961 | } |
1962 | if (config_stats || (config_prof && opt_prof) || sopts->usize) { |
1963 | usize = sz_index2size(ind); |
1964 | dopts->usize = usize; |
1965 | assert(usize > 0 && usize |
1966 | <= SC_LARGE_MAXCLASS); |
1967 | } |
1968 | } else { |
1969 | usize = sz_sa2u(size, dopts->alignment); |
1970 | dopts->usize = usize; |
1971 | if (unlikely(usize == 0 |
1972 | || usize > SC_LARGE_MAXCLASS)) { |
1973 | goto label_oom; |
1974 | } |
1975 | } |
1976 | |
1977 | check_entry_exit_locking(tsd_tsdn(tsd)); |
1978 | |
1979 | /* |
1980 | * If we need to handle reentrancy, we can do it out of a |
1981 | * known-initialized arena (i.e. arena 0). |
1982 | */ |
1983 | reentrancy_level = tsd_reentrancy_level_get(tsd); |
1984 | if (sopts->slow && unlikely(reentrancy_level > 0)) { |
1985 | /* |
1986 | * We should never specify particular arenas or tcaches from |
1987 | * within our internal allocations. |
1988 | */ |
1989 | assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || |
1990 | dopts->tcache_ind == TCACHE_IND_NONE); |
1991 | assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); |
1992 | dopts->tcache_ind = TCACHE_IND_NONE; |
1993 | /* We know that arena 0 has already been initialized. */ |
1994 | dopts->arena_ind = 0; |
1995 | } |
1996 | |
1997 | /* If profiling is on, get our profiling context. */ |
1998 | if (config_prof && opt_prof) { |
1999 | /* |
2000 | * Note that if we're going down this path, usize must have been |
2001 | * initialized in the previous if statement. |
2002 | */ |
2003 | prof_tctx_t *tctx = prof_alloc_prep( |
2004 | tsd, usize, prof_active_get_unlocked(), true); |
2005 | |
2006 | alloc_ctx_t alloc_ctx; |
2007 | if (likely((uintptr_t)tctx == (uintptr_t)1U)) { |
2008 | alloc_ctx.slab = (usize |
2009 | <= SC_SMALL_MAXCLASS); |
2010 | allocation = imalloc_no_sample( |
2011 | sopts, dopts, tsd, usize, usize, ind); |
2012 | } else if ((uintptr_t)tctx > (uintptr_t)1U) { |
2013 | /* |
2014 | * Note that ind might still be 0 here. This is fine; |
2015 | * imalloc_sample ignores ind if dopts->alignment > 0. |
2016 | */ |
2017 | allocation = imalloc_sample( |
2018 | sopts, dopts, tsd, usize, ind); |
2019 | alloc_ctx.slab = false; |
2020 | } else { |
2021 | allocation = NULL; |
2022 | } |
2023 | |
2024 | if (unlikely(allocation == NULL)) { |
2025 | prof_alloc_rollback(tsd, tctx, true); |
2026 | goto label_oom; |
2027 | } |
2028 | prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); |
2029 | } else { |
2030 | /* |
2031 | * If dopts->alignment > 0, then ind is still 0, but usize was |
2032 | * computed in the previous if statement. Down the positive |
2033 | * alignment path, imalloc_no_sample ignores ind and size |
2034 | * (relying only on usize). |
2035 | */ |
2036 | allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, |
2037 | ind); |
2038 | if (unlikely(allocation == NULL)) { |
2039 | goto label_oom; |
2040 | } |
2041 | } |
2042 | |
2043 | /* |
2044 | * Allocation has been done at this point. We still have some |
2045 | * post-allocation work to do though. |
2046 | */ |
2047 | assert(dopts->alignment == 0 |
2048 | || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); |
2049 | |
2050 | if (config_stats) { |
2051 | assert(usize == isalloc(tsd_tsdn(tsd), allocation)); |
2052 | *tsd_thread_allocatedp_get(tsd) += usize; |
2053 | } |
2054 | |
2055 | if (sopts->slow) { |
2056 | UTRACE(0, size, allocation); |
2057 | } |
2058 | |
2059 | /* Success! */ |
2060 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2061 | *dopts->result = allocation; |
2062 | return 0; |
2063 | |
2064 | label_oom: |
2065 | if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { |
2066 | malloc_write(sopts->oom_string); |
2067 | abort(); |
2068 | } |
2069 | |
2070 | if (sopts->slow) { |
2071 | UTRACE(NULL, size, NULL); |
2072 | } |
2073 | |
2074 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2075 | |
2076 | if (sopts->set_errno_on_error) { |
2077 | set_errno(ENOMEM); |
2078 | } |
2079 | |
2080 | if (sopts->null_out_result_on_error) { |
2081 | *dopts->result = NULL; |
2082 | } |
2083 | |
2084 | return ENOMEM; |
2085 | |
2086 | /* |
2087 | * This label is only jumped to by one goto; we move it out of line |
 * anyway to avoid obscuring the non-error paths, and for symmetry with
2089 | * the oom case. |
2090 | */ |
2091 | label_invalid_alignment: |
2092 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
2093 | malloc_write(sopts->invalid_alignment_string); |
2094 | abort(); |
2095 | } |
2096 | |
2097 | if (sopts->set_errno_on_error) { |
2098 | set_errno(EINVAL); |
2099 | } |
2100 | |
2101 | if (sopts->slow) { |
2102 | UTRACE(NULL, size, NULL); |
2103 | } |
2104 | |
2105 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2106 | |
2107 | if (sopts->null_out_result_on_error) { |
2108 | *dopts->result = NULL; |
2109 | } |
2110 | |
2111 | return EINVAL; |
2112 | } |
2113 | |
2114 | JEMALLOC_ALWAYS_INLINE bool |
2115 | imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { |
2116 | if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { |
2117 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
2118 | malloc_write(sopts->oom_string); |
2119 | abort(); |
2120 | } |
2121 | UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); |
2122 | set_errno(ENOMEM); |
2123 | *dopts->result = NULL; |
2124 | |
2125 | return false; |
2126 | } |
2127 | |
2128 | return true; |
2129 | } |
2130 | |
2131 | /* Returns the errno-style error code of the allocation. */ |
2132 | JEMALLOC_ALWAYS_INLINE int |
2133 | imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { |
2134 | if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { |
2135 | return ENOMEM; |
2136 | } |
2137 | |
2138 | /* We always need the tsd. Let's grab it right away. */ |
2139 | tsd_t *tsd = tsd_fetch(); |
2140 | assert(tsd); |
2141 | if (likely(tsd_fast(tsd))) { |
2142 | /* Fast and common path. */ |
2143 | tsd_assert_fast(tsd); |
2144 | sopts->slow = false; |
2145 | return imalloc_body(sopts, dopts, tsd); |
2146 | } else { |
2147 | if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { |
2148 | return ENOMEM; |
2149 | } |
2150 | |
2151 | sopts->slow = true; |
2152 | return imalloc_body(sopts, dopts, tsd); |
2153 | } |
2154 | } |
2155 | |
2156 | void * |
2157 | malloc_default(size_t size) { |
2158 | void *ret; |
2159 | static_opts_t sopts; |
2160 | dynamic_opts_t dopts; |
2161 | |
	LOG("core.malloc.entry", "size: %zu", size);
2163 | |
2164 | static_opts_init(&sopts); |
2165 | dynamic_opts_init(&dopts); |
2166 | |
2167 | sopts.null_out_result_on_error = true; |
2168 | sopts.set_errno_on_error = true; |
	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2170 | |
2171 | dopts.result = &ret; |
2172 | dopts.num_items = 1; |
2173 | dopts.item_size = size; |
2174 | |
2175 | imalloc(&sopts, &dopts); |
2176 | /* |
2177 | * Note that this branch gets optimized away -- it immediately follows |
2178 | * the check on tsd_fast that sets sopts.slow. |
2179 | */ |
2180 | if (sopts.slow) { |
2181 | uintptr_t args[3] = {size}; |
2182 | hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args); |
2183 | } |
2184 | |
	LOG("core.malloc.exit", "result: %p", ret);
2186 | |
2187 | return ret; |
2188 | } |
2189 | |
2190 | /******************************************************************************/ |
2191 | /* |
2192 | * Begin malloc(3)-compatible functions. |
2193 | */ |
2194 | |
2195 | /* |
2196 | * malloc() fastpath. |
2197 | * |
2198 | * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit |
2199 | * tcache. If either of these is false, we tail-call to the slowpath, |
 * malloc_default().  Tail-calling avoids having to spill and restore
 * caller-saved registers on the fast path.
 *
 * The fastpath supports the tcache GC ticker and profiling, both of which
 * tail-call to the slowpath if they fire.
2205 | */ |
2206 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2207 | void JEMALLOC_NOTHROW * |
2208 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) |
2209 | je_malloc(size_t size) { |
	LOG("core.malloc.entry", "size: %zu", size);
2211 | |
2212 | if (tsd_get_allocates() && unlikely(!malloc_initialized())) { |
2213 | return malloc_default(size); |
2214 | } |
2215 | |
2216 | tsd_t *tsd = tsd_get(false); |
2217 | if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) { |
2218 | return malloc_default(size); |
2219 | } |
2220 | |
2221 | tcache_t *tcache = tsd_tcachep_get(tsd); |
2222 | |
2223 | if (unlikely(ticker_trytick(&tcache->gc_ticker))) { |
2224 | return malloc_default(size); |
2225 | } |
2226 | |
2227 | szind_t ind = sz_size2index_lookup(size); |
2228 | size_t usize; |
2229 | if (config_stats || config_prof) { |
2230 | usize = sz_index2size(ind); |
2231 | } |
	/*
	 * Fast path relies on size being a bin, i.e. SC_LOOKUP_MAXCLASS <
	 * SC_SMALL_MAXCLASS.
	 */
2233 | assert(ind < SC_NBINS); |
2234 | assert(size <= SC_SMALL_MAXCLASS); |
2235 | |
2236 | if (config_prof) { |
2237 | int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); |
2238 | bytes_until_sample -= usize; |
2239 | tsd_bytes_until_sample_set(tsd, bytes_until_sample); |
2240 | |
2241 | if (unlikely(bytes_until_sample < 0)) { |
2242 | /* |
2243 | * Avoid a prof_active check on the fastpath. |
2244 | * If prof_active is false, set bytes_until_sample to |
2245 | * a large value. If prof_active is set to true, |
2246 | * bytes_until_sample will be reset. |
2247 | */ |
2248 | if (!prof_active) { |
2249 | tsd_bytes_until_sample_set(tsd, SSIZE_MAX); |
2250 | } |
2251 | return malloc_default(size); |
2252 | } |
2253 | } |
2254 | |
2255 | cache_bin_t *bin = tcache_small_bin_get(tcache, ind); |
2256 | bool tcache_success; |
	void *ret = cache_bin_alloc_easy(bin, &tcache_success);
2258 | |
2259 | if (tcache_success) { |
2260 | if (config_stats) { |
2261 | *tsd_thread_allocatedp_get(tsd) += usize; |
2262 | bin->tstats.nrequests++; |
2263 | } |
2264 | if (config_prof) { |
2265 | tcache->prof_accumbytes += usize; |
2266 | } |
2267 | |
		LOG("core.malloc.exit", "result: %p", ret);
2269 | |
2270 | /* Fastpath success */ |
2271 | return ret; |
2272 | } |
2273 | |
2274 | return malloc_default(size); |
2275 | } |
2276 | |
2277 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW |
2278 | JEMALLOC_ATTR(nonnull(1)) |
2279 | je_posix_memalign(void **memptr, size_t alignment, size_t size) { |
2280 | int ret; |
2281 | static_opts_t sopts; |
2282 | dynamic_opts_t dopts; |
2283 | |
	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
	    "size: %zu", memptr, alignment, size);
2286 | |
2287 | static_opts_init(&sopts); |
2288 | dynamic_opts_init(&dopts); |
2289 | |
2290 | sopts.min_alignment = sizeof(void *); |
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2295 | |
2296 | dopts.result = memptr; |
2297 | dopts.num_items = 1; |
2298 | dopts.item_size = size; |
2299 | dopts.alignment = alignment; |
2300 | |
2301 | ret = imalloc(&sopts, &dopts); |
2302 | if (sopts.slow) { |
2303 | uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment, |
2304 | (uintptr_t)size}; |
2305 | hook_invoke_alloc(hook_alloc_posix_memalign, *memptr, |
2306 | (uintptr_t)ret, args); |
2307 | } |
2308 | |
	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2310 | *memptr); |
2311 | |
2312 | return ret; |
2313 | } |
2314 | |
2315 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2316 | void JEMALLOC_NOTHROW * |
2317 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) |
2318 | je_aligned_alloc(size_t alignment, size_t size) { |
2319 | void *ret; |
2320 | |
2321 | static_opts_t sopts; |
2322 | dynamic_opts_t dopts; |
2323 | |
	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2325 | alignment, size); |
2326 | |
2327 | static_opts_init(&sopts); |
2328 | dynamic_opts_init(&dopts); |
2329 | |
2330 | sopts.null_out_result_on_error = true; |
2331 | sopts.set_errno_on_error = true; |
2332 | sopts.min_alignment = 1; |
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2337 | |
2338 | dopts.result = &ret; |
2339 | dopts.num_items = 1; |
2340 | dopts.item_size = size; |
2341 | dopts.alignment = alignment; |
2342 | |
2343 | imalloc(&sopts, &dopts); |
2344 | if (sopts.slow) { |
2345 | uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size}; |
2346 | hook_invoke_alloc(hook_alloc_aligned_alloc, ret, |
2347 | (uintptr_t)ret, args); |
2348 | } |
2349 | |
	LOG("core.aligned_alloc.exit", "result: %p", ret);
2351 | |
2352 | return ret; |
2353 | } |
2354 | |
2355 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2356 | void JEMALLOC_NOTHROW * |
2357 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) |
2358 | je_calloc(size_t num, size_t size) { |
2359 | void *ret; |
2360 | static_opts_t sopts; |
2361 | dynamic_opts_t dopts; |
2362 | |
	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2364 | |
2365 | static_opts_init(&sopts); |
2366 | dynamic_opts_init(&dopts); |
2367 | |
2368 | sopts.may_overflow = true; |
2369 | sopts.null_out_result_on_error = true; |
2370 | sopts.set_errno_on_error = true; |
	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2372 | |
2373 | dopts.result = &ret; |
2374 | dopts.num_items = num; |
2375 | dopts.item_size = size; |
2376 | dopts.zero = true; |
2377 | |
2378 | imalloc(&sopts, &dopts); |
2379 | if (sopts.slow) { |
2380 | uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; |
2381 | hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); |
2382 | } |
2383 | |
	LOG("core.calloc.exit", "result: %p", ret);
2385 | |
2386 | return ret; |
2387 | } |
2388 | |
2389 | static void * |
2390 | irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, |
2391 | prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { |
2392 | void *p; |
2393 | |
2394 | if (tctx == NULL) { |
2395 | return NULL; |
2396 | } |
2397 | if (usize <= SC_SMALL_MAXCLASS) { |
2398 | p = iralloc(tsd, old_ptr, old_usize, |
2399 | SC_LARGE_MINCLASS, 0, false, hook_args); |
2400 | if (p == NULL) { |
2401 | return NULL; |
2402 | } |
2403 | arena_prof_promote(tsd_tsdn(tsd), p, usize); |
2404 | } else { |
2405 | p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, |
2406 | hook_args); |
2407 | } |
2408 | |
2409 | return p; |
2410 | } |
2411 | |
2412 | JEMALLOC_ALWAYS_INLINE void * |
2413 | irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, |
2414 | alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { |
2415 | void *p; |
2416 | bool prof_active; |
2417 | prof_tctx_t *old_tctx, *tctx; |
2418 | |
2419 | prof_active = prof_active_get_unlocked(); |
2420 | old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); |
2421 | tctx = prof_alloc_prep(tsd, usize, prof_active, true); |
2422 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { |
2423 | p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx, |
2424 | hook_args); |
2425 | } else { |
2426 | p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, |
2427 | hook_args); |
2428 | } |
2429 | if (unlikely(p == NULL)) { |
2430 | prof_alloc_rollback(tsd, tctx, true); |
2431 | return NULL; |
2432 | } |
2433 | prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, |
2434 | old_tctx); |
2435 | |
2436 | return p; |
2437 | } |
2438 | |
2439 | JEMALLOC_ALWAYS_INLINE void |
2440 | ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { |
2441 | if (!slow_path) { |
2442 | tsd_assert_fast(tsd); |
2443 | } |
2444 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2445 | if (tsd_reentrancy_level_get(tsd) != 0) { |
2446 | assert(slow_path); |
2447 | } |
2448 | |
2449 | assert(ptr != NULL); |
2450 | assert(malloc_initialized() || IS_INITIALIZER); |
2451 | |
2452 | alloc_ctx_t alloc_ctx; |
2453 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); |
2454 | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, |
2455 | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); |
2456 | assert(alloc_ctx.szind != SC_NSIZES); |
2457 | |
2458 | size_t usize; |
2459 | if (config_prof && opt_prof) { |
2460 | usize = sz_index2size(alloc_ctx.szind); |
2461 | prof_free(tsd, ptr, usize, &alloc_ctx); |
2462 | } else if (config_stats) { |
2463 | usize = sz_index2size(alloc_ctx.szind); |
2464 | } |
2465 | if (config_stats) { |
2466 | *tsd_thread_deallocatedp_get(tsd) += usize; |
2467 | } |
2468 | |
2469 | if (likely(!slow_path)) { |
2470 | idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, |
2471 | false); |
2472 | } else { |
2473 | idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, |
2474 | true); |
2475 | } |
2476 | } |
2477 | |
2478 | JEMALLOC_ALWAYS_INLINE void |
2479 | isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { |
2480 | if (!slow_path) { |
2481 | tsd_assert_fast(tsd); |
2482 | } |
2483 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2484 | if (tsd_reentrancy_level_get(tsd) != 0) { |
2485 | assert(slow_path); |
2486 | } |
2487 | |
2488 | assert(ptr != NULL); |
2489 | assert(malloc_initialized() || IS_INITIALIZER); |
2490 | |
2491 | alloc_ctx_t alloc_ctx, *ctx; |
2492 | if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { |
2493 | /* |
2494 | * When cache_oblivious is disabled and ptr is not page aligned, |
2495 | * the allocation was not sampled -- usize can be used to |
2496 | * determine szind directly. |
2497 | */ |
2498 | alloc_ctx.szind = sz_size2index(usize); |
2499 | alloc_ctx.slab = true; |
2500 | ctx = &alloc_ctx; |
2501 | if (config_debug) { |
2502 | alloc_ctx_t dbg_ctx; |
2503 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); |
2504 | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, |
2505 | rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, |
2506 | &dbg_ctx.slab); |
2507 | assert(dbg_ctx.szind == alloc_ctx.szind); |
2508 | assert(dbg_ctx.slab == alloc_ctx.slab); |
2509 | } |
2510 | } else if (config_prof && opt_prof) { |
2511 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); |
2512 | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, |
2513 | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); |
2514 | assert(alloc_ctx.szind == sz_size2index(usize)); |
2515 | ctx = &alloc_ctx; |
2516 | } else { |
2517 | ctx = NULL; |
2518 | } |
2519 | |
2520 | if (config_prof && opt_prof) { |
2521 | prof_free(tsd, ptr, usize, ctx); |
2522 | } |
2523 | if (config_stats) { |
2524 | *tsd_thread_deallocatedp_get(tsd) += usize; |
2525 | } |
2526 | |
2527 | if (likely(!slow_path)) { |
2528 | isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); |
2529 | } else { |
2530 | isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); |
2531 | } |
2532 | } |
2533 | |
2534 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2535 | void JEMALLOC_NOTHROW * |
2536 | JEMALLOC_ALLOC_SIZE(2) |
2537 | je_realloc(void *ptr, size_t arg_size) { |
2538 | void *ret; |
2539 | tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); |
2540 | size_t usize JEMALLOC_CC_SILENCE_INIT(0); |
2541 | size_t old_usize = 0; |
2542 | size_t size = arg_size; |
2543 | |
	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2545 | |
2546 | if (unlikely(size == 0)) { |
2547 | if (ptr != NULL) { |
2548 | /* realloc(ptr, 0) is equivalent to free(ptr). */ |
2549 | UTRACE(ptr, 0, 0); |
2550 | tcache_t *tcache; |
2551 | tsd_t *tsd = tsd_fetch(); |
2552 | if (tsd_reentrancy_level_get(tsd) == 0) { |
2553 | tcache = tcache_get(tsd); |
2554 | } else { |
2555 | tcache = NULL; |
2556 | } |
2557 | |
2558 | uintptr_t args[3] = {(uintptr_t)ptr, size}; |
2559 | hook_invoke_dalloc(hook_dalloc_realloc, ptr, args); |
2560 | |
2561 | ifree(tsd, ptr, tcache, true); |
2562 | |
			LOG("core.realloc.exit", "result: %p", NULL);
2564 | return NULL; |
2565 | } |
2566 | size = 1; |
2567 | } |
2568 | |
2569 | if (likely(ptr != NULL)) { |
2570 | assert(malloc_initialized() || IS_INITIALIZER); |
2571 | tsd_t *tsd = tsd_fetch(); |
2572 | |
2573 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2574 | |
2576 | hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr, |
2577 | (uintptr_t)arg_size, 0, 0}}; |
2578 | |
2579 | alloc_ctx_t alloc_ctx; |
2580 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); |
2581 | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, |
2582 | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); |
2583 | assert(alloc_ctx.szind != SC_NSIZES); |
2584 | old_usize = sz_index2size(alloc_ctx.szind); |
2585 | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); |
2586 | if (config_prof && opt_prof) { |
2587 | usize = sz_s2u(size); |
2588 | if (unlikely(usize == 0 |
2589 | || usize > SC_LARGE_MAXCLASS)) { |
2590 | ret = NULL; |
2591 | } else { |
2592 | ret = irealloc_prof(tsd, ptr, old_usize, usize, |
2593 | &alloc_ctx, &hook_args); |
2594 | } |
2595 | } else { |
2596 | if (config_stats) { |
2597 | usize = sz_s2u(size); |
2598 | } |
2599 | ret = iralloc(tsd, ptr, old_usize, size, 0, false, |
2600 | &hook_args); |
2601 | } |
2602 | tsdn = tsd_tsdn(tsd); |
2603 | } else { |
2604 | /* realloc(NULL, size) is equivalent to malloc(size). */ |
2605 | static_opts_t sopts; |
2606 | dynamic_opts_t dopts; |
2607 | |
2608 | static_opts_init(&sopts); |
2609 | dynamic_opts_init(&dopts); |
2610 | |
2611 | sopts.null_out_result_on_error = true; |
2612 | sopts.set_errno_on_error = true; |
		sopts.oom_string =
		    "<jemalloc>: Error in realloc(): out of memory\n";
2615 | |
2616 | dopts.result = &ret; |
2617 | dopts.num_items = 1; |
2618 | dopts.item_size = size; |
2619 | |
2620 | imalloc(&sopts, &dopts); |
2621 | if (sopts.slow) { |
2622 | uintptr_t args[3] = {(uintptr_t)ptr, arg_size}; |
2623 | hook_invoke_alloc(hook_alloc_realloc, ret, |
2624 | (uintptr_t)ret, args); |
2625 | } |
2626 | |
2627 | return ret; |
2628 | } |
2629 | |
2630 | if (unlikely(ret == NULL)) { |
2631 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
2634 | abort(); |
2635 | } |
2636 | set_errno(ENOMEM); |
2637 | } |
2638 | if (config_stats && likely(ret != NULL)) { |
2639 | tsd_t *tsd; |
2640 | |
2641 | assert(usize == isalloc(tsdn, ret)); |
2642 | tsd = tsdn_tsd(tsdn); |
2643 | *tsd_thread_allocatedp_get(tsd) += usize; |
2644 | *tsd_thread_deallocatedp_get(tsd) += old_usize; |
2645 | } |
2646 | UTRACE(ptr, size, ret); |
2647 | check_entry_exit_locking(tsdn); |
2648 | |
	LOG("core.realloc.exit", "result: %p", ret);
2650 | return ret; |
2651 | } |
2652 | |
2653 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW |
2654 | je_free(void *ptr) { |
	LOG("core.free.entry", "ptr: %p", ptr);
2656 | |
2657 | UTRACE(ptr, 0, 0); |
2658 | if (likely(ptr != NULL)) { |
2659 | /* |
2660 | * We avoid setting up tsd fully (e.g. tcache, arena binding) |
2661 | * based on only free() calls -- other activities trigger the |
2662 | * minimal to full transition. This is because free() may |
2663 | * happen during thread shutdown after tls deallocation: if a |
2664 | * thread never had any malloc activities until then, a |
2665 | * fully-setup tsd won't be destructed properly. |
2666 | */ |
2667 | tsd_t *tsd = tsd_fetch_min(); |
2668 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2669 | |
2670 | tcache_t *tcache; |
2671 | if (likely(tsd_fast(tsd))) { |
2672 | tsd_assert_fast(tsd); |
2673 | /* Unconditionally get tcache ptr on fast path. */ |
2674 | tcache = tsd_tcachep_get(tsd); |
2675 | ifree(tsd, ptr, tcache, false); |
2676 | } else { |
2677 | if (likely(tsd_reentrancy_level_get(tsd) == 0)) { |
2678 | tcache = tcache_get(tsd); |
2679 | } else { |
2680 | tcache = NULL; |
2681 | } |
2682 | uintptr_t args_raw[3] = {(uintptr_t)ptr}; |
2683 | hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); |
2684 | ifree(tsd, ptr, tcache, true); |
2685 | } |
2686 | check_entry_exit_locking(tsd_tsdn(tsd)); |
2687 | } |
	LOG("core.free.exit", "");
2689 | } |
2690 | |
2691 | /* |
2692 | * End malloc(3)-compatible functions. |
2693 | */ |
2694 | /******************************************************************************/ |
2695 | /* |
2696 | * Begin non-standard override functions. |
2697 | */ |
2698 | |
2699 | #ifdef JEMALLOC_OVERRIDE_MEMALIGN |
2700 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2701 | void JEMALLOC_NOTHROW * |
2702 | JEMALLOC_ATTR(malloc) |
2703 | je_memalign(size_t alignment, size_t size) { |
2704 | void *ret; |
2705 | static_opts_t sopts; |
2706 | dynamic_opts_t dopts; |
2707 | |
	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2709 | size); |
2710 | |
2711 | static_opts_init(&sopts); |
2712 | dynamic_opts_init(&dopts); |
2713 | |
2714 | sopts.min_alignment = 1; |
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2719 | sopts.null_out_result_on_error = true; |
2720 | |
2721 | dopts.result = &ret; |
2722 | dopts.num_items = 1; |
2723 | dopts.item_size = size; |
2724 | dopts.alignment = alignment; |
2725 | |
2726 | imalloc(&sopts, &dopts); |
2727 | if (sopts.slow) { |
2728 | uintptr_t args[3] = {alignment, size}; |
2729 | hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret, |
2730 | args); |
2731 | } |
2732 | |
	LOG("core.memalign.exit", "result: %p", ret);
2734 | return ret; |
2735 | } |
2736 | #endif |
2737 | |
2738 | #ifdef JEMALLOC_OVERRIDE_VALLOC |
2739 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2740 | void JEMALLOC_NOTHROW * |
2741 | JEMALLOC_ATTR(malloc) |
2742 | je_valloc(size_t size) { |
2743 | void *ret; |
2744 | |
2745 | static_opts_t sopts; |
2746 | dynamic_opts_t dopts; |
2747 | |
	LOG("core.valloc.entry", "size: %zu\n", size);
2749 | |
2750 | static_opts_init(&sopts); |
2751 | dynamic_opts_init(&dopts); |
2752 | |
2753 | sopts.null_out_result_on_error = true; |
2754 | sopts.min_alignment = PAGE; |
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2759 | |
2760 | dopts.result = &ret; |
2761 | dopts.num_items = 1; |
2762 | dopts.item_size = size; |
2763 | dopts.alignment = PAGE; |
2764 | |
2765 | imalloc(&sopts, &dopts); |
2766 | if (sopts.slow) { |
2767 | uintptr_t args[3] = {size}; |
2768 | hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args); |
2769 | } |
2770 | |
	LOG("core.valloc.exit", "result: %p\n", ret);
2772 | return ret; |
2773 | } |
2774 | #endif |
2775 | |
2776 | #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) |
2777 | /* |
2778 | * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible |
2779 | * to inconsistently reference libc's malloc(3)-compatible functions |
2780 | * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). |
2781 | * |
2782 | * These definitions interpose hooks in glibc. The functions are actually |
2783 | * passed an extra argument for the caller return address, which will be |
2784 | * ignored. |
2785 | */ |
2786 | JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; |
2787 | JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; |
2788 | JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; |
2789 | # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK |
2790 | JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = |
2791 | je_memalign; |
2792 | # endif |
2793 | |
2794 | # ifdef CPU_COUNT |
2795 | /* |
2796 | * To enable static linking with glibc, the libc specific malloc interface must |
2797 | * be implemented also, so none of glibc's malloc.o functions are added to the |
2798 | * link. |
2799 | */ |
2800 | # define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) |
2801 | /* To force macro expansion of je_ prefix before stringification. */ |
2802 | # define PREALIAS(je_fn) ALIAS(je_fn) |
2803 | # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC |
2804 | void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); |
2805 | # endif |
2806 | # ifdef JEMALLOC_OVERRIDE___LIBC_FREE |
2807 | void __libc_free(void* ptr) PREALIAS(je_free); |
2808 | # endif |
2809 | # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC |
2810 | void *__libc_malloc(size_t size) PREALIAS(je_malloc); |
2811 | # endif |
2812 | # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN |
2813 | void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); |
2814 | # endif |
2815 | # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC |
2816 | void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); |
2817 | # endif |
2818 | # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC |
2819 | void *__libc_valloc(size_t size) PREALIAS(je_valloc); |
2820 | # endif |
2821 | # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN |
2822 | int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); |
2823 | # endif |
2824 | # undef PREALIAS |
2825 | # undef ALIAS |
2826 | # endif |
2827 | #endif |
2828 | |
2829 | /* |
2830 | * End non-standard override functions. |
2831 | */ |
2832 | /******************************************************************************/ |
2833 | /* |
2834 | * Begin non-standard functions. |
2835 | */ |
2836 | |
2837 | #ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API |
2838 | |
2839 | #define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y |
2840 | #define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \ |
2841 | JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) |
2842 | |
2843 | typedef struct { |
2844 | void *ptr; |
2845 | size_t size; |
2846 | } smallocx_return_t; |
2847 | |
2848 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2849 | smallocx_return_t JEMALLOC_NOTHROW |
2850 | /* |
2851 | * The attribute JEMALLOC_ATTR(malloc) cannot be used due to: |
2852 | * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488 |
2853 | */ |
2854 | JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) |
2855 | (size_t size, int flags) { |
2856 | /* |
2857 | * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be |
2858 | * used here because it makes writing beyond the `size` |
2859 | * of the `ptr` undefined behavior, but the objective |
2860 | * of this function is to allow writing beyond `size` |
2861 | * up to `smallocx_return_t::size`. |
2862 | */ |
2863 | smallocx_return_t ret; |
2864 | static_opts_t sopts; |
2865 | dynamic_opts_t dopts; |
2866 | |
	LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);
2868 | |
2869 | static_opts_init(&sopts); |
2870 | dynamic_opts_init(&dopts); |
2871 | |
2872 | sopts.assert_nonempty_alloc = true; |
2873 | sopts.null_out_result_on_error = true; |
	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2875 | sopts.usize = true; |
2876 | |
2877 | dopts.result = &ret.ptr; |
2878 | dopts.num_items = 1; |
2879 | dopts.item_size = size; |
2880 | if (unlikely(flags != 0)) { |
2881 | if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { |
2882 | dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); |
2883 | } |
2884 | |
2885 | dopts.zero = MALLOCX_ZERO_GET(flags); |
2886 | |
2887 | if ((flags & MALLOCX_TCACHE_MASK) != 0) { |
2888 | if ((flags & MALLOCX_TCACHE_MASK) |
2889 | == MALLOCX_TCACHE_NONE) { |
2890 | dopts.tcache_ind = TCACHE_IND_NONE; |
2891 | } else { |
2892 | dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); |
2893 | } |
2894 | } else { |
2895 | dopts.tcache_ind = TCACHE_IND_AUTOMATIC; |
2896 | } |
2897 | |
		if ((flags & MALLOCX_ARENA_MASK) != 0) {
			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
		}
2900 | } |
2901 | |
2902 | imalloc(&sopts, &dopts); |
2903 | assert(dopts.usize == je_nallocx(size, flags)); |
2904 | ret.size = dopts.usize; |
2905 | |
	LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
2907 | return ret; |
2908 | } |
2909 | #undef JEMALLOC_SMALLOCX_CONCAT_HELPER |
2910 | #undef JEMALLOC_SMALLOCX_CONCAT_HELPER2 |
2911 | #endif |
2912 | |
2913 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
2914 | void JEMALLOC_NOTHROW * |
2915 | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) |
2916 | je_mallocx(size_t size, int flags) { |
2917 | void *ret; |
2918 | static_opts_t sopts; |
2919 | dynamic_opts_t dopts; |
2920 | |
	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2922 | |
2923 | static_opts_init(&sopts); |
2924 | dynamic_opts_init(&dopts); |
2925 | |
2926 | sopts.assert_nonempty_alloc = true; |
2927 | sopts.null_out_result_on_error = true; |
	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2929 | |
2930 | dopts.result = &ret; |
2931 | dopts.num_items = 1; |
2932 | dopts.item_size = size; |
2933 | if (unlikely(flags != 0)) { |
2934 | if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { |
2935 | dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); |
2936 | } |
2937 | |
2938 | dopts.zero = MALLOCX_ZERO_GET(flags); |
2939 | |
2940 | if ((flags & MALLOCX_TCACHE_MASK) != 0) { |
2941 | if ((flags & MALLOCX_TCACHE_MASK) |
2942 | == MALLOCX_TCACHE_NONE) { |
2943 | dopts.tcache_ind = TCACHE_IND_NONE; |
2944 | } else { |
2945 | dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); |
2946 | } |
2947 | } else { |
2948 | dopts.tcache_ind = TCACHE_IND_AUTOMATIC; |
2949 | } |
2950 | |
		if ((flags & MALLOCX_ARENA_MASK) != 0) {
			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
		}
2953 | } |
2954 | |
2955 | imalloc(&sopts, &dopts); |
2956 | if (sopts.slow) { |
2957 | uintptr_t args[3] = {size, flags}; |
2958 | hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret, |
2959 | args); |
2960 | } |
2961 | |
	LOG("core.mallocx.exit", "result: %p", ret);
2963 | return ret; |
2964 | } |
2965 | |
2966 | static void * |
2967 | irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, |
2968 | size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, |
2969 | prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { |
2970 | void *p; |
2971 | |
2972 | if (tctx == NULL) { |
2973 | return NULL; |
2974 | } |
2975 | if (usize <= SC_SMALL_MAXCLASS) { |
2976 | p = iralloct(tsdn, old_ptr, old_usize, |
2977 | SC_LARGE_MINCLASS, alignment, zero, tcache, |
2978 | arena, hook_args); |
2979 | if (p == NULL) { |
2980 | return NULL; |
2981 | } |
2982 | arena_prof_promote(tsdn, p, usize); |
2983 | } else { |
2984 | p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, |
2985 | tcache, arena, hook_args); |
2986 | } |
2987 | |
2988 | return p; |
2989 | } |
2990 | |
2991 | JEMALLOC_ALWAYS_INLINE void * |
2992 | irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, |
2993 | size_t alignment, size_t *usize, bool zero, tcache_t *tcache, |
2994 | arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { |
2995 | void *p; |
2996 | bool prof_active; |
2997 | prof_tctx_t *old_tctx, *tctx; |
2998 | |
2999 | prof_active = prof_active_get_unlocked(); |
3000 | old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); |
3001 | tctx = prof_alloc_prep(tsd, *usize, prof_active, false); |
3002 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { |
3003 | p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, |
3004 | *usize, alignment, zero, tcache, arena, tctx, hook_args); |
3005 | } else { |
3006 | p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, |
3007 | zero, tcache, arena, hook_args); |
3008 | } |
3009 | if (unlikely(p == NULL)) { |
3010 | prof_alloc_rollback(tsd, tctx, false); |
3011 | return NULL; |
3012 | } |
3013 | |
3014 | if (p == old_ptr && alignment != 0) { |
3015 | /* |
3016 | * The allocation did not move, so it is possible that the size |
3017 | * class is smaller than would guarantee the requested |
3018 | * alignment, and that the alignment constraint was |
3019 | * serendipitously satisfied. Additionally, old_usize may not |
3020 | * be the same as the current usize because of in-place large |
3021 | * reallocation. Therefore, query the actual value of usize. |
3022 | */ |
3023 | *usize = isalloc(tsd_tsdn(tsd), p); |
3024 | } |
3025 | prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, |
3026 | old_usize, old_tctx); |
3027 | |
3028 | return p; |
3029 | } |
3030 | |
3031 | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN |
3032 | void JEMALLOC_NOTHROW * |
3033 | JEMALLOC_ALLOC_SIZE(2) |
3034 | je_rallocx(void *ptr, size_t size, int flags) { |
3035 | void *p; |
3036 | tsd_t *tsd; |
3037 | size_t usize; |
3038 | size_t old_usize; |
3039 | size_t alignment = MALLOCX_ALIGN_GET(flags); |
3040 | bool zero = flags & MALLOCX_ZERO; |
3041 | arena_t *arena; |
3042 | tcache_t *tcache; |
3043 | |
	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3045 | size, flags); |
3046 | |
3048 | assert(ptr != NULL); |
3049 | assert(size != 0); |
3050 | assert(malloc_initialized() || IS_INITIALIZER); |
3051 | tsd = tsd_fetch(); |
3052 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3053 | |
3054 | if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { |
3055 | unsigned arena_ind = MALLOCX_ARENA_GET(flags); |
3056 | arena = arena_get(tsd_tsdn(tsd), arena_ind, true); |
3057 | if (unlikely(arena == NULL)) { |
3058 | goto label_oom; |
3059 | } |
3060 | } else { |
3061 | arena = NULL; |
3062 | } |
3063 | |
3064 | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { |
3065 | if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { |
3066 | tcache = NULL; |
3067 | } else { |
3068 | tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); |
3069 | } |
3070 | } else { |
3071 | tcache = tcache_get(tsd); |
3072 | } |
3073 | |
3074 | alloc_ctx_t alloc_ctx; |
3075 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); |
3076 | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, |
3077 | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); |
3078 | assert(alloc_ctx.szind != SC_NSIZES); |
3079 | old_usize = sz_index2size(alloc_ctx.szind); |
3080 | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); |
3081 | |
3082 | hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags, |
3083 | 0}}; |
3084 | if (config_prof && opt_prof) { |
3085 | usize = (alignment == 0) ? |
3086 | sz_s2u(size) : sz_sa2u(size, alignment); |
3087 | if (unlikely(usize == 0 |
3088 | || usize > SC_LARGE_MAXCLASS)) { |
3089 | goto label_oom; |
3090 | } |
3091 | p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, |
3092 | zero, tcache, arena, &alloc_ctx, &hook_args); |
3093 | if (unlikely(p == NULL)) { |
3094 | goto label_oom; |
3095 | } |
3096 | } else { |
3097 | p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, |
3098 | zero, tcache, arena, &hook_args); |
3099 | if (unlikely(p == NULL)) { |
3100 | goto label_oom; |
3101 | } |
3102 | if (config_stats) { |
3103 | usize = isalloc(tsd_tsdn(tsd), p); |
3104 | } |
3105 | } |
3106 | assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); |
3107 | |
3108 | if (config_stats) { |
3109 | *tsd_thread_allocatedp_get(tsd) += usize; |
3110 | *tsd_thread_deallocatedp_get(tsd) += old_usize; |
3111 | } |
3112 | UTRACE(ptr, size, p); |
3113 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3114 | |
	LOG("core.rallocx.exit", "result: %p", p);
3116 | return p; |
3117 | label_oom: |
3118 | if (config_xmalloc && unlikely(opt_xmalloc)) { |
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
3120 | abort(); |
3121 | } |
3122 | UTRACE(ptr, size, 0); |
3123 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3124 | |
	LOG("core.rallocx.exit", "result: %p", NULL);
3126 | return NULL; |
3127 | } |
3128 | |
3129 | JEMALLOC_ALWAYS_INLINE size_t |
3130 | ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, |
    size_t extra, size_t alignment, bool zero) {
3132 | size_t newsize; |
3133 | |
3134 | if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero, |
3135 | &newsize)) { |
3136 | return old_usize; |
3137 | } |
3138 | |
3139 | return newsize; |
3140 | } |
3141 | |
3142 | static size_t |
3143 | ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, |
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
3145 | size_t usize; |
3146 | |
3147 | if (tctx == NULL) { |
3148 | return old_usize; |
3149 | } |
3150 | usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, |
3151 | zero); |
3152 | |
3153 | return usize; |
3154 | } |
3155 | |
3156 | JEMALLOC_ALWAYS_INLINE size_t |
3157 | ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, |
    size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
3159 | size_t usize_max, usize; |
3160 | bool prof_active; |
3161 | prof_tctx_t *old_tctx, *tctx; |
3162 | |
3163 | prof_active = prof_active_get_unlocked(); |
3164 | old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); |
3165 | /* |
3166 | * usize isn't knowable before ixalloc() returns when extra is non-zero. |
3167 | * Therefore, compute its maximum possible value and use that in |
3168 | * prof_alloc_prep() to decide whether to capture a backtrace. |
3169 | * prof_realloc() will use the actual usize to decide whether to sample. |
3170 | */ |
3171 | if (alignment == 0) { |
3172 | usize_max = sz_s2u(size+extra); |
3173 | assert(usize_max > 0 |
3174 | && usize_max <= SC_LARGE_MAXCLASS); |
3175 | } else { |
3176 | usize_max = sz_sa2u(size+extra, alignment); |
3177 | if (unlikely(usize_max == 0 |
3178 | || usize_max > SC_LARGE_MAXCLASS)) { |
3179 | /* |
3180 | * usize_max is out of range, and chances are that |
3181 | * allocation will fail, but use the maximum possible |
3182 | * value and carry on with prof_alloc_prep(), just in |
3183 | * case allocation succeeds. |
3184 | */ |
3185 | usize_max = SC_LARGE_MAXCLASS; |
3186 | } |
3187 | } |
3188 | tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); |
3189 | |
3190 | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { |
3191 | usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, |
3192 | size, extra, alignment, zero, tctx); |
3193 | } else { |
3194 | usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, |
3195 | extra, alignment, zero); |
3196 | } |
3197 | if (usize == old_usize) { |
3198 | prof_alloc_rollback(tsd, tctx, false); |
3199 | return usize; |
3200 | } |
3201 | prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, |
3202 | old_tctx); |
3203 | |
3204 | return usize; |
3205 | } |
3206 | |
3207 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW |
je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
3209 | tsd_t *tsd; |
3210 | size_t usize, old_usize; |
3211 | size_t alignment = MALLOCX_ALIGN_GET(flags); |
3212 | bool zero = flags & MALLOCX_ZERO; |
3213 | |
	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
	    "flags: %d", ptr, size, extra, flags);
3216 | |
3217 | assert(ptr != NULL); |
3218 | assert(size != 0); |
3219 | assert(SIZE_T_MAX - size >= extra); |
3220 | assert(malloc_initialized() || IS_INITIALIZER); |
3221 | tsd = tsd_fetch(); |
3222 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3223 | |
3224 | alloc_ctx_t alloc_ctx; |
3225 | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); |
3226 | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, |
3227 | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); |
3228 | assert(alloc_ctx.szind != SC_NSIZES); |
3229 | old_usize = sz_index2size(alloc_ctx.szind); |
3230 | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); |
3231 | /* |
3232 | * The API explicitly absolves itself of protecting against (size + |
3233 | * extra) numerical overflow, but we may need to clamp extra to avoid |
3234 | * exceeding SC_LARGE_MAXCLASS. |
3235 | * |
3236 | * Ordinarily, size limit checking is handled deeper down, but here we |
3237 | * have to check as part of (size + extra) clamping, since we need the |
3238 | * clamped value in the above helper functions. |
3239 | */ |
3240 | if (unlikely(size > SC_LARGE_MAXCLASS)) { |
3241 | usize = old_usize; |
3242 | goto label_not_resized; |
3243 | } |
3244 | if (unlikely(SC_LARGE_MAXCLASS - size < extra)) { |
3245 | extra = SC_LARGE_MAXCLASS - size; |
3246 | } |
3247 | |
3248 | if (config_prof && opt_prof) { |
3249 | usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, |
3250 | alignment, zero, &alloc_ctx); |
3251 | } else { |
3252 | usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, |
3253 | extra, alignment, zero); |
3254 | } |
3255 | if (unlikely(usize == old_usize)) { |
3256 | goto label_not_resized; |
3257 | } |
3258 | |
3259 | if (config_stats) { |
3260 | *tsd_thread_allocatedp_get(tsd) += usize; |
3261 | *tsd_thread_deallocatedp_get(tsd) += old_usize; |
3262 | } |
3263 | label_not_resized: |
3264 | if (unlikely(!tsd_fast(tsd))) { |
3265 | uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags}; |
3266 | hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, |
3267 | usize, (uintptr_t)usize, args); |
3268 | } |
3269 | |
3270 | UTRACE(ptr, size, ptr); |
3271 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3272 | |
	LOG("core.xallocx.exit", "result: %zu", usize);
3274 | return usize; |
3275 | } |
3276 | |
3277 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW |
3278 | JEMALLOC_ATTR(pure) |
3279 | je_sallocx(const void *ptr, int flags) { |
3280 | size_t usize; |
3281 | tsdn_t *tsdn; |
3282 | |
	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
3284 | |
3285 | assert(malloc_initialized() || IS_INITIALIZER); |
3286 | assert(ptr != NULL); |
3287 | |
3288 | tsdn = tsdn_fetch(); |
3289 | check_entry_exit_locking(tsdn); |
3290 | |
3291 | if (config_debug || force_ivsalloc) { |
3292 | usize = ivsalloc(tsdn, ptr); |
3293 | assert(force_ivsalloc || usize != 0); |
3294 | } else { |
3295 | usize = isalloc(tsdn, ptr); |
3296 | } |
3297 | |
3298 | check_entry_exit_locking(tsdn); |
3299 | |
	LOG("core.sallocx.exit", "result: %zu", usize);
3301 | return usize; |
3302 | } |
3303 | |
3304 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW |
3305 | je_dallocx(void *ptr, int flags) { |
	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
3307 | |
3308 | assert(ptr != NULL); |
3309 | assert(malloc_initialized() || IS_INITIALIZER); |
3310 | |
3311 | tsd_t *tsd = tsd_fetch(); |
3312 | bool fast = tsd_fast(tsd); |
3313 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3314 | |
3315 | tcache_t *tcache; |
3316 | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { |
3317 | /* Not allowed to be reentrant and specify a custom tcache. */ |
3318 | assert(tsd_reentrancy_level_get(tsd) == 0); |
3319 | if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { |
3320 | tcache = NULL; |
3321 | } else { |
3322 | tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); |
3323 | } |
3324 | } else { |
3325 | if (likely(fast)) { |
3326 | tcache = tsd_tcachep_get(tsd); |
3327 | assert(tcache == tcache_get(tsd)); |
3328 | } else { |
3329 | if (likely(tsd_reentrancy_level_get(tsd) == 0)) { |
3330 | tcache = tcache_get(tsd); |
3331 | } else { |
3332 | tcache = NULL; |
3333 | } |
3334 | } |
3335 | } |
3336 | |
3337 | UTRACE(ptr, 0, 0); |
3338 | if (likely(fast)) { |
3339 | tsd_assert_fast(tsd); |
3340 | ifree(tsd, ptr, tcache, false); |
3341 | } else { |
3342 | uintptr_t args_raw[3] = {(uintptr_t)ptr, flags}; |
3343 | hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw); |
3344 | ifree(tsd, ptr, tcache, true); |
3345 | } |
3346 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3347 | |
	LOG("core.dallocx.exit", "");
3349 | } |
3350 | |
3351 | JEMALLOC_ALWAYS_INLINE size_t |
3352 | inallocx(tsdn_t *tsdn, size_t size, int flags) { |
3353 | check_entry_exit_locking(tsdn); |
3354 | |
3355 | size_t usize; |
3356 | if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { |
3357 | usize = sz_s2u(size); |
3358 | } else { |
3359 | usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); |
3360 | } |
3361 | check_entry_exit_locking(tsdn); |
3362 | return usize; |
3363 | } |
3364 | |
3365 | JEMALLOC_EXPORT void JEMALLOC_NOTHROW |
3366 | je_sdallocx(void *ptr, size_t size, int flags) { |
3367 | assert(ptr != NULL); |
3368 | assert(malloc_initialized() || IS_INITIALIZER); |
3369 | |
	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3371 | size, flags); |
3372 | |
3373 | tsd_t *tsd = tsd_fetch(); |
3374 | bool fast = tsd_fast(tsd); |
3375 | size_t usize = inallocx(tsd_tsdn(tsd), size, flags); |
3376 | assert(usize == isalloc(tsd_tsdn(tsd), ptr)); |
3377 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3378 | |
3379 | tcache_t *tcache; |
3380 | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { |
3381 | /* Not allowed to be reentrant and specify a custom tcache. */ |
3382 | assert(tsd_reentrancy_level_get(tsd) == 0); |
3383 | if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { |
3384 | tcache = NULL; |
3385 | } else { |
3386 | tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); |
3387 | } |
3388 | } else { |
3389 | if (likely(fast)) { |
3390 | tcache = tsd_tcachep_get(tsd); |
3391 | assert(tcache == tcache_get(tsd)); |
3392 | } else { |
3393 | if (likely(tsd_reentrancy_level_get(tsd) == 0)) { |
3394 | tcache = tcache_get(tsd); |
3395 | } else { |
3396 | tcache = NULL; |
3397 | } |
3398 | } |
3399 | } |
3400 | |
3401 | UTRACE(ptr, 0, 0); |
3402 | if (likely(fast)) { |
3403 | tsd_assert_fast(tsd); |
3404 | isfree(tsd, ptr, usize, tcache, false); |
3405 | } else { |
3406 | uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags}; |
3407 | hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw); |
3408 | isfree(tsd, ptr, usize, tcache, true); |
3409 | } |
3410 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3411 | |
	LOG("core.sdallocx.exit", "");
3413 | } |
3414 | |
3415 | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW |
3416 | JEMALLOC_ATTR(pure) |
3417 | je_nallocx(size_t size, int flags) { |
3418 | size_t usize; |
3419 | tsdn_t *tsdn; |
3420 | |
3421 | assert(size != 0); |
3422 | |
3423 | if (unlikely(malloc_init())) { |
		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3425 | return 0; |
3426 | } |
3427 | |
3428 | tsdn = tsdn_fetch(); |
3429 | check_entry_exit_locking(tsdn); |
3430 | |
3431 | usize = inallocx(tsdn, size, flags); |
3432 | if (unlikely(usize > SC_LARGE_MAXCLASS)) { |
		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3434 | return 0; |
3435 | } |
3436 | |
3437 | check_entry_exit_locking(tsdn); |
	LOG("core.nallocx.exit", "result: %zu", usize);
3439 | return usize; |
3440 | } |
3441 | |
3442 | JEMALLOC_EXPORT int JEMALLOC_NOTHROW |
3443 | je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, |
3444 | size_t newlen) { |
3445 | int ret; |
3446 | tsd_t *tsd; |
3447 | |
	LOG("core.mallctl.entry", "name: %s", name);
3449 | |
3450 | if (unlikely(malloc_init())) { |
		LOG("core.mallctl.exit", "result: %d", EAGAIN);
3452 | return EAGAIN; |
3453 | } |
3454 | |
3455 | tsd = tsd_fetch(); |
3456 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3457 | ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); |
3458 | check_entry_exit_locking(tsd_tsdn(tsd)); |
3459 | |
3460 | LOG("core.mallctl.exit" , "result: %d" , ret); |
3461 | return ret; |
3462 | } |
3463 | |
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	LOG("core.mallctlnametomib.entry", "name: %s", name);

	if (unlikely(malloc_init())) {
		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd_t *tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_nametomib(tsd, name, mibp, miblenp);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.mallctlnametomib.exit", "result: %d", ret);
	return ret;
}

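/*
 * Illustrative sketch of the nametomib/bymib pair (mallctlnametomib()
 * above, mallctlbymib() below): translate a name to a MIB once, then
 * reuse the MIB with one varying component for repeated lookups.  Here
 * nbins is assumed to have been read via the "arenas.nbins" control:
 *
 *	size_t mib[4];
 *	size_t miblen = sizeof(mib) / sizeof(size_t);
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (unsigned i = 0; i < nbins; i++) {
 *		size_t bin_size, len = sizeof(bin_size);
 *		mib[2] = i;
 *		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *	}
 */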
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	tsd_t *tsd;

	LOG("core.mallctlbymib.entry", "");

	if (unlikely(malloc_init())) {
		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	check_entry_exit_locking(tsd_tsdn(tsd));
	LOG("core.mallctlbymib.exit", "result: %d", ret);
	return ret;
}

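/*
 * Illustrative usage sketch: a NULL write callback routes output through
 * malloc_message() (stderr by default), and opts can alter the format,
 * e.g. "J" for JSON:
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 *	malloc_stats_print(NULL, NULL, "J");
 */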
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts) {
	tsdn_t *tsdn;

	LOG("core.malloc_stats_print.entry", "");

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);
	stats_print(write_cb, cbopaque, opts);
	check_entry_exit_locking(tsdn);
	LOG("core.malloc_stats_print.exit", "");
}

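/*
 * Illustrative usage sketch: the usable size may exceed the requested
 * size, and the entire usable size may be written without resizing:
 *
 *	void *p = malloc(100);
 *	size_t usize = malloc_usable_size(p);
 *
 * For non-NULL p, usize is at least the requested 100 bytes.
 */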
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
	size_t ret;
	tsdn_t *tsdn;

	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);

	assert(malloc_initialized() || IS_INITIALIZER);

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);

	if (unlikely(ptr == NULL)) {
		ret = 0;
	} else {
		if (config_debug || force_ivsalloc) {
			ret = ivsalloc(tsdn, ptr);
			assert(force_ivsalloc || ret != 0);
		} else {
			ret = isalloc(tsdn, ptr);
		}
	}

	check_entry_exit_locking(tsdn);
	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
	return ret;
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

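/*
 * Registration sketch (illustrative; the actual registration happens during
 * initialization, as the comment below notes): the three handlers bracket
 * fork() so that no allocator mutex is held across it:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */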
/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator. Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time. The following library
 * constructor is a partial solution to this problem. It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void) {
	malloc_init();
}
#endif
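
/*
 * Sketch of the race described above (illustrative only; worker_that_mallocs
 * is a hypothetical thread function). Without eager initialization, the child
 * can deadlock if fork() lands while the new thread holds an
 * initialization-time mutex:
 *
 *	pthread_create(&thd, NULL, worker_that_mallocs, NULL);
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		malloc(1);	<-- may block forever in the child
 *	}
 */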

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd_witness_tsdp_get(tsd));
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	tcache_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	if (have_background_thread) {
		background_thread_prefork0(tsd_tsdn(tsd));
	}
	prof_prefork0(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_prefork1(tsd_tsdn(tsd));
	}
	/* Break arena prefork into stages to preserve lock order. */
	for (i = 0; i < 8; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				case 3:
					arena_prefork3(tsd_tsdn(tsd), arena);
					break;
				case 4:
					arena_prefork4(tsd_tsdn(tsd), arena);
					break;
				case 5:
					arena_prefork5(tsd_tsdn(tsd), arena);
					break;
				case 6:
					arena_prefork6(tsd_tsdn(tsd), arena);
					break;
				case 7:
					arena_prefork7(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	prof_prefork1(tsd_tsdn(tsd));
	tsd_prefork(tsd);
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	tsd_postfork_parent(tsd);

	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed. */
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_parent(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_parent(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_parent(tsd_tsdn(tsd));
	ctl_postfork_parent(tsd_tsdn(tsd));
}

void
jemalloc_postfork_child(void) {
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	tsd_postfork_child(tsd);

	witness_postfork_child(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed. */
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_child(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_child(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_child(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_child(tsd_tsdn(tsd));
	ctl_postfork_child(tsd_tsdn(tsd));
}

/******************************************************************************/
