#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/emitter.h"

/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
/*
 * We have a circular dependency -- jemalloc_internal.h tells us if we should
 * use libgcc's unwinding functionality, but after we've included that, we've
 * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
 */
#undef _Unwind_Backtrace
#include <unwind.h>
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#endif

/******************************************************************************/
/* Data. */

bool opt_prof = false;
bool opt_prof_active = true;
bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
bool opt_prof_log = false;
char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool prof_active;
static malloc_mutex_t prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool prof_thread_active_init;
static malloc_mutex_t prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool prof_gdump_val;
static malloc_mutex_t prof_gdump_mtx;

uint64_t prof_interval = 0;

size_t lg_prof_sample;

typedef enum prof_logging_state_e prof_logging_state_t;
enum prof_logging_state_e {
	prof_logging_state_stopped,
	prof_logging_state_started,
	prof_logging_state_dumping
};

/*
 * - stopped: log_start never called, or previous log_stop has completed.
 * - started: log_start called, log_stop not called yet. Allocations are logged.
 * - dumping: log_stop called but not finished; samples are not logged anymore.
 */
prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
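
/*
 * A sketch of the transitions (driven by prof_log_start()/prof_log_stop()):
 *
 *   stopped --log_start--> started --log_stop--> dumping --dump done--> stopped
 */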

#ifdef JEMALLOC_JET
static bool prof_log_dummy = false;
#endif

/* Incremented for every log file that is output. */
static uint64_t log_seq = 0;
static char log_filename[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Timestamp for most recent call to log_start(). */
static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER;

/* Increment these when adding to the log_bt and log_thr linked lists. */
static size_t log_bt_index = 0;
static size_t log_thr_index = 0;

/* Linked list node definitions. These are only used in prof.c. */
typedef struct prof_bt_node_s prof_bt_node_t;

struct prof_bt_node_s {
	prof_bt_node_t *next;
	size_t index;
	prof_bt_t bt;
	/* Variable size backtrace vector pointed to by bt. */
	void *vec[1];
};

typedef struct prof_thr_node_s prof_thr_node_t;

struct prof_thr_node_s {
	prof_thr_node_t *next;
	size_t index;
	uint64_t thr_uid;
	/* Variable size based on thr_name_sz. */
	char name[1];
};

typedef struct prof_alloc_node_s prof_alloc_node_t;

/* This is output when logging sampled allocations. */
struct prof_alloc_node_s {
	prof_alloc_node_t *next;
	/* Indices into an array of thread data. */
	size_t alloc_thr_ind;
	size_t free_thr_ind;

	/* Indices into an array of backtraces. */
	size_t alloc_bt_ind;
	size_t free_bt_ind;

	uint64_t alloc_time_ns;
	uint64_t free_time_ns;

	size_t usize;
};
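
/*
 * Read as a cross-reference into the lists above: "thread #alloc_thr_ind
 * allocated usize bytes at backtrace #alloc_bt_ind at alloc_time_ns, and
 * thread #free_thr_ind freed them at backtrace #free_bt_ind at free_time_ns".
 */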

/*
 * Created on the first call to prof_log_start and deleted on prof_log_stop.
 * These are the backtraces and threads that have already been logged by an
 * allocation.
 */
static bool log_tables_initialized = false;
static ckh_t log_bt_node_set;
static ckh_t log_thr_node_set;

/* Store linked lists for logged data. */
static prof_bt_node_t *log_bt_first = NULL;
static prof_bt_node_t *log_bt_last = NULL;
static prof_thr_node_t *log_thr_first = NULL;
static prof_thr_node_t *log_thr_last = NULL;
static prof_alloc_node_t *log_alloc_first = NULL;
static prof_alloc_node_t *log_alloc_last = NULL;

/* Protects the prof_logging_state and any log_{...} variable. */
static malloc_mutex_t log_mtx;

/*
 * Table of mutexes that are shared among gctx's. These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's. No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
static malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;
/* Non static to enable profiling. */
malloc_mutex_t bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;

static uint64_t next_thr_uid;
static malloc_mutex_t next_thr_uid_mtx;

static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t prof_dump_mtx;
static char prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static size_t prof_dump_buf_end;
static int prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);

/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
static bool prof_thr_node_keycomp(const void *k1, const void *k2);
static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
static bool prof_bt_node_keycomp(const void *k1, const void *k2);

/******************************************************************************/
/* Red-black trees. */

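/*
 * The comparators below use the branchless idiom (a > b) - (a < b), which
 * evaluates to -1, 0, or 1 and, unlike plain subtraction, cannot overflow
 * for 64-bit keys.
 */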
static int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
	uint64_t a_thr_uid = a->thr_uid;
	uint64_t b_thr_uid = b->thr_uid;
	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
	if (ret == 0) {
		uint64_t a_thr_discrim = a->thr_discrim;
		uint64_t b_thr_discrim = b->thr_discrim;
		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
		    b_thr_discrim);
		if (ret == 0) {
			uint64_t a_tctx_uid = a->tctx_uid;
			uint64_t b_tctx_uid = b->tctx_uid;
			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
			    b_tctx_uid);
		}
	}
	return ret;
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

static int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
	unsigned a_len = a->bt.len;
	unsigned b_len = b->bt.len;
	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
	if (ret == 0) {
		ret = (a_len > b_len) - (a_len < b_len);
	}
	return ret;
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

static int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
	int ret;
	uint64_t a_uid = a->thr_uid;
	uint64_t b_uid = b->thr_uid;

	ret = ((a_uid > b_uid) - (a_uid < b_uid));
	if (ret == 0) {
		uint64_t a_discrim = a->thr_discrim;
		uint64_t b_discrim = b->thr_discrim;

		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
	}
	return ret;
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (updated) {
		/*
		 * Compute a new sample threshold. This isn't very important in
		 * practice, because this function is rarely executed, so the
		 * potential for sample bias is minimal except in contrived
		 * programs.
		 */
		tdata = prof_tdata_get(tsd, true);
		if (tdata != NULL) {
			prof_sample_threshold_update(tdata);
		}
	}

	if ((uintptr_t)tctx > (uintptr_t)1U) {
		malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
		tctx->prepared = false;
		if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
			prof_tctx_destroy(tsd, tctx);
		} else {
			malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
		}
	}
}

void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx) {
	prof_tctx_set(tsdn, ptr, usize, NULL, tctx);

	/*
	 * Get the current time and set this in the extent_t. We'll read this
	 * when free() is called.
	 */
	nstime_t t = NSTIME_ZERO_INITIALIZER;
	nstime_update(&t);
	prof_alloc_time_set(tsdn, ptr, NULL, t);

	malloc_mutex_lock(tsdn, tctx->tdata->lock);
	tctx->cnts.curobjs++;
	tctx->cnts.curbytes += usize;
	if (opt_prof_accum) {
		tctx->cnts.accumobjs++;
		tctx->cnts.accumbytes += usize;
	}
	tctx->prepared = false;
	malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}

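/*
 * Map bt to a stable, dense index for the log. The first time a backtrace
 * is seen it receives the next sequential index, and a deep copy of it is
 * appended to the log_bt list; later lookups return the cached index.
 * prof_log_thr_index() below does the same for threads, keyed on thr_uid.
 */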
static size_t
prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
	assert(prof_logging_state == prof_logging_state_started);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);

	prof_bt_node_t dummy_node;
	dummy_node.bt = *bt;
	prof_bt_node_t *node;

	/* See if this backtrace is already cached in the table. */
	if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
	    (void **)(&node), NULL)) {
		size_t sz = offsetof(prof_bt_node_t, vec) +
		    (bt->len * sizeof(void *));
		prof_bt_node_t *new_node = (prof_bt_node_t *)
		    ialloc(tsd, sz, sz_size2index(sz), false, true);
		if (log_bt_first == NULL) {
			log_bt_first = new_node;
			log_bt_last = new_node;
		} else {
			log_bt_last->next = new_node;
			log_bt_last = new_node;
		}

		new_node->next = NULL;
		new_node->index = log_bt_index;
		/*
		 * Copy the backtrace: bt is inside a tdata or gctx, which
		 * might die before prof_log_stop is called.
		 */
		new_node->bt.len = bt->len;
		memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
		new_node->bt.vec = new_node->vec;

		log_bt_index++;
		ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
		return new_node->index;
	} else {
		return node->index;
	}
}

static size_t
prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
	assert(prof_logging_state == prof_logging_state_started);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);

	prof_thr_node_t dummy_node;
	dummy_node.thr_uid = thr_uid;
	prof_thr_node_t *node;

	/* See if this thread is already cached in the table. */
	if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
	    (void **)(&node), NULL)) {
		size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
		prof_thr_node_t *new_node = (prof_thr_node_t *)
		    ialloc(tsd, sz, sz_size2index(sz), false, true);
		if (log_thr_first == NULL) {
			log_thr_first = new_node;
			log_thr_last = new_node;
		} else {
			log_thr_last->next = new_node;
			log_thr_last = new_node;
		}

		new_node->next = NULL;
		new_node->index = log_thr_index;
		new_node->thr_uid = thr_uid;
		strcpy(new_node->name, name);

		log_thr_index++;
		ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
		return new_node->index;
	} else {
		return node->index;
	}
}

static void
prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
	if (cons_tdata == NULL) {
		/*
		 * We decide not to log these allocations. cons_tdata will be
		 * NULL only when the current thread is in a weird state (e.g.
		 * it's being destroyed).
		 */
		return;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);

	if (prof_logging_state != prof_logging_state_started) {
		goto label_done;
	}

	if (!log_tables_initialized) {
		bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
		    prof_bt_node_hash, prof_bt_node_keycomp);
		bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
		    prof_thr_node_hash, prof_thr_node_keycomp);
		if (err1 || err2) {
			goto label_done;
		}
		log_tables_initialized = true;
	}

	nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr,
	    (alloc_ctx_t *)NULL);
	nstime_t free_time = NSTIME_ZERO_INITIALIZER;
	nstime_update(&free_time);

	prof_alloc_node_t *new_node = (prof_alloc_node_t *)
	    ialloc(tsd, sizeof(prof_alloc_node_t),
	    sz_size2index(sizeof(prof_alloc_node_t)), false, true);
	if (new_node == NULL) {
		/* Skip logging this sample rather than dereference NULL. */
		goto label_done;
	}

	const char *prod_thr_name = (tctx->tdata->thread_name == NULL) ?
	    "" : tctx->tdata->thread_name;
	const char *cons_thr_name = prof_thread_name_get(tsd);

	prof_bt_t bt;
	/* Initialize the backtrace, using the buffer in tdata to store it. */
	bt_init(&bt, cons_tdata->vec);
	prof_backtrace(&bt);
	prof_bt_t *cons_bt = &bt;

	/* We haven't destroyed tctx yet, so gctx should be good to read. */
	prof_bt_t *prod_bt = &tctx->gctx->bt;

	new_node->next = NULL;
	new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
	    prod_thr_name);
	new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
	    cons_thr_name);
	new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
	new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
	new_node->alloc_time_ns = nstime_ns(&alloc_time);
	new_node->free_time_ns = nstime_ns(&free_time);
	new_node->usize = usize;

	if (log_alloc_first == NULL) {
		log_alloc_first = new_node;
		log_alloc_last = new_node;
	} else {
		log_alloc_last->next = new_node;
		log_alloc_last = new_node;
	}

label_done:
	malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
}

void
prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx) {
	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);

	assert(tctx->cnts.curobjs > 0);
	assert(tctx->cnts.curbytes >= usize);
	tctx->cnts.curobjs--;
	tctx->cnts.curbytes -= usize;

	prof_try_log(tsd, ptr, usize, tctx);

	if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
		prof_tctx_destroy(tsd, tctx);
	} else {
		malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
	}
}

void
bt_init(prof_bt_t *bt, void **vec) {
	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

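/*
 * prof_enter()/prof_leave() bracket all accesses to bt2gctx. Dump requests
 * that arrive while bt2gctx_mtx is held are deferred via
 * tdata->enq_{idump,gdump} and replayed by prof_leave(), e.g.:
 *
 *   prof_enter(tsd, tdata);
 *   ... look up or mutate bt2gctx ...
 *   prof_leave(tsd, tdata);     (may run a deferred prof_idump())
 */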
static void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	if (tdata != NULL) {
		assert(!tdata->enq);
		tdata->enq = true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

static void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	if (tdata != NULL) {
		bool idump, gdump;

		assert(tdata->enq);
		tdata->enq = false;
		idump = tdata->enq_idump;
		tdata->enq_idump = false;
		gdump = tdata->enq_gdump;
		tdata->enq_gdump = false;

		if (idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		if (gdump) {
			prof_gdump(tsd_tsdn(tsd));
		}
	}
}

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt) {
	int nframes;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
	if (nframes <= 0) {
		return;
	}
	bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
	cassert(config_prof);

	return _URC_NO_REASON;
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
	void *ip;

	cassert(config_prof);

	ip = (void *)_Unwind_GetIP(context);
	if (ip == NULL) {
		return _URC_END_OF_STACK;
	}
	data->bt->vec[data->bt->len] = ip;
	data->bt->len++;
	if (data->bt->len == data->max) {
		return _URC_END_OF_STACK;
	}

	return _URC_NO_REASON;
}

void
prof_backtrace(prof_bt_t *bt) {
	prof_unwind_data_t data = {bt, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt) {
#define BT_FRAME(i)							\
	if ((i) < PROF_BT_MAX) {					\
		void *p;						\
		if (__builtin_frame_address(i) == 0) {			\
			return;						\
		}							\
		p = __builtin_return_address(i);			\
		if (p == NULL) {					\
			return;						\
		}							\
		bt->vec[(i)] = p;					\
		bt->len = (i) + 1;					\
	} else {							\
		return;							\
	}

	cassert(config_prof);

	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	BT_FRAME(3)
	BT_FRAME(4)
	BT_FRAME(5)
	BT_FRAME(6)
	BT_FRAME(7)
	BT_FRAME(8)
	BT_FRAME(9)

	BT_FRAME(10)
	BT_FRAME(11)
	BT_FRAME(12)
	BT_FRAME(13)
	BT_FRAME(14)
	BT_FRAME(15)
	BT_FRAME(16)
	BT_FRAME(17)
	BT_FRAME(18)
	BT_FRAME(19)

	BT_FRAME(20)
	BT_FRAME(21)
	BT_FRAME(22)
	BT_FRAME(23)
	BT_FRAME(24)
	BT_FRAME(25)
	BT_FRAME(26)
	BT_FRAME(27)
	BT_FRAME(28)
	BT_FRAME(29)

	BT_FRAME(30)
	BT_FRAME(31)
	BT_FRAME(32)
	BT_FRAME(33)
	BT_FRAME(34)
	BT_FRAME(35)
	BT_FRAME(36)
	BT_FRAME(37)
	BT_FRAME(38)
	BT_FRAME(39)

	BT_FRAME(40)
	BT_FRAME(41)
	BT_FRAME(42)
	BT_FRAME(43)
	BT_FRAME(44)
	BT_FRAME(45)
	BT_FRAME(46)
	BT_FRAME(47)
	BT_FRAME(48)
	BT_FRAME(49)

	BT_FRAME(50)
	BT_FRAME(51)
	BT_FRAME(52)
	BT_FRAME(53)
	BT_FRAME(54)
	BT_FRAME(55)
	BT_FRAME(56)
	BT_FRAME(57)
	BT_FRAME(58)
	BT_FRAME(59)

	BT_FRAME(60)
	BT_FRAME(61)
	BT_FRAME(62)
	BT_FRAME(63)
	BT_FRAME(64)
	BT_FRAME(65)
	BT_FRAME(66)
	BT_FRAME(67)
	BT_FRAME(68)
	BT_FRAME(69)

	BT_FRAME(70)
	BT_FRAME(71)
	BT_FRAME(72)
	BT_FRAME(73)
	BT_FRAME(74)
	BT_FRAME(75)
	BT_FRAME(76)
	BT_FRAME(77)
	BT_FRAME(78)
	BT_FRAME(79)

	BT_FRAME(80)
	BT_FRAME(81)
	BT_FRAME(82)
	BT_FRAME(83)
	BT_FRAME(84)
	BT_FRAME(85)
	BT_FRAME(86)
	BT_FRAME(87)
	BT_FRAME(88)
	BT_FRAME(89)

	BT_FRAME(90)
	BT_FRAME(91)
	BT_FRAME(92)
	BT_FRAME(93)
	BT_FRAME(94)
	BT_FRAME(95)
	BT_FRAME(96)
	BT_FRAME(97)
	BT_FRAME(98)
	BT_FRAME(99)

	BT_FRAME(100)
	BT_FRAME(101)
	BT_FRAME(102)
	BT_FRAME(103)
	BT_FRAME(104)
	BT_FRAME(105)
	BT_FRAME(106)
	BT_FRAME(107)
	BT_FRAME(108)
	BT_FRAME(109)

	BT_FRAME(110)
	BT_FRAME(111)
	BT_FRAME(112)
	BT_FRAME(113)
	BT_FRAME(114)
	BT_FRAME(115)
	BT_FRAME(116)
	BT_FRAME(117)
	BT_FRAME(118)
	BT_FRAME(119)

	BT_FRAME(120)
	BT_FRAME(121)
	BT_FRAME(122)
	BT_FRAME(123)
	BT_FRAME(124)
	BT_FRAME(125)
	BT_FRAME(126)
	BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt) {
	cassert(config_prof);
	not_reached();
}
#endif

static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
	unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);

	return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
	return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}
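
/*
 * The two choosers above implement the lock striping described earlier:
 * gctx's draw round-robin from PROF_NCTX_LOCKS leaf mutexes and tdata's
 * hash thr_uid into PROF_NTDATA_LOCKS, so these ephemeral objects never
 * have to create or destroy a mutex of their own.
 */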

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
	/*
	 * Create a single allocation that has space for vec of length bt->len.
	 */
	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
	    sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
	    true);
	if (gctx == NULL) {
		return NULL;
	}
	gctx->lock = prof_gctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_tctx_destroy()/prof_gctx_try_destroy().
	 */
	gctx->nlimbo = 1;
	tctx_tree_new(&gctx->tctxs);
	/* Duplicate bt. */
	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
	gctx->bt.vec = gctx->vec;
	gctx->bt.len = bt->len;
	return gctx;
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata) {
	cassert(config_prof);

	/*
	 * Check that gctx is still unused by any thread cache before destroying
	 * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_tctx_destroy() in order to
	 * avoid a race between the main body of prof_tctx_destroy() and entry
	 * into this function.
	 */
	prof_enter(tsd, tdata_self);
	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	assert(gctx->nlimbo != 0);
	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
		/* Remove gctx from bt2gctx. */
		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
			not_reached();
		}
		prof_leave(tsd, tdata_self);
		/* Destroy gctx. */
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
		 * prof_lookup().
		 */
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		prof_leave(tsd, tdata_self);
	}
}

static bool
prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

	if (opt_prof_accum) {
		return false;
	}
	if (tctx->cnts.curobjs != 0) {
		return false;
	}
	if (tctx->prepared) {
		return false;
	}
	return true;
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx) {
	if (opt_prof_accum) {
		return false;
	}
	if (!tctx_tree_empty(&gctx->tctxs)) {
		return false;
	}
	if (gctx->nlimbo != 0) {
		return false;
	}
	return true;
}

static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
	prof_tdata_t *tdata = tctx->tdata;
	prof_gctx_t *gctx = tctx->gctx;
	bool destroy_tdata, destroy_tctx, destroy_gctx;

	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	assert(tctx->cnts.curobjs == 0);
	assert(tctx->cnts.curbytes == 0);
	assert(!opt_prof_accum);
	assert(tctx->cnts.accumobjs == 0);
	assert(tctx->cnts.accumbytes == 0);

	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
	destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);

	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	switch (tctx->state) {
	case prof_tctx_state_nominal:
		tctx_tree_remove(&gctx->tctxs, tctx);
		destroy_tctx = true;
		if (prof_gctx_should_destroy(gctx)) {
			/*
			 * Increment gctx->nlimbo in order to keep another
			 * thread from winning the race to destroy gctx while
			 * this one has gctx->lock dropped. Without this, it
			 * would be possible for another thread to:
			 *
			 * 1) Sample an allocation associated with gctx.
			 * 2) Deallocate the sampled object.
			 * 3) Successfully prof_gctx_try_destroy(gctx).
			 *
			 * The result would be that gctx no longer exists by the
			 * time this thread accesses it in
			 * prof_gctx_try_destroy().
			 */
			gctx->nlimbo++;
			destroy_gctx = true;
		} else {
			destroy_gctx = false;
		}
		break;
	case prof_tctx_state_dumping:
		/*
		 * A dumping thread needs tctx to remain valid until dumping
		 * has finished. Change state such that the dumping thread will
		 * complete destruction during a late dump iteration phase.
		 */
		tctx->state = prof_tctx_state_purgatory;
		destroy_tctx = false;
		destroy_gctx = false;
		break;
	default:
		not_reached();
		destroy_tctx = false;
		destroy_gctx = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	if (destroy_gctx) {
		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
		    tdata);
	}

	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	if (destroy_tdata) {
		prof_tdata_destroy(tsd, tdata, false);
	}

	if (destroy_tctx) {
		idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
	}
}

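/*
 * Look up (or create) the global gctx for bt. The allocation happens
 * outside bt2gctx_mtx via a classic unlock/allocate/relock/recheck dance:
 *
 *   search -> miss -> prof_leave() -> create tgctx -> prof_enter() ->
 *   re-search -> insert tgctx (won the race) or free tgctx (lost it)
 */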
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
	union {
		prof_gctx_t *p;
		void *v;
	} gctx, tgctx;
	union {
		prof_bt_t *p;
		void *v;
	} btkey;
	bool new_gctx;

	prof_enter(tsd, tdata);
	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
		/* bt has never been seen before. Insert it. */
		prof_leave(tsd, tdata);
		tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
		if (tgctx.v == NULL) {
			return true;
		}
		prof_enter(tsd, tdata);
		if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
			gctx.p = tgctx.p;
			btkey.p = &gctx.p->bt;
			if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
				/* OOM. */
				prof_leave(tsd, tdata);
				idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
				    true, true);
				return true;
			}
			new_gctx = true;
		} else {
			new_gctx = false;
		}
	} else {
		tgctx.v = NULL;
		new_gctx = false;
	}

	if (!new_gctx) {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_tctx_destroy()/prof_gctx_try_destroy().
		 */
		malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
		gctx.p->nlimbo++;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
		new_gctx = false;

		if (tgctx.v != NULL) {
			/* Lost race to insert. */
			idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
			    true);
		}
	}
	prof_leave(tsd, tdata);

	*p_btkey = btkey.v;
	*p_gctx = gctx.p;
	*p_new_gctx = new_gctx;
	return false;
}

prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
	union {
		prof_tctx_t *p;
		void *v;
	} ret;
	prof_tdata_t *tdata;
	bool not_found;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return NULL;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
	if (!not_found) { /* Note double negative! */
		ret.p->prepared = true;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (not_found) {
		void *btkey;
		prof_gctx_t *gctx;
		bool new_gctx, error;

		/*
		 * This thread's cache lacks bt. Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
		    &new_gctx)) {
			return NULL;
		}

		/* Link a prof_tctx_t into gctx for this thread. */
		ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
		    sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
		    arena_ichoose(tsd, NULL), true);
		if (ret.p == NULL) {
			if (new_gctx) {
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			}
			return NULL;
		}
		ret.p->tdata = tdata;
		ret.p->thr_uid = tdata->thr_uid;
		ret.p->thr_discrim = tdata->thr_discrim;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		ret.p->gctx = gctx;
		ret.p->tctx_uid = tdata->tctx_uid_next++;
		ret.p->prepared = true;
		ret.p->state = prof_tctx_state_initializing;
		malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
		if (error) {
			if (new_gctx) {
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			}
			idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
			return NULL;
		}
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		ret.p->state = prof_tctx_state_nominal;
		tctx_tree_insert(&gctx->tctxs, ret.p);
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	}

	return ret.p;
}

/*
 * The bodies of this function and prof_leakcheck() are compiled out unless heap
 * profiling is enabled, so that it is possible to compile jemalloc with
 * floating point support completely disabled. Avoiding floating point code is
 * important on memory-constrained systems, but it also enables a workaround for
 * versions of glibc that don't properly save/restore floating point registers
 * during dynamic lazy symbol loading (which internally calls into whatever
 * malloc implementation happens to be integrated into the application). Note
 * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
 * memory moves, so jemalloc must be compiled with such optimizations disabled
 * (e.g. -mno-sse) in order for the workaround to be complete.
 */
void
prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
	if (!config_prof) {
		return;
	}

	if (lg_prof_sample == 0) {
		tsd_bytes_until_sample_set(tsd_fetch(), 0);
		return;
	}

	/*
	 * Compute sample interval as a geometrically distributed random
	 * variable with mean (2^lg_prof_sample).
	 *
	 *                               __        __
	 *                              |  log(u)  |                    1
	 * tdata->bytes_until_sample = | -------- |, where p = ---------------
	 *                              | log(1-p) |            lg_prof_sample
	 *                                                     2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
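	/*
	 * A back-of-the-envelope check (illustrative, assuming the default
	 * lg_prof_sample of 19, i.e. a 512 KiB mean): p = 2^-19, so a uniform
	 * draw u = 0.5 yields log(0.5) / log(1 - 2^-19) ~= 0.6931 * 2^19 ~=
	 * 363400 -- the next sample lands after roughly 355 KiB.
	 */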
	uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53);
	double u = (double)r * (1.0/9007199254740992.0L);
	uint64_t bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
	if (bytes_until_sample > SSIZE_MAX) {
		bytes_until_sample = SSIZE_MAX;
	}
	tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample);

#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return NULL;
}

size_t
prof_tdata_count(void) {
	size_t tdata_count = 0;
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	malloc_mutex_lock(tsdn, &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);

	return tdata_count;
}

size_t
prof_bt_count(void) {
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return 0;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	return bt_count;
}
#endif

static int
prof_dump_open_impl(bool propagate_err, const char *filename) {
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && !propagate_err) {
		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
		    filename);
		if (opt_abort) {
			abort();
		}
	}

	return fd;
}
prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;

static bool
prof_dump_flush(bool propagate_err) {
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (!propagate_err) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort) {
				abort();
			}
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return ret;
}

static bool
prof_dump_close(bool propagate_err) {
	bool ret;

	assert(prof_dump_fd != -1);
	ret = prof_dump_flush(propagate_err);
	close(prof_dump_fd);
	prof_dump_fd = -1;

	return ret;
}

static bool
prof_dump_write(bool propagate_err, const char *s) {
	size_t i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
			if (prof_dump_flush(propagate_err) && propagate_err) {
				return true;
			}
		}

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return false;
}

JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...) {
	bool ret;
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	ret = prof_dump_write(propagate_err, buf);

	return ret;
}

static void
prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

	malloc_mutex_lock(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
		return;
	case prof_tctx_state_nominal:
		tctx->state = prof_tctx_state_dumping;
		malloc_mutex_unlock(tsdn, tctx->gctx->lock);

		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));

		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
		if (opt_prof_accum) {
			tdata->cnt_summed.accumobjs +=
			    tctx->dump_cnts.accumobjs;
			tdata->cnt_summed.accumbytes +=
			    tctx->dump_cnts.accumbytes;
		}
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		not_reached();
	}
}

static void
prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
	malloc_mutex_assert_owner(tsdn, gctx->lock);

	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
	if (opt_prof_accum) {
		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
	}
}

static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
		break;
	default:
		not_reached();
	}

	return NULL;
}

struct prof_tctx_dump_iter_arg_s {
	tsdn_t *tsdn;
	bool propagate_err;
};

static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
	struct prof_tctx_dump_iter_arg_s *arg =
	    (struct prof_tctx_dump_iter_arg_s *)opaque;

	malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
	case prof_tctx_state_nominal:
		/* Not captured by this dump. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		if (prof_dump_printf(arg->propagate_err,
		    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
		    "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
		    tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
		    tctx->dump_cnts.accumbytes)) {
			return tctx;
		}
		break;
	default:
		not_reached();
	}
	return NULL;
}

static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;
	prof_tctx_t *ret;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return ret;
}

static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
	cassert(config_prof);

	malloc_mutex_lock(tsdn, gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(tsdn, gctx->lock);
}

struct prof_gctx_merge_iter_arg_s {
	tsdn_t *tsdn;
	size_t leak_ngctx;
};

static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	struct prof_gctx_merge_iter_arg_s *arg =
	    (struct prof_gctx_merge_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
	    (void *)arg->tsdn);
	if (gctx->cnt_summed.curobjs != 0) {
		arg->leak_ngctx++;
	}
	malloc_mutex_unlock(arg->tsdn, gctx->lock);

	return NULL;
}

static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree. Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		{
			prof_tctx_t *next;

			next = NULL;
			do {
				prof_tctx_t *to_destroy =
				    tctx_tree_iter(&gctx->tctxs, next,
				    prof_tctx_finish_iter,
				    (void *)tsd_tsdn(tsd));
				if (to_destroy != NULL) {
					next = tctx_tree_next(&gctx->tctxs,
					    to_destroy);
					tctx_tree_remove(&gctx->tctxs,
					    to_destroy);
					idalloctm(tsd_tsdn(tsd), to_destroy,
					    NULL, NULL, true, true);
				} else {
					next = NULL;
				}
			} while (next != NULL);
		}
		gctx->nlimbo--;
		if (prof_gctx_should_destroy(gctx)) {
			gctx->nlimbo++;
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
		} else {
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		}
	}
}

struct prof_tdata_merge_iter_arg_s {
	tsdn_t *tsdn;
	prof_cnt_t cnt_all;
};

static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *opaque) {
	struct prof_tdata_merge_iter_arg_s *arg =
	    (struct prof_tdata_merge_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, tdata->lock);
	if (!tdata->expired) {
		size_t tabind;
		union {
			prof_tctx_t *p;
			void *v;
		} tctx;

		tdata->dumping = true;
		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
		    &tctx.v);) {
			prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
		}

		arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
		arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
		if (opt_prof_accum) {
			arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
			arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
		}
	} else {
		tdata->dumping = false;
	}
	malloc_mutex_unlock(arg->tsdn, tdata->lock);

	return NULL;
}

static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	bool propagate_err = *(bool *)arg;

	if (!tdata->dumping) {
		return NULL;
	}

	if (prof_dump_printf(propagate_err,
	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
	    tdata->thr_uid, tdata->cnt_summed.curobjs,
	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
	    tdata->cnt_summed.accumbytes,
	    (tdata->thread_name != NULL) ? " " : "",
	    (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
		return tdata;
	}
	return NULL;
}

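/*
 * The header emitted below begins every dump; with the default sample
 * period it looks like, e.g. (counts are illustrative):
 *
 *   heap_v2/524288
 *     t*: 1574: 1290496 [0: 0]
 *     t0: 205: 167936 [0: 0]
 *     t1: 1369: 1122560 [0: 0]
 */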
static bool
prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
    const prof_cnt_t *cnt_all) {
	bool ret;

	if (prof_dump_printf(propagate_err,
	    "heap_v2/%"FMTu64"\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &tdatas_mtx);
	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
	    (void *)&propagate_err) != NULL);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);
	return ret;
}
prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;

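/*
 * Each gctx record pairs a backtrace line with its summed counts, e.g.
 * (addresses and counts are illustrative):
 *
 *   @ 0x7f2b4c0a1b2c 0x7f2b4c09f3e0 0x55d8a1c04f10
 *     t*: 3: 98304 [0: 0]
 *     t1: 3: 98304 [0: 0]
 */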
static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
    const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
	bool ret;
	unsigned i;
	struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;

	cassert(config_prof);
	malloc_mutex_assert_owner(tsdn, gctx->lock);

	/* Avoid dumping gctx's that have no useful data. */
	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
		assert(gctx->cnt_summed.curobjs == 0);
		assert(gctx->cnt_summed.curbytes == 0);
		assert(gctx->cnt_summed.accumobjs == 0);
		assert(gctx->cnt_summed.accumbytes == 0);
		ret = false;
		goto label_return;
	}

	if (prof_dump_printf(propagate_err, "@")) {
		ret = true;
		goto label_return;
	}
	for (i = 0; i < bt->len; i++) {
		if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
		    (uintptr_t)bt->vec[i])) {
			ret = true;
			goto label_return;
		}
	}

	if (prof_dump_printf(propagate_err,
	    "\n"
	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
		ret = true;
		goto label_return;
	}

	prof_tctx_dump_iter_arg.tsdn = tsdn;
	prof_tctx_dump_iter_arg.propagate_err = propagate_err;
	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
	    (void *)&prof_tctx_dump_iter_arg) != NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return ret;
}

#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...) {
	int mfd;
	va_list ap;
	char filename[PATH_MAX + 1];

	va_start(ap, format);
	malloc_vsnprintf(filename, sizeof(filename), format, ap);
	va_end(ap);

#if defined(O_CLOEXEC)
	mfd = open(filename, O_RDONLY | O_CLOEXEC);
#else
	mfd = open(filename, O_RDONLY);
	if (mfd != -1) {
		fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
	}
#endif

	return mfd;
}
#endif

static int
prof_getpid(void) {
#ifdef _WIN32
	return GetCurrentProcessId();
#else
	return getpid();
#endif
}

static bool
prof_dump_maps(bool propagate_err) {
	bool ret;
	int mfd;

	cassert(config_prof);
#ifdef __FreeBSD__
	mfd = prof_open_maps("/proc/curproc/map");
#elif defined(_WIN32)
	mfd = -1; /* Not implemented. */
#else
	{
		int pid = prof_getpid();

		mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
		if (mfd == -1) {
			mfd = prof_open_maps("/proc/%d/maps", pid);
		}
	}
#endif
	if (mfd != -1) {
		ssize_t nread;

		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
		    propagate_err) {
			ret = true;
			goto label_return;
		}
		nread = 0;
		do {
			prof_dump_buf_end += nread;
			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
				/* Make space in prof_dump_buf before read(). */
				if (prof_dump_flush(propagate_err) &&
				    propagate_err) {
					ret = true;
					goto label_return;
				}
			}
			nread = malloc_read_fd(mfd,
			    &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE
			    - prof_dump_buf_end);
		} while (nread > 0);
	} else {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	if (mfd != -1) {
		close(mfd);
	}
	return ret;
}

/*
 * See prof_sample_threshold_update() comment for why the body of this function
 * is conditionally compiled.
 */
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
    const char *filename) {
#ifdef JEMALLOC_PROF
	/*
	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result
	 * may differ slightly from what jeprof reports, because here we scale
	 * the summary values, whereas jeprof scales each context individually
	 * and reports the sums of the scaled values.
	 */
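	/*
	 * Illustrative numbers: with a 512 KiB sample period and an average
	 * object size of 64 KiB, ratio = 0.125 and the scale factor is
	 * 1 / (1 - e^-0.125) ~= 8.51, so the sampled byte/object counts are
	 * inflated by ~8.51x to estimate the true totals.
	 */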
	if (cnt_all->curbytes != 0) {
		double sample_period = (double)((uint64_t)1 << lg_prof_sample);
		double ratio = (((double)cnt_all->curbytes) /
		    (double)cnt_all->curobjs) / sample_period;
		double scale_factor = 1.0 / (1.0 - exp(-ratio));
		uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
		    * scale_factor);
		uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
		    scale_factor);

		malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
		    " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
		    curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
		    1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
		    filename);
	}
#endif
}

struct prof_gctx_dump_iter_arg_s {
	tsdn_t *tsdn;
	bool propagate_err;
};

static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	prof_gctx_t *ret;
	struct prof_gctx_dump_iter_arg_s *arg =
	    (struct prof_gctx_dump_iter_arg_s *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);

	if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
	    gctxs)) {
		ret = gctx;
		goto label_return;
	}

	ret = NULL;
label_return:
	malloc_mutex_unlock(arg->tsdn, gctx->lock);
	return ret;
}

static void
prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    prof_gctx_tree_t *gctxs) {
	size_t tabind;
	union {
		prof_gctx_t *p;
		void *v;
	} gctx;

	prof_enter(tsd, tdata);

	/*
	 * Put gctx's in limbo and clear their counters in preparation for
	 * summing.
	 */
	gctx_tree_new(gctxs);
	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
	}

	/*
	 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
	 * stats and merge them into the associated gctx's.
	 */
	prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
	    (void *)prof_tdata_merge_iter_arg);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	/* Merge tctx stats into gctx's. */
	prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_merge_iter_arg->leak_ngctx = 0;
	gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
	    (void *)prof_gctx_merge_iter_arg);

	prof_leave(tsd, tdata);
}

static bool
prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck, prof_tdata_t *tdata,
    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
    struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
    prof_gctx_tree_t *gctxs) {
	/* Create dump file. */
	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
		return true;
	}

	/* Dump profile header. */
	if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
	    &prof_tdata_merge_iter_arg->cnt_all)) {
		goto label_write_error;
	}

	/* Dump per gctx profile stats. */
	prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
	prof_gctx_dump_iter_arg->propagate_err = propagate_err;
	if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
	    (void *)prof_gctx_dump_iter_arg) != NULL) {
		goto label_write_error;
	}

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err)) {
		goto label_write_error;
	}

	if (prof_dump_close(propagate_err)) {
		return true;
	}

	return false;
label_write_error:
	prof_dump_close(propagate_err);
	return true;
}

static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
    bool leakcheck) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);

	prof_gctx_tree_t gctxs;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck,
	    tdata, &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
	    &prof_gctx_dump_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
	post_reentrancy(tsd);

	if (err) {
		return true;
	}

	if (leakcheck) {
		prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
		    prof_gctx_merge_iter_arg.leak_ngctx, filename);
	}
	return false;
}

#ifdef JEMALLOC_JET
void
prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes) {
	tsd_t *tsd;
	prof_tdata_t *tdata;
	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
	prof_gctx_tree_t gctxs;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		if (curobjs != NULL) {
			*curobjs = 0;
		}
		if (curbytes != NULL) {
			*curbytes = 0;
		}
		if (accumobjs != NULL) {
			*accumobjs = 0;
		}
		if (accumbytes != NULL) {
			*accumbytes = 0;
		}
		return;
	}

	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
	    &prof_gctx_merge_iter_arg, &gctxs);
	prof_gctx_finish(tsd, &gctxs);

	if (curobjs != NULL) {
		*curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
	}
	if (curbytes != NULL) {
		*curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
	}
	if (accumobjs != NULL) {
		*accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
	}
	if (accumbytes != NULL) {
		*accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
	}
}
#endif

#define DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
#define VSEQ_INVALID		UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, uint64_t vseq) {
	cassert(config_prof);

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.<v><vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"FMTu64".%c.heap",
		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}
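
/*
 * With the default "jeprof" prefix this produces names such as
 * "jeprof.19678.0.f.heap" (final dump) or "jeprof.19678.3.i2.heap" (an
 * interval dump); the pid and the zero-based sequence numbers shown are
 * illustrative.
 */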

static void
prof_fdump(void) {
	tsd_t *tsd;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);
	assert(opt_prof_final);
	assert(opt_prof_prefix[0] != '\0');

	if (!prof_booted) {
		return;
	}
	tsd = tsd_fetch();
	assert(tsd_reentrancy_level_get(tsd) == 0);

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump_filename(filename, 'f', VSEQ_INVALID);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
	prof_dump(tsd, false, filename, opt_prof_leak);
}

bool
prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
	cassert(config_prof);

#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
	    WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
		return true;
	}
	prof_accum->accumbytes = 0;
#else
	atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
#endif
	return false;
}
2043
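/*
 * Trigger an interval-based dump ('i'). If a dump-sensitive operation is
 * already in progress on this thread (tdata->enq), the request is recorded
 * in enq_idump and handled once that operation completes, rather than
 * dumping reentrantly.
 */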
void
prof_idump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	if (tsd_reentrancy_level_get(tsd) > 0) {
		return;
	}

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_idump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		char filename[PATH_MAX + 1];
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}

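/*
 * Dump a profile on explicit request, as from the "prof.dump" mallctl;
 * returns true on failure. A caller-side sketch (hypothetical, not part of
 * this file; error handling omitted):
 *
 *	const char *fname = "/tmp/jeprof.out";
 *	mallctl("prof.dump", NULL, NULL, (void *)&fname, sizeof(fname));
 *
 * With a NULL filename, a name is generated using the 'm' sequence number.
 */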
bool
prof_mdump(tsd_t *tsd, const char *filename) {
	cassert(config_prof);
	assert(tsd_reentrancy_level_get(tsd) == 0);

	if (!opt_prof || !prof_booted) {
		return true;
	}
	char filename_buf[DUMP_FILENAME_BUFSIZE];
	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
		if (opt_prof_prefix[0] == '\0') {
			return true;
		}
		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
		prof_dump_mseq++;
		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
		filename = filename_buf;
	}
	return prof_dump(tsd, true, filename, false);
}

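/*
 * Trigger a high-water dump ('u'), fired when total mapped memory reaches a
 * new maximum while gdump is enabled. Mirrors prof_idump(), including the
 * enq_gdump deferral when a dump is already in progress.
 */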
void
prof_gdump(tsdn_t *tsdn) {
	tsd_t *tsd;
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	if (tsd_reentrancy_level_get(tsd) > 0) {
		return;
	}

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return;
	}
	if (tdata->enq) {
		tdata->enq_gdump = true;
		return;
	}

	if (opt_prof_prefix[0] != '\0') {
		char filename[DUMP_FILENAME_BUFSIZE];
		malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
		prof_dump_filename(filename, 'u', prof_dump_useq);
		prof_dump_useq++;
		malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
		prof_dump(tsd, false, filename, false);
	}
}

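/*
 * Hash and equality functions for the ckh tables. Backtraces are keyed on
 * the raw PC vector; thread nodes are keyed on thr_uid alone.
 */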
static void
prof_bt_hash(const void *key, size_t r_hash[2]) {
	prof_bt_t *bt = (prof_bt_t *)key;

	cassert(config_prof);

	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
prof_bt_keycomp(const void *k1, const void *k2) {
	const prof_bt_t *bt1 = (prof_bt_t *)k1;
	const prof_bt_t *bt2 = (prof_bt_t *)k2;

	cassert(config_prof);

	if (bt1->len != bt2->len) {
		return false;
	}
	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}

static void
prof_bt_node_hash(const void *key, size_t r_hash[2]) {
	const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
	prof_bt_hash((void *)(&bt_node->bt), r_hash);
}

static bool
prof_bt_node_keycomp(const void *k1, const void *k2) {
	const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
	const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
	return prof_bt_keycomp((void *)(&bt_node1->bt),
	    (void *)(&bt_node2->bt));
}

static void
prof_thr_node_hash(const void *key, size_t r_hash[2]) {
	const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
	hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
}

static bool
prof_thr_node_keycomp(const void *k1, const void *k2) {
	const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
	const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
	return thr_node1->thr_uid == thr_node2->thr_uid;
}

static uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn) {
	uint64_t thr_uid;

	malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
	thr_uid = next_thr_uid;
	next_thr_uid++;
	malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);

	return thr_uid;
}

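/*
 * Allocate and register a tdata for the current thread. thr_discrim
 * disambiguates successive tdatas that reuse the same thr_uid (see
 * prof_tdata_reinit()).
 */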
static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
    char *thread_name, bool active) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
	    sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (tdata == NULL) {
		return NULL;
	}

	tdata->lock = prof_tdata_mutex_choose(thr_uid);
	tdata->thr_uid = thr_uid;
	tdata->thr_discrim = thr_discrim;
	tdata->thread_name = thread_name;
	tdata->attached = true;
	tdata->expired = false;
	tdata->tctx_uid_next = 0;

	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
	    prof_bt_keycomp)) {
		idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
		return NULL;
	}

	tdata->prng_state = (uint64_t)(uintptr_t)tdata;
	prof_sample_threshold_update(tdata);

	tdata->enq = false;
	tdata->enq_idump = false;
	tdata->enq_gdump = false;

	tdata->dumping = false;
	tdata->active = active;

	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_insert(&tdatas, tdata);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	return tdata;
}

prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
	return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}

static bool
prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata,
    bool even_if_attached) {
	if (tdata->attached && !even_if_attached) {
		return false;
	}
	if (ckh_count(&tdata->bt2tctx) != 0) {
		return false;
	}
	return true;
}

static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsdn, tdata->lock);

	return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
}

static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);

	tdata_tree_remove(&tdatas, tdata);

	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
	}
	ckh_delete(tsd, &tdata->bt2tctx);
	idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
}

static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
}

static void
prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	if (tdata->attached) {
		destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
		    true);
		/*
		 * Only detach if !destroy_tdata, because detaching would allow
		 * another thread to win the race to destroy tdata.
		 */
		if (!destroy_tdata) {
			tdata->attached = false;
		}
		tsd_prof_tdata_set(tsd, NULL);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (destroy_tdata) {
		prof_tdata_destroy(tsd, tdata, true);
	}
}

prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
	uint64_t thr_uid = tdata->thr_uid;
	uint64_t thr_discrim = tdata->thr_discrim + 1;
	char *thread_name = (tdata->thread_name != NULL) ?
	    prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
	bool active = tdata->active;

	prof_tdata_detach(tsd, tdata);
	return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
	    active);
}

static bool
prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsdn, tdata->lock);
	if (!tdata->expired) {
		tdata->expired = true;
		destroy_tdata = tdata->attached ? false :
		    prof_tdata_should_destroy(tsdn, tdata, false);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsdn, tdata->lock);

	return destroy_tdata;
}

static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
    void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

	return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
}

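/*
 * Change the sample rate and expire all existing tdatas, on behalf of the
 * "prof.reset" mallctl. A caller-side sketch (hypothetical, not part of
 * this file):
 *
 *	size_t lg_sample = 19;
 *	mallctl("prof.reset", NULL, NULL, (void *)&lg_sample,
 *	    sizeof(lg_sample));
 */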
void
prof_reset(tsd_t *tsd, size_t lg_sample) {
	prof_tdata_t *next;

	assert(lg_sample < (sizeof(uint64_t) << 3));

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);

	lg_prof_sample = lg_sample;

	next = NULL;
	do {
		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
		    prof_tdata_reset_iter, (void *)tsd);
		if (to_destroy != NULL) {
			next = tdata_tree_next(&tdatas, to_destroy);
			prof_tdata_destroy_locked(tsd, to_destroy, false);
		} else {
			next = NULL;
		}
	} while (next != NULL);

	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
}

void
prof_tdata_cleanup(tsd_t *tsd) {
	prof_tdata_t *tdata;

	if (!config_prof) {
		return;
	}

	tdata = tsd_prof_tdata_get(tsd);
	if (tdata != NULL) {
		prof_tdata_detach(tsd, tdata);
	}
}

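/*
 * Mutex-protected accessors for prof_active, backing the "prof.active"
 * mallctl. A caller-side sketch (hypothetical):
 *
 *	bool active = false;
 *	mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active));
 */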
bool
prof_active_get(tsdn_t *tsdn) {
	bool prof_active_current;

	malloc_mutex_lock(tsdn, &prof_active_mtx);
	prof_active_current = prof_active;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	return prof_active_current;
}

bool
prof_active_set(tsdn_t *tsdn, bool active) {
	bool prof_active_old;

	malloc_mutex_lock(tsdn, &prof_active_mtx);
	prof_active_old = prof_active;
	prof_active = active;
	malloc_mutex_unlock(tsdn, &prof_active_mtx);
	return prof_active_old;
}

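/*
 * The helpers below are compiled for tests only (JEMALLOC_JET): they count
 * the logging linked lists, verify representation invariants, and let tests
 * suppress file output via prof_log_dummy.
 */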
#ifdef JEMALLOC_JET
size_t
prof_log_bt_count(void) {
	size_t cnt = 0;
	prof_bt_node_t *node = log_bt_first;
	while (node != NULL) {
		cnt++;
		node = node->next;
	}
	return cnt;
}

size_t
prof_log_alloc_count(void) {
	size_t cnt = 0;
	prof_alloc_node_t *node = log_alloc_first;
	while (node != NULL) {
		cnt++;
		node = node->next;
	}
	return cnt;
}

size_t
prof_log_thr_count(void) {
	size_t cnt = 0;
	prof_thr_node_t *node = log_thr_first;
	while (node != NULL) {
		cnt++;
		node = node->next;
	}
	return cnt;
}

bool
prof_log_is_logging(void) {
	return prof_logging_state == prof_logging_state_started;
}

bool
prof_log_rep_check(void) {
	if (prof_logging_state == prof_logging_state_stopped
	    && log_tables_initialized) {
		return true;
	}

	if (log_bt_last != NULL && log_bt_last->next != NULL) {
		return true;
	}
	if (log_thr_last != NULL && log_thr_last->next != NULL) {
		return true;
	}
	if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
		return true;
	}

	size_t bt_count = prof_log_bt_count();
	size_t thr_count = prof_log_thr_count();
	size_t alloc_count = prof_log_alloc_count();

	if (prof_logging_state == prof_logging_state_stopped) {
		if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
			return true;
		}
	}

	prof_alloc_node_t *node = log_alloc_first;
	while (node != NULL) {
		if (node->alloc_bt_ind >= bt_count) {
			return true;
		}
		if (node->free_bt_ind >= bt_count) {
			return true;
		}
		if (node->alloc_thr_ind >= thr_count) {
			return true;
		}
		if (node->free_thr_ind >= thr_count) {
			return true;
		}
		if (node->alloc_time_ns > node->free_time_ns) {
			return true;
		}
		node = node->next;
	}

	return false;
}

void
prof_log_dummy_set(bool new_value) {
	prof_log_dummy = new_value;
}
#endif

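/*
 * Begin allocation logging, as exposed through the experimental
 * "prof.log_start" mallctl. With a NULL filename the log is named
 * "<prefix>.<pid>.<seq>.json". Returns true on failure (prof not booted,
 * logging already running, or the name too long). A caller-side sketch
 * (hypothetical):
 *
 *	const char *fname = "/tmp/jeprof.json";
 *	mallctl("prof.log_start", NULL, NULL, (void *)&fname, sizeof(fname));
 */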
bool
prof_log_start(tsdn_t *tsdn, const char *filename) {
	if (!opt_prof || !prof_booted) {
		return true;
	}

	bool ret = false;
	size_t buf_size = PATH_MAX + 1;

	malloc_mutex_lock(tsdn, &log_mtx);

	if (prof_logging_state != prof_logging_state_stopped) {
		ret = true;
	} else if (filename == NULL) {
		/* Make default name. */
		malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json",
		    opt_prof_prefix, prof_getpid(), log_seq);
		log_seq++;
		prof_logging_state = prof_logging_state_started;
	} else if (strlen(filename) >= buf_size) {
		ret = true;
	} else {
		strcpy(log_filename, filename);
		prof_logging_state = prof_logging_state_started;
	}

	if (!ret) {
		nstime_update(&log_start_timestamp);
	}

	malloc_mutex_unlock(tsdn, &log_mtx);

	return ret;
}

/* Used as an atexit function to stop logging on exit. */
static void
prof_log_stop_final(void) {
	tsd_t *tsd = tsd_fetch();
	prof_log_stop(tsd_tsdn(tsd));
}

struct prof_emitter_cb_arg_s {
	int fd;
	ssize_t ret;
};

static void
prof_emitter_write_cb(void *opaque, const char *to_write) {
	struct prof_emitter_cb_arg_s *arg =
	    (struct prof_emitter_cb_arg_s *)opaque;
	size_t bytes = strlen(to_write);
#ifdef JEMALLOC_JET
	if (prof_log_dummy) {
		return;
	}
#endif
	arg->ret = write(arg->fd, (void *)to_write, bytes);
}

/*
 * prof_log_emit_{...} goes through the appropriate linked list, emitting
 * each node to the json and deallocating it.
 */
static void
prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
	emitter_json_array_kv_begin(emitter, "threads");
	prof_thr_node_t *thr_node = log_thr_first;
	prof_thr_node_t *thr_old_node;
	while (thr_node != NULL) {
		emitter_json_object_begin(emitter);

		emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
		    &thr_node->thr_uid);

		char *thr_name = thr_node->name;

		emitter_json_kv(emitter, "thr_name", emitter_type_string,
		    &thr_name);

		emitter_json_object_end(emitter);
		thr_old_node = thr_node;
		thr_node = thr_node->next;
		idalloc(tsd, thr_old_node);
	}
	emitter_json_array_end(emitter);
}

static void
prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
	emitter_json_array_kv_begin(emitter, "stack_traces");
	prof_bt_node_t *bt_node = log_bt_first;
	prof_bt_node_t *bt_old_node;
	/*
	 * Size the buffer for one formatted pointer: two hex digits per
	 * byte, two for the "0x" prefix, and one for the terminating '\0'.
	 */
	char buf[2 * sizeof(intptr_t) + 3];
	size_t buf_sz = sizeof(buf);
	while (bt_node != NULL) {
		emitter_json_array_begin(emitter);
		size_t i;
		for (i = 0; i < bt_node->bt.len; i++) {
			malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
			char *trace_str = buf;
			emitter_json_value(emitter, emitter_type_string,
			    &trace_str);
		}
		emitter_json_array_end(emitter);

		bt_old_node = bt_node;
		bt_node = bt_node->next;
		idalloc(tsd, bt_old_node);
	}
	emitter_json_array_end(emitter);
}

static void
prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
	emitter_json_array_kv_begin(emitter, "allocations");
	prof_alloc_node_t *alloc_node = log_alloc_first;
	prof_alloc_node_t *alloc_old_node;
	while (alloc_node != NULL) {
		emitter_json_object_begin(emitter);

		emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
		    &alloc_node->alloc_thr_ind);

		emitter_json_kv(emitter, "free_thread", emitter_type_size,
		    &alloc_node->free_thr_ind);

		emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
		    &alloc_node->alloc_bt_ind);

		emitter_json_kv(emitter, "free_trace", emitter_type_size,
		    &alloc_node->free_bt_ind);

		emitter_json_kv(emitter, "alloc_timestamp",
		    emitter_type_uint64, &alloc_node->alloc_time_ns);

		emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
		    &alloc_node->free_time_ns);

		emitter_json_kv(emitter, "usize", emitter_type_uint64,
		    &alloc_node->usize);

		emitter_json_object_end(emitter);

		alloc_old_node = alloc_node;
		alloc_node = alloc_node->next;
		idalloc(tsd, alloc_old_node);
	}
	emitter_json_array_end(emitter);
}

static void
prof_log_emit_metadata(emitter_t *emitter) {
	emitter_json_object_kv_begin(emitter, "info");

	nstime_t now = NSTIME_ZERO_INITIALIZER;

	nstime_update(&now);
	uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
	emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);

	char *vers = JEMALLOC_VERSION;
	emitter_json_kv(emitter, "version",
	    emitter_type_string, &vers);

	emitter_json_kv(emitter, "lg_sample_rate",
	    emitter_type_int, &lg_prof_sample);

	int pid = prof_getpid();
	emitter_json_kv(emitter, "pid", emitter_type_int, &pid);

	emitter_json_object_end(emitter);
}

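/*
 * Finish logging: emit the metadata, thread, trace, and allocation tables
 * to the JSON log file, tear the tables down, and return to the stopped
 * state. Returns true on failure.
 */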
bool
prof_log_stop(tsdn_t *tsdn) {
	if (!opt_prof || !prof_booted) {
		return true;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	malloc_mutex_lock(tsdn, &log_mtx);

	if (prof_logging_state != prof_logging_state_started) {
		malloc_mutex_unlock(tsdn, &log_mtx);
		return true;
	}

	/*
	 * Set the state to dumping. We'll set it to stopped when we're done.
	 * Since other threads won't be able to start/stop/log when the state
	 * is dumping, we don't have to hold the lock during the whole method.
	 */
	prof_logging_state = prof_logging_state_dumping;
	malloc_mutex_unlock(tsdn, &log_mtx);

	emitter_t emitter;

	/* Create a file. */

	int fd;
#ifdef JEMALLOC_JET
	if (prof_log_dummy) {
		fd = 0;
	} else {
		fd = creat(log_filename, 0644);
	}
#else
	fd = creat(log_filename, 0644);
#endif

	if (fd == -1) {
		malloc_printf("<jemalloc>: creat() for log file \"%s\" "
		    "failed with %d\n", log_filename, errno);
		if (opt_abort) {
			abort();
		}
		return true;
	}

	/* Emit to json. */
	struct prof_emitter_cb_arg_s arg;
	arg.fd = fd;
	emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb,
	    (void *)(&arg));

	emitter_json_object_begin(&emitter);
	prof_log_emit_metadata(&emitter);
	prof_log_emit_threads(tsd, &emitter);
	prof_log_emit_traces(tsd, &emitter);
	prof_log_emit_allocs(tsd, &emitter);
	emitter_json_object_end(&emitter);

	/* Reset global state. */
	if (log_tables_initialized) {
		ckh_delete(tsd, &log_bt_node_set);
		ckh_delete(tsd, &log_thr_node_set);
	}
	log_tables_initialized = false;
	log_bt_index = 0;
	log_thr_index = 0;
	log_bt_first = NULL;
	log_bt_last = NULL;
	log_thr_first = NULL;
	log_thr_last = NULL;
	log_alloc_first = NULL;
	log_alloc_last = NULL;

	malloc_mutex_lock(tsdn, &log_mtx);
	prof_logging_state = prof_logging_state_stopped;
	malloc_mutex_unlock(tsdn, &log_mtx);

#ifdef JEMALLOC_JET
	if (prof_log_dummy) {
		return false;
	}
#endif
	return close(fd);
}

const char *
prof_thread_name_get(tsd_t *tsd) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return "";
	}
	return (tdata->thread_name != NULL ? tdata->thread_name : "");
}

static char *
prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
	char *ret;
	size_t size;

	if (thread_name == NULL) {
		return NULL;
	}

	size = strlen(thread_name) + 1;
	if (size == 1) {
		return "";
	}

	ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (ret == NULL) {
		return NULL;
	}
	memcpy(ret, thread_name, size);
	return ret;
}

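/*
 * Set the calling thread's name, as used by the "thread.prof.name" mallctl.
 * Names are restricted to printable and blank characters. A caller-side
 * sketch (hypothetical):
 *
 *	const char *name = "worker-0";
 *	mallctl("thread.prof.name", NULL, NULL, (void *)&name, sizeof(name));
 */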
int
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
	prof_tdata_t *tdata;
	unsigned i;
	char *s;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return EAGAIN;
	}

	/* Validate input. */
	if (thread_name == NULL) {
		return EFAULT;
	}
	for (i = 0; thread_name[i] != '\0'; i++) {
		char c = thread_name[i];
		if (!isgraph(c) && !isblank(c)) {
			return EFAULT;
		}
	}

	s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
	if (s == NULL) {
		return EAGAIN;
	}

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
		tdata->thread_name = NULL;
	}
	if (strlen(s) > 0) {
		tdata->thread_name = s;
	}
	return 0;
}

bool
prof_thread_active_get(tsd_t *tsd) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return false;
	}
	return tdata->active;
}

bool
prof_thread_active_set(tsd_t *tsd, bool active) {
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return true;
	}
	tdata->active = active;
	return false;
}

bool
prof_thread_active_init_get(tsdn_t *tsdn) {
	bool active_init;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init = prof_thread_active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init;
}

bool
prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
	bool active_init_old;

	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
	active_init_old = prof_thread_active_init;
	prof_thread_active_init = active_init;
	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
	return active_init_old;
}

bool
prof_gdump_get(tsdn_t *tsdn) {
	bool prof_gdump_current;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_current = prof_gdump_val;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_current;
}

bool
prof_gdump_set(tsdn_t *tsdn, bool gdump) {
	bool prof_gdump_old;

	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
	prof_gdump_old = prof_gdump_val;
	prof_gdump_val = gdump;
	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
	return prof_gdump_old;
}

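/*
 * Boot sequence: prof_boot0() seeds the dump prefix, prof_boot1() finalizes
 * opt_prof before any arenas are initialized, and prof_boot2() creates the
 * mutexes, hash tables, and atexit() hooks.
 */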
void
prof_boot0(void) {
	cassert(config_prof);

	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
	    sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void) {
	cassert(config_prof);

	/*
	 * opt_prof must be in its final state before any arenas are
	 * initialized, so this function must be executed early.
	 */

	if (opt_prof_leak && !opt_prof) {
		/*
		 * Enable opt_prof, but in such a way that profiles are never
		 * automatically dumped.
		 */
		opt_prof = true;
		opt_prof_gdump = false;
	} else if (opt_prof) {
		if (opt_lg_prof_interval >= 0) {
			prof_interval = (((uint64_t)1U) <<
			    opt_lg_prof_interval);
		}
	}
}

bool
prof_boot2(tsd_t *tsd) {
	cassert(config_prof);

	if (opt_prof) {
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		prof_active = opt_prof_active;
		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
		    WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_gdump_val = opt_prof_gdump;
		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
		    WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		prof_thread_active_init = opt_prof_thread_active_init;
		if (malloc_mutex_init(&prof_thread_active_init_mtx,
		    "prof_thread_active_init",
		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
		    malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp)) {
			return true;
		}
		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
		    WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
			return true;
		}

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
		    WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
			return true;
		}

		next_thr_uid = 0;
		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
		    WITNESS_RANK_PROF_NEXT_THR_UID,
		    malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
		    WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
			return true;
		}
		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
		    WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
		    atexit(prof_fdump) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}

		if (opt_prof_log) {
			prof_log_start(tsd_tsdn(tsd), NULL);
		}

		if (atexit(prof_log_stop_final) != 0) {
			malloc_write("<jemalloc>: Error in atexit() "
			    "for logging\n");
			if (opt_abort) {
				abort();
			}
		}

		if (malloc_mutex_init(&log_mtx, "prof_log",
		    WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
			return true;
		}

		if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
		    prof_bt_node_hash, prof_bt_node_keycomp)) {
			return true;
		}

		if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
		    prof_thr_node_hash, prof_thr_node_keycomp)) {
			return true;
		}

		log_tables_initialized = true;

		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (gctx_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
			    WITNESS_RANK_PROF_GCTX,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}

		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
		    b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
		    CACHELINE);
		if (tdata_locks == NULL) {
			return true;
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
			    WITNESS_RANK_PROF_TDATA,
			    malloc_mutex_rank_exclusive)) {
				return true;
			}
		}
	}

#ifdef JEMALLOC_PROF_LIBGCC
	/*
	 * Cause the backtracing machinery to allocate its internal state
	 * before enabling profiling.
	 */
	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

	prof_booted = true;

	return false;
}

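/*
 * Fork handlers: the prefork functions acquire every prof mutex so none is
 * held mid-operation across fork(); postfork_parent releases them and
 * postfork_child reinitializes them, in reverse acquisition order.
 */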
void
prof_prefork0(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
		malloc_mutex_prefork(tsdn, &tdatas_mtx);
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
		}
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
		}
	}
}

void
prof_prefork1(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		malloc_mutex_prefork(tsdn, &prof_active_mtx);
		malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
	}
}

void
prof_postfork_parent(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_parent(tsdn,
		    &prof_thread_active_init_mtx);
		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
	}
}

void
prof_postfork_child(tsdn_t *tsdn) {
	if (config_prof && opt_prof) {
		unsigned i;

		malloc_mutex_postfork_child(tsdn,
		    &prof_thread_active_init_mtx);
		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
		}
		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
		}
		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
	}
}

/******************************************************************************/