#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats->*
 */
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
static ctl_stats_t *ctl_stats;
static ctl_arenas_t *ctl_arenas;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node) {
	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index) {
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node) {
	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define CTL_PROTO(n) \
static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
    const size_t *mib, size_t miblen, size_t i);
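
/*
 * Illustration only (mechanical expansion of the macro above):
 * CTL_PROTO(version) emits
 *
 *     static int version_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *         void *oldp, size_t *oldlenp, void *newp, size_t newlen);
 *
 * i.e. the uniform handler signature that ctl_byname() and ctl_bymib()
 * invoke through node->ctl once a lookup reaches a terminal node.
 */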

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_huge_threshold)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_slab_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_dirty_decay_ms)
CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_create)
CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_extents_j_ndirty)
CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
CTL_PROTO(stats_arenas_i_extents_j_nretained)
CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_dirty_decay_ms)
CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_extent_avail)
CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)

#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
CTL_PROTO(stats_##n##_num_wait) \
CTL_PROTO(stats_##n##_num_spin_acq) \
CTL_PROTO(stats_##n##_num_owner_switch) \
CTL_PROTO(stats_##n##_total_wait_time) \
CTL_PROTO(stats_##n##_max_wait_time) \
CTL_PROTO(stats_##n##_max_num_thds)

/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
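
/*
 * Sketch of the expansion above, assuming "ctl" is one of the mutexes
 * listed by MUTEX_PROF_GLOBAL_MUTEXES: OP(ctl) becomes
 * MUTEX_STATS_CTL_PROTO_GEN(mutexes_ctl), which emits one prototype per
 * counter, e.g.
 *
 *     CTL_PROTO(stats_mutexes_ctl_num_ops)
 *     CTL_PROTO(stats_mutexes_ctl_num_wait)
 *
 * backing the "stats.mutexes.ctl.*" mallctl names.
 */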

/* Per arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN

CTL_PROTO(stats_mutexes_reset)

/******************************************************************************/
/* mallctl tree. */

#define NAME(n) {true}, n
#define CHILD(t, c) \
	sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
	(ctl_node_t *)c##_node, \
	NULL
#define CTL(c) 0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i) {false}, i##_index
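
/*
 * For illustration, one tree entry expands mechanically as follows:
 * {NAME("epoch"), CTL(epoch)} becomes
 *
 *     {{true}, "epoch", 0, NULL, epoch_ctl}
 *
 * (a named terminal node), while {NAME("thread"), CHILD(named, thread)}
 * stores thread_node's element count and pointer and leaves the ctl
 * function NULL (an interior node).
 */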

static const ctl_named_node_t thread_tcache_node[] = {
	{NAME("enabled"), CTL(thread_tcache_enabled)},
	{NAME("flush"), CTL(thread_tcache_flush)}
};

static const ctl_named_node_t thread_prof_node[] = {
	{NAME("name"), CTL(thread_prof_name)},
	{NAME("active"), CTL(thread_prof_active)}
};

static const ctl_named_node_t thread_node[] = {
	{NAME("arena"), CTL(thread_arena)},
	{NAME("allocated"), CTL(thread_allocated)},
	{NAME("allocatedp"), CTL(thread_allocatedp)},
	{NAME("deallocated"), CTL(thread_deallocated)},
	{NAME("deallocatedp"), CTL(thread_deallocatedp)},
	{NAME("tcache"), CHILD(named, thread_tcache)},
	{NAME("prof"), CHILD(named, thread_prof)}
};

static const ctl_named_node_t config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"), CTL(config_debug)},
	{NAME("fill"), CTL(config_fill)},
	{NAME("lazy_lock"), CTL(config_lazy_lock)},
	{NAME("malloc_conf"), CTL(config_malloc_conf)},
	{NAME("prof"), CTL(config_prof)},
	{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"), CTL(config_stats)},
	{NAME("utrace"), CTL(config_utrace)},
	{NAME("xmalloc"), CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"), CTL(opt_abort)},
	{NAME("abort_conf"), CTL(opt_abort_conf)},
	{NAME("metadata_thp"), CTL(opt_metadata_thp)},
	{NAME("retain"), CTL(opt_retain)},
	{NAME("dss"), CTL(opt_dss)},
	{NAME("narenas"), CTL(opt_narenas)},
	{NAME("percpu_arena"), CTL(opt_percpu_arena)},
	{NAME("experimental_huge_threshold"), CTL(opt_huge_threshold)},
	{NAME("background_thread"), CTL(opt_background_thread)},
	{NAME("max_background_threads"), CTL(opt_max_background_threads)},
	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"), CTL(opt_stats_print)},
	{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
	{NAME("junk"), CTL(opt_junk)},
	{NAME("zero"), CTL(opt_zero)},
	{NAME("utrace"), CTL(opt_utrace)},
	{NAME("xmalloc"), CTL(opt_xmalloc)},
	{NAME("tcache"), CTL(opt_tcache)},
	{NAME("thp"), CTL(opt_thp)},
	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
	{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
	{NAME("prof"), CTL(opt_prof)},
	{NAME("prof_prefix"), CTL(opt_prof_prefix)},
	{NAME("prof_active"), CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"), CTL(opt_prof_gdump)},
	{NAME("prof_final"), CTL(opt_prof_final)},
	{NAME("prof_leak"), CTL(opt_prof_leak)},
	{NAME("prof_accum"), CTL(opt_prof_accum)}
};

static const ctl_named_node_t tcache_node[] = {
	{NAME("create"), CTL(tcache_create)},
	{NAME("flush"), CTL(tcache_flush)},
	{NAME("destroy"), CTL(tcache_destroy)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("initialized"), CTL(arena_i_initialized)},
	{NAME("decay"), CTL(arena_i_decay)},
	{NAME("purge"), CTL(arena_i_purge)},
	{NAME("reset"), CTL(arena_i_reset)},
	{NAME("destroy"), CTL(arena_i_destroy)},
	{NAME("dss"), CTL(arena_i_dss)},
	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
	{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
	{NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""), CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"), CTL(arenas_bin_i_size)},
	{NAME("nregs"), CTL(arenas_bin_i_nregs)},
	{NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""), CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lextent_i_node[] = {
	{NAME("size"), CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
	{NAME(""), CHILD(named, arenas_lextent_i)}
};

static const ctl_indexed_node_t arenas_lextent_node[] = {
	{INDEX(arenas_lextent_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"), CTL(arenas_narenas)},
	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
	{NAME("quantum"), CTL(arenas_quantum)},
	{NAME("page"), CTL(arenas_page)},
	{NAME("tcache_max"), CTL(arenas_tcache_max)},
	{NAME("nbins"), CTL(arenas_nbins)},
	{NAME("nhbins"), CTL(arenas_nhbins)},
	{NAME("bin"), CHILD(indexed, arenas_bin)},
	{NAME("nlextents"), CTL(arenas_nlextents)},
	{NAME("lextent"), CHILD(indexed, arenas_lextent)},
	{NAME("create"), CTL(arenas_create)},
	{NAME("lookup"), CTL(arenas_lookup)}
};

static const ctl_named_node_t prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"), CTL(prof_active)},
	{NAME("dump"), CTL(prof_dump)},
	{NAME("gdump"), CTL(prof_gdump)},
	{NAME("reset"), CTL(prof_reset)},
	{NAME("interval"), CTL(prof_interval)},
	{NAME("lg_sample"), CTL(lg_prof_sample)},
	{NAME("log_start"), CTL(prof_log_start)},
	{NAME("log_stop"), CTL(prof_log_stop)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};

#define MUTEX_PROF_DATA_NODE(prefix) \
static const ctl_named_node_t stats_##prefix##_node[] = { \
	{NAME("num_ops"), \
	    CTL(stats_##prefix##_num_ops)}, \
	{NAME("num_wait"), \
	    CTL(stats_##prefix##_num_wait)}, \
	{NAME("num_spin_acq"), \
	    CTL(stats_##prefix##_num_spin_acq)}, \
	{NAME("num_owner_switch"), \
	    CTL(stats_##prefix##_num_owner_switch)}, \
	{NAME("total_wait_time"), \
	    CTL(stats_##prefix##_total_wait_time)}, \
	{NAME("max_wait_time"), \
	    CTL(stats_##prefix##_max_wait_time)}, \
	{NAME("max_num_thds"), \
	    CTL(stats_##prefix##_max_num_thds)} \
	/* The number of currently waiting threads is not provided. */ \
};

MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
	{NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
	{NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
	{NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};

static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
	{NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
	{INDEX(stats_arenas_i_lextents_j)}
};

static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
	{NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)},
	{NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)},
	{NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)},
	{NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)},
	{NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)},
	{NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
};

static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_extents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
	{INDEX(stats_arenas_i_extents_j)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
	{NAME("uptime"), CTL(stats_arenas_i_uptime)},
	{NAME("dss"), CTL(stats_arenas_i_dss)},
	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
	{NAME("pactive"), CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
	{NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
	{NAME("mapped"), CTL(stats_arenas_i_mapped)},
	{NAME("retained"), CTL(stats_arenas_i_retained)},
	{NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)},
	{NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
	{NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
	{NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
	{NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"), CTL(stats_arenas_i_base)},
	{NAME("internal"), CTL(stats_arenas_i_internal)},
	{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
	{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
	{NAME("resident"), CTL(stats_arenas_i_resident)},
	{NAME("small"), CHILD(named, stats_arenas_i_small)},
	{NAME("large"), CHILD(named, stats_arenas_i_large)},
	{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
	{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
	{NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_background_thread_node[] = {
	{NAME("num_threads"), CTL(stats_background_thread_num_threads)},
	{NAME("num_runs"), CTL(stats_background_thread_num_runs)},
	{NAME("run_interval"), CTL(stats_background_thread_run_interval)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	{NAME("reset"), CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE

static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"), CTL(stats_allocated)},
	{NAME("active"), CTL(stats_active)},
	{NAME("metadata"), CTL(stats_metadata)},
	{NAME("metadata_thp"), CTL(stats_metadata_thp)},
	{NAME("resident"), CTL(stats_resident)},
	{NAME("mapped"), CTL(stats_mapped)},
	{NAME("retained"), CTL(stats_retained)},
	{NAME("background_thread"),
	    CHILD(named, stats_background_thread)},
	{NAME("mutexes"), CHILD(named, stats_mutexes)},
	{NAME("arenas"), CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t hooks_node[] = {
	{NAME("install"), CTL(experimental_hooks_install)},
	{NAME("remove"), CTL(experimental_hooks_remove)}
};

static const ctl_named_node_t experimental_node[] = {
	{NAME("hooks"), CHILD(named, hooks)}
};

static const ctl_named_node_t root_node[] = {
	{NAME("version"), CTL(version)},
	{NAME("epoch"), CTL(epoch)},
	{NAME("background_thread"), CTL(background_thread)},
	{NAME("max_background_threads"), CTL(max_background_threads)},
	{NAME("thread"), CHILD(named, thread)},
	{NAME("config"), CHILD(named, config)},
	{NAME("opt"), CHILD(named, opt)},
	{NAME("tcache"), CHILD(named, tcache)},
	{NAME("arena"), CHILD(indexed, arena)},
	{NAME("arenas"), CHILD(named, arenas)},
	{NAME("prof"), CHILD(named, prof)},
	{NAME("stats"), CHILD(named, stats)},
	{NAME("experimental"), CHILD(named, experimental)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""), CHILD(named, root)}
};
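
/*
 * The super_*_node wrappers give each subtree a single empty-named parent
 * so the lookup code can treat every level uniformly. A sketch of how a
 * name resolves against this tree: for "stats.arenas.0.nthreads", "stats"
 * and "arenas" match named children, "0" is handed to an index function
 * (stats_arenas_i_index), and "nthreads" lands on a terminal node whose
 * handler is stats_arenas_i_nthreads_ctl.
 */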

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

/******************************************************************************/

/*
 * Adds *src to *dst non-atomically. This is safe, since everything is
 * synchronized by the ctl mutex.
 */
static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
	atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
	*dst += *src;
#endif
}

/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	return *p;
#endif
}

static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
}

/******************************************************************************/

static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate) {
	unsigned a;

	switch (i) {
	case MALLCTL_ARENAS_ALL:
		a = 0;
		break;
	case MALLCTL_ARENAS_DESTROYED:
		a = 1;
		break;
	default:
		if (compat && i == ctl_arenas->narenas) {
			/*
			 * Provide deprecated backward compatibility for
			 * accessing the merged stats at index narenas rather
			 * than via MALLCTL_ARENAS_ALL. This is scheduled for
			 * removal in 6.0.0.
			 */
			a = 0;
		} else if (validate && i >= ctl_arenas->narenas) {
			a = UINT_MAX;
		} else {
			/*
			 * This function should never be called for an index
			 * more than one past the range of indices that have
			 * initialized ctl data.
			 */
			assert(i < ctl_arenas->narenas || (!validate && i ==
			    ctl_arenas->narenas));
			a = (unsigned)i + 2;
		}
		break;
	}

	return a;
}
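
/*
 * Concretely, per the switch above, the ctl arena index space is laid out
 * as: MALLCTL_ARENAS_ALL -> 0 (merged stats), MALLCTL_ARENAS_DESTROYED ->
 * 1, and a real arena index i -> i + 2.
 */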

static unsigned
arenas_i2a(size_t i) {
	return arenas_i2a_impl(i, true, false);
}

static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
	ctl_arena_t *ret;

	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			struct container_s {
				ctl_arena_t ctl_arena;
				ctl_arena_stats_t astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
			    b0get(), sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return ret;
}

static ctl_arena_t *
arenas_i(size_t i) {
	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
	assert(ret != NULL);
	return ret;
}

static void
ctl_arena_clear(ctl_arena_t *ctl_arena) {
	ctl_arena->nthreads = 0;
	ctl_arena->dss = dss_prec_names[dss_prec_limit];
	ctl_arena->dirty_decay_ms = -1;
	ctl_arena->muzzy_decay_ms = -1;
	ctl_arena->pactive = 0;
	ctl_arena->pdirty = 0;
	ctl_arena->pmuzzy = 0;
	if (config_stats) {
		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
		ctl_arena->astats->allocated_small = 0;
		ctl_arena->astats->nmalloc_small = 0;
		ctl_arena->astats->ndalloc_small = 0;
		ctl_arena->astats->nrequests_small = 0;
		memset(ctl_arena->astats->bstats, 0, SC_NBINS *
		    sizeof(bin_stats_t));
		memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
		    sizeof(arena_stats_large_t));
		memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
		    sizeof(arena_stats_extents_t));
	}
}

static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats, ctl_arena->astats->estats);

		for (i = 0; i < SC_NBINS; i++) {
			ctl_arena->astats->allocated_small +=
			    ctl_arena->astats->bstats[i].curregs *
			    sz_index2size(i);
			ctl_arena->astats->nmalloc_small +=
			    ctl_arena->astats->bstats[i].nmalloc;
			ctl_arena->astats->ndalloc_small +=
			    ctl_arena->astats->bstats[i].ndalloc;
			ctl_arena->astats->nrequests_small +=
			    ctl_arena->astats->bstats[i].nrequests;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
	}
}

static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.mapped,
			    &astats->astats.mapped);
			accum_atomic_zu(&sdstats->astats.retained,
			    &astats->astats.retained);
			accum_atomic_zu(&sdstats->astats.extent_avail,
			    &astats->astats.extent_avail);
		}

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
		    &astats->astats.decay_dirty.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
		    &astats->astats.decay_dirty.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
		    &astats->astats.decay_dirty.purged);

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
		    &astats->astats.decay_muzzy.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
		    &astats->astats.decay_muzzy.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
		    &astats->astats.decay_muzzy.purged);

#define OP(mtx) malloc_mutex_prof_merge( \
    &(sdstats->astats.mutex_prof_data[ \
    arena_prof_mutex_##mtx]), \
    &(astats->astats.mutex_prof_data[ \
    arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.base,
			    &astats->astats.base);
			accum_atomic_zu(&sdstats->astats.internal,
			    &astats->astats.internal);
			accum_atomic_zu(&sdstats->astats.resident,
			    &astats->astats.resident);
			accum_atomic_zu(&sdstats->astats.metadata_thp,
			    &astats->astats.metadata_thp);
		} else {
			assert(atomic_load_zu(
			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
		}

		if (!destroyed) {
			sdstats->allocated_small += astats->allocated_small;
		} else {
			assert(astats->allocated_small == 0);
		}
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.allocated_large,
			    &astats->astats.allocated_large);
		} else {
			assert(atomic_load_zu(&astats->astats.allocated_large,
			    ATOMIC_RELAXED) == 0);
		}
		ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
		    &astats->astats.nmalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
		    &astats->astats.ndalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
		    &astats->astats.nrequests_large);

		accum_atomic_zu(&sdstats->astats.tcache_bytes,
		    &astats->astats.tcache_bytes);

		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}

		/* Merge bin stats. */
		for (i = 0; i < SC_NBINS; i++) {
			sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sdstats->bstats[i].nrequests +=
			    astats->bstats[i].nrequests;
			if (!destroyed) {
				sdstats->bstats[i].curregs +=
				    astats->bstats[i].curregs;
			} else {
				assert(astats->bstats[i].curregs == 0);
			}
			sdstats->bstats[i].nfills += astats->bstats[i].nfills;
			sdstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
			sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
			sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
			if (!destroyed) {
				sdstats->bstats[i].curslabs +=
				    astats->bstats[i].curslabs;
			} else {
				assert(astats->bstats[i].curslabs == 0);
			}
			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
			    &astats->bstats[i].mutex_data);
		}

		/* Merge stats for large allocations. */
		for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		}

		/* Merge extents stats. */
		for (i = 0; i < SC_NPSIZES; i++) {
			accum_atomic_zu(&sdstats->estats[i].ndirty,
			    &astats->estats[i].ndirty);
			accum_atomic_zu(&sdstats->estats[i].nmuzzy,
			    &astats->estats[i].nmuzzy);
			accum_atomic_zu(&sdstats->estats[i].nretained,
			    &astats->estats[i].nretained);
			accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
			    &astats->estats[i].dirty_bytes);
			accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
			    &astats->estats[i].muzzy_bytes);
			accum_atomic_zu(&sdstats->estats[i].retained_bytes,
			    &astats->estats[i].retained_bytes);
		}
	}
}

static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}

static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		arena_ind = ctl_arenas->narenas;
	}

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
		return UINT_MAX;
	}

	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}

	return arena_ind;
}

static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
	background_thread_stats_t *stats = &ctl_stats->background_thread;
	if (!have_background_thread ||
	    background_thread_stats_read(tsdn, stats)) {
		memset(stats, 0, sizeof(background_thread_stats_t));
		nstime_init(&stats->run_interval, 0);
	}
}

static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since ctl_arena_refresh() will merge into them.
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
		    ATOMIC_RELAXED);
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = atomic_load_zu(
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
		    ATOMIC_RELAXED);
		ctl_stats->metadata_thp = atomic_load_zu(
		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
		    &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
		ctl_stats->retained = atomic_load_zu(
		    &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

		ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
	malloc_mutex_lock(tsdn, &mtx); \
	malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
	malloc_mutex_unlock(tsdn, &mtx);

		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
			    bt2gctx_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We already own the ctl mutex. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}

static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL) {
						nodesp[i] =
						    (const ctl_node_t *)node;
					}
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL) {
				nodesp[i] = (const ctl_node_t *)node;
			}
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return ret;
}
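
/*
 * Worked example of the loop above (a sketch): for the name
 * "arena.0.decay", iteration 0 matches the named child "arena" of the
 * root; iteration 1 sees indexed children, parses "0" with
 * malloc_strtoumax() and calls arena_i_index(); iteration 2 matches
 * "decay", a terminal node, so the loop records the three mib components
 * and sets *depthp to 3.
 */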

int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
	if (ret != 0) {
		goto label_return;
	}

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl) {
		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
	} else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return ret;
}

int
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
	return ret;
}

int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl) {
		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	} else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return ret;
}
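
/*
 * Typical use of the MIB interface from application code, via the public
 * wrappers in jemalloc.c that route here (a sketch of the documented
 * pattern):
 *
 *     size_t mib[4], miblen = 4;
 *     mallctlnametomib("stats.arenas.0.nthreads", mib, &miblen);
 *     mib[2] = 1;	// retarget arena 1 without re-parsing the name
 *     unsigned nthreads;
 *     size_t sz = sizeof(nthreads);
 *     mallctlbymib(mib, miblen, &nthreads, &sz, NULL, 0);
 */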

bool
ctl_boot(void) {
	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	ctl_initialized = false;

	return false;
}

void
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

void
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

void
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define READONLY() do { \
	if (newp != NULL || newlen != 0) { \
		ret = EPERM; \
		goto label_return; \
	} \
} while (0)

#define WRITEONLY() do { \
	if (oldp != NULL || oldlenp != NULL) { \
		ret = EPERM; \
		goto label_return; \
	} \
} while (0)

#define READ_XOR_WRITE() do { \
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
	    newlen != 0)) { \
		ret = EPERM; \
		goto label_return; \
	} \
} while (0)

#define READ(v, t) do { \
	if (oldp != NULL && oldlenp != NULL) { \
		if (*oldlenp != sizeof(t)) { \
			size_t copylen = (sizeof(t) <= *oldlenp) \
			    ? sizeof(t) : *oldlenp; \
			memcpy(oldp, (void *)&(v), copylen); \
			ret = EINVAL; \
			goto label_return; \
		} \
		*(t *)oldp = (v); \
	} \
} while (0)

#define WRITE(v, t) do { \
	if (newp != NULL) { \
		if (newlen != sizeof(t)) { \
			ret = EINVAL; \
			goto label_return; \
		} \
		(v) = *(t *)newp; \
	} \
} while (0)

#define MIB_UNSIGNED(v, i) do { \
	if (mib[i] > UINT_MAX) { \
		ret = EFAULT; \
		goto label_return; \
	} \
	v = (unsigned)mib[i]; \
} while (0)
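
/*
 * The macros above assume locals ret/oldp/oldlenp/newp/newlen and a
 * label_return label in scope. A minimal handler built from them might
 * look like this (a sketch; example_flag is hypothetical, not a real
 * mallctl):
 *
 *     static int
 *     example_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *         void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
 *             int ret;
 *             bool oldval = example_flag;
 *
 *             WRITE(example_flag, bool);	// accept a new value, if any
 *             READ(oldval, bool);		// report the previous value
 *
 *             ret = 0;
 *     label_return:
 *             return ret;
 *     }
 */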

/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
 \
	if (!(c)) { \
		return ENOENT; \
	} \
	if (l) { \
		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
	} \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
 \
	ret = 0; \
label_return: \
	if (l) { \
		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	} \
	return ret; \
}

#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
 \
	if (!(c)) { \
		return ENOENT; \
	} \
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
 \
	ret = 0; \
label_return: \
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	return ret; \
}

#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
 \
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
 \
	ret = 0; \
label_return: \
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	return ret; \
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
 \
	if (!(c)) { \
		return ENOENT; \
	} \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
 \
	ret = 0; \
label_return: \
	return ret; \
}

#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
 \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
 \
	ret = 0; \
label_return: \
	return ret; \
}
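
/*
 * For example, CTL_RO_NL_GEN(opt_abort, opt_abort, bool) (used below)
 * expands to an opt_abort_ctl() that rejects writes via READONLY(),
 * snapshots opt_abort into a local, and READ()s it back, all without
 * taking ctl_mtx.
 */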

#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
 \
	if (!(c)) { \
		return ENOENT; \
	} \
	READONLY(); \
	oldval = (m(tsd)); \
	READ(oldval, t); \
 \
	ret = 0; \
label_return: \
	return ret; \
}

#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	t oldval; \
 \
	READONLY(); \
	oldval = n; \
	READ(oldval, t); \
 \
	ret = 0; \
label_return: \
	return ret; \
}

/******************************************************************************/

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL) {
		ctl_refresh(tsd_tsdn(tsd));
	}
	READ(ctl_arenas->epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
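
/*
 * Callers refresh the cached stats snapshot by writing to "epoch" (a
 * sketch of the documented usage, via the public mallctl() wrapper):
 *
 *     uint64_t epoch = 1;
 *     size_t sz = sizeof(epoch);
 *     mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 * Any write triggers ctl_refresh(); the value read back is the new epoch.
 */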

static int
background_thread_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = background_thread_enabled();
		READ(oldval, bool);
	} else {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = background_thread_enabled();
		READ(oldval, bool);

		bool newval = *(bool *)newp;
		if (newval == oldval) {
			ret = 0;
			goto label_return;
		}

		background_thread_enabled_set(tsd_tsdn(tsd), newval);
		if (newval) {
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}

static int
max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	size_t oldval;

	if (!have_background_thread) {
		return ENOENT;
	}
	background_thread_ctl_init(tsd_tsdn(tsd));

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	if (newp == NULL) {
		oldval = max_background_threads;
		READ(oldval, size_t);
	} else {
		if (newlen != sizeof(size_t)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = max_background_threads;
		READ(oldval, size_t);

		size_t newval = *(size_t *)newp;
		if (newval == oldval) {
			ret = 0;
			goto label_return;
		}
		if (newval > opt_max_background_threads) {
			ret = EINVAL;
			goto label_return;
		}

		if (background_thread_enabled()) {
			background_thread_enabled_set(tsd_tsdn(tsd), false);
			if (background_threads_disable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
			max_background_threads = newval;
			background_thread_enabled_set(tsd_tsdn(tsd), true);
			if (background_threads_enable(tsd)) {
				ret = EFAULT;
				goto label_return;
			}
		} else {
			max_background_threads = newval;
		}
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);

	return ret;
}

/******************************************************************************/

CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
    const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
    const char *)
CTL_RO_NL_GEN(opt_huge_threshold, opt_huge_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
    size_t)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)

/******************************************************************************/

static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	arena_t *oldarena;
	unsigned newind, oldind;

	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL) {
		return EAGAIN;
	}
	newind = oldind = arena_ind_get(oldarena);
	WRITE(newind, unsigned);
	READ(oldind, unsigned);

	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= narenas_total_get()) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		if (have_percpu_arena &&
		    PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
			if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
				/*
				 * If per-CPU arenas are enabled, the
				 * thread.arena control is not allowed for the
				 * automatic arena range.
				 */
1779 ret = EPERM;
1780 goto label_return;
1781 }
1782 }
1783
1784 /* Initialize arena if necessary. */
1785 newarena = arena_get(tsd_tsdn(tsd), newind, true);
1786 if (newarena == NULL) {
1787 ret = EAGAIN;
1788 goto label_return;
1789 }
1790 /* Set new arena/tcache associations. */
1791 arena_migrate(tsd, oldind, newind);
1792 if (tcache_available(tsd)) {
1793 tcache_arena_reassociate(tsd_tsdn(tsd),
1794 tsd_tcachep_get(tsd), newarena);
1795 }
1796 }
1797
1798 ret = 0;
1799label_return:
1800 return ret;
1801}

CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
    uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
    tsd_thread_deallocatedp_get, uint64_t *)

static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	bool oldval;

	oldval = tcache_enabled_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(tsd, *(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;

	if (!tcache_available(tsd)) {
		ret = EFAULT;
		goto label_return;
	}

	READONLY();
	WRITEONLY();

	tcache_flush(tsd);

	ret = 0;
label_return:
	return ret;
}
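
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>): the
 * per-thread cache can be toggled and flushed via the corresponding
 * mallctls. Note that "thread.tcache.flush" is both READONLY() and
 * WRITEONLY(), i.e. it accepts neither oldp nor newp:
 *
 *	bool enabled = false;
 *	mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
 *	    sizeof(enabled));
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */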

static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;

	if (!config_prof) {
		return ENOENT;
	}

	READ_XOR_WRITE();

	if (newp != NULL) {
		if (newlen != sizeof(const char *)) {
			ret = EINVAL;
			goto label_return;
		}

		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
		    0) {
			goto label_return;
		}
	} else {
		const char *oldname = prof_thread_name_get(tsd);
		READ(oldname, const char *);
	}

	ret = 0;
label_return:
	return ret;
}
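
/*
 * Illustrative usage (an assumption; only meaningful when profiling is
 * compiled in and enabled): READ_XOR_WRITE() above means a caller either
 * reads the current name or writes a new one, never both in one call:
 *
 *	const char *name = "worker-0";
 *	mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
 */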

static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	oldval = prof_thread_active_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		if (prof_thread_active_set(tsd, *(bool *)newp)) {
			ret = EAGAIN;
			goto label_return;
		}
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

/******************************************************************************/

static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	READONLY();
	if (tcaches_create(tsd, &tcache_ind)) {
		ret = EFAULT;
		goto label_return;
	}
	READ(tcache_ind, unsigned);

	ret = 0;
label_return:
	return ret;
}

static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_flush(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}

static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned tcache_ind;

	WRITEONLY();
	tcache_ind = UINT_MAX;
	WRITE(tcache_ind, unsigned);
	if (tcache_ind == UINT_MAX) {
		ret = EFAULT;
		goto label_return;
	}
	tcaches_destroy(tsd, tcache_ind);

	ret = 0;
label_return:
	return ret;
}
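
/*
 * Illustrative lifecycle of an explicit tcache (an assumption; requires
 * <jemalloc/jemalloc.h>): create one, direct allocations at it with
 * MALLOCX_TCACHE(), then flush and destroy it:
 *
 *	unsigned tci;
 *	size_t sz = sizeof(tci);
 *	if (mallctl("tcache.create", &tci, &sz, NULL, 0) == 0) {
 *		void *p = mallocx(64, MALLOCX_TCACHE(tci));
 *		dallocx(p, MALLOCX_TCACHE(tci));
 *		mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
 *		mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
 *	}
 */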

/******************************************************************************/

static int
arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);
	unsigned arena_ind;
	bool initialized;

	READONLY();
	MIB_UNSIGNED(arena_ind, 1);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	initialized = arenas_i(arena_ind)->initialized;
	malloc_mutex_unlock(tsdn, &ctl_mtx);

	READ(initialized, bool);

	ret = 0;
label_return:
	return ret;
}

static void
arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
	malloc_mutex_lock(tsdn, &ctl_mtx);
	{
		unsigned narenas = ctl_arenas->narenas;

		/*
		 * Access via index narenas is deprecated, and scheduled for
		 * removal in 6.0.0.
		 */
		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
			unsigned i;
			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

			for (i = 0; i < narenas; i++) {
				tarenas[i] = arena_get(tsdn, i, false);
			}

			/*
			 * No further need to hold ctl_mtx, since narenas and
			 * tarenas contain everything needed below.
			 */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL) {
					arena_decay(tsdn, tarenas[i], false,
					    all);
				}
			}
		} else {
			arena_t *tarena;

			assert(arena_ind < narenas);

			tarena = arena_get(tsdn, arena_ind, false);

			/* No further need to hold ctl_mtx. */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			if (tarena != NULL) {
				arena_decay(tsdn, tarena, false, all);
			}
		}
	}
}

static int
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, false);

	ret = 0;
label_return:
	return ret;
}

static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(arena_ind, 1);
	arena_i_decay(tsd_tsdn(tsd), arena_ind, true);

	ret = 0;
label_return:
	return ret;
}
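
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h> and
 * <stdio.h>): decay or purge a single arena, or every arena at once via
 * MALLCTL_ARENAS_ALL. The index is part of the mallctl name, so a caller
 * typically formats it into the string:
 *
 *	char cmd[64];
 *	snprintf(cmd, sizeof(cmd), "arena.%u.purge",
 *	    (unsigned)MALLCTL_ARENAS_ALL);
 *	mallctl(cmd, NULL, NULL, NULL, 0);
 */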

static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
    arena_t **arena) {
	int ret;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(*arena_ind, 1);

	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
	if (*arena == NULL || arena_is_auto(*arena)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}

static void
arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
	/* Temporarily disable the background thread during arena reset. */
	if (have_background_thread) {
		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
		if (background_thread_enabled()) {
			background_thread_info_t *info =
			    background_thread_info_get(arena_ind);
			assert(info->state == background_thread_started);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_paused;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
}

static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
	if (have_background_thread) {
		if (background_thread_enabled()) {
			background_thread_info_t *info =
			    background_thread_info_get(arena_ind);
			assert(info->state == background_thread_paused);
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			info->state = background_thread_started;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
	}
}

static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		return ret;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	arena_reset(tsd, arena);
	arena_reset_finish_background_thread(tsd, arena_ind);

	return ret;
}

static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;
	ctl_arena_t *ctl_darena, *ctl_arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0) {
		goto label_return;
	}

	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
	    true) != 0) {
		ret = EFAULT;
		goto label_return;
	}

	arena_reset_prepare_background_thread(tsd, arena_ind);
	/* Merge stats after resetting and purging arena. */
	arena_reset(tsd, arena);
	arena_decay(tsd_tsdn(tsd), arena, false, true);
	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
	ctl_darena->initialized = true;
	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
	/* Destroy arena. */
	arena_destroy(tsd, arena);
	ctl_arena = arenas_i(arena_ind);
	ctl_arena->initialized = false;
	/* Record arena index for later recycling via arenas.create. */
	ql_elm_new(ctl_arena, destroyed_link);
	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
	arena_reset_finish_background_thread(tsd, arena_ind);

	assert(ret == 0);
label_return:
	return ret;
}

static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *dss = NULL;
	unsigned arena_ind;
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(dss, const char *);
	MIB_UNSIGNED(arena_ind, 1);
	if (dss != NULL) {
		int i;
		bool match = false;

		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}

		if (!match) {
			ret = EINVAL;
			goto label_return;
		}
	}

	/*
	 * Access via index narenas is deprecated, and scheduled for removal in
	 * 6.0.0.
	 */
	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
	    ctl_arenas->narenas) {
		if (dss_prec != dss_prec_limit &&
		    extent_dss_prec_set(dss_prec)) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = extent_dss_prec_get();
	} else {
		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL || (dss_prec != dss_prec_limit &&
		    arena_dss_prec_set(arena, dss_prec))) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = arena_dss_prec_get(arena);
	}

	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
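
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>): read
 * the current dss (sbrk(2)) precedence for arena 0, then change it. The
 * accepted strings are the dss_prec_names[] entries matched above:
 *
 *	const char *cur, *want = "primary";
 *	size_t sz = sizeof(cur);
 *	mallctl("arena.0.dss", &cur, &sz, NULL, 0);
 *	mallctl("arena.0.dss", NULL, NULL, &want, sizeof(want));
 */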

static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	MIB_UNSIGNED(arena_ind, 1);
	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
	if (arena == NULL) {
		ret = EFAULT;
		goto label_return;
	}

	if (oldp != NULL && oldlenp != NULL) {
		ssize_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
		    arena_muzzy_decay_ms_get(arena);
		READ(oldval, ssize_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
		    *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
		    arena, *(ssize_t *)newp)) {
			ret = EFAULT;
			goto label_return;
		}
	}

	ret = 0;
label_return:
	return ret;
}

static int
arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, true);
}

static int
arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, false);
}
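
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>): set a
 * 5 s dirty decay time for arena 0. A value of -1 disables decay-driven
 * purging for that page state, and 0 purges immediately:
 *
 *	ssize_t decay_ms = 5000;
 *	mallctl("arena.0.dirty_decay_ms", NULL, NULL, &decay_ms,
 *	    sizeof(decay_ms));
 */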

static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get()) {
		extent_hooks_t *old_extent_hooks;
		arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL) {
			if (arena_ind >= narenas_auto) {
				ret = EFAULT;
				goto label_return;
			}
			old_extent_hooks =
			    (extent_hooks_t *)&extent_hooks_default;
			READ(old_extent_hooks, extent_hooks_t *);
			if (newp != NULL) {
				/* Initialize a new arena as a side effect. */
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				arena = arena_init(tsd_tsdn(tsd), arena_ind,
				    new_extent_hooks);
				if (arena == NULL) {
					ret = EFAULT;
					goto label_return;
				}
			}
		} else {
			if (newp != NULL) {
				extent_hooks_t *new_extent_hooks
				    JEMALLOC_CC_SILENCE_INIT(NULL);
				WRITE(new_extent_hooks, extent_hooks_t *);
				old_extent_hooks = extent_hooks_set(tsd, arena,
				    new_extent_hooks);
				READ(old_extent_hooks, extent_hooks_t *);
			} else {
				old_extent_hooks = extent_hooks_get(arena);
				READ(old_extent_hooks, extent_hooks_t *);
			}
		}
	} else {
		ret = EFAULT;
		goto label_return;
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
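
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>, where
 * extent_hooks_t is public): read the hooks currently installed for arena 0.
 * Writing a pointer to a caller-owned, fully populated extent_hooks_t via
 * newp replaces them, per the logic above:
 *
 *	extent_hooks_t *hooks;
 *	size_t sz = sizeof(hooks);
 *	mallctl("arena.0.extent_hooks", &hooks, &sz, NULL, 0);
 */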

static int
arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	if (!opt_retain) {
		/* Only relevant when retain is enabled. */
		return ENOENT;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
		size_t old_limit, new_limit;
		if (newp != NULL) {
			WRITE(new_limit, size_t);
		}
		bool err = arena_retain_grow_limit_get_set(tsd, arena,
		    &old_limit, newp != NULL ? &new_limit : NULL);
		if (!err) {
			READ(old_limit, size_t);
			ret = 0;
		} else {
			ret = EFAULT;
		}
	} else {
		ret = EFAULT;
	}
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}

static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	switch (i) {
	case MALLCTL_ARENAS_ALL:
	case MALLCTL_ARENAS_DESTROYED:
		break;
	default:
		if (i > ctl_arenas->narenas) {
			ret = NULL;
			goto label_return;
		}
		break;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

/******************************************************************************/

static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned narenas;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_arenas->narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}

static int
arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, bool dirty) {
	int ret;

	if (oldp != NULL && oldlenp != NULL) {
		ssize_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
		    arena_muzzy_decay_ms_default_get());
		READ(oldval, ssize_t);
	}
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
		    : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
			ret = EFAULT;
			goto label_return;
		}
	}

	ret = 0;
label_return:
	return ret;
}

static int
arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, true);
}

static int
arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
	    newlen, false);
}

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t i) {
	if (i > SC_NBINS) {
		return NULL;
	}
	return super_arenas_bin_i_node;
}

CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
    size_t)
static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t i) {
	if (i > SC_NSIZES - SC_NBINS) {
		return NULL;
	}
	return super_arenas_lextent_i_node;
}

static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	extent_hooks_t *extent_hooks;
	unsigned arena_ind;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

	extent_hooks = (extent_hooks_t *)&extent_hooks_default;
	WRITE(extent_hooks, extent_hooks_t *);
	if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
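
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>): create
 * a fresh arena with the default extent hooks and allocate from it
 * explicitly via MALLOCX_ARENA():
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	if (mallctl("arenas.create", &ind, &sz, NULL, 0) == 0) {
 *		void *p = mallocx(4096, MALLOCX_ARENA(ind));
 *		dallocx(p, 0);
 *	}
 */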

static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	unsigned arena_ind;
	void *ptr;
	extent_t *extent;
	arena_t *arena;

	ptr = NULL;
	ret = EINVAL;
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(ptr, void *);
	extent = iealloc(tsd_tsdn(tsd), ptr);
	if (extent == NULL) {
		goto label_return;
	}

	arena = extent_arena_get(extent);
	if (arena == NULL) {
		goto label_return;
	}

	arena_ind = arena_ind_get(arena);
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
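
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>): map an
 * allocation back to the arena that owns it. Note that the pointer is passed
 * through newp and the arena index comes back through oldp:
 *
 *	void *p = mallocx(42, 0);
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.lookup", &ind, &sz, &p, sizeof(p));
 */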

/******************************************************************************/

static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
		    *(bool *)newp);
	} else {
		oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
	} else {
		oldval = prof_active_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *filename = NULL;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_mdump(tsd, filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}
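
/*
 * Illustrative usage (an assumption; requires a build with profiling and
 * opt.prof enabled): trigger a heap profile dump to an explicit file, or
 * pass a NULL filename to fall back to the default sequential naming:
 *
 *	const char *fname = "/tmp/app.heap";
 *	mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
 */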

static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval;

	if (!config_prof) {
		return ENOENT;
	}

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
	} else {
		oldval = prof_gdump_get(tsd_tsdn(tsd));
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return ret;
}

static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	size_t lg_sample = lg_prof_sample;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(lg_sample, size_t);
	if (lg_sample >= (sizeof(uint64_t) << 3)) {
		lg_sample = (sizeof(uint64_t) << 3) - 1;
	}

	prof_reset(tsd, lg_sample);

	ret = 0;
label_return:
	return ret;
}

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)

static int
prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	const char *filename = NULL;

	if (!config_prof) {
		return ENOENT;
	}

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_log_start(tsd_tsdn(tsd), filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return ret;
}

static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	if (!config_prof) {
		return ENOENT;
	}

	if (prof_log_stop(tsd_tsdn(tsd))) {
		return EFAULT;
	}

	return 0;
}

/******************************************************************************/

CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
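
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>): these
 * counters are snapshots taken when ctl_stats was last refreshed, so a
 * reader typically advances "epoch" first:
 *
 *	uint64_t epoch = 1;
 *	size_t esz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch));
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */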

CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
    ctl_stats->background_thread.num_threads, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
    ctl_stats->background_thread.num_runs, uint64_t)
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
    nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)

CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_uptime,
    nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
    ATOMIC_RELAXED),
    size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_base,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
    size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
/*
 * Note: "nmalloc" is read here rather than "nrequests". This is intentional.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */

/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_ops,				\
    l.n_lock_ops, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_wait,				\
    l.n_wait_times, uint64_t)						\
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq,			\
    l.n_spin_acquired, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch,			\
    l.n_owner_switches, uint64_t)					\
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time,			\
    nstime_ns(&l.tot_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time,			\
    nstime_ns(&l.max_wait_time), uint64_t)				\
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds,			\
    l.max_n_thds, uint32_t)

/* Global mutexes. */
#define OP(mtx)								\
    RO_MUTEX_CTL_GEN(mutexes_##mtx,					\
	ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx,		\
    arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* tcache bin mutex. */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN

/* Resets all mutex stats, including global, arena and bin mutexes. */
static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);

#define MUTEX_PROF_RESET(mtx)						\
	malloc_mutex_lock(tsdn, &mtx);					\
	malloc_mutex_prof_data_reset(tsdn, &mtx);			\
	malloc_mutex_unlock(tsdn, &mtx);

	/* Global mutexes: ctl, background_thread, and prof. */
	MUTEX_PROF_RESET(ctl_mtx);
	if (have_background_thread) {
		MUTEX_PROF_RESET(background_thread_lock);
	}
	if (config_prof && opt_prof) {
		MUTEX_PROF_RESET(bt2gctx_mtx);
	}

	/* Per arena mutexes. */
	unsigned n = narenas_total_get();

	for (unsigned i = 0; i < n; i++) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		MUTEX_PROF_RESET(arena->large_mtx);
		MUTEX_PROF_RESET(arena->extent_avail_mtx);
		MUTEX_PROF_RESET(arena->extents_dirty.mtx);
		MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
		MUTEX_PROF_RESET(arena->extents_retained.mtx);
		MUTEX_PROF_RESET(arena->decay_dirty.mtx);
		MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
		MUTEX_PROF_RESET(arena->base->mtx);

		for (szind_t j = 0; j < SC_NBINS; j++) {
			bin_t *bin = &arena->bins[j];
			MUTEX_PROF_RESET(bin->lock);
		}
	}
#undef MUTEX_PROF_RESET
	return 0;
}
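
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h> and a
 * stats-enabled build): clear all accumulated mutex profiling counters, e.g.
 * between benchmark phases:
 *
 *	mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
 */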

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)

static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	if (j > SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_bins_j_node;
}
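
/*
 * Illustrative usage (an assumption; requires <jemalloc/jemalloc.h>): the
 * mib[2]/mib[4] components read above are exactly the slots a caller patches
 * when reading these nodes by MIB, which avoids re-parsing the name on every
 * lookup:
 *
 *	size_t mib[6], miblen = sizeof(mib) / sizeof(mib[0]);
 *	mallctlnametomib("stats.arenas.0.bins.0.curregs", mib, &miblen);
 *	mib[2] = 0;	// arena index
 *	mib[4] = 3;	// bin index
 *	size_t curregs, sz = sizeof(curregs);
 *	mallctlbymib(mib, miblen, &curregs, &sz, NULL, 0);
 */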

CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)

static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	if (j > SC_NSIZES - SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_lextents_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].nretained,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
    ATOMIC_RELAXED), size_t)

static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	if (j >= SC_NPSIZES) {
		return NULL;
	}
	return super_stats_arenas_i_extents_j_node;
}

static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t i) {
	const ctl_named_node_t *ret;
	size_t a;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	a = arenas_i2a_impl(i, true, true);
	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	if (oldp == NULL || oldlenp == NULL || newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	/*
	 * Note: this is a *private* struct. This is an experimental interface;
	 * forcing the user to know the jemalloc internals well enough to
	 * extract the ABI hopefully ensures nobody gets too comfortable with
	 * this API, which can change at a moment's notice.
	 */
	hooks_t hooks;
	WRITE(hooks, hooks_t);
	void *handle = hook_install(tsd_tsdn(tsd), &hooks);
	if (handle == NULL) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(handle, void *);

	ret = 0;
label_return:
	return ret;
}

static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	WRITEONLY();
	void *handle = NULL;
	WRITE(handle, void *);
	if (handle == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	hook_remove(tsd_tsdn(tsd), handle);
	ret = 0;
label_return:
	return ret;
}