#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/sc.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

/*
 * On architectures that support 64-bit atomics, we use atomic updates for
 * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif

typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	arena_stats_u64_t nmalloc;
	arena_stats_u64_t ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	arena_stats_u64_t nrequests; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t curlextents; /* Derived. */
};
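
/*
 * Note (an inference from the comment above, not a guarantee): because
 * tcache merges into nrequests only periodically, a raw read of the counter
 * can lag the true request count by whatever the tcaches have not yet
 * flushed; the stats-merge path is expected to compensate, hence the
 * "partially derived" marker.
 */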

typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
	/* Total number of purge sweeps. */
	arena_stats_u64_t npurge;
	/* Total number of madvise calls made. */
	arena_stats_u64_t nmadvise;
	/* Total number of pages purged. */
	arena_stats_u64_t purged;
};

typedef struct arena_stats_extents_s arena_stats_extents_t;
struct arena_stats_extents_s {
	/*
	 * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
	 * We track both bytes and # of extents: two extents in the same bucket
	 * may have different sizes if adjacent size classes differ by more than
	 * a page, so bytes cannot always be derived from # of extents.
	 */
	atomic_zu_t ndirty;
	atomic_zu_t dirty_bytes;
	atomic_zu_t nmuzzy;
	atomic_zu_t muzzy_bytes;
	atomic_zu_t nretained;
	atomic_zu_t retained_bytes;
};
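
/*
 * Illustrative example (assuming 4 KiB pages; the numbers are made up):
 * adjacent large page size classes can differ by several pages, so a single
 * pszind bucket may hold both a 20-page and a 24-page dirty extent. Then
 * ndirty == 2 while dirty_bytes covers 44 pages, so neither statistic can be
 * derived from the other.
 */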

/*
 * Arena stats. Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t mtx;
#endif

	/* Number of bytes currently mapped, excluding retained memory. */
	atomic_zu_t mapped; /* Partially derived. */

	/*
	 * Number of unused virtual memory bytes currently retained. Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the mapped statistic (above).
	 */
	atomic_zu_t retained; /* Derived. */

	/* Number of extent_t structs allocated by base, but not being used. */
	atomic_zu_t extent_avail;

	arena_stats_decay_t decay_dirty;
	arena_stats_decay_t decay_muzzy;

	atomic_zu_t base; /* Derived. */
	atomic_zu_t internal;
	atomic_zu_t resident; /* Derived. */
	atomic_zu_t metadata_thp;

	atomic_zu_t allocated_large; /* Derived. */
	arena_stats_u64_t nmalloc_large; /* Derived. */
	arena_stats_u64_t ndalloc_large; /* Derived. */
	arena_stats_u64_t nrequests_large; /* Derived. */

	/* Number of bytes cached in tcache associated with this arena. */
	atomic_zu_t tcache_bytes; /* Derived. */

	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

	/* One element for each large size class. */
	arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];

	/* Arena uptime. */
	nstime_t uptime;
};

static inline bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}
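
/*
 * Illustrative usage (hypothetical call site, not jemalloc code): the stats
 * object must start out zeroed, which the debug-only loop above asserts.
 * A caller that does not get zeroed backing memory would do, e.g.:
 *
 *	memset(&stats, 0, sizeof(stats));
 *	if (arena_stats_init(tsdn, &stats)) {
 *		... mutex initialization failed ...
 *	}
 */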

static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}
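
/*
 * Illustrative sketch (hypothetical helper, not part of the jemalloc API):
 * readers bracket 64-bit accesses with arena_stats_lock()/unlock(), which
 * compile to no-ops when 64-bit atomics are available and to mutex
 * operations otherwise. A reader of a large size class counter might look
 * like this.
 */
static inline uint64_t
arena_stats_example_read_large_nmalloc(tsdn_t *tsdn,
    arena_stats_t *arena_stats, szind_t szind) {
	arena_stats_lock(tsdn, arena_stats);
	uint64_t nmalloc = arena_stats_read_u64(tsdn, arena_stats,
	    &arena_stats->lstats[szind - SC_NBINS].nmalloc);
	arena_stats_unlock(tsdn, arena_stats);
	return nmalloc;
}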

static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}

static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
	/* r is the pre-subtraction value; assert no underflow. */
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p -= x;
	assert(*p + x >= *p);
#endif
}

/*
 * Non-atomically sets *dst += src. *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
	*dst += src;
#endif
}
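
/*
 * Illustrative usage (hypothetical merge-path snippet, not jemalloc code):
 * the accumulators assume only one thread writes *dst, e.g. a stats-merge
 * path folding a per-arena counter into a merged snapshot:
 *
 *	arena_stats_accum_u64(&merged->nmalloc_large,
 *	    arena_stats_read_u64(tsdn, arena_stats,
 *	    &arena_stats->lstats[i].nmalloc));
 *
 * where `merged` and `i` stand in for whatever the caller actually has.
 */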

static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
	/* r is the pre-subtraction value; assert no underflow. */
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}

static inline void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
	    SC_NBINS].nrequests, nrequests);
	arena_stats_unlock(tsdn, arena_stats);
}
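
/*
 * Illustrative usage (hypothetical call site, not jemalloc code): a tcache
 * flush that batches request counts for one large size class would merge
 * them through the wrapper above, e.g.:
 *
 *	arena_stats_large_nrequests_add(tsdn, &arena->stats, szind,
 *	    flushed_requests);
 *
 * where `arena->stats` and `flushed_requests` stand in for whatever the
 * caller actually has.
 */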

static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */