| 1 | /* |
| 2 | * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_SERVICES_MALLOCTRACKER_HPP |
| 26 | #define SHARE_SERVICES_MALLOCTRACKER_HPP |
| 27 | |
| 28 | #if INCLUDE_NMT |
| 29 | |
| 30 | #include "memory/allocation.hpp" |
| 31 | #include "runtime/atomic.hpp" |
| 32 | #include "runtime/threadCritical.hpp" |
| 33 | #include "services/nmtCommon.hpp" |
| 34 | #include "utilities/nativeCallStack.hpp" |
| 35 | |
| 36 | /* |
| 37 | * This counter class counts memory allocation and deallocation, |
| 38 | * records total memory allocation size and number of allocations. |
| 39 | * The counters are updated atomically. |
| 40 | */ |
class MemoryCounter {
 private:
  volatile size_t   _count;   // number of live (not yet deallocated) allocations
  volatile size_t   _size;    // total size in bytes of live allocations

  // Debug-only high-water marks; updated without atomicity, so they may
  // slightly under-report peaks under concurrent updates — TODO confirm
  // this imprecision is acceptable (debug diagnostics only).
  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  // Record one allocation of sz bytes. sz may be 0 (e.g. arena creation),
  // in which case only the count is bumped.
  inline void allocate(size_t sz) {
    Atomic::inc(&_count);
    if (sz > 0) {
      Atomic::add(sz, &_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  // Record one deallocation of sz bytes; must pair with a prior allocate().
  inline void deallocate(size_t sz) {
    assert(_count > 0, "Nothing allocated yet");
    assert(_size >= sz, "deallocation > allocated");
    Atomic::dec(&_count);
    if (sz > 0) {
      Atomic::sub(sz, &_size);
    }
  }

  // Adjust the tracked size by a signed delta without changing the count.
  // A negative sz relies on unsigned wrap-around of Atomic::add to subtract.
  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add(size_t(sz), &_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })

};
| 86 | |
| 87 | /* |
| 88 | * Malloc memory used by a particular subsystem. |
| 89 | * It includes the memory acquired through os::malloc() |
| 90 | * call and arena's backing memory. |
| 91 | */ |
class MallocMemory {
 private:
  MemoryCounter _malloc;  // direct os::malloc() allocations
  MemoryCounter _arena;   // arena backing memory (chunks)

 public:
  MallocMemory() { }

  // Record an os::malloc() of sz bytes.
  inline void record_malloc(size_t sz) {
    _malloc.allocate(sz);
  }

  // Record an os::free() of a block of sz bytes.
  inline void record_free(size_t sz) {
    _malloc.deallocate(sz);
  }

  // A new arena was created; size 0 because chunk memory is
  // accounted separately via record_arena_size_change().
  inline void record_new_arena() {
    _arena.allocate(0);
  }

  // An arena was destroyed (count only, see record_new_arena()).
  inline void record_arena_free() {
    _arena.deallocate(0);
  }

  // Arena grew (sz > 0) or shrank (sz < 0) by |sz| bytes.
  inline void record_arena_size_change(long sz) {
    _arena.resize(sz);
  }

  inline size_t malloc_size()  const { return _malloc.size(); }
  inline size_t malloc_count() const { return _malloc.count();}
  inline size_t arena_size()   const { return _arena.size();  }
  inline size_t arena_count()  const { return _arena.count(); }

  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
};
| 128 | |
| 129 | class MallocMemorySummary; |
| 130 | |
| 131 | // A snapshot of malloc'd memory, includes malloc memory |
| 132 | // usage by types and memory used by tracking itself. |
| 133 | class MallocMemorySnapshot : public ResourceObj { |
| 134 | friend class MallocMemorySummary; |
| 135 | |
| 136 | private: |
| 137 | MallocMemory _malloc[mt_number_of_types]; |
| 138 | MemoryCounter ; |
| 139 | |
| 140 | |
| 141 | public: |
| 142 | inline MallocMemory* by_type(MEMFLAGS flags) { |
| 143 | int index = NMTUtil::flag_to_index(flags); |
| 144 | return &_malloc[index]; |
| 145 | } |
| 146 | |
| 147 | inline MallocMemory* by_index(int index) { |
| 148 | assert(index >= 0, "Index out of bound" ); |
| 149 | assert(index < mt_number_of_types, "Index out of bound" ); |
| 150 | return &_malloc[index]; |
| 151 | } |
| 152 | |
| 153 | inline MemoryCounter* malloc_overhead() { |
| 154 | return &_tracking_header; |
| 155 | } |
| 156 | |
| 157 | // Total malloc'd memory amount |
| 158 | size_t total() const; |
| 159 | // Total malloc'd memory used by arenas |
| 160 | size_t total_arena() const; |
| 161 | |
| 162 | inline size_t thread_count() const { |
| 163 | MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this); |
| 164 | return s->by_type(mtThreadStack)->malloc_count(); |
| 165 | } |
| 166 | |
| 167 | void copy_to(MallocMemorySnapshot* s) { |
| 168 | // Need to make sure that mtChunks don't get deallocated while the |
| 169 | // copy is going on, because their size is adjusted using this |
| 170 | // buffer in make_adjustment(). |
| 171 | ThreadCritical tc; |
| 172 | s->_tracking_header = _tracking_header; |
| 173 | for (int index = 0; index < mt_number_of_types; index ++) { |
| 174 | s->_malloc[index] = _malloc[index]; |
| 175 | } |
| 176 | } |
| 177 | |
| 178 | // Make adjustment by subtracting chunks used by arenas |
| 179 | // from total chunks to get total free chunk size |
| 180 | void make_adjustment(); |
| 181 | }; |
| 182 | |
| 183 | /* |
| 184 | * This class is for collecting malloc statistics at summary level |
| 185 | */ |
| 186 | class MallocMemorySummary : AllStatic { |
| 187 | private: |
| 188 | // Reserve memory for placement of MallocMemorySnapshot object |
| 189 | static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)]; |
| 190 | |
| 191 | public: |
| 192 | static void initialize(); |
| 193 | |
| 194 | static inline void record_malloc(size_t size, MEMFLAGS flag) { |
| 195 | as_snapshot()->by_type(flag)->record_malloc(size); |
| 196 | } |
| 197 | |
| 198 | static inline void record_free(size_t size, MEMFLAGS flag) { |
| 199 | as_snapshot()->by_type(flag)->record_free(size); |
| 200 | } |
| 201 | |
| 202 | static inline void record_new_arena(MEMFLAGS flag) { |
| 203 | as_snapshot()->by_type(flag)->record_new_arena(); |
| 204 | } |
| 205 | |
| 206 | static inline void record_arena_free(MEMFLAGS flag) { |
| 207 | as_snapshot()->by_type(flag)->record_arena_free(); |
| 208 | } |
| 209 | |
| 210 | static inline void record_arena_size_change(long size, MEMFLAGS flag) { |
| 211 | as_snapshot()->by_type(flag)->record_arena_size_change(size); |
| 212 | } |
| 213 | |
| 214 | static void snapshot(MallocMemorySnapshot* s) { |
| 215 | as_snapshot()->copy_to(s); |
| 216 | s->make_adjustment(); |
| 217 | } |
| 218 | |
| 219 | // Record memory used by malloc tracking header |
| 220 | static inline void (size_t sz) { |
| 221 | as_snapshot()->malloc_overhead()->allocate(sz); |
| 222 | } |
| 223 | |
| 224 | static inline void (size_t sz) { |
| 225 | as_snapshot()->malloc_overhead()->deallocate(sz); |
| 226 | } |
| 227 | |
| 228 | // The memory used by malloc tracking headers |
| 229 | static inline size_t tracking_overhead() { |
| 230 | return as_snapshot()->malloc_overhead()->size(); |
| 231 | } |
| 232 | |
| 233 | static MallocMemorySnapshot* as_snapshot() { |
| 234 | return (MallocMemorySnapshot*)_snapshot; |
| 235 | } |
| 236 | }; |
| 237 | |
| 238 | |
| 239 | /* |
| 240 | * Malloc tracking header. |
| 241 | * To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose, |
| 242 | * which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build). |
| 243 | */ |
| 244 | |
| 245 | class { |
| 246 | #ifdef _LP64 |
| 247 | size_t : 64; |
| 248 | size_t : 8; |
| 249 | size_t : 16; |
| 250 | size_t : 40; |
| 251 | #define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40) |
| 252 | #define MAX_BUCKET_LENGTH right_n_bits(16) |
| 253 | #else |
| 254 | size_t _size : 32; |
| 255 | size_t _flags : 8; |
| 256 | size_t _pos_idx : 8; |
| 257 | size_t _bucket_idx: 16; |
| 258 | #define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(16) |
| 259 | #define MAX_BUCKET_LENGTH right_n_bits(8) |
| 260 | #endif // _LP64 |
| 261 | |
| 262 | public: |
| 263 | (size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) { |
| 264 | assert(sizeof(MallocHeader) == sizeof(void*) * 2, |
| 265 | "Wrong header size" ); |
| 266 | |
| 267 | if (level == NMT_minimal) { |
| 268 | return; |
| 269 | } |
| 270 | |
| 271 | _flags = flags; |
| 272 | set_size(size); |
| 273 | if (level == NMT_detail) { |
| 274 | size_t bucket_idx; |
| 275 | size_t pos_idx; |
| 276 | if (record_malloc_site(stack, size, &bucket_idx, &pos_idx, flags)) { |
| 277 | assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index" ); |
| 278 | assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index" ); |
| 279 | _bucket_idx = bucket_idx; |
| 280 | _pos_idx = pos_idx; |
| 281 | } |
| 282 | } |
| 283 | |
| 284 | MallocMemorySummary::record_malloc(size, flags); |
| 285 | MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader)); |
| 286 | } |
| 287 | |
| 288 | inline size_t () const { return _size; } |
| 289 | inline MEMFLAGS () const { return (MEMFLAGS)_flags; } |
| 290 | bool (NativeCallStack& stack) const; |
| 291 | |
| 292 | // Cleanup tracking information before the memory is released. |
| 293 | void () const; |
| 294 | |
| 295 | private: |
| 296 | inline void (size_t size) { |
| 297 | _size = size; |
| 298 | } |
| 299 | bool (const NativeCallStack& stack, size_t size, |
| 300 | size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const; |
| 301 | }; |
| 302 | |
| 303 | |
| 304 | // Main class called from MemTracker to track malloc activities |
| 305 | class MallocTracker : AllStatic { |
| 306 | public: |
| 307 | // Initialize malloc tracker for specific tracking level |
| 308 | static bool initialize(NMT_TrackingLevel level); |
| 309 | |
| 310 | static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to); |
| 311 | |
| 312 | // malloc tracking header size for specific tracking level |
| 313 | static inline size_t (NMT_TrackingLevel level) { |
| 314 | return (level == NMT_off) ? 0 : sizeof(MallocHeader); |
| 315 | } |
| 316 | |
| 317 | // Parameter name convention: |
| 318 | // memblock : the beginning address for user data |
| 319 | // malloc_base: the beginning address that includes malloc tracking header |
| 320 | // |
| 321 | // The relationship: |
| 322 | // memblock = (char*)malloc_base + sizeof(nmt header) |
| 323 | // |
| 324 | |
| 325 | // Record malloc on specified memory block |
| 326 | static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, |
| 327 | const NativeCallStack& stack, NMT_TrackingLevel level); |
| 328 | |
| 329 | // Record free on specified memory block |
| 330 | static void* record_free(void* memblock); |
| 331 | |
| 332 | // Offset memory address to header address |
| 333 | static inline void* get_base(void* memblock); |
| 334 | static inline void* get_base(void* memblock, NMT_TrackingLevel level) { |
| 335 | if (memblock == NULL || level == NMT_off) return memblock; |
| 336 | return (char*)memblock - malloc_header_size(level); |
| 337 | } |
| 338 | |
| 339 | // Get memory size |
| 340 | static inline size_t get_size(void* memblock) { |
| 341 | MallocHeader* = malloc_header(memblock); |
| 342 | return header->size(); |
| 343 | } |
| 344 | |
| 345 | // Get memory type |
| 346 | static inline MEMFLAGS get_flags(void* memblock) { |
| 347 | MallocHeader* = malloc_header(memblock); |
| 348 | return header->flags(); |
| 349 | } |
| 350 | |
| 351 | // Get header size |
| 352 | static inline size_t (void* memblock) { |
| 353 | return (memblock == NULL) ? 0 : sizeof(MallocHeader); |
| 354 | } |
| 355 | |
| 356 | static inline void record_new_arena(MEMFLAGS flags) { |
| 357 | MallocMemorySummary::record_new_arena(flags); |
| 358 | } |
| 359 | |
| 360 | static inline void record_arena_free(MEMFLAGS flags) { |
| 361 | MallocMemorySummary::record_arena_free(flags); |
| 362 | } |
| 363 | |
| 364 | static inline void record_arena_size_change(int size, MEMFLAGS flags) { |
| 365 | MallocMemorySummary::record_arena_size_change(size, flags); |
| 366 | } |
| 367 | private: |
| 368 | static inline MallocHeader* (void *memblock) { |
| 369 | assert(memblock != NULL, "NULL pointer" ); |
| 370 | MallocHeader* = (MallocHeader*)((char*)memblock - sizeof(MallocHeader)); |
| 371 | return header; |
| 372 | } |
| 373 | }; |
| 374 | |
| 375 | #endif // INCLUDE_NMT |
| 376 | |
| 377 | |
| 378 | #endif // SHARE_SERVICES_MALLOCTRACKER_HPP |
| 379 | |