/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "memory/allocation.hpp"
#include "services/mallocTracker.hpp"
#include "services/memReporter.hpp"
#include "services/threadStackTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/globalDefinitions.hpp"

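// Malloc'd memory and arenas are always backed by committed memory, so they count
// toward both totals below; only the virtual memory component differs between
// "reserved" and "committed".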
size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
  return malloc->malloc_size() + malloc->arena_size() + vm->reserved();
}

size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
  return malloc->malloc_size() + malloc->arena_size() + vm->committed();
}

void MemReporterBase::print_total(size_t reserved, size_t committed) const {
  const char* scale = current_scale();
  output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
}

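// Prints a single malloc entry, e.g. "(malloc=120KB type=Thread #330)".
// For thread stacks (mtThreadStack) the "malloc=" prefix is omitted.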
void MemReporterBase::print_malloc(size_t amount, size_t count, MEMFLAGS flag) const {
  const char* scale = current_scale();
  outputStream* out = output();
  const char* alloc_type = (flag == mtThreadStack) ? "" : "malloc=";

  if (flag != mtNone) {
    out->print("(%s" SIZE_FORMAT "%s type=%s", alloc_type,
      amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag));
  } else {
    out->print("(%s" SIZE_FORMAT "%s", alloc_type,
      amount_in_current_scale(amount), scale);
  }

  if (count > 0) {
    out->print(" #" SIZE_FORMAT "", count);
  }

  out->print(")");
}

void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const {
  const char* scale = current_scale();
  output()->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s)",
    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
}

void MemReporterBase::print_malloc_line(size_t amount, size_t count) const {
  output()->print("%28s", " ");
  print_malloc(amount, count);
  output()->print_cr(" ");
}

void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const {
  output()->print("%28s", " ");
  print_virtual_memory(reserved, committed);
  output()->print_cr(" ");
}

void MemReporterBase::print_arena_line(size_t amount, size_t count) const {
  const char* scale = current_scale();
  output()->print_cr("%27s (arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")", " ",
    amount_in_current_scale(amount), scale, count);
}

void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const {
  const char* scale = current_scale();
  output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s",
    p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale);
}


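// Summary report: the overall reserved/committed totals followed by one section per
// memory type. Malloc'd memory is included in full in both totals.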
void MemSummaryReporter::report() {
  const char* scale = current_scale();
  outputStream* out = output();
  size_t total_reserved_amount = _malloc_snapshot->total() +
    _vm_snapshot->total_reserved();
  size_t total_committed_amount = _malloc_snapshot->total() +
    _vm_snapshot->total_committed();

  // Overall total
  out->print_cr("\nNative Memory Tracking:\n");
  out->print("Total: ");
  print_total(total_reserved_amount, total_committed_amount);
  out->print("\n");

  // Summary by memory type
  for (int index = 0; index < mt_number_of_types; index ++) {
    MEMFLAGS flag = NMTUtil::index_to_flag(index);
    // Thread stacks are reported as part of the Thread category
    if (flag == mtThreadStack) continue;
    MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
    VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);

    report_summary_of_type(flag, malloc_memory, virtual_memory);
  }
}

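// Prints one summary section for a memory type, roughly of the form:
//
// -                    Thread (reserved=20217KB, committed=20217KB)
//                             (thread #21)
//                             (stack: reserved=20124KB, committed=20124KB)
//                             (malloc=67KB #110)
//                             (arena=26KB #40)
//
// Thread stacks are folded into the Thread category and malloc tracking headers into
// the NMT category. Types whose reserved amount rounds to zero in the current scale
// are skipped.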
void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
  MallocMemory* malloc_memory, VirtualMemory* virtual_memory) {

  size_t reserved_amount = reserved_total (malloc_memory, virtual_memory);
  size_t committed_amount = committed_total(malloc_memory, virtual_memory);

  // Count thread native stacks in the "Thread" category
  if (flag == mtThread) {
    if (ThreadStackTracker::track_as_vm()) {
      const VirtualMemory* thread_stack_usage =
        (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
      reserved_amount += thread_stack_usage->reserved();
      committed_amount += thread_stack_usage->committed();
    } else {
      const MallocMemory* thread_stack_usage =
        (const MallocMemory*)_malloc_snapshot->by_type(mtThreadStack);
      reserved_amount += thread_stack_usage->malloc_size();
      committed_amount += thread_stack_usage->malloc_size();
    }
  } else if (flag == mtNMT) {
    // Count malloc headers in the "NMT" category
    reserved_amount += _malloc_snapshot->malloc_overhead()->size();
    committed_amount += _malloc_snapshot->malloc_overhead()->size();
  }

  if (amount_in_current_scale(reserved_amount) > 0) {
    outputStream* out = output();
    const char* scale = current_scale();
    out->print("-%26s (", NMTUtil::flag_to_name(flag));
    print_total(reserved_amount, committed_amount);
    out->print_cr(")");

    if (flag == mtClass) {
      // report class count
      out->print_cr("%27s (classes #" SIZE_FORMAT ")",
        " ", (_instance_class_count + _array_class_count));
      out->print_cr("%27s ( instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
        " ", _instance_class_count, _array_class_count);
    } else if (flag == mtThread) {
      if (ThreadStackTracker::track_as_vm()) {
        const VirtualMemory* thread_stack_usage =
          _vm_snapshot->by_type(mtThreadStack);
        // report thread count
        out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", ThreadStackTracker::thread_count());
        out->print("%27s (stack: ", " ");
        print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
      } else {
        MallocMemory* thread_stack_memory = _malloc_snapshot->by_type(mtThreadStack);
        const char* scale = current_scale();
        // report thread count
        assert(ThreadStackTracker::thread_count() == 0, "Not used");
        out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", thread_stack_memory->malloc_count());
        out->print("%27s (Stack: " SIZE_FORMAT "%s", " ",
          amount_in_current_scale(thread_stack_memory->malloc_size()), scale);
      }
      out->print_cr(")");
    }

    // report malloc'd memory
    if (amount_in_current_scale(malloc_memory->malloc_size()) > 0) {
      // We don't know how many arena chunks are in use, so don't report the count
      size_t count = (flag == mtChunk) ? 0 : malloc_memory->malloc_count();
      print_malloc_line(malloc_memory->malloc_size(), count);
    }

    if (amount_in_current_scale(virtual_memory->reserved()) > 0) {
      print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed());
    }

    if (amount_in_current_scale(malloc_memory->arena_size()) > 0) {
      print_arena_line(malloc_memory->arena_size(), malloc_memory->arena_count());
    }

    if (flag == mtNMT &&
      amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) {
      out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
        amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
    } else if (flag == mtClass) {
      // Metadata information
      report_metadata(Metaspace::NonClassType);
      if (Metaspace::using_class_space()) {
        report_metadata(Metaspace::ClassType);
      }
    }
    out->print_cr(" ");
  }
}

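// Reports metaspace statistics for one metadata type. "free" is the unused part of the
// current capacity plus free chunks and unused virtual space; "waste" is whatever
// committed memory is left after used and free are accounted for.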
void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const {
  assert(type == Metaspace::NonClassType || type == Metaspace::ClassType,
    "Invalid metadata type");
  const char* name = (type == Metaspace::NonClassType) ?
    "Metadata: " : "Class space:";

  outputStream* out = output();
  const char* scale = current_scale();
  size_t committed = MetaspaceUtils::committed_bytes(type);
  size_t used = MetaspaceUtils::used_bytes(type);
  size_t free = (MetaspaceUtils::capacity_bytes(type) - used)
    + MetaspaceUtils::free_chunks_total_bytes(type)
    + MetaspaceUtils::free_in_vs_bytes(type);

  assert(committed >= used + free, "Sanity");
  size_t waste = committed - (used + free);

  out->print_cr("%27s ( %s)", " ", name);
  out->print("%27s ( ", " ");
  print_total(MetaspaceUtils::reserved_bytes(type), committed);
  out->print_cr(")");
  out->print_cr("%27s ( used=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(used), scale);
  out->print_cr("%27s ( free=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(free), scale);
  out->print_cr("%27s ( waste=" SIZE_FORMAT "%s =%2.2f%%)", " ", amount_in_current_scale(waste),
    scale, ((float)waste * 100)/committed);
}

void MemDetailReporter::report_detail() {
  // Start detail report
  outputStream* out = output();
  out->print_cr("Details:\n");

  report_malloc_sites();
  report_virtual_memory_allocation_sites();
}

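// Walks malloc call sites ordered by size and prints each site's call stack followed
// by its malloc line. Sites that round to zero in the current scale are skipped.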
void MemDetailReporter::report_malloc_sites() {
  MallocSiteIterator malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
  if (malloc_itr.is_empty()) return;

  outputStream* out = output();

  const MallocSite* malloc_site;
  while ((malloc_site = malloc_itr.next()) != NULL) {
    // Don't report if size is too small
    if (amount_in_current_scale(malloc_site->size()) == 0)
      continue;

    const NativeCallStack* stack = malloc_site->call_stack();
    stack->print_on(out);
    out->print("%29s", " ");
    MEMFLAGS flag = malloc_site->flag();
    assert((flag >= 0 && flag < (int)mt_number_of_types) && flag != mtNone,
      "Must have a valid memory type");
    print_malloc(malloc_site->size(), malloc_site->count(), flag);
    out->print_cr("\n");
  }
}

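// Walks virtual memory reservation sites ordered by reserved size and prints each
// site's call stack with its reserved/committed totals and memory type.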
void MemDetailReporter::report_virtual_memory_allocation_sites() {
  VirtualMemorySiteIterator virtual_memory_itr =
    _baseline.virtual_memory_sites(MemBaseline::by_size);

  if (virtual_memory_itr.is_empty()) return;

  outputStream* out = output();
  const VirtualMemoryAllocationSite* virtual_memory_site;

  while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) {
    // Don't report if size is too small
    if (amount_in_current_scale(virtual_memory_site->reserved()) == 0)
      continue;

    const NativeCallStack* stack = virtual_memory_site->call_stack();
    stack->print_on(out);
    out->print("%28s (", " ");
    print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
    MEMFLAGS flag = virtual_memory_site->flag();
    if (flag != mtNone) {
      out->print(" Type=%s", NMTUtil::flag_to_name(flag));
    }
    out->print_cr(")\n");
  }
}


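// Prints the virtual memory map: every reserved region, in base address order, with
// its committed sub-regions.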
void MemDetailReporter::report_virtual_memory_map() {
  // Virtual memory map always in base address order
  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
  const ReservedMemoryRegion* rgn;

  output()->print_cr("Virtual memory map:");
  while ((rgn = itr.next()) != NULL) {
    report_virtual_memory_region(rgn);
  }
}

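// Prints one reserved region with its allocating call stack, followed by its committed
// sub-regions. If a single committed region covers the whole reservation from the same
// call site, the sub-region list is elided; the "reserved and committed" header line
// already says it all.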
void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
  assert(reserved_rgn != NULL, "NULL pointer");

  // Don't report if size is too small
  if (amount_in_current_scale(reserved_rgn->size()) == 0) return;

  outputStream* out = output();
  const char* scale = current_scale();
  const NativeCallStack* stack = reserved_rgn->call_stack();
  bool all_committed = reserved_rgn->size() == reserved_rgn->committed_size();
  const char* region_type = (all_committed ? "reserved and committed" : "reserved");
  out->print_cr(" ");
  print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
  if (stack->is_empty()) {
    out->print_cr(" ");
  } else {
    out->print_cr(" from");
    stack->print_on(out, 4);
  }

  if (all_committed) {
    CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
    const CommittedMemoryRegion* committed_rgn = itr.next();
    if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) {
      // One committed region spanning the entire reserved region, with the same stack trace.
      // Don't print this region because the "reserved and committed" line above
      // already indicates that the region is committed.
      assert(itr.next() == NULL, "Unexpectedly more than one region");
      return;
    }
  }

  CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
  const CommittedMemoryRegion* committed_rgn;
  while ((committed_rgn = itr.next()) != NULL) {
    // Don't report if size is too small
    if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
    stack = committed_rgn->call_stack();
    out->print("\n\t");
    print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
    if (stack->is_empty()) {
      out->print_cr(" ");
    } else {
      out->print_cr(" from");
      stack->print_on(out, 12);
    }
  }
}

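// Summary diff report: the overall totals of the current baseline with signed deltas
// against the early baseline, then one diff section per memory type.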
void MemSummaryDiffReporter::report_diff() {
  const char* scale = current_scale();
  outputStream* out = output();
  out->print_cr("\nNative Memory Tracking:\n");

  // Overall diff
  out->print("Total: ");
  print_virtual_memory_diff(_current_baseline.total_reserved_memory(),
    _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(),
    _early_baseline.total_committed_memory());

  out->print_cr("\n");

  // Summary diff by memory type
  for (int index = 0; index < mt_number_of_types; index ++) {
    MEMFLAGS flag = NMTUtil::index_to_flag(index);
    // Thread stacks are reported as part of the Thread category
    if (flag == mtThreadStack) continue;
    diff_summary_of_type(flag,
      _early_baseline.malloc_memory(flag),
      _early_baseline.virtual_memory(flag),
      _early_baseline.metaspace_snapshot(),
      _current_baseline.malloc_memory(flag),
      _current_baseline.virtual_memory(flag),
      _current_baseline.metaspace_snapshot());
  }
}

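// Prints the current malloc amount (with a signed delta when it changed at the current
// scale) and the allocation count (with a signed delta when it changed).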
void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
    size_t early_amount, size_t early_count, MEMFLAGS flags) const {
  const char* scale = current_scale();
  outputStream* out = output();
  const char* alloc_type = (flags == mtThread) ? "" : "malloc=";

  out->print("%s" SIZE_FORMAT "%s", alloc_type, amount_in_current_scale(current_amount), scale);
  // Report type only if it is valid and not under "thread" category
  if (flags != mtNone && flags != mtThread) {
    out->print(" type=%s", NMTUtil::flag_to_name(flags));
  }

  long amount_diff = diff_in_current_scale(current_amount, early_amount);
  if (amount_diff != 0) {
    out->print(" %+ld%s", amount_diff, scale);
  }
  if (current_count > 0) {
    out->print(" #" SIZE_FORMAT "", current_count);
    if (current_count != early_count) {
      out->print(" %+d", (int)(current_count - early_count));
    }
  }
}

void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count,
    size_t early_amount, size_t early_count) const {
  const char* scale = current_scale();
  outputStream* out = output();
  out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
  if (diff_in_current_scale(current_amount, early_amount) != 0) {
    out->print(" %+ld", diff_in_current_scale(current_amount, early_amount));
  }

  out->print(" #" SIZE_FORMAT "", current_count);
  if (current_count != early_count) {
    out->print(" %+d", (int)(current_count - early_count));
  }
}

void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
    size_t early_reserved, size_t early_committed) const {
  const char* scale = current_scale();
  outputStream* out = output();
  out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), scale);
  long reserved_diff = diff_in_current_scale(current_reserved, early_reserved);
  if (reserved_diff != 0) {
    out->print(" %+ld%s", reserved_diff, scale);
  }

  out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), scale);
  long committed_diff = diff_in_current_scale(current_committed, early_committed);
  if (committed_diff != 0) {
    out->print(" %+ld%s", committed_diff, scale);
  }
}


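// Prints the diff section for one memory type. As in the summary report, thread stacks
// are folded into the Thread category and malloc tracking overhead into the NMT
// category. A type is printed if its current reserved amount is non-zero at the
// current scale or if it changed since the early baseline.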
void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
    const MallocMemory* early_malloc, const VirtualMemory* early_vm,
    const MetaspaceSnapshot* early_ms,
    const MallocMemory* current_malloc, const VirtualMemory* current_vm,
    const MetaspaceSnapshot* current_ms) const {

  outputStream* out = output();
  const char* scale = current_scale();

  // Total reserved and committed memory in current baseline
  size_t current_reserved_amount = reserved_total (current_malloc, current_vm);
  size_t current_committed_amount = committed_total(current_malloc, current_vm);

  // Total reserved and committed memory in early baseline
  size_t early_reserved_amount = reserved_total(early_malloc, early_vm);
  size_t early_committed_amount = committed_total(early_malloc, early_vm);

  // Adjust virtual memory total
  if (flag == mtThread) {
    const VirtualMemory* early_thread_stack_usage =
      _early_baseline.virtual_memory(mtThreadStack);
    const VirtualMemory* current_thread_stack_usage =
      _current_baseline.virtual_memory(mtThreadStack);

    early_reserved_amount += early_thread_stack_usage->reserved();
    early_committed_amount += early_thread_stack_usage->committed();

    current_reserved_amount += current_thread_stack_usage->reserved();
    current_committed_amount += current_thread_stack_usage->committed();
  } else if (flag == mtNMT) {
    early_reserved_amount += _early_baseline.malloc_tracking_overhead();
    early_committed_amount += _early_baseline.malloc_tracking_overhead();

    current_reserved_amount += _current_baseline.malloc_tracking_overhead();
    current_committed_amount += _current_baseline.malloc_tracking_overhead();
  }

  if (amount_in_current_scale(current_reserved_amount) > 0 ||
      diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {

    // print summary line
    out->print("-%26s (", NMTUtil::flag_to_name(flag));
    print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
      early_reserved_amount, early_committed_amount);
    out->print_cr(")");

    // detail lines
    if (flag == mtClass) {
      // report class count
      out->print("%27s (classes #" SIZE_FORMAT "", " ", _current_baseline.class_count());
      int class_count_diff = (int)(_current_baseline.class_count() -
        _early_baseline.class_count());
      if (_current_baseline.class_count() != _early_baseline.class_count()) {
        out->print(" %+d", (int)(_current_baseline.class_count() - _early_baseline.class_count()));
      }
      out->print_cr(")");

      out->print("%27s ( instance classes #" SIZE_FORMAT, " ", _current_baseline.instance_class_count());
      if (_current_baseline.instance_class_count() != _early_baseline.instance_class_count()) {
        out->print(" %+d", (int)(_current_baseline.instance_class_count() - _early_baseline.instance_class_count()));
      }
      out->print(", array classes #" SIZE_FORMAT, _current_baseline.array_class_count());
      if (_current_baseline.array_class_count() != _early_baseline.array_class_count()) {
        out->print(" %+d", (int)(_current_baseline.array_class_count() - _early_baseline.array_class_count()));
      }
      out->print_cr(")");

    } else if (flag == mtThread) {
      // report thread count
      out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count());
      int thread_count_diff = (int)(_current_baseline.thread_count() -
        _early_baseline.thread_count());
      if (thread_count_diff != 0) {
        out->print(" %+d", thread_count_diff);
      }
      out->print_cr(")");

      out->print("%27s (stack: ", " ");
      if (ThreadStackTracker::track_as_vm()) {
        // report thread stack
        const VirtualMemory* current_thread_stack =
          _current_baseline.virtual_memory(mtThreadStack);
        const VirtualMemory* early_thread_stack =
          _early_baseline.virtual_memory(mtThreadStack);

        print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
          early_thread_stack->reserved(), early_thread_stack->committed());
      } else {
        const MallocMemory* current_thread_stack =
          _current_baseline.malloc_memory(mtThreadStack);
        const MallocMemory* early_thread_stack =
          _early_baseline.malloc_memory(mtThreadStack);

        print_malloc_diff(current_thread_stack->malloc_size(), current_thread_stack->malloc_count(),
          early_thread_stack->malloc_size(), early_thread_stack->malloc_count(), flag);
      }
      out->print_cr(")");
    }

    // Report malloc'd memory
    size_t current_malloc_amount = current_malloc->malloc_size();
    size_t early_malloc_amount = early_malloc->malloc_size();
    if (amount_in_current_scale(current_malloc_amount) > 0 ||
        diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
      out->print("%28s(", " ");
      print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
        early_malloc_amount, early_malloc->malloc_count(), mtNone);
      out->print_cr(")");
    }

    // Report virtual memory
    if (amount_in_current_scale(current_vm->reserved()) > 0 ||
        diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) {
      out->print("%27s (mmap: ", " ");
      print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(),
        early_vm->reserved(), early_vm->committed());
      out->print_cr(")");
    }

    // Report arena memory
    if (amount_in_current_scale(current_malloc->arena_size()) > 0 ||
        diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) {
      out->print("%28s(", " ");
      print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(),
        early_malloc->arena_size(), early_malloc->arena_count());
      out->print_cr(")");
    }

    // Report native memory tracking overhead
    if (flag == mtNMT) {
      size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
      size_t early_tracking_overhead = amount_in_current_scale(_early_baseline.malloc_tracking_overhead());

      out->print("%27s (tracking overhead=" SIZE_FORMAT "%s", " ",
        amount_in_current_scale(_current_baseline.malloc_tracking_overhead()), scale);

      long overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(),
        _early_baseline.malloc_tracking_overhead());
      if (overhead_diff != 0) {
        out->print(" %+ld%s", overhead_diff, scale);
      }
      out->print_cr(")");
    } else if (flag == mtClass) {
      assert(current_ms != NULL && early_ms != NULL, "Sanity");
      print_metaspace_diff(current_ms, early_ms);
    }
    out->print_cr(" ");
  }
}

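// Metaspace diff: non-class metadata always, class space only when a separate class
// space is in use.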
void MemSummaryDiffReporter::print_metaspace_diff(const MetaspaceSnapshot* current_ms,
    const MetaspaceSnapshot* early_ms) const {
  print_metaspace_diff(Metaspace::NonClassType, current_ms, early_ms);
  if (Metaspace::using_class_space()) {
    print_metaspace_diff(Metaspace::ClassType, current_ms, early_ms);
  }
}

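// Per-metadata-type diff: reserved/committed, used, free and waste, where waste is
// committed memory not accounted for as used or free.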
void MemSummaryDiffReporter::print_metaspace_diff(Metaspace::MetadataType type,
    const MetaspaceSnapshot* current_ms,
    const MetaspaceSnapshot* early_ms) const {
  const char* name = (type == Metaspace::NonClassType) ?
    "Metadata: " : "Class space:";

  outputStream* out = output();
  const char* scale = current_scale();

  out->print_cr("%27s ( %s)", " ", name);
  out->print("%27s ( ", " ");
  print_virtual_memory_diff(current_ms->reserved_in_bytes(type),
    current_ms->committed_in_bytes(type),
    early_ms->reserved_in_bytes(type),
    early_ms->committed_in_bytes(type));
  out->print_cr(")");

  long diff_used = diff_in_current_scale(current_ms->used_in_bytes(type),
    early_ms->used_in_bytes(type));
  long diff_free = diff_in_current_scale(current_ms->free_in_bytes(type),
    early_ms->free_in_bytes(type));

  size_t current_waste = current_ms->committed_in_bytes(type)
    - (current_ms->used_in_bytes(type) + current_ms->free_in_bytes(type));
  size_t early_waste = early_ms->committed_in_bytes(type)
    - (early_ms->used_in_bytes(type) + early_ms->free_in_bytes(type));
  long diff_waste = diff_in_current_scale(current_waste, early_waste);

  // Diff used
  out->print("%27s ( used=" SIZE_FORMAT "%s", " ",
    amount_in_current_scale(current_ms->used_in_bytes(type)), scale);
  if (diff_used != 0) {
    out->print(" %+ld%s", diff_used, scale);
  }
  out->print_cr(")");

  // Diff free
  out->print("%27s ( free=" SIZE_FORMAT "%s", " ",
    amount_in_current_scale(current_ms->free_in_bytes(type)), scale);
  if (diff_free != 0) {
    out->print(" %+ld%s", diff_free, scale);
  }
  out->print_cr(")");

  // Diff waste
  out->print("%27s ( waste=" SIZE_FORMAT "%s =%2.2f%%", " ",
    amount_in_current_scale(current_waste), scale,
    ((float)current_waste * 100) / current_ms->committed_in_bytes(type));
  if (diff_waste != 0) {
    out->print(" %+ld%s", diff_waste, scale);
  }
  out->print_cr(")");
}

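// Detail diff: the summary diff followed by per-call-site diffs of malloc and virtual
// memory allocations.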
void MemDetailDiffReporter::report_diff() {
  MemSummaryDiffReporter::report_diff();
  diff_malloc_sites();
  diff_virtual_memory_sites();
}

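// Both baselines enumerate their malloc sites sorted by call site and type, so the
// diff is a merge of two sorted streams: sites present only in the current baseline
// are diffed against zero, sites present only in the early baseline are diffed down
// to zero, and matching sites are diffed directly.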
void MemDetailDiffReporter::diff_malloc_sites() const {
  MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site_and_type);
  MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site_and_type);

  const MallocSite* early_site = early_itr.next();
  const MallocSite* current_site = current_itr.next();

  while (early_site != NULL || current_site != NULL) {
    if (early_site == NULL) {
      new_malloc_site(current_site);
      current_site = current_itr.next();
    } else if (current_site == NULL) {
      old_malloc_site(early_site);
      early_site = early_itr.next();
    } else {
      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
      if (compVal < 0) {
        new_malloc_site(current_site);
        current_site = current_itr.next();
      } else if (compVal > 0) {
        old_malloc_site(early_site);
        early_site = early_itr.next();
      } else {
        diff_malloc_site(early_site, current_site);
        early_site = early_itr.next();
        current_site = current_itr.next();
      }
    }
  }
}

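// Same merge walk as diff_malloc_sites, over virtual memory allocation sites sorted
// by call site.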
void MemDetailDiffReporter::diff_virtual_memory_sites() const {
  VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site);
  VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site);

  const VirtualMemoryAllocationSite* early_site = early_itr.next();
  const VirtualMemoryAllocationSite* current_site = current_itr.next();

  while (early_site != NULL || current_site != NULL) {
    if (early_site == NULL) {
      new_virtual_memory_site(current_site);
      current_site = current_itr.next();
    } else if (current_site == NULL) {
      old_virtual_memory_site(early_site);
      early_site = early_itr.next();
    } else {
      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
      if (compVal < 0) {
        new_virtual_memory_site(current_site);
        current_site = current_itr.next();
      } else if (compVal > 0) {
        old_virtual_memory_site(early_site);
        early_site = early_itr.next();
      } else {
        diff_virtual_memory_site(early_site, current_site);
        early_site = early_itr.next();
        current_site = current_itr.next();
      }
    }
  }
}


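// Helpers feeding the merge above: a site seen in only one baseline is diffed against
// zero values for the other.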
void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
  diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
    0, 0, malloc_site->flag());
}

void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
  diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
    malloc_site->count(), malloc_site->flag());
}

void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
    const MallocSite* current) const {
  if (early->flag() != current->flag()) {
    // If malloc site type changed, treat it as deallocation of old type and
    // allocation of new type.
    old_malloc_site(early);
    new_malloc_site(current);
  } else {
    diff_malloc_site(current->call_stack(), current->size(), current->count(),
      early->size(), early->count(), early->flag());
  }
}

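// Prints the per-site malloc diff, but only if the size actually changed at the
// current reporting scale.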
void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
    size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const {
  outputStream* out = output();

  assert(stack != NULL, "NULL stack");

  if (diff_in_current_scale(current_size, early_size) == 0) {
    return;
  }

  stack->print_on(out);
  out->print("%28s (", " ");
  print_malloc_diff(current_size, current_count,
    early_size, early_count, flags);

  out->print_cr(")\n");
}


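// Virtual memory counterparts of the malloc site helpers above.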
void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
  diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->flag());
}

void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
  diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->flag());
}

void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
    const VirtualMemoryAllocationSite* current) const {
  assert(early->flag() == current->flag(), "Should be the same");
  diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
    early->reserved(), early->committed(), current->flag());
}

void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
    size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const {
  outputStream* out = output();

  // no change
  if (diff_in_current_scale(current_reserved, early_reserved) == 0 &&
      diff_in_current_scale(current_committed, early_committed) == 0) {
    return;
  }

  stack->print_on(out);
  out->print("%28s (mmap: ", " ");
  print_virtual_memory_diff(current_reserved, current_committed,
    early_reserved, early_committed);

  if (flag != mtNone) {
    out->print(" Type=%s", NMTUtil::flag_to_name(flag));
  }

  out->print_cr(")\n");
}