/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "jvm.h"

#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "services/threadStackTracker.hpp"
#include "utilities/debug.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/vmError.hpp"

#ifdef _WINDOWS
#include <windows.h>
#endif

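// Whether the native stack is currently safe to walk when capturing NMT call
// stacks. On Solaris it starts out false (early-startup stack walking is
// presumably not reliable there) and is expected to be flipped to true later
// during VM initialization; elsewhere it is assumed walkable from the start.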
#ifdef SOLARIS
  volatile bool NMT_stack_walkable = false;
#else
  volatile bool NMT_stack_walkable = true;
#endif

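// The active tracking level (may be lowered at runtime, never raised) and the
// level that was requested on the command line, kept for diagnostics.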
volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

MemBaseline MemTracker::_baseline;
bool MemTracker::_is_nmt_env_valid = true;

static const size_t buffer_size = 64;

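// Bootstrap the tracking level. This runs very early, before argument parsing,
// so the requested level is handed over by the launcher through the
// NMT_LEVEL_<pid> environment variable rather than through -XX flags.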
NMT_TrackingLevel MemTracker::init_tracking_level() {
  // Memory type is encoded into tracking header as a byte field,
  // make sure that we don't overflow it.
  STATIC_ASSERT(mt_number_of_types <= max_jubyte);

  char nmt_env_variable[buffer_size];
  jio_snprintf(nmt_env_variable, sizeof(nmt_env_variable), "NMT_LEVEL_%d", os::current_process_id());
  const char* nmt_env_value;
#ifdef _WINDOWS
  // Read the NMT environment variable from the PEB instead of the CRT
  char value[buffer_size];
  nmt_env_value = GetEnvironmentVariable(nmt_env_variable, value, (DWORD)sizeof(value)) != 0 ? value : NULL;
#else
  nmt_env_value = ::getenv(nmt_env_variable);
#endif
  NMT_TrackingLevel level = NMT_off;
  if (nmt_env_value != NULL) {
    if (strcmp(nmt_env_value, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_env_value, "detail") == 0) {
      level = NMT_detail;
    } else if (strcmp(nmt_env_value, "off") != 0) {
      // The value of the environment variable is invalid
      _is_nmt_env_valid = false;
    }
    // Remove the environment variable to avoid leaking to child processes
    os::unsetenv(nmt_env_variable);
  }

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    level = NMT_off;
  }
  return level;
}

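// Late initialization, called once the VM can allocate from the C heap. If the
// heap-backed tracker structures cannot be set up, NMT is shut down.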
void MemTracker::init() {
  NMT_TrackingLevel level = tracking_level();
  if (level >= NMT_summary) {
    if (!VirtualMemoryTracker::late_initialize(level) ||
        !ThreadStackTracker::late_initialize(level)) {
      shutdown();
      return;
    }
  }
}

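// Cross-check the level the launcher asked for (passed in as "=summary",
// "=detail" or "=off") against the level the VM actually runs with. Returns
// false on a mismatch; an unrecognized value only marks the option invalid.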
bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}

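// Whether the NMT value handed over at startup was well-formed.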
bool MemTracker::verify_nmt_option() {
  return _is_nmt_env_valid;
}

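// Given a block handed out to a client by os::malloc, return the start of the
// underlying raw allocation (i.e. the address below the NMT malloc header).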
void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}

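// Completes recording of a virtual memory uncommit or release once the
// corresponding OS operation has succeeded.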
void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  switch(_type) {
    case uncommit:
      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
      break;
    case release:
      VirtualMemoryTracker::remove_released_region(addr, size);
      break;
    default:
      ShouldNotReachHere();
  }
}


// Shutdown can only be issued via jcmd, and the NMT jcmd is serialized by a lock.
void MemTracker::shutdown() {
  // We can only shut down NMT to the minimal tracking level if it was ever on.
  if (tracking_level() > NMT_minimal) {
    transition_to(NMT_minimal);
  }
}

bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrade the tracking level: publish the lowered level first, so new
    // requests stop producing records before the trackers adjust their state.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
    ThreadStackTracker::transition(current_level, level);
  } else {
    // Upgrading the tracking level is not supported and never has been.
    // Allocating and deallocating malloc tracking structures is not thread safe
    // and leads to inconsistencies unless much coarser locks are added.
  }
  return true;
}

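// Produce a summary or detail report on the given stream from a freshly
// collected baseline. The detail report also appends a Metaspace breakdown,
// which is gathered by a VM operation at a safepoint.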
void MemTracker::report(bool summary_only, outputStream* output) {
  assert(output != NULL, "No output stream");
  MemBaseline baseline;
  if (baseline.baseline(summary_only)) {
    if (summary_only) {
      MemSummaryReporter rpt(baseline, output);
      rpt.report();
    } else {
      MemDetailReporter rpt(baseline, output);
      rpt.report();
      output->print("Metaspace:");
      // Metadata reporting requires a safepoint, so avoid it if the VM is not in a good state.
      assert(!VMError::fatal_error_in_progress(), "Do not report metadata in error report");
      VM_PrintMetadata vmop(output, K,
          MetaspaceUtils::rf_show_loaders |
          MetaspaceUtils::rf_break_down_by_spacetype);
      VMThread::execute(&vmop);
    }
  }
}

// This is a walker to gather malloc site hashtable statistics;
// the result is used for tuning.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // aggregates statistics over this threshold into one
    // line item.
    report_threshold = 20
  };

 private:
  // Number of allocation sites that have all memory freed
  int _empty_entries;
  // Total number of allocation sites, including empty sites
  int _total_entries;
  // Distribution of the captured call stack depths
  int _stack_depth_distribution[NMT_TrackingStackDepth];
  // Hash distribution
  int _hash_distribution[report_threshold];
  // Number of hash buckets that have entries over the threshold
  int _bucket_over_threshold;

  // The hash bucket that the walker is currently walking
  int _current_hash_bucket;
  // The length of the current hash bucket
  int _current_bucket_length;
  // Number of hash buckets that are not empty
  int _used_buckets;
  // Longest hash bucket length
  int _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

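  // Visit one malloc site: count empty and total entries, record the call
  // stack depth, and track runs of entries that share a hash bucket so bucket
  // lengths can be computed (this relies on the table walk visiting entries
  // bucket by bucket).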
  virtual bool do_malloc_site(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // stack depth distribution
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // hash distribution
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      _current_bucket_length ++;
    } else {
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // walk completed
  void completed() {
    record_bucket_length(_current_bucket_length);
  }

  void report_statistics(outputStream* out) {
    int index;
    out->print_cr("Malloc allocation site table:");
    out->print_cr("\tTotal entries: %d", _total_entries);
    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
    out->print_cr(" ");
    out->print_cr("Hash distribution:");
    if (_used_buckets < MallocSiteTable::hash_buckets()) {
      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
    for (index = 0; index < report_threshold; index ++) {
      if (_hash_distribution[index] != 0) {
        if (index == 0) {
          out->print_cr("  %d entry: %d", 1, _hash_distribution[0]);
        } else if (index < 9) { // single digit
          out->print_cr("  %d entries: %d", (index + 1), _hash_distribution[index]);
        } else {
          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
        }
      }
    }
    if (_bucket_over_threshold > 0) {
      out->print_cr(" >%d entries: %d", report_threshold, _bucket_over_threshold);
    }
    out->print_cr("most entries: %d", _longest_bucket_length);
    out->print_cr(" ");
    out->print_cr("Call stack depth distribution:");
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      if (_stack_depth_distribution[index] > 0) {
        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
      }
    }
  }

 private:
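  // Fold a completed bucket of the given length into the distribution and
  // update the over-threshold count and the longest-bucket statistic.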
  void record_bucket_length(int length) {
    _used_buckets ++;
    if (length <= report_threshold) {
      _hash_distribution[length - 1] ++;
    } else {
      _bucket_over_threshold ++;
    }
    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  }
};


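// Print internal NMT statistics (malloc site table size, bucket occupancy and
// call stack depth distribution) used for tuning the hash table.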
void MemTracker::tuning_statistics(outputStream* out) {
  // NMT statistics
  StatisticsWalker walker;
  MallocSiteTable::walk_malloc_site(&walker);
  walker.completed();

  out->print_cr("Native Memory Tracking Statistics:");
  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  out->print_cr(" ");
  walker.report_statistics(out);
}