/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SERVICES_MEMTRACKER_HPP
#define SHARE_SERVICES_MEMTRACKER_HPP

#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"


#if !INCLUDE_NMT

#define CURRENT_PC NativeCallStack::empty_stack()
#define CALLER_PC  NativeCallStack::empty_stack()

class Tracker : public StackObj {
 public:
  enum TrackerType {
    uncommit,
    release
  };
  Tracker(enum TrackerType type) : _type(type) { }
  void record(address addr, size_t size) { }
 private:
  enum TrackerType _type;
};

class MemTracker : AllStatic {
 public:
  static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
  static inline void shutdown() { }
  static inline void init() { }
  static bool check_launcher_nmt_support(const char* value) { return true; }
  static bool verify_nmt_option() { return true; }

  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
  static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
  static inline size_t malloc_header_size(void* memblock) { return 0; }
  static inline void* malloc_base(void* memblock) { return memblock; }
  static inline void* record_free(void* memblock) { return memblock; }

  static inline void record_new_arena(MEMFLAGS flag) { }
  static inline void record_arena_free(MEMFLAGS flag) { }
  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
  static inline void record_thread_stack(void* addr, size_t size) { }
  static inline void release_thread_stack(void* addr, size_t size) { }

  static void final_report(outputStream*) { }
  static void error_report(outputStream*) { }
};

#else

#include "runtime/mutexLocker.hpp"
#include "runtime/threadCritical.hpp"
#include "services/mallocTracker.hpp"
#include "services/threadStackTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

extern volatile bool NMT_stack_walkable;

#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
                    NativeCallStack(0, true) : NativeCallStack::empty_stack())
#define CALLER_PC  ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
                    NativeCallStack(1, true) : NativeCallStack::empty_stack())
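
// A minimal usage sketch (not taken verbatim from the HotSpot sources): when
// NMT detail tracking is active and stacks are walkable, CURRENT_PC captures a
// call stack starting at the current frame while CALLER_PC skips one frame so
// the stack starts at the caller; otherwise both yield the empty stack. Such a
// stack is typically passed into an allocation routine, assuming an os::malloc
// overload that accepts a MEMFLAGS category and a NativeCallStack:
//
//   void* buf = os::malloc(1024, mtInternal, CURRENT_PC);  // attributed to this call site
//   ...
//   os::free(buf);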

class MemBaseline;

// Tracker is used to guard the 'release' semantics of a virtual memory operation,
// preventing another thread from obtaining and recording the same region that was
// just 'released' by the current thread before the current thread records the operation.
class Tracker : public StackObj {
 public:
  enum TrackerType {
    uncommit,
    release
  };

 public:
  Tracker(enum TrackerType type) : _type(type) { }
  void record(address addr, size_t size);
 private:
  enum TrackerType _type;
  // Virtual memory tracking data structures are protected by the ThreadCritical lock.
  ThreadCritical   _tc;
};
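
// A minimal usage sketch (not taken verbatim from the HotSpot sources): the
// Tracker is constructed before the underlying uncommit/release call and
// record() is invoked once that call has succeeded, so the ThreadCritical held
// by the Tracker keeps other threads from re-reserving and recording the same
// range in between. The releasing call below is a hypothetical stand-in for
// whatever actually returns the memory to the OS:
//
//   Tracker tkr(Tracker::release);
//   if (raw_release(addr, bytes)) {          // hypothetical platform call
//     tkr.record((address)addr, bytes);
//   }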

class MemTracker : AllStatic {
  friend class VirtualMemoryTrackerTest;

 public:
  static inline NMT_TrackingLevel tracking_level() {
    if (_tracking_level == NMT_unknown) {
      // No fencing is needed here, since the JVM is still in
      // single-threaded mode.
      _tracking_level = init_tracking_level();
      _cmdline_tracking_level = _tracking_level;
    }
    return _tracking_level;
  }

  // Late initialization, for work that cannot be done in
  // init_tracking_level(), which must NOT malloc any memory.
  static void init();

  // Shut down native memory tracking.
  static void shutdown();

  // Verify the native memory tracking command line option.
  // This check allows the JVM to detect whether a compatible
  // launcher is used.
  // If an incompatible launcher is used, NMT may not be able to
  // start, even if it is enabled by the command line option.
  // A warning message is given if this is encountered.
  static bool check_launcher_nmt_support(const char* value);

  // This method checks the native memory tracking environment
  // variable value passed by the launcher.
  // The launcher is only obligated to pass the native memory
  // tracking option value, not to validate it, and it may drop
  // the native memory tracking option from the command line once
  // it has set up the environment variable, so NMT has to catch
  // a bad value here.
  static bool verify_nmt_option();

  // Transition the tracking level to the specified level.
  static bool transition_to(NMT_TrackingLevel level);

  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) {
    return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
  }

  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return MallocTracker::malloc_header_size(level);
  }

  static size_t malloc_header_size(void* memblock) {
    if (tracking_level() != NMT_off) {
      return MallocTracker::get_header_size(memblock);
    }
    return 0;
  }

  // Returns the malloc base address, which is the starting address
  // of the malloc tracking header if tracking is enabled;
  // otherwise, it returns the same address.
  static void* malloc_base(void* memblock);

  // Record a malloc free and return the malloc base address.
  static inline void* record_free(void* memblock) {
    return MallocTracker::record_free(memblock);
  }
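
  // A minimal sketch (not taken verbatim from the HotSpot sources) of how an
  // allocator wrapper is expected to pair the malloc hooks above: it allocates
  // extra space for the tracking header, lets record_malloc() fill the header
  // in and return the user pointer, and on free translates the user pointer
  // back to the base address before handing it to the C runtime. 'level',
  // 'flag' and 'stack' are placeholders for the caller's tracking level,
  // memory category and captured call stack.
  //
  //   size_t outer  = size + MemTracker::malloc_header_size(level);
  //   void*  base   = ::malloc(outer);                            // raw allocation
  //   void*  user   = MemTracker::record_malloc(base, size, flag, stack, level);
  //   ...
  //   void* to_free = MemTracker::record_free(user);              // back to the base address
  //   ::free(to_free);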


  // Record creation of an arena
  static inline void record_new_arena(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_new_arena(flag);
  }

  // Record destruction of an arena
  static inline void record_arena_free(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_free(flag);
  }

  // Record an arena size change. The arena size is the total size of
  // all arena chunks backing the arena.
  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_size_change(diff, flag);
  }
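
  // A minimal sketch (not taken verbatim from the HotSpot sources) of how an
  // arena reports its lifetime and growth through the three hooks above;
  // 'chunk_size' is a placeholder for the size of the chunk being added or
  // removed, and mtThread is just one possible MEMFLAGS category:
  //
  //   MemTracker::record_new_arena(mtThread);                            // arena created
  //   MemTracker::record_arena_size_change((int)chunk_size, mtThread);   // arena grew
  //   MemTracker::record_arena_size_change(-(int)chunk_size, mtThread);  // arena shrank
  //   MemTracker::record_arena_free(mtThread);                           // arena destroyed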

  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck to avoid potential racing during NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
    }
  }

  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
    }
  }
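
  // A minimal sketch (not taken verbatim from the HotSpot sources) of how the
  // virtual memory hooks are paired with the underlying reserve/commit
  // operations; the raw_* calls are hypothetical stand-ins for the platform
  // primitives, and mtGC is just one possible MEMFLAGS category:
  //
  //   char* base = raw_reserve(bytes);                      // hypothetical platform call
  //   MemTracker::record_virtual_memory_reserve(base, bytes, CALLER_PC, mtGC);
  //   ...
  //   raw_commit(base, bytes);                              // hypothetical platform call
  //   MemTracker::record_virtual_memory_commit(base, bytes, CALLER_PC);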

  static inline void record_virtual_memory_commit(void* addr, size_t size,
    const NativeCallStack& stack) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
    }
  }

  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
    }
  }

  static void record_thread_stack(void* addr, size_t size) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadStackTracker::new_thread_stack((address)addr, size, CALLER_PC);
    }
  }

  static inline void release_thread_stack(void* addr, size_t size) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadStackTracker::delete_thread_stack((address)addr, size);
    }
  }
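
  // A minimal sketch (not taken verbatim from the HotSpot sources): a thread
  // registers its stack once the base and size are known and unregisters it on
  // exit; 'stack_base' and 'stack_size' are placeholders for the values the
  // thread implementation already tracks:
  //
  //   MemTracker::record_thread_stack(stack_base, stack_size);    // at thread start
  //   ...
  //   MemTracker::release_thread_stack(stack_base, stack_size);   // at thread exit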

  // The query lock is used to synchronize access to tracking data.
  // So far, it is only used by the JCmd query, but it may be used by
  // other tools.
  static inline Mutex* query_lock() {
    assert(NMTQuery_lock != NULL, "not initialized!");
    return NMTQuery_lock;
  }

  // Make a final report, or a report for the hs_err file.
  static void error_report(outputStream* output) {
    if (tracking_level() >= NMT_summary) {
      report(true, output);  // just print the summary for the error case
    }
  }

  static void final_report(outputStream* output) {
    NMT_TrackingLevel level = tracking_level();
    if (level >= NMT_summary) {
      report(level == NMT_summary, output);
    }
  }


  // Stored baseline
  static inline MemBaseline& get_baseline() {
    return _baseline;
  }

  static NMT_TrackingLevel cmdline_tracking_level() {
    return _cmdline_tracking_level;
  }

  static void tuning_statistics(outputStream* out);

 private:
  static NMT_TrackingLevel init_tracking_level();
  static void report(bool summary_only, outputStream* output);

 private:
  // Tracking level
  static volatile NMT_TrackingLevel _tracking_level;
  // Whether the NMT option value passed by the launcher through the
  // environment variable is valid
  static bool                       _is_nmt_env_valid;
  // Command line tracking level
  static NMT_TrackingLevel          _cmdline_tracking_level;
  // Stored baseline
  static MemBaseline                _baseline;
  // Query lock
  static Mutex*                     _query_lock;
};

#endif // INCLUDE_NMT

#endif // SHARE_SERVICES_MEMTRACKER_HPP