/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SERVICES_VIRTUALMEMORYTRACKER_HPP
#define SHARE_SERVICES_VIRTUALMEMORYTRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"


/*
 * Virtual memory counter
 */
class VirtualMemory {
 private:
  size_t _reserved;
  size_t _committed;

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }

  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }

  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }

  inline size_t reserved()  const { return _reserved;  }
  inline size_t committed() const { return _committed; }
};
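
// Usage sketch (illustrative only; the sizes are made up). The counter is a
// pair of running tallies, and commits must stay within the reserved amount:
//
//   VirtualMemory vm;
//   vm.reserve_memory(4096);    // reserved = 4096, committed = 0
//   vm.commit_memory(1024);     // reserved = 4096, committed = 1024
//   vm.uncommit_memory(1024);   // reserved = 4096, committed = 0
//   vm.release_memory(4096);    // reserved = 0,    committed = 0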

// Virtual memory allocation site; keeps track of where the virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack, MEMFLAGS flag) :
    AllocationSite<VirtualMemory>(stack, flag) { }

  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
  inline size_t reserved()  const { return peek()->reserved();  }
  inline size_t committed() const { return peek()->committed(); }
};

class VirtualMemorySummary;

// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory _virtual_memory[mt_number_of_types];

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }

  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_virtual_memory[index];
  }

  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};
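
// Read-out sketch (illustrative only): take a consistent copy via
// VirtualMemorySummary::snapshot() (declared below), then query it per type
// or in total:
//
//   VirtualMemorySnapshot snapshot;
//   VirtualMemorySummary::snapshot(&snapshot);
//   size_t thread_reserved = snapshot.by_type(mtThread)->reserved();
//   size_t all_committed   = snapshot.total_committed();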

class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move virtual memory from one memory type to another.
  // Virtual memory can be reserved before it is associated with a memory type, in which
  // case it is tagged as 'unknown'. Once the memory is tagged, it is moved from the
  // 'unknown' type to the specified memory type.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }

  static void snapshot(VirtualMemorySnapshot* s);

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};
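
// Retagging sketch (illustrative only): memory reserved before its consumer
// is known is recorded under a placeholder type, then moved once the real
// type is set:
//
//   VirtualMemorySummary::record_reserved_memory(size, mtNone);
//   // ... later, when the consumer is known ...
//   VirtualMemorySummary::move_reserved_memory(mtNone, mtThread, size);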


/*
 * A virtual memory region
 */
class VirtualMemoryRegion {
 private:
  address _base_address;
  size_t  _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
    assert(addr != NULL, "Invalid address");
    assert(size > 0, "Invalid size");
  }

  inline address base() const { return _base_address; }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size; }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    assert(sz > 0, "Invalid size");
    assert(size() > 0, "Invalid size");
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Not containment");
    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
    size_t new_size = size() - sz;

    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};
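
// Region-arithmetic sketch (illustrative only; addresses are made up).
// exclude_region() trims one end of the region, expand_region() grows an
// adjacent end:
//
//   VirtualMemoryRegion rgn((address)0x1000, 0x4000);  // [0x1000, 0x5000)
//   rgn.exclude_region((address)0x1000, 0x1000);       // now  [0x2000, 0x5000)
//   rgn.expand_region((address)0x1000, 0x1000);        // back [0x1000, 0x5000)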


class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack _stack;

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }
};
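
// Note: compare() reports overlapping regions as equal (returns 0), so a
// sorted list keyed on it will not hold two overlapping committed regions.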


typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;

int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
    _committed_regions;

  NativeCallStack _stack;
  MEMFLAGS        _flag;

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag) { }


  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(mtNone) { }

  // Copy constructor
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }

  void set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const { return _flag; }

  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  // Returns the uncommitted bottom of the thread stack, above the guard pages if any.
  address thread_stack_uncommitted_bottom() const;

  bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool remove_uncommitted_region(address addr, size_t size);

  size_t committed_size() const;

  // Move committed regions that are higher than the specified address to
  // the given region.
  void move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack = *other.call_stack();
    _flag  = other.flag();

    CommittedRegionIterator itr = other.iterate_committed_regions();
    const CommittedMemoryRegion* rgn = itr.next();
    while (rgn != NULL) {
      _committed_regions.add(*rgn);
      rgn = itr.next();
    }

    return *this;
  }

 private:
  // The committed region contains the uncommitted region; subtract the uncommitted
  // region from this committed region.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
    address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    assert(size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);

class VirtualMemoryWalker : public StackObj {
 public:
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
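
// Walker sketch (a hypothetical subclass, illustrative only): sum the
// reserved size of all regions carrying one memory type. Returning true
// from do_allocation_site() lets the walk continue.
//
//   class ReservedSizeWalker : public VirtualMemoryWalker {
//    private:
//     MEMFLAGS _flag;
//     size_t   _total;
//    public:
//     ReservedSizeWalker(MEMFLAGS flag) : _flag(flag), _total(0) { }
//     virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       if (rgn->flag() == _flag) {
//         _total += rgn->size();
//       }
//       return true;   // keep walking
//     }
//     size_t total() const { return _total; }
//   };
//
//   ReservedSizeWalker walker(mtThread);
//   VirtualMemoryTracker::walk_virtual_memory(&walker);   // declared below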

// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
  friend class VirtualMemoryTrackerTest;
  friend class CommittedVirtualMemoryTest;

 public:
  static bool initialize(NMT_TrackingLevel level);

  // Late phase initialization
  static bool late_initialize(NMT_TrackingLevel level);

  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone);

  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region (address base_addr, size_t size);
  static bool remove_released_region    (address base_addr, size_t size);
  static void set_reserved_region_type  (address addr, MEMFLAGS flag);

  // Walk virtual memory data structure for creating baseline, etc.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

  // Snapshot current thread stacks
  static void snapshot_thread_stacks();

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
};
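
// Lifecycle sketch (illustrative only; 'base', 'size', 'page_size' and
// 'stack' are assumed to come from the actual reservation being tracked):
//
//   VirtualMemoryTracker::add_reserved_region(base, size, stack, mtNone);
//   VirtualMemoryTracker::set_reserved_region_type(base, mtThread);
//   VirtualMemoryTracker::add_committed_region(base, page_size, stack);
//   VirtualMemoryTracker::remove_uncommitted_region(base, page_size);
//   VirtualMemoryTracker::remove_released_region(base, size);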


class MetaspaceSnapshot : public ResourceObj {
private:
  size_t _reserved_in_bytes[Metaspace::MetadataTypeCount];
  size_t _committed_in_bytes[Metaspace::MetadataTypeCount];
  size_t _used_in_bytes[Metaspace::MetadataTypeCount];
  size_t _free_in_bytes[Metaspace::MetadataTypeCount];

public:
  MetaspaceSnapshot();
  size_t reserved_in_bytes(Metaspace::MetadataType type)  const { assert_valid_metadata_type(type); return _reserved_in_bytes[type];  }
  size_t committed_in_bytes(Metaspace::MetadataType type) const { assert_valid_metadata_type(type); return _committed_in_bytes[type]; }
  size_t used_in_bytes(Metaspace::MetadataType type)      const { assert_valid_metadata_type(type); return _used_in_bytes[type];      }
  size_t free_in_bytes(Metaspace::MetadataType type)      const { assert_valid_metadata_type(type); return _free_in_bytes[type];      }

  static void snapshot(MetaspaceSnapshot& s);

private:
  static void snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& s);

  static void assert_valid_metadata_type(Metaspace::MetadataType type) {
    assert(type == Metaspace::ClassType || type == Metaspace::NonClassType,
      "Invalid metadata type");
  }
};
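
// Usage sketch (illustrative only):
//
//   MetaspaceSnapshot ms;
//   MetaspaceSnapshot::snapshot(ms);
//   size_t class_committed = ms.committed_in_bytes(Metaspace::ClassType);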

#endif // INCLUDE_NMT

#endif // SHARE_SERVICES_VIRTUALMEMORYTRACKER_HPP