/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/threadStackTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks, but only if thread stacks are backed by virtual memory
  if (ThreadStackTracker::track_as_vm()) {
    VirtualMemoryTracker::snapshot_thread_stacks();
  }
  as_snapshot()->copy_to(s);
}

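// All reserved regions, kept sorted by base address. The list is created on
// demand in late_initialize() and deleted when tracking drops to NMT_minimal.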
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

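// Ordering functions used by the sorted lists of committed and reserved regions.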
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

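// A committed region can absorb [addr, addr + size) only if the two ranges are
// adjacent and were committed from the same call stack.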
static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

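// Starting at 'from', return the last node whose region ends at or before 'addr',
// or NULL if no such node exists.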
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = NULL;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == NULL) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

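// Track a newly committed sub-range of this reserved region. Any existing
// committed regions overlapping [addr, addr + size) are removed first, the
// committed counter is updated, and the range is then merged with an adjacent
// neighbor committed from the same call stack, or inserted as a new region.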
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());

  if (next != NULL) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
      next = (prev != NULL ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to cover both the new region and next;
      // remove next from the list.
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

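// Remove [addr, addr + size) from a single committed region that contains it
// but does not match it exactly. The region is trimmed when the range touches
// its head or tail; otherwise it is split and the upper part is added back as
// a new committed region.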
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }

  return false;
}

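// Remove [addr, addr + sz) from this region's list of committed regions. The
// range may match a committed region exactly, cover one or more regions
// entirely, or overlap regions partially at either end; the committed memory
// counters are updated accordingly.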
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  CommittedMemoryRegion* crgn;

  while (head != NULL) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue; // head has already been advanced; prev stays unchanged
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true; // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

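// Transfer committed regions starting at or above 'addr' from this reserved
// region to 'rgn'. Used when a reserved region is split by a partial release.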
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}

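// Sum of the sizes of all committed regions within this reserved region.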
size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

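// Change the memory type of this region. Reserved and committed amounts are
// moved from the old type to the new one in the summary counters.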
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

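// For a thread stack region, return the address just above any committed guard
// pages at the low end of the stack; the range from this address to the top of
// the region is the part scanned for committed stack pages.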
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != NULL) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

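// Record a newly reserved region. A reservation of the same or an adjacent
// range is folded into the existing entry; overlapping reservations are only
// tolerated for the special cases handled below (thread stacks, CDS and Java
// heap mappings).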
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
  const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != NULL;
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapping reservation. This can happen with thread stack regions:
      // an attached JNI thread that exits without detaching from the VM
      // leaks its JavaThread object.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      ShouldNotReachHere();
      return false;
    }
  }
}

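// Tag the reserved region containing 'addr' with the given memory type; a
// region may only be re-typed if its current type is mtNone.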
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, 1);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

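// Record that a sub-range of an existing reserved region was committed (or,
// below, uncommitted). The range must be fully contained in a single reserved
// region.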
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  return result;
}

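// Record that [addr, addr + size) has been released. The range is first
// uncommitted, then removed from its reserved region; if it lies in the middle
// of the region, the region is split in two.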
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use the original region for the lower part
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

// Iterate over a given address range and find the committed sub-regions within it.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t _size;

  address _current_start;
  size_t _current_size;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start), _current_size(size) {
  }

  // return true if a committed region is found
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  assert(_current_start + _current_size == end(), "Must be");
  if (os::committed_in_range(_current_start, _current_size, committed_start, committed_size)) {
    assert(committed_start != NULL, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, os::vm_page_size()), "Must be");

    size_t remaining_size = (_current_start + _current_size) - (committed_start + committed_size);
    _current_start = committed_start + committed_size;
    _current_size = remaining_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != NULL, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

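// Visit every reserved region under ThreadCritical protection; stop early if
// the walker returns false.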
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that _reserved_regions has not been deleted concurrently.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for a potential race with another thread calling transition.
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}

// Metaspace Support
MetaspaceSnapshot::MetaspaceSnapshot() {
  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index++) {
    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
    assert_valid_metadata_type(type);
    _reserved_in_bytes[type] = 0;
    _committed_in_bytes[type] = 0;
    _used_in_bytes[type] = 0;
    _free_in_bytes[type] = 0;
  }
}

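// Capture reserved, committed, used and free sizes for one metadata type.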
void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
  assert_valid_metadata_type(type);

  mss._reserved_in_bytes[type] = MetaspaceUtils::reserved_bytes(type);
  mss._committed_in_bytes[type] = MetaspaceUtils::committed_bytes(type);
  mss._used_in_bytes[type] = MetaspaceUtils::used_bytes(type);

  size_t free_in_bytes = (MetaspaceUtils::capacity_bytes(type) - MetaspaceUtils::used_bytes(type))
    + MetaspaceUtils::free_chunks_total_bytes(type)
    + MetaspaceUtils::free_in_vs_bytes(type);
  mss._free_in_bytes[type] = free_in_bytes;
}

void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
  // Non-class metaspace always exists; class space only when a compressed class space is in use.
  snapshot(Metaspace::NonClassType, mss);
  if (Metaspace::using_class_space()) {
    snapshot(Metaspace::ClassType, mss);
  }
}