| 1 | /* | 
|---|
| 2 | * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. | 
|---|
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 
|---|
| 4 | * | 
|---|
| 5 | * This code is free software; you can redistribute it and/or modify it | 
|---|
| 6 | * under the terms of the GNU General Public License version 2 only, as | 
|---|
| 7 | * published by the Free Software Foundation. | 
|---|
| 8 | * | 
|---|
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT | 
|---|
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
|---|
| 11 | * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License | 
|---|
| 12 | * version 2 for more details (a copy is included in the LICENSE file that | 
|---|
| 13 | * accompanied this code). | 
|---|
| 14 | * | 
|---|
| 15 | * You should have received a copy of the GNU General Public License version | 
|---|
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, | 
|---|
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | 
|---|
| 18 | * | 
|---|
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | 
|---|
| 20 | * or visit www.oracle.com if you need additional information or have any | 
|---|
| 21 | * questions. | 
|---|
| 22 | */ | 
|---|
| 23 |  | 
|---|
| 24 | #include "precompiled.hpp" | 
|---|
| 25 | #include "gc/shared/gcArguments.hpp" | 
|---|
| 26 | #include "gc/shared/oopStorage.hpp" | 
|---|
| 27 | #include "gc/z/zAddress.hpp" | 
|---|
| 28 | #include "gc/z/zGlobals.hpp" | 
|---|
| 29 | #include "gc/z/zHeap.inline.hpp" | 
|---|
| 30 | #include "gc/z/zHeapIterator.hpp" | 
|---|
| 31 | #include "gc/z/zList.inline.hpp" | 
|---|
| 32 | #include "gc/z/zLock.inline.hpp" | 
|---|
| 33 | #include "gc/z/zMark.inline.hpp" | 
|---|
| 34 | #include "gc/z/zOopClosures.inline.hpp" | 
|---|
| 35 | #include "gc/z/zPage.inline.hpp" | 
|---|
| 36 | #include "gc/z/zPageTable.inline.hpp" | 
|---|
| 37 | #include "gc/z/zRelocationSet.inline.hpp" | 
|---|
| 38 | #include "gc/z/zResurrection.hpp" | 
|---|
| 39 | #include "gc/z/zRootsIterator.hpp" | 
|---|
| 40 | #include "gc/z/zStat.hpp" | 
|---|
| 41 | #include "gc/z/zTask.hpp" | 
|---|
| 42 | #include "gc/z/zThread.hpp" | 
|---|
| 43 | #include "gc/z/zTracer.inline.hpp" | 
|---|
| 44 | #include "gc/z/zVerify.hpp" | 
|---|
| 45 | #include "gc/z/zVirtualMemory.inline.hpp" | 
|---|
| 46 | #include "gc/z/zWorkers.inline.hpp" | 
|---|
| 47 | #include "logging/log.hpp" | 
|---|
| 48 | #include "memory/resourceArea.hpp" | 
|---|
| 49 | #include "oops/oop.inline.hpp" | 
|---|
| 50 | #include "runtime/arguments.hpp" | 
|---|
| 51 | #include "runtime/safepoint.hpp" | 
|---|
| 52 | #include "runtime/thread.hpp" | 
|---|
| 53 | #include "utilities/align.hpp" | 
|---|
| 54 | #include "utilities/debug.hpp" | 
|---|
| 55 |  | 
|---|
| 56 | static const ZStatSampler ( "Memory", "Heap Used Before Mark", ZStatUnitBytes); | 
|---|
| 57 | static const ZStatSampler ZSamplerHeapUsedAfterMark( "Memory", "Heap Used After Mark", ZStatUnitBytes); | 
|---|
| 58 | static const ZStatSampler ZSamplerHeapUsedBeforeRelocation( "Memory", "Heap Used Before Relocation", ZStatUnitBytes); | 
|---|
| 59 | static const ZStatSampler ZSamplerHeapUsedAfterRelocation( "Memory", "Heap Used After Relocation", ZStatUnitBytes); | 
|---|
| 60 | static const ZStatCounter ZCounterUndoPageAllocation( "Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond); | 
|---|
| 61 | static const ZStatCounter ZCounterOutOfMemory( "Memory", "Out Of Memory", ZStatUnitOpsPerSecond); | 
|---|
| 62 |  | 
|---|
// Global singleton heap instance, installed by the constructor below.
ZHeap* ZHeap::_heap = NULL;

// Constructs the heap and all of its subsystems. Note that the member
// initializer list order matters: _workers must be initialized before the
// allocators and processors that take its worker count or address.
ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _unload(&_workers),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
}
|---|
| 85 |  | 
|---|
// Minimum heap size, as configured via the command line (-Xms and friends).
size_t ZHeap::heap_min_size() const {
  return MinHeapSize;
}

// Initial heap size, as configured via the command line.
size_t ZHeap::heap_initial_size() const {
  return InitialHeapSize;
}

// Maximum heap size, as configured via the command line (-Xmx).
size_t ZHeap::heap_max_size() const {
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  // Never reserve more than the whole heap
  return MIN2(max_reserve_size, heap_max_size());
}
|---|
| 104 |  | 
|---|
// The heap is usable only if both the page allocator and the marker
// initialized successfully.
bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

// The accessors below expose page allocator capacity/usage state directly.

size_t ZHeap::min_capacity() const {
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::soft_max_capacity() const {
  return _page_allocator.soft_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

// High watermark of used memory (since the last statistics reset).
size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

// Low watermark of used memory (since the last statistics reset).
size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::unused() const {
  return _page_allocator.unused();
}

// Bytes allocated since the last statistics reset.
size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

// Bytes reclaimed since the last statistics reset.
size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}
|---|
| 152 |  | 
|---|
// All committed heap memory is usable for TLABs.
size_t ZHeap::tlab_capacity() const {
  return capacity();
}

// Memory currently held by the per-CPU/worker object allocator.
size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

// A TLAB is never larger than the largest object that fits in a small page.
size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}
|---|
| 164 |  | 
|---|
| 165 | size_t ZHeap::unsafe_max_tlab_alloc() const { | 
|---|
| 166 | size_t size = _object_allocator.remaining(); | 
|---|
| 167 |  | 
|---|
| 168 | if (size < MinTLABSize) { | 
|---|
| 169 | // The remaining space in the allocator is not enough to | 
|---|
| 170 | // fit the smallest possible TLAB. This means that the next | 
|---|
| 171 | // TLAB allocation will force the allocator to get a new | 
|---|
| 172 | // backing page anyway, which in turn means that we can then | 
|---|
| 173 | // fit the largest possible TLAB. | 
|---|
| 174 | size = max_tlab_size(); | 
|---|
| 175 | } | 
|---|
| 176 |  | 
|---|
| 177 | return MIN2(size, max_tlab_size()); | 
|---|
| 178 | } | 
|---|
| 179 |  | 
|---|
| 180 | bool ZHeap::is_in(uintptr_t addr) const { | 
|---|
| 181 | // An address is considered to be "in the heap" if it points into | 
|---|
| 182 | // the allocated part of a pages, regardless of which heap view is | 
|---|
| 183 | // used. Note that an address with the finalizable metadata bit set | 
|---|
| 184 | // is not pointing into a heap view, and therefore not considered | 
|---|
| 185 | // to be "in the heap". | 
|---|
| 186 |  | 
|---|
| 187 | if (ZAddress::is_in(addr)) { | 
|---|
| 188 | const ZPage* const page = _page_table.get(addr); | 
|---|
| 189 | if (page != NULL) { | 
|---|
| 190 | return page->is_in(addr); | 
|---|
| 191 | } | 
|---|
| 192 | } | 
|---|
| 193 |  | 
|---|
| 194 | return false; | 
|---|
| 195 | } | 
|---|
| 196 |  | 
|---|
// Returns the start of the object/block containing addr.
// NOTE(review): the page table lookup result is dereferenced without a NULL
// check — presumably callers guarantee addr is backed by a page; verify
// against the CollectedHeap block_start() contract.
uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_start(addr);
}

// Returns true if addr denotes a valid object. Same precondition as
// block_start(): addr must be backed by a page.
bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _page_table.get(addr);
  return page->block_is_obj(addr);
}
|---|
| 206 |  | 
|---|
// Number of worker threads used for concurrent phases.
uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

// Number of concurrent worker threads when boosting is not in effect.
uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

// Enable/disable worker thread boosting.
void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

// Apply the closure to each GC worker thread.
void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}
|---|
| 226 |  | 
|---|
// Record and log an out-of-memory event for the current thread.
void ZHeap::out_of_memory() {
  // ResourceMark needed since Thread::name() may allocate in a resource area
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}
|---|
| 233 |  | 
|---|
| 234 | ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) { | 
|---|
| 235 | ZPage* const page = _page_allocator.alloc_page(type, size, flags); | 
|---|
| 236 | if (page != NULL) { | 
|---|
| 237 | // Insert page table entry | 
|---|
| 238 | _page_table.insert(page); | 
|---|
| 239 | } | 
|---|
| 240 |  | 
|---|
| 241 | return page; | 
|---|
| 242 | } | 
|---|
| 243 |  | 
|---|
| 244 | void ZHeap::undo_alloc_page(ZPage* page) { | 
|---|
| 245 | assert(page->is_allocating(), "Invalid page state"); | 
|---|
| 246 |  | 
|---|
| 247 | ZStatInc(ZCounterUndoPageAllocation); | 
|---|
| 248 | log_trace(gc)( "Undo page allocation, thread: "PTR_FORMAT " (%s), page: "PTR_FORMAT ", size: "SIZE_FORMAT, | 
|---|
| 249 | ZThread::id(), ZThread::name(), p2i(page), page->size()); | 
|---|
| 250 |  | 
|---|
| 251 | free_page(page, false /* reclaimed */); | 
|---|
| 252 | } | 
|---|
| 253 |  | 
|---|
// Removes a page from the page table and returns it to the page allocator.
// 'reclaimed' indicates whether the freed bytes should be accounted as
// reclaimed by the GC.
void ZHeap::free_page(ZPage* page, bool reclaimed) {
  // Remove page table entry
  _page_table.remove(page);

  // Free page
  _page_allocator.free_page(page, reclaimed);
}
|---|
| 261 |  | 
|---|
// Uncommit unused memory, honoring the given delay.
// NOTE(review): presumably the return value is the timeout until the next
// uncommit attempt — confirm against ZPageAllocator::uncommit().
uint64_t ZHeap::uncommit(uint64_t delay) {
  return _page_allocator.uncommit(delay);
}
|---|
| 265 |  | 
|---|
// Called before flipping the heap address view. With -XX:+ZVerifyViews, all
// pages are unmapped so that stale accesses through the old view fault.
void ZHeap::before_flip() {
  if (ZVerifyViews) {
    // Unmap all pages
    _page_allocator.debug_unmap_all_pages();
  }
}

// Called after flipping the heap address view. With -XX:+ZVerifyViews, all
// pages (including cached ones) are mapped back in under the new view.
void ZHeap::after_flip() {
  if (ZVerifyViews) {
    // Map all pages
    ZPageTableIterator iter(&_page_table);
    for (ZPage* page; iter.next(&page);) {
      _page_allocator.debug_map_page(page);
    }
    _page_allocator.debug_map_cached_pages();
  }
}
|---|
| 283 |  | 
|---|
// Flip the global address view to "marked", bracketed by the
// before/after hooks used for view verification.
void ZHeap::flip_to_marked() {
  before_flip();
  ZAddress::flip_to_marked();
  after_flip();
}

// Flip the global address view to "remapped".
void ZHeap::flip_to_remapped() {
  before_flip();
  ZAddress::flip_to_remapped();
  after_flip();
}
|---|
| 295 |  | 
|---|
// Pause Mark Start: flips the address view, resets per-cycle state and
// kicks off marking. The statement order below is significant.
void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
}
|---|
| 323 |  | 
|---|
// Concurrent marking. 'initial' distinguishes the first marking round
// from continuation rounds.
void ZHeap::mark(bool initial) {
  _mark.mark(initial);
}

// Flush and free the given thread's local mark stacks.
void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}
|---|
| 331 |  | 
|---|
// Pause Mark End: attempts to terminate marking. Returns false if marking
// is not yet complete and concurrent marking must continue.
bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Verify after mark
  ZVerify::after_mark();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Prepare to unload unused classes and code
  _unload.prepare();

  return true;
}
|---|
| 362 |  | 
|---|
// Selects whether soft references should be cleared this cycle.
void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}
|---|
| 366 |  | 
|---|
// Concurrent processing of non-strong references and unloading.
// The ordering of the steps below is significant; see the comment on
// enqueue_references() at the end.
void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unload unused classes and code
  _unload.unload();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}
|---|
| 388 |  | 
|---|
// Builds the relocation set: garbage pages are reclaimed immediately,
// live pages are handed to the selector, and forwardings for the chosen
// pages are installed in the forwarding table.
void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      free_page(page, true /* reclaimed */);
    }
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);
  for (ZForwarding* forwarding; rs_iter.next(&forwarding);) {
    _forwarding_table.insert(forwarding);
  }

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}
|---|
| 432 |  | 
|---|
// Tears down the previous cycle's relocation set and its forwardings.
void ZHeap::reset_relocation_set() {
  // Reset forwarding table
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    _forwarding_table.remove(forwarding);
  }

  // Reset relocation set
  _relocation_set.reset();
}
|---|
| 443 |  | 
|---|
// Pause Relocate Start: finishes unloading, flips the address view to
// remapped and starts relocation of roots.
void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}
|---|
| 463 |  | 
|---|
// Concurrent relocation of the selected relocation set.
void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}
|---|
| 474 |  | 
|---|
// Applies the closure to all objects in the heap, optionally including
// objects reachable only through weak references. Must run at a safepoint.
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl, visit_weaks);
}
|---|
| 481 |  | 
|---|
// Serviceability (JMX/monitoring) support, delegated to _serviceability.

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}
|---|
| 497 |  | 
|---|
| 498 | void ZHeap::print_on(outputStream* st) const { | 
|---|
| 499 | st->print_cr( " ZHeap           used "SIZE_FORMAT "M, capacity "SIZE_FORMAT "M, max capacity "SIZE_FORMAT "M", | 
|---|
| 500 | used() / M, | 
|---|
| 501 | capacity() / M, | 
|---|
| 502 | max_capacity() / M); | 
|---|
| 503 | MetaspaceUtils::print_on(st); | 
|---|
| 504 | } | 
|---|
| 505 |  | 
|---|
| 506 | void ZHeap::print_extended_on(outputStream* st) const { | 
|---|
| 507 | print_on(st); | 
|---|
| 508 | st->cr(); | 
|---|
| 509 |  | 
|---|
| 510 | // Do not allow pages to be deleted | 
|---|
| 511 | _page_allocator.enable_deferred_delete(); | 
|---|
| 512 |  | 
|---|
| 513 | // Print all pages | 
|---|
| 514 | ZPageTableIterator iter(&_page_table); | 
|---|
| 515 | for (ZPage* page; iter.next(&page);) { | 
|---|
| 516 | page->print_on(st); | 
|---|
| 517 | } | 
|---|
| 518 |  | 
|---|
| 519 | // Allow pages to be deleted | 
|---|
| 520 | _page_allocator.enable_deferred_delete(); | 
|---|
| 521 |  | 
|---|
| 522 | st->cr(); | 
|---|
| 523 | } | 
|---|
| 524 |  | 
|---|
// Verifies heap consistency.
void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}
|---|
| 533 |  | 
|---|