/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

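// Helpers for managing the DerivedPointerTable; they are no-ops in builds
// without C2 or JVMCI.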
static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

G1CMBitMap* G1FullCollector::mark_bitmap() {
  return _heap->concurrent_mark()->next_mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
  return _heap->ref_processor_stw();
}

uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->total_workers();
  // Only calculate number of workers if UseDynamicNumberOfGCThreads
  // is enabled, otherwise use the maximum.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide the maximum number of workers. Each
  // worker will on average cause half a region of waste.
  uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2), 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Update active workers to the lower of the limits.
  uint worker_count = MIN2(heap_waste_worker_limit, active_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, adaptive workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit);
  worker_count = heap->workers()->update_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}

G1FullCollector::G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs) :
    _heap(heap),
    _scope(heap->g1mm(), explicit_gc, clear_soft_refs),
    _num_workers(calc_active_workers()),
    _oop_queue_set(_num_workers),
    _array_queue_set(_num_workers),
    _preserved_marks_set(true),
    _serial_compaction_point(),
    _is_alive(heap->concurrent_mark()->next_mark_bitmap()),
    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
    _always_subject_to_discovery(),
    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  _preserved_marks_set.init(_num_workers);
  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
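  // Create the per-worker marking state and compaction points, and register
  // each worker's task queues with the shared queue sets used for work stealing.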
  for (uint i = 0; i < _num_workers; i++) {
    _markers[i] = new G1FullGCMarker(i, _preserved_marks_set.get(i), mark_bitmap());
    _compaction_points[i] = new G1FullGCCompactionPoint();
    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
  }
}

G1FullCollector::~G1FullCollector() {
  for (uint i = 0; i < _num_workers; i++) {
    delete _markers[i];
    delete _compaction_points[i];
  }
  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
}

void G1FullCollector::prepare_collection() {
  _heap->policy()->record_full_collection_start();

  _heap->print_heap_before_gc();
  _heap->print_heap_regions();

  _heap->abort_concurrent_cycle();
  _heap->verify_before_full_collection(scope()->is_explicit_gc());

  _heap->gc_prologue(true);
  _heap->prepare_heap_for_full_collection();

  reference_processor()->enable_discovery();
  reference_processor()->setup_policy(scope()->should_clear_soft_refs());

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}

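// Execute the phases of the full collection: mark live objects, prepare the
// compaction, adjust pointers and compact the heap.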
void G1FullCollector::collect() {
  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases.
  deactivate_derived_pointers();

  phase2_prepare_compaction();

  phase3_adjust_pointers();

  phase4_do_compaction();
}

void G1FullCollector::complete_collection() {
  // Restore all marks.
  restore_marks();

  // When the pointers have been adjusted and moved, we can
  // update the derived pointer table.
  update_derived_pointers();

  BiasedLocking::restore_marks();
  JvmtiExport::gc_epilogue();

  _heap->prepare_heap_for_mutators();

  _heap->policy()->record_full_collection_end();
  _heap->gc_epilogue(true);

  _heap->verify_after_full_collection();

  _heap->print_heap_after_full_collection(scope()->heap_transition());
}

void G1FullCollector::phase1_mark_live_objects() {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());

  // Do the actual marking.
  G1FullGCMarkTask marking_task(this);
  run_task(&marking_task);

  // Process references discovered during marking.
  G1FullGCReferenceProcessingExecutor reference_processing(this);
  reference_processing.execute(scope()->timer(), scope()->tracer());

  // Weak oops cleanup.
  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Weak Processing", scope()->timer());
    WeakProcessor::weak_oops_do(_heap->workers(), &_is_alive, &do_nothing_cl, 1);
  }

  // Class unloading and cleanup.
  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(scope()->timer());
    _heap->complete_cleaning(&_is_alive, purged_class);
  } else if (G1StringDedup::is_enabled()) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: String Dedup Cleanup", scope()->timer());
    // Without class unloading, just clean out the string deduplication data.
    _heap->string_dedup_cleaning(&_is_alive, NULL);
  }

  scope()->tracer()->report_object_count_after_gc(&_is_alive);
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());
  G1FullGCPrepareTask task(this);
  run_task(&task);

  // If the parallel prepare phase did not free any regions, prepare for serial
  // compaction to avoid an OOM while there is still memory left.
  if (!task.has_freed_regions()) {
    task.prepare_serial_compaction();
  }
}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations.
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Compact the regions queued on the serial compaction point, used to avoid
  // an OOM when there are very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }
}

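// Restore the mark words (e.g. identity hashes and ages) that were preserved
// while objects were being marked.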
void G1FullCollector::restore_marks() {
  SharedRestorePreservedMarksTaskExecutor task_executor(_heap->workers());
  _preserved_marks_set.restore(&task_executor);
  _preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(AbstractGangTask* task) {
  _heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
  if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
    // Only do verification if both VerifyDuringGC and G1VerifyFull are set.
    return;
  }

  HandleMark hm; // handle scope
#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif
  _heap->prepare_for_verify();
  // Note: we can verify only the heap here. When an object is
  // marked, the previous value of the mark word (including
  // identity hash values, ages, etc) is preserved, and the mark
  // word is set to markOop::marked_value - effectively removing
  // any hash values from the mark word. These hash values are
  // used when verifying the dictionaries and so removing them
  // from the mark word can make verification of the dictionaries
  // fail. At the end of the GC, the original mark word values
  // (including hash values) are restored to the appropriate
  // objects.
  GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
  _heap->verify(VerifyOption_G1UseFullMarking);
}