/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#endif // INCLUDE_G1GC

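// A VM_GC_Operation runs at a safepoint in the VM thread: doit_prologue()
// executes in the requesting thread before the safepoint, doit() inside it,
// and doit_epilogue() afterwards. On destruction, reset the soft reference
// clearing policy so that a decision to clear all soft references applies
// only to the collection that made it, not to subsequent GC operations.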
VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->soft_ref_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so that it is in only one file. New probes can't
// be created for the other file anymore; the dtrace probes have to remain
// stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC, the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  // If the GC count has changed, someone beat us to the collection.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}


void VM_GC_Operation::doit_epilogue() {
  // Clean up old interpreter OopMap entries that were replaced
  // during the GC thread root traversal.
  OopMapCache::cleanup_old_entries();
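  // If the collection left newly-pending references, notify waiters on the
  // Heap_lock: the Java ReferenceHandler thread waits on this lock for the
  // reference pending list to become non-empty.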
  if (Universe::has_reference_pending_list()) {
    Heap_lock->notify_all();
  }
  Heap_lock->unlock();
}

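// A heap inspection is never made redundant by an intervening collection,
// so the operation is never skipped.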
bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

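// Attempt a full collection ahead of the inspection so that (mostly) only
// live objects appear in the dump; give up without collecting if the GC
// locker is held.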
bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GCLocker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt a collection.
      // The latter is a subtle point: even a failed attempt to GC will
      // induce one in the future, which we probably want to avoid here
      // because the GC we may be about to attempt holds value for us only
      // if it happens now, not if it happens in the eventual future.
      log_warning(gc)("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}


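// Collect in response to an allocation that has already failed once. The
// operation is created by the allocating thread and submitted with
// VMThread::execute(); doit() then retries the allocation, collecting if
// necessary, at a safepoint in the VM thread. If the allocation still fails
// while the GC locker is blocking the collection, the operation is marked
// gc_locked so the caller knows to stall and retry.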
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

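// Run a stop-the-world full collection of all generations up to and
// including _max_generation, clearing all soft references first if the
// heap's soft reference policy requires it.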
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
  : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
    _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff the concurrent GC unloads metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_CMSGC
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }
#endif

#if INCLUDE_G1GC
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}

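// Satisfy a failed metaspace allocation, escalating through progressively
// more aggressive measures: retry the allocation (another thread's GC may
// have freed space), start a concurrent collection and expand (CMS/G1 with
// class unloading), run a threshold-triggered GC and retry, expand the
// metaspace and retry, and finally GC again with soft references cleared.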
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1, expand the metaspace, since the collection is going
    // to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC, try to allocate without expanding. This could fail,
  // in which case expansion is tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for an explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a collection clearing soft references.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
  : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(NULL) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}
