/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_CMSGC
#include "gc/cms/parOopClosures.hpp"
#endif

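// Construct the tenured (old) generation: create a single TenuredSpace
// covering the committed part of the reserved space, and set up the GC
// statistics and performance counters updated by the serial collector.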
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size,
                                     size_t min_byte_size,
                                     size_t max_byte_size,
                                     CardTableRS* remset) :
  CardGeneration(rs, initial_byte_size, remset)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";
  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      min_byte_size, max_byte_size, &_virtual_space);

  _gc_counters = new CollectorCounters("Serial full collection pauses", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

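// Snapshot capacity and usage at the start of a collection. should_collect()
// uses _capacity_at_prologue to detect expansion forced by promotion, and
// update_gc_stats() uses _used_at_prologue to compute the promoted size.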
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
}

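// Decide whether this generation should be collected, logging the first
// reason that applies.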
bool TenuredGeneration::should_collect(bool full,
                                       size_t size,
                                       bool is_tlab) {
  // These checks could be folded into one big disjunction (||), but they are
  // kept separate so that the reason for collecting can be logged without
  // re-evaluating conditions that may not be idempotent.
  if (full) {
    log_trace(gc)("TenuredGeneration::should_collect: because full");
    return true;
  }
  if (should_allocate(size, is_tlab)) {
    log_trace(gc)("TenuredGeneration::should_collect: because should_allocate(" SIZE_FORMAT ")", size);
    return true;
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (free() < 10000) {
    log_trace(gc)("TenuredGeneration::should_collect: because free(): " SIZE_FORMAT, free());
    return true;
  }
  // If we had to expand to accommodate promotions from the young generation.
  if (_capacity_at_prologue < capacity()) {
    log_trace(gc)("TenuredGeneration::should_collect: because capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
                  _capacity_at_prologue, capacity());
    return true;
  }

  return false;
}

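// Resize the generation after a collection. The grow/shrink policy lives in
// CardGeneration::compute_new_size(); resizing must not change the amount of
// used space, which the assert below checks.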
void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity());
}

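// Called after another generation has been collected. Only young collections
// matter here: the growth in this generation's used space since the prologue
// is the amount promoted, and it is sampled into the promotion average that
// promotion_attempt_is_safe() consults.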
void TenuredGeneration::update_gc_stats(Generation* current_generation,
                                        bool full) {
  // If the young generation has been collected, gather any statistics
  // that are of interest at this point.
  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
  if (!full && current_is_young) {
    // Calculate size of data promoted from the young generation
    // before doing the collection.
    size_t used_before_gc = used();

    // If the young gen collection was skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average.  It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

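// A promotion attempt is deemed safe if the available space can absorb either
// the padded average promoted per young collection or the caller's stated
// worst-case promotion size.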
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);

  log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
                res? "": " not", available, res? ">=": "<", av_promo, max_promotion_in_bytes);

  return res;
}

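// Perform a full, stop-the-world mark-compact collection via GenMarkSweep,
// with reference discovery temporarily widened to cover the whole heap, and
// bracketed by timer/tracer reporting and the optional full-GC heap dumps.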
void TenuredGeneration::collect(bool full,
                                bool clear_all_soft_refs,
                                size_t size,
                                bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation.
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  gch->pre_full_gc_dump(gc_timer);

  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);

  gch->post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}

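// Expand the generation to satisfy an allocation request that did not fit.
// Parallel callers serialize on ParGCRareEvent_lock and loop: expand, sleep
// for GCExpandToAllocateDelayMillis (an artificial delay) if set, then retry
// the allocation until it succeeds or no uncommitted space is left. The
// serial path simply expands once and allocates.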
HeapWord*
TenuredGeneration::expand_and_allocate(size_t word_size,
                                       bool is_tlab,
                                       bool parallel) {
  assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size * HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

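// Serialize on ExpandHeap_lock and delegate the actual growth to
// CardGeneration::expand().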
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

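// The largest allocation that could be satisfied without a collection:
// everything currently free in the space.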
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

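// Free space in the committed region plus whatever could still be committed
// from the remainder of the reservation.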
size_t TenuredGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

void TenuredGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(ExpandHeap_lock);
}

// Currently nothing to do.
void TenuredGeneration::prepare_for_verify() {}

void TenuredGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

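// Saved-mark bookkeeping. The saved mark tracks the space's top, so
// no_allocs_since_save_marks() holds exactly when nothing has been allocated
// since the last save_marks() call.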
void TenuredGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void TenuredGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool TenuredGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

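// Post-collection bookkeeping: refresh the performance counters and, when
// heap zapping is enabled, verify that the unused area is still mangled.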
void TenuredGeneration::gc_epilogue(bool full) {
  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}

void TenuredGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  _the_space->set_top_for_allocations();
}

void TenuredGeneration::verify() {
  _the_space->verify();
}

void TenuredGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" the");
  _the_space->print_on(st);
}