| 1 | /* |
| 2 | * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | |
| 27 | #include "gc/shared/blockOffsetTable.inline.hpp" |
| 28 | #include "gc/shared/cardGeneration.inline.hpp" |
| 29 | #include "gc/shared/cardTableRS.hpp" |
| 30 | #include "gc/shared/gcLocker.hpp" |
| 31 | #include "gc/shared/genCollectedHeap.hpp" |
| 32 | #include "gc/shared/genOopClosures.inline.hpp" |
| 33 | #include "gc/shared/generationSpec.hpp" |
| 34 | #include "gc/shared/space.inline.hpp" |
| 35 | #include "memory/iterator.hpp" |
| 36 | #include "memory/memRegion.hpp" |
| 37 | #include "logging/log.hpp" |
| 38 | #include "runtime/java.hpp" |
| 39 | |
| 40 | CardGeneration::CardGeneration(ReservedSpace rs, |
| 41 | size_t initial_byte_size, |
| 42 | CardTableRS* remset) : |
| 43 | Generation(rs, initial_byte_size), _rs(remset), |
| 44 | _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(), |
| 45 | _used_at_prologue() |
| 46 | { |
| 47 | HeapWord* start = (HeapWord*)rs.base(); |
| 48 | size_t reserved_byte_size = rs.size(); |
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
| 51 | MemRegion reserved_mr(start, heap_word_size(reserved_byte_size)); |
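  // The shared block offset array covers the whole reserved region; it
  // lets card scanning map an arbitrary address within a card back to the
  // start of the object (or block) covering it, without a heap walk.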
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  if (_bts == NULL) {
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  }
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
| 59 | |
| 60 | // Verify that the start and end of this generation is the start of a card. |
  // If this wasn't true, a single card could span more than one generation,
| 62 | // which would cause problems when we commit/uncommit memory, and when we |
| 63 | // clear and dirty cards. |
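  // For example, with a 512-byte card size, a generation boundary at a
  // non-card-aligned address would share a card with its neighbor, and
  // clearing that card for one generation could discard dirty-card state
  // that the other generation still needs.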
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
| 65 | if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) { |
| 66 | // Don't check at the very end of the heap as we'll assert that we're probing off |
| 67 | // the end if we try. |
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
| 69 | } |
| 70 | _min_heap_delta_bytes = MinHeapDeltaBytes; |
| 71 | _capacity_at_prologue = initial_byte_size; |
| 72 | _used_at_prologue = 0; |
| 73 | } |
| 74 | |
| 75 | bool CardGeneration::grow_by(size_t bytes) { |
| 76 | assert_correct_size_change_locking(); |
| 77 | bool result = _virtual_space.expand_by(bytes); |
| 78 | if (result) { |
| 79 | size_t new_word_size = |
| 80 | heap_word_size(_virtual_space.committed_size()); |
| 81 | MemRegion mr(space()->bottom(), new_word_size); |
| 82 | // Expand card table |
| 83 | GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr); |
| 84 | // Expand shared block offset array |
| 85 | _bts->resize(new_word_size); |
| 86 | |
| 87 | // Fix for bug #4668531 |
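    // Mangle the newly committed but not yet used memory so that stale
    // bit patterns there are never mistaken for live objects, e.g. by
    // heap verification code.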
| 88 | if (ZapUnusedHeapArea) { |
| 89 | MemRegion mangle_region(space()->end(), |
| 90 | (HeapWord*)_virtual_space.high()); |
| 91 | SpaceMangler::mangle_region(mangle_region); |
| 92 | } |
| 93 | |
| 94 | // Expand space -- also expands space's BOT |
| 95 | // (which uses (part of) shared array above) |
| 96 | space()->set_end((HeapWord*)_virtual_space.high()); |
| 97 | |
| 98 | // update the space and generation capacity counters |
| 99 | update_counters(); |
| 100 | |
| 101 | size_t new_mem_size = _virtual_space.committed_size(); |
| 102 | size_t old_mem_size = new_mem_size - bytes; |
| 103 | log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K" , |
| 104 | name(), old_mem_size/K, bytes/K, new_mem_size/K); |
| 105 | } |
| 106 | return result; |
| 107 | } |
| 108 | |
| 109 | bool CardGeneration::expand(size_t bytes, size_t expand_bytes) { |
| 110 | assert_locked_or_safepoint(Heap_lock); |
| 111 | if (bytes == 0) { |
| 112 | return true; // That's what grow_by(0) would return |
| 113 | } |
| 114 | size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); |
  if (aligned_bytes == 0) {
    // Aligning up caused the byte count to wrap around to zero. An
    // expand_by(0) would return true, implying that an expansion happened
    // when it did not. A call to expand implies a best effort to expand
    // by "bytes" but not a guarantee, so align down instead. This is
    // likely the most that the generation can expand, since it already
    // has some committed capacity to start with.
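    // For example, with a 4K page size a request of SIZE_MAX - 100 bytes
    // aligns up past SIZE_MAX and wraps to zero, while aligning the same
    // request down yields the largest page-aligned size still representable.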
| 122 | aligned_bytes = ReservedSpace::page_align_size_down(bytes); |
| 123 | } |
| 124 | size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); |
| 125 | bool success = false; |
| 126 | if (aligned_expand_bytes > aligned_bytes) { |
| 127 | success = grow_by(aligned_expand_bytes); |
| 128 | } |
| 129 | if (!success) { |
| 130 | success = grow_by(aligned_bytes); |
| 131 | } |
| 132 | if (!success) { |
| 133 | success = grow_to_reserved(); |
| 134 | } |
| 135 | if (success && GCLocker::is_active_and_needs_gc()) { |
| 136 | log_trace(gc, heap)("Garbage collection disabled, expanded heap instead" ); |
| 137 | } |
| 138 | |
| 139 | return success; |
| 140 | } |
| 141 | |
| 142 | bool CardGeneration::grow_to_reserved() { |
| 143 | assert_correct_size_change_locking(); |
| 144 | bool success = true; |
| 145 | const size_t remaining_bytes = _virtual_space.uncommitted_size(); |
| 146 | if (remaining_bytes > 0) { |
| 147 | success = grow_by(remaining_bytes); |
    DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
| 149 | } |
| 150 | return success; |
| 151 | } |
| 152 | |
| 153 | void CardGeneration::shrink(size_t bytes) { |
| 154 | assert_correct_size_change_locking(); |
| 155 | |
| 156 | size_t size = ReservedSpace::page_align_size_down(bytes); |
| 157 | if (size == 0) { |
| 158 | return; |
| 159 | } |
| 160 | |
| 161 | // Shrink committed space |
| 162 | _virtual_space.shrink_by(size); |
| 163 | // Shrink space; this also shrinks the space's BOT |
| 164 | space()->set_end((HeapWord*) _virtual_space.high()); |
| 165 | size_t new_word_size = heap_word_size(space()->capacity()); |
| 166 | // Shrink the shared block offset array |
| 167 | _bts->resize(new_word_size); |
| 168 | MemRegion mr(space()->bottom(), new_word_size); |
| 169 | // Shrink the card table |
| 170 | GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr); |
| 171 | |
| 172 | size_t new_mem_size = _virtual_space.committed_size(); |
| 173 | size_t old_mem_size = new_mem_size + size; |
| 174 | log_trace(gc, heap)("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K" , |
| 175 | name(), old_mem_size/K, new_mem_size/K); |
| 176 | } |
| 177 | |
| 178 | // No young generation references, clear this generation's cards. |
| 179 | void CardGeneration::clear_remembered_set() { |
| 180 | _rs->clear(reserved()); |
| 181 | } |
| 182 | |
| 183 | // Objects in this generation may have moved, invalidate this |
| 184 | // generation's cards. |
| 185 | void CardGeneration::invalidate_remembered_set() { |
| 186 | _rs->invalidate(used_region()); |
| 187 | } |
| 188 | |
| 189 | void CardGeneration::compute_new_size() { |
  assert(_shrink_factor <= 100, "invalid shrink factor");
| 191 | size_t current_shrink_factor = _shrink_factor; |
| 192 | _shrink_factor = 0; |
| 193 | |
  // MinHeapFreeRatio is an integer percentage because we don't have
  // floating point command-line arguments.
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
| 196 | const double minimum_free_percentage = MinHeapFreeRatio / 100.0; |
| 197 | const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
| 198 | |
| 199 | // Compute some numbers about the state of the heap. |
| 200 | const size_t used_after_gc = used(); |
| 201 | const size_t capacity_after_gc = capacity(); |
| 202 | |
| 203 | const double min_tmp = used_after_gc / maximum_used_percentage; |
| 204 | size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx)); |
| 205 | // Don't shrink less than the initial generation size |
| 206 | minimum_desired_capacity = MAX2(minimum_desired_capacity, initial_size()); |
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
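  // For example, with MinHeapFreeRatio == 40 the maximum used percentage
  // is 0.60, so 30M used after GC gives a minimum desired capacity of
  // 30M / 0.60 == 50M, leaving exactly 40% of that capacity free.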
| 208 | |
| 209 | const size_t free_after_gc = free(); |
| 210 | const double free_percentage = ((double)free_after_gc) / capacity_after_gc; |
| 211 | log_trace(gc, heap)("CardGeneration::compute_new_size:" ); |
| 212 | log_trace(gc, heap)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f" , |
| 213 | minimum_free_percentage, |
| 214 | maximum_used_percentage); |
| 215 | log_trace(gc, heap)(" free_after_gc : %6.1fK used_after_gc : %6.1fK capacity_after_gc : %6.1fK" , |
| 216 | free_after_gc / (double) K, |
| 217 | used_after_gc / (double) K, |
| 218 | capacity_after_gc / (double) K); |
| 219 | log_trace(gc, heap)(" free_percentage: %6.2f" , free_percentage); |
| 220 | |
| 221 | if (capacity_after_gc < minimum_desired_capacity) { |
| 222 | // If we have less free space than we want then expand |
| 223 | size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; |
| 224 | // Don't expand unless it's significant |
| 225 | if (expand_bytes >= _min_heap_delta_bytes) { |
| 226 | expand(expand_bytes, 0); // safe if expansion fails |
| 227 | } |
| 228 | log_trace(gc, heap)(" expanding: minimum_desired_capacity: %6.1fK expand_bytes: %6.1fK _min_heap_delta_bytes: %6.1fK" , |
| 229 | minimum_desired_capacity / (double) K, |
| 230 | expand_bytes / (double) K, |
| 231 | _min_heap_delta_bytes / (double) K); |
| 232 | return; |
| 233 | } |
| 234 | |
| 235 | // No expansion, now see if we want to shrink |
| 236 | size_t shrink_bytes = 0; |
| 237 | // We would never want to shrink more than this |
| 238 | size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity; |
| 239 | |
| 240 | if (MaxHeapFreeRatio < 100) { |
| 241 | const double maximum_free_percentage = MaxHeapFreeRatio / 100.0; |
| 242 | const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
| 243 | const double max_tmp = used_after_gc / minimum_used_percentage; |
| 244 | size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); |
| 245 | maximum_desired_capacity = MAX2(maximum_desired_capacity, initial_size()); |
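    // For example, with MaxHeapFreeRatio == 70 the minimum used percentage
    // is 0.30, so 30M used after GC gives a maximum desired capacity of
    // 30M / 0.30 == 100M; capacity beyond that is a candidate for shrinking.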
| 246 | log_trace(gc, heap)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f" , |
| 247 | maximum_free_percentage, minimum_used_percentage); |
| 248 | log_trace(gc, heap)(" _capacity_at_prologue: %6.1fK minimum_desired_capacity: %6.1fK maximum_desired_capacity: %6.1fK" , |
| 249 | _capacity_at_prologue / (double) K, |
| 250 | minimum_desired_capacity / (double) K, |
| 251 | maximum_desired_capacity / (double) K); |
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");
| 254 | |
| 255 | if (capacity_after_gc > maximum_desired_capacity) { |
| 256 | // Capacity too large, compute shrinking size |
| 257 | shrink_bytes = capacity_after_gc - maximum_desired_capacity; |
| 258 | if (ShrinkHeapInSteps) { |
| 259 | // If ShrinkHeapInSteps is true (the default), |
| 260 | // we don't want to shrink all the way back to initSize if people call |
| 261 | // System.gc(), because some programs do that between "phases" and then |
| 262 | // we'd just have to grow the heap up again for the next phase. So we |
| 263 | // damp the shrinking: 0% on the first call, 10% on the second call, 40% |
| 264 | // on the third call, and 100% by the fourth call. But if we recompute |
| 265 | // size without shrinking, it goes back to 0%. |
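        // For example, if 80M of excess capacity persists across four
        // consecutive shrinking collections, the damped amounts shrunk
        // are 0M, 8M, 32M and 80M (factors 0, 10, 40 and 100).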
| 266 | shrink_bytes = shrink_bytes / 100 * current_shrink_factor; |
| 267 | if (current_shrink_factor == 0) { |
| 268 | _shrink_factor = 10; |
| 269 | } else { |
| 270 | _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100); |
| 271 | } |
| 272 | } |
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      log_trace(gc, heap)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK",
| 275 | initial_size() / (double) K, maximum_desired_capacity / (double) K); |
| 276 | log_trace(gc, heap)(" shrink_bytes: %.1fK current_shrink_factor: " SIZE_FORMAT " new shrink factor: " SIZE_FORMAT " _min_heap_delta_bytes: %.1fK" , |
| 277 | shrink_bytes / (double) K, |
| 278 | current_shrink_factor, |
| 279 | _shrink_factor, |
| 280 | _min_heap_delta_bytes / (double) K); |
| 281 | } |
| 282 | } |
| 283 | |
| 284 | if (capacity_after_gc > _capacity_at_prologue) { |
| 285 | // We might have expanded for promotions, in which case we might want to |
| 286 | // take back that expansion if there's room after GC. That keeps us from |
| 287 | // stretching the heap with promotions when there's plenty of room. |
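    // For example, if this generation entered the collection at 60M,
    // expanded to 90M to accommodate promotions, and has room to spare
    // after GC, we consider giving back up to that extra 30M.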
| 288 | size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue; |
| 289 | expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes); |
| 290 | // We have two shrinking computations, take the largest |
| 291 | shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion); |
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    log_trace(gc, heap)(" aggressive shrinking: _capacity_at_prologue: %.1fK capacity_after_gc: %.1fK expansion_for_promotion: %.1fK shrink_bytes: %.1fK",
                        _capacity_at_prologue / (double) K,
                        capacity_after_gc / (double) K,
                        expansion_for_promotion / (double) K,
                        shrink_bytes / (double) K);
| 298 | } |
| 299 | // Don't shrink unless it's significant |
| 300 | if (shrink_bytes >= _min_heap_delta_bytes) { |
| 301 | shrink(shrink_bytes); |
| 302 | } |
| 303 | } |
| 304 | |
| 305 | // Currently nothing to do. |
| 306 | void CardGeneration::prepare_for_verify() {} |
| 307 | |
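// A CardGeneration has exactly one space, so just visit that space; the
// usedOnly flag needs no special handling here.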
| 308 | void CardGeneration::space_iterate(SpaceClosure* blk, |
| 309 | bool usedOnly) { |
| 310 | blk->do_space(space()); |
| 311 | } |
| 312 | |
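// Iterate over the old-to-young references recorded for this generation's
// space, distributing the work over up to n_threads GC worker threads.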
| 313 | void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk, uint n_threads) { |
| 314 | blk->set_generation(this); |
| 315 | younger_refs_in_space_iterate(space(), blk, n_threads); |
| 316 | blk->reset_generation(); |
| 317 | } |
| 318 | |