| 1 | /* |
| 2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "gc/g1/g1Analytics.hpp" |
| 27 | #include "gc/g1/g1Arguments.hpp" |
| 28 | #include "gc/g1/g1CollectedHeap.inline.hpp" |
| 29 | #include "gc/g1/g1CollectionSet.hpp" |
| 30 | #include "gc/g1/g1CollectionSetCandidates.hpp" |
| 31 | #include "gc/g1/g1ConcurrentMark.hpp" |
| 32 | #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" |
| 33 | #include "gc/g1/g1ConcurrentRefine.hpp" |
| 34 | #include "gc/g1/g1CollectionSetChooser.hpp" |
| 35 | #include "gc/g1/g1HeterogeneousHeapPolicy.hpp" |
| 36 | #include "gc/g1/g1HotCardCache.hpp" |
| 37 | #include "gc/g1/g1IHOPControl.hpp" |
| 38 | #include "gc/g1/g1GCPhaseTimes.hpp" |
| 39 | #include "gc/g1/g1Policy.hpp" |
| 40 | #include "gc/g1/g1SurvivorRegions.hpp" |
| 41 | #include "gc/g1/g1YoungGenSizer.hpp" |
| 42 | #include "gc/g1/heapRegion.inline.hpp" |
| 43 | #include "gc/g1/heapRegionRemSet.hpp" |
| 44 | #include "gc/shared/gcPolicyCounters.hpp" |
| 45 | #include "logging/logStream.hpp" |
| 46 | #include "runtime/arguments.hpp" |
| 47 | #include "runtime/java.hpp" |
| 48 | #include "runtime/mutexLocker.hpp" |
| 49 | #include "utilities/debug.hpp" |
| 50 | #include "utilities/growableArray.hpp" |
| 51 | #include "utilities/pair.hpp" |
| 52 | |
| 53 | G1Policy::G1Policy(STWGCTimer* gc_timer) : |
| 54 | _predictor(G1ConfidencePercent / 100.0), |
| 55 | _analytics(new G1Analytics(&_predictor)), |
| 56 | _remset_tracker(), |
| 57 | _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)), |
| 58 | _ihop_control(create_ihop_control(&_predictor)), |
| 59 | _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)), |
| 60 | _full_collection_start_sec(0.0), |
| 61 | _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC), |
| 62 | _young_list_target_length(0), |
| 63 | _young_list_fixed_length(0), |
| 64 | _young_list_max_length(0), |
| 65 | _short_lived_surv_rate_group(new SurvRateGroup()), |
| 66 | _survivor_surv_rate_group(new SurvRateGroup()), |
| 67 | _reserve_factor((double) G1ReservePercent / 100.0), |
| 68 | _reserve_regions(0), |
| 69 | _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()), |
| 70 | _free_regions_at_end_of_collection(0), |
| 71 | _max_rs_lengths(0), |
| 72 | _rs_lengths_prediction(0), |
| 73 | _pending_cards(0), |
| 74 | _bytes_allocated_in_old_since_last_gc(0), |
| 75 | _initial_mark_to_mixed(), |
| 76 | _collection_set(NULL), |
| 77 | _bytes_copied_during_gc(0), |
| 78 | _g1h(NULL), |
| 79 | _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)), |
| 80 | _mark_remark_start_sec(0), |
| 81 | _mark_cleanup_start_sec(0), |
| 82 | _tenuring_threshold(MaxTenuringThreshold), |
| 83 | _max_survivor_regions(0), |
| 84 | _survivors_age_table(true) |
| 85 | { |
| 86 | } |
| 87 | |
| 88 | G1Policy::~G1Policy() { |
| 89 | delete _ihop_control; |
| 90 | delete _young_gen_sizer; |
| 91 | } |
| 92 | |
| 93 | G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) { |
| 94 | if (G1Arguments::is_heterogeneous_heap()) { |
| 95 | return new G1HeterogeneousHeapPolicy(gc_timer_stw); |
| 96 | } else { |
| 97 | return new G1Policy(gc_timer_stw); |
| 98 | } |
| 99 | } |
| 100 | |
| 101 | G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); } |
| 102 | |
| 103 | void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) { |
| 104 | _g1h = g1h; |
| 105 | _collection_set = collection_set; |
| 106 | |
| 107 | assert(Heap_lock->owned_by_self(), "Locking discipline."); |
| 108 | |
| 109 | if (!use_adaptive_young_list_length()) { |
| 110 | _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); |
| 111 | } |
| 112 | _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions()); |
| 113 | |
| 114 | _free_regions_at_end_of_collection = _g1h->num_free_regions(); |
| 115 | |
| 116 | update_young_list_max_and_target_length(); |
| 117 | // We may immediately start allocating regions and placing them on the |
| 118 | // collection set list. Initialize the per-collection set info |
| 119 | _collection_set->start_incremental_building(); |
| 120 | } |
| 121 | |
| 122 | void G1Policy::note_gc_start() { |
| 123 | phase_times()->note_gc_start(); |
| 124 | } |
| 125 | |
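| | // Helper used for adaptive young gen sizing: given the predicted fixed ("base") |
| | // pause costs and the number of free regions available, will_fit() answers whether |
| | // a candidate young list length is predicted to stay within both the pause time |
| | // target and the available space. |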
| 126 | class G1YoungLengthPredictor { |
| 127 | const bool _during_cm; |
| 128 | const double _base_time_ms; |
| 129 | const double _base_free_regions; |
| 130 | const double _target_pause_time_ms; |
| 131 | const G1Policy* const _policy; |
| 132 | |
| 133 | public: |
| 134 | G1YoungLengthPredictor(bool during_cm, |
| 135 | double base_time_ms, |
| 136 | double base_free_regions, |
| 137 | double target_pause_time_ms, |
| 138 | const G1Policy* policy) : |
| 139 | _during_cm(during_cm), |
| 140 | _base_time_ms(base_time_ms), |
| 141 | _base_free_regions(base_free_regions), |
| 142 | _target_pause_time_ms(target_pause_time_ms), |
| 143 | _policy(policy) {} |
| 144 | |
| 145 | bool will_fit(uint young_length) const { |
| 146 | if (young_length >= _base_free_regions) { |
| 147 | // end condition 1: not enough space for the young regions |
| 148 | return false; |
| 149 | } |
| 150 | |
| 151 | const double accum_surv_rate = _policy->accum_yg_surv_rate_pred((int) young_length - 1); |
| 152 | const size_t bytes_to_copy = |
| 153 | (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); |
| 154 | const double copy_time_ms = |
| 155 | _policy->analytics()->predict_object_copy_time_ms(bytes_to_copy, _during_cm); |
| 156 | const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length); |
| 157 | const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms; |
| 158 | if (pause_time_ms > _target_pause_time_ms) { |
| 159 | // end condition 2: prediction is over the target pause time |
| 160 | return false; |
| 161 | } |
| 162 | |
| 163 | const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes; |
| 164 | |
| 165 | // When copying, we will likely need more bytes free than is live in the region. |
| 166 | // Add some safety margin to factor in the confidence of our guess, and the |
| 167 | // natural expected waste. |
| 168 | // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty |
| 169 | // of the calculation: the lower the confidence, the more headroom. |
| 170 | // (100 + TargetPLABWastePct) represents the increase in expected bytes during |
| 171 | // copying due to anticipated waste in the PLABs. |
| 172 | const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0; |
| 173 | const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy); |
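| | // For example, with the default G1ConfidencePercent of 50 and TargetPLABWastePct of 10, |
| | // safety_factor = (100.0 / 50) * 110 / 100 = 2.2, i.e. we require roughly 2.2x the |
| | // predicted copy volume to be free before this young length is considered to fit. |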
| 174 | |
| 175 | if (expected_bytes_to_copy > free_bytes) { |
| 176 | // end condition 3: out-of-space |
| 177 | return false; |
| 178 | } |
| 179 | |
| 180 | // success! |
| 181 | return true; |
| 182 | } |
| 183 | }; |
| 184 | |
| 185 | void G1Policy::record_new_heap_size(uint new_number_of_regions) { |
| 186 | // re-calculate the necessary reserve |
| 187 | double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; |
| 188 | // We use ceiling so that if reserve_regions_d is > 0.0 (but |
| 189 | // smaller than 1.0) we'll get 1. |
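| | // For example, 2048 regions with the default G1ReservePercent of 10 gives |
| | // ceil(204.8) = 205 reserve regions. |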
| 190 | _reserve_regions = (uint) ceil(reserve_regions_d); |
| 191 | |
| 192 | _young_gen_sizer->heap_size_changed(new_number_of_regions); |
| 193 | |
| 194 | _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes); |
| 195 | } |
| 196 | |
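| | // Compute the desired minimum young list length: with adaptive sizing and enough |
| | // allocation rate samples, this is the number of regions the mutators are predicted |
| | // to allocate before the MMU tracker next allows a GC, plus the already existing |
| | // young regions (base_min_length), but never below the user-defined minimum. |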
| 197 | uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const { |
| 198 | uint desired_min_length = 0; |
| 199 | if (use_adaptive_young_list_length()) { |
| 200 | if (_analytics->num_alloc_rate_ms() > 3) { |
| 201 | double now_sec = os::elapsedTime(); |
| 202 | double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; |
| 203 | double alloc_rate_ms = _analytics->predict_alloc_rate_ms(); |
| 204 | desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); |
| 205 | } else { |
| 206 | // otherwise we don't have enough info to make the prediction |
| 207 | } |
| 208 | } |
| 209 | desired_min_length += base_min_length; |
| 210 | // make sure we don't go below any user-defined minimum bound |
| 211 | return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); |
| 212 | } |
| 213 | |
| 214 | uint G1Policy::calculate_young_list_desired_max_length() const { |
| 215 | // Here, we might want to also take into account any additional |
| 216 | // constraints (i.e., user-defined minimum bound). Currently, we |
| 217 | // effectively don't set this bound. |
| 218 | return _young_gen_sizer->max_desired_young_length(); |
| 219 | } |
| 220 | |
| 221 | uint G1Policy::update_young_list_max_and_target_length() { |
| 222 | return update_young_list_max_and_target_length(_analytics->predict_rs_lengths()); |
| 223 | } |
| 224 | |
| 225 | uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) { |
| 226 | uint unbounded_target_length = update_young_list_target_length(rs_lengths); |
| 227 | update_max_gc_locker_expansion(); |
| 228 | return unbounded_target_length; |
| 229 | } |
| 230 | |
| 231 | uint G1Policy::update_young_list_target_length(size_t rs_lengths) { |
| 232 | YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths); |
| 233 | _young_list_target_length = young_lengths.first; |
| 234 | |
| 235 | return young_lengths.second; |
| 236 | } |
| 237 | |
| 238 | G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengths) const { |
| 239 | YoungTargetLengths result; |
| 240 | |
| 241 | // Calculate the absolute and desired min bounds first. |
| 242 | |
| 243 | // This is how many young regions we already have (currently: the survivors). |
| 244 | const uint base_min_length = _g1h->survivor_regions_count(); |
| 245 | uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); |
| 246 | // This is the absolute minimum young length. Ensure that we |
| 247 | // will at least have one eden region available for allocation. |
| 248 | uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1); |
| 249 | // If we shrank the young list target it should not shrink below the current size. |
| 250 | desired_min_length = MAX2(desired_min_length, absolute_min_length); |
| 251 | // Calculate the absolute and desired max bounds. |
| 252 | |
| 253 | uint desired_max_length = calculate_young_list_desired_max_length(); |
| 254 | |
| 255 | uint young_list_target_length = 0; |
| 256 | if (use_adaptive_young_list_length()) { |
| 257 | if (collector_state()->in_young_only_phase()) { |
| 258 | young_list_target_length = |
| 259 | calculate_young_list_target_length(rs_lengths, |
| 260 | base_min_length, |
| 261 | desired_min_length, |
| 262 | desired_max_length); |
| 263 | } else { |
| 264 | // Don't calculate anything and let the code below bound it to |
| 265 | // the desired_min_length, i.e., do the next GC as soon as |
| 266 | // possible to maximize how many old regions we can add to it. |
| 267 | } |
| 268 | } else { |
| 269 | // The user asked for a fixed young gen so we'll fix the young gen |
| 270 | // whether the next GC is young or mixed. |
| 271 | young_list_target_length = _young_list_fixed_length; |
| 272 | } |
| 273 | |
| 274 | result.second = young_list_target_length; |
| 275 | |
| 276 | // We will try our best not to "eat" into the reserve. |
| 277 | uint absolute_max_length = 0; |
| 278 | if (_free_regions_at_end_of_collection > _reserve_regions) { |
| 279 | absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; |
| 280 | } |
| 281 | if (desired_max_length > absolute_max_length) { |
| 282 | desired_max_length = absolute_max_length; |
| 283 | } |
| 284 | |
| 285 | // Make sure we don't go over the desired max length, nor under the |
| 286 | // desired min length. In case they clash, desired_min_length wins |
| 287 | // which is why that test is second. |
| 288 | if (young_list_target_length > desired_max_length) { |
| 289 | young_list_target_length = desired_max_length; |
| 290 | } |
| 291 | if (young_list_target_length < desired_min_length) { |
| 292 | young_list_target_length = desired_min_length; |
| 293 | } |
| 294 | |
| 295 | assert(young_list_target_length > base_min_length, |
| 296 | "we should be able to allocate at least one eden region" ); |
| 297 | assert(young_list_target_length >= absolute_min_length, "post-condition" ); |
| 298 | |
| 299 | result.first = young_list_target_length; |
| 300 | return result; |
| 301 | } |
| 302 | |
| 303 | uint |
| 304 | G1Policy::calculate_young_list_target_length(size_t rs_lengths, |
| 305 | uint base_min_length, |
| 306 | uint desired_min_length, |
| 307 | uint desired_max_length) const { |
| 308 | assert(use_adaptive_young_list_length(), "pre-condition"); |
| 309 | assert(collector_state()->in_young_only_phase(), "only call this for young GCs"); |
| 310 | |
| 311 | // In case some edge-condition makes the desired max length too small... |
| 312 | if (desired_max_length <= desired_min_length) { |
| 313 | return desired_min_length; |
| 314 | } |
| 315 | |
| 316 | // We'll adjust min_young_length and max_young_length not to include |
| 317 | // the already allocated young regions (i.e., so they reflect the |
| 318 | // min and max eden regions we'll allocate). The base_min_length |
| 319 | // will be reflected in the predictions by the |
| 320 | // survivor_regions_evac_time prediction. |
| 321 | assert(desired_min_length > base_min_length, "invariant"); |
| 322 | uint min_young_length = desired_min_length - base_min_length; |
| 323 | assert(desired_max_length > base_min_length, "invariant"); |
| 324 | uint max_young_length = desired_max_length - base_min_length; |
| 325 | |
| 326 | const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; |
| 327 | const double survivor_regions_evac_time = predict_survivor_regions_evac_time(); |
| 328 | const size_t pending_cards = _analytics->predict_pending_cards(); |
| 329 | const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff(); |
| 330 | const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, true /* for_young_gc */); |
| 331 | const double base_time_ms = |
| 332 | predict_base_elapsed_time_ms(pending_cards, scanned_cards) + |
| 333 | survivor_regions_evac_time; |
| 334 | const uint available_free_regions = _free_regions_at_end_of_collection; |
| 335 | const uint base_free_regions = |
| 336 | available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0; |
| 337 | |
| 338 | // Here, we will make sure that the shortest young length that |
| 339 | // makes sense fits within the target pause time. |
| 340 | |
| 341 | G1YoungLengthPredictor p(collector_state()->mark_or_rebuild_in_progress(), |
| 342 | base_time_ms, |
| 343 | base_free_regions, |
| 344 | target_pause_time_ms, |
| 345 | this); |
| 346 | if (p.will_fit(min_young_length)) { |
| 347 | // The shortest young length will fit into the target pause time; |
| 348 | // we'll now check whether the absolute maximum number of young |
| 349 | // regions will fit in the target pause time. If not, we'll do |
| 350 | // a binary search between min_young_length and max_young_length. |
| 351 | if (p.will_fit(max_young_length)) { |
| 352 | // The maximum young length will fit into the target pause time. |
| 353 | // We are done so set min young length to the maximum length (as |
| 354 | // the result is assumed to be returned in min_young_length). |
| 355 | min_young_length = max_young_length; |
| 356 | } else { |
| 357 | // The maximum possible number of young regions will not fit within |
| 358 | // the target pause time so we'll search for the optimal |
| 359 | // length. The loop invariants are: |
| 360 | // |
| 361 | // min_young_length < max_young_length |
| 362 | // min_young_length is known to fit into the target pause time |
| 363 | // max_young_length is known not to fit into the target pause time |
| 364 | // |
| 365 | // Going into the loop we know the above hold as we've just |
| 366 | // checked them. Every time around the loop we check whether |
| 367 | // the middle value between min_young_length and |
| 368 | // max_young_length fits into the target pause time. If it |
| 369 | // does, it becomes the new min. If it doesn't, it becomes |
| 370 | // the new max. This way we maintain the loop invariants. |
| 371 | |
| 372 | assert(min_young_length < max_young_length, "invariant"); |
| 373 | uint diff = (max_young_length - min_young_length) / 2; |
| 374 | while (diff > 0) { |
| 375 | uint young_length = min_young_length + diff; |
| 376 | if (p.will_fit(young_length)) { |
| 377 | min_young_length = young_length; |
| 378 | } else { |
| 379 | max_young_length = young_length; |
| 380 | } |
| 381 | assert(min_young_length < max_young_length, "invariant"); |
| 382 | diff = (max_young_length - min_young_length) / 2; |
| 383 | } |
| 384 | // The results is min_young_length which, according to the |
| 385 | // loop invariants, should fit within the target pause time. |
| 386 | |
| 387 | // These are the post-conditions of the binary search above: |
| 388 | assert(min_young_length < max_young_length, |
| 389 | "otherwise we should have discovered that max_young_length " |
| 390 | "fits into the pause target and not done the binary search" ); |
| 391 | assert(p.will_fit(min_young_length), |
| 392 | "min_young_length, the result of the binary search, should " |
| 393 | "fit into the pause target" ); |
| 394 | assert(!p.will_fit(min_young_length + 1), |
| 395 | "min_young_length, the result of the binary search, should be " |
| 396 | "optimal, so no larger length should fit into the pause target" ); |
| 397 | } |
| 398 | } else { |
| 399 | // Even the minimum length doesn't fit into the pause time |
| 400 | // target, return it as the result nevertheless. |
| 401 | } |
| 402 | return base_min_length + min_young_length; |
| 403 | } |
| 404 | |
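| | // Predict the time needed to evacuate the current survivor regions by summing |
| | // the per-region elapsed time predictions. |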
| 405 | double G1Policy::predict_survivor_regions_evac_time() const { |
| 406 | double survivor_regions_evac_time = 0.0; |
| 407 | const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions(); |
| 408 | |
| 409 | for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin(); |
| 410 | it != survivor_regions->end(); |
| 411 | ++it) { |
| 412 | survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase()); |
| 413 | } |
| 414 | return survivor_regions_evac_time; |
| 415 | } |
| 416 | |
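| | // If the sampled remembered set lengths exceed the current prediction, bump the |
| | // prediction by 10% (to avoid recalculating too often) and recompute the young |
| | // list max and target lengths from it. |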
| 417 | void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) { |
| 418 | guarantee(use_adaptive_young_list_length(), "should not call this otherwise"); |
| 419 | |
| 420 | if (rs_lengths > _rs_lengths_prediction) { |
| 421 | // add 10% to avoid having to recalculate often |
| 422 | size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; |
| 423 | update_rs_lengths_prediction(rs_lengths_prediction); |
| 424 | |
| 425 | update_young_list_max_and_target_length(rs_lengths_prediction); |
| 426 | } |
| 427 | } |
| 428 | |
| 429 | void G1Policy::update_rs_lengths_prediction() { |
| 430 | update_rs_lengths_prediction(_analytics->predict_rs_lengths()); |
| 431 | } |
| 432 | |
| 433 | void G1Policy::update_rs_lengths_prediction(size_t prediction) { |
| 434 | if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) { |
| 435 | _rs_lengths_prediction = prediction; |
| 436 | } |
| 437 | } |
| 438 | |
| 439 | void G1Policy::record_full_collection_start() { |
| 440 | _full_collection_start_sec = os::elapsedTime(); |
| 441 | // Release the future to-space so that it is available for compaction into. |
| 442 | collector_state()->set_in_young_only_phase(false); |
| 443 | collector_state()->set_in_full_gc(true); |
| 444 | _collection_set->clear_candidates(); |
| 445 | } |
| 446 | |
| 447 | void G1Policy::record_full_collection_end() { |
| 448 | // Consider this like a collection pause for the purposes of allocation |
| 449 | // since last pause. |
| 450 | double end_sec = os::elapsedTime(); |
| 451 | double full_gc_time_sec = end_sec - _full_collection_start_sec; |
| 452 | double full_gc_time_ms = full_gc_time_sec * 1000.0; |
| 453 | |
| 454 | _analytics->update_recent_gc_times(end_sec, full_gc_time_ms); |
| 455 | |
| 456 | collector_state()->set_in_full_gc(false); |
| 457 | |
| 458 | // "Nuke" the heuristics that control the young/mixed GC |
| 459 | // transitions and make sure we start with young GCs after the Full GC. |
| 460 | collector_state()->set_in_young_only_phase(true); |
| 461 | collector_state()->set_in_young_gc_before_mixed(false); |
| 462 | collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); |
| 463 | collector_state()->set_in_initial_mark_gc(false); |
| 464 | collector_state()->set_mark_or_rebuild_in_progress(false); |
| 465 | collector_state()->set_clearing_next_bitmap(false); |
| 466 | |
| 467 | _short_lived_surv_rate_group->start_adding_regions(); |
| 468 | // also call this on any additional surv rate groups |
| 469 | |
| 470 | _free_regions_at_end_of_collection = _g1h->num_free_regions(); |
| 471 | // Reset survivors SurvRateGroup. |
| 472 | _survivor_surv_rate_group->reset(); |
| 473 | update_young_list_max_and_target_length(); |
| 474 | update_rs_lengths_prediction(); |
| 475 | |
| 476 | _bytes_allocated_in_old_since_last_gc = 0; |
| 477 | |
| 478 | record_pause(FullGC, _full_collection_start_sec, end_sec); |
| 479 | } |
| 480 | |
| 481 | void G1Policy::record_collection_pause_start(double start_time_sec) { |
| 482 | // We only need to do this here as the policy will only be applied |
| 483 | // to the GC we're about to start, so there is no point in calculating this |
| 484 | // every time we calculate / recalculate the target young length. |
| 485 | update_survivors_policy(); |
| 486 | |
| 487 | assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(), |
| 488 | "Maximum survivor regions %u plus used regions %u exceeds max regions %u" , |
| 489 | max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions()); |
| 490 | assert_used_and_recalculate_used_equal(_g1h); |
| 491 | |
| 492 | phase_times()->record_cur_collection_start_sec(start_time_sec); |
| 493 | _pending_cards = _g1h->pending_card_num(); |
| 494 | |
| 495 | _collection_set->reset_bytes_used_before(); |
| 496 | _bytes_copied_during_gc = 0; |
| 497 | |
| 498 | // do that for any other surv rate groups |
| 499 | _short_lived_surv_rate_group->stop_adding_regions(); |
| 500 | _survivors_age_table.clear(); |
| 501 | |
| 502 | assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed"); |
| 503 | } |
| 504 | |
| 505 | void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) { |
| 506 | assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); |
| 507 | collector_state()->set_in_initial_mark_gc(false); |
| 508 | } |
| 509 | |
| 510 | void G1Policy::record_concurrent_mark_remark_start() { |
| 511 | _mark_remark_start_sec = os::elapsedTime(); |
| 512 | } |
| 513 | |
| 514 | void G1Policy::record_concurrent_mark_remark_end() { |
| 515 | double end_time_sec = os::elapsedTime(); |
| 516 | double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; |
| 517 | _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms); |
| 518 | _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms); |
| 519 | |
| 520 | record_pause(Remark, _mark_remark_start_sec, end_time_sec); |
| 521 | } |
| 522 | |
| 523 | void G1Policy::record_concurrent_mark_cleanup_start() { |
| 524 | _mark_cleanup_start_sec = os::elapsedTime(); |
| 525 | } |
| 526 | |
| 527 | double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { |
| 528 | return phase_times()->average_time_ms(phase); |
| 529 | } |
| 530 | |
| 531 | double G1Policy::young_other_time_ms() const { |
| 532 | return phase_times()->young_cset_choice_time_ms() + |
| 533 | phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet); |
| 534 | } |
| 535 | |
| 536 | double G1Policy::non_young_other_time_ms() const { |
| 537 | return phase_times()->non_young_cset_choice_time_ms() + |
| 538 | phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet); |
| 539 | } |
| 540 | |
| 541 | double G1Policy::other_time_ms(double pause_time_ms) const { |
| 542 | return pause_time_ms - phase_times()->cur_collection_par_time_ms(); |
| 543 | } |
| 544 | |
| 545 | double G1Policy::constant_other_time_ms(double pause_time_ms) const { |
| 546 | return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms(); |
| 547 | } |
| 548 | |
| 549 | bool G1Policy::about_to_start_mixed_phase() const { |
| 550 | return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed(); |
| 551 | } |
| 552 | |
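| | // Decide whether a concurrent marking cycle should be started: unless a mixed phase |
| | // is already about to start, compare the occupancy of the non-young regions plus the |
| | // pending allocation against the IHOP threshold. |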
| 553 | bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { |
| 554 | if (about_to_start_mixed_phase()) { |
| 555 | return false; |
| 556 | } |
| 557 | |
| 558 | size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); |
| 559 | |
| 560 | size_t cur_used_bytes = _g1h->non_young_capacity_bytes(); |
| 561 | size_t alloc_byte_size = alloc_word_size * HeapWordSize; |
| 562 | size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; |
| 563 | |
| 564 | bool result = false; |
| 565 | if (marking_request_bytes > marking_initiating_used_threshold) { |
| 566 | result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed(); |
| 567 | log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s", |
| 568 | result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", |
| 569 | cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source); |
| 570 | } |
| 571 | |
| 572 | return result; |
| 573 | } |
| 574 | |
| 575 | // Anything below that is considered to be zero |
| 576 | #define MIN_TIMER_GRANULARITY 0.0000001 |
| 577 | |
| 578 | void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { |
| 579 | double end_time_sec = os::elapsedTime(); |
| 580 | |
| 581 | assert_used_and_recalculate_used_equal(_g1h); |
| 582 | size_t cur_used_bytes = _g1h->used(); |
| 583 | bool this_pause_included_initial_mark = false; |
| 584 | bool this_pause_was_young_only = collector_state()->in_young_only_phase(); |
| 585 | |
| 586 | bool update_stats = !_g1h->evacuation_failed(); |
| 587 | |
| 588 | record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); |
| 589 | |
| 590 | _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; |
| 591 | |
| 592 | this_pause_included_initial_mark = collector_state()->in_initial_mark_gc(); |
| 593 | if (this_pause_included_initial_mark) { |
| 594 | record_concurrent_mark_init_end(0.0); |
| 595 | } else { |
| 596 | maybe_start_marking(); |
| 597 | } |
| 598 | |
| 599 | double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms()); |
| 600 | if (app_time_ms < MIN_TIMER_GRANULARITY) { |
| 601 | // This usually happens due to the timer not having the required |
| 602 | // granularity. Some Linuxes are the usual culprits. |
| 603 | // We'll just set it to something (arbitrarily) small. |
| 604 | app_time_ms = 1.0; |
| 605 | } |
| 606 | |
| 607 | if (update_stats) { |
| 608 | // We maintain the invariant that all objects allocated by mutator |
| 609 | // threads will be allocated out of eden regions. So, we can use |
| 610 | // the eden region number allocated since the previous GC to |
| 611 | // calculate the application's allocate rate. The only exception |
| 612 | // to that is humongous objects that are allocated separately. But |
| 613 | // given that humongous object allocations do not really affect |
| 614 | // either the pause's duration or when the next pause will take |
| 615 | // place we can safely ignore them here. |
| 616 | uint regions_allocated = _collection_set->eden_region_length(); |
| 617 | double alloc_rate_ms = (double) regions_allocated / app_time_ms; |
| 618 | _analytics->report_alloc_rate_ms(alloc_rate_ms); |
| 619 | |
| 620 | double interval_ms = |
| 621 | (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0; |
| 622 | _analytics->update_recent_gc_times(end_time_sec, pause_time_ms); |
| 623 | _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms); |
| 624 | } |
| 625 | |
| 626 | if (collector_state()->in_young_gc_before_mixed()) { |
| 627 | assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC"); |
| 628 | // This has been the young GC before we start doing mixed GCs. We already |
| 629 | // decided to start mixed GCs much earlier, so there is nothing to do except |
| 630 | // advancing the state. |
| 631 | collector_state()->set_in_young_only_phase(false); |
| 632 | collector_state()->set_in_young_gc_before_mixed(false); |
| 633 | } else if (!this_pause_was_young_only) { |
| 634 | // This is a mixed GC. Here we decide whether to continue doing more |
| 635 | // mixed GCs or not. |
| 636 | if (!next_gc_should_be_mixed("continue mixed GCs", |
| 637 | "do not continue mixed GCs")) { |
| 638 | collector_state()->set_in_young_only_phase(true); |
| 639 | |
| 640 | clear_collection_set_candidates(); |
| 641 | maybe_start_marking(); |
| 642 | } |
| 643 | } |
| 644 | |
| 645 | _short_lived_surv_rate_group->start_adding_regions(); |
| 646 | // Do that for any other surv rate groups |
| 647 | |
| 648 | double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0; |
| 649 | |
| 650 | if (update_stats) { |
| 651 | double cost_per_card_ms = 0.0; |
| 652 | if (_pending_cards > 0) { |
| 653 | cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS)) / (double) _pending_cards; |
| 654 | _analytics->report_cost_per_card_ms(cost_per_card_ms); |
| 655 | } |
| 656 | _analytics->report_cost_scan_hcc(scan_hcc_time_ms); |
| 657 | |
| 658 | double cost_per_entry_ms = 0.0; |
| 659 | if (cards_scanned > 10) { |
| 660 | double avg_time_scan_rs = average_time_ms(G1GCPhaseTimes::ScanRS); |
| 661 | if (this_pause_was_young_only) { |
| 662 | avg_time_scan_rs += average_time_ms(G1GCPhaseTimes::OptScanRS); |
| 663 | } |
| 664 | cost_per_entry_ms = avg_time_scan_rs / cards_scanned; |
| 665 | _analytics->report_cost_per_entry_ms(cost_per_entry_ms, this_pause_was_young_only); |
| 666 | } |
| 667 | |
| 668 | if (_max_rs_lengths > 0) { |
| 669 | double cards_per_entry_ratio = |
| 670 | (double) cards_scanned / (double) _max_rs_lengths; |
| 671 | _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only); |
| 672 | } |
| 673 | |
| 674 | // This is defensive. For a while _max_rs_lengths could get |
| 675 | // smaller than _recorded_rs_lengths which was causing |
| 676 | // rs_length_diff to get very large and mess up the RSet length |
| 677 | // predictions. The reason was unsafe concurrent updates to the |
| 678 | // _inc_cset_recorded_rs_lengths field which the code below guards |
| 679 | // against (see CR 7118202). This bug has now been fixed (see CR |
| 680 | // 7119027). However, I'm still worried that |
| 681 | // _inc_cset_recorded_rs_lengths might still end up somewhat |
| 682 | // inaccurate. The concurrent refinement thread calculates an |
| 683 | // RSet's length concurrently with other CR threads updating it |
| 684 | // which might cause it to calculate the length incorrectly (if, |
| 685 | // say, it's in mid-coarsening). So I'll leave in the defensive |
| 686 | // conditional below just in case. |
| 687 | size_t rs_length_diff = 0; |
| 688 | size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths(); |
| 689 | if (_max_rs_lengths > recorded_rs_lengths) { |
| 690 | rs_length_diff = _max_rs_lengths - recorded_rs_lengths; |
| 691 | } |
| 692 | _analytics->report_rs_length_diff((double) rs_length_diff); |
| 693 | |
| 694 | size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes; |
| 695 | size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes; |
| 696 | double cost_per_byte_ms = 0.0; |
| 697 | |
| 698 | if (copied_bytes > 0) { |
| 699 | cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / (double) copied_bytes; |
| 700 | _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress()); |
| 701 | } |
| 702 | |
| 703 | if (_collection_set->young_region_length() > 0) { |
| 704 | _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() / |
| 705 | _collection_set->young_region_length()); |
| 706 | } |
| 707 | |
| 708 | if (_collection_set->old_region_length() > 0) { |
| 709 | _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() / |
| 710 | _collection_set->old_region_length()); |
| 711 | } |
| 712 | |
| 713 | _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms)); |
| 714 | |
| 715 | // Do not update RS lengths and the number of pending cards with information from mixed gc: |
| 716 | // these are wildly different from those during young-only gc and mess up young gen sizing right |
| 717 | // after the mixed gc phase. |
| 718 | // During mixed gc we do not use them for young gen sizing. |
| 719 | if (this_pause_was_young_only) { |
| 720 | _analytics->report_pending_cards((double) _pending_cards); |
| 721 | _analytics->report_rs_lengths((double) _max_rs_lengths); |
| 722 | } |
| 723 | } |
| 724 | |
| 725 | assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()), |
| 726 | "If the last pause has been an initial mark, we should not have been in the marking window" ); |
| 727 | if (this_pause_included_initial_mark) { |
| 728 | collector_state()->set_mark_or_rebuild_in_progress(true); |
| 729 | } |
| 730 | |
| 731 | _free_regions_at_end_of_collection = _g1h->num_free_regions(); |
| 732 | |
| 733 | update_rs_lengths_prediction(); |
| 734 | |
| 735 | // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely |
| 736 | // that in this case we are not running in a "normal" operating mode. |
| 737 | if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { |
| 738 | // IHOP control wants to know the expected young gen length if it were not |
| 739 | // restrained by the heap reserve. Using the actual length would make the |
| 740 | // prediction too small and thus limit the young gen every time we get to the |
| 741 | // predicted target occupancy. |
| 742 | size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); |
| 743 | |
| 744 | update_ihop_prediction(app_time_ms / 1000.0, |
| 745 | _bytes_allocated_in_old_since_last_gc, |
| 746 | last_unrestrained_young_length * HeapRegion::GrainBytes, |
| 747 | this_pause_was_young_only); |
| 748 | _bytes_allocated_in_old_since_last_gc = 0; |
| 749 | |
| 750 | _ihop_control->send_trace_event(_g1h->gc_tracer_stw()); |
| 751 | } else { |
| 752 | // Any garbage collection triggered as periodic collection resets the time-to-mixed |
| 753 | // measurement. Periodic collection typically means that the application is "inactive", i.e. |
| 754 | // the marking threads may have received an uncharacteristic amount of cpu time |
| 755 | // for completing the marking, i.e. are faster than expected. |
| 756 | // This skews the predicted marking length towards smaller values which might cause |
| 757 | // the mark start to happen too late. |
| 758 | _initial_mark_to_mixed.reset(); |
| 759 | } |
| 760 | |
| 761 | // Note that _mmu_tracker->max_gc_time() returns the time in seconds. |
| 762 | double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; |
| 763 | |
| 764 | if (update_rs_time_goal_ms < scan_hcc_time_ms) { |
| 765 | log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)." |
| 766 | "Update RS time goal: %1.2fms Scan HCC time: %1.2fms" , |
| 767 | update_rs_time_goal_ms, scan_hcc_time_ms); |
| 768 | |
| 769 | update_rs_time_goal_ms = 0; |
| 770 | } else { |
| 771 | update_rs_time_goal_ms -= scan_hcc_time_ms; |
| 772 | } |
| 773 | _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS), |
| 774 | phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), |
| 775 | update_rs_time_goal_ms); |
| 776 | } |
| 777 | |
| 778 | G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){ |
| 779 | if (G1UseAdaptiveIHOP) { |
| 780 | return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, |
| 781 | predictor, |
| 782 | G1ReservePercent, |
| 783 | G1HeapWastePercent); |
| 784 | } else { |
| 785 | return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); |
| 786 | } |
| 787 | } |
| 788 | |
| 789 | void G1Policy::update_ihop_prediction(double mutator_time_s, |
| 790 | size_t mutator_alloc_bytes, |
| 791 | size_t young_gen_size, |
| 792 | bool this_gc_was_young_only) { |
| 793 | // Always try to update IHOP prediction. Even evacuation failures give information |
| 794 | // about e.g. whether to start IHOP earlier next time. |
| 795 | |
| 796 | // Avoid using really small application times that might create samples with |
| 797 | // very high or very low values. They may be caused by e.g. back-to-back gcs. |
| 798 | double const min_valid_time = 1e-6; |
| 799 | |
| 800 | bool report = false; |
| 801 | |
| 802 | double marking_to_mixed_time = -1.0; |
| 803 | if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) { |
| 804 | marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); |
| 805 | assert(marking_to_mixed_time > 0.0, |
| 806 | "Initial mark to mixed time must be larger than zero but is %.3f" , |
| 807 | marking_to_mixed_time); |
| 808 | if (marking_to_mixed_time > min_valid_time) { |
| 809 | _ihop_control->update_marking_length(marking_to_mixed_time); |
| 810 | report = true; |
| 811 | } |
| 812 | } |
| 813 | |
| 814 | // As an approximation for the young gc promotion rates during marking we use |
| 815 | // the rates from all young gcs, not only those that occur during marking. In many |
| 816 | // applications there are only a few (if any) young gcs during marking, which would |
| 817 | // make a prediction based on them alone useless. Using all young gcs increases accuracy. |
| 818 | if (this_gc_was_young_only && mutator_time_s > min_valid_time) { |
| 819 | _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); |
| 820 | report = true; |
| 821 | } |
| 822 | |
| 823 | if (report) { |
| 824 | report_ihop_statistics(); |
| 825 | } |
| 826 | } |
| 827 | |
| 828 | void G1Policy::report_ihop_statistics() { |
| 829 | _ihop_control->print(); |
| 830 | } |
| 831 | |
| 832 | void G1Policy::print_phases() { |
| 833 | phase_times()->print(); |
| 834 | } |
| 835 | |
| 836 | double G1Policy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { |
| 837 | TruncatedSeq* seq = surv_rate_group->get_seq(age); |
| 838 | guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age); |
| 839 | double pred = _predictor.get_new_prediction(seq); |
| 840 | if (pred > 1.0) { |
| 841 | pred = 1.0; |
| 842 | } |
| 843 | return pred; |
| 844 | } |
| 845 | |
| 846 | double G1Policy::accum_yg_surv_rate_pred(int age) const { |
| 847 | return _short_lived_surv_rate_group->accum_surv_rate_pred(age); |
| 848 | } |
| 849 | |
| 850 | double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards, |
| 851 | size_t scanned_cards) const { |
| 852 | return |
| 853 | _analytics->predict_rs_update_time_ms(pending_cards) + |
| 854 | _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->in_young_only_phase()) + |
| 855 | _analytics->predict_constant_other_time_ms(); |
| 856 | } |
| 857 | |
| 858 | double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const { |
| 859 | size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff(); |
| 860 | size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->in_young_only_phase()); |
| 861 | return predict_base_elapsed_time_ms(pending_cards, card_num); |
| 862 | } |
| 863 | |
| 864 | size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const { |
| 865 | size_t bytes_to_copy; |
| 866 | if (!hr->is_young()) { |
| 867 | bytes_to_copy = hr->max_live_bytes(); |
| 868 | } else { |
| 869 | assert(hr->age_in_surv_rate_group() != -1, "invariant"); |
| 870 | int age = hr->age_in_surv_rate_group(); |
| 871 | double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); |
| 872 | bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); |
| 873 | } |
| 874 | return bytes_to_copy; |
| 875 | } |
| 876 | |
| 877 | double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr, |
| 878 | bool for_young_gc) const { |
| 879 | size_t rs_length = hr->rem_set()->occupied(); |
| 880 | // Predicting the number of cards is based on which type of GC |
| 881 | // we're predicting for. |
| 882 | size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc); |
| 883 | size_t bytes_to_copy = predict_bytes_to_copy(hr); |
| 884 | |
| 885 | double region_elapsed_time_ms = |
| 886 | _analytics->predict_rs_scan_time_ms(card_num, collector_state()->in_young_only_phase()) + |
| 887 | _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress()); |
| 888 | |
| 889 | // The prediction of the "other" time for this region is based |
| 890 | // upon the region type and NOT the GC type. |
| 891 | if (hr->is_young()) { |
| 892 | region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1); |
| 893 | } else { |
| 894 | region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1); |
| 895 | } |
| 896 | return region_elapsed_time_ms; |
| 897 | } |
| 898 | |
| 899 | bool G1Policy::should_allocate_mutator_region() const { |
| 900 | uint young_list_length = _g1h->young_regions_count(); |
| 901 | uint young_list_target_length = _young_list_target_length; |
| 902 | return young_list_length < young_list_target_length; |
| 903 | } |
| 904 | |
| 905 | bool G1Policy::can_expand_young_list() const { |
| 906 | uint young_list_length = _g1h->young_regions_count(); |
| 907 | uint young_list_max_length = _young_list_max_length; |
| 908 | return young_list_length < young_list_max_length; |
| 909 | } |
| 910 | |
| 911 | bool G1Policy::use_adaptive_young_list_length() const { |
| 912 | return _young_gen_sizer->use_adaptive_young_list_length(); |
| 913 | } |
| 914 | |
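| | // The desired survivor size is TargetSurvivorRatio percent (50% by default) of the |
| | // capacity of max_regions survivor regions. |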
| 915 | size_t G1Policy::desired_survivor_size(uint max_regions) const { |
| 916 | size_t const survivor_capacity = HeapRegion::GrainWords * max_regions; |
| 917 | return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100); |
| 918 | } |
| 919 | |
| 920 | void G1Policy::print_age_table() { |
| 921 | _survivors_age_table.print_age_table(_tenuring_threshold); |
| 922 | } |
| 923 | |
| 924 | void G1Policy::update_max_gc_locker_expansion() { |
| 925 | uint expansion_region_num = 0; |
| 926 | if (GCLockerEdenExpansionPercent > 0) { |
| 927 | double perc = (double) GCLockerEdenExpansionPercent / 100.0; |
| 928 | double expansion_region_num_d = perc * (double) _young_list_target_length; |
| 929 | // We use ceiling so that if expansion_region_num_d is > 0.0 (but |
| 930 | // less than 1.0) we'll get 1. |
| 931 | expansion_region_num = (uint) ceil(expansion_region_num_d); |
| 932 | } else { |
| 933 | assert(expansion_region_num == 0, "sanity"); |
| 934 | } |
| 935 | _young_list_max_length = _young_list_target_length + expansion_region_num; |
| 936 | assert(_young_list_target_length <= _young_list_max_length, "post-condition"); |
| 937 | } |
| 938 | |
| 939 | // Calculates survivor space parameters. |
| 940 | void G1Policy::update_survivors_policy() { |
| 941 | double max_survivor_regions_d = |
| 942 | (double) _young_list_target_length / (double) SurvivorRatio; |
| 943 | |
| 944 | // Calculate desired survivor size based on desired max survivor regions (unconstrained |
| 945 | // by remaining heap). Otherwise we may cause undesired promotions as we are |
| 946 | // already getting close to the end of the heap, impacting performance even more. |
| 947 | uint const desired_max_survivor_regions = ceil(max_survivor_regions_d); |
| 948 | size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions); |
| 949 | |
| 950 | _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size); |
| 951 | if (UsePerfData) { |
| 952 | _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold); |
| 953 | _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize); |
| 954 | } |
| 955 | // The real maximum survivor size is bounded by the number of regions that can |
| 956 | // be allocated into. |
| 957 | _max_survivor_regions = MIN2(desired_max_survivor_regions, |
| 958 | _g1h->num_free_or_available_regions()); |
| 959 | } |
| 960 | |
| 961 | bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { |
| 962 | // We actually check whether we are marking here and not if we are in a |
| 963 | // reclamation phase. This means that we will schedule a concurrent mark |
| 964 | // even while we are still in the process of reclaiming memory. |
| 965 | bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle(); |
| 966 | if (!during_cycle) { |
| 967 | log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s" , GCCause::to_string(gc_cause)); |
| 968 | collector_state()->set_initiate_conc_mark_if_possible(true); |
| 969 | return true; |
| 970 | } else { |
| 971 | log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s" , GCCause::to_string(gc_cause)); |
| 972 | return false; |
| 973 | } |
| 974 | } |
| 975 | |
| 976 | void G1Policy::initiate_conc_mark() { |
| 977 | collector_state()->set_in_initial_mark_gc(true); |
| 978 | collector_state()->set_initiate_conc_mark_if_possible(false); |
| 979 | } |
| 980 | |
| 981 | void G1Policy::decide_on_conc_mark_initiation() { |
| 982 | // We are about to decide on whether this pause will be an |
| 983 | // initial-mark pause. |
| 984 | |
| 985 | // First, collector_state()->in_initial_mark_gc() should not be already set. We |
| 986 | // will set it here if we have to. However, it should be cleared by |
| 987 | // the end of the pause (it's only set for the duration of an |
| 988 | // initial-mark pause). |
| 989 | assert(!collector_state()->in_initial_mark_gc(), "pre-condition"); |
| 990 | |
| 991 | if (collector_state()->initiate_conc_mark_if_possible()) { |
| 992 | // We had noticed on a previous pause that the heap occupancy has |
| 993 | // gone over the initiating threshold and we should start a |
| 994 | // concurrent marking cycle. So we might initiate one. |
| 995 | |
| 996 | if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) { |
| 997 | // Initiate a new initial mark if there is no marking or reclamation going on. |
| 998 | initiate_conc_mark(); |
| 999 | log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)" ); |
| 1000 | } else if (_g1h->is_user_requested_concurrent_full_gc(_g1h->gc_cause())) { |
| 1001 | // Initiate a user requested initial mark. An initial mark must be young only |
| 1002 | // GC, so the collector state must be updated to reflect this. |
| 1003 | collector_state()->set_in_young_only_phase(true); |
| 1004 | collector_state()->set_in_young_gc_before_mixed(false); |
| 1005 | |
| 1006 | // We might have ended up coming here about to start a mixed phase with a collection set |
| 1007 | // active. The following remark might change the "evacuation efficiency" of |
| 1008 | // the regions in this set, leading to failing asserts later. |
| 1009 | // Since the concurrent cycle will recreate the collection set anyway, simply drop it here. |
| 1010 | clear_collection_set_candidates(); |
| 1011 | abort_time_to_mixed_tracking(); |
| 1012 | initiate_conc_mark(); |
| 1013 | log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)" ); |
| 1014 | } else { |
| 1015 | // The concurrent marking thread is still finishing up the |
| 1016 | // previous cycle. If we start one right now the two cycles |
| 1017 | // overlap. In particular, the concurrent marking thread might |
| 1018 | // be in the process of clearing the next marking bitmap (which |
| 1019 | // we will use for the next cycle if we start one). Starting a |
| 1020 | // cycle now will be bad given that parts of the marking |
| 1021 | // information might get cleared by the marking thread. And we |
| 1022 | // cannot wait for the marking thread to finish the cycle as it |
| 1023 | // periodically yields while clearing the next marking bitmap |
| 1024 | // and, if it's in a yield point, it's waiting for us to |
| 1025 | // finish. So, at this point we will not start a cycle and we'll |
| 1026 | // let the concurrent marking thread complete the last one. |
| 1027 | log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)" ); |
| 1028 | } |
| 1029 | } |
| 1030 | } |
| 1031 | |
| 1032 | void G1Policy::record_concurrent_mark_cleanup_end() { |
| 1033 | G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions()); |
| 1034 | _collection_set->set_candidates(candidates); |
| 1035 | |
| 1036 | bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs"); |
| 1037 | if (!mixed_gc_pending) { |
| 1038 | clear_collection_set_candidates(); |
| 1039 | abort_time_to_mixed_tracking(); |
| 1040 | } |
| 1041 | collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending); |
| 1042 | collector_state()->set_mark_or_rebuild_in_progress(false); |
| 1043 | |
| 1044 | double end_sec = os::elapsedTime(); |
| 1045 | double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; |
| 1046 | _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms); |
| 1047 | _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms); |
| 1048 | |
| 1049 | record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); |
| 1050 | } |
| 1051 | |
| 1052 | double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const { |
| 1053 | return percent_of(reclaimable_bytes, _g1h->capacity()); |
| 1054 | } |
| 1055 | |
| 1056 | class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure { |
| 1057 | virtual bool do_heap_region(HeapRegion* r) { |
| 1058 | r->rem_set()->clear_locked(true /* only_cardset */); |
| 1059 | return false; |
| 1060 | } |
| 1061 | }; |
| 1062 | |
| 1063 | void G1Policy::clear_collection_set_candidates() { |
| 1064 | // Clear remembered sets of remaining candidate regions and the actual candidate |
| 1065 | // set. |
| 1066 | G1ClearCollectionSetCandidateRemSets cl; |
| 1067 | _collection_set->candidates()->iterate(&cl); |
| 1068 | _collection_set->clear_candidates(); |
| 1069 | } |
| 1070 | |
| 1071 | void G1Policy::maybe_start_marking() { |
| 1072 | if (need_to_start_conc_mark("end of GC")) { |
| 1073 | // Note: this might have already been set, if during the last |
| 1074 | // pause we decided to start a cycle but at the beginning of |
| 1075 | // this pause we decided to postpone it. That's OK. |
| 1076 | collector_state()->set_initiate_conc_mark_if_possible(true); |
| 1077 | } |
| 1078 | } |
| 1079 | |
| 1080 | G1Policy::PauseKind G1Policy::young_gc_pause_kind() const { |
| 1081 | assert(!collector_state()->in_full_gc(), "must be"); |
| 1082 | if (collector_state()->in_initial_mark_gc()) { |
| 1083 | assert(!collector_state()->in_young_gc_before_mixed(), "must be"); |
| 1084 | return InitialMarkGC; |
| 1085 | } else if (collector_state()->in_young_gc_before_mixed()) { |
| 1086 | assert(!collector_state()->in_initial_mark_gc(), "must be"); |
| 1087 | return LastYoungGC; |
| 1088 | } else if (collector_state()->in_mixed_phase()) { |
| 1089 | assert(!collector_state()->in_initial_mark_gc(), "must be"); |
| 1090 | assert(!collector_state()->in_young_gc_before_mixed(), "must be"); |
| 1091 | return MixedGC; |
| 1092 | } else { |
| 1093 | assert(!collector_state()->in_initial_mark_gc(), "must be"); |
| 1094 | assert(!collector_state()->in_young_gc_before_mixed(), "must be"); |
| 1095 | return YoungOnlyGC; |
| 1096 | } |
| 1097 | } |
| 1098 | |
| 1099 | void G1Policy::record_pause(PauseKind kind, double start, double end) { |
| 1100 | // Manage the MMU tracker. For some reason it ignores Full GCs. |
| 1101 | if (kind != FullGC) { |
| 1102 | _mmu_tracker->add_pause(start, end); |
| 1103 | } |
| 1104 | // Manage the mutator time tracking from initial mark to first mixed gc. |
| 1105 | switch (kind) { |
| 1106 | case FullGC: |
| 1107 | abort_time_to_mixed_tracking(); |
| 1108 | break; |
| 1109 | case Cleanup: |
| 1110 | case Remark: |
| 1111 | case YoungOnlyGC: |
| 1112 | case LastYoungGC: |
| 1113 | _initial_mark_to_mixed.add_pause(end - start); |
| 1114 | break; |
| 1115 | case InitialMarkGC: |
| 1116 | if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { |
| 1117 | _initial_mark_to_mixed.record_initial_mark_end(end); |
| 1118 | } |
| 1119 | break; |
| 1120 | case MixedGC: |
| 1121 | _initial_mark_to_mixed.record_mixed_gc_start(start); |
| 1122 | break; |
| 1123 | default: |
| 1124 | ShouldNotReachHere(); |
| 1125 | } |
| 1126 | } |
| 1127 | |
| 1128 | void G1Policy::abort_time_to_mixed_tracking() { |
| 1129 | _initial_mark_to_mixed.reset(); |
| 1130 | } |
| 1131 | |
| 1132 | bool G1Policy::next_gc_should_be_mixed(const char* true_action_str, |
| 1133 | const char* false_action_str) const { |
| 1134 | G1CollectionSetCandidates* candidates = _collection_set->candidates(); |
| 1135 | |
| 1136 | if (candidates->is_empty()) { |
| 1137 | log_debug(gc, ergo)("%s (candidate old regions not available)" , false_action_str); |
| 1138 | return false; |
| 1139 | } |
| 1140 | |
| 1141 | // Is the amount of uncollected reclaimable space above G1HeapWastePercent? |
  size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
  double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_percent <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
  return true;
}

uint G1Policy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet candidates in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.
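  //
  // For example (illustrative values only): with 25 candidate regions and the
  // default G1MixedGCCountTarget of 8, 25 / 8 == 3 and 3 * 8 < 25, so the
  // ceiling below bumps the minimum to 4 old regions per mixed GC.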

  const size_t region_num = _collection_set->candidates()->num_regions();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1Policy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.
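  //
  // For example (illustrative values only): a heap of 2048 regions with the
  // default G1OldCSetRegionThresholdPercent of 10 allows at most
  // ceil(2048 * 10 / 100) == 205 old regions per collection.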

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return (uint) result;
}

void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
                                                    double time_remaining_ms,
                                                    uint& num_initial_regions,
                                                    uint& num_optional_regions) {
  assert(candidates != NULL, "Must be");

  num_initial_regions = 0;
  num_optional_regions = 0;
  uint num_expensive_regions = 0;

  double predicted_initial_time_ms = 0.0;
  double predicted_optional_time_ms = 0.0;

  double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();

  const uint min_old_cset_length = calc_min_old_cset_length();
  const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
  const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
  bool check_time_remaining = use_adaptive_young_list_length();

  uint candidate_idx = candidates->cur_idx();

  log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
                            "time remaining %1.2fms, optional threshold %1.2fms",
                            min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
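
  // Each candidate is handled in one of three ways: it is always taken until
  // the minimum number of old regions is reached, even if its predicted time
  // exceeds the budget; with an adaptive young list length, further regions
  // are added to the initial set while more than optional_threshold_ms of the
  // budget remains, and are prepared as optional regions after that until the
  // budget is exhausted; without adaptive sizing, selection stops at the minimum.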
  HeapRegion* hr = candidates->at(candidate_idx);
  while (hr != NULL) {
    if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
      // Added maximum number of old regions to the CSet.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
                                "Initial %u regions, optional %u regions",
                                num_initial_regions, num_optional_regions);
      break;
    }

    // Stop adding regions if the remaining reclaimable space is
    // not above G1HeapWastePercent.
    size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
    double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
    double threshold = (double) G1HeapWastePercent;
    if (reclaimable_percent <= threshold) {
      // We've added enough old regions that the amount of uncollected
      // reclaimable space is at or below the waste threshold. Stop
      // adding old regions to the CSet.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
                                "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
                                reclaimable_percent, G1HeapWastePercent);
      break;
    }

    double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
    time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
    // Add regions to old set until we reach the minimum amount
    if (num_initial_regions < min_old_cset_length) {
      predicted_initial_time_ms += predicted_time_ms;
      num_initial_regions++;
      // Record the number of regions added with no time remaining
      if (time_remaining_ms == 0.0) {
        num_expensive_regions++;
      }
    } else if (!check_time_remaining) {
      // In the non-auto-tuning case, we'll finish adding regions
      // to the CSet if we reach the minimum.
      log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
      break;
    } else {
      // Keep adding regions to old set until we reach the optional threshold
      if (time_remaining_ms > optional_threshold_ms) {
        predicted_initial_time_ms += predicted_time_ms;
        num_initial_regions++;
      } else if (time_remaining_ms > 0) {
        // Keep adding optional regions until time is up.
        assert(num_optional_regions < max_optional_regions, "Should not be possible.");
        predicted_optional_time_ms += predicted_time_ms;
        num_optional_regions++;
      } else {
        log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
        break;
      }
    }
    hr = candidates->at(++candidate_idx);
  }
  if (hr == NULL) {
    log_debug(gc, ergo, cset)("Old candidate collection set empty.");
  }

  if (num_expensive_regions > 0) {
    log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
                              num_expensive_regions);
  }

  log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
                            "predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2fms",
                            num_initial_regions, num_optional_regions,
                            predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
}

void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
                                                         uint const max_optional_regions,
                                                         double time_remaining_ms,
                                                         uint& num_optional_regions) {
  assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");
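
  // The optional regions were only prepared before the pause; here, during the
  // evacuation pause itself, we decide how many of them actually fit into the
  // time that is still left after the mandatory part of the collection set has
  // been evacuated.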

  num_optional_regions = 0;
  double total_prediction_ms = 0.0;
  uint candidate_idx = candidates->cur_idx();

  HeapRegion* r = candidates->at(candidate_idx);
  while (num_optional_regions < max_optional_regions) {
    assert(r != NULL, "Region must exist");
    double prediction_ms = predict_region_elapsed_time_ms(r, false);

    if (prediction_ms > time_remaining_ms) {
      log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
                                prediction_ms, r->hrm_index(), time_remaining_ms);
      break;
    }
    // This region will be included in the next optional evacuation.

    total_prediction_ms += prediction_ms;
    time_remaining_ms -= prediction_ms;
    num_optional_regions++;
    r = candidates->at(++candidate_idx);
  }

  log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
                            num_optional_regions, max_optional_regions, total_prediction_ms);
}

void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {

  // Add survivor regions to SurvRateGroup.
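  // The survivor rate group accumulates, per region age, how much data
  // survives each collection; the predictor uses these rates to estimate
  // future copy costs.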
  note_start_adding_survivor_regions();
  finished_recalculating_age_indexes(true /* is_survivors */);

  HeapRegion* last = NULL;
  for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
       it != survivors->regions()->end();
       ++it) {
    HeapRegion* curr = *it;
    set_region_survivor(curr);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _collection_set->add_survivor_regions(curr);

    last = curr;
  }
  note_stop_adding_survivor_regions();

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.

  finished_recalculating_age_indexes(false /* is_survivors */);
}