/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "logging/logStream.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/quickSort.hpp"

G1CollectorState* G1CollectionSet::collector_state() {
  return _g1h->collector_state();
}

G1GCPhaseTimes* G1CollectionSet::phase_times() {
  return _policy->phase_times();
}

double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->in_young_only_phase());
}

G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _g1h(g1h),
  _policy(policy),
  _candidates(NULL),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _num_optional_regions(0),
  _bytes_used_before(0),
  _recorded_rs_lengths(0),
  _inc_build_state(Inactive),
  _inc_part_start(0),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_lengths(0),
  _inc_recorded_rs_lengths_diffs(0),
  _inc_predicted_elapsed_time_ms(0.0),
  _inc_predicted_elapsed_time_ms_diffs(0.0) {
}

G1CollectionSet::~G1CollectionSet() {
  if (_collection_set_regions != NULL) {
    FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  }
  free_optional_regions();
  clear_candidates();
}

void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                          uint survivor_cset_region_length) {
  assert_at_safepoint_on_vm_thread();

  _eden_region_length = eden_cset_region_length;
  _survivor_region_length = survivor_cset_region_length;

  assert((size_t) young_region_length() == _collection_set_cur_length,
         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);

  _old_region_length = 0;
  free_optional_regions();
}

void G1CollectionSet::initialize(uint max_region_length) {
  guarantee(_collection_set_regions == NULL, "Must only initialize once.");
  _collection_set_max_length = max_region_length;
  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
}

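// Optional regions are not stored in this class; they remain in the candidates
// list and are tracked only through _num_optional_regions and each region's
// index_in_opt_cset, so "freeing" them is just a matter of resetting the count.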
void G1CollectionSet::free_optional_regions() {
  _num_optional_regions = 0;
}

void G1CollectionSet::clear_candidates() {
  delete _candidates;
  _candidates = NULL;
}

void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

// Add the heap region to the non-incremental collection set; it is appended
// at the current end of the region index array.
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert_at_safepoint_on_vm_thread();

  assert(_inc_build_state == Active,
         "Precondition, actively building cset or adding optional later on");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the collection set");
  _g1h->register_old_region_with_region_attr(hr);

  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");

  _bytes_used_before += hr->used();
  _recorded_rs_lengths += hr->rem_set()->occupied();
  _old_region_length++;

  _g1h->old_set_remove(hr);
}

void G1CollectionSet::add_optional_region(HeapRegion* hr) {
  assert(hr->is_old(), "the region should be old");
  assert(!hr->in_collection_set(), "should not already be in the CSet");

  _g1h->register_optional_region_with_region_attr(hr);

  hr->set_index_in_opt_cset(_num_optional_regions++);
}

void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");

  _inc_bytes_used_before = 0;

  _inc_recorded_rs_lengths = 0;
  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms = 0.0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;

  update_incremental_marker();
}

void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.
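  // For example: a region added with a rem set length of 100 contributes 100
  // to _inc_recorded_rs_lengths; if concurrent refinement later samples 120,
  // it records +20 in _inc_recorded_rs_lengths_diffs, and merging here yields
  // the up-to-date 120 for that region.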

  if (_inc_recorded_rs_lengths_diffs >= 0) {
    _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should, in theory, always be positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
    if (_inc_recorded_rs_lengths >= diffs) {
      _inc_recorded_rs_lengths -= diffs;
    } else {
      _inc_recorded_rs_lengths = 0;
    }
  }
  _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;

  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
}

void G1CollectionSet::clear() {
  assert_at_safepoint_on_vm_thread();
  _collection_set_cur_length = 0;
}

void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
  size_t len = _collection_set_cur_length;
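  // The loadload below pairs with the storestore in add_young_region_common():
  // it keeps the reads of the region indices from being reordered before the
  // read of the length, so every slot up to len holds a fully published index.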
  OrderAccess::loadload();

  for (uint i = 0; i < len; i++) {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
    bool result = cl->do_heap_region(r);
    if (result) {
      cl->set_incomplete();
      return;
    }
  }
}

void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
  assert_at_safepoint();

  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = _candidates->at(i);
    bool result = cl->do_heap_region(r);
    guarantee(!result, "Must not cancel iteration");
  }
}

void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
  assert_at_safepoint();

  size_t len = _collection_set_cur_length - _inc_part_start;
  if (len == 0) {
    return;
  }

  size_t start_pos = (worker_id * len) / total_workers;
  size_t cur_pos = start_pos;

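  // Each worker walks the entire part, but starts at an offset proportional
  // to its worker id and wraps around, so concurrent workers tend to begin
  // on different regions rather than all contending on the same ones first.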
  do {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos + _inc_part_start]);
    bool result = cl->do_heap_region(r);
    guarantee(!result, "Must not cancel iteration");

    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0;
    }
  } while (cur_pos != start_pos);
}

void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");

  // We could have updated _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.
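  // E.g. a sample that raises a region's rem set length from 10 to 14 adds
  // +4 to _inc_recorded_rs_lengths_diffs below; the +4 is then folded into
  // the main field in finalize_incremental_building().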

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(_inc_build_state == Active, "Precondition");

  size_t collection_set_length = _collection_set_cur_length;
  assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
  hr->set_young_index_in_cset((int)collection_set_length);

  _collection_set_regions[collection_set_length] = hr->hrm_index();
  // Concurrent readers must observe the store of the value in the array before an
  // update to the length field.
  OrderAccess::storestore();
  _collection_set_cur_length++;
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");

  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause or
  // * adding the current allocation region to the incremental cset
  //   when it is retired.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region).
  // We need to clear and set the cached recorded collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.
  // Ignore calls to this due to retirement during full gc.

  if (!_g1h->collector_state()->in_full_gc()) {
    size_t rs_length = hr->rem_set()->occupied();
    double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);

    // Cache the values we have added to the aggregated information
    // in the heap region in case we have to remove this region from
    // the incremental collection set, or it is updated by the
    // rset sampling code.
    hr->set_recorded_rs_length(rs_length);
    hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

    _inc_recorded_rs_lengths += rs_length;
    _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
    _inc_bytes_used_before += hr->used();
  }

  assert(!hr->in_collection_set(), "invariant");
  _g1h->register_young_region_with_region_attr(hr);
}

void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

#ifndef PRODUCT
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
public:
  bool _valid;

  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }

  virtual bool do_heap_region(HeapRegion* r) {
    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

    SurvRateGroup* group = r->surv_rate_group();

    if (group == NULL) {
      log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
      _valid = false;
    }

    if (r->age_in_surv_rate_group() < 0) {
      log_error(gc, verify)("## encountered negative age in young region");
      _valid = false;
    }

    return false;
  }

  bool valid() const { return _valid; }
};

bool G1CollectionSet::verify_young_ages() {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungAgesClosure cl;
  iterate(&cl);

  if (!cl.valid()) {
    LogStreamHandle(Error, gc, verify) log;
    print(&log);
  }

  return cl.valid();
}

class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
  outputStream* _st;
public:
  G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }

  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
    _st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                  HR_FORMAT_PARAMS(r),
                  p2i(r->prev_top_at_mark_start()),
                  p2i(r->next_top_at_mark_start()),
                  r->age_in_surv_rate_group_cond());
    return false;
  }
};

void G1CollectionSet::print(outputStream* st) {
  st->print_cr("\nCollection_set:");

  G1PrintCollectionSetDetailClosure cl(st);
  iterate(&cl);
}
#endif // !PRODUCT

double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors) {
  double young_start_time_sec = os::elapsedTime();

  finalize_incremental_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);

  size_t pending_cards = _policy->pending_cards();
  double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                            pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause]

  uint survivor_region_length = survivors->length();
  uint eden_region_length = _g1h->eden_regions_count();
  init_region_lengths(eden_region_length, survivor_region_length);

  verify_young_cset_indices();

  // Clear the fields that point to the survivor list - they are all young now.
  survivors->convert_to_eden();

  _bytes_used_before = _inc_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
                            eden_region_length, survivor_region_length, _inc_predicted_elapsed_time_ms, target_pause_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size.
  set_recorded_rs_lengths(_inc_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}

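// Three-way comparator for QuickSort::sort(). Written out with explicit
// comparisons because the operands are unsigned: returning a - b could wrap
// around instead of going negative.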
static int compare_region_idx(const uint a, const uint b) {
  if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}

void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();

  if (collector_state()->in_mixed_phase()) {
    candidates()->verify();

    uint num_initial_old_regions;
    uint num_optional_old_regions;

    _policy->calculate_old_collection_set_regions(candidates(),
                                                  time_remaining_ms,
                                                  num_initial_old_regions,
                                                  num_optional_old_regions);

    // Prepare initial old regions.
    move_candidates_to_collection_set(num_initial_old_regions);

    // Prepare optional old regions for evacuation.
    uint candidate_idx = candidates()->cur_idx();
    for (uint i = 0; i < num_optional_old_regions; i++) {
      add_optional_region(candidates()->at(candidate_idx + i));
    }

    candidates()->verify();
  }

  stop_incremental_building();

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

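  // Keep the region indices sorted so the final collection set is in
  // ascending heap region order.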
  QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}

void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
  if (num_old_candidate_regions == 0) {
    return;
  }
  uint candidate_idx = candidates()->cur_idx();
  for (uint i = 0; i < num_old_candidate_regions; i++) {
    HeapRegion* r = candidates()->at(candidate_idx + i);
    // This potentially optional candidate region is going to be an actual collection
    // set region. Clear cset marker.
    _g1h->clear_region_attr(r);
    add_old_region(r);
  }
  candidates()->remove(num_old_candidate_regions);

  candidates()->verify();
}

void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
  double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
  finalize_old_part(time_remaining_ms);
}

bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
  update_incremental_marker();

  uint num_selected_regions;
  _policy->calculate_optional_collection_set_regions(candidates(),
                                                     _num_optional_regions,
                                                     remaining_pause_time,
                                                     num_selected_regions);

  move_candidates_to_collection_set(num_selected_regions);

  _num_optional_regions -= num_selected_regions;

  stop_incremental_building();

  _g1h->verify_region_attr_remset_update();

  return num_selected_regions > 0;
}

void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
    pss->record_unused_optional_region(r);
    // Clear collection set marker and make sure that the remembered set information
    // is correct as we still need it later.
    _g1h->clear_region_attr(r);
    _g1h->register_region_with_region_attr(r);
    r->clear_index_in_opt_cset();
  }
  free_optional_regions();

  _g1h->verify_region_attr_remset_update();
}

#ifdef ASSERT
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
private:
  size_t _young_length;
  int* _heap_region_indices;
public:
  G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
    _heap_region_indices = NEW_C_HEAP_ARRAY(int, young_length, mtGC);
    for (size_t i = 0; i < young_length; i++) {
      _heap_region_indices[i] = -1;
    }
  }
  ~G1VerifyYoungCSetIndicesClosure() {
    FREE_C_HEAP_ARRAY(int, _heap_region_indices);
  }

  virtual bool do_heap_region(HeapRegion* r) {
    const int idx = r->young_index_in_cset();

    assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
    assert((size_t)idx < _young_length, "Young cset index too large for region %u", r->hrm_index());

    assert(_heap_region_indices[idx] == -1,
           "Index %d used by multiple regions, first use by region %u, second by region %u",
           idx, _heap_region_indices[idx], r->hrm_index());

    _heap_region_indices[idx] = r->hrm_index();

    return false;
  }
};

void G1CollectionSet::verify_young_cset_indices() const {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
  iterate(&cl);
}
#endif