/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

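// Per-worker-thread state used during an evacuation pause. Each worker owns
// its own task queue, dirty card queue, PLAB allocator and age table, so the
// bulk of the copying work can proceed without synchronizing on shared state.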
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length)
{
  // We allocate young_cset_length plus one entries, since we "sacrifice"
  // entry 0 to keep track of surviving bytes for non-young regions
  // (where the age is -1).
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  size_t real_length = 1 + young_cset_length;
  size_t array_length = PADDING_ELEM_NUM +
                        real_length +
                        PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL) {
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  }
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, real_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old]   = G1HeapRegionAttr::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
}

// Pass locally gathered statistics to global state.
void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _dcq.flush();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->policy()->record_age_table(&_age_table);

  uint length = _g1h->collection_set()->young_region_length();
  for (uint region_index = 0; region_index < length; region_index++) {
    surviving_young_words[region_index] += _surviving_young_words[region_index];
  }
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
}

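// PLAB statistics for this worker: words lost when retiring partially
// filled buffers, and words given back by undone allocations.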
size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

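// Debug-only checks that a task queue entry is well formed: either a masked
// oop* marking a partially scanned object array, or a (narrow) oop location
// whose referent lies within the reserved G1 heap.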
#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
  oop p = RawAccess<>::oop_load(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->is_in_cset(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  } else {
    oop p = RawAccess<>::oop_load(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

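// Completely drain this worker's evacuation task queue. Processing an entry
// can push new entries (e.g. while scanning a just-copied object), so keep
// trimming until the queue, including its overflow stack, is empty.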
void G1ParScanThreadState::trim_queue() {
  do {
    // Fully drain the queue.
    trim_queue_to_threshold(0);
  } while (!_refs->is_empty());
}

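// Slow path used when allocation in the intended destination failed: retry
// the allocation in the "next" space. For a young (survivor) destination
// this means attempting the copy in old gen instead; a failed old gen
// destination has no further fallback and yields NULL.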
HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const region_attr,
                                                      G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed) {
  assert(region_attr.is_in_cset_or_humongous(), "Unexpected region attr type: %s", region_attr.get_type_str());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // No other space to try.
    return NULL;
  }
}

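// Choose the destination for an object about to be copied: objects in young
// regions whose age is still below the tenuring threshold keep a young
// (survivor) destination; everything else follows the _dest mapping set up
// in the constructor.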
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age) {
  if (region_attr.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  return dest(region_attr);
}

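// Report a promotion event to the GC tracer, distinguishing objects that
// were copied into the current PLAB from objects that were allocated
// directly outside of it.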
void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_attr.type() == G1HeapRegionAttr::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

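// Copy "old" from the collection set into survivor or old space. Multiple
// workers may race to evacuate the same object; only the winner of the
// forwarding CAS performs the copy, while losers undo their allocation and
// adopt the winner's copy. Falls back to handle_evacuation_failure_par() if
// no space can be allocated at all.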
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset()+1;
  assert((from_region->is_young() && young_index > 0) ||
         (!from_region->is_young() && young_index == 0), "invariant");

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_attr.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(region_attr, &dest_attr, word_sz, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

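  // Try to install a forwarding pointer from "old" to our copy.
  // forward_to_atomic() returns NULL when our CAS won the race; a non-NULL
  // result is the forwardee installed by a competing worker, in which case
  // we undo our allocation and return that copy instead.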
  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_attr.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark_raw(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark->set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = region_attr.is_young();
      const bool is_to_young = dest_attr.is_young();
      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      do_oop_partial_array(old_p);
    } else {
      G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
    return forward_ptr;
  }
}

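// Per-worker states are created lazily on first use, so workers that never
// participate in a given pause do not pay for any per-thread state.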
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, worker_id, _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    pss->flush(_surviving_young_words_total);
    delete pss;
    _states[worker_index] = NULL;
  }
  _flushed = true;
}

void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::ScanRSUsedMemory);
  }
}

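// Called when no space could be allocated for "old". Attempt to forward the
// object to itself; the winner of that race preserves the mark word for
// later restoration and rescans the object in place, while losers simply
// return whatever forwarding the winner installed.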
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    G1ScanInYoungSetter x(&_scanner, r->is_young());
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}
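
// Typical lifecycle (sketch): the evacuation code creates one state set per
// pause, each GC worker fetches its own state via state_for_worker(), and
// once all workers are done flush() folds the per-thread statistics into
// global state before the set is destroyed.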
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
    _g1h(g1h),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
    _young_cset_length(young_cset_length),
    _optional_cset_length(optional_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}