/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1MemoryPool.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "gc/g1/g1VMOperations.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the Heap_lock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
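
// Illustrative call graph for the allocation entry points described above
// (a sketch for orientation; all of these are defined in this file):
//
//   allocate_new_tlab() / mem_allocate()        // mutator entry, no Heap_lock
//     -> attempt_allocation()                   // lock-free attempt in the
//                                               //   current alloc region
//       -> attempt_allocation_slow()            // takes Heap_lock, may pause
//   mem_allocate(), humongous sizes
//     -> attempt_allocation_humongous()         // takes Heap_lock, allocates
//                                               //   whole regions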

class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
 private:
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1CardTable* _g1_ct;

  HeapRegion* region_for_card(CardValue* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }

  bool will_become_free(HeapRegion* hr) const {
    // A region will be freed by free_collection_set if the region is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
  }

 public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
    _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }

  bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
    HeapRegion* hr = region_for_card(card_ptr);

    // Should only dirty cards in regions that won't be freed.
    if (!will_become_free(hr)) {
      *card_ptr = G1CardTable::dirty_card_val();
      _num_dirtied++;
    }

    return true;
  }

  size_t num_dirtied() const { return _num_dirtied; }
};
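
// A minimal usage sketch for the closure above (illustrative only; the real
// driver applies it to the cards logged during evacuation as part of the
// "Redirty Cards" post-evacuation work):
//
//   RedirtyLoggedCardTableEntryClosure cl(g1h);
//   // ... invoke cl.do_card_ptr(card_ptr, worker_id) for each logged card ...
//   log_debug(gc)("Redirtied " SIZE_FORMAT " cards", cl.num_dirtied());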


void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}

void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}

Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
  Ticks start = Ticks::now();
  workers()->run_task(task, workers()->active_workers());
  return Ticks::now() - start;
}

HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}

// Private methods.

HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res = _hrm->allocate_free_region(type);

  if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
    // Currently, only attempts to allocate GC alloc regions set
    // do_expand to true. So, we should only reach here during a
    // safepoint. If this assumption changes we might have to
    // reconsider the use of _expand_heap_after_alloc_failure.
    assert(SafepointSynchronize::is_at_safepoint(), "invariant");

    log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                              word_size * HeapWordSize);

    if (expand(word_size * HeapWordSize)) {
      // Given that expand() succeeded in expanding the heap, and we
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty.
      // In either case allocate_free_region() will check for NULL.
      res = _hrm->allocate_free_region(type);
    } else {
      _expand_heap_after_alloc_failure = false;
    }
  }
  return res;
}

HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                           uint num_regions,
                                                           size_t word_size) {
  assert(first != G1_NO_HRM_INDEX, "pre-condition");
  assert(is_humongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series.
  uint last = first + num_regions - 1;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = region_at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new top of the new object.
  HeapWord* obj_top = new_obj + word_size;
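
  // Illustrative layout of the series set up below (a sketch; assumes the
  // object spans three regions and does not end exactly at a region
  // boundary):
  //
  //   region_at(first)    : "starts humongous",    header at bottom()
  //   region_at(first + 1): "continues humongous", top == end()
  //   region_at(last)     : "continues humongous", obj_top inside the region,
  //                         tail filled with filler objects or recorded as
  //                         not fillable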

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // Next, pad out the unused tail of the last region with filler
  // objects, for improved usage accounting.
  // How many words we use for filler objects.
  size_t word_fill_size = word_size_sum - word_size;

  // How many words of memory we "waste" which cannot hold a filler object.
  size_t words_not_fillable = 0;

  if (word_fill_size >= min_fill_size()) {
    fill_with_objects(obj_top, word_fill_size);
  } else if (word_fill_size > 0) {
    // We have space to fill, but we cannot fit an object there.
    words_not_fillable = word_fill_size;
    word_fill_size = 0;
  }

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_starts_humongous(obj_top, word_fill_size);
  _policy->remset_tracker()->update_at_allocate(first_hr);
  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (uint i = first + 1; i <= last; ++i) {
    hr = region_at(i);
    hr->set_continues_humongous(first_hr);
    _policy->remset_tracker()->update_at_allocate(hr);
  }

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now, we will update the top fields of all the regions in the
  // series except the last one.
  for (uint i = first; i < last; ++i) {
    hr = region_at(i);
    hr->set_top(hr->end());
  }

  hr = region_at(last);
  // If we cannot fit a filler object, we must set top to the end
  // of the humongous object, otherwise we cannot iterate the heap
  // and the BOT will not be complete.
  hr->set_top(hr->end() - words_not_fillable);

  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
         "obj_top should be in last region");

  _verifier->check_bitmaps("Humongous Region Allocation", first_hr);

  assert(words_not_fillable == 0 ||
         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
         "Miscalculation in humongous allocation");

  increase_used((word_size_sum - words_not_fillable) * HeapWordSize);

  for (uint i = first; i <= last; ++i) {
    hr = region_at(i);
    _humongous_set.add(hr);
    _hr_printer.alloc(hr);
  }

  return new_obj;
}

size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
  return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}
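
// Worked example for the calculation above (a sketch; assumes the common 1M
// region size, i.e. GrainWords == 131072 with 8-byte HeapWords):
//   word_size = 200000 words (~1.5M) is humongous, and
//   align_up(200000, 131072) / 131072 == 262144 / 131072 == 2 regions.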

// If the object could fit into free regions without expansion, try that first.
// Otherwise, if the heap can expand, do so.
// Otherwise, if using expansion ("ex") regions might help, try with those given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  _verifier->verify_region_sets_optional();

  uint first = G1_NO_HRM_INDEX;
  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);

  if (obj_regions == 1) {
    // Only one region to allocate, try to use a fast path by directly allocating
    // from the free lists. Do not try to expand here, we will potentially do that
    // later.
    HeapRegion* hr = new_region(word_size, HeapRegionType::Humongous, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrm_index();
    }
  } else {
    // Policy: Try only empty regions (i.e. already committed) first. Maybe we
    // are lucky enough to find some.
    first = _hrm->find_contiguous_only_empty(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      _hrm->allocate_free_regions_starting_at(first, obj_regions);
    }
  }

  if (first == G1_NO_HRM_INDEX) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If so, try expansion.
    first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
      log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                word_size * HeapWordSize);

      _hrm->expand_at(first, obj_regions, workers());
      policy()->record_new_heap_size(num_regions());

#ifdef ASSERT
      for (uint i = first; i < first + obj_regions; ++i) {
        HeapRegion* hr = region_at(i);
        assert(hr->is_free(), "sanity");
        assert(hr->is_empty(), "sanity");
        assert(is_on_master_free_list(hr), "sanity");
      }
#endif
      _hrm->allocate_free_regions_starting_at(first, obj_regions);
    } else {
      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NO_HRM_INDEX) {
    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
    assert(result != NULL, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    g1mm()->update_sizes();
  }

  _verifier->verify_region_sets_optional();

  return result;
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
                                             size_t requested_size,
                                             size_t* actual_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(requested_size), "we do not allow humongous TLABs");

  return attempt_allocation(min_size, requested_size, actual_size);
}

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool* gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  if (is_humongous(word_size)) {
    return attempt_allocation_humongous(word_size);
  }
  size_t dummy = 0;
  return attempt_allocation(word_size, word_size, &dummy);
}

HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
  ResourceMark rm; // For retrieving the thread names in log messages.

  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLocker x(Heap_lock);
      result = _allocator->attempt_allocation_locked(word_size);
      if (result != NULL) {
        return result;
      }

      // If the GCLocker is active and we are bound for a GC, try expanding the
      // young gen. This is different from the case where only GCLocker::needs_gc()
      // is set: here we try to avoid waiting too long while the GCLocker is active.
      if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
        // No need for an ergo message here, can_expand_young_list() does this when
        // it returns true.
        result = _allocator->attempt_allocation_force(word_size);
        if (result != NULL) {
          return result;
        }
      }
      // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
      // the GCLocker initiated GC has been performed and then retry. This includes
      // the case when the GCLocker is no longer active but its GC has not yet run.
      should_try_gc = !GCLocker::needs_gc();
      // Read the GC count while still holding the Heap_lock.
      gc_count_before = total_collections();
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_inc_collection_pause);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                             Thread::current()->name(), p2i(result));
        return result;
      }

      if (succeeded) {
        // We successfully scheduled a collection which failed to allocate. No
        // point in trying to allocate further. We'll just return NULL.
        log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
                             SIZE_FORMAT " words", Thread::current()->name(), word_size);
        return NULL;
      }
      log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
                           Thread::current()->name(), word_size);
    } else {
      // Failed to schedule a collection.
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
                               SIZE_FORMAT " words", Thread::current()->name(), word_size);
        return NULL;
      }
      log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      gclocker_retry_count += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    size_t dummy = 0;
    result = _allocator->attempt_allocation(word_size, word_size, &dummy);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
                             Thread::current()->name(), try_count, word_size);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

void G1CollectedHeap::begin_archive_alloc_range(bool open) {
  assert_at_safepoint_on_vm_thread();
  if (_archive_allocator == NULL) {
    _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
  }
}

bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
  // Allocations in archive regions cannot be of a size that would be considered
  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
  // may be different at archive-restore time.
  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
}
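
// Worked example for the check above (a sketch; assumes the usual 1M minimum
// region size and the G1 humongous threshold of half a region):
//   min_region_size_in_words() == 131072, so the threshold is 65536 words
//   (512K): any archive allocation of that size or larger is rejected, no
//   matter which region size the restoring JVM ends up using.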

HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
  assert_at_safepoint_on_vm_thread();
  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
  if (is_archive_alloc_too_large(word_size)) {
    return NULL;
  }
  return _archive_allocator->archive_mem_allocate(word_size);
}

void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                                              size_t end_alignment_in_bytes) {
  assert_at_safepoint_on_vm_thread();
  assert(_archive_allocator != NULL, "_archive_allocator not initialized");

  // Call complete_archive to do the real work, filling in the MemRegion
  // array with the archive regions.
  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
  delete _archive_allocator;
  _archive_allocator = NULL;
}

bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm->reserved();
  for (size_t i = 0; i < count; i++) {
    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
      return false;
    }
  }
  return true;
}

bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
                                            size_t count,
                                            bool open) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MutexLocker x(Heap_lock);

  MemRegion reserved = _hrm->reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;

  // Temporarily disable pretouching of heap pages. This interface is used
  // when mmap'ing archived heap data in, so pre-touching is wasted.
  FlagSetting fs(AlwaysPreTouch, false);

  // Enable archive object checking used by G1MarkSweep. We have to let it know
  // about each archive range, so that objects in those ranges aren't marked.
  G1ArchiveAllocator::enable_archive_object_check();

  // For each specified MemRegion range, allocate the corresponding G1
  // regions and mark them as archive regions. We expect the ranges
  // in ascending starting address order, without overlap.
  for (size_t i = 0; i < count; i++) {
    MemRegion curr_range = ranges[i];
    HeapWord* start_address = curr_range.start();
    size_t word_size = curr_range.word_size();
    HeapWord* last_address = curr_range.last();
    size_t commits = 0;

    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
              "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
              p2i(start_address), p2i(last_address));
    guarantee(start_address > prev_last_addr,
              "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT,
              p2i(start_address), p2i(prev_last_addr));
    prev_last_addr = last_address;

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to allocate
    // the same region again. If the current range is entirely within that
    // region, skip it, just adjusting the recorded top.
    HeapRegion* start_region = _hrm->addr_to_region(start_address);
    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
      start_address = start_region->end();
      if (start_address > last_address) {
        increase_used(word_size * HeapWordSize);
        start_region->set_top(last_address + 1);
        continue;
      }
      start_region->set_top(start_address);
      curr_range = MemRegion(start_address, last_address + 1);
      start_region = _hrm->addr_to_region(start_address);
    }
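
    // Example of the adjustment above (a sketch, assuming 1M regions): if
    // the previous range ended halfway through region 5 and this range also
    // starts in region 5, start_address is bumped to region 5's end() so the
    // region is not allocated twice; if this range also ends inside region 5,
    // only the recorded top is updated and the whole range is skipped.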

    // Perform the actual region allocation, exiting if it fails.
    // Then note how much new space we have allocated.
    if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
      return false;
    }
    increase_used(word_size * HeapWordSize);
    if (commits != 0) {
      log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
                                HeapRegion::GrainWords * HeapWordSize * commits);
    }

    // Mark each G1 region touched by the range as archive, add it to
    // the archive set, and set top.
    HeapRegion* curr_region = _hrm->addr_to_region(start_address);
    HeapRegion* last_region = _hrm->addr_to_region(last_address);
    prev_last_region = last_region;

    while (curr_region != NULL) {
      assert(curr_region->is_empty() && !curr_region->is_pinned(),
             "Region already in use (index %u)", curr_region->hrm_index());
      if (open) {
        curr_region->set_open_archive();
      } else {
        curr_region->set_closed_archive();
      }
      _hr_printer.alloc(curr_region);
      _archive_set.add(curr_region);
      HeapWord* top;
      HeapRegion* next_region;
      if (curr_region != last_region) {
        top = curr_region->end();
        next_region = _hrm->next_region_in_heap(curr_region);
      } else {
        top = last_address + 1;
        next_region = NULL;
      }
      curr_region->set_top(top);
      curr_region->set_first_dead(top);
      curr_region->set_end_of_live(top);
      curr_region = next_region;
    }

    // Notify mark-sweep of the archive
    G1ArchiveAllocator::set_range_archive(curr_range, open);
  }
  return true;
}

void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm->reserved();
  HeapWord *prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;

  // For each MemRegion, create filler objects, if needed, in the G1 regions
  // that contain the address range. The address range actually within the
  // MemRegion will not be modified. That is assumed to have been initialized
  // elsewhere, probably via an mmap of archived heap data.
  MutexLocker x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    assert(reserved.contains(start_address) && reserved.contains(last_address),
           "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
           p2i(start_address), p2i(last_address));
    assert(start_address > prev_last_addr,
           "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT,
           p2i(start_address), p2i(prev_last_addr));

    HeapRegion* start_region = _hrm->addr_to_region(start_address);
    HeapRegion* last_region = _hrm->addr_to_region(last_address);
    HeapWord* bottom_address = start_region->bottom();

    // Check for a range beginning in the same region in which the
    // previous one ended.
    if (start_region == prev_last_region) {
      bottom_address = prev_last_addr + 1;
    }

    // Verify that the regions were all marked as archive regions by
    // alloc_archive_regions.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                "Expected archive region at index %u", curr_region->hrm_index());
      if (curr_region != last_region) {
        curr_region = _hrm->next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
    }

    prev_last_addr = last_address;
    prev_last_region = last_region;

    // Fill the memory below the allocated range with dummy object(s),
    // if the region bottom does not match the range start, or if the previous
    // range ended within the same G1 region, and there is a gap.
    if (start_address != bottom_address) {
      size_t fill_size = pointer_delta(start_address, bottom_address);
      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
      increase_used(fill_size * HeapWordSize);
    }
  }
}

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);

  if (result == NULL) {
    *actual_word_size = desired_word_size;
    result = attempt_allocation_slow(desired_word_size);
  }

  assert_heap_not_locked();
  if (result != NULL) {
    assert(*actual_word_size != 0, "Actual size must have been set here");
    dirty_young_block(result, *actual_word_size);
  } else {
    *actual_word_size = 0;
  }

  return result;
}

void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, bool is_open) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm->reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;
  size_t size_used = 0;
  size_t uncommitted_regions = 0;
  // For each MemRegion, free the G1 regions that constitute it, and
  // notify mark-sweep that the range is no longer to be considered 'archive.'
  MutexLocker x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    assert(reserved.contains(start_address) && reserved.contains(last_address),
           "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
           p2i(start_address), p2i(last_address));
    assert(start_address > prev_last_addr,
           "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT,
           p2i(start_address), p2i(prev_last_addr));
    size_used += ranges[i].byte_size();
    prev_last_addr = last_address;

    HeapRegion* start_region = _hrm->addr_to_region(start_address);
    HeapRegion* last_region = _hrm->addr_to_region(last_address);

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to free
    // the same region again. If the current range is entirely within that
    // region, skip it.
    if (start_region == prev_last_region) {
      start_address = start_region->end();
      if (start_address > last_address) {
        continue;
      }
      start_region = _hrm->addr_to_region(start_address);
    }
    prev_last_region = last_region;

    // After verifying that each region was marked as an archive region by
    // alloc_archive_regions, set it free and empty and uncommit it.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                "Expected archive region at index %u", curr_region->hrm_index());
      uint curr_index = curr_region->hrm_index();
      _archive_set.remove(curr_region);
      curr_region->set_free();
      curr_region->set_top(curr_region->bottom());
      if (curr_region != last_region) {
        curr_region = _hrm->next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
      _hrm->shrink_at(curr_index, 1);
      uncommitted_regions++;
    }

    // Notify mark-sweep that this is no longer an archive range.
    G1ArchiveAllocator::clear_range_archive(ranges[i], is_open);
  }

  if (uncommitted_regions != 0) {
    log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
                              HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
  }
  decrease_used(size_used);
}

oop G1CollectedHeap::materialize_archived_object(oop obj) {
  assert(obj != NULL, "archived obj is NULL");
  assert(G1ArchiveAllocator::is_archived_object(obj), "must be archived object");

  // Loading an archived object makes it strongly reachable. If it is
  // loaded during concurrent marking, it must be enqueued to the SATB
  // queue, shading the previously white object gray.
  G1BarrierSet::enqueue(obj);

  return obj;
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
  ResourceMark rm; // For retrieving the thread names in log messages.

  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
  // version of this code did use a unified method which was harder to
  // follow and, as a result, it had subtle bugs that were hard to
  // track down. So keeping these two methods separate allows each to
  // be more readable. It will be good to keep these two in sync as
  // much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(is_humongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // Humongous objects can exhaust the heap quickly, so we should check if we
  // need to start a marking cycle at each humongous object allocation. We do
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
  if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
                                        word_size)) {
    collect(GCCause::_g1_humongous_allocation);
  }

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLocker x(Heap_lock);

      // Given that humongous objects are not allocated in young
      // regions, we'll first try to do the allocation without doing a
      // collection hoping that there's enough space in the heap.
      result = humongous_obj_allocate(word_size);
      if (result != NULL) {
        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
        policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
        return result;
      }

      // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
      // the GCLocker initiated GC has been performed and then retry. This includes
      // the case when the GCLocker is no longer active but its GC has not yet run.
      should_try_gc = !GCLocker::needs_gc();
      // Read the GC count while still holding the Heap_lock.
      gc_count_before = total_collections();
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_humongous_allocation);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                             Thread::current()->name(), p2i(result));
        return result;
      }

      if (succeeded) {
        // We successfully scheduled a collection which failed to allocate. No
        // point in trying to allocate further. We'll just return NULL.
        log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
                             SIZE_FORMAT " words", Thread::current()->name(), word_size);
        return NULL;
      }
      log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
                           Thread::current()->name(), word_size);
    } else {
      // Failed to schedule a collection.
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
                               SIZE_FORMAT " words", Thread::current()->name(), word_size);
        return NULL;
      }
      log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      gclocker_retry_count += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space.
    // Humongous object allocation always needs a lock, so we wait for the retry
    // in the next iteration of the loop, unlike for the regular iteration case.
    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
                             Thread::current()->name(), try_count, word_size);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                                           bool expect_null_mutator_alloc_region) {
  assert_at_safepoint_on_vm_thread();
  assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!is_humongous(word_size)) {
    return _allocator->attempt_allocation_locked(word_size);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size);
    if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
      collector_state()->set_initiate_conc_mark_if_possible(true);
    }
    return result;
  }

  ShouldNotReachHere();
}

class PostCompactionPrinterClosure: public HeapRegionClosure {
 private:
  G1HRPrinter* _hr_printer;
 public:
  bool do_heap_region(HeapRegion* hr) {
    assert(!hr->is_young(), "not expecting to find young regions");
    _hr_printer->post_compaction(hr);
    return false;
  }

  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
};

void G1CollectedHeap::print_hrm_post_compaction() {
  if (_hr_printer.is_active()) {
    PostCompactionPrinterClosure cl(hr_printer());
    heap_region_iterate(&cl);
  }
}

void G1CollectedHeap::abort_concurrent_cycle() {
  // If we start the compaction before the CM threads finish
  // scanning the root regions we might trip them over as we'll
  // be moving objects / updating references. So let's wait until
  // they are done. By telling them to abort, they should complete
  // early.
  _cm->root_regions()->abort();
  _cm->root_regions()->wait_until_scan_finished();

  // Disable discovery and empty the discovered lists
  // for the CM ref processor.
  _ref_processor_cm->disable_discovery();
  _ref_processor_cm->abandon_partial_discovery();
  _ref_processor_cm->verify_no_references_recorded();

  // Abandon current iterations of concurrent marking and concurrent
  // refinement, if any are in progress.
  concurrent_mark()->concurrent_cycle_abort();
}

void G1CollectedHeap::prepare_heap_for_full_collection() {
  // Make sure we'll choose a new allocation region afterwards.
  _allocator->release_mutator_alloc_region();
  _allocator->abandon_gc_alloc_regions();

  // We may have added regions to the current incremental collection
  // set between the last GC or pause and now. We need to clear the
  // incremental collection set and then start rebuilding it afresh
  // after this full GC.
  abandon_collection_set(collection_set());

  tear_down_region_sets(false /* free_list_only */);

  hrm()->prepare_for_full_collection_start();
}

void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  assert_used_and_recalculate_used_equal(this);
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
  _verifier->check_bitmaps("Full GC Start");
}

void G1CollectedHeap::prepare_heap_for_mutators() {
  hrm()->prepare_for_full_collection_end();

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceUtils::verify_metrics();

  // Prepare heap for normal collections.
  assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
  abort_refinement();
  resize_heap_if_necessary();

  // Rebuild the strong code root lists for each region
  rebuild_strong_code_roots();

  // Purge code root memory
  purge_code_root_memory();

  // Start a new incremental collection set for the next pause
  start_new_collection_set();

  _allocator->init_mutator_alloc_region();

  // Post collection state updates.
  MetaspaceGC::compute_new_size();
}

void G1CollectedHeap::abort_refinement() {
  if (_hot_card_cache->use_cache()) {
    _hot_card_cache->reset_hot_cache();
  }

  // Discard all remembered set updates.
  G1BarrierSet::dirty_card_queue_set().abandon_logs();
  assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
}

void G1CollectedHeap::verify_after_full_collection() {
  _hrm->verify_optional();
  _verifier->verify_region_sets_optional();
  _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
  // Clear the previous marking bitmap, if needed for bitmap verification.
  // Note we cannot do this when we clear the next marking bitmap in
  // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
  // objects marked during a full GC against the previous bitmap.
  // But we need to clear it before calling check_bitmaps below since
  // the full GC has compacted objects and updated TAMS but not updated
  // the prev bitmap.
  if (G1VerifyBitmaps) {
    GCTraceTime(Debug, gc) tm("Clear Prev Bitmap for Verification");
    _cm->clear_prev_bitmap(workers());
  }
  // This call implicitly verifies that the next bitmap is clear after Full GC.
  _verifier->check_bitmaps("Full GC End");

  // At this point there should be no regions in the
  // entire heap tagged as young.
  assert(check_young_list_empty(), "young list should be empty at this point");

  // Note: since we've just done a full GC, concurrent
  // marking is no longer active. Therefore we need not
  // re-enable reference discovery for the CM ref processor.
  // That will be done at the start of the next marking cycle.
  // We also know that the STW processor should no longer
  // discover any new references.
  assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
  assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
  _ref_processor_stw->verify_no_references_recorded();
  _ref_processor_cm->verify_no_references_recorded();
}

void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
  // Post collection logging.
  // We should do this after we potentially resize the heap so
  // that all the COMMIT / UNCOMMIT events are generated before
  // the compaction events.
  print_hrm_post_compaction();
  heap_transition->print();
  print_heap_after_gc();
  print_heap_regions();
#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                         bool clear_all_soft_refs) {
  assert_at_safepoint_on_vm_thread();

  if (GCLocker::check_active_before_gc()) {
    // Full GC was not completed.
    return false;
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
      soft_ref_policy()->should_clear_all_soft_refs();

  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
  GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);

  collector.prepare_collection();
  collector.collect();
  collector.complete_collection();

  // Full collection was successfully completed.
  return true;
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Currently, there is no facility in the do_full_collection(bool) API to notify
  // the caller that the collection did not succeed (e.g., because it was locked
  // out by the GC locker). So, right now, we'll ignore the return value.
  bool dummy = do_full_collection(true, /* explicit_gc */
                                  clear_all_soft_refs);
}

void G1CollectedHeap::resize_heap_if_necessary() {
  assert_at_safepoint_on_vm_thread();

  // Capacity, free and used after the GC counted as full regions to
  // include the waste in the following calculations.
  const size_t capacity_after_gc = capacity();
  const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();

  // This is enforced in arguments.cpp.
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
         "otherwise the code below doesn't make sense");

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  // We have to be careful here as these two calculations can overflow
  // 32-bit size_t's.
  double used_after_gc_d = (double) used_after_gc;
  double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
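
  // Worked example for the two bounds above (a sketch; the flag values are
  // illustrative): with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
  // maximum_used_percentage == 0.60 and minimum_used_percentage == 0.30.
  // For used_after_gc == 600M this gives
  //   minimum_desired_capacity == 600M / 0.60 == 1000M
  //   maximum_desired_capacity == 600M / 0.30 == 2000M
  // i.e. the committed heap is steered toward the [1000M, 2000M] band.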

  // Let's make sure that they are both under the max heap size, which
  // by default will make them fit into a size_t.
  double desired_capacity_upper_bound = (double) MaxHeapSize;
  minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
                                    desired_capacity_upper_bound);
  maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
                                    desired_capacity_upper_bound);

  // We can now safely turn them into size_t's.
  size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;

  // This assert only makes sense here, before we adjust them
  // with respect to the min and max heap size.
  assert(minimum_desired_capacity <= maximum_desired_capacity,
         "minimum_desired_capacity = " SIZE_FORMAT ", "
         "maximum_desired_capacity = " SIZE_FORMAT,
         minimum_desired_capacity, maximum_desired_capacity);

  // Should not be greater than the heap max size. No need to adjust
  // it with respect to the heap min size as it's a lower bound (i.e.,
  // we'll try to make the capacity larger than it, not smaller).
  minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
  // Should not be less than the heap min size. No need to adjust it
  // with respect to the heap max size as it's an upper bound (i.e.,
  // we'll try to make the capacity smaller than it, not greater).
  maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);

  if (capacity_after_gc < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;

    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                              "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                              capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);

    expand(expand_bytes, _workers);

    // No expansion, now see if we want to shrink
  } else if (capacity_after_gc > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;

    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                              "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                              capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);

    shrink(shrink_bytes);
  }
}

HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
                                                            bool do_gc,
                                                            bool clear_all_soft_refs,
                                                            bool expect_null_mutator_alloc_region,
                                                            bool* gc_succeeded) {
  *gc_succeeded = true;
  // Let's attempt the allocation first.
  HeapWord* result =
    attempt_allocation_at_safepoint(word_size,
                                    expect_null_mutator_alloc_region);
  if (result != NULL) {
    return result;
  }

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses. Therefore, at least for now, we'll favor
  // expansion over collection. (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)
  result = expand_and_allocate(word_size);
  if (result != NULL) {
    return result;
  }

  if (do_gc) {
    // Expansion didn't work, we'll try to do a Full GC.
    *gc_succeeded = do_full_collection(false, /* explicit_gc */
                                       clear_all_soft_refs);
  }

  return NULL;
}

HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                                     bool* succeeded) {
  assert_at_safepoint_on_vm_thread();

  // Attempts to allocate followed by Full GC.
  HeapWord* result =
    satisfy_failed_allocation_helper(word_size,
                                     true,  /* do_gc */
                                     false, /* clear_all_soft_refs */
                                     false, /* expect_null_mutator_alloc_region */
                                     succeeded);

  if (result != NULL || !*succeeded) {
    return result;
  }

  // Attempts to allocate followed by Full GC that will collect all soft references.
  result = satisfy_failed_allocation_helper(word_size,
                                            true, /* do_gc */
                                            true, /* clear_all_soft_refs */
                                            true, /* expect_null_mutator_alloc_region */
                                            succeeded);

  if (result != NULL || !*succeeded) {
    return result;
  }

  // Attempts to allocate, no GC
  result = satisfy_failed_allocation_helper(word_size,
                                            false, /* do_gc */
                                            false, /* clear_all_soft_refs */
                                            true,  /* expect_null_mutator_alloc_region */
                                            succeeded);

  if (result != NULL) {
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Attempt to expand the heap sufficiently to support an allocation of the
// given "word_size". If successful, perform the allocation and return the
// address of the allocated block; otherwise return "NULL".
1324 | |
1325 | HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { |
1326 | assert_at_safepoint_on_vm_thread(); |
1327 | |
1328 | _verifier->verify_region_sets_optional(); |
1329 | |
1330 | size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); |
1331 | log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B" , |
1332 | word_size * HeapWordSize); |
1333 | |
1334 | |
1335 | if (expand(expand_bytes, _workers)) { |
1336 | _hrm->verify_optional(); |
1337 | _verifier->verify_region_sets_optional(); |
1338 | return attempt_allocation_at_safepoint(word_size, |
1339 | false /* expect_null_mutator_alloc_region */); |
1340 | } |
1341 | return NULL; |
1342 | } |
1343 | |
1344 | bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) { |
1345 | size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); |
1346 | aligned_expand_bytes = align_up(aligned_expand_bytes, |
1347 | HeapRegion::GrainBytes); |
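// Illustrative arithmetic (assuming 4K pages and 8M regions): a request of
// 5M is first page-aligned up to 5M (already aligned), then region-aligned
// up to 8M, so regions_to_expand below becomes 1.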
1348 | |
1349 | log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B" , |
1350 | expand_bytes, aligned_expand_bytes); |
1351 | |
1352 | if (is_maximal_no_gc()) { |
1353 | log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)" ); |
1354 | return false; |
1355 | } |
1356 | |
1357 | double expand_heap_start_time_sec = os::elapsedTime(); |
1358 | uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes); |
assert(regions_to_expand > 0, "Must expand by at least one region");
1360 | |
1361 | uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers); |
1362 | if (expand_time_ms != NULL) { |
1363 | *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS; |
1364 | } |
1365 | |
1366 | if (expanded_by > 0) { |
1367 | size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes; |
assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1369 | policy()->record_new_heap_size(num_regions()); |
1370 | } else { |
1371 | log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)" ); |
1372 | |
1373 | // The expansion of the virtual storage space was unsuccessful. |
1374 | // Let's see if it was because we ran out of swap. |
1375 | if (G1ExitOnExpansionFailure && |
1376 | _hrm->available() >= regions_to_expand) { |
// We had headroom...
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1379 | } |
1380 | } |
1381 | return regions_to_expand > 0; |
1382 | } |
1383 | |
1384 | void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { |
1385 | size_t aligned_shrink_bytes = |
1386 | ReservedSpace::page_align_size_down(shrink_bytes); |
1387 | aligned_shrink_bytes = align_down(aligned_shrink_bytes, |
1388 | HeapRegion::GrainBytes); |
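// Illustrative arithmetic (assuming 4K pages and 8M regions): a request of
// 20M stays 20M after page alignment, is region-aligned down to 16M, and the
// division below removes at most 20M / 8M = 2 regions.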
1389 | uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); |
1390 | |
1391 | uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove); |
size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;

log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1397 | if (num_regions_removed > 0) { |
1398 | policy()->record_new_heap_size(num_regions()); |
1399 | } else { |
1400 | log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)" ); |
1401 | } |
1402 | } |
1403 | |
1404 | void G1CollectedHeap::shrink(size_t shrink_bytes) { |
1405 | _verifier->verify_region_sets_optional(); |
1406 | |
// We should only reach here at the end of a Full GC or during Remark, which
// means we should not be holding on to any GC alloc regions. The method
// below will make sure of that and do any remaining clean up.
1410 | _allocator->abandon_gc_alloc_regions(); |
1411 | |
1412 | // Instead of tearing down / rebuilding the free lists here, we |
1413 | // could instead use the remove_all_pending() method on free_list to |
1414 | // remove only the ones that we need to remove. |
1415 | tear_down_region_sets(true /* free_list_only */); |
1416 | shrink_helper(shrink_bytes); |
1417 | rebuild_region_sets(true /* free_list_only */); |
1418 | |
1419 | _hrm->verify_optional(); |
1420 | _verifier->verify_region_sets_optional(); |
1421 | } |
1422 | |
1423 | class OldRegionSetChecker : public HeapRegionSetChecker { |
1424 | public: |
1425 | void check_mt_safety() { |
1426 | // Master Old Set MT safety protocol: |
1427 | // (a) If we're at a safepoint, operations on the master old set |
1428 | // should be invoked: |
1429 | // - by the VM thread (which will serialize them), or |
1430 | // - by the GC workers while holding the FreeList_lock, if we're |
1431 | // at a safepoint for an evacuation pause (this lock is taken |
// anyway when a GC alloc region is retired so that a new one
1433 | // is allocated from the free list), or |
1434 | // - by the GC workers while holding the OldSets_lock, if we're at a |
1435 | // safepoint for a cleanup pause. |
1436 | // (b) If we're not at a safepoint, operations on the master old set |
1437 | // should be invoked while holding the Heap_lock. |
1438 | |
1439 | if (SafepointSynchronize::is_at_safepoint()) { |
1440 | guarantee(Thread::current()->is_VM_thread() || |
1441 | FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(), |
1442 | "master old set MT safety protocol at a safepoint" ); |
1443 | } else { |
1444 | guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint" ); |
1445 | } |
1446 | } |
1447 | bool is_correct_type(HeapRegion* hr) { return hr->is_old(); } |
1448 | const char* get_description() { return "Old Regions" ; } |
1449 | }; |
1450 | |
1451 | class ArchiveRegionSetChecker : public HeapRegionSetChecker { |
1452 | public: |
1453 | void check_mt_safety() { |
1454 | guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(), |
1455 | "May only change archive regions during initialization or safepoint." ); |
1456 | } |
1457 | bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); } |
1458 | const char* get_description() { return "Archive Regions" ; } |
1459 | }; |
1460 | |
1461 | class HumongousRegionSetChecker : public HeapRegionSetChecker { |
1462 | public: |
1463 | void check_mt_safety() { |
1464 | // Humongous Set MT safety protocol: |
1465 | // (a) If we're at a safepoint, operations on the master humongous |
1466 | // set should be invoked by either the VM thread (which will |
1467 | // serialize them) or by the GC workers while holding the |
1468 | // OldSets_lock. |
1469 | // (b) If we're not at a safepoint, operations on the master |
1470 | // humongous set should be invoked while holding the Heap_lock. |
1471 | |
1472 | if (SafepointSynchronize::is_at_safepoint()) { |
1473 | guarantee(Thread::current()->is_VM_thread() || |
1474 | OldSets_lock->owned_by_self(), |
1475 | "master humongous set MT safety protocol at a safepoint" ); |
1476 | } else { |
1477 | guarantee(Heap_lock->owned_by_self(), |
1478 | "master humongous set MT safety protocol outside a safepoint" ); |
1479 | } |
1480 | } |
1481 | bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); } |
1482 | const char* get_description() { return "Humongous Regions" ; } |
1483 | }; |
1484 | |
1485 | G1CollectedHeap::G1CollectedHeap() : |
1486 | CollectedHeap(), |
1487 | _young_gen_sampling_thread(NULL), |
1488 | _workers(NULL), |
1489 | _card_table(NULL), |
1490 | _soft_ref_policy(), |
1491 | _old_set("Old Region Set" , new OldRegionSetChecker()), |
1492 | _archive_set("Archive Region Set" , new ArchiveRegionSetChecker()), |
1493 | _humongous_set("Humongous Region Set" , new HumongousRegionSetChecker()), |
1494 | _bot(NULL), |
1495 | _listener(), |
1496 | _hrm(NULL), |
1497 | _allocator(NULL), |
1498 | _verifier(NULL), |
1499 | _summary_bytes_used(0), |
1500 | _archive_allocator(NULL), |
1501 | _survivor_evac_stats("Young" , YoungPLABSize, PLABWeight), |
1502 | _old_evac_stats("Old" , OldPLABSize, PLABWeight), |
1503 | _expand_heap_after_alloc_failure(true), |
1504 | _g1mm(NULL), |
1505 | _humongous_reclaim_candidates(), |
1506 | _has_humongous_reclaim_candidates(false), |
1507 | _hr_printer(), |
1508 | _collector_state(), |
1509 | _old_marking_cycles_started(0), |
1510 | _old_marking_cycles_completed(0), |
1511 | _eden(), |
1512 | _survivor(), |
1513 | _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), |
1514 | _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), |
1515 | _policy(G1Policy::create_policy(_gc_timer_stw)), |
1516 | _heap_sizing_policy(NULL), |
1517 | _collection_set(this, _policy), |
1518 | _hot_card_cache(NULL), |
1519 | _rem_set(NULL), |
1520 | _dirty_card_queue_set(false), |
1521 | _cm(NULL), |
1522 | _cm_thread(NULL), |
1523 | _cr(NULL), |
1524 | _task_queues(NULL), |
1525 | _evacuation_failed(false), |
1526 | _evacuation_failed_info_array(NULL), |
1527 | _preserved_marks_set(true /* in_c_heap */), |
1528 | #ifndef PRODUCT |
1529 | _evacuation_failure_alot_for_current_gc(false), |
1530 | _evacuation_failure_alot_gc_number(0), |
1531 | _evacuation_failure_alot_count(0), |
1532 | #endif |
1533 | _ref_processor_stw(NULL), |
1534 | _is_alive_closure_stw(this), |
1535 | _is_subject_to_discovery_stw(this), |
1536 | _ref_processor_cm(NULL), |
1537 | _is_alive_closure_cm(this), |
1538 | _is_subject_to_discovery_cm(this), |
1539 | _region_attr() { |
1540 | |
1541 | _verifier = new G1HeapVerifier(this); |
1542 | |
1543 | _allocator = new G1Allocator(this); |
1544 | |
1545 | _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics()); |
1546 | |
1547 | _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords); |
1548 | |
1549 | // Override the default _filler_array_max_size so that no humongous filler |
1550 | // objects are created. |
1551 | _filler_array_max_size = _humongous_object_threshold_in_words; |
1552 | |
1553 | uint n_queues = ParallelGCThreads; |
1554 | _task_queues = new RefToScanQueueSet(n_queues); |
1555 | |
1556 | _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC); |
1557 | |
1558 | for (uint i = 0; i < n_queues; i++) { |
1559 | RefToScanQueue* q = new RefToScanQueue(); |
1560 | q->initialize(); |
1561 | _task_queues->register_queue(i, q); |
1562 | ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo(); |
1563 | } |
1564 | |
1565 | // Initialize the G1EvacuationFailureALot counters and flags. |
1566 | NOT_PRODUCT(reset_evacuation_should_fail();) |
1567 | |
guarantee(_task_queues != NULL, "task_queues allocation failure.");
1569 | } |
1570 | |
1571 | static size_t actual_reserved_page_size(ReservedSpace rs) { |
1572 | size_t page_size = os::vm_page_size(); |
1573 | if (UseLargePages) { |
1574 | // There are two ways to manage large page memory. |
1575 | // 1. OS supports committing large page memory. |
// 2. OS doesn't support committing large page memory, so ReservedSpace
//    manages it itself (ReservedSpace calls such memory 'special'). If we
//    failed to set 'special', we reserved the memory without large pages.
1579 | if (os::can_commit_large_page_memory() || rs.special()) { |
// The ReservedSpace alignment comes from either the preferred page size or
// the heap alignment; if it came from the heap alignment, it can be larger
// than the large page size, so cap it at the large page size.
1583 | page_size = MIN2(rs.alignment(), os::large_page_size()); |
1584 | } |
1585 | } |
1586 | |
1587 | return page_size; |
1588 | } |
1589 | |
1590 | G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description, |
1591 | size_t size, |
1592 | size_t translation_factor) { |
1593 | size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1); |
1594 | // Allocate a new reserved space, preferring to use large pages. |
1595 | ReservedSpace rs(size, preferred_page_size); |
1596 | size_t page_size = actual_reserved_page_size(rs); |
1597 | G1RegionToSpaceMapper* result = |
1598 | G1RegionToSpaceMapper::create_mapper(rs, |
1599 | size, |
1600 | page_size, |
1601 | HeapRegion::GrainBytes, |
1602 | translation_factor, |
1603 | mtGC); |
1604 | |
1605 | os::trace_page_sizes_for_requested_size(description, |
1606 | size, |
1607 | preferred_page_size, |
1608 | page_size, |
1609 | rs.base(), |
1610 | rs.size()); |
1611 | |
1612 | return result; |
1613 | } |
1614 | |
1615 | jint G1CollectedHeap::initialize_concurrent_refinement() { |
1616 | jint ecode = JNI_OK; |
1617 | _cr = G1ConcurrentRefine::create(&ecode); |
1618 | return ecode; |
1619 | } |
1620 | |
1621 | jint G1CollectedHeap::initialize_young_gen_sampling_thread() { |
1622 | _young_gen_sampling_thread = new G1YoungRemSetSamplingThread(); |
1623 | if (_young_gen_sampling_thread->osthread() == NULL) { |
1624 | vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread" ); |
1625 | return JNI_ENOMEM; |
1626 | } |
1627 | return JNI_OK; |
1628 | } |
1629 | |
1630 | jint G1CollectedHeap::initialize() { |
1631 | os::enable_vtime(); |
1632 | |
1633 | // Necessary to satisfy locking discipline assertions. |
1634 | |
1635 | MutexLocker x(Heap_lock); |
1636 | |
1637 | // While there are no constraints in the GC code that HeapWordSize |
1638 | // be any particular value, there are multiple other areas in the |
1639 | // system which believe this to be true (e.g. oop->object_size in some |
1640 | // cases incorrectly returns the size in wordSize units rather than |
1641 | // HeapWordSize). |
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1643 | |
1644 | size_t init_byte_size = InitialHeapSize; |
1645 | size_t reserved_byte_size = G1Arguments::heap_reserved_size_bytes(); |
1646 | |
1647 | // Ensure that the sizes are properly aligned. |
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");
1651 | |
1652 | // Reserve the maximum. |
1653 | |
1654 | // When compressed oops are enabled, the preferred heap base |
1655 | // is calculated by subtracting the requested size from the |
1656 | // 32Gb boundary and using the result as the base address for |
1657 | // heap reservation. If the requested size is not aligned to |
1658 | // HeapRegion::GrainBytes (i.e. the alignment that is passed |
1659 | // into the ReservedHeapSpace constructor) then the actual |
1660 | // base of the reserved heap may end up differing from the |
1661 | // address that was requested (i.e. the preferred heap base). |
1662 | // If this happens then we could end up using a non-optimal |
1663 | // compressed oops mode. |
1664 | |
1665 | ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size, |
1666 | HeapAlignment); |
1667 | |
1668 | initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); |
1669 | |
1670 | // Create the barrier set for the entire reserved region. |
1671 | G1CardTable* ct = new G1CardTable(reserved_region()); |
1672 | ct->initialize(); |
1673 | G1BarrierSet* bs = new G1BarrierSet(ct); |
1674 | bs->initialize(); |
assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1676 | BarrierSet::set_barrier_set(bs); |
1677 | _card_table = ct; |
1678 | |
1679 | G1BarrierSet::satb_mark_queue_set().initialize(this, |
1680 | SATB_Q_CBL_mon, |
1681 | &bs->satb_mark_queue_buffer_allocator(), |
1682 | G1SATBProcessCompletedThreshold, |
1683 | G1SATBBufferEnqueueingThresholdPercent); |
1684 | |
1685 | // process_completed_buffers_threshold and max_completed_buffers are updated |
1686 | // later, based on the concurrent refinement object. |
1687 | G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
1688 | &bs->dirty_card_queue_buffer_allocator(), |
1689 | true); // init_free_ids |
1690 | |
1691 | dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
1692 | &bs->dirty_card_queue_buffer_allocator()); |
1693 | |
1694 | // Create the hot card cache. |
1695 | _hot_card_cache = new G1HotCardCache(this); |
1696 | |
1697 | // Carve out the G1 part of the heap. |
1698 | ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size); |
1699 | size_t page_size = actual_reserved_page_size(heap_rs); |
1700 | G1RegionToSpaceMapper* heap_storage = |
1701 | G1RegionToSpaceMapper::create_heap_mapper(g1_rs, |
1702 | g1_rs.size(), |
1703 | page_size, |
1704 | HeapRegion::GrainBytes, |
1705 | 1, |
1706 | mtJavaHeap); |
if (heap_storage == NULL) {
vm_shutdown_during_initialization("Could not initialize G1 heap");
1709 | return JNI_ERR; |
1710 | } |
1711 | |
1712 | os::trace_page_sizes("Heap" , |
1713 | MinHeapSize, |
1714 | reserved_byte_size, |
1715 | page_size, |
1716 | heap_rs.base(), |
1717 | heap_rs.size()); |
1718 | heap_storage->set_mapping_changed_listener(&_listener); |
1719 | |
1720 | // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps. |
1721 | G1RegionToSpaceMapper* bot_storage = |
1722 | create_aux_memory_mapper("Block Offset Table" , |
1723 | G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize), |
1724 | G1BlockOffsetTable::heap_map_factor()); |
1725 | |
1726 | G1RegionToSpaceMapper* cardtable_storage = |
1727 | create_aux_memory_mapper("Card Table" , |
1728 | G1CardTable::compute_size(g1_rs.size() / HeapWordSize), |
1729 | G1CardTable::heap_map_factor()); |
1730 | |
1731 | G1RegionToSpaceMapper* card_counts_storage = |
1732 | create_aux_memory_mapper("Card Counts Table" , |
1733 | G1CardCounts::compute_size(g1_rs.size() / HeapWordSize), |
1734 | G1CardCounts::heap_map_factor()); |
1735 | |
1736 | size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size()); |
1737 | G1RegionToSpaceMapper* prev_bitmap_storage = |
1738 | create_aux_memory_mapper("Prev Bitmap" , bitmap_size, G1CMBitMap::heap_map_factor()); |
1739 | G1RegionToSpaceMapper* next_bitmap_storage = |
1740 | create_aux_memory_mapper("Next Bitmap" , bitmap_size, G1CMBitMap::heap_map_factor()); |
1741 | |
1742 | _hrm = HeapRegionManager::create_manager(this); |
1743 | |
1744 | _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); |
1745 | _card_table->initialize(cardtable_storage); |
1746 | // Do later initialization work for concurrent refinement. |
1747 | _hot_card_cache->initialize(card_counts_storage); |
1748 | |
1749 | // 6843694 - ensure that the maximum region index can fit |
1750 | // in the remembered set structures. |
1751 | const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1753 | |
// The G1FromCardCache reserves the card with value 0 as "invalid", so the
// heap must not start within the first card.
guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1757 | // Also create a G1 rem set. |
1758 | _rem_set = new G1RemSet(this, _card_table, _hot_card_cache); |
1759 | _rem_set->initialize(max_reserved_capacity(), max_regions()); |
1760 | |
1761 | size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1763 | guarantee(HeapRegion::CardsPerRegion < max_cards_per_region, |
1764 | "too many cards per region" ); |
1765 | |
1766 | FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1); |
1767 | |
1768 | _bot = new G1BlockOffsetTable(reserved_region(), bot_storage); |
1769 | |
1770 | { |
1771 | HeapWord* start = _hrm->reserved().start(); |
1772 | HeapWord* end = _hrm->reserved().end(); |
1773 | size_t granularity = HeapRegion::GrainBytes; |
1774 | |
1775 | _region_attr.initialize(start, end, granularity); |
1776 | _humongous_reclaim_candidates.initialize(start, end, granularity); |
1777 | } |
1778 | |
1779 | _workers = new WorkGang("GC Thread" , ParallelGCThreads, |
1780 | true /* are_GC_task_threads */, |
1781 | false /* are_ConcurrentGC_threads */); |
1782 | if (_workers == NULL) { |
1783 | return JNI_ENOMEM; |
1784 | } |
1785 | _workers->initialize_workers(); |
1786 | |
1787 | // Create the G1ConcurrentMark data structure and thread. |
1788 | // (Must do this late, so that "max_regions" is defined.) |
1789 | _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage); |
1790 | if (_cm == NULL || !_cm->completed_initialization()) { |
1791 | vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark" ); |
1792 | return JNI_ENOMEM; |
1793 | } |
1794 | _cm_thread = _cm->cm_thread(); |
1795 | |
1796 | // Now expand into the initial heap size. |
1797 | if (!expand(init_byte_size, _workers)) { |
1798 | vm_shutdown_during_initialization("Failed to allocate initial heap." ); |
1799 | return JNI_ENOMEM; |
1800 | } |
1801 | |
1802 | // Perform any initialization actions delegated to the policy. |
1803 | policy()->init(this, &_collection_set); |
1804 | |
1805 | jint ecode = initialize_concurrent_refinement(); |
1806 | if (ecode != JNI_OK) { |
1807 | return ecode; |
1808 | } |
1809 | |
1810 | ecode = initialize_young_gen_sampling_thread(); |
1811 | if (ecode != JNI_OK) { |
1812 | return ecode; |
1813 | } |
1814 | |
1815 | { |
1816 | G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); |
1817 | dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone()); |
1818 | dcqs.set_max_completed_buffers(concurrent_refine()->red_zone()); |
1819 | } |
1820 | |
1821 | // Here we allocate the dummy HeapRegion that is required by the |
1822 | // G1AllocRegion class. |
1823 | HeapRegion* dummy_region = _hrm->get_dummy_region(); |
1824 | |
// We'll re-use the same region whether or not the alloc region requires
// BOT updates; if it doesn't, a non-young region would complain that it
// cannot support allocations without BOT updates. So we tag the dummy
// region as eden to avoid that.
1829 | dummy_region->set_eden(); |
1830 | // Make sure it's full. |
1831 | dummy_region->set_top(dummy_region->end()); |
1832 | G1AllocRegion::setup(this, dummy_region); |
1833 | |
1834 | _allocator->init_mutator_alloc_region(); |
1835 | |
// Create the monitoring and management support now that the values in the
// heap have been properly initialized.
1838 | _g1mm = new G1MonitoringSupport(this); |
1839 | |
1840 | G1StringDedup::initialize(); |
1841 | |
1842 | _preserved_marks_set.init(ParallelGCThreads); |
1843 | |
1844 | _collection_set.initialize(max_regions()); |
1845 | |
1846 | return JNI_OK; |
1847 | } |
1848 | |
1849 | void G1CollectedHeap::stop() { |
1850 | // Stop all concurrent threads. We do this to make sure these threads |
1851 | // do not continue to execute and access resources (e.g. logging) |
1852 | // that are destroyed during shutdown. |
1853 | _cr->stop(); |
1854 | _young_gen_sampling_thread->stop(); |
1855 | _cm_thread->stop(); |
1856 | if (G1StringDedup::is_enabled()) { |
1857 | G1StringDedup::stop(); |
1858 | } |
1859 | } |
1860 | |
1861 | void G1CollectedHeap::safepoint_synchronize_begin() { |
1862 | SuspendibleThreadSet::synchronize(); |
1863 | } |
1864 | |
1865 | void G1CollectedHeap::safepoint_synchronize_end() { |
1866 | SuspendibleThreadSet::desynchronize(); |
1867 | } |
1868 | |
1869 | void G1CollectedHeap::post_initialize() { |
1870 | CollectedHeap::post_initialize(); |
1871 | ref_processing_init(); |
1872 | } |
1873 | |
1874 | void G1CollectedHeap::ref_processing_init() { |
1875 | // Reference processing in G1 currently works as follows: |
1876 | // |
1877 | // * There are two reference processor instances. One is |
1878 | // used to record and process discovered references |
1879 | // during concurrent marking; the other is used to |
1880 | // record and process references during STW pauses |
1881 | // (both full and incremental). |
1882 | // * Both ref processors need to 'span' the entire heap as |
1883 | // the regions in the collection set may be dotted around. |
1884 | // |
1885 | // * For the concurrent marking ref processor: |
1886 | // * Reference discovery is enabled at initial marking. |
1887 | // * Reference discovery is disabled and the discovered |
1888 | // references processed etc during remarking. |
1889 | // * Reference discovery is MT (see below). |
1890 | // * Reference discovery requires a barrier (see below). |
1891 | // * Reference processing may or may not be MT |
1892 | // (depending on the value of ParallelRefProcEnabled |
1893 | // and ParallelGCThreads). |
1894 | // * A full GC disables reference discovery by the CM |
// ref processor and abandons any entries on its
1896 | // discovered lists. |
1897 | // |
1898 | // * For the STW processor: |
1899 | // * Non MT discovery is enabled at the start of a full GC. |
1900 | // * Processing and enqueueing during a full GC is non-MT. |
1901 | // * During a full GC, references are processed after marking. |
1902 | // |
1903 | // * Discovery (may or may not be MT) is enabled at the start |
1904 | // of an incremental evacuation pause. |
1905 | // * References are processed near the end of a STW evacuation pause. |
1906 | // * For both types of GC: |
1907 | // * Discovery is atomic - i.e. not concurrent. |
1908 | // * Reference discovery will not need a barrier. |
1909 | |
1910 | bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1); |
1911 | |
1912 | // Concurrent Mark ref processor |
1913 | _ref_processor_cm = |
1914 | new ReferenceProcessor(&_is_subject_to_discovery_cm, |
1915 | mt_processing, // mt processing |
1916 | ParallelGCThreads, // degree of mt processing |
1917 | (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery |
1918 | MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery |
1919 | false, // Reference discovery is not atomic |
1920 | &_is_alive_closure_cm, // is alive closure |
1921 | true); // allow changes to number of processing threads |
1922 | |
1923 | // STW ref processor |
1924 | _ref_processor_stw = |
1925 | new ReferenceProcessor(&_is_subject_to_discovery_stw, |
1926 | mt_processing, // mt processing |
1927 | ParallelGCThreads, // degree of mt processing |
1928 | (ParallelGCThreads > 1), // mt discovery |
1929 | ParallelGCThreads, // degree of mt discovery |
1930 | true, // Reference discovery is atomic |
1931 | &_is_alive_closure_stw, // is alive closure |
1932 | true); // allow changes to number of processing threads |
1933 | } |
1934 | |
1935 | SoftRefPolicy* G1CollectedHeap::soft_ref_policy() { |
1936 | return &_soft_ref_policy; |
1937 | } |
1938 | |
1939 | size_t G1CollectedHeap::capacity() const { |
1940 | return _hrm->length() * HeapRegion::GrainBytes; |
1941 | } |
1942 | |
1943 | size_t G1CollectedHeap::unused_committed_regions_in_bytes() const { |
1944 | return _hrm->total_free_bytes(); |
1945 | } |
1946 | |
1947 | void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) { |
1948 | _hot_card_cache->drain(cl, worker_i); |
1949 | } |
1950 | |
1951 | void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) { |
1952 | G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); |
1953 | size_t n_completed_buffers = 0; |
1954 | while (dcqs.apply_closure_during_gc(cl, worker_i)) { |
1955 | n_completed_buffers++; |
1956 | } |
1957 | assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!" ); |
1958 | phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers); |
1959 | } |
1960 | |
1961 | // Computes the sum of the storage used by the various regions. |
1962 | size_t G1CollectedHeap::used() const { |
1963 | size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions(); |
1964 | if (_archive_allocator != NULL) { |
1965 | result += _archive_allocator->used(); |
1966 | } |
1967 | return result; |
1968 | } |
1969 | |
1970 | size_t G1CollectedHeap::used_unlocked() const { |
1971 | return _summary_bytes_used; |
1972 | } |
1973 | |
1974 | class SumUsedClosure: public HeapRegionClosure { |
1975 | size_t _used; |
1976 | public: |
1977 | SumUsedClosure() : _used(0) {} |
1978 | bool do_heap_region(HeapRegion* r) { |
1979 | _used += r->used(); |
1980 | return false; |
1981 | } |
1982 | size_t result() { return _used; } |
1983 | }; |
1984 | |
1985 | size_t G1CollectedHeap::recalculate_used() const { |
1986 | SumUsedClosure blk; |
1987 | heap_region_iterate(&blk); |
1988 | return blk.result(); |
1989 | } |
1990 | |
1991 | bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) { |
1992 | switch (cause) { |
1993 | case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent; |
1994 | case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent; |
1995 | case GCCause::_wb_conc_mark: return true; |
1996 | default : return false; |
1997 | } |
1998 | } |
1999 | |
2000 | bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
2001 | switch (cause) { |
2002 | case GCCause::_gc_locker: return GCLockerInvokesConcurrent; |
2003 | case GCCause::_g1_humongous_allocation: return true; |
2004 | case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent; |
2005 | default: return is_user_requested_concurrent_full_gc(cause); |
2006 | } |
2007 | } |
2008 | |
2009 | bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) { |
if (policy()->force_upgrade_to_full()) {
2011 | return true; |
2012 | } else if (should_do_concurrent_full_gc(_gc_cause)) { |
2013 | return false; |
2014 | } else if (has_regions_left_for_allocation()) { |
2015 | return false; |
2016 | } else { |
2017 | return true; |
2018 | } |
2019 | } |
2020 | |
2021 | #ifndef PRODUCT |
2022 | void G1CollectedHeap::allocate_dummy_regions() { |
2023 | // Let's fill up most of the region |
2024 | size_t word_size = HeapRegion::GrainWords - 1024; |
2025 | // And as a result the region we'll allocate will be humongous. |
guarantee(is_humongous(word_size), "sanity");
2027 | |
2028 | // _filler_array_max_size is set to humongous object threshold |
2029 | // but temporarily change it to use CollectedHeap::fill_with_object(). |
2030 | SizeTFlagSetting fs(_filler_array_max_size, word_size); |
2031 | |
2032 | for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { |
2033 | // Let's use the existing mechanism for the allocation |
2034 | HeapWord* dummy_obj = humongous_obj_allocate(word_size); |
2035 | if (dummy_obj != NULL) { |
2036 | MemRegion mr(dummy_obj, word_size); |
2037 | CollectedHeap::fill_with_object(mr); |
2038 | } else { |
2039 | // If we can't allocate once, we probably cannot allocate |
2040 | // again. Let's get out of the loop. |
2041 | break; |
2042 | } |
2043 | } |
2044 | } |
2045 | #endif // !PRODUCT |
2046 | |
2047 | void G1CollectedHeap::increment_old_marking_cycles_started() { |
2048 | assert(_old_marking_cycles_started == _old_marking_cycles_completed || |
2049 | _old_marking_cycles_started == _old_marking_cycles_completed + 1, |
2050 | "Wrong marking cycle count (started: %d, completed: %d)" , |
2051 | _old_marking_cycles_started, _old_marking_cycles_completed); |
2052 | |
2053 | _old_marking_cycles_started++; |
2054 | } |
2055 | |
2056 | void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) { |
2057 | MonitorLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
2058 | |
2059 | // We assume that if concurrent == true, then the caller is a |
// concurrent thread that has joined the Suspendible Thread
2061 | // Set. If there's ever a cheap way to check this, we should add an |
2062 | // assert here. |
2063 | |
2064 | // Given that this method is called at the end of a Full GC or of a |
2065 | // concurrent cycle, and those can be nested (i.e., a Full GC can |
2066 | // interrupt a concurrent cycle), the number of full collections |
2067 | // completed should be either one (in the case where there was no |
2068 | // nesting) or two (when a Full GC interrupted a concurrent cycle) |
2069 | // behind the number of full collections started. |
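// Example timeline (illustrative): starting from started == completed == 1,
// a concurrent cycle begins (started = 2), then a Full GC interrupts it and
// starts its own cycle (started = 3). The Full GC finishes first and sees
// itself two behind (completed becomes 2); the interrupted concurrent cycle
// completes last, one behind (completed becomes 3).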
2070 | |
2071 | // This is the case for the inner caller, i.e. a Full GC. |
2072 | assert(concurrent || |
2073 | (_old_marking_cycles_started == _old_marking_cycles_completed + 1) || |
2074 | (_old_marking_cycles_started == _old_marking_cycles_completed + 2), |
2075 | "for inner caller (Full GC): _old_marking_cycles_started = %u " |
2076 | "is inconsistent with _old_marking_cycles_completed = %u" , |
2077 | _old_marking_cycles_started, _old_marking_cycles_completed); |
2078 | |
2079 | // This is the case for the outer caller, i.e. the concurrent cycle. |
2080 | assert(!concurrent || |
2081 | (_old_marking_cycles_started == _old_marking_cycles_completed + 1), |
2082 | "for outer caller (concurrent cycle): " |
2083 | "_old_marking_cycles_started = %u " |
2084 | "is inconsistent with _old_marking_cycles_completed = %u" , |
2085 | _old_marking_cycles_started, _old_marking_cycles_completed); |
2086 | |
2087 | _old_marking_cycles_completed += 1; |
2088 | |
2089 | // We need to clear the "in_progress" flag in the CM thread before |
// we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2091 | // is set) so that if a waiter requests another System.gc() it doesn't |
2092 | // incorrectly see that a marking cycle is still in progress. |
2093 | if (concurrent) { |
2094 | _cm_thread->set_idle(); |
2095 | } |
2096 | |
// This notify_all() will ensure that a thread that called
// System.gc() (with ExplicitGCInvokesConcurrent set or not) and
// is waiting for a full GC to finish will be woken up. It is
2100 | // waiting in VM_G1CollectForAllocation::doit_epilogue(). |
2101 | FullGCCount_lock->notify_all(); |
2102 | } |
2103 | |
2104 | void G1CollectedHeap::collect(GCCause::Cause cause) { |
2105 | try_collect(cause, true); |
2106 | } |
2107 | |
2108 | bool G1CollectedHeap::try_collect(GCCause::Cause cause, bool retry_on_gc_failure) { |
2109 | assert_heap_not_locked(); |
2110 | |
2111 | bool gc_succeeded; |
2112 | bool should_retry_gc; |
2113 | |
2114 | do { |
2115 | should_retry_gc = false; |
2116 | |
2117 | uint gc_count_before; |
2118 | uint old_marking_count_before; |
2119 | uint full_gc_count_before; |
2120 | |
2121 | { |
2122 | MutexLocker ml(Heap_lock); |
2123 | |
2124 | // Read the GC count while holding the Heap_lock |
2125 | gc_count_before = total_collections(); |
2126 | full_gc_count_before = total_full_collections(); |
2127 | old_marking_count_before = _old_marking_cycles_started; |
2128 | } |
2129 | |
2130 | if (should_do_concurrent_full_gc(cause)) { |
2131 | // Schedule an initial-mark evacuation pause that will start a |
2132 | // concurrent cycle. We're setting word_size to 0 which means that |
2133 | // we are not requesting a post-GC allocation. |
2134 | VM_G1CollectForAllocation op(0, /* word_size */ |
2135 | gc_count_before, |
2136 | cause, |
2137 | true, /* should_initiate_conc_mark */ |
2138 | policy()->max_pause_time_ms()); |
2139 | VMThread::execute(&op); |
2140 | gc_succeeded = op.gc_succeeded(); |
2141 | if (!gc_succeeded && retry_on_gc_failure) { |
2142 | if (old_marking_count_before == _old_marking_cycles_started) { |
2143 | should_retry_gc = op.should_retry_gc(); |
2144 | } else { |
2145 | // A Full GC happened while we were trying to schedule the |
2146 | // concurrent cycle. No point in starting a new cycle given |
2147 | // that the whole heap was collected anyway. |
2148 | } |
2149 | |
2150 | if (should_retry_gc && GCLocker::is_active_and_needs_gc()) { |
2151 | GCLocker::stall_until_clear(); |
2152 | } |
2153 | } |
2154 | } else { |
2155 | if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc |
2156 | DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
2157 | |
2158 | // Schedule a standard evacuation pause. We're setting word_size |
2159 | // to 0 which means that we are not requesting a post-GC allocation. |
2160 | VM_G1CollectForAllocation op(0, /* word_size */ |
2161 | gc_count_before, |
2162 | cause, |
2163 | false, /* should_initiate_conc_mark */ |
2164 | policy()->max_pause_time_ms()); |
2165 | VMThread::execute(&op); |
2166 | gc_succeeded = op.gc_succeeded(); |
2167 | } else { |
2168 | // Schedule a Full GC. |
2169 | VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
2170 | VMThread::execute(&op); |
2171 | gc_succeeded = op.gc_succeeded(); |
2172 | } |
2173 | } |
2174 | } while (should_retry_gc); |
2175 | return gc_succeeded; |
2176 | } |
2177 | |
2178 | bool G1CollectedHeap::is_in(const void* p) const { |
2179 | if (_hrm->reserved().contains(p)) { |
2180 | // Given that we know that p is in the reserved space, |
2181 | // heap_region_containing() should successfully |
2182 | // return the containing region. |
2183 | HeapRegion* hr = heap_region_containing(p); |
2184 | return hr->is_in(p); |
2185 | } else { |
2186 | return false; |
2187 | } |
2188 | } |
2189 | |
2190 | #ifdef ASSERT |
2191 | bool G1CollectedHeap::is_in_exact(const void* p) const { |
2192 | bool contains = reserved_region().contains(p); |
2193 | bool available = _hrm->is_available(addr_to_region((HeapWord*)p)); |
2194 | if (contains && available) { |
2195 | return true; |
2196 | } else { |
2197 | return false; |
2198 | } |
2199 | } |
2200 | #endif |
2201 | |
2202 | // Iteration functions. |
2203 | |
2204 | // Iterates an ObjectClosure over all objects within a HeapRegion. |
2205 | |
2206 | class IterateObjectClosureRegionClosure: public HeapRegionClosure { |
2207 | ObjectClosure* _cl; |
2208 | public: |
2209 | IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} |
2210 | bool do_heap_region(HeapRegion* r) { |
2211 | if (!r->is_continues_humongous()) { |
2212 | r->object_iterate(_cl); |
2213 | } |
2214 | return false; |
2215 | } |
2216 | }; |
2217 | |
2218 | void G1CollectedHeap::object_iterate(ObjectClosure* cl) { |
2219 | IterateObjectClosureRegionClosure blk(cl); |
2220 | heap_region_iterate(&blk); |
2221 | } |
2222 | |
2223 | void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const { |
2224 | _hrm->iterate(cl); |
2225 | } |
2226 | |
2227 | void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl, |
2228 | HeapRegionClaimer *hrclaimer, |
2229 | uint worker_id) const { |
2230 | _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id)); |
2231 | } |
2232 | |
2233 | void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl, |
2234 | HeapRegionClaimer *hrclaimer) const { |
2235 | _hrm->par_iterate(cl, hrclaimer, 0); |
2236 | } |
2237 | |
2238 | void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) { |
2239 | _collection_set.iterate(cl); |
2240 | } |
2241 | |
2242 | void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, uint worker_id) { |
2243 | _collection_set.iterate_incremental_part_from(cl, worker_id, workers()->active_workers()); |
2244 | } |
2245 | |
2246 | HeapWord* G1CollectedHeap::block_start(const void* addr) const { |
2247 | HeapRegion* hr = heap_region_containing(addr); |
2248 | return hr->block_start(addr); |
2249 | } |
2250 | |
2251 | bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { |
2252 | HeapRegion* hr = heap_region_containing(addr); |
2253 | return hr->block_is_obj(addr); |
2254 | } |
2255 | |
2256 | bool G1CollectedHeap::supports_tlab_allocation() const { |
2257 | return true; |
2258 | } |
2259 | |
2260 | size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { |
2261 | return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes; |
2262 | } |
2263 | |
2264 | size_t G1CollectedHeap::tlab_used(Thread* ignored) const { |
2265 | return _eden.length() * HeapRegion::GrainBytes; |
2266 | } |
2267 | |
// For G1, TLABs should not contain humongous objects, so the maximum TLAB
// size must be equal to the humongous object limit.
2270 | size_t G1CollectedHeap::max_tlab_size() const { |
2271 | return align_down(_humongous_object_threshold_in_words, MinObjAlignment); |
2272 | } |
2273 | |
2274 | size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { |
2275 | return _allocator->unsafe_max_tlab_alloc(); |
2276 | } |
2277 | |
2278 | size_t G1CollectedHeap::max_capacity() const { |
2279 | return _hrm->max_expandable_length() * HeapRegion::GrainBytes; |
2280 | } |
2281 | |
2282 | size_t G1CollectedHeap::max_reserved_capacity() const { |
2283 | return _hrm->max_length() * HeapRegion::GrainBytes; |
2284 | } |
2285 | |
2286 | jlong G1CollectedHeap::millis_since_last_gc() { |
2287 | // See the notes in GenCollectedHeap::millis_since_last_gc() |
2288 | // for more information about the implementation. |
2289 | jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - |
2290 | _policy->collection_pause_end_millis(); |
2291 | if (ret_val < 0) { |
2292 | log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT |
2293 | ". returning zero instead." , ret_val); |
2294 | return 0; |
2295 | } |
2296 | return ret_val; |
2297 | } |
2298 | |
2299 | void G1CollectedHeap::deduplicate_string(oop str) { |
assert(java_lang_String::is_instance(str), "invariant");
2301 | |
2302 | if (G1StringDedup::is_enabled()) { |
2303 | G1StringDedup::deduplicate(str); |
2304 | } |
2305 | } |
2306 | |
2307 | void G1CollectedHeap::prepare_for_verify() { |
2308 | _verifier->prepare_for_verify(); |
2309 | } |
2310 | |
2311 | void G1CollectedHeap::verify(VerifyOption vo) { |
2312 | _verifier->verify(vo); |
2313 | } |
2314 | |
2315 | bool G1CollectedHeap::supports_concurrent_phase_control() const { |
2316 | return true; |
2317 | } |
2318 | |
2319 | bool G1CollectedHeap::request_concurrent_phase(const char* phase) { |
2320 | return _cm_thread->request_concurrent_phase(phase); |
2321 | } |
2322 | |
2323 | bool G1CollectedHeap::is_heterogeneous_heap() const { |
2324 | return G1Arguments::is_heterogeneous_heap(); |
2325 | } |
2326 | |
2327 | class PrintRegionClosure: public HeapRegionClosure { |
2328 | outputStream* _st; |
2329 | public: |
2330 | PrintRegionClosure(outputStream* st) : _st(st) {} |
2331 | bool do_heap_region(HeapRegion* r) { |
2332 | r->print_on(_st); |
2333 | return false; |
2334 | } |
2335 | }; |
2336 | |
2337 | bool G1CollectedHeap::is_obj_dead_cond(const oop obj, |
2338 | const HeapRegion* hr, |
2339 | const VerifyOption vo) const { |
2340 | switch (vo) { |
2341 | case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr); |
2342 | case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr); |
2343 | case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr); |
2344 | default: ShouldNotReachHere(); |
2345 | } |
2346 | return false; // keep some compilers happy |
2347 | } |
2348 | |
2349 | bool G1CollectedHeap::is_obj_dead_cond(const oop obj, |
2350 | const VerifyOption vo) const { |
2351 | switch (vo) { |
2352 | case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj); |
2353 | case VerifyOption_G1UseNextMarking: return is_obj_ill(obj); |
2354 | case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj); |
2355 | default: ShouldNotReachHere(); |
2356 | } |
2357 | return false; // keep some compilers happy |
2358 | } |
2359 | |
2360 | void G1CollectedHeap::print_heap_regions() const { |
2361 | LogTarget(Trace, gc, heap, region) lt; |
2362 | if (lt.is_enabled()) { |
2363 | LogStream ls(lt); |
2364 | print_regions_on(&ls); |
2365 | } |
2366 | } |
2367 | |
2368 | void G1CollectedHeap::print_on(outputStream* st) const { |
2369 | st->print(" %-20s" , "garbage-first heap" ); |
2370 | st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K" , |
2371 | capacity()/K, used_unlocked()/K); |
2372 | st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")" , |
2373 | p2i(_hrm->reserved().start()), |
2374 | p2i(_hrm->reserved().end())); |
2375 | st->cr(); |
2376 | st->print(" region size " SIZE_FORMAT "K, " , HeapRegion::GrainBytes / K); |
2377 | uint young_regions = young_regions_count(); |
2378 | st->print("%u young (" SIZE_FORMAT "K), " , young_regions, |
2379 | (size_t) young_regions * HeapRegion::GrainBytes / K); |
2380 | uint survivor_regions = survivor_regions_count(); |
2381 | st->print("%u survivors (" SIZE_FORMAT "K)" , survivor_regions, |
2382 | (size_t) survivor_regions * HeapRegion::GrainBytes / K); |
2383 | st->cr(); |
2384 | MetaspaceUtils::print_on(st); |
2385 | } |
2386 | |
2387 | void G1CollectedHeap::print_regions_on(outputStream* st) const { |
2388 | st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, " |
2389 | "HS=humongous(starts), HC=humongous(continues), " |
2390 | "CS=collection set, F=free, A=archive, " |
2391 | "TAMS=top-at-mark-start (previous, next)" ); |
2392 | PrintRegionClosure blk(st); |
2393 | heap_region_iterate(&blk); |
2394 | } |
2395 | |
2396 | void G1CollectedHeap::print_extended_on(outputStream* st) const { |
2397 | print_on(st); |
2398 | |
2399 | // Print the per-region information. |
2400 | print_regions_on(st); |
2401 | } |
2402 | |
2403 | void G1CollectedHeap::print_on_error(outputStream* st) const { |
2404 | this->CollectedHeap::print_on_error(st); |
2405 | |
2406 | if (_cm != NULL) { |
2407 | st->cr(); |
2408 | _cm->print_on_error(st); |
2409 | } |
2410 | } |
2411 | |
2412 | void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { |
2413 | workers()->print_worker_threads_on(st); |
2414 | _cm_thread->print_on(st); |
2415 | st->cr(); |
2416 | _cm->print_worker_threads_on(st); |
2417 | _cr->print_threads_on(st); |
2418 | _young_gen_sampling_thread->print_on(st); |
2419 | if (G1StringDedup::is_enabled()) { |
2420 | G1StringDedup::print_worker_threads_on(st); |
2421 | } |
2422 | } |
2423 | |
2424 | void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { |
2425 | workers()->threads_do(tc); |
2426 | tc->do_thread(_cm_thread); |
2427 | _cm->threads_do(tc); |
2428 | _cr->threads_do(tc); |
2429 | tc->do_thread(_young_gen_sampling_thread); |
2430 | if (G1StringDedup::is_enabled()) { |
2431 | G1StringDedup::threads_do(tc); |
2432 | } |
2433 | } |
2434 | |
2435 | void G1CollectedHeap::print_tracing_info() const { |
2436 | rem_set()->print_summary_info(); |
2437 | concurrent_mark()->print_summary_info(); |
2438 | } |
2439 | |
2440 | #ifndef PRODUCT |
2441 | // Helpful for debugging RSet issues. |
2442 | |
2443 | class PrintRSetsClosure : public HeapRegionClosure { |
2444 | private: |
2445 | const char* _msg; |
2446 | size_t _occupied_sum; |
2447 | |
2448 | public: |
2449 | bool do_heap_region(HeapRegion* r) { |
2450 | HeapRegionRemSet* hrrs = r->rem_set(); |
2451 | size_t occupied = hrrs->occupied(); |
2452 | _occupied_sum += occupied; |
2453 | |
2454 | tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r)); |
2455 | if (occupied == 0) { |
2456 | tty->print_cr(" RSet is empty" ); |
2457 | } else { |
2458 | hrrs->print(); |
2459 | } |
2460 | tty->print_cr("----------" ); |
2461 | return false; |
2462 | } |
2463 | |
2464 | PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) { |
2465 | tty->cr(); |
2466 | tty->print_cr("========================================" ); |
2467 | tty->print_cr("%s" , msg); |
2468 | tty->cr(); |
2469 | } |
2470 | |
2471 | ~PrintRSetsClosure() { |
2472 | tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum); |
2473 | tty->print_cr("========================================" ); |
2474 | tty->cr(); |
2475 | } |
2476 | }; |
2477 | |
2478 | void G1CollectedHeap::print_cset_rsets() { |
2479 | PrintRSetsClosure cl("Printing CSet RSets" ); |
2480 | collection_set_iterate_all(&cl); |
2481 | } |
2482 | |
2483 | void G1CollectedHeap::print_all_rsets() { |
2484 | PrintRSetsClosure cl("Printing All RSets" );; |
2485 | heap_region_iterate(&cl); |
2486 | } |
2487 | #endif // PRODUCT |
2488 | |
2489 | G1HeapSummary G1CollectedHeap::create_g1_heap_summary() { |
2490 | |
2491 | size_t eden_used_bytes = _eden.used_bytes(); |
2492 | size_t survivor_used_bytes = _survivor.used_bytes(); |
2493 | size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked(); |
2494 | |
2495 | size_t eden_capacity_bytes = |
2496 | (policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes; |
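// For instance (illustrative numbers): a young target of 10 regions of 8M
// each gives 80M; with 16M used by survivors the reported eden capacity is
// 80M - 16M = 64M.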
2497 | |
2498 | VirtualSpaceSummary heap_summary = create_heap_space_summary(); |
2499 | return G1HeapSummary(heap_summary, heap_used, eden_used_bytes, |
2500 | eden_capacity_bytes, survivor_used_bytes, num_regions()); |
2501 | } |
2502 | |
2503 | G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) { |
2504 | return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(), |
2505 | stats->unused(), stats->used(), stats->region_end_waste(), |
2506 | stats->regions_filled(), stats->direct_allocated(), |
2507 | stats->failure_used(), stats->failure_waste()); |
2508 | } |
2509 | |
2510 | void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) { |
2511 | const G1HeapSummary& heap_summary = create_g1_heap_summary(); |
2512 | gc_tracer->report_gc_heap_summary(when, heap_summary); |
2513 | |
2514 | const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); |
2515 | gc_tracer->report_metaspace_summary(when, metaspace_summary); |
2516 | } |
2517 | |
2518 | G1CollectedHeap* G1CollectedHeap::heap() { |
2519 | CollectedHeap* heap = Universe::heap(); |
assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
assert(heap->kind() == CollectedHeap::G1, "Invalid name");
2522 | return (G1CollectedHeap*)heap; |
2523 | } |
2524 | |
2525 | void G1CollectedHeap::gc_prologue(bool full) { |
2526 | // always_do_update_barrier = false; |
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2528 | |
2529 | // This summary needs to be printed before incrementing total collections. |
2530 | rem_set()->print_periodic_summary_info("Before GC RS summary" , total_collections()); |
2531 | |
2532 | // Update common counters. |
2533 | increment_total_collections(full /* full gc */); |
2534 | if (full || collector_state()->in_initial_mark_gc()) { |
2535 | increment_old_marking_cycles_started(); |
2536 | } |
2537 | |
2538 | // Fill TLAB's and such |
2539 | double start = os::elapsedTime(); |
2540 | ensure_parsability(true); |
2541 | phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0); |
2542 | } |
2543 | |
2544 | void G1CollectedHeap::gc_epilogue(bool full) { |
2545 | // Update common counters. |
2546 | if (full) { |
2547 | // Update the number of full collections that have been completed. |
2548 | increment_old_marking_cycles_completed(false /* concurrent */); |
2549 | } |
2550 | |
2551 | // We are at the end of the GC. Total collections has already been increased. |
2552 | rem_set()->print_periodic_summary_info("After GC RS summary" , total_collections() - 1); |
2553 | |
2554 | // FIXME: what is this about? |
2555 | // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" |
2556 | // is set. |
2557 | #if COMPILER2_OR_JVMCI |
assert(DerivedPointerTable::is_empty(), "derived pointer present");
2559 | #endif |
2560 | // always_do_update_barrier = true; |
2561 | |
2562 | double start = os::elapsedTime(); |
2563 | resize_all_tlabs(); |
2564 | phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0); |
2565 | |
2566 | MemoryService::track_memory_usage(); |
2567 | // We have just completed a GC. Update the soft reference |
2568 | // policy with the new heap occupancy |
2569 | Universe::update_heap_info_at_gc(); |
2570 | } |
2571 | |
2572 | HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
2573 | uint gc_count_before, |
2574 | bool* succeeded, |
2575 | GCCause::Cause gc_cause) { |
2576 | assert_heap_not_locked_and_not_at_safepoint(); |
2577 | VM_G1CollectForAllocation op(word_size, |
2578 | gc_count_before, |
2579 | gc_cause, |
2580 | false, /* should_initiate_conc_mark */ |
2581 | policy()->max_pause_time_ms()); |
2582 | VMThread::execute(&op); |
2583 | |
2584 | HeapWord* result = op.result(); |
2585 | bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded(); |
2586 | assert(result == NULL || ret_succeeded, |
2587 | "the result should be NULL if the VM did not succeed" ); |
2588 | *succeeded = ret_succeeded; |
2589 | |
2590 | assert_heap_not_locked(); |
2591 | return result; |
2592 | } |
2593 | |
2594 | void G1CollectedHeap::do_concurrent_mark() { |
2595 | MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag); |
2596 | if (!_cm_thread->in_progress()) { |
2597 | _cm_thread->set_started(); |
2598 | CGC_lock->notify(); |
2599 | } |
2600 | } |
2601 | |
2602 | size_t G1CollectedHeap::pending_card_num() { |
2603 | struct CountCardsClosure : public ThreadClosure { |
2604 | size_t _cards; |
2605 | CountCardsClosure() : _cards(0) {} |
2606 | virtual void do_thread(Thread* t) { |
2607 | _cards += G1ThreadLocalData::dirty_card_queue(t).size(); |
2608 | } |
2609 | } count_from_threads; |
2610 | Threads::threads_do(&count_from_threads); |
2611 | |
2612 | G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); |
2613 | size_t buffer_size = dcqs.buffer_size(); |
2614 | size_t buffer_num = dcqs.completed_buffers_num(); |
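// Illustrative count (assuming 256-card buffers): with 10 completed buffers
// and 300 cards still sitting in per-thread queues, the result below is
// 256 * 10 + 300 = 2860 pending cards.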
2615 | |
2616 | return buffer_size * buffer_num + count_from_threads._cards; |
2617 | } |
2618 | |
2619 | bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const { |
2620 | // We don't nominate objects with many remembered set entries, on |
2621 | // the assumption that such objects are likely still live. |
2622 | HeapRegionRemSet* rem_set = r->rem_set(); |
2623 | |
2624 | return G1EagerReclaimHumongousObjectsWithStaleRefs ? |
2625 | rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) : |
2626 | G1EagerReclaimHumongousObjects && rem_set->is_empty(); |
2627 | } |
2628 | |
2629 | class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure { |
2630 | private: |
2631 | size_t _total_humongous; |
2632 | size_t _candidate_humongous; |
2633 | |
2634 | G1DirtyCardQueue _dcq; |
2635 | |
2636 | bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const { |
    assert(region->is_starts_humongous(), "Must start a humongous object");
2638 | |
2639 | oop obj = oop(region->bottom()); |
2640 | |
2641 | // Dead objects cannot be eager reclaim candidates. Due to class |
2642 | // unloading it is unsafe to query their classes so we return early. |
2643 | if (g1h->is_obj_dead(obj, region)) { |
2644 | return false; |
2645 | } |
2646 | |
    // If we do not have a complete remembered set for the region, then we
    // cannot be sure that we have all references to it.
2649 | if (!region->rem_set()->is_complete()) { |
2650 | return false; |
2651 | } |
2652 | // Candidate selection must satisfy the following constraints |
2653 | // while concurrent marking is in progress: |
2654 | // |
2655 | // * In order to maintain SATB invariants, an object must not be |
2656 | // reclaimed if it was allocated before the start of marking and |
2657 | // has not had its references scanned. Such an object must have |
2658 | // its references (including type metadata) scanned to ensure no |
2659 | // live objects are missed by the marking process. Objects |
2660 | // allocated after the start of concurrent marking don't need to |
2661 | // be scanned. |
2662 | // |
2663 | // * An object must not be reclaimed if it is on the concurrent |
2664 | // mark stack. Objects allocated after the start of concurrent |
2665 | // marking are never pushed on the mark stack. |
2666 | // |
2667 | // Nominating only objects allocated after the start of concurrent |
2668 | // marking is sufficient to meet both constraints. This may miss |
2669 | // some objects that satisfy the constraints, but the marking data |
2670 | // structures don't support efficiently performing the needed |
2671 | // additional tests or scrubbing of the mark stack. |
2672 | // |
2673 | // However, we presently only nominate is_typeArray() objects. |
2674 | // A humongous object containing references induces remembered |
2675 | // set entries on other regions. In order to reclaim such an |
2676 | // object, those remembered sets would need to be cleaned up. |
2677 | // |
2678 | // We also treat is_typeArray() objects specially, allowing them |
2679 | // to be reclaimed even if allocated before the start of |
2680 | // concurrent mark. For this we rely on mark stack insertion to |
2681 | // exclude is_typeArray() objects, preventing reclaiming an object |
2682 | // that is in the mark stack. We also rely on the metadata for |
2683 | // such objects to be built-in and so ensured to be kept live. |
2684 | // Frequent allocation and drop of large binary blobs is an |
2685 | // important use case for eager reclaim, and this special handling |
2686 | // may reduce needed headroom. |
2687 | |
2688 | return obj->is_typeArray() && |
2689 | g1h->is_potential_eager_reclaim_candidate(region); |
2690 | } |
2691 | |
2692 | public: |
2693 | RegisterRegionsWithRegionAttrTableClosure() |
2694 | : _total_humongous(0), |
2695 | _candidate_humongous(0), |
2696 | _dcq(&G1BarrierSet::dirty_card_queue_set()) { |
2697 | } |
2698 | |
2699 | virtual bool do_heap_region(HeapRegion* r) { |
2700 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
2701 | |
2702 | if (!r->is_starts_humongous()) { |
2703 | g1h->register_region_with_region_attr(r); |
2704 | return false; |
2705 | } |
2706 | |
2707 | bool is_candidate = humongous_region_is_candidate(g1h, r); |
2708 | uint rindex = r->hrm_index(); |
2709 | g1h->set_humongous_reclaim_candidate(rindex, is_candidate); |
2710 | if (is_candidate) { |
2711 | _candidate_humongous++; |
2712 | g1h->register_humongous_region_with_region_attr(rindex); |
      // is_candidate already filters out humongous objects with large remembered sets.
      // If we have a humongous object with a few remembered set entries, we simply
      // flush these entries into the DCQS. That will result in automatic
      // re-evaluation of them during the following evacuation phase.
2718 | if (!r->rem_set()->is_empty()) { |
        guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                  "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2721 | G1CardTable* ct = g1h->card_table(); |
2722 | HeapRegionRemSetIterator hrrs(r->rem_set()); |
2723 | size_t card_index; |
2724 | while (hrrs.has_next(card_index)) { |
2725 | CardTable::CardValue* card_ptr = ct->byte_for_index(card_index); |
2726 | // The remembered set might contain references to already freed |
2727 | // regions. Filter out such entries to avoid failing card table |
2728 | // verification. |
2729 | if (g1h->is_in(ct->addr_for(card_ptr))) { |
2730 | if (*card_ptr != G1CardTable::dirty_card_val()) { |
2731 | *card_ptr = G1CardTable::dirty_card_val(); |
2732 | _dcq.enqueue(card_ptr); |
2733 | } |
2734 | } |
2735 | } |
        assert(hrrs.n_yielded() == r->rem_set()->occupied(),
               "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
               hrrs.n_yielded(), r->rem_set()->occupied());
        // We should only clear the card based remembered set here as we will not
        // implicitly rebuild anything else during eager reclaim. Note that at the moment
        // (and probably never) we do not enter this path if there are other kinds of
        // remembered sets for this region.
        r->rem_set()->clear_locked(true /* only_cardset */);
        // clear_locked() above sets the state to Empty. However we want to continue
        // collecting remembered set entries for humongous regions that were not
        // reclaimed.
        r->rem_set()->set_state_complete();
2748 | #ifdef ASSERT |
2749 | G1HeapRegionAttr region_attr = g1h->region_attr(oop(r->bottom())); |
        assert(region_attr.needs_remset_update(), "must be");
2751 | #endif |
2752 | } |
      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2754 | } else { |
2755 | g1h->register_region_with_region_attr(r); |
2756 | } |
2757 | _total_humongous++; |
2758 | |
2759 | return false; |
2760 | } |
2761 | |
2762 | size_t total_humongous() const { return _total_humongous; } |
2763 | size_t candidate_humongous() const { return _candidate_humongous; } |
2764 | |
2765 | void flush_rem_set_entries() { _dcq.flush(); } |
2766 | }; |
2767 | |
2768 | void G1CollectedHeap::register_regions_with_region_attr() { |
2769 | Ticks start = Ticks::now(); |
2770 | |
2771 | RegisterRegionsWithRegionAttrTableClosure cl; |
2772 | heap_region_iterate(&cl); |
2773 | |
2774 | phase_times()->record_register_regions((Ticks::now() - start).seconds() * 1000.0, |
2775 | cl.total_humongous(), |
2776 | cl.candidate_humongous()); |
2777 | _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0; |
2778 | |
2779 | // Finally flush all remembered set entries to re-check into the global DCQS. |
2780 | cl.flush_rem_set_entries(); |
2781 | } |
2782 | |
2783 | #ifndef PRODUCT |
2784 | void G1CollectedHeap::verify_region_attr_remset_update() { |
2785 | class VerifyRegionAttrRemSet : public HeapRegionClosure { |
2786 | public: |
2787 | virtual bool do_heap_region(HeapRegion* r) { |
2788 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
2789 | bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update(); |
      assert(r->rem_set()->is_tracked() == needs_remset_update,
             "Region %u remset tracking status (%s) different to region attribute (%s)",
             r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
2793 | return false; |
2794 | } |
2795 | } cl; |
2796 | heap_region_iterate(&cl); |
2797 | } |
2798 | #endif |
2799 | |
2800 | class VerifyRegionRemSetClosure : public HeapRegionClosure { |
2801 | public: |
2802 | bool do_heap_region(HeapRegion* hr) { |
2803 | if (!hr->is_archive() && !hr->is_continues_humongous()) { |
2804 | hr->verify_rem_set(); |
2805 | } |
2806 | return false; |
2807 | } |
2808 | }; |
2809 | |
2810 | uint G1CollectedHeap::num_task_queues() const { |
2811 | return _task_queues->size(); |
2812 | } |
2813 | |
2814 | #if TASKQUEUE_STATS |
2815 | void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { |
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
2819 | } |
2820 | |
2821 | void G1CollectedHeap::print_taskqueue_stats() const { |
2822 | if (!log_is_enabled(Trace, gc, task, stats)) { |
2823 | return; |
2824 | } |
2825 | Log(gc, task, stats) log; |
2826 | ResourceMark rm; |
2827 | LogStream ls(log.trace()); |
2828 | outputStream* st = &ls; |
2829 | |
2830 | print_taskqueue_stats_hdr(st); |
2831 | |
2832 | TaskQueueStats totals; |
2833 | const uint n = num_task_queues(); |
2834 | for (uint i = 0; i < n; ++i) { |
    st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
2836 | totals += task_queue(i)->stats; |
2837 | } |
  st->print_raw("tot "); totals.print(st); st->cr();
2839 | |
2840 | DEBUG_ONLY(totals.verify()); |
2841 | } |
2842 | |
2843 | void G1CollectedHeap::reset_taskqueue_stats() { |
2844 | const uint n = num_task_queues(); |
2845 | for (uint i = 0; i < n; ++i) { |
2846 | task_queue(i)->stats.reset(); |
2847 | } |
2848 | } |
2849 | #endif // TASKQUEUE_STATS |
2850 | |
2851 | void G1CollectedHeap::wait_for_root_region_scanning() { |
2852 | double scan_wait_start = os::elapsedTime(); |
2853 | // We have to wait until the CM threads finish scanning the |
2854 | // root regions as it's the only way to ensure that all the |
2855 | // objects on them have been correctly scanned before we start |
2856 | // moving them during the GC. |
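  // This wait is expected to be rare and short: the time we do spend waiting,
  // if any, is reported as the root region scan wait time below.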
2857 | bool waited = _cm->root_regions()->wait_until_scan_finished(); |
2858 | double wait_time_ms = 0.0; |
2859 | if (waited) { |
2860 | double scan_wait_end = os::elapsedTime(); |
2861 | wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0; |
2862 | } |
2863 | phase_times()->record_root_region_scan_wait_time(wait_time_ms); |
2864 | } |
2865 | |
2866 | class G1PrintCollectionSetClosure : public HeapRegionClosure { |
2867 | private: |
2868 | G1HRPrinter* _hr_printer; |
2869 | public: |
2870 | G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { } |
2871 | |
2872 | virtual bool do_heap_region(HeapRegion* r) { |
2873 | _hr_printer->cset(r); |
2874 | return false; |
2875 | } |
2876 | }; |
2877 | |
2878 | void G1CollectedHeap::start_new_collection_set() { |
2879 | double start = os::elapsedTime(); |
2880 | |
2881 | collection_set()->start_incremental_building(); |
2882 | |
2883 | clear_region_attr(); |
2884 | |
  guarantee(_eden.length() == 0, "eden should have been cleared");
2886 | policy()->transfer_survivors_to_cset(survivor()); |
2887 | |
  // We redo the verification but now wrt the new CSet which
  // has just been initialized after the previous CSet was freed.
2890 | _cm->verify_no_collection_set_oops(); |
2891 | |
2892 | phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0); |
2893 | } |
2894 | |
void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
  _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
2898 | evacuation_info.set_collectionset_regions(collection_set()->region_length() + |
2899 | collection_set()->optional_region_length()); |
2900 | |
2901 | _cm->verify_no_collection_set_oops(); |
2902 | |
2903 | if (_hr_printer.is_active()) { |
2904 | G1PrintCollectionSetClosure cl(&_hr_printer); |
2905 | _collection_set.iterate(&cl); |
2906 | _collection_set.iterate_optional(&cl); |
2907 | } |
2908 | } |
2909 | |
2910 | G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const { |
2911 | if (collector_state()->in_initial_mark_gc()) { |
2912 | return G1HeapVerifier::G1VerifyConcurrentStart; |
2913 | } else if (collector_state()->in_young_only_phase()) { |
2914 | return G1HeapVerifier::G1VerifyYoungNormal; |
2915 | } else { |
2916 | return G1HeapVerifier::G1VerifyMixed; |
2917 | } |
2918 | } |
2919 | |
2920 | void G1CollectedHeap::verify_before_young_collection(G1HeapVerifier::G1VerifyType type) { |
  if (VerifyRememberedSets) {
    log_info(gc, verify)("[Verifying RemSets before GC]");
    VerifyRegionRemSetClosure v_cl;
    heap_region_iterate(&v_cl);
  }
  _verifier->verify_before_gc(type);
  _verifier->check_bitmaps("GC Start");
2928 | } |
2929 | |
2930 | void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) { |
  if (VerifyRememberedSets) {
    log_info(gc, verify)("[Verifying RemSets after GC]");
    VerifyRegionRemSetClosure v_cl;
    heap_region_iterate(&v_cl);
  }
  _verifier->verify_after_gc(type);
  _verifier->check_bitmaps("GC End");
2938 | } |
2939 | |
void G1CollectedHeap::expand_heap_after_young_collection() {
  size_t expand_bytes = _heap_sizing_policy->expansion_amount();
  if (expand_bytes > 0) {
    // No need for ergo logging here,
    // expansion_amount() does this when it returns a value > 0.
2945 | double expand_ms; |
2946 | if (!expand(expand_bytes, _workers, &expand_ms)) { |
2947 | // We failed to expand the heap. Cannot do anything about it. |
2948 | } |
2949 | phase_times()->record_expand_heap_time(expand_ms); |
2950 | } |
2951 | } |
2952 | |
2953 | const char* G1CollectedHeap::young_gc_name() const { |
  if (collector_state()->in_initial_mark_gc()) {
    return "Pause Young (Concurrent Start)";
  } else if (collector_state()->in_young_only_phase()) {
    if (collector_state()->in_young_gc_before_mixed()) {
      return "Pause Young (Prepare Mixed)";
    } else {
      return "Pause Young (Normal)";
    }
  } else {
    return "Pause Young (Mixed)";
  }
2965 | } |
2966 | |
2967 | bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
2968 | assert_at_safepoint_on_vm_thread(); |
  guarantee(!is_gc_active(), "collection is not reentrant");
2970 | |
2971 | if (GCLocker::check_active_before_gc()) { |
2972 | return false; |
2973 | } |
2974 | |
2975 | GCIdMark gc_id_mark; |
2976 | |
2977 | SvcGCMarker sgcm(SvcGCMarker::MINOR); |
2978 | ResourceMark rm; |
2979 | |
2980 | policy()->note_gc_start(); |
2981 | |
2982 | _gc_timer_stw->register_gc_start(); |
2983 | _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); |
2984 | |
2985 | wait_for_root_region_scanning(); |
2986 | |
2987 | print_heap_before_gc(); |
2988 | print_heap_regions(); |
2989 | trace_heap_before_gc(_gc_tracer_stw); |
2990 | |
2991 | _verifier->verify_region_sets_optional(); |
2992 | _verifier->verify_dirty_young_regions(); |
2993 | |
2994 | // We should not be doing initial mark unless the conc mark thread is running |
2995 | if (!_cm_thread->should_terminate()) { |
2996 | // This call will decide whether this pause is an initial-mark |
2997 | // pause. If it is, in_initial_mark_gc() will return true |
2998 | // for the duration of this pause. |
2999 | policy()->decide_on_conc_mark_initiation(); |
3000 | } |
3001 | |
3002 | // We do not allow initial-mark to be piggy-backed on a mixed GC. |
  assert(!collector_state()->in_initial_mark_gc() ||
         collector_state()->in_young_only_phase(), "sanity");
  // We also do not allow mixed GCs during marking.
  assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
3007 | |
  // Record whether this pause is an initial mark. We need a local copy
  // because, by the time the current thread has completed its logging output
  // and it is safe to signal the CM thread, the flag's value in the policy
  // has been reset.
3011 | bool should_start_conc_mark = collector_state()->in_initial_mark_gc(); |
3012 | if (should_start_conc_mark) { |
3013 | _cm->gc_tracer_cm()->set_gc_cause(gc_cause()); |
3014 | } |
3015 | |
3016 | // Inner scope for scope based logging, timers, and stats collection |
3017 | { |
3018 | G1EvacuationInfo evacuation_info; |
3019 | |
3020 | _gc_tracer_stw->report_yc_type(collector_state()->yc_type()); |
3021 | |
3022 | GCTraceCPUTime tcpu; |
3023 | |
3024 | GCTraceTime(Info, gc) tm(young_gc_name(), NULL, gc_cause(), true); |
3025 | |
3026 | uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(), |
3027 | workers()->active_workers(), |
3028 | Threads::number_of_non_daemon_threads()); |
3029 | active_workers = workers()->update_active_workers(active_workers); |
    log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
3031 | |
3032 | G1MonitoringScope ms(g1mm(), |
3033 | false /* full_gc */, |
3034 | collector_state()->yc_type() == Mixed /* all_memory_pools_affected */); |
3035 | |
3036 | G1HeapTransition heap_transition(this); |
3037 | size_t heap_used_bytes_before_gc = used(); |
3038 | |
3039 | { |
3040 | IsGCActiveMark x; |
3041 | |
3042 | gc_prologue(false); |
3043 | |
3044 | G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type(); |
3045 | verify_before_young_collection(verify_type); |
3046 | |
3047 | { |
      // The start time recorded below deliberately excludes the time taken
      // by the possible verification above.
3050 | double sample_start_time_sec = os::elapsedTime(); |
3051 | |
3052 | // Please see comment in g1CollectedHeap.hpp and |
3053 | // G1CollectedHeap::ref_processing_init() to see how |
3054 | // reference processing currently works in G1. |
3055 | _ref_processor_stw->enable_discovery(); |
3056 | |
      // We want to temporarily turn off discovery by the
      // CM ref processor, if necessary, and turn it back on
      // again later if we do. Using a scoped
      // NoRefDiscovery object will do this.
3061 | NoRefDiscovery no_cm_discovery(_ref_processor_cm); |
3062 | |
3063 | policy()->record_collection_pause_start(sample_start_time_sec); |
3064 | |
3065 | // Forget the current allocation region (we might even choose it to be part |
3066 | // of the collection set!). |
3067 | _allocator->release_mutator_alloc_region(); |
3068 | |
3069 | calculate_collection_set(evacuation_info, target_pause_time_ms); |
3070 | |
3071 | G1ParScanThreadStateSet per_thread_states(this, |
3072 | workers()->active_workers(), |
3073 | collection_set()->young_region_length(), |
3074 | collection_set()->optional_region_length()); |
3075 | pre_evacuate_collection_set(evacuation_info); |
3076 | |
3077 | // Actually do the work... |
3078 | evacuate_initial_collection_set(&per_thread_states); |
3079 | |
3080 | if (_collection_set.optional_region_length() != 0) { |
3081 | evacuate_optional_collection_set(&per_thread_states); |
3082 | } |
3083 | post_evacuate_collection_set(evacuation_info, &per_thread_states); |
3084 | |
3085 | start_new_collection_set(); |
3086 | |
3087 | _survivor_evac_stats.adjust_desired_plab_sz(); |
3088 | _old_evac_stats.adjust_desired_plab_sz(); |
3089 | |
3090 | if (should_start_conc_mark) { |
3091 | // We have to do this before we notify the CM threads that |
3092 | // they can start working to make sure that all the |
3093 | // appropriate initialization is done on the CM object. |
3094 | concurrent_mark()->post_initial_mark(); |
3095 | // Note that we don't actually trigger the CM thread at |
3096 | // this point. We do that later when we're sure that |
3097 | // the current thread has completed its logging output. |
3098 | } |
3099 | |
3100 | allocate_dummy_regions(); |
3101 | |
3102 | _allocator->init_mutator_alloc_region(); |
3103 | |
3104 | expand_heap_after_young_collection(); |
3105 | |
3106 | double sample_end_time_sec = os::elapsedTime(); |
3107 | double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; |
3108 | size_t total_cards_scanned = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards) + |
3109 | phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::ScanRSScannedCards); |
3110 | policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc); |
3111 | } |
3112 | |
3113 | verify_after_young_collection(verify_type); |
3114 | |
3115 | #ifdef TRACESPINNING |
3116 | ParallelTaskTerminator::print_termination_counts(); |
3117 | #endif |
3118 | |
3119 | gc_epilogue(false); |
3120 | } |
3121 | |
3122 | // Print the remainder of the GC log output. |
3123 | if (evacuation_failed()) { |
      log_info(gc)("To-space exhausted");
3125 | } |
3126 | |
3127 | policy()->print_phases(); |
3128 | heap_transition.print(); |
3129 | |
3130 | _hrm->verify_optional(); |
3131 | _verifier->verify_region_sets_optional(); |
3132 | |
3133 | TASKQUEUE_STATS_ONLY(print_taskqueue_stats()); |
3134 | TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); |
3135 | |
3136 | print_heap_after_gc(); |
3137 | print_heap_regions(); |
3138 | trace_heap_after_gc(_gc_tracer_stw); |
3139 | |
3140 | // We must call G1MonitoringSupport::update_sizes() in the same scoping level |
3141 | // as an active TraceMemoryManagerStats object (i.e. before the destructor for the |
3142 | // TraceMemoryManagerStats is called) so that the G1 memory pools are updated |
3143 | // before any GC notifications are raised. |
3144 | g1mm()->update_sizes(); |
3145 | |
3146 | _gc_tracer_stw->report_evacuation_info(&evacuation_info); |
3147 | _gc_tracer_stw->report_tenuring_threshold(_policy->tenuring_threshold()); |
3148 | _gc_timer_stw->register_gc_end(); |
3149 | _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); |
3150 | } |
3151 | // It should now be safe to tell the concurrent mark thread to start |
3152 | // without its logging output interfering with the logging output |
3153 | // that came from the pause. |
3154 | |
3155 | if (should_start_conc_mark) { |
    // CAUTION: after the do_concurrent_mark() call below, the concurrent marking
3157 | // thread(s) could be running concurrently with us. Make sure that anything |
3158 | // after this point does not assume that we are the only GC thread running. |
3159 | // Note: of course, the actual marking work will not start until the safepoint |
3160 | // itself is released in SuspendibleThreadSet::desynchronize(). |
3161 | do_concurrent_mark(); |
3162 | } |
3163 | |
3164 | return true; |
3165 | } |
3166 | |
3167 | void G1CollectedHeap::remove_self_forwarding_pointers() { |
3168 | G1ParRemoveSelfForwardPtrsTask rsfp_task; |
3169 | workers()->run_task(&rsfp_task); |
3170 | } |
3171 | |
3172 | void G1CollectedHeap::restore_after_evac_failure() { |
3173 | double remove_self_forwards_start = os::elapsedTime(); |
3174 | |
3175 | remove_self_forwarding_pointers(); |
3176 | SharedRestorePreservedMarksTaskExecutor task_executor(workers()); |
3177 | _preserved_marks_set.restore(&task_executor); |
3178 | |
3179 | phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0); |
3180 | } |
3181 | |
3182 | void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) { |
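  // Called by multiple workers while handling evacuation failures. The
  // per-worker data structures below need no synchronization; the shared
  // _evacuation_failed flag is only ever set (never cleared) during the
  // pause, so the unsynchronized check-then-set is benign.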
3183 | if (!_evacuation_failed) { |
3184 | _evacuation_failed = true; |
3185 | } |
3186 | |
3187 | _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size()); |
3188 | _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m); |
3189 | } |
3190 | |
3191 | bool G1ParEvacuateFollowersClosure::offer_termination() { |
3192 | EventGCPhaseParallel event; |
3193 | G1ParScanThreadState* const pss = par_scan_state(); |
3194 | start_term_time(); |
3195 | const bool res = terminator()->offer_termination(); |
3196 | end_term_time(); |
3197 | event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination)); |
3198 | return res; |
3199 | } |
3200 | |
3201 | void G1ParEvacuateFollowersClosure::do_void() { |
3202 | EventGCPhaseParallel event; |
3203 | G1ParScanThreadState* const pss = par_scan_state(); |
3204 | pss->trim_queue(); |
3205 | event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase)); |
3206 | do { |
3207 | EventGCPhaseParallel event; |
3208 | pss->steal_and_trim_queue(queues()); |
3209 | event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase)); |
3210 | } while (!offer_termination()); |
3211 | } |
3212 | |
3213 | void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive, |
3214 | bool class_unloading_occurred) { |
3215 | uint num_workers = workers()->active_workers(); |
3216 | ParallelCleaningTask unlink_task(is_alive, num_workers, class_unloading_occurred, false); |
3217 | workers()->run_task(&unlink_task); |
3218 | } |
3219 | |
3220 | // Clean string dedup data structures. |
// Ideally we would use a StringDedupCleaningTask here, but we want to
3222 | // record the durations of the phases. Hence the almost-copy. |
3223 | class G1StringDedupCleaningTask : public AbstractGangTask { |
3224 | BoolObjectClosure* _is_alive; |
3225 | OopClosure* _keep_alive; |
3226 | G1GCPhaseTimes* _phase_times; |
3227 | |
3228 | public: |
3229 | G1StringDedupCleaningTask(BoolObjectClosure* is_alive, |
3230 | OopClosure* keep_alive, |
3231 | G1GCPhaseTimes* phase_times) : |
    AbstractGangTask("Partial Cleaning Task"),
3233 | _is_alive(is_alive), |
3234 | _keep_alive(keep_alive), |
3235 | _phase_times(phase_times) |
3236 | { |
    assert(G1StringDedup::is_enabled(), "String deduplication disabled.");
3238 | StringDedup::gc_prologue(true); |
3239 | } |
3240 | |
3241 | ~G1StringDedupCleaningTask() { |
3242 | StringDedup::gc_epilogue(); |
3243 | } |
3244 | |
3245 | void work(uint worker_id) { |
3246 | StringDedupUnlinkOrOopsDoClosure cl(_is_alive, _keep_alive); |
3247 | { |
3248 | G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupQueueFixup, worker_id); |
3249 | StringDedupQueue::unlink_or_oops_do(&cl); |
3250 | } |
3251 | { |
3252 | G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id); |
3253 | StringDedupTable::unlink_or_oops_do(&cl, worker_id); |
3254 | } |
3255 | } |
3256 | }; |
3257 | |
3258 | void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive, |
3259 | OopClosure* keep_alive, |
3260 | G1GCPhaseTimes* phase_times) { |
3261 | G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times); |
3262 | workers()->run_task(&cl); |
3263 | } |
3264 | |
3265 | class G1RedirtyLoggedCardsTask : public AbstractGangTask { |
3266 | private: |
3267 | G1DirtyCardQueueSet* _queue; |
3268 | G1CollectedHeap* _g1h; |
3269 | public: |
  G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
    _queue(queue), _g1h(g1h) { }
3272 | |
3273 | virtual void work(uint worker_id) { |
3274 | G1GCPhaseTimes* p = _g1h->phase_times(); |
3275 | G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::RedirtyCards, worker_id); |
3276 | |
3277 | RedirtyLoggedCardTableEntryClosure cl(_g1h); |
3278 | _queue->par_apply_closure_to_all_completed_buffers(&cl); |
3279 | |
3280 | p->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied()); |
3281 | } |
3282 | }; |
3283 | |
3284 | void G1CollectedHeap::redirty_logged_cards() { |
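  // Cards enqueued into the pause-local dirty card queue set during evacuation
  // are dirtied on the card table again and their buffers merged back into the
  // global queue set, so that concurrent refinement re-examines them after the
  // pause.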
3285 | double redirty_logged_cards_start = os::elapsedTime(); |
3286 | |
3287 | G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this); |
3288 | dirty_card_queue_set().reset_for_par_iteration(); |
3289 | workers()->run_task(&redirty_task); |
3290 | |
3291 | G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set(); |
3292 | dcq.merge_bufferlists(&dirty_card_queue_set()); |
  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3294 | |
3295 | phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0); |
3296 | } |
3297 | |
3298 | // Weak Reference Processing support |
3299 | |
3300 | bool G1STWIsAliveClosure::do_object_b(oop p) { |
3301 | // An object is reachable if it is outside the collection set, |
3302 | // or is inside and copied. |
3303 | return !_g1h->is_in_cset(p) || p->is_forwarded(); |
3304 | } |
3305 | |
3306 | bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) { |
  assert(obj != NULL, "must not be NULL");
  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3309 | // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below |
3310 | // may falsely indicate that this is not the case here: however the collection set only |
3311 | // contains old regions when concurrent mark is not running. |
3312 | return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor(); |
3313 | } |
3314 | |
3315 | // Non Copying Keep Alive closure |
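// Unlike the copying keep alive closure below, this closure does not copy
// anything: it assumes evacuation has already completed, so a live object in
// the collection set must have been forwarded and only the reference needs
// updating, while a live humongous object merely needs to be recorded as live.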
class G1KeepAliveClosure: public OopClosure {
  G1CollectedHeap* _g1h;
public:
  G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop(oop* p) {
    oop obj = *p;
    assert(obj != NULL, "the caller should have filtered out NULL values");

    const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
    if (!region_attr.is_in_cset_or_humongous()) {
      return;
    }
    if (region_attr.is_in_cset()) {
      assert(obj->is_forwarded(), "invariant");
      *p = obj->forwardee();
    } else {
      assert(!obj->is_forwarded(), "invariant");
      assert(region_attr.is_humongous(),
             "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
      _g1h->set_humongous_is_live(obj);
    }
  }
};
3340 | |
3341 | // Copying Keep Alive closure - can be called from both |
3342 | // serial and parallel code as long as different worker |
3343 | // threads utilize different G1ParScanThreadState instances |
3344 | // and different queues. |
3345 | |
3346 | class G1CopyingKeepAliveClosure: public OopClosure { |
3347 | G1CollectedHeap* _g1h; |
3348 | G1ParScanThreadState* _par_scan_state; |
3349 | |
3350 | public: |
3351 | G1CopyingKeepAliveClosure(G1CollectedHeap* g1h, |
3352 | G1ParScanThreadState* pss): |
3353 | _g1h(g1h), |
3354 | _par_scan_state(pss) |
3355 | {} |
3356 | |
3357 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
3358 | virtual void do_oop( oop* p) { do_oop_work(p); } |
3359 | |
3360 | template <class T> void do_oop_work(T* p) { |
3361 | oop obj = RawAccess<>::oop_load(p); |
3362 | |
3363 | if (_g1h->is_in_cset_or_humongous(obj)) { |
3364 | // If the referent object has been forwarded (either copied |
3365 | // to a new location or to itself in the event of an |
3366 | // evacuation failure) then we need to update the reference |
3367 | // field and, if both reference and referent are in the G1 |
3368 | // heap, update the RSet for the referent. |
3369 | // |
      // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
      //
      // When the queue is drained (after each phase of reference processing)
      // the object and its followers will be copied, the reference field set
      // to point to the new location, and the RSet updated.
3376 | _par_scan_state->push_on_queue(p); |
3377 | } |
3378 | } |
3379 | }; |
3380 | |
3381 | // Serial drain queue closure. Called as the 'complete_gc' |
3382 | // closure for each discovered list in some of the |
3383 | // reference processing phases. |
3384 | |
3385 | class G1STWDrainQueueClosure: public VoidClosure { |
3386 | protected: |
3387 | G1CollectedHeap* _g1h; |
3388 | G1ParScanThreadState* _par_scan_state; |
3389 | |
3390 | G1ParScanThreadState* par_scan_state() { return _par_scan_state; } |
3391 | |
3392 | public: |
3393 | G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : |
3394 | _g1h(g1h), |
3395 | _par_scan_state(pss) |
3396 | { } |
3397 | |
3398 | void do_void() { |
3399 | G1ParScanThreadState* const pss = par_scan_state(); |
3400 | pss->trim_queue(); |
3401 | } |
3402 | }; |
3403 | |
3404 | // Parallel Reference Processing closures |
3405 | |
3406 | // Implementation of AbstractRefProcTaskExecutor for parallel reference |
3407 | // processing during G1 evacuation pauses. |
3408 | |
3409 | class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor { |
3410 | private: |
3411 | G1CollectedHeap* _g1h; |
3412 | G1ParScanThreadStateSet* _pss; |
3413 | RefToScanQueueSet* _queues; |
3414 | WorkGang* _workers; |
3415 | |
3416 | public: |
3417 | G1STWRefProcTaskExecutor(G1CollectedHeap* g1h, |
3418 | G1ParScanThreadStateSet* per_thread_states, |
3419 | WorkGang* workers, |
3420 | RefToScanQueueSet *task_queues) : |
3421 | _g1h(g1h), |
3422 | _pss(per_thread_states), |
3423 | _queues(task_queues), |
3424 | _workers(workers) |
3425 | { |
3426 | g1h->ref_processor_stw()->set_active_mt_degree(workers->active_workers()); |
3427 | } |
3428 | |
  // Executes the given task using the given WorkGang's worker threads.
3430 | virtual void execute(ProcessTask& task, uint ergo_workers); |
3431 | }; |
3432 | |
3433 | // Gang task for possibly parallel reference processing |
3434 | |
3435 | class G1STWRefProcTaskProxy: public AbstractGangTask { |
3436 | typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
3437 | ProcessTask& _proc_task; |
3438 | G1CollectedHeap* _g1h; |
3439 | G1ParScanThreadStateSet* _pss; |
3440 | RefToScanQueueSet* _task_queues; |
3441 | ParallelTaskTerminator* _terminator; |
3442 | |
3443 | public: |
3444 | G1STWRefProcTaskProxy(ProcessTask& proc_task, |
3445 | G1CollectedHeap* g1h, |
3446 | G1ParScanThreadStateSet* per_thread_states, |
3447 | RefToScanQueueSet *task_queues, |
3448 | ParallelTaskTerminator* terminator) : |
    AbstractGangTask("Process reference objects in parallel"),
3450 | _proc_task(proc_task), |
3451 | _g1h(g1h), |
3452 | _pss(per_thread_states), |
3453 | _task_queues(task_queues), |
3454 | _terminator(terminator) |
3455 | {} |
3456 | |
3457 | virtual void work(uint worker_id) { |
3458 | // The reference processing task executed by a single worker. |
3459 | ResourceMark rm; |
3460 | HandleMark hm; |
3461 | |
3462 | G1STWIsAliveClosure is_alive(_g1h); |
3463 | |
3464 | G1ParScanThreadState* pss = _pss->state_for_worker(worker_id); |
3465 | pss->set_ref_discoverer(NULL); |
3466 | |
3467 | // Keep alive closure. |
3468 | G1CopyingKeepAliveClosure keep_alive(_g1h, pss); |
3469 | |
3470 | // Complete GC closure |
3471 | G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator, G1GCPhaseTimes::ObjCopy); |
3472 | |
3473 | // Call the reference processing task's work routine. |
3474 | _proc_task.work(worker_id, is_alive, keep_alive, drain_queue); |
3475 | |
3476 | // Note we cannot assert that the refs array is empty here as not all |
3477 | // of the processing tasks (specifically phase2 - pp2_work) execute |
3478 | // the complete_gc closure (which ordinarily would drain the queue) so |
3479 | // the queue may not be empty. |
3480 | } |
3481 | }; |
3482 | |
3483 | // Driver routine for parallel reference processing. |
3484 | // Creates an instance of the ref processing gang |
3485 | // task and has the worker threads execute it. |
3486 | void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) { |
  assert(_workers != NULL, "Need parallel worker threads.");

  assert(_workers->active_workers() >= ergo_workers,
         "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
         ergo_workers, _workers->active_workers());
3492 | TaskTerminator terminator(ergo_workers, _queues); |
3493 | G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, terminator.terminator()); |
3494 | |
3495 | _workers->run_task(&proc_task_proxy, ergo_workers); |
3496 | } |
3497 | |
3498 | // End of weak reference support closures |
3499 | |
3500 | void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) { |
3501 | double ref_proc_start = os::elapsedTime(); |
3502 | |
3503 | ReferenceProcessor* rp = _ref_processor_stw; |
  assert(rp->discovery_enabled(), "should have been enabled");
3505 | |
3506 | // Closure to test whether a referent is alive. |
3507 | G1STWIsAliveClosure is_alive(this); |
3508 | |
  // Even when parallel reference processing is enabled, the processing
  // of JNI refs is performed serially by the current thread rather than
  // by a worker. The following PSS will be used for processing JNI refs.
3513 | |
3514 | // Use only a single queue for this PSS. |
3515 | G1ParScanThreadState* pss = per_thread_states->state_for_worker(0); |
3516 | pss->set_ref_discoverer(NULL); |
  assert(pss->queue_is_empty(), "pre-condition");
3518 | |
3519 | // Keep alive closure. |
3520 | G1CopyingKeepAliveClosure keep_alive(this, pss); |
3521 | |
3522 | // Serial Complete GC closure |
3523 | G1STWDrainQueueClosure drain_queue(this, pss); |
3524 | |
  // Set up the soft refs policy...
3526 | rp->setup_policy(false); |
3527 | |
3528 | ReferenceProcessorPhaseTimes* pt = phase_times()->ref_phase_times(); |
3529 | |
3530 | ReferenceProcessorStats stats; |
3531 | if (!rp->processing_is_mt()) { |
3532 | // Serial reference processing... |
3533 | stats = rp->process_discovered_references(&is_alive, |
3534 | &keep_alive, |
3535 | &drain_queue, |
3536 | NULL, |
3537 | pt); |
3538 | } else { |
3539 | uint no_of_gc_workers = workers()->active_workers(); |
3540 | |
3541 | // Parallel reference processing |
    assert(no_of_gc_workers <= rp->max_num_queues(),
           "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
           no_of_gc_workers, rp->max_num_queues());
3545 | |
3546 | G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues); |
3547 | stats = rp->process_discovered_references(&is_alive, |
3548 | &keep_alive, |
3549 | &drain_queue, |
3550 | &par_task_executor, |
3551 | pt); |
3552 | } |
3553 | |
3554 | _gc_tracer_stw->report_gc_reference_stats(stats); |
3555 | |
3556 | // We have completed copying any necessary live referent objects. |
  assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3558 | |
3559 | make_pending_list_reachable(); |
3560 | |
  assert(!rp->discovery_enabled(), "Postcondition");
3562 | rp->verify_no_references_recorded(); |
3563 | |
3564 | double ref_proc_time = os::elapsedTime() - ref_proc_start; |
3565 | phase_times()->record_ref_proc_time(ref_proc_time * 1000.0); |
3566 | } |
3567 | |
3568 | void G1CollectedHeap::make_pending_list_reachable() { |
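  // If this is an initial-mark pause, mark the head of the reference pending
  // list so that it, and transitively the rest of the list, is kept live
  // across the concurrent marking cycle.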
3569 | if (collector_state()->in_initial_mark_gc()) { |
3570 | oop pll_head = Universe::reference_pending_list(); |
3571 | if (pll_head != NULL) { |
3572 | // Any valid worker id is fine here as we are in the VM thread and single-threaded. |
3573 | _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head); |
3574 | } |
3575 | } |
3576 | } |
3577 | |
3578 | void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) { |
3579 | double merge_pss_time_start = os::elapsedTime(); |
3580 | per_thread_states->flush(); |
3581 | phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0); |
3582 | } |
3583 | |
3584 | void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info) { |
3585 | _expand_heap_after_alloc_failure = true; |
3586 | _evacuation_failed = false; |
3587 | |
3588 | // Disable the hot card cache. |
3589 | _hot_card_cache->reset_hot_cache_claimed_index(); |
3590 | _hot_card_cache->set_use_cache(false); |
3591 | |
3592 | // Initialize the GC alloc regions. |
3593 | _allocator->init_gc_alloc_regions(evacuation_info); |
3594 | |
3595 | register_regions_with_region_attr(); |
  assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3597 | |
3598 | rem_set()->prepare_for_scan_rem_set(); |
3599 | _preserved_marks_set.assert_empty(); |
3600 | |
3601 | #if COMPILER2_OR_JVMCI |
3602 | DerivedPointerTable::clear(); |
3603 | #endif |
3604 | |
3605 | // InitialMark needs claim bits to keep track of the marked-through CLDs. |
3606 | if (collector_state()->in_initial_mark_gc()) { |
3607 | concurrent_mark()->pre_initial_mark(); |
3608 | |
3609 | double start_clear_claimed_marks = os::elapsedTime(); |
3610 | |
3611 | ClassLoaderDataGraph::clear_claimed_marks(); |
3612 | |
3613 | double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0; |
3614 | phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms); |
3615 | } |
3616 | |
3617 | // Should G1EvacuationFailureALot be in effect for this GC? |
3618 | NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) |
3619 | |
  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
3621 | } |
3622 | |
3623 | class G1EvacuateRegionsBaseTask : public AbstractGangTask { |
3624 | protected: |
3625 | G1CollectedHeap* _g1h; |
3626 | G1ParScanThreadStateSet* _per_thread_states; |
3627 | RefToScanQueueSet* _task_queues; |
3628 | TaskTerminator _terminator; |
3629 | uint _num_workers; |
3630 | |
3631 | void evacuate_live_objects(G1ParScanThreadState* pss, |
3632 | uint worker_id, |
3633 | G1GCPhaseTimes::GCParPhases objcopy_phase, |
3634 | G1GCPhaseTimes::GCParPhases termination_phase) { |
3635 | G1GCPhaseTimes* p = _g1h->phase_times(); |
3636 | |
3637 | Ticks start = Ticks::now(); |
3638 | G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, _terminator.terminator(), objcopy_phase); |
3639 | cl.do_void(); |
3640 | |
    assert(pss->queue_is_empty(), "should be empty");
3642 | |
3643 | Tickspan evac_time = (Ticks::now() - start); |
3644 | p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time()); |
3645 | |
3646 | p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABWaste); |
3647 | p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_undo_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABUndoWaste); |
3648 | |
3649 | if (termination_phase == G1GCPhaseTimes::Termination) { |
3650 | p->record_time_secs(termination_phase, worker_id, cl.term_time()); |
3651 | p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts()); |
3652 | } else { |
3653 | p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time()); |
3654 | p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts()); |
3655 | } |
    assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation");
3657 | } |
3658 | |
3659 | virtual void start_work(uint worker_id) { } |
3660 | |
3661 | virtual void end_work(uint worker_id) { } |
3662 | |
3663 | virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0; |
3664 | |
3665 | virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0; |
3666 | |
3667 | public: |
3668 | G1EvacuateRegionsBaseTask(const char* name, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet* task_queues, uint num_workers) : |
3669 | AbstractGangTask(name), |
3670 | _g1h(G1CollectedHeap::heap()), |
3671 | _per_thread_states(per_thread_states), |
3672 | _task_queues(task_queues), |
3673 | _terminator(num_workers, _task_queues), |
3674 | _num_workers(num_workers) |
3675 | { } |
3676 | |
3677 | void work(uint worker_id) { |
3678 | start_work(worker_id); |
3679 | |
3680 | { |
3681 | ResourceMark rm; |
3682 | HandleMark hm; |
3683 | |
3684 | G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id); |
3685 | pss->set_ref_discoverer(_g1h->ref_processor_stw()); |
3686 | |
3687 | scan_roots(pss, worker_id); |
3688 | evacuate_live_objects(pss, worker_id); |
3689 | } |
3690 | |
3691 | end_work(worker_id); |
3692 | } |
3693 | }; |
3694 | |
3695 | class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask { |
3696 | G1RootProcessor* _root_processor; |
3697 | |
3698 | void scan_roots(G1ParScanThreadState* pss, uint worker_id) { |
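    // For the initial collection set we evacuate the roots, then process the
    // backlog of buffered dirty cards (update_rem_set) and scan the remembered
    // sets of the collection set regions (scan_rem_set) to find the remaining
    // references into the collection set.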
3699 | _root_processor->evacuate_roots(pss, worker_id); |
3700 | _g1h->rem_set()->update_rem_set(pss, worker_id); |
3701 | _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::CodeRoots); |
3702 | } |
3703 | |
3704 | void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) { |
3705 | G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination); |
3706 | } |
3707 | |
3708 | void start_work(uint worker_id) { |
3709 | _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds()); |
3710 | } |
3711 | |
3712 | void end_work(uint worker_id) { |
3713 | _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds()); |
3714 | } |
3715 | |
3716 | public: |
3717 | G1EvacuateRegionsTask(G1CollectedHeap* g1h, |
3718 | G1ParScanThreadStateSet* per_thread_states, |
3719 | RefToScanQueueSet* task_queues, |
3720 | G1RootProcessor* root_processor, |
3721 | uint num_workers) : |
    G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
3723 | _root_processor(root_processor) |
3724 | { } |
3725 | }; |
3726 | |
3727 | void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) { |
3728 | Tickspan task_time; |
3729 | const uint num_workers = workers()->active_workers(); |
3730 | |
3731 | Ticks start_processing = Ticks::now(); |
3732 | { |
3733 | G1RootProcessor root_processor(this, num_workers); |
3734 | G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers); |
3735 | task_time = run_task(&g1_par_task); |
    // Closing the inner scope will execute the destructor for the G1RootProcessor object.
    // To extract its code root fixup time we measure the total time of this scope
    // and subtract the time the WorkGang task took.
3739 | } |
3740 | Tickspan total_processing = Ticks::now() - start_processing; |
3741 | |
3742 | G1GCPhaseTimes* p = phase_times(); |
3743 | p->record_initial_evac_time(task_time.seconds() * 1000.0); |
3744 | p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0); |
3745 | } |
3746 | |
3747 | class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask { |
3748 | |
3749 | void scan_roots(G1ParScanThreadState* pss, uint worker_id) { |
3750 | _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptCodeRoots); |
3751 | } |
3752 | |
3753 | void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) { |
3754 | G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination); |
3755 | } |
3756 | |
3757 | public: |
3758 | G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states, |
3759 | RefToScanQueueSet* queues, |
3760 | uint num_workers) : |
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
3762 | } |
3763 | }; |
3764 | |
3765 | void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) { |
3766 | class G1MarkScope : public MarkScope { }; |
3767 | |
3768 | Tickspan task_time; |
3769 | |
3770 | Ticks start_processing = Ticks::now(); |
3771 | { |
3772 | G1MarkScope code_mark_scope; |
3773 | G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers()); |
3774 | task_time = run_task(&task); |
    // See the comment in evacuate_initial_collection_set() for the reason for this scope.
3776 | } |
3777 | Tickspan total_processing = Ticks::now() - start_processing; |
3778 | |
3779 | G1GCPhaseTimes* p = phase_times(); |
3780 | p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0); |
3781 | } |
3782 | |
3783 | void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) { |
3784 | const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0; |
3785 | |
3786 | Ticks start = Ticks::now(); |
3787 | |
3788 | while (!evacuation_failed() && _collection_set.optional_region_length() > 0) { |
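    // On every iteration re-compute the remaining pause time budget and ask the
    // collection set to prepare the next batch of optional regions that fits
    // into (a fraction of) that budget; stop once the budget is exhausted or no
    // further batch can be evacuated in time.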
3789 | |
3790 | double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms; |
3791 | double time_left_ms = MaxGCPauseMillis - time_used_ms; |
3792 | |
3793 | if (time_left_ms < 0 || |
3794 | !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) { |
      log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
                                _collection_set.optional_region_length(), time_left_ms);
3797 | break; |
3798 | } |
3799 | |
3800 | evacuate_next_optional_regions(per_thread_states); |
3801 | } |
3802 | |
3803 | _collection_set.abandon_optional_collection_set(per_thread_states); |
3804 | |
3805 | phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0); |
3806 | } |
3807 | |
3808 | void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) { |
  // Also cleans up the temporary duplicate detection information in the card
  // table that was used during UpdateRS/ScanRS.
3811 | rem_set()->cleanup_after_scan_rem_set(); |
3812 | |
3813 | // Process any discovered reference objects - we have |
3814 | // to do this _before_ we retire the GC alloc regions |
3815 | // as we may have to copy some 'reachable' referent |
3816 | // objects (and their reachable sub-graphs) that were |
3817 | // not copied during the pause. |
3818 | process_discovered_references(per_thread_states); |
3819 | |
3820 | G1STWIsAliveClosure is_alive(this); |
3821 | G1KeepAliveClosure keep_alive(this); |
3822 | |
3823 | WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, |
3824 | phase_times()->weak_phase_times()); |
3825 | |
3826 | if (G1StringDedup::is_enabled()) { |
    double string_dedup_start_sec = os::elapsedTime();

    string_dedup_cleaning(&is_alive, &keep_alive, phase_times());

    double string_cleanup_time_ms = (os::elapsedTime() - string_dedup_start_sec) * 1000.0;
3832 | phase_times()->record_string_deduplication_time(string_cleanup_time_ms); |
3833 | } |
3834 | |
3835 | _allocator->release_gc_alloc_regions(evacuation_info); |
3836 | |
3837 | if (evacuation_failed()) { |
3838 | restore_after_evac_failure(); |
3839 | |
3840 | // Reset the G1EvacuationFailureALot counters and flags |
3841 | NOT_PRODUCT(reset_evacuation_should_fail();) |
3842 | |
3843 | double recalculate_used_start = os::elapsedTime(); |
3844 | set_used(recalculate_used()); |
3845 | phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0); |
3846 | |
3847 | if (_archive_allocator != NULL) { |
3848 | _archive_allocator->clear_used(); |
3849 | } |
3850 | for (uint i = 0; i < ParallelGCThreads; i++) { |
3851 | if (_evacuation_failed_info_array[i].has_failed()) { |
3852 | _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); |
3853 | } |
3854 | } |
3855 | } else { |
    // The "used" of the collection set regions has already been subtracted
    // when they were freed. Add in the bytes evacuated.
3858 | increase_used(policy()->bytes_copied_during_gc()); |
3859 | } |
3860 | |
3861 | _preserved_marks_set.assert_empty(); |
3862 | |
3863 | merge_per_thread_state_info(per_thread_states); |
3864 | |
3865 | // Reset and re-enable the hot card cache. |
3866 | // Note the counts for the cards in the regions in the |
3867 | // collection set are reset when the collection set is freed. |
3868 | _hot_card_cache->reset_hot_cache(); |
3869 | _hot_card_cache->set_use_cache(true); |
3870 | |
3871 | purge_code_root_memory(); |
3872 | |
3873 | redirty_logged_cards(); |
3874 | |
3875 | free_collection_set(&_collection_set, evacuation_info, per_thread_states->surviving_young_words()); |
3876 | |
3877 | eagerly_reclaim_humongous_regions(); |
3878 | |
3879 | record_obj_copy_mem_stats(); |
3880 | |
3881 | evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before()); |
3882 | evacuation_info.set_bytes_copied(policy()->bytes_copied_during_gc()); |
3883 | |
3884 | #if COMPILER2_OR_JVMCI |
3885 | double start = os::elapsedTime(); |
3886 | DerivedPointerTable::update_pointers(); |
3887 | phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0); |
3888 | #endif |
3889 | policy()->print_age_table(); |
3890 | } |
3891 | |
3892 | void G1CollectedHeap::record_obj_copy_mem_stats() { |
3893 | policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize); |
3894 | |
3895 | _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats), |
3896 | create_g1_evac_summary(&_old_evac_stats)); |
3897 | } |
3898 | |
3899 | void G1CollectedHeap::free_region(HeapRegion* hr, |
3900 | FreeRegionList* free_list, |
3901 | bool skip_remset, |
3902 | bool skip_hot_card_cache, |
3903 | bool locked) { |
  assert(!hr->is_free(), "the region should not be free");
  assert(!hr->is_empty(), "the region should not be empty");
  assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
  assert(free_list != NULL, "pre-condition");
3908 | |
3909 | if (G1VerifyBitmaps) { |
3910 | MemRegion mr(hr->bottom(), hr->end()); |
3911 | concurrent_mark()->clear_range_in_prev_bitmap(mr); |
3912 | } |
3913 | |
3914 | // Clear the card counts for this region. |
3915 | // Note: we only need to do this if the region is not young |
3916 | // (since we don't refine cards in young regions). |
3917 | if (!skip_hot_card_cache && !hr->is_young()) { |
3918 | _hot_card_cache->reset_card_counts(hr); |
3919 | } |
3920 | hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */); |
3921 | _policy->remset_tracker()->update_at_free(hr); |
3922 | free_list->add_ordered(hr); |
3923 | } |
3924 | |
3925 | void G1CollectedHeap::free_humongous_region(HeapRegion* hr, |
3926 | FreeRegionList* free_list) { |
  assert(hr->is_humongous(), "this is only for humongous regions");
  assert(free_list != NULL, "pre-condition");
3929 | hr->clear_humongous(); |
3930 | free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */); |
3931 | } |
3932 | |
3933 | void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed, |
3934 | const uint humongous_regions_removed) { |
3935 | if (old_regions_removed > 0 || humongous_regions_removed > 0) { |
3936 | MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag); |
3937 | _old_set.bulk_remove(old_regions_removed); |
3938 | _humongous_set.bulk_remove(humongous_regions_removed); |
3939 | } |
3940 | |
3941 | } |
3942 | |
3943 | void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { |
  assert(list != NULL, "list can't be null");
3945 | if (!list->is_empty()) { |
3946 | MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
3947 | _hrm->insert_list_into_free_list(list); |
3948 | } |
3949 | } |
3950 | |
3951 | void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { |
3952 | decrease_used(bytes); |
3953 | } |
3954 | |
3955 | class G1FreeCollectionSetTask : public AbstractGangTask { |
3956 | private: |
3957 | |
3958 | // Closure applied to all regions in the collection set to do work that needs to |
3959 | // be done serially in a single thread. |
3960 | class G1SerialFreeCollectionSetClosure : public HeapRegionClosure { |
3961 | private: |
3962 | G1EvacuationInfo* _evacuation_info; |
3963 | const size_t* _surviving_young_words; |
3964 | |
3965 | // Bytes used in successfully evacuated regions before the evacuation. |
3966 | size_t _before_used_bytes; |
    // Bytes used in unsuccessfully evacuated regions before the evacuation.
3968 | size_t _after_used_bytes; |
3969 | |
3970 | size_t _bytes_allocated_in_old_since_last_gc; |
3971 | |
3972 | size_t _failure_used_words; |
3973 | size_t _failure_waste_words; |
3974 | |
3975 | FreeRegionList _local_free_list; |
3976 | public: |
3977 | G1SerialFreeCollectionSetClosure(G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) : |
3978 | HeapRegionClosure(), |
3979 | _evacuation_info(evacuation_info), |
3980 | _surviving_young_words(surviving_young_words), |
3981 | _before_used_bytes(0), |
3982 | _after_used_bytes(0), |
3983 | _bytes_allocated_in_old_since_last_gc(0), |
3984 | _failure_used_words(0), |
3985 | _failure_waste_words(0), |
3986 | _local_free_list("Local Region List for CSet Freeing" ) { |
3987 | } |
3988 | |
3989 | virtual bool do_heap_region(HeapRegion* r) { |
3990 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
3991 | |
      assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
3993 | g1h->clear_region_attr(r); |
3994 | |
3995 | if (r->is_young()) { |
3996 | assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(), |
3997 | "Young index %d is wrong for region %u of type %s with %u young regions" , |
3998 | r->young_index_in_cset(), |
3999 | r->hrm_index(), |
4000 | r->get_type_str(), |
4001 | g1h->collection_set()->young_region_length()); |
4002 | size_t words_survived = _surviving_young_words[r->young_index_in_cset()]; |
4003 | r->record_surv_words_in_group(words_survived); |
4004 | } |
4005 | |
4006 | if (!r->evacuation_failed()) { |
        assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
4008 | _before_used_bytes += r->used(); |
4009 | g1h->free_region(r, |
4010 | &_local_free_list, |
4011 | true, /* skip_remset */ |
4012 | true, /* skip_hot_card_cache */ |
4013 | true /* locked */); |
4014 | } else { |
4015 | r->uninstall_surv_rate_group(); |
4016 | r->set_young_index_in_cset(-1); |
4017 | r->set_evacuation_failed(false); |
4018 | // When moving a young gen region to old gen, we "allocate" that whole region |
4019 | // there. This is in addition to any already evacuated objects. Notify the |
4020 | // policy about that. |
4021 | // Old gen regions do not cause an additional allocation: both the objects |
4022 | // still in the region and the ones already moved are accounted for elsewhere. |
4023 | if (r->is_young()) { |
4024 | _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes; |
4025 | } |
4026 | // The region is now considered to be old. |
4027 | r->set_old(); |
4028 | // Do some allocation statistics accounting. Regions that failed evacuation |
4029 | // are always made old, so there is no need to update anything in the young |
4030 | // gen statistics, but we need to update old gen statistics. |
4031 | size_t used_words = r->marked_bytes() / HeapWordSize; |
4032 | |
4033 | _failure_used_words += used_words; |
4034 | _failure_waste_words += HeapRegion::GrainWords - used_words; |
4035 | |
4036 | g1h->old_set_add(r); |
4037 | _after_used_bytes += r->used(); |
4038 | } |
4039 | return false; |
4040 | } |
4041 | |
4042 | void complete_work() { |
4043 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
4044 | |
4045 | _evacuation_info->set_regions_freed(_local_free_list.length()); |
4046 | _evacuation_info->increment_collectionset_used_after(_after_used_bytes); |
4047 | |
4048 | g1h->prepend_to_freelist(&_local_free_list); |
4049 | g1h->decrement_summary_bytes(_before_used_bytes); |
4050 | |
4051 | G1Policy* policy = g1h->policy(); |
4052 | policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc); |
4053 | |
4054 | g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words); |
4055 | } |
4056 | }; |
4057 | |
4058 | G1CollectionSet* _collection_set; |
4059 | G1SerialFreeCollectionSetClosure _cl; |
4060 | const size_t* _surviving_young_words; |
4061 | |
4062 | size_t _rs_lengths; |
4063 | |
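  // Claim ticket for the one-off serial pass: workers race on this counter in
  // work(); only the worker whose atomic increment observes a previous value
  // of zero runs do_serial_work().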
4064 | volatile jint _serial_work_claim; |
4065 | |
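  // Per-region state snapshotted at task construction time. The parallel pass
  // works from this snapshot because the serial pass, which may run
  // concurrently, mutates the regions themselves (e.g. it clears the
  // evacuation-failed flag and retypes failed regions to old).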
4066 | struct WorkItem { |
4067 | uint region_idx; |
4068 | bool is_young; |
4069 | bool evacuation_failed; |
4070 | |
4071 | WorkItem(HeapRegion* r) { |
4072 | region_idx = r->hrm_index(); |
4073 | is_young = r->is_young(); |
4074 | evacuation_failed = r->evacuation_failed(); |
4075 | } |
4076 | }; |
4077 | |
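  // Workers grab chunks of chunk_size() consecutive work items by atomically
  // bumping this counter; see work() below.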
4078 | volatile size_t _parallel_work_claim; |
4079 | size_t _num_work_items; |
4080 | WorkItem* _work_items; |
4081 | |
4082 | void do_serial_work() { |
4083 | // Need to grab the lock to be allowed to modify the old region list. |
4084 | MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag); |
4085 | _collection_set->iterate(&_cl); |
4086 | } |
4087 | |
4088 | void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) { |
4089 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
4090 | |
4091 | HeapRegion* r = g1h->region_at(region_idx); |
    assert(!g1h->is_on_master_free_list(r), "sanity");
4093 | |
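    // Sample this region's remembered set length; the accumulated total is
    // reported to the policy in complete_work().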
4094 | Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths); |
4095 | |
4096 | if (!is_young) { |
4097 | g1h->_hot_card_cache->reset_card_counts(r); |
4098 | } |
4099 | |
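    // Regions that failed evacuation stay in the heap as old regions and must
    // keep their remembered sets; only successfully evacuated regions, which
    // are about to be freed, may clear theirs.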
4100 | if (!evacuation_failed) { |
4101 | r->rem_set()->clear_locked(); |
4102 | } |
4103 | } |
4104 | |
4105 | class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure { |
4106 | private: |
4107 | size_t _cur_idx; |
4108 | WorkItem* _work_items; |
4109 | public: |
4110 | G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { } |
4111 | |
4112 | virtual bool do_heap_region(HeapRegion* r) { |
4113 | _work_items[_cur_idx++] = WorkItem(r); |
4114 | return false; |
4115 | } |
4116 | }; |
4117 | |
4118 | void prepare_work() { |
4119 | G1PrepareFreeCollectionSetClosure cl(_work_items); |
4120 | _collection_set->iterate(&cl); |
4121 | } |
4122 | |
4123 | void complete_work() { |
4124 | _cl.complete_work(); |
4125 | |
4126 | G1Policy* policy = G1CollectedHeap::heap()->policy(); |
4127 | policy->record_max_rs_lengths(_rs_lengths); |
4128 | policy->cset_regions_freed(); |
4129 | } |
4130 | public: |
4131 | G1FreeCollectionSetTask(G1CollectionSet* collection_set, G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) : |
4132 | AbstractGangTask("G1 Free Collection Set" ), |
4133 | _collection_set(collection_set), |
4134 | _cl(evacuation_info, surviving_young_words), |
4135 | _surviving_young_words(surviving_young_words), |
4136 | _rs_lengths(0), |
4137 | _serial_work_claim(0), |
4138 | _parallel_work_claim(0), |
4139 | _num_work_items(collection_set->region_length()), |
4140 | _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) { |
4141 | prepare_work(); |
4142 | } |
4143 | |
4144 | ~G1FreeCollectionSetTask() { |
4145 | complete_work(); |
4146 | FREE_C_HEAP_ARRAY(WorkItem, _work_items); |
4147 | } |
4148 | |
4149 | // Chunk size for work distribution. The chosen value has been determined experimentally |
4150 | // to be a good tradeoff between overhead and achievable parallelism. |
4151 | static uint chunk_size() { return 32; } |
4152 | |
4153 | virtual void work(uint worker_id) { |
4154 | G1GCPhaseTimes* timer = G1CollectedHeap::heap()->phase_times(); |
4155 | |
4156 | // Claim serial work. |
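    // The plain read of _serial_work_claim is only a fast-path filter; the
    // atomic increment decides the race, so exactly one worker (the one that
    // observes a previous value of zero) runs the serial pass.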
4157 | if (_serial_work_claim == 0) { |
4158 | jint value = Atomic::add(1, &_serial_work_claim) - 1; |
4159 | if (value == 0) { |
4160 | double serial_time = os::elapsedTime(); |
4161 | do_serial_work(); |
4162 | timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0); |
4163 | } |
4164 | } |
4165 | |
4166 | // Start parallel work. |
4167 | double young_time = 0.0; |
4168 | bool has_young_time = false; |
4169 | double non_young_time = 0.0; |
4170 | bool has_non_young_time = false; |
4171 | |
4172 | while (true) { |
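      // Atomically claim the next chunk of work items; this worker then owns
      // the index range [cur, end), clamped to _num_work_items below.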
4173 | size_t end = Atomic::add(chunk_size(), &_parallel_work_claim); |
4174 | size_t cur = end - chunk_size(); |
4175 | |
4176 | if (cur >= _num_work_items) { |
4177 | break; |
4178 | } |
4179 | |
4180 | EventGCPhaseParallel event; |
4181 | double start_time = os::elapsedTime(); |
4182 | |
4183 | end = MIN2(end, _num_work_items); |
4184 | |
4185 | for (; cur < end; cur++) { |
4186 | bool is_young = _work_items[cur].is_young; |
4187 | |
4188 | do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed); |
4189 | |
4190 | double end_time = os::elapsedTime(); |
4191 | double time_taken = end_time - start_time; |
4192 | if (is_young) { |
4193 | young_time += time_taken; |
4194 | has_young_time = true; |
4195 | event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet)); |
4196 | } else { |
4197 | non_young_time += time_taken; |
4198 | has_non_young_time = true; |
4199 | event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet)); |
4200 | } |
4201 | start_time = end_time; |
4202 | } |
4203 | } |
4204 | |
4205 | if (has_young_time) { |
4206 | timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time); |
4207 | } |
4208 | if (has_non_young_time) { |
4209 | timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time); |
4210 | } |
4211 | } |
4212 | }; |
4213 | |
4214 | void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words) { |
4215 | _eden.clear(); |
4216 | |
4217 | double free_cset_start_time = os::elapsedTime(); |
4218 | |
4219 | { |
4220 | uint const num_regions = _collection_set.region_length(); |
4221 | uint const num_chunks = MAX2(num_regions / G1FreeCollectionSetTask::chunk_size(), 1U); |
4222 | uint const num_workers = MIN2(workers()->active_workers(), num_chunks); |
4223 | |
4224 | G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words); |
4225 | |
4226 | log_debug(gc, ergo)("Running %s using %u workers for collection set length %u" , |
4227 | cl.name(), num_workers, num_regions); |
4228 | workers()->run_task(&cl, num_workers); |
4229 | } |
4230 | phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0); |
4231 | |
4232 | collection_set->clear(); |
4233 | } |
4234 | |
4235 | class G1FreeHumongousRegionClosure : public HeapRegionClosure { |
4236 | private: |
4237 | FreeRegionList* _free_region_list; |
4238 | HeapRegionSet* _proxy_set; |
4239 | uint _humongous_objects_reclaimed; |
4240 | uint _humongous_regions_reclaimed; |
4241 | size_t _freed_bytes; |
4242 | public: |
4243 | |
4244 | G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) : |
4245 | _free_region_list(free_region_list), _proxy_set(NULL), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) { |
4246 | } |
4247 | |
4248 | virtual bool do_heap_region(HeapRegion* r) { |
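    // Only "starts humongous" regions are examined here; the continuation
    // regions of a reclaimed object are freed together with its start region
    // in the loop at the end of this method.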
4249 | if (!r->is_starts_humongous()) { |
4250 | return false; |
4251 | } |
4252 | |
4253 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
4254 | |
4255 | oop obj = (oop)r->bottom(); |
4256 | G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap(); |
4257 | |
    // The following checks, which determine whether the humongous object is
    // live, are sufficient. The main additional check (besides having a
    // reference from the roots or the young gen) is whether the humongous
    // object has a remembered set entry.
    //
    // A humongous object cannot be live if there is no remembered set for it
    // because:
    // - there can be no references from within humongous starts regions
    // referencing the object because we never allocate other objects into them.
    // (I.e. there are no intra-region references that may be missed by the
    // remembered set.)
    // - as soon as there is a remembered set entry to the humongous starts
    // region (i.e. it has "escaped" to an old object) this remembered set
    // entry will stay until the end of a concurrent mark.
    //
    // It is not required to check whether the object has been found dead by
    // marking or not; in fact that would prevent reclamation within a
    // concurrent cycle, as all objects allocated during that time are
    // considered live.
    // SATB marking is even more conservative than the remembered set.
    // So if at this point in the collection there is no remembered set entry,
    // nobody has a reference to the object.
    // At the start of collection we flush all refinement logs, so remembered
    // sets are completely up-to-date with respect to references to the
    // humongous object.
    //
    // Other implementation considerations:
    // - never consider object arrays at this time because they would require
    // considerable effort to clean up the remembered sets. This is required
    // because stale remembered sets might reference locations that are
    // currently allocated into.
4286 | uint region_idx = r->hrm_index(); |
4287 | if (!g1h->is_humongous_reclaim_candidate(region_idx) || |
4288 | !r->rem_set()->is_empty()) { |
4289 | log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d" , |
4290 | region_idx, |
4291 | (size_t)obj->size() * HeapWordSize, |
4292 | p2i(r->bottom()), |
4293 | r->rem_set()->occupied(), |
4294 | r->rem_set()->strong_code_roots_list_length(), |
4295 | next_bitmap->is_marked(r->bottom()), |
4296 | g1h->is_humongous_reclaim_candidate(region_idx), |
4297 | obj->is_typeArray() |
4298 | ); |
4299 | return false; |
4300 | } |
4301 | |
4302 | guarantee(obj->is_typeArray(), |
4303 | "Only eagerly reclaiming type arrays is supported, but the object " |
4304 | PTR_FORMAT " is not." , p2i(r->bottom())); |
4305 | |
4306 | log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d" , |
4307 | region_idx, |
4308 | (size_t)obj->size() * HeapWordSize, |
4309 | p2i(r->bottom()), |
4310 | r->rem_set()->occupied(), |
4311 | r->rem_set()->strong_code_roots_list_length(), |
4312 | next_bitmap->is_marked(r->bottom()), |
4313 | g1h->is_humongous_reclaim_candidate(region_idx), |
4314 | obj->is_typeArray() |
4315 | ); |
4316 | |
4317 | G1ConcurrentMark* const cm = g1h->concurrent_mark(); |
4318 | cm->humongous_object_eagerly_reclaimed(r); |
4319 | assert(!cm->is_marked_in_prev_bitmap(obj) && !cm->is_marked_in_next_bitmap(obj), |
4320 | "Eagerly reclaimed humongous region %u should not be marked at all but is in prev %s next %s" , |
4321 | region_idx, |
4322 | BOOL_TO_STR(cm->is_marked_in_prev_bitmap(obj)), |
4323 | BOOL_TO_STR(cm->is_marked_in_next_bitmap(obj))); |
4324 | _humongous_objects_reclaimed++; |
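    // Free the whole chain of regions backing this humongous object. The next
    // region is fetched before the current one is freed, since
    // free_humongous_region() clears the humongous attributes that
    // next_region_in_humongous() relies on.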
4325 | do { |
4326 | HeapRegion* next = g1h->next_region_in_humongous(r); |
4327 | _freed_bytes += r->used(); |
4328 | r->set_containing_set(NULL); |
4329 | _humongous_regions_reclaimed++; |
4330 | g1h->free_humongous_region(r, _free_region_list); |
4331 | r = next; |
4332 | } while (r != NULL); |
4333 | |
4334 | return false; |
4335 | } |
4336 | |
4337 | uint humongous_objects_reclaimed() { |
4338 | return _humongous_objects_reclaimed; |
4339 | } |
4340 | |
4341 | uint humongous_regions_reclaimed() { |
4342 | return _humongous_regions_reclaimed; |
4343 | } |
4344 | |
4345 | size_t bytes_freed() const { |
4346 | return _freed_bytes; |
4347 | } |
4348 | }; |
4349 | |
4350 | void G1CollectedHeap::eagerly_reclaim_humongous_regions() { |
4351 | assert_at_safepoint_on_vm_thread(); |
4352 | |
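  // Bail out early if eager reclaim is disabled or there are no candidates,
  // unless humongous debug logging is enabled; in that case the pass runs
  // anyway so that the per-region liveness details are logged.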
4353 | if (!G1EagerReclaimHumongousObjects || |
4354 | (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) { |
4355 | phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0); |
4356 | return; |
4357 | } |
4358 | |
4359 | double start_time = os::elapsedTime(); |
4360 | |
  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
4362 | |
4363 | G1FreeHumongousRegionClosure cl(&local_cleanup_list); |
4364 | heap_region_iterate(&cl); |
4365 | |
4366 | remove_from_old_sets(0, cl.humongous_regions_reclaimed()); |
4367 | |
4368 | G1HRPrinter* hrp = hr_printer(); |
4369 | if (hrp->is_active()) { |
4370 | FreeRegionListIterator iter(&local_cleanup_list); |
4371 | while (iter.more_available()) { |
4372 | HeapRegion* hr = iter.get_next(); |
4373 | hrp->cleanup(hr); |
4374 | } |
4375 | } |
4376 | |
4377 | prepend_to_freelist(&local_cleanup_list); |
4378 | decrement_summary_bytes(cl.bytes_freed()); |
4379 | |
4380 | phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0, |
4381 | cl.humongous_objects_reclaimed()); |
4382 | } |
4383 | |
4384 | class G1AbandonCollectionSetClosure : public HeapRegionClosure { |
4385 | public: |
4386 | virtual bool do_heap_region(HeapRegion* r) { |
    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
4388 | G1CollectedHeap::heap()->clear_region_attr(r); |
4389 | r->set_young_index_in_cset(-1); |
4390 | return false; |
4391 | } |
4392 | }; |
4393 | |
4394 | void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) { |
4395 | G1AbandonCollectionSetClosure cl; |
4396 | collection_set_iterate_all(&cl); |
4397 | |
4398 | collection_set->clear(); |
4399 | collection_set->stop_incremental_building(); |
4400 | } |
4401 | |
4402 | bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) { |
4403 | return _allocator->is_retained_old_region(hr); |
4404 | } |
4405 | |
4406 | void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { |
4407 | _eden.add(hr); |
4408 | _policy->set_region_eden(hr); |
4409 | } |
4410 | |
4411 | #ifdef ASSERT |
4412 | |
4413 | class NoYoungRegionsClosure: public HeapRegionClosure { |
4414 | private: |
4415 | bool _success; |
4416 | public: |
4417 | NoYoungRegionsClosure() : _success(true) { } |
4418 | bool do_heap_region(HeapRegion* r) { |
4419 | if (r->is_young()) { |
4420 | log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young" , |
4421 | p2i(r->bottom()), p2i(r->end())); |
4422 | _success = false; |
4423 | } |
4424 | return false; |
4425 | } |
4426 | bool success() { return _success; } |
4427 | }; |
4428 | |
4429 | bool G1CollectedHeap::check_young_list_empty() { |
4430 | bool ret = (young_regions_count() == 0); |
4431 | |
4432 | NoYoungRegionsClosure closure; |
4433 | heap_region_iterate(&closure); |
4434 | ret = ret && closure.success(); |
4435 | |
4436 | return ret; |
4437 | } |
4438 | |
4439 | #endif // ASSERT |
4440 | |
4441 | class TearDownRegionSetsClosure : public HeapRegionClosure { |
4442 | HeapRegionSet *_old_set; |
4443 | |
4444 | public: |
4445 | TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { } |
4446 | |
4447 | bool do_heap_region(HeapRegion* r) { |
4448 | if (r->is_old()) { |
4449 | _old_set->remove(r); |
    } else if (r->is_young()) {
4451 | r->uninstall_surv_rate_group(); |
4452 | } else { |
      // We ignore free regions; we'll empty the free list afterwards.
      // We ignore humongous and archive regions; we're not tearing down
      // these sets.
4456 | assert(r->is_archive() || r->is_free() || r->is_humongous(), |
4457 | "it cannot be another type" ); |
4458 | } |
4459 | return false; |
4460 | } |
4461 | |
4462 | ~TearDownRegionSetsClosure() { |
4463 | assert(_old_set->is_empty(), "post-condition" ); |
4464 | } |
4465 | }; |
4466 | |
4467 | void G1CollectedHeap::tear_down_region_sets(bool free_list_only) { |
4468 | assert_at_safepoint_on_vm_thread(); |
4469 | |
4470 | if (!free_list_only) { |
4471 | TearDownRegionSetsClosure cl(&_old_set); |
4472 | heap_region_iterate(&cl); |
4473 | |
4474 | // Note that emptying the _young_list is postponed and instead done as |
    // the first step when rebuilding the region sets again. The reason for
4476 | // this is that during a full GC string deduplication needs to know if |
4477 | // a collected region was young or old when the full GC was initiated. |
4478 | } |
4479 | _hrm->remove_all_free_regions(); |
4480 | } |
4481 | |
4482 | void G1CollectedHeap::increase_used(size_t bytes) { |
4483 | _summary_bytes_used += bytes; |
4484 | } |
4485 | |
4486 | void G1CollectedHeap::decrease_used(size_t bytes) { |
4487 | assert(_summary_bytes_used >= bytes, |
4488 | "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT, |
4489 | _summary_bytes_used, bytes); |
4490 | _summary_bytes_used -= bytes; |
4491 | } |
4492 | |
4493 | void G1CollectedHeap::set_used(size_t bytes) { |
4494 | _summary_bytes_used = bytes; |
4495 | } |
4496 | |
4497 | class RebuildRegionSetsClosure : public HeapRegionClosure { |
4498 | private: |
4499 | bool _free_list_only; |
4500 | |
4501 | HeapRegionSet* _old_set; |
4502 | HeapRegionManager* _hrm; |
4503 | |
4504 | size_t _total_used; |
4505 | |
4506 | public: |
4507 | RebuildRegionSetsClosure(bool free_list_only, |
4508 | HeapRegionSet* old_set, |
4509 | HeapRegionManager* hrm) : |
4510 | _free_list_only(free_list_only), |
4511 | _old_set(old_set), _hrm(hrm), _total_used(0) { |
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
4515 | } |
4516 | } |
4517 | |
4518 | bool do_heap_region(HeapRegion* r) { |
4519 | if (r->is_empty()) { |
4520 | assert(r->rem_set()->is_empty(), "Empty regions should have empty remembered sets." ); |
4521 | // Add free regions to the free list |
4522 | r->set_free(); |
4523 | _hrm->insert_into_free_list(r); |
4524 | } else if (!_free_list_only) { |
4525 | assert(r->rem_set()->is_empty(), "At this point remembered sets must have been cleared." ); |
4526 | |
4527 | if (r->is_archive() || r->is_humongous()) { |
        // We ignore archive and humongous regions; we leave these sets unchanged.
4529 | } else { |
        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
4531 | // We now move all (non-humongous, non-old, non-archive) regions to old gen, and register them as such. |
4532 | r->move_to_old(); |
4533 | _old_set->add(r); |
4534 | } |
4535 | _total_used += r->used(); |
4536 | } |
4537 | |
4538 | return false; |
4539 | } |
4540 | |
4541 | size_t total_used() { |
4542 | return _total_used; |
4543 | } |
4544 | }; |
4545 | |
4546 | void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { |
4547 | assert_at_safepoint_on_vm_thread(); |
4548 | |
4549 | if (!free_list_only) { |
4550 | _eden.clear(); |
4551 | _survivor.clear(); |
4552 | } |
4553 | |
4554 | RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm); |
4555 | heap_region_iterate(&cl); |
4556 | |
4557 | if (!free_list_only) { |
4558 | set_used(cl.total_used()); |
4559 | if (_archive_allocator != NULL) { |
4560 | _archive_allocator->clear_used(); |
4561 | } |
4562 | } |
4563 | assert_used_and_recalculate_used_equal(this); |
4564 | } |
4565 | |
4566 | // Methods for the mutator alloc region |
4567 | |
4568 | HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, |
4569 | bool force) { |
4570 | assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
4571 | bool should_allocate = policy()->should_allocate_mutator_region(); |
4572 | if (force || should_allocate) { |
4573 | HeapRegion* new_alloc_region = new_region(word_size, |
4574 | HeapRegionType::Eden, |
4575 | false /* do_expand */); |
4576 | if (new_alloc_region != NULL) { |
4577 | set_region_short_lived_locked(new_alloc_region); |
4578 | _hr_printer.alloc(new_alloc_region, !should_allocate); |
4579 | _verifier->check_bitmaps("Mutator Region Allocation" , new_alloc_region); |
4580 | _policy->remset_tracker()->update_at_allocate(new_alloc_region); |
4581 | return new_alloc_region; |
4582 | } |
4583 | } |
4584 | return NULL; |
4585 | } |
4586 | |
4587 | void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, |
4588 | size_t allocated_bytes) { |
4589 | assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4591 | |
4592 | collection_set()->add_eden_region(alloc_region); |
4593 | increase_used(allocated_bytes); |
4594 | _eden.add_used_bytes(allocated_bytes); |
4595 | _hr_printer.retire(alloc_region); |
4596 | |
  // We update the eden sizes here, when the region is retired,
  // instead of when it's allocated, since this is the point at which
  // its used space has been recorded in _summary_bytes_used.
4600 | g1mm()->update_eden_size(); |
4601 | } |
4602 | |
4603 | // Methods for the GC alloc regions |
4604 | |
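// There is no cap on old GC alloc regions; survivor regions are limited by
// the policy's maximum number of survivor regions.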
4605 | bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) { |
4606 | if (dest.is_old()) { |
4607 | return true; |
4608 | } else { |
4609 | return survivor_regions_count() < policy()->max_survivor_regions(); |
4610 | } |
4611 | } |
4612 | |
4613 | HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) { |
  assert(FreeList_lock->owned_by_self(), "pre-condition");
4615 | |
4616 | if (!has_more_regions(dest)) { |
4617 | return NULL; |
4618 | } |
4619 | |
4620 | HeapRegionType type; |
4621 | if (dest.is_young()) { |
4622 | type = HeapRegionType::Survivor; |
4623 | } else { |
4624 | type = HeapRegionType::Old; |
4625 | } |
4626 | |
4627 | HeapRegion* new_alloc_region = new_region(word_size, |
4628 | type, |
4629 | true /* do_expand */); |
4630 | |
4631 | if (new_alloc_region != NULL) { |
4632 | if (type.is_survivor()) { |
4633 | new_alloc_region->set_survivor(); |
4634 | _survivor.add(new_alloc_region); |
4635 | _verifier->check_bitmaps("Survivor Region Allocation" , new_alloc_region); |
4636 | } else { |
4637 | new_alloc_region->set_old(); |
4638 | _verifier->check_bitmaps("Old Region Allocation" , new_alloc_region); |
4639 | } |
4640 | _policy->remset_tracker()->update_at_allocate(new_alloc_region); |
4641 | register_region_with_region_attr(new_alloc_region); |
4642 | _hr_printer.alloc(new_alloc_region); |
4643 | return new_alloc_region; |
4644 | } |
4645 | return NULL; |
4646 | } |
4647 | |
4648 | void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region, |
4649 | size_t allocated_bytes, |
4650 | G1HeapRegionAttr dest) { |
4651 | policy()->record_bytes_copied_during_gc(allocated_bytes); |
4652 | if (dest.is_old()) { |
4653 | old_set_add(alloc_region); |
4654 | } else { |
4655 | assert(dest.is_young(), "Retiring alloc region should be young (%d)" , dest.type()); |
4656 | _survivor.add_used_bytes(allocated_bytes); |
4657 | } |
4658 | |
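  // During an initial-mark pause, areas newly allocated into GC alloc regions
  // are recorded as root regions so that concurrent marking scans them: the
  // objects copied there are live but would otherwise not be marked.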
4659 | bool const during_im = collector_state()->in_initial_mark_gc(); |
4660 | if (during_im && allocated_bytes > 0) { |
4661 | _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top()); |
4662 | } |
4663 | _hr_printer.retire(alloc_region); |
4664 | } |
4665 | |
4666 | HeapRegion* G1CollectedHeap::alloc_highest_free_region() { |
4667 | bool expanded = false; |
4668 | uint index = _hrm->find_highest_free(&expanded); |
4669 | |
4670 | if (index != G1_NO_HRM_INDEX) { |
4671 | if (expanded) { |
4672 | log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B" , |
4673 | HeapRegion::GrainWords * HeapWordSize); |
4674 | } |
4675 | _hrm->allocate_free_regions_starting_at(index, 1); |
4676 | return region_at(index); |
4677 | } |
4678 | return NULL; |
4679 | } |
4680 | |
4681 | // Optimized nmethod scanning |
4682 | |
4683 | class RegisterNMethodOopClosure: public OopClosure { |
4684 | G1CollectedHeap* _g1h; |
4685 | nmethod* _nm; |
4686 | |
4687 | template <class T> void do_oop_work(T* p) { |
4688 | T heap_oop = RawAccess<>::oop_load(p); |
4689 | if (!CompressedOops::is_null(heap_oop)) { |
4690 | oop obj = CompressedOops::decode_not_null(heap_oop); |
4691 | HeapRegion* hr = _g1h->heap_region_containing(obj); |
4692 | assert(!hr->is_continues_humongous(), |
4693 | "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT |
4694 | " starting at " HR_FORMAT, |
4695 | p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())); |
4696 | |
4697 | // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries. |
4698 | hr->add_strong_code_root_locked(_nm); |
4699 | } |
4700 | } |
4701 | |
4702 | public: |
4703 | RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) : |
4704 | _g1h(g1h), _nm(nm) {} |
4705 | |
4706 | void do_oop(oop* p) { do_oop_work(p); } |
4707 | void do_oop(narrowOop* p) { do_oop_work(p); } |
4708 | }; |
4709 | |
4710 | class UnregisterNMethodOopClosure: public OopClosure { |
4711 | G1CollectedHeap* _g1h; |
4712 | nmethod* _nm; |
4713 | |
4714 | template <class T> void do_oop_work(T* p) { |
4715 | T heap_oop = RawAccess<>::oop_load(p); |
4716 | if (!CompressedOops::is_null(heap_oop)) { |
4717 | oop obj = CompressedOops::decode_not_null(heap_oop); |
4718 | HeapRegion* hr = _g1h->heap_region_containing(obj); |
4719 | assert(!hr->is_continues_humongous(), |
4720 | "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT |
4721 | " starting at " HR_FORMAT, |
4722 | p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())); |
4723 | |
4724 | hr->remove_strong_code_root(_nm); |
4725 | } |
4726 | } |
4727 | |
4728 | public: |
4729 | UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) : |
4730 | _g1h(g1h), _nm(nm) {} |
4731 | |
4732 | void do_oop(oop* p) { do_oop_work(p); } |
4733 | void do_oop(narrowOop* p) { do_oop_work(p); } |
4734 | }; |
4735 | |
4736 | void G1CollectedHeap::register_nmethod(nmethod* nm) { |
4737 | guarantee(nm != NULL, "sanity" ); |
4738 | RegisterNMethodOopClosure reg_cl(this, nm); |
4739 | nm->oops_do(®_cl); |
4740 | } |
4741 | |
4742 | void G1CollectedHeap::unregister_nmethod(nmethod* nm) { |
4743 | guarantee(nm != NULL, "sanity" ); |
4744 | UnregisterNMethodOopClosure reg_cl(this, nm); |
4745 | nm->oops_do(®_cl, true); |
4746 | } |
4747 | |
4748 | void G1CollectedHeap::purge_code_root_memory() { |
4749 | double purge_start = os::elapsedTime(); |
4750 | G1CodeRootSet::purge(); |
4751 | double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; |
4752 | phase_times()->record_strong_code_root_purge_time(purge_time_ms); |
4753 | } |
4754 | |
4755 | class RebuildStrongCodeRootClosure: public CodeBlobClosure { |
4756 | G1CollectedHeap* _g1h; |
4757 | |
4758 | public: |
4759 | RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) : |
4760 | _g1h(g1h) {} |
4761 | |
4762 | void do_code_blob(CodeBlob* cb) { |
4763 | nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL; |
4764 | if (nm == NULL) { |
4765 | return; |
4766 | } |
4767 | |
4768 | _g1h->register_nmethod(nm); |
4769 | } |
4770 | }; |
4771 | |
4772 | void G1CollectedHeap::rebuild_strong_code_roots() { |
4773 | RebuildStrongCodeRootClosure blob_cl(this); |
4774 | CodeCache::blobs_do(&blob_cl); |
4775 | } |
4776 | |
4777 | void G1CollectedHeap::initialize_serviceability() { |
4778 | _g1mm->initialize_serviceability(); |
4779 | } |
4780 | |
4781 | MemoryUsage G1CollectedHeap::memory_usage() { |
4782 | return _g1mm->memory_usage(); |
4783 | } |
4784 | |
4785 | GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() { |
4786 | return _g1mm->memory_managers(); |
4787 | } |
4788 | |
4789 | GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() { |
4790 | return _g1mm->memory_pools(); |
4791 | } |
4792 | |