1 | /* |
2 | * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "gc/serial/defNewGeneration.inline.hpp" |
27 | #include "gc/serial/serialHeap.inline.hpp" |
28 | #include "gc/serial/tenuredGeneration.hpp" |
29 | #include "gc/shared/adaptiveSizePolicy.hpp" |
30 | #include "gc/shared/ageTable.inline.hpp" |
31 | #include "gc/shared/cardTableRS.hpp" |
32 | #include "gc/shared/collectorCounters.hpp" |
33 | #include "gc/shared/gcArguments.hpp" |
34 | #include "gc/shared/gcHeapSummary.hpp" |
35 | #include "gc/shared/gcLocker.hpp" |
36 | #include "gc/shared/gcPolicyCounters.hpp" |
37 | #include "gc/shared/gcTimer.hpp" |
38 | #include "gc/shared/gcTrace.hpp" |
39 | #include "gc/shared/gcTraceTime.inline.hpp" |
40 | #include "gc/shared/genOopClosures.inline.hpp" |
41 | #include "gc/shared/generationSpec.hpp" |
42 | #include "gc/shared/preservedMarks.inline.hpp" |
43 | #include "gc/shared/referencePolicy.hpp" |
44 | #include "gc/shared/referenceProcessorPhaseTimes.hpp" |
45 | #include "gc/shared/space.inline.hpp" |
46 | #include "gc/shared/spaceDecorator.hpp" |
47 | #include "gc/shared/strongRootsScope.hpp" |
48 | #include "gc/shared/weakProcessor.hpp" |
49 | #include "logging/log.hpp" |
50 | #include "memory/iterator.inline.hpp" |
51 | #include "memory/resourceArea.hpp" |
52 | #include "oops/instanceRefKlass.hpp" |
53 | #include "oops/oop.inline.hpp" |
54 | #include "runtime/atomic.hpp" |
55 | #include "runtime/java.hpp" |
56 | #include "runtime/prefetch.inline.hpp" |
57 | #include "runtime/thread.inline.hpp" |
58 | #include "utilities/align.hpp" |
59 | #include "utilities/copy.hpp" |
60 | #include "utilities/globalDefinitions.hpp" |
61 | #include "utilities/stack.inline.hpp" |
62 | |
63 | // |
64 | // DefNewGeneration functions. |
65 | |
66 | // Methods of protected closure types. |
67 | |
68 | DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) { |
69 | assert(_young_gen->kind() == Generation::ParNew || |
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
71 | } |
72 | |
73 | bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { |
74 | return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded(); |
75 | } |
76 | |
77 | DefNewGeneration::KeepAliveClosure:: |
78 | KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { |
79 | _rs = GenCollectedHeap::heap()->rem_set(); |
80 | } |
81 | |
82 | void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
83 | void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
84 | |
85 | |
86 | DefNewGeneration::FastKeepAliveClosure:: |
87 | FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : |
88 | DefNewGeneration::KeepAliveClosure(cl) { |
89 | _boundary = g->reserved().end(); |
90 | } |
91 | |
92 | void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
93 | void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
94 | |
95 | DefNewGeneration::FastEvacuateFollowersClosure:: |
96 | FastEvacuateFollowersClosure(SerialHeap* heap, |
97 | FastScanClosure* cur, |
98 | FastScanClosure* older) : |
99 | _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older) |
100 | { |
101 | } |
102 | |
103 | void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { |
104 | do { |
105 | _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older); |
106 | } while (!_heap->no_allocs_since_save_marks()); |
  guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
108 | } |
109 | |
110 | ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : |
111 | OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
112 | { |
113 | _boundary = _g->reserved().end(); |
114 | } |
115 | |
116 | FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : |
117 | OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
118 | { |
119 | _boundary = _g->reserved().end(); |
120 | } |
121 | |
122 | void CLDScanClosure::do_cld(ClassLoaderData* cld) { |
123 | NOT_PRODUCT(ResourceMark rm); |
  log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(cld),
                                  cld->loader_name_and_id(),
                                  cld->has_modified_oops() ? "true" : "false");
128 | |
  // If the cld has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
131 | if (cld->has_modified_oops()) { |
132 | if (_accumulate_modified_oops) { |
133 | cld->accumulate_modified_oops(); |
134 | } |
135 | |
136 | // Tell the closure which CLD is being scanned so that it can be dirtied |
137 | // if oops are left pointing into the young gen. |
138 | _scavenge_closure->set_scanned_cld(cld); |
139 | |
140 | // Clean the cld since we're going to scavenge all the metadata. |
141 | cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true); |
142 | |
143 | _scavenge_closure->set_scanned_cld(NULL); |
144 | } |
145 | } |
146 | |
147 | ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : |
148 | _g(g) |
149 | { |
150 | _boundary = _g->reserved().end(); |
151 | } |
152 | |
153 | DefNewGeneration::DefNewGeneration(ReservedSpace rs, |
154 | size_t initial_size, |
155 | size_t min_size, |
156 | size_t max_size, |
157 | const char* policy) |
158 | : Generation(rs, initial_size), |
159 | _preserved_marks_set(false /* in_c_heap */), |
160 | _promo_failure_drain_in_progress(false), |
161 | _should_allocate_from_space(false) |
162 | { |
163 | MemRegion cmr((HeapWord*)_virtual_space.low(), |
164 | (HeapWord*)_virtual_space.high()); |
165 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
166 | |
167 | gch->rem_set()->resize_covered_region(cmr); |
168 | |
169 | _eden_space = new ContiguousSpace(); |
170 | _from_space = new ContiguousSpace(); |
171 | _to_space = new ContiguousSpace(); |
172 | |
173 | if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) { |
    vm_exit_during_initialization("Could not allocate a new gen space");
175 | } |
176 | |
177 | // Compute the maximum eden and survivor space sizes. These sizes |
178 | // are computed assuming the entire reserved space is committed. |
179 | // These values are exported as performance counters. |
180 | uintx size = _virtual_space.reserved_size(); |
181 | _max_survivor_size = compute_survivor_size(size, SpaceAlignment); |
182 | _max_eden_size = size - (2*_max_survivor_size); |
183 | |
184 | // allocate the performance counters |
185 | |
186 | // Generation counters -- generation 0, 3 subspaces |
  _gen_counters = new GenerationCounters("new", 0, 3,
      min_size, max_size, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);
197 | |
198 | compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
199 | update_counters(); |
200 | _old_gen = NULL; |
201 | _tenuring_threshold = MaxTenuringThreshold; |
202 | _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; |
203 | |
204 | _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); |
205 | } |
206 | |
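// Lay out eden and the two survivor spaces within the currently committed part
// of the generation. A non-zero 'minimum_eden_size' indicates that eden already
// contains live data, so the new eden must be at least that large and its used
// portion is not cleared.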
207 | void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, |
208 | bool clear_space, |
209 | bool mangle_space) { |
210 | // If the spaces are being cleared (only done at heap initialization |
211 | // currently), the survivor spaces need not be empty. |
212 | // Otherwise, no care is taken for used areas in the survivor spaces |
213 | // so check. |
214 | assert(clear_space || (to()->is_empty() && from()->is_empty()), |
         "Initialization of the survivor spaces assumes these are empty");
216 | |
217 | // Compute sizes |
218 | uintx size = _virtual_space.committed_size(); |
219 | uintx survivor_size = compute_survivor_size(size, SpaceAlignment); |
220 | uintx eden_size = size - (2*survivor_size); |
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
222 | |
223 | if (eden_size < minimum_eden_size) { |
224 | // May happen due to 64Kb rounding, if so adjust eden size back up |
225 | minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment); |
226 | uintx maximum_survivor_size = (size - minimum_eden_size) / 2; |
227 | uintx unaligned_survivor_size = |
228 | align_down(maximum_survivor_size, SpaceAlignment); |
229 | survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment); |
230 | eden_size = size - (2*survivor_size); |
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
233 | } |
234 | |
235 | char *eden_start = _virtual_space.low(); |
236 | char *from_start = eden_start + eden_size; |
237 | char *to_start = from_start + survivor_size; |
238 | char *to_end = to_start + survivor_size; |
239 | |
  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");
244 | |
245 | MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start); |
246 | MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); |
247 | MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); |
248 | |
249 | // A minimum eden size implies that there is a part of eden that |
250 | // is being used and that affects the initialization of any |
251 | // newly formed eden. |
252 | bool live_in_eden = minimum_eden_size > 0; |
253 | |
254 | // If not clearing the spaces, do some checking to verify that |
  // the spaces are already mangled.
256 | if (!clear_space) { |
257 | // Must check mangling before the spaces are reshaped. Otherwise, |
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
260 | // is not properly mangled. |
261 | if (ZapUnusedHeapArea) { |
262 | HeapWord* limit = (HeapWord*) _virtual_space.high(); |
263 | eden()->check_mangled_unused_area(limit); |
264 | from()->check_mangled_unused_area(limit); |
265 | to()->check_mangled_unused_area(limit); |
266 | } |
267 | } |
268 | |
269 | // Reset the spaces for their new regions. |
270 | eden()->initialize(edenMR, |
271 | clear_space && !live_in_eden, |
272 | SpaceDecorator::Mangle); |
273 | // If clear_space and live_in_eden, we will not have cleared any |
274 | // portion of eden above its top. This can cause newly |
275 | // expanded space not to be mangled if using ZapUnusedHeapArea. |
276 | // We explicitly do such mangling here. |
277 | if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) { |
278 | eden()->mangle_unused_area(); |
279 | } |
280 | from()->initialize(fromMR, clear_space, mangle_space); |
281 | to()->initialize(toMR, clear_space, mangle_space); |
282 | |
283 | // Set next compaction spaces. |
284 | eden()->set_next_compaction_space(from()); |
285 | // The to-space is normally empty before a compaction so need |
286 | // not be considered. The exception is during promotion |
287 | // failure handling when to-space can contain live objects. |
288 | from()->set_next_compaction_space(NULL); |
289 | } |
290 | |
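// Swap the roles of the survivor spaces after a scavenge: the former to-space,
// which now holds the survivors, becomes the new from-space and vice versa.
// The space counters are swapped along with the space pointers so that
// _from_counters keeps tracking from() and _to_counters keeps tracking to().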
291 | void DefNewGeneration::swap_spaces() { |
292 | ContiguousSpace* s = from(); |
293 | _from_space = to(); |
294 | _to_space = s; |
295 | eden()->set_next_compaction_space(from()); |
296 | // The to-space is normally empty before a compaction so need |
297 | // not be considered. The exception is during promotion |
298 | // failure handling when to-space can contain live objects. |
299 | from()->set_next_compaction_space(NULL); |
300 | |
301 | if (UsePerfData) { |
302 | CSpaceCounters* c = _from_counters; |
303 | _from_counters = _to_counters; |
304 | _to_counters = c; |
305 | } |
306 | } |
307 | |
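// Commit 'bytes' more of the reserved space. Newly committed memory is mangled
// immediately when ZapUnusedHeapArea is set. If a GCLocker is active, a note is
// logged that the heap was expanded in place of the blocked collection.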
308 | bool DefNewGeneration::expand(size_t bytes) { |
309 | MutexLocker x(ExpandHeap_lock); |
310 | HeapWord* prev_high = (HeapWord*) _virtual_space.high(); |
311 | bool success = _virtual_space.expand_by(bytes); |
312 | if (success && ZapUnusedHeapArea) { |
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
316 | HeapWord* new_high = (HeapWord*) _virtual_space.high(); |
317 | MemRegion mangle_region(prev_high, new_high); |
318 | SpaceMangler::mangle_region(mangle_region); |
319 | } |
320 | |
  // Do not attempt to expand to the reserve size. The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call
  // could cause an undue expansion: for example, the first
  // expand might fail for unknown reasons while the second
  // succeeds and expands the heap to its maximum value.
329 | if (GCLocker::is_active()) { |
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
331 | } |
332 | |
333 | return success; |
334 | } |
335 | |
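// Compute a desired new-generation size by adding NewSizeThreadIncrease bytes
// per non-daemon thread to 'new_size_candidate' and aligning the result up.
// If any intermediate computation would overflow, the unmodified
// 'new_size_before' is returned instead.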
336 | size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate, |
337 | size_t new_size_before, |
338 | size_t alignment) const { |
339 | size_t desired_new_size = new_size_before; |
340 | |
341 | if (NewSizeThreadIncrease > 0) { |
342 | int threads_count; |
343 | size_t thread_increase_size = 0; |
344 | |
345 | // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'. |
346 | threads_count = Threads::number_of_non_daemon_threads(); |
347 | if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) { |
348 | thread_increase_size = threads_count * NewSizeThreadIncrease; |
349 | |
350 | // 2. Check an overflow at 'new_size_candidate + thread_increase_size'. |
351 | if (new_size_candidate <= max_uintx - thread_increase_size) { |
352 | new_size_candidate += thread_increase_size; |
353 | |
354 | // 3. Check an overflow at 'align_up'. |
355 | size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1)); |
356 | if (new_size_candidate <= aligned_max) { |
357 | desired_new_size = align_up(new_size_candidate, alignment); |
358 | } |
359 | } |
360 | } |
361 | } |
362 | |
363 | return desired_new_size; |
364 | } |
365 | |
366 | void DefNewGeneration::compute_new_size() { |
367 | // This is called after a GC that includes the old generation, so from-space |
368 | // will normally be empty. |
369 | // Note that we check both spaces, since if scavenge failed they revert roles. |
  // If either is not empty, we bail out (otherwise we would have to relocate the objects).
371 | if (!from()->is_empty() || !to()->is_empty()) { |
372 | return; |
373 | } |
374 | |
375 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
376 | |
377 | size_t old_size = gch->old_gen()->capacity(); |
378 | size_t new_size_before = _virtual_space.committed_size(); |
379 | size_t min_new_size = initial_size(); |
380 | size_t max_new_size = reserved().byte_size(); |
381 | assert(min_new_size <= new_size_before && |
382 | new_size_before <= max_new_size, |
         "just checking");
384 | // All space sizes must be multiples of Generation::GenGrain. |
385 | size_t alignment = Generation::GenGrain; |
386 | |
387 | int threads_count = 0; |
388 | size_t thread_increase_size = 0; |
389 | |
390 | size_t new_size_candidate = old_size / NewRatio; |
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow happens.
393 | size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment); |
394 | |
395 | // Adjust new generation size |
396 | desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size); |
  assert(desired_new_size <= max_new_size, "just checking");
398 | |
399 | bool changed = false; |
400 | if (desired_new_size > new_size_before) { |
401 | size_t change = desired_new_size - new_size_before; |
    assert(change % alignment == 0, "just checking");
403 | if (expand(change)) { |
404 | changed = true; |
405 | } |
406 | // If the heap failed to expand to the desired size, |
407 | // "changed" will be false. If the expansion failed |
408 | // (and at this point it was expected to succeed), |
409 | // ignore the failure (leaving "changed" as false). |
410 | } |
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Only shrink if eden is empty; otherwise we would have to relocate live objects.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
415 | _virtual_space.shrink_by(change); |
416 | changed = true; |
417 | } |
418 | if (changed) { |
419 | // The spaces have already been mangled at this point but |
420 | // may not have been cleared (set top = bottom) and should be. |
421 | // Mangling was done when the heap was being expanded. |
422 | compute_space_boundaries(eden()->used(), |
423 | SpaceDecorator::Clear, |
424 | SpaceDecorator::DontMangle); |
425 | MemRegion cmr((HeapWord*)_virtual_space.low(), |
426 | (HeapWord*)_virtual_space.high()); |
427 | gch->rem_set()->resize_covered_region(cmr); |
428 | |
    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        " [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
436 | } |
437 | } |
438 | |
439 | void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) { |
  assert(false, "NYI -- are you sure you want to call this?");
441 | } |
442 | |
443 | |
444 | size_t DefNewGeneration::capacity() const { |
445 | return eden()->capacity() |
446 | + from()->capacity(); // to() is only used during scavenge |
447 | } |
448 | |
449 | |
450 | size_t DefNewGeneration::used() const { |
451 | return eden()->used() |
452 | + from()->used(); // to() is only used during scavenge |
453 | } |
454 | |
455 | |
456 | size_t DefNewGeneration::free() const { |
457 | return eden()->free() |
458 | + from()->free(); // to() is only used during scavenge |
459 | } |
460 | |
461 | size_t DefNewGeneration::max_capacity() const { |
462 | const size_t reserved_bytes = reserved().byte_size(); |
463 | return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment); |
464 | } |
465 | |
466 | size_t DefNewGeneration::unsafe_max_alloc_nogc() const { |
467 | return eden()->free(); |
468 | } |
469 | |
470 | size_t DefNewGeneration::capacity_before_gc() const { |
471 | return eden()->capacity(); |
472 | } |
473 | |
474 | size_t DefNewGeneration::contiguous_available() const { |
475 | return eden()->free(); |
476 | } |
477 | |
478 | |
479 | HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); } |
480 | HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); } |
481 | |
482 | void DefNewGeneration::object_iterate(ObjectClosure* blk) { |
483 | eden()->object_iterate(blk); |
484 | from()->object_iterate(blk); |
485 | } |
486 | |
487 | |
488 | void DefNewGeneration::space_iterate(SpaceClosure* blk, |
489 | bool usedOnly) { |
490 | blk->do_space(eden()); |
491 | blk->do_space(from()); |
492 | blk->do_space(to()); |
493 | } |
494 | |
495 | // The last collection bailed out, we are running out of heap space, |
496 | // so we try to allocate the from-space, too. |
497 | HeapWord* DefNewGeneration::allocate_from_space(size_t size) { |
498 | bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc(); |
499 | |
500 | // If the Heap_lock is not locked by this thread, this will be called |
501 | // again later with the Heap_lock held. |
502 | bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread())); |
503 | |
504 | HeapWord* result = NULL; |
505 | if (do_alloc) { |
506 | result = from()->allocate(size); |
507 | } |
508 | |
  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
                       size,
                       GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                         "true" : "false",
                       Heap_lock->is_locked() ? "locked" : "unlocked",
                       from()->free(),
                       should_try_alloc ? "" : " should_allocate_from_space: NOT",
                       do_alloc ? " Heap_lock is not owned by self" : "",
                       result == NULL ? "NULL" : "object");
518 | |
519 | return result; |
520 | } |
521 | |
522 | HeapWord* DefNewGeneration::expand_and_allocate(size_t size, |
523 | bool is_tlab, |
524 | bool parallel) { |
525 | // We don't attempt to expand the young generation (but perhaps we should.) |
526 | return allocate(size, is_tlab); |
527 | } |
528 | |
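// Recompute the tenuring threshold from the age table, targeting a survivor
// occupancy of TargetSurvivorRatio percent, and publish the threshold and the
// desired survivor size through the GC policy counters.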
529 | void DefNewGeneration::adjust_desired_tenuring_threshold() { |
530 | // Set the desired survivor size to half the real survivor space |
531 | size_t const survivor_capacity = to()->capacity() / HeapWordSize; |
532 | size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100); |
533 | |
534 | _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size); |
535 | |
536 | if (UsePerfData) { |
537 | GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters(); |
538 | gc_counters->tenuring_threshold()->set_value(_tenuring_threshold); |
539 | gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize); |
540 | } |
541 | |
542 | age_table()->print_age_table(_tenuring_threshold); |
543 | } |
544 | |
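// Perform a copying collection of the young generation: scan the roots and the
// dirty cards of the old generation, evacuate live objects into to-space (or
// promote them to the old generation), process discovered references and weak
// oops, and finally swap the survivor spaces unless a promotion failure occurred.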
545 | void DefNewGeneration::collect(bool full, |
546 | bool clear_all_soft_refs, |
547 | size_t size, |
548 | bool is_tlab) { |
  assert(full || size > 0, "otherwise we don't want to collect");
550 | |
551 | SerialHeap* heap = SerialHeap::heap(); |
552 | |
553 | _gc_timer->register_gc_start(); |
554 | DefNewTracer gc_tracer; |
555 | gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer->gc_start()); |
556 | |
557 | _old_gen = heap->old_gen(); |
558 | |
559 | // If the next generation is too full to accommodate promotion |
560 | // from this generation, pass on collection; let the next generation |
561 | // do it. |
562 | if (!collection_attempt_is_safe()) { |
    log_trace(gc)(":: Collection attempt not safe ::");
564 | heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one |
565 | return; |
566 | } |
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
568 | |
569 | init_assuming_no_promotion_failure(); |
570 | |
  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause());
572 | |
573 | heap->trace_heap_before_gc(&gc_tracer); |
574 | |
575 | // These can be shared for all code paths |
576 | IsAliveClosure is_alive(this); |
577 | ScanWeakRefClosure scan_weak_ref(this); |
578 | |
579 | age_table()->clear(); |
580 | to()->clear(SpaceDecorator::Mangle); |
581 | // The preserved marks should be empty at the start of the GC. |
582 | _preserved_marks_set.init(1); |
583 | |
584 | heap->rem_set()->prepare_for_younger_refs_iterate(false); |
585 | |
586 | assert(heap->no_allocs_since_save_marks(), |
         "save marks have not been newly set.");
588 | |
589 | FastScanClosure fsc_with_no_gc_barrier(this, false); |
590 | FastScanClosure fsc_with_gc_barrier(this, true); |
591 | |
592 | CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier, |
593 | heap->rem_set()->cld_rem_set()->accumulate_modified_oops()); |
594 | |
595 | set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); |
596 | FastEvacuateFollowersClosure evacuate_followers(heap, |
597 | &fsc_with_no_gc_barrier, |
598 | &fsc_with_gc_barrier); |
599 | |
600 | assert(heap->no_allocs_since_save_marks(), |
         "save marks have not been newly set.");
602 | |
603 | { |
604 | // DefNew needs to run with n_threads == 0, to make sure the serial |
605 | // version of the card table scanning code is used. |
606 | // See: CardTableRS::non_clean_card_iterate_possibly_parallel. |
607 | StrongRootsScope srs(0); |
608 | |
609 | heap->young_process_roots(&srs, |
610 | &fsc_with_no_gc_barrier, |
611 | &fsc_with_gc_barrier, |
612 | &cld_scan_closure); |
613 | } |
614 | |
615 | // "evacuate followers". |
616 | evacuate_followers.do_void(); |
617 | |
618 | FastKeepAliveClosure keep_alive(this, &scan_weak_ref); |
619 | ReferenceProcessor* rp = ref_processor(); |
620 | rp->setup_policy(clear_all_soft_refs); |
621 | ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues()); |
622 | const ReferenceProcessorStats& stats = |
623 | rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, |
624 | NULL, &pt); |
625 | gc_tracer.report_gc_reference_stats(stats); |
626 | gc_tracer.report_tenuring_threshold(tenuring_threshold()); |
627 | pt.print_all_references(); |
628 | |
  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
630 | |
631 | WeakProcessor::weak_oops_do(&is_alive, &keep_alive); |
632 | |
633 | // Verify that the usage of keep_alive didn't copy any objects. |
  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
635 | |
636 | if (!_promotion_failed) { |
637 | // Swap the survivor spaces. |
638 | eden()->clear(SpaceDecorator::Mangle); |
639 | from()->clear(SpaceDecorator::Mangle); |
640 | if (ZapUnusedHeapArea) { |
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
648 | to()->mangle_unused_area(); |
649 | } |
650 | swap_spaces(); |
651 | |
    assert(to()->is_empty(), "to space should be empty now");
653 | |
654 | adjust_desired_tenuring_threshold(); |
655 | |
656 | // A successful scavenge should restart the GC time limit count which is |
657 | // for full GC's. |
658 | AdaptiveSizePolicy* size_policy = heap->size_policy(); |
659 | size_policy->reset_gc_overhead_limit_count(); |
    assert(!heap->incremental_collection_failed(), "Should be clear");
661 | } else { |
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
663 | _promo_failure_scan_stack.clear(true); // Clear cached segments. |
664 | |
665 | remove_forwarding_pointers(); |
    log_info(gc, promotion)("Promotion failed");
667 | // Add to-space to the list of space to compact |
668 | // when a promotion failure has occurred. In that |
669 | // case there can be live objects in to-space |
670 | // as a result of a partial evacuation of eden |
671 | // and from-space. |
672 | swap_spaces(); // For uniformity wrt ParNewGeneration. |
673 | from()->set_next_compaction_space(to()); |
674 | heap->set_incremental_collection_failed(); |
675 | |
676 | // Inform the next generation that a promotion failure occurred. |
677 | _old_gen->promotion_failure_occurred(); |
678 | gc_tracer.report_promotion_failed(_promotion_failed_info); |
679 | |
680 | // Reset the PromotionFailureALot counters. |
681 | NOT_PRODUCT(heap->reset_promotion_should_fail();) |
682 | } |
683 | // We should have processed and cleared all the preserved marks. |
684 | _preserved_marks_set.reclaim(); |
685 | // set new iteration safe limit for the survivor spaces |
686 | from()->set_concurrent_iteration_safe_limit(from()->top()); |
687 | to()->set_concurrent_iteration_safe_limit(to()->top()); |
688 | |
  // We need to use a monotonically non-decreasing time in ms,
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
692 | jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; |
693 | update_time_of_last_gc(now); |
694 | |
695 | heap->trace_heap_after_gc(&gc_tracer); |
696 | |
697 | _gc_timer->register_gc_end(); |
698 | |
699 | gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); |
700 | } |
701 | |
702 | void DefNewGeneration::init_assuming_no_promotion_failure() { |
703 | _promotion_failed = false; |
704 | _promotion_failed_info.reset(); |
705 | from()->set_next_compaction_space(NULL); |
706 | } |
707 | |
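// After a promotion failure, objects that could not be copied were forwarded to
// themselves. Walk eden and from-space to undo those forwarding pointers, then
// restore any object marks that were preserved.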
708 | void DefNewGeneration::remove_forwarding_pointers() { |
709 | RemoveForwardedPointerClosure rspc; |
710 | eden()->object_iterate(&rspc); |
711 | from()->object_iterate(&rspc); |
712 | restore_preserved_marks(); |
713 | } |
714 | |
715 | void DefNewGeneration::restore_preserved_marks() { |
716 | SharedRestorePreservedMarksTaskExecutor task_executor(NULL); |
717 | _preserved_marks_set.restore(&task_executor); |
718 | } |
719 | |
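// Called when an object can be copied neither into to-space nor into the old
// generation. The object's mark is preserved if necessary, the object is
// forwarded to itself so that other references to it are updated consistently,
// and it is pushed on a scan stack so that its fields still get scavenged.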
720 | void DefNewGeneration::handle_promotion_failure(oop old) { |
  log_debug(gc, promotion)("Promotion failure size = %d", old->size());
722 | |
723 | _promotion_failed = true; |
724 | _promotion_failed_info.register_copy_failure(old->size()); |
725 | _preserved_marks_set.get()->push_if_necessary(old, old->mark_raw()); |
726 | // forward to self |
727 | old->forward_to(old); |
728 | |
729 | _promo_failure_scan_stack.push(old); |
730 | |
731 | if (!_promo_failure_drain_in_progress) { |
732 | // prevent recursion in copy_to_survivor_space() |
733 | _promo_failure_drain_in_progress = true; |
734 | drain_promo_failure_scan_stack(); |
735 | _promo_failure_drain_in_progress = false; |
736 | } |
737 | } |
738 | |
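// Copy a live object into to-space if it is still below the tenuring threshold,
// otherwise (or if to-space allocation fails) promote it into the old
// generation. On success the old copy is forwarded to the new location; on
// promotion failure the object is handled by handle_promotion_failure() and the
// original (self-forwarded) object is returned.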
739 | oop DefNewGeneration::copy_to_survivor_space(oop old) { |
740 | assert(is_in_reserved(old) && !old->is_forwarded(), |
         "shouldn't be scavenging this oop");
742 | size_t s = old->size(); |
743 | oop obj = NULL; |
744 | |
745 | // Try allocating obj in to-space (unless too old) |
746 | if (old->age() < tenuring_threshold()) { |
747 | obj = (oop) to()->allocate_aligned(s); |
748 | } |
749 | |
750 | // Otherwise try allocating obj tenured |
751 | if (obj == NULL) { |
752 | obj = _old_gen->promote(old, s); |
753 | if (obj == NULL) { |
754 | handle_promotion_failure(old); |
755 | return old; |
756 | } |
757 | } else { |
758 | // Prefetch beyond obj |
759 | const intx interval = PrefetchCopyIntervalInBytes; |
760 | Prefetch::write(obj, interval); |
761 | |
762 | // Copy obj |
763 | Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s); |
764 | |
765 | // Increment age if obj still in new generation |
766 | obj->incr_age(); |
767 | age_table()->add(obj, s); |
768 | } |
769 | |
770 | // Done, insert forward pointer to obj in this header |
771 | old->forward_to(obj); |
772 | |
773 | return obj; |
774 | } |
775 | |
776 | void DefNewGeneration::drain_promo_failure_scan_stack() { |
777 | while (!_promo_failure_scan_stack.is_empty()) { |
778 | oop obj = _promo_failure_scan_stack.pop(); |
779 | obj->oop_iterate(_promo_failure_scan_stack_closure); |
780 | } |
781 | } |
782 | |
783 | void DefNewGeneration::save_marks() { |
784 | eden()->set_saved_mark(); |
785 | to()->set_saved_mark(); |
786 | from()->set_saved_mark(); |
787 | } |
788 | |
789 | |
790 | void DefNewGeneration::reset_saved_marks() { |
791 | eden()->reset_saved_mark(); |
792 | to()->reset_saved_mark(); |
793 | from()->reset_saved_mark(); |
794 | } |
795 | |
796 | |
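// Returns true only if there have been no allocations in any of the spaces
// since the last save_marks(). Eden and from-space are asserted rather than
// tested, since allocations there would violate the expected invariant.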
797 | bool DefNewGeneration::no_allocs_since_save_marks() { |
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
800 | return to()->saved_mark_at_top(); |
801 | } |
802 | |
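// Offer the unused part of to-space to the requesting (old) generation as
// scratch memory. Nothing is contributed if the request comes from this
// generation, if a promotion failure left live objects in to-space, or if the
// free area is smaller than MinFreeScratchWords.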
803 | void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, |
804 | size_t max_alloc_words) { |
805 | if (requestor == this || _promotion_failed) { |
806 | return; |
807 | } |
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
809 | |
810 | /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. |
811 | if (to_space->top() > to_space->bottom()) { |
812 | trace("to_space not empty when contribute_scratch called"); |
813 | } |
814 | */ |
815 | |
816 | ContiguousSpace* to_space = to(); |
  assert(to_space->end() >= to_space->top(), "pointers out of order");
818 | size_t free_words = pointer_delta(to_space->end(), to_space->top()); |
819 | if (free_words >= MinFreeScratchWords) { |
820 | ScratchBlock* sb = (ScratchBlock*)to_space->top(); |
821 | sb->num_words = free_words; |
822 | sb->next = list; |
823 | list = sb; |
824 | } |
825 | } |
826 | |
827 | void DefNewGeneration::reset_scratch() { |
828 | // If contributing scratch in to_space, mangle all of |
829 | // to_space if ZapUnusedHeapArea. This is needed because |
830 | // top is not maintained while using to-space as scratch. |
831 | if (ZapUnusedHeapArea) { |
832 | to()->mangle_unused_area_complete(); |
833 | } |
834 | } |
835 | |
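// A young collection may only be attempted if to-space is empty (otherwise
// survivors would have nowhere to go) and the old generation reports that it
// can absorb a worst-case promotion of everything currently in use here.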
836 | bool DefNewGeneration::collection_attempt_is_safe() { |
837 | if (!to()->is_empty()) { |
    log_trace(gc)(":: to is not empty ::");
839 | return false; |
840 | } |
841 | if (_old_gen == NULL) { |
842 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
843 | _old_gen = gch->old_gen(); |
844 | } |
845 | return _old_gen->promotion_attempt_is_safe(used()); |
846 | } |
847 | |
848 | void DefNewGeneration::gc_epilogue(bool full) { |
849 | DEBUG_ONLY(static bool seen_incremental_collection_failed = false;) |
850 | |
  assert(!GCLocker::is_active(), "We should not be executing here");
852 | // Check if the heap is approaching full after a collection has |
853 | // been done. Generally the young generation is empty at |
854 | // a minimum at the end of a collection. If it is not, then |
855 | // the heap is approaching full. |
856 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
857 | if (full) { |
858 | DEBUG_ONLY(seen_incremental_collection_failed = false;) |
859 | if (!collection_attempt_is_safe() && !_eden_space->is_empty()) { |
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
862 | gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state |
863 | set_should_allocate_from_space(); // we seem to be running out of space |
864 | } else { |
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
867 | gch->clear_incremental_collection_failed(); // We just did a full collection |
868 | clear_should_allocate_from_space(); // if set |
869 | } |
870 | } else { |
871 | #ifdef ASSERT |
872 | // It is possible that incremental_collection_failed() == true |
873 | // here, because an attempted scavenge did not succeed. The policy |
874 | // is normally expected to cause a full collection which should |
875 | // clear that condition, so we should not be here twice in a row |
876 | // with incremental_collection_failed() == true without having done |
877 | // a full collection in between. |
878 | if (!seen_incremental_collection_failed && |
879 | gch->incremental_collection_failed()) { |
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
882 | seen_incremental_collection_failed = true; |
883 | } else if (seen_incremental_collection_failed) { |
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
890 | seen_incremental_collection_failed = false; |
891 | } |
892 | #endif // ASSERT |
893 | } |
894 | |
895 | if (ZapUnusedHeapArea) { |
896 | eden()->check_mangled_unused_area_complete(); |
897 | from()->check_mangled_unused_area_complete(); |
898 | to()->check_mangled_unused_area_complete(); |
899 | } |
900 | |
901 | if (!CleanChunkPoolAsync) { |
902 | Chunk::clean_chunk_pool(); |
903 | } |
904 | |
905 | // update the generation and space performance counters |
906 | update_counters(); |
907 | gch->counters()->update_counters(); |
908 | } |
909 | |
910 | void DefNewGeneration::record_spaces_top() { |
  assert(ZapUnusedHeapArea, "Not mangling unused space");
912 | eden()->set_top_for_allocations(); |
913 | to()->set_top_for_allocations(); |
914 | from()->set_top_for_allocations(); |
915 | } |
916 | |
917 | void DefNewGeneration::ref_processor_init() { |
918 | Generation::ref_processor_init(); |
919 | } |
920 | |
921 | |
922 | void DefNewGeneration::update_counters() { |
923 | if (UsePerfData) { |
924 | _eden_counters->update_all(); |
925 | _from_counters->update_all(); |
926 | _to_counters->update_all(); |
927 | _gen_counters->update_all(); |
928 | } |
929 | } |
930 | |
931 | void DefNewGeneration::verify() { |
932 | eden()->verify(); |
933 | from()->verify(); |
934 | to()->verify(); |
935 | } |
936 | |
937 | void DefNewGeneration::print_on(outputStream* st) const { |
938 | Generation::print_on(st); |
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
944 | to()->print_on(st); |
945 | } |
946 | |
947 | |
948 | const char* DefNewGeneration::name() const { |
  return "def new generation";
950 | } |
951 | |
952 | // Moved from inline file as they are not called inline |
953 | CompactibleSpace* DefNewGeneration::first_compaction_space() const { |
954 | return eden(); |
955 | } |
956 | |
957 | HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) { |
958 | // This is the slow-path allocation for the DefNewGeneration. |
959 | // Most allocations are fast-path in compiled code. |
960 | // We try to allocate from the eden. If that works, we are happy. |
961 | // Note that since DefNewGeneration supports lock-free allocation, we |
962 | // have to use it here, as well. |
963 | HeapWord* result = eden()->par_allocate(word_size); |
964 | if (result != NULL) { |
965 | if (_old_gen != NULL) { |
966 | _old_gen->sample_eden_chunk(); |
967 | } |
968 | } else { |
969 | // If the eden is full and the last collection bailed out, we are running |
970 | // out of heap space, and we try to allocate the from-space, too. |
971 | // allocate_from_space can't be inlined because that would introduce a |
972 | // circular dependency at compile time. |
973 | result = allocate_from_space(word_size); |
974 | } |
975 | return result; |
976 | } |
977 | |
978 | HeapWord* DefNewGeneration::par_allocate(size_t word_size, |
979 | bool is_tlab) { |
980 | HeapWord* res = eden()->par_allocate(word_size); |
981 | if (_old_gen != NULL) { |
982 | _old_gen->sample_eden_chunk(); |
983 | } |
984 | return res; |
985 | } |
986 | |
987 | size_t DefNewGeneration::tlab_capacity() const { |
988 | return eden()->capacity(); |
989 | } |
990 | |
991 | size_t DefNewGeneration::tlab_used() const { |
992 | return eden()->used(); |
993 | } |
994 | |
995 | size_t DefNewGeneration::unsafe_max_tlab_alloc() const { |
996 | return unsafe_max_alloc_nogc(); |
997 | } |
998 | |