1 | /* |
2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "classfile/stringTable.hpp" |
27 | #include "gc/cms/cmsHeap.inline.hpp" |
28 | #include "gc/cms/compactibleFreeListSpace.hpp" |
29 | #include "gc/cms/concurrentMarkSweepGeneration.hpp" |
30 | #include "gc/cms/parNewGeneration.inline.hpp" |
31 | #include "gc/cms/parOopClosures.inline.hpp" |
32 | #include "gc/serial/defNewGeneration.inline.hpp" |
33 | #include "gc/shared/adaptiveSizePolicy.hpp" |
34 | #include "gc/shared/ageTable.inline.hpp" |
35 | #include "gc/shared/copyFailedInfo.hpp" |
36 | #include "gc/shared/gcHeapSummary.hpp" |
37 | #include "gc/shared/gcTimer.hpp" |
38 | #include "gc/shared/gcTrace.hpp" |
39 | #include "gc/shared/gcTraceTime.inline.hpp" |
40 | #include "gc/shared/genOopClosures.inline.hpp" |
41 | #include "gc/shared/generation.hpp" |
42 | #include "gc/shared/plab.inline.hpp" |
43 | #include "gc/shared/preservedMarks.inline.hpp" |
44 | #include "gc/shared/referencePolicy.hpp" |
45 | #include "gc/shared/referenceProcessorPhaseTimes.hpp" |
46 | #include "gc/shared/space.hpp" |
47 | #include "gc/shared/spaceDecorator.hpp" |
48 | #include "gc/shared/strongRootsScope.hpp" |
49 | #include "gc/shared/taskqueue.inline.hpp" |
50 | #include "gc/shared/weakProcessor.hpp" |
51 | #include "gc/shared/workgroup.hpp" |
52 | #include "gc/shared/workerPolicy.hpp" |
53 | #include "logging/log.hpp" |
54 | #include "logging/logStream.hpp" |
55 | #include "memory/iterator.inline.hpp" |
56 | #include "memory/resourceArea.hpp" |
57 | #include "oops/access.inline.hpp" |
58 | #include "oops/compressedOops.inline.hpp" |
59 | #include "oops/objArrayOop.hpp" |
60 | #include "oops/oop.inline.hpp" |
61 | #include "runtime/atomic.hpp" |
62 | #include "runtime/handles.inline.hpp" |
63 | #include "runtime/java.hpp" |
64 | #include "runtime/thread.inline.hpp" |
65 | #include "utilities/copy.hpp" |
66 | #include "utilities/globalDefinitions.hpp" |
67 | #include "utilities/stack.inline.hpp" |
68 | |
69 | ParScanThreadState::ParScanThreadState(Space* to_space_, |
70 | ParNewGeneration* young_gen_, |
71 | Generation* old_gen_, |
72 | int thread_num_, |
73 | ObjToScanQueueSet* work_queue_set_, |
74 | Stack<oop, mtGC>* overflow_stacks_, |
75 | PreservedMarks* preserved_marks_, |
76 | size_t desired_plab_sz_, |
77 | TaskTerminator& term_) : |
78 | _work_queue(work_queue_set_->queue(thread_num_)), |
79 | _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL), |
80 | _preserved_marks(preserved_marks_), |
81 | _to_space_alloc_buffer(desired_plab_sz_), |
82 | _to_space_closure(young_gen_, this), |
83 | _old_gen_closure(young_gen_, this), |
84 | _to_space_root_closure(young_gen_, this), |
85 | _older_gen_closure(young_gen_, this), |
86 | _old_gen_root_closure(young_gen_, this), |
87 | _evacuate_followers(this, &_to_space_closure, &_old_gen_closure, |
88 | &_to_space_root_closure, young_gen_, &_old_gen_root_closure, |
89 | work_queue_set_, term_.terminator()), |
90 | _is_alive_closure(young_gen_), |
91 | _scan_weak_ref_closure(young_gen_, this), |
92 | _keep_alive_closure(&_scan_weak_ref_closure), |
93 | _to_space(to_space_), |
94 | _young_gen(young_gen_), |
95 | _old_gen(old_gen_), |
96 | _young_old_boundary(NULL), |
97 | _thread_num(thread_num_), |
98 | _ageTable(false), // false ==> not the global age table, no perf data. |
99 | _to_space_full(false), |
100 | _strong_roots_time(0.0), |
101 | _term_time(0.0) |
102 | { |
103 | #if TASKQUEUE_STATS |
104 | _term_attempts = 0; |
105 | _overflow_refills = 0; |
106 | _overflow_refill_objs = 0; |
107 | #endif // TASKQUEUE_STATS |
108 | |
109 | _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num()); |
110 | _start = os::elapsedTime(); |
111 | _old_gen_closure.set_generation(old_gen_); |
112 | _old_gen_root_closure.set_generation(old_gen_); |
113 | } |
114 | |
115 | void ParScanThreadState::record_survivor_plab(HeapWord* plab_start, |
116 | size_t plab_word_size) { |
117 | ChunkArray* sca = survivor_chunk_array(); |
118 | if (sca != NULL) { |
119 | // A non-null SCA implies that we want the PLAB data recorded. |
120 | sca->record_sample(plab_start, plab_word_size); |
121 | } |
122 | } |
123 | |
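// Chunk-wise scanning applies only to object arrays that are longer than
// ParGCArrayScanChunk and were actually copied; a self-forwarded array
// (new_obj == old_obj, i.e. promotion failure) is scanned in one piece.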
124 | bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const { |
125 | return new_obj->is_objArray() && |
126 | arrayOop(new_obj)->length() > ParGCArrayScanChunk && |
127 | new_obj != old_obj; |
128 | } |
129 | |
130 | void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) { |
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");
135 | |
136 | objArrayOop obj = objArrayOop(old->forwardee()); |
137 | // Process ParGCArrayScanChunk elements now |
138 | // and push the remainder back onto queue |
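  // Worked example (illustrative numbers): with ParGCArrayScanChunk == 50 and
  // a 512-element array, each pass scans 50 elements and records the progress
  // index in the old copy's length field (0, 50, 100, ...); once the
  // remainder is at most 2 * 50 (here, the final 62 elements), it is scanned
  // in one go and the true length is restored.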
139 | int start = arrayOop(old)->length(); |
140 | int end = obj->length(); |
141 | int remainder = end - start; |
  assert(start <= end, "just checking");
143 | if (remainder > 2 * ParGCArrayScanChunk) { |
144 | // Test above combines last partial chunk with a full chunk |
145 | end = start + ParGCArrayScanChunk; |
146 | arrayOop(old)->set_length(end); |
147 | // Push remainder. |
148 | bool ok = work_queue()->push(old); |
    assert(ok, "just popped, push must be okay");
150 | } else { |
151 | // Restore length so that it can be used if there |
152 | // is a promotion failure and forwarding pointers |
153 | // must be removed. |
154 | arrayOop(old)->set_length(end); |
155 | } |
156 | |
  // Process our range of indices (the object header is handled with the
  // first chunk). Note: with compressed oops, end should be kept even so
  // that chunk boundaries stay HeapWord-aligned.
159 | if ((HeapWord *)obj < young_old_boundary()) { |
160 | // object is in to_space |
161 | obj->oop_iterate_range(&_to_space_closure, start, end); |
162 | } else { |
163 | // object is in old generation |
164 | obj->oop_iterate_range(&_old_gen_closure, start, end); |
165 | } |
166 | } |
167 | |
168 | void ParScanThreadState::trim_queues(int max_size) { |
169 | ObjToScanQueue* queue = work_queue(); |
170 | do { |
171 | while (queue->size() > (juint)max_size) { |
172 | oop obj_to_scan; |
173 | if (queue->pop_local(obj_to_scan)) { |
174 | if ((HeapWord *)obj_to_scan < young_old_boundary()) { |
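          // A forwarded objArray whose forwardee differs from itself is a
          // partially-scanned chunked array (its length field records the
          // scan progress); a self-forwarded object is a promotion-failure
          // victim and is scanned in its entirety below.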
175 | if (obj_to_scan->is_objArray() && |
176 | obj_to_scan->is_forwarded() && |
177 | obj_to_scan->forwardee() != obj_to_scan) { |
178 | scan_partial_array_and_push_remainder(obj_to_scan); |
179 | } else { |
180 | // object is in to_space |
181 | obj_to_scan->oop_iterate(&_to_space_closure); |
182 | } |
183 | } else { |
184 | // object is in old generation |
185 | obj_to_scan->oop_iterate(&_old_gen_closure); |
186 | } |
187 | } |
188 | } |
189 | // For the case of compressed oops, we have a private, non-shared |
190 | // overflow stack, so we eagerly drain it so as to more evenly |
191 | // distribute load early. Note: this may be good to do in |
192 | // general rather than delay for the final stealing phase. |
193 | // If applicable, we'll transfer a set of objects over to our |
194 | // work queue, allowing them to be stolen and draining our |
195 | // private overflow stack. |
196 | } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this)); |
197 | } |
198 | |
199 | bool ParScanThreadState::take_from_overflow_stack() { |
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
202 | ObjToScanQueue* queue = work_queue(); |
203 | Stack<oop, mtGC>* const of_stack = overflow_stack(); |
204 | const size_t num_overflow_elems = of_stack->size(); |
205 | const size_t space_available = queue->max_elems() - queue->size(); |
206 | const size_t num_take_elems = MIN3(space_available / 4, |
207 | (size_t)ParGCDesiredObjsFromOverflowList, |
208 | num_overflow_elems); |
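  // Illustrative arithmetic (assumed flag values): with a queue capacity of
  // 16384, 16000 entries in use, and ParGCDesiredObjsFromOverflowList == 20,
  // this is MIN3(384 / 4, 20, num_overflow_elems), i.e. at most 20 objects
  // per call.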
209 | // Transfer the most recent num_take_elems from the overflow |
210 | // stack to our work queue. |
211 | for (size_t i = 0; i != num_take_elems; i++) { |
212 | oop cur = of_stack->pop(); |
213 | oop obj_to_push = cur->forwardee(); |
    assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
219 | obj_to_push = cur; |
220 | } |
221 | bool ok = queue->push(obj_to_push); |
    assert(ok, "Should have succeeded");
223 | } |
  assert(young_gen()->overflow_list() == NULL, "Error");
225 | return num_take_elems > 0; // was something transferred? |
226 | } |
227 | |
void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}
233 | |
234 | HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) { |
235 | // If the object is small enough, try to reallocate the buffer. |
236 | HeapWord* obj = NULL; |
237 | if (!_to_space_full) { |
238 | PLAB* const plab = to_space_alloc_buffer(); |
239 | Space* const sp = to_space(); |
240 | if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) { |
241 | // Is small enough; abandon this buffer and start a new one. |
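      // PLAB allocation has already failed, so the space left in the buffer
      // is (roughly, modulo the alignment reserve) less than word_sz; if
      // word_sz is also below ParallelGCBufferWastePct (e.g. 10%) of the
      // PLAB size, retiring wastes at most about that fraction of the buffer.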
242 | plab->retire(); |
243 | // The minimum size has to be twice SurvivorAlignmentInBytes to |
244 | // allow for padding used in the alignment of 1 word. A padding |
245 | // of 1 is too small for a filler word so the padding size will |
246 | // be increased by SurvivorAlignmentInBytes. |
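      // For example, with SurvivorAlignmentInBytes == 64 on a 64-bit VM
      // (8-byte HeapWords), min_usable_size is 2 * (64 >> 3) == 16 words.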
247 | size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize); |
248 | size_t buf_size = MAX2(plab->word_sz(), min_usable_size); |
249 | HeapWord* buf_space = sp->par_allocate(buf_size); |
250 | if (buf_space == NULL) { |
251 | const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize; |
252 | size_t free_bytes = sp->free(); |
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
256 | buf_space = sp->par_allocate(buf_size); |
257 | free_bytes = sp->free(); |
258 | } |
259 | } |
260 | if (buf_space != NULL) { |
261 | plab->set_buf(buf_space, buf_size); |
262 | record_survivor_plab(buf_space, buf_size); |
263 | obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes); |
264 | // Note that we cannot compare buf_size < word_sz below |
265 | // because of AlignmentReserve (see PLAB::allocate()). |
266 | assert(obj != NULL || plab->words_remaining() < word_sz, |
267 | "Else should have been able to allocate requested object size " |
268 | SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes " |
269 | SIZE_FORMAT ", words_remaining " SIZE_FORMAT, |
270 | word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining()); |
271 | // It's conceivable that we may be able to use the |
272 | // buffer we just grabbed for subsequent small requests |
273 | // even if not for this one. |
274 | } else { |
275 | // We're used up. |
276 | _to_space_full = true; |
277 | } |
278 | } else { |
279 | // Too large; allocate the object individually. |
280 | obj = sp->par_allocate(word_sz); |
281 | } |
282 | } |
283 | return obj; |
284 | } |
285 | |
286 | void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) { |
287 | to_space_alloc_buffer()->undo_allocation(obj, word_sz); |
288 | } |
289 | |
290 | void ParScanThreadState::print_promotion_failure_size() { |
291 | if (_promotion_failed_info.has_failed()) { |
    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
293 | _thread_num, _promotion_failed_info.first_size()); |
294 | } |
295 | } |
296 | |
297 | class ParScanThreadStateSet: StackObj { |
298 | public: |
  // Initializes states for the specified number of threads.
300 | ParScanThreadStateSet(int num_threads, |
301 | Space& to_space, |
302 | ParNewGeneration& young_gen, |
303 | Generation& old_gen, |
304 | ObjToScanQueueSet& queue_set, |
305 | Stack<oop, mtGC>* overflow_stacks_, |
306 | PreservedMarksSet& preserved_marks_set, |
307 | size_t desired_plab_sz, |
308 | TaskTerminator& term); |
309 | |
310 | ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); } |
311 | |
312 | inline ParScanThreadState& thread_state(int i); |
313 | |
314 | void trace_promotion_failed(const YoungGCTracer* gc_tracer); |
315 | void reset(uint active_workers, bool promotion_failed); |
316 | void flush(); |
317 | |
318 | #if TASKQUEUE_STATS |
319 | static void |
320 | print_termination_stats_hdr(outputStream* const st); |
321 | void print_termination_stats(); |
322 | static void |
323 | print_taskqueue_stats_hdr(outputStream* const st); |
324 | void print_taskqueue_stats(); |
325 | void reset_stats(); |
326 | #endif // TASKQUEUE_STATS |
327 | |
328 | private: |
329 | TaskTerminator& _term; |
330 | ParNewGeneration& _young_gen; |
331 | Generation& _old_gen; |
332 | ParScanThreadState* _per_thread_states; |
333 | const int _num_threads; |
334 | public: |
335 | bool is_valid(int id) const { return id < _num_threads; } |
336 | ParallelTaskTerminator* terminator() { return _term.terminator(); } |
337 | }; |
338 | |
339 | ParScanThreadStateSet::ParScanThreadStateSet(int num_threads, |
340 | Space& to_space, |
341 | ParNewGeneration& young_gen, |
342 | Generation& old_gen, |
343 | ObjToScanQueueSet& queue_set, |
344 | Stack<oop, mtGC>* overflow_stacks, |
345 | PreservedMarksSet& preserved_marks_set, |
346 | size_t desired_plab_sz, |
347 | TaskTerminator& term) |
348 | : _term(term), |
349 | _young_gen(young_gen), |
350 | _old_gen(old_gen), |
351 | _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)), |
352 | _num_threads(num_threads) |
353 | { |
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
357 | // Initialize states. |
358 | for (int i = 0; i < num_threads; ++i) { |
359 | new(_per_thread_states + i) |
360 | ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set, |
361 | overflow_stacks, preserved_marks_set.get(i), |
362 | desired_plab_sz, term); |
363 | } |
364 | } |
365 | |
366 | inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) { |
  assert(i >= 0 && i < _num_threads, "sanity check!");
368 | return _per_thread_states[i]; |
369 | } |
370 | |
371 | void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) { |
372 | for (int i = 0; i < _num_threads; ++i) { |
373 | if (thread_state(i).promotion_failed()) { |
374 | gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info()); |
375 | thread_state(i).promotion_failed_info().reset(); |
376 | } |
377 | } |
378 | } |
379 | |
380 | void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) { |
381 | _term.terminator()->reset_for_reuse(active_threads); |
382 | if (promotion_failed) { |
383 | for (int i = 0; i < _num_threads; ++i) { |
384 | thread_state(i).print_promotion_failure_size(); |
385 | } |
386 | } |
387 | } |
388 | |
389 | #if TASKQUEUE_STATS |
390 | void ParScanThreadState::reset_stats() { |
391 | taskqueue_stats().reset(); |
392 | _term_attempts = 0; |
393 | _overflow_refills = 0; |
394 | _overflow_refill_objs = 0; |
395 | } |
396 | |
397 | void ParScanThreadStateSet::reset_stats() { |
398 | for (int i = 0; i < _num_threads; ++i) { |
399 | thread_state(i).reset_stats(); |
400 | } |
401 | } |
402 | |
403 | void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) { |
404 | st->print_raw_cr("GC Termination Stats" ); |
405 | st->print_raw_cr(" elapsed --strong roots-- -------termination-------" ); |
406 | st->print_raw_cr("thr ms ms % ms % attempts" ); |
407 | st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" ); |
408 | } |
409 | |
410 | void ParScanThreadStateSet::print_termination_stats() { |
411 | Log(gc, task, stats) log; |
412 | if (!log.is_debug()) { |
413 | return; |
414 | } |
415 | |
416 | ResourceMark rm; |
417 | LogStream ls(log.debug()); |
418 | outputStream* st = &ls; |
419 | |
420 | print_termination_stats_hdr(st); |
421 | |
422 | for (int i = 0; i < _num_threads; ++i) { |
423 | const ParScanThreadState & pss = thread_state(i); |
424 | const double elapsed_ms = pss.elapsed_time() * 1000.0; |
425 | const double s_roots_ms = pss.strong_roots_time() * 1000.0; |
426 | const double term_ms = pss.term_time() * 1000.0; |
427 | st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8), |
428 | i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, |
429 | term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts()); |
430 | } |
431 | } |
432 | |
433 | // Print stats related to work queue activity. |
434 | void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) { |
435 | st->print_raw_cr("GC Task Stats" ); |
436 | st->print_raw("thr " ); TaskQueueStats::print_header(1, st); st->cr(); |
437 | st->print_raw("--- " ); TaskQueueStats::print_header(2, st); st->cr(); |
438 | } |
439 | |
440 | void ParScanThreadStateSet::print_taskqueue_stats() { |
441 | if (!log_is_enabled(Trace, gc, task, stats)) { |
442 | return; |
443 | } |
444 | Log(gc, task, stats) log; |
445 | ResourceMark rm; |
446 | LogStream ls(log.trace()); |
447 | outputStream* st = &ls; |
448 | print_taskqueue_stats_hdr(st); |
449 | |
450 | TaskQueueStats totals; |
451 | for (int i = 0; i < _num_threads; ++i) { |
452 | const ParScanThreadState & pss = thread_state(i); |
453 | const TaskQueueStats & stats = pss.taskqueue_stats(); |
454 | st->print("%3d " , i); stats.print(st); st->cr(); |
455 | totals += stats; |
456 | |
457 | if (pss.overflow_refills() > 0) { |
458 | st->print_cr(" " SIZE_FORMAT_W(10) " overflow refills " |
459 | SIZE_FORMAT_W(10) " overflow objects" , |
460 | pss.overflow_refills(), pss.overflow_refill_objs()); |
461 | } |
462 | } |
463 | st->print("tot " ); totals.print(st); st->cr(); |
464 | |
465 | DEBUG_ONLY(totals.verify()); |
466 | } |
467 | #endif // TASKQUEUE_STATS |
468 | |
469 | void ParScanThreadStateSet::flush() { |
470 | // Work in this loop should be kept as lightweight as |
471 | // possible since this might otherwise become a bottleneck |
472 | // to scaling. Should we add heavy-weight work into this |
473 | // loop, consider parallelizing the loop into the worker threads. |
474 | for (int i = 0; i < _num_threads; ++i) { |
475 | ParScanThreadState& par_scan_state = thread_state(i); |
476 | |
477 | // Flush stats related to To-space PLAB activity and |
478 | // retire the last buffer. |
479 | par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats()); |
480 | |
481 | // Every thread has its own age table. We need to merge |
482 | // them all into one. |
483 | AgeTable *local_table = par_scan_state.age_table(); |
484 | _young_gen.age_table()->merge(local_table); |
485 | |
486 | // Inform old gen that we're done. |
487 | _old_gen.par_promote_alloc_done(i); |
488 | } |
489 | |
490 | if (UseConcMarkSweepGC) { |
491 | // We need to call this even when ResizeOldPLAB is disabled |
492 | // so as to avoid breaking some asserts. While we may be able |
    // to avoid this by reorganizing the code a bit, I am loath
494 | // to do that unless we find cases where ergo leads to bad |
495 | // performance. |
496 | CompactibleFreeListSpaceLAB::compute_desired_plab_size(); |
497 | } |
498 | } |
499 | |
500 | ParScanClosure::ParScanClosure(ParNewGeneration* g, |
501 | ParScanThreadState* par_scan_state) : |
502 | OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) { |
503 | _boundary = _g->reserved().end(); |
504 | } |
505 | |
506 | void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); } |
507 | void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); } |
508 | |
509 | void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); } |
510 | void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); } |
511 | |
512 | ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g, |
513 | ParScanThreadState* par_scan_state) |
514 | : ScanWeakRefClosure(g), _par_scan_state(par_scan_state) |
515 | {} |
516 | |
517 | #ifdef WIN32 |
518 | #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */ |
519 | #endif |
520 | |
521 | ParEvacuateFollowersClosure::ParEvacuateFollowersClosure( |
522 | ParScanThreadState* par_scan_state_, |
523 | ParScanWithoutBarrierClosure* to_space_closure_, |
524 | ParScanWithBarrierClosure* old_gen_closure_, |
525 | ParRootScanWithoutBarrierClosure* to_space_root_closure_, |
526 | ParNewGeneration* par_gen_, |
527 | ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_, |
528 | ObjToScanQueueSet* task_queues_, |
529 | ParallelTaskTerminator* terminator_) : |
530 | |
531 | _par_scan_state(par_scan_state_), |
532 | _to_space_closure(to_space_closure_), |
533 | _to_space_root_closure(to_space_root_closure_), |
534 | _old_gen_closure(old_gen_closure_), |
535 | _old_gen_root_closure(old_gen_root_closure_), |
536 | _par_gen(par_gen_), |
537 | _task_queues(task_queues_), |
538 | _terminator(terminator_) |
539 | {} |
540 | |
541 | void ParEvacuateFollowersClosure::do_void() { |
542 | ObjToScanQueue* work_q = par_scan_state()->work_queue(); |
543 | |
544 | while (true) { |
545 | // Scan to-space and old-gen objs until we run out of both. |
546 | oop obj_to_scan; |
547 | par_scan_state()->trim_queues(0); |
548 | |
549 | // We have no local work, attempt to steal from other threads. |
550 | |
    // Attempt to steal work from other threads' task queues.
552 | if (task_queues()->steal(par_scan_state()->thread_num(), |
553 | obj_to_scan)) { |
554 | bool res = work_q->push(obj_to_scan); |
      assert(res, "Empty queue should have room for a push.");
556 | |
557 | // If successful, goto Start. |
558 | continue; |
559 | |
560 | // Try global overflow list. |
561 | } else if (par_gen()->take_from_overflow_list(par_scan_state())) { |
562 | continue; |
563 | } |
564 | |
565 | // Otherwise, offer termination. |
566 | par_scan_state()->start_term_time(); |
567 | if (terminator()->offer_termination()) break; |
568 | par_scan_state()->end_term_time(); |
569 | } |
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
572 | // Finish the last termination pause. |
573 | par_scan_state()->end_term_time(); |
574 | } |
575 | |
576 | ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, |
577 | Generation* old_gen, |
578 | HeapWord* young_old_boundary, |
579 | ParScanThreadStateSet* state_set, |
580 | StrongRootsScope* strong_roots_scope) : |
    AbstractGangTask("ParNewGeneration collection"),
582 | _young_gen(young_gen), _old_gen(old_gen), |
583 | _young_old_boundary(young_old_boundary), |
584 | _state_set(state_set), |
585 | _strong_roots_scope(strong_roots_scope) |
586 | {} |
587 | |
588 | void ParNewGenTask::work(uint worker_id) { |
589 | CMSHeap* heap = CMSHeap::heap(); |
590 | // Since this is being done in a separate thread, need new resource |
591 | // and handle marks. |
592 | ResourceMark rm; |
593 | HandleMark hm; |
594 | |
595 | ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id); |
  assert(_state_set->is_valid(worker_id), "Should not have been called");
597 | |
598 | par_scan_state.set_young_old_boundary(_young_old_boundary); |
599 | |
600 | CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(), |
601 | heap->rem_set()->cld_rem_set()->accumulate_modified_oops()); |
602 | |
603 | par_scan_state.start_strong_roots(); |
604 | heap->young_process_roots(_strong_roots_scope, |
605 | &par_scan_state.to_space_root_closure(), |
606 | &par_scan_state.older_gen_closure(), |
607 | &cld_scan_closure); |
608 | |
609 | par_scan_state.end_strong_roots(); |
610 | |
611 | // "evacuate followers". |
612 | par_scan_state.evacuate_followers_closure().do_void(); |
613 | |
  // This will collapse this worker's promoted object list that's
  // created during the main parallel phase of ParNew. This has
  // to be called after all workers have finished promoting objects
  // and scanning promoted objects. It should be safe calling it from
  // here, given that we can only reach here after all threads have
  // offered termination, i.e., after there is no more work to be
  // done. It will also disable promotion tracking for the rest of
  // this GC as it's not necessary to be on during reference processing.
622 | _old_gen->par_oop_since_save_marks_iterate_done((int) worker_id); |
623 | } |
624 | |
625 | ParNewGeneration::ParNewGeneration(ReservedSpace rs, |
626 | size_t initial_byte_size, |
627 | size_t min_byte_size, |
628 | size_t max_byte_size) |
  : DefNewGeneration(rs, initial_byte_size, min_byte_size, max_byte_size, "CMS young collection pauses"),
    _plab_stats("Young", YoungPLABSize, PLABWeight),
631 | _overflow_list(NULL), |
632 | _is_alive_closure(this) |
633 | { |
634 | NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;) |
635 | NOT_PRODUCT(_num_par_pushes = 0;) |
636 | _task_queues = new ObjToScanQueueSet(ParallelGCThreads); |
  guarantee(_task_queues != NULL, "task_queues allocation failure.");
638 | |
639 | for (uint i = 0; i < ParallelGCThreads; i++) { |
640 | ObjToScanQueue *q = new ObjToScanQueue(); |
    guarantee(q != NULL, "work_queue Allocation failure.");
642 | _task_queues->register_queue(i, q); |
643 | } |
644 | |
645 | for (uint i = 0; i < ParallelGCThreads; i++) { |
646 | _task_queues->queue(i)->initialize(); |
647 | } |
648 | |
649 | _overflow_stacks = NULL; |
650 | if (ParGCUseLocalOverflow) { |
    // typedef to workaround NEW_C_HEAP_ARRAY macro, which cannot deal with ','
652 | typedef Stack<oop, mtGC> GCOopStack; |
653 | |
654 | _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC); |
655 | for (size_t i = 0; i < ParallelGCThreads; ++i) { |
656 | new (_overflow_stacks + i) Stack<oop, mtGC>(); |
657 | } |
658 | } |
659 | |
660 | if (UsePerfData) { |
661 | EXCEPTION_MARK; |
662 | ResourceMark rm; |
663 | |
664 | const char* cname = |
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
666 | PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, |
667 | ParallelGCThreads, CHECK); |
668 | } |
669 | } |
670 | |
671 | // ParNewGeneration:: |
672 | ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) : |
673 | DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {} |
674 | |
675 | template <class T> |
676 | void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) { |
677 | #ifdef ASSERT |
678 | { |
679 | oop obj = RawAccess<IS_NOT_NULL>::oop_load(p); |
680 | // We never expect to see a null reference being processed |
681 | // as a weak reference. |
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
683 | } |
684 | #endif // ASSERT |
685 | |
686 | Devirtualizer::do_oop_no_verify(_par_cl, p); |
687 | |
688 | if (CMSHeap::heap()->is_in_reserved(p)) { |
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
690 | _rs->write_ref_field_gc_par(p, obj); |
691 | } |
692 | } |
693 | |
694 | void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); } |
695 | void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); } |
696 | |
697 | // ParNewGeneration:: |
698 | KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) : |
699 | DefNewGeneration::KeepAliveClosure(cl) {} |
700 | |
701 | template <class T> |
702 | void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) { |
703 | #ifdef ASSERT |
704 | { |
705 | oop obj = RawAccess<IS_NOT_NULL>::oop_load(p); |
706 | // We never expect to see a null reference being processed |
707 | // as a weak reference. |
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
709 | } |
710 | #endif // ASSERT |
711 | |
712 | Devirtualizer::do_oop_no_verify(_cl, p); |
713 | |
714 | if (CMSHeap::heap()->is_in_reserved(p)) { |
715 | oop obj = RawAccess<IS_NOT_NULL>::oop_load(p); |
716 | _rs->write_ref_field_gc_par(p, obj); |
717 | } |
718 | } |
719 | |
720 | void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); } |
721 | void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); } |
722 | |
723 | template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) { |
724 | T heap_oop = RawAccess<>::oop_load(p); |
725 | if (!CompressedOops::is_null(heap_oop)) { |
726 | oop obj = CompressedOops::decode_not_null(heap_oop); |
727 | if ((HeapWord*)obj < _boundary) { |
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
729 | oop new_obj = obj->is_forwarded() |
730 | ? obj->forwardee() |
731 | : _g->DefNewGeneration::copy_to_survivor_space(obj); |
732 | RawAccess<IS_NOT_NULL>::oop_store(p, new_obj); |
733 | } |
734 | if (_gc_barrier) { |
735 | // If p points to a younger generation, mark the card. |
736 | if ((HeapWord*)obj < _gen_boundary) { |
737 | _rs->write_ref_field_gc_par(p, obj); |
738 | } |
739 | } |
740 | } |
741 | } |
742 | |
743 | void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); } |
744 | void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); } |
745 | |
746 | class ParNewRefProcTaskProxy: public AbstractGangTask { |
747 | typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
748 | public: |
749 | ParNewRefProcTaskProxy(ProcessTask& task, |
750 | ParNewGeneration& young_gen, |
751 | Generation& old_gen, |
752 | HeapWord* young_old_boundary, |
753 | ParScanThreadStateSet& state_set); |
754 | |
755 | private: |
756 | virtual void work(uint worker_id); |
757 | private: |
758 | ParNewGeneration& _young_gen; |
759 | ProcessTask& _task; |
760 | Generation& _old_gen; |
761 | HeapWord* _young_old_boundary; |
762 | ParScanThreadStateSet& _state_set; |
763 | }; |
764 | |
765 | ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task, |
766 | ParNewGeneration& young_gen, |
767 | Generation& old_gen, |
768 | HeapWord* young_old_boundary, |
769 | ParScanThreadStateSet& state_set) |
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
771 | _young_gen(young_gen), |
772 | _task(task), |
773 | _old_gen(old_gen), |
774 | _young_old_boundary(young_old_boundary), |
775 | _state_set(state_set) |
776 | { } |
777 | |
778 | void ParNewRefProcTaskProxy::work(uint worker_id) { |
779 | ResourceMark rm; |
780 | HandleMark hm; |
781 | ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id); |
782 | par_scan_state.set_young_old_boundary(_young_old_boundary); |
783 | _task.work(worker_id, par_scan_state.is_alive_closure(), |
784 | par_scan_state.keep_alive_closure(), |
785 | par_scan_state.evacuate_followers_closure()); |
786 | } |
787 | |
788 | void ParNewRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) { |
789 | CMSHeap* gch = CMSHeap::heap(); |
790 | WorkGang* workers = gch->workers(); |
  assert(workers != NULL, "Need parallel worker threads.");
  assert(workers->active_workers() == ergo_workers,
         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
         ergo_workers, workers->active_workers());
795 | _state_set.reset(workers->active_workers(), _young_gen.promotion_failed()); |
796 | ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen, |
797 | _young_gen.reserved().end(), _state_set); |
798 | workers->run_task(&rp_task, workers->active_workers()); |
799 | _state_set.reset(0 /* bad value in debug if not reset */, |
800 | _young_gen.promotion_failed()); |
801 | } |
802 | |
803 | void ParNewRefProcTaskExecutor::set_single_threaded_mode() { |
804 | _state_set.flush(); |
805 | CMSHeap* heap = CMSHeap::heap(); |
806 | heap->save_marks(); |
807 | } |
808 | |
809 | ScanClosureWithParBarrier:: |
810 | ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) : |
811 | OopsInClassLoaderDataOrGenClosure(g), _g(g), _boundary(g->reserved().end()), _gc_barrier(gc_barrier) |
812 | { } |
813 | |
814 | template <typename OopClosureType1, typename OopClosureType2> |
815 | EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>:: |
816 | EvacuateFollowersClosureGeneral(CMSHeap* heap, |
817 | OopClosureType1* cur, |
818 | OopClosureType2* older) : |
819 | _heap(heap), |
820 | _scan_cur_or_nonheap(cur), _scan_older(older) |
821 | { } |
822 | |
823 | template <typename OopClosureType1, typename OopClosureType2> |
824 | void EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>::do_void() { |
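  // Scanning objects past the save marks can itself copy or promote further
  // objects beyond the re-advanced marks, so iterate until a fixed point is
  // reached where no generation has allocated since its marks were saved.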
825 | do { |
826 | _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, |
827 | _scan_older); |
828 | } while (!_heap->no_allocs_since_save_marks()); |
829 | } |
830 | |
831 | // A Generation that does parallel young-gen collection. |
832 | |
833 | void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) { |
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
835 | _promo_failure_scan_stack.clear(true); // Clear cached segments. |
836 | |
837 | remove_forwarding_pointers(); |
  log_info(gc, promotion)("Promotion failed");
839 | // All the spaces are in play for mark-sweep. |
840 | swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. |
841 | from()->set_next_compaction_space(to()); |
842 | gch->set_incremental_collection_failed(); |
843 | // Inform the next generation that a promotion failure occurred. |
844 | _old_gen->promotion_failure_occurred(); |
845 | |
846 | // Trace promotion failure in the parallel GC threads |
847 | thread_state_set.trace_promotion_failed(gc_tracer()); |
848 | // Single threaded code may have reported promotion failure to the global state |
849 | if (_promotion_failed_info.has_failed()) { |
850 | _gc_tracer.report_promotion_failed(_promotion_failed_info); |
851 | } |
852 | // Reset the PromotionFailureALot counters. |
853 | NOT_PRODUCT(gch->reset_promotion_should_fail();) |
854 | } |
855 | |
856 | void ParNewGeneration::collect(bool full, |
857 | bool clear_all_soft_refs, |
858 | size_t size, |
859 | bool is_tlab) { |
  assert(full || size > 0, "otherwise we don't want to collect");
861 | |
862 | CMSHeap* gch = CMSHeap::heap(); |
863 | |
864 | _gc_timer->register_gc_start(); |
865 | |
866 | AdaptiveSizePolicy* size_policy = gch->size_policy(); |
867 | WorkGang* workers = gch->workers(); |
  assert(workers != NULL, "Need workgang for parallel work");
869 | uint active_workers = |
870 | WorkerPolicy::calc_active_workers(workers->total_workers(), |
871 | workers->active_workers(), |
872 | Threads::number_of_non_daemon_threads()); |
873 | active_workers = workers->update_active_workers(active_workers); |
  log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
875 | |
876 | _old_gen = gch->old_gen(); |
877 | |
878 | // If the next generation is too full to accommodate worst-case promotion |
879 | // from this generation, pass on collection; let the next generation |
880 | // do it. |
881 | if (!collection_attempt_is_safe()) { |
882 | gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one |
883 | return; |
884 | } |
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
886 | |
887 | _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); |
888 | gch->trace_heap_before_gc(gc_tracer()); |
889 | |
890 | init_assuming_no_promotion_failure(); |
891 | |
  GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
893 | |
894 | age_table()->clear(); |
895 | to()->clear(SpaceDecorator::Mangle); |
896 | |
897 | gch->save_marks(); |
898 | |
899 | // Set the correct parallelism (number of queues) in the reference processor |
900 | ref_processor()->set_active_mt_degree(active_workers); |
901 | |
902 | // Need to initialize the preserved marks before the ThreadStateSet c'tor. |
903 | _preserved_marks_set.init(active_workers); |
904 | |
905 | // Always set the terminator for the active number of workers |
906 | // because only those workers go through the termination protocol. |
907 | TaskTerminator _term(active_workers, task_queues()); |
908 | ParScanThreadStateSet thread_state_set(active_workers, |
909 | *to(), *this, *_old_gen, *task_queues(), |
910 | _overflow_stacks, _preserved_marks_set, |
911 | desired_plab_sz(), _term); |
912 | |
913 | thread_state_set.reset(active_workers, promotion_failed()); |
914 | |
915 | { |
916 | StrongRootsScope srs(active_workers); |
917 | |
918 | ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs); |
919 | gch->rem_set()->prepare_for_younger_refs_iterate(true); |
920 | // It turns out that even when we're using 1 thread, doing the work in a |
921 | // separate thread causes wide variance in run times. We can't help this |
922 | // in the multi-threaded case, but we special-case n=1 here to get |
923 | // repeatable measurements of the 1-thread overhead of the parallel code. |
924 | // Might multiple workers ever be used? If yes, initialization |
925 | // has been done such that the single threaded path should not be used. |
926 | if (workers->total_workers() > 1) { |
927 | workers->run_task(&tsk); |
928 | } else { |
929 | tsk.work(0); |
930 | } |
931 | } |
932 | |
933 | thread_state_set.reset(0 /* Bad value in debug if not reset */, |
934 | promotion_failed()); |
935 | |
936 | // Trace and reset failed promotion info. |
937 | if (promotion_failed()) { |
938 | thread_state_set.trace_promotion_failed(gc_tracer()); |
939 | } |
940 | |
941 | // Process (weak) reference objects found during scavenge. |
942 | ReferenceProcessor* rp = ref_processor(); |
943 | IsAliveClosure is_alive(this); |
944 | ScanWeakRefClosure scan_weak_ref(this); |
945 | KeepAliveClosure keep_alive(&scan_weak_ref); |
946 | ScanClosure scan_without_gc_barrier(this, false); |
947 | ScanClosureWithParBarrier scan_with_gc_barrier(this, true); |
948 | set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); |
949 | EvacuateFollowersClosureGeneral<ScanClosure, ScanClosureWithParBarrier> evacuate_followers( |
950 | gch, &scan_without_gc_barrier, &scan_with_gc_barrier); |
951 | rp->setup_policy(clear_all_soft_refs); |
952 | // Can the mt_degree be set later (at run_task() time would be best)? |
953 | rp->set_active_mt_degree(active_workers); |
954 | ReferenceProcessorStats stats; |
955 | ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues()); |
956 | if (rp->processing_is_mt()) { |
957 | ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set); |
958 | stats = rp->process_discovered_references(&is_alive, &keep_alive, |
959 | &evacuate_followers, &task_executor, |
960 | &pt); |
961 | } else { |
962 | thread_state_set.flush(); |
963 | gch->save_marks(); |
964 | stats = rp->process_discovered_references(&is_alive, &keep_alive, |
965 | &evacuate_followers, NULL, |
966 | &pt); |
967 | } |
968 | _gc_tracer.report_gc_reference_stats(stats); |
969 | _gc_tracer.report_tenuring_threshold(tenuring_threshold()); |
970 | pt.print_all_references(); |
971 | |
  assert(gch->no_allocs_since_save_marks(), "evacuation should be done at this point");
973 | |
974 | WeakProcessor::weak_oops_do(&is_alive, &keep_alive); |
975 | |
976 | // Verify that the usage of keep_alive only forwarded |
977 | // the oops and did not find anything new to copy. |
  assert(gch->no_allocs_since_save_marks(), "unexpectedly copied objects");
979 | |
980 | if (!promotion_failed()) { |
981 | // Swap the survivor spaces. |
982 | eden()->clear(SpaceDecorator::Mangle); |
983 | from()->clear(SpaceDecorator::Mangle); |
984 | if (ZapUnusedHeapArea) { |
985 | // This is now done here because of the piece-meal mangling which |
986 | // can check for valid mangling at intermediate points in the |
987 | // collection(s). When a young collection fails to collect |
988 | // sufficient space resizing of the young generation can occur |
989 | // and redistribute the spaces in the young generation. Mangle |
990 | // here so that unzapped regions don't get distributed to |
991 | // other spaces. |
992 | to()->mangle_unused_area(); |
993 | } |
994 | swap_spaces(); |
995 | |
996 | // A successful scavenge should restart the GC time limit count which is |
997 | // for full GC's. |
998 | size_policy->reset_gc_overhead_limit_count(); |
999 | |
    assert(to()->is_empty(), "to space should be empty now");
1001 | |
1002 | adjust_desired_tenuring_threshold(); |
1003 | } else { |
1004 | handle_promotion_failed(gch, thread_state_set); |
1005 | } |
1006 | _preserved_marks_set.reclaim(); |
1007 | // set new iteration safe limit for the survivor spaces |
1008 | from()->set_concurrent_iteration_safe_limit(from()->top()); |
1009 | to()->set_concurrent_iteration_safe_limit(to()->top()); |
1010 | |
1011 | plab_stats()->adjust_desired_plab_sz(); |
1012 | |
1013 | TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats()); |
1014 | TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats()); |
1015 | |
  // We need to use a monotonically non-decreasing time in ms, or we will
  // see time-warp warnings; os::javaTimeMillis() does not guarantee
  // monotonicity, so derive the value from os::javaTimeNanos() instead.
1019 | jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; |
1020 | update_time_of_last_gc(now); |
1021 | |
1022 | rp->set_enqueuing_is_done(true); |
1023 | rp->verify_no_references_recorded(); |
1024 | |
1025 | gch->trace_heap_after_gc(gc_tracer()); |
1026 | |
1027 | _gc_timer->register_gc_end(); |
1028 | |
1029 | _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); |
1030 | } |
1031 | |
1032 | size_t ParNewGeneration::desired_plab_sz() { |
1033 | return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers()); |
1034 | } |
1035 | |
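// Spin-wait filler used by real_forwardee_slow(); accumulating into a global
// (rather than a local) presumably keeps the loop from being optimized away.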
1036 | static int sum; |
1037 | void ParNewGeneration::waste_some_time() { |
1038 | for (int i = 0; i < 100; i++) { |
1039 | sum += i; |
1040 | } |
1041 | } |
1042 | |
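// Sentinel mark-word value used to claim the right to forward an object
// before the real forwardee is known; 0x4 can never be a valid heap address,
// so readers can distinguish it from any genuine forwarding pointer.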
1043 | static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4); |
1044 | |
1045 | // Because of concurrency, there are times where an object for which |
1046 | // "is_forwarded()" is true contains an "interim" forwarding pointer |
1047 | // value. Such a value will soon be overwritten with a real value. |
1048 | // This method requires "obj" to have a forwarding pointer, and waits, if |
1049 | // necessary for a real one to be inserted, and returns it. |
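// For illustration: a promoting thread A CASes ClaimedForwardPtr into the
// forwarding slot to claim the object, performs the promotion, then installs
// the real forwardee; a thread B that reads ClaimedForwardPtr in the interim
// spins in real_forwardee_slow() until the real value appears.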
1050 | |
1051 | oop ParNewGeneration::real_forwardee(oop obj) { |
1052 | oop forward_ptr = obj->forwardee(); |
1053 | if (forward_ptr != ClaimedForwardPtr) { |
1054 | return forward_ptr; |
1055 | } else { |
1056 | return real_forwardee_slow(obj); |
1057 | } |
1058 | } |
1059 | |
1060 | oop ParNewGeneration::real_forwardee_slow(oop obj) { |
1061 | // Spin-read if it is claimed but not yet written by another thread. |
1062 | oop forward_ptr = obj->forwardee(); |
1063 | while (forward_ptr == ClaimedForwardPtr) { |
1064 | waste_some_time(); |
    assert(obj->is_forwarded(), "precondition");
1066 | forward_ptr = obj->forwardee(); |
1067 | } |
1068 | return forward_ptr; |
1069 | } |
1070 | |
1071 | // Multiple GC threads may try to promote an object. If the object |
1072 | // is successfully promoted, a forwarding pointer will be installed in |
1073 | // the object in the young generation. This method claims the right |
1074 | // to install the forwarding pointer before it copies the object, |
1075 | // thus avoiding the need to undo the copy as in |
1076 | // copy_to_survivor_space_avoiding_with_undo. |
1077 | |
1078 | oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state, |
1079 | oop old, |
1080 | size_t sz, |
1081 | markOop m) { |
1082 | // In the sequential version, this assert also says that the object is |
1083 | // not forwarded. That might not be the case here. It is the case that |
1084 | // the caller observed it to be not forwarded at some time in the past. |
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
1086 | |
1087 | // The sequential code read "old->age()" below. That doesn't work here, |
1088 | // since the age is in the mark word, and that might be overwritten with |
1089 | // a forwarding pointer by a parallel thread. So we must save the mark |
1090 | // word in a local and then analyze it. |
1091 | oopDesc dummyOld; |
1092 | dummyOld.set_mark_raw(m); |
1093 | assert(!dummyOld.is_forwarded(), |
1094 | "should not be called with forwarding pointer mark word." ); |
1095 | |
1096 | oop new_obj = NULL; |
1097 | oop forward_ptr; |
1098 | |
1099 | // Try allocating obj in to-space (unless too old) |
1100 | if (dummyOld.age() < tenuring_threshold()) { |
1101 | new_obj = (oop)par_scan_state->alloc_in_to_space(sz); |
1102 | } |
1103 | |
1104 | if (new_obj == NULL) { |
    // Either to-space is full or we decided to promote; try allocating obj tenured.
1106 | |
1107 | // Attempt to install a null forwarding pointer (atomically), |
1108 | // to claim the right to install the real forwarding pointer. |
1109 | forward_ptr = old->forward_to_atomic(ClaimedForwardPtr, m); |
1110 | if (forward_ptr != NULL) { |
1111 | // someone else beat us to it. |
1112 | return real_forwardee(old); |
1113 | } |
1114 | |
1115 | if (!_promotion_failed) { |
1116 | new_obj = _old_gen->par_promote(par_scan_state->thread_num(), |
1117 | old, m, sz); |
1118 | } |
1119 | |
1120 | if (new_obj == NULL) { |
1121 | // promotion failed, forward to self |
1122 | _promotion_failed = true; |
1123 | new_obj = old; |
1124 | |
1125 | par_scan_state->preserved_marks()->push_if_necessary(old, m); |
1126 | par_scan_state->register_promotion_failure(sz); |
1127 | } |
1128 | |
1129 | old->forward_to(new_obj); |
1130 | forward_ptr = NULL; |
1131 | } else { |
1132 | // Is in to-space; do copying ourselves. |
1133 | Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); |
    assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
1135 | forward_ptr = old->forward_to_atomic(new_obj, m); |
1136 | // Restore the mark word copied above. |
1137 | new_obj->set_mark_raw(m); |
1138 | // Increment age if obj still in new generation |
1139 | new_obj->incr_age(); |
1140 | par_scan_state->age_table()->add(new_obj, sz); |
1141 | } |
  assert(new_obj != NULL, "just checking");
1143 | |
1144 | // This code must come after the CAS test, or it will print incorrect |
1145 | // information. |
  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                  is_in_reserved(new_obj) ? "copying" : "tenuring",
                                  new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
1149 | |
1150 | if (forward_ptr == NULL) { |
1151 | oop obj_to_push = new_obj; |
1152 | if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { |
1153 | // Length field used as index of next element to be scanned. |
1154 | // Real length can be obtained from real_forwardee() |
1155 | arrayOop(old)->set_length(0); |
1156 | obj_to_push = old; |
1157 | assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, |
1158 | "push forwarded object" ); |
1159 | } |
1160 | // Push it on one of the queues of to-be-scanned objects. |
1161 | bool simulate_overflow = false; |
1162 | NOT_PRODUCT( |
1163 | if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { |
1164 | // simulate a stack overflow |
1165 | simulate_overflow = true; |
1166 | } |
1167 | ) |
1168 | if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { |
1169 | // Add stats for overflow pushes. |
      log_develop_trace(gc)("Queue Overflow");
1171 | push_on_overflow_list(old, par_scan_state); |
1172 | TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0)); |
1173 | } |
1174 | |
1175 | return new_obj; |
1176 | } |
1177 | |
1178 | // Oops. Someone beat us to it. Undo the allocation. Where did we |
1179 | // allocate it? |
1180 | if (is_in_reserved(new_obj)) { |
1181 | // Must be in to_space. |
    assert(to()->is_in_reserved(new_obj), "Checking");
1183 | if (forward_ptr == ClaimedForwardPtr) { |
1184 | // Wait to get the real forwarding pointer value. |
1185 | forward_ptr = real_forwardee(old); |
1186 | } |
1187 | par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); |
1188 | } |
1189 | |
1190 | return forward_ptr; |
1191 | } |
1192 | |
1193 | #ifndef PRODUCT |
1194 | // It's OK to call this multi-threaded; the worst thing |
1195 | // that can happen is that we'll get a bunch of closely |
1196 | // spaced simulated overflows, but that's OK, in fact |
1197 | // probably good as it would exercise the overflow code |
1198 | // under contention. |
1199 | bool ParNewGeneration::should_simulate_overflow() { |
1200 | if (_overflow_counter-- <= 0) { // just being defensive |
1201 | _overflow_counter = ParGCWorkQueueOverflowInterval; |
1202 | return true; |
1203 | } else { |
1204 | return false; |
1205 | } |
1206 | } |
1207 | #endif |
1208 | |
1209 | // In case we are using compressed oops, we need to be careful. |
1210 | // If the object being pushed is an object array, then its length |
1211 | // field keeps track of the "grey boundary" at which the next |
1212 | // incremental scan will be done (see ParGCArrayScanChunk). |
1213 | // When using compressed oops, this length field is kept in the |
1214 | // lower 32 bits of the erstwhile klass word and cannot be used |
1215 | // for the overflow chaining pointer (OCP below). As such the OCP |
1216 | // would itself need to be compressed into the top 32-bits in this |
1217 | // case. Unfortunately, see below, in the event that we have a |
1218 | // promotion failure, the node to be pushed on the list can be |
1219 | // outside of the Java heap, so the heap-based pointer compression |
1220 | // would not work (we would have potential aliasing between C-heap |
1221 | // and Java-heap pointers). For this reason, when using compressed |
1222 | // oops, we simply use a worker-thread-local, non-shared overflow |
1223 | // list in the form of a growable array, with a slightly different |
1224 | // overflow stack draining strategy. If/when we start using fat |
1225 | // stacks here, we can go back to using (fat) pointer chains |
1226 | // (although some performance comparisons would be useful since |
1227 | // single global lists have their own performance disadvantages |
1228 | // as we were made painfully aware not long ago, see 6786503). |
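// BUSY is a sentinel (never a valid oop) marking the global overflow list as
// claimed. Illustrative states of _overflow_list:
//   NULL      -- list is empty
//   BUSY      -- some thread is currently detaching a prefix
//   otherwise -- head of a chain of objects threaded via their klass words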
1229 | #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff)) |
1230 | void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) { |
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
1232 | if (ParGCUseLocalOverflow) { |
1233 | // In the case of compressed oops, we use a private, not-shared |
1234 | // overflow stack. |
1235 | par_scan_state->push_on_overflow_stack(from_space_obj); |
1236 | } else { |
    assert(!UseCompressedOops, "Error");
1238 | // if the object has been forwarded to itself, then we cannot |
1239 | // use the klass pointer for the linked list. Instead we have |
1240 | // to allocate an oopDesc in the C-Heap and use that for the linked list. |
1241 | // XXX This is horribly inefficient when a promotion failure occurs |
1242 | // and should be fixed. XXX FIX ME !!! |
1243 | #ifndef PRODUCT |
1244 | Atomic::inc(&_num_par_pushes); |
    assert(_num_par_pushes > 0, "Tautology");
1246 | #endif |
1247 | if (from_space_obj->forwardee() == from_space_obj) { |
1248 | oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC); |
1249 | listhead->forward_to(from_space_obj); |
1250 | from_space_obj = listhead; |
1251 | } |
1252 | oop observed_overflow_list = _overflow_list; |
1253 | oop cur_overflow_list; |
1254 | do { |
1255 | cur_overflow_list = observed_overflow_list; |
1256 | if (cur_overflow_list != BUSY) { |
1257 | from_space_obj->set_klass_to_list_ptr(cur_overflow_list); |
1258 | } else { |
1259 | from_space_obj->set_klass_to_list_ptr(NULL); |
1260 | } |
1261 | observed_overflow_list = |
1262 | Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list); |
1263 | } while (cur_overflow_list != observed_overflow_list); |
1264 | } |
1265 | } |
1266 | |
1267 | bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) { |
1268 | bool res; |
1269 | |
1270 | if (ParGCUseLocalOverflow) { |
1271 | res = par_scan_state->take_from_overflow_stack(); |
1272 | } else { |
    assert(!UseCompressedOops, "Error");
1274 | res = take_from_overflow_list_work(par_scan_state); |
1275 | } |
1276 | return res; |
1277 | } |
1278 | |
1279 | |
1280 | // *NOTE*: The overflow list manipulation code here and |
1281 | // in CMSCollector:: are very similar in shape, |
1282 | // except that in the CMS case we thread the objects |
1283 | // directly into the list via their mark word, and do |
1284 | // not need to deal with special cases below related |
1285 | // to chunking of object arrays and promotion failure |
1286 | // handling. |
1287 | // CR 6797058 has been filed to attempt consolidation of |
1288 | // the common code. |
1289 | // Because of the common code, if you make any changes in |
1290 | // the code below, please check the CMS version to see if |
1291 | // similar changes might be needed. |
1292 | // See CMSCollector::par_take_from_overflow_list() for |
1293 | // more extensive documentation comments. |
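// Rough shape of the operation (for orientation): atomically swap BUSY into
// _overflow_list to claim the whole chain, walk at most objsFromOverflow
// links to split off a prefix for this thread's work queue, then CAS the
// remaining suffix back onto the global list (splicing if others intervened).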
1294 | bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) { |
1295 | ObjToScanQueue* work_q = par_scan_state->work_queue(); |
1296 | // How many to take? |
1297 | size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
1298 | (size_t)ParGCDesiredObjsFromOverflowList); |
1299 | |
  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
1302 | if (_overflow_list == NULL) return false; |
1303 | |
1304 | // Otherwise, there was something there; try claiming the list. |
1305 | oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list)); |
1306 | // Trim off a prefix of at most objsFromOverflow items |
1307 | Thread* tid = Thread::current(); |
1308 | size_t spin_count = ParallelGCThreads; |
1309 | size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100); |
1310 | for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) { |
1311 | // someone grabbed it before we did ... |
1312 | // ... we spin for a short while... |
1313 | os::sleep(tid, sleep_time_millis, false); |
1314 | if (_overflow_list == NULL) { |
1315 | // nothing left to take |
1316 | return false; |
1317 | } else if (_overflow_list != BUSY) { |
1318 | // try and grab the prefix |
1319 | prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list)); |
1320 | } |
1321 | } |
1322 | if (prefix == NULL || prefix == BUSY) { |
1323 | // Nothing to take or waited long enough |
1324 | if (prefix == NULL) { |
1325 | // Write back the NULL in case we overwrote it with BUSY above |
1326 | // and it is still the same value. |
1327 | (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY); |
1328 | } |
1329 | return false; |
1330 | } |
  assert(prefix != NULL && prefix != BUSY, "Error");
1332 | oop cur = prefix; |
1333 | for (size_t i = 1; i < objsFromOverflow; ++i) { |
1334 | oop next = cur->list_ptr_from_klass(); |
1335 | if (next == NULL) break; |
1336 | cur = next; |
1337 | } |
  assert(cur != NULL, "Loop postcondition");
1339 | |
1340 | // Reattach remaining (suffix) to overflow list |
1341 | oop suffix = cur->list_ptr_from_klass(); |
1342 | if (suffix == NULL) { |
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
1345 | if (_overflow_list == BUSY) { |
1346 | (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY); |
1347 | } |
1348 | } else { |
    assert(suffix != BUSY, "Error");
1350 | // suffix will be put back on global list |
1351 | cur->set_klass_to_list_ptr(NULL); // break off suffix |
    // It's possible that the list is still in the empty (BUSY) state
1353 | // we left it in a short while ago; in that case we may be |
1354 | // able to place back the suffix. |
1355 | oop observed_overflow_list = _overflow_list; |
1356 | oop cur_overflow_list = observed_overflow_list; |
1357 | bool attached = false; |
1358 | while (observed_overflow_list == BUSY || observed_overflow_list == NULL) { |
1359 | observed_overflow_list = |
1360 | Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list); |
1361 | if (cur_overflow_list == observed_overflow_list) { |
1362 | attached = true; |
1363 | break; |
1364 | } else cur_overflow_list = observed_overflow_list; |
1365 | } |
1366 | if (!attached) { |
      // Too bad, someone else got in between; we'll need to do a splice.
1368 | // Find the last item of suffix list |
1369 | oop last = suffix; |
1370 | while (true) { |
1371 | oop next = last->list_ptr_from_klass(); |
1372 | if (next == NULL) break; |
1373 | last = next; |
1374 | } |
1375 | // Atomically prepend suffix to current overflow list |
1376 | observed_overflow_list = _overflow_list; |
1377 | do { |
1378 | cur_overflow_list = observed_overflow_list; |
1379 | if (cur_overflow_list != BUSY) { |
1380 | // Do the splice ... |
1381 | last->set_klass_to_list_ptr(cur_overflow_list); |
1382 | } else { // cur_overflow_list == BUSY |
1383 | last->set_klass_to_list_ptr(NULL); |
1384 | } |
1385 | observed_overflow_list = |
1386 | Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list); |
1387 | } while (cur_overflow_list != observed_overflow_list); |
1388 | } |
1389 | } |
1390 | |
1391 | // Push objects on prefix list onto this thread's work queue |
  assert(prefix != NULL && prefix != BUSY, "program logic");
1393 | cur = prefix; |
1394 | ssize_t n = 0; |
1395 | while (cur != NULL) { |
1396 | oop obj_to_push = cur->forwardee(); |
1397 | oop next = cur->list_ptr_from_klass(); |
1398 | cur->set_klass(obj_to_push->klass()); |
1399 | // This may be an array object that is self-forwarded. In that case, the list pointer |
1400 | // space, cur, is not in the Java heap, but rather in the C-heap and should be freed. |
1401 | if (!is_in_reserved(cur)) { |
1402 | // This can become a scaling bottleneck when there is work queue overflow coincident |
1403 | // with promotion failure. |
1404 | oopDesc* f = cur; |
1405 | FREE_C_HEAP_ARRAY(oopDesc, f); |
1406 | } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { |
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
1408 | obj_to_push = cur; |
1409 | } |
1410 | bool ok = work_q->push(obj_to_push); |
    assert(ok, "Should have succeeded");
1412 | cur = next; |
1413 | n++; |
1414 | } |
1415 | TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n)); |
1416 | #ifndef PRODUCT |
  assert(_num_par_pushes >= n, "Too many pops?");
1418 | Atomic::sub(n, &_num_par_pushes); |
1419 | #endif |
1420 | return true; |
1421 | } |
1422 | #undef BUSY |
1423 | |
1424 | void ParNewGeneration::ref_processor_init() { |
1425 | if (_ref_processor == NULL) { |
1426 | // Allocate and initialize a reference processor |
1427 | _span_based_discoverer.set_span(_reserved); |
1428 | _ref_processor = |
1429 | new ReferenceProcessor(&_span_based_discoverer, // span |
1430 | ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing |
1431 | ParallelGCThreads, // mt processing degree |
1432 | refs_discovery_is_mt(), // mt discovery |
1433 | ParallelGCThreads, // mt discovery degree |
1434 | refs_discovery_is_atomic(), // atomic_discovery |
1435 | NULL, // is_alive_non_header |
1436 | false); // disable adjusting number of processing threads |
1437 | } |
1438 | } |
1439 | |
1440 | const char* ParNewGeneration::name() const { |
1441 | return "par new generation" ; |
1442 | } |
1443 | |
1444 | void ParNewGeneration::restore_preserved_marks() { |
1445 | SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers()); |
1446 | _preserved_marks_set.restore(&task_executor); |
1447 | } |
1448 | |