1 | /* |
2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP |
26 | #define SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP |
27 | |
28 | #include "gc/cms/cmsOopClosures.hpp" |
29 | #include "gc/cms/gSpaceCounters.hpp" |
30 | #include "gc/cms/yieldingWorkgroup.hpp" |
31 | #include "gc/shared/cardGeneration.hpp" |
32 | #include "gc/shared/gcHeapSummary.hpp" |
33 | #include "gc/shared/gcStats.hpp" |
34 | #include "gc/shared/gcWhen.hpp" |
35 | #include "gc/shared/generationCounters.hpp" |
36 | #include "gc/shared/space.hpp" |
37 | #include "gc/shared/taskqueue.hpp" |
38 | #include "logging/log.hpp" |
39 | #include "memory/iterator.hpp" |
40 | #include "memory/virtualspace.hpp" |
41 | #include "runtime/mutexLocker.hpp" |
42 | #include "services/memoryService.hpp" |
43 | #include "utilities/bitMap.hpp" |
44 | #include "utilities/stack.hpp" |
45 | |
46 | // ConcurrentMarkSweepGeneration is in support of a concurrent |
47 | // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker |
48 | // style. We assume, for now, that this generation is always the |
// seniormost generation and, for simplicity in the first implementation,
// that it is a single compactible space. Neither of these restrictions
// appears essential; both will be relaxed in the future when more time is
// available to implement the greater generality (and there's a need for it).
54 | // |
55 | // Concurrent mode failures are currently handled by |
56 | // means of a sliding mark-compact. |
57 | |
58 | class AdaptiveSizePolicy; |
59 | class CMSCollector; |
60 | class CMSConcMarkingTask; |
61 | class CMSGCAdaptivePolicyCounters; |
62 | class CMSTracer; |
63 | class ConcurrentGCTimer; |
64 | class ConcurrentMarkSweepGeneration; |
65 | class ConcurrentMarkSweepPolicy; |
66 | class ConcurrentMarkSweepThread; |
67 | class CompactibleFreeListSpace; |
68 | class FreeChunk; |
69 | class ParNewGeneration; |
70 | class PromotionInfo; |
71 | class ScanMarkedObjectsAgainCarefullyClosure; |
72 | class SerialOldTracer; |
73 | |
74 | // A generic CMS bit map. It's the basis for both the CMS marking bit map |
75 | // as well as for the mod union table (in each case only a subset of the |
76 | // methods are used). This is essentially a wrapper around the BitMap class, |
77 | // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, |
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTable::card_shift - LogHeapWordSize.)
80 | // XXX 64-bit issues in BitMap? |
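//
// A minimal sketch (not part of this header) of the address <-> bit
// arithmetic implied by _shifter, for a map covering
// [_bmStartWord, _bmStartWord + _bmWordSize):
//
//   size_t heapWordToOffset(HeapWord* addr) const {
//     return pointer_delta(addr, _bmStartWord) >> _shifter;
//   }
//   HeapWord* offsetToHeapWord(size_t offset) const {
//     return _bmStartWord + (offset << _shifter);
//   }
//
// With _shifter == 0 (marking bit map) each HeapWord gets its own bit; with
// _shifter == CardTable::card_shift - LogHeapWordSize (mod union table) a
// single bit covers a card's worth of HeapWords.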
81 | class CMSBitMap { |
82 | friend class VMStructs; |
83 | |
84 | HeapWord* _bmStartWord; // base address of range covered by map |
85 | size_t _bmWordSize; // map size (in #HeapWords covered) |
86 | const int _shifter; // shifts to convert HeapWord to bit position |
87 | VirtualSpace _virtual_space; // underlying the bit map |
88 | BitMapView _bm; // the bit map itself |
89 | Mutex* const _lock; // mutex protecting _bm; |
90 | |
91 | public: |
92 | // constructor |
93 | CMSBitMap(int shifter, int mutex_rank, const char* mutex_name); |
94 | |
95 | // allocates the actual storage for the map |
96 | bool allocate(MemRegion mr); |
97 | // field getter |
98 | Mutex* lock() const { return _lock; } |
99 | // locking verifier convenience function |
100 | void assert_locked() const PRODUCT_RETURN; |
101 | |
102 | // inquiries |
103 | HeapWord* startWord() const { return _bmStartWord; } |
104 | size_t sizeInWords() const { return _bmWordSize; } |
105 | size_t sizeInBits() const { return _bm.size(); } |
106 | // the following is one past the last word in space |
107 | HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } |
108 | |
109 | // reading marks |
110 | bool isMarked(HeapWord* addr) const; |
111 | bool par_isMarked(HeapWord* addr) const; // do not lock checks |
112 | bool isUnmarked(HeapWord* addr) const; |
113 | bool isAllClear() const; |
114 | |
115 | // writing marks |
116 | void mark(HeapWord* addr); |
117 | // For marking by parallel GC threads; |
118 | // returns true if we did, false if another thread did |
119 | bool par_mark(HeapWord* addr); |
120 | |
121 | void mark_range(MemRegion mr); |
122 | void par_mark_range(MemRegion mr); |
123 | void mark_large_range(MemRegion mr); |
124 | void par_mark_large_range(MemRegion mr); |
125 | void par_clear(HeapWord* addr); // For unmarking by parallel GC threads. |
126 | void clear_range(MemRegion mr); |
127 | void par_clear_range(MemRegion mr); |
128 | void clear_large_range(MemRegion mr); |
129 | void par_clear_large_range(MemRegion mr); |
130 | void clear_all(); |
131 | void clear_all_incrementally(); // Not yet implemented!! |
132 | |
133 | NOT_PRODUCT( |
134 | // checks the memory region for validity |
135 | void region_invariant(MemRegion mr); |
136 | ) |
137 | |
138 | // iteration |
139 | void iterate(BitMapClosure* cl) { |
140 | _bm.iterate(cl); |
141 | } |
142 | void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right); |
143 | void dirty_range_iterate_clear(MemRegionClosure* cl); |
144 | void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl); |
145 | |
146 | // auxiliary support for iteration |
147 | HeapWord* getNextMarkedWordAddress(HeapWord* addr) const; |
148 | HeapWord* getNextMarkedWordAddress(HeapWord* start_addr, |
149 | HeapWord* end_addr) const; |
150 | HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const; |
151 | HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr, |
152 | HeapWord* end_addr) const; |
153 | MemRegion getAndClearMarkedRegion(HeapWord* addr); |
154 | MemRegion getAndClearMarkedRegion(HeapWord* start_addr, |
155 | HeapWord* end_addr); |
156 | |
157 | // conversion utilities |
158 | HeapWord* offsetToHeapWord(size_t offset) const; |
159 | size_t heapWordToOffset(HeapWord* addr) const; |
160 | size_t heapWordDiffToOffsetDiff(size_t diff) const; |
161 | |
162 | void print_on_error(outputStream* st, const char* prefix) const; |
163 | |
164 | // debugging |
165 | // is this address range covered by the bit-map? |
166 | NOT_PRODUCT( |
167 | bool covers(MemRegion mr) const; |
168 | bool covers(HeapWord* start, size_t size = 0) const; |
169 | ) |
170 | void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN; |
171 | }; |
172 | |
173 | // Represents a marking stack used by the CMS collector. |
174 | // Ideally this should be GrowableArray<> just like MSC's marking stack(s). |
175 | class CMSMarkStack: public CHeapObj<mtGC> { |
176 | friend class CMSCollector; // To get at expansion stats further below. |
177 | |
178 | VirtualSpace _virtual_space; // Space for the stack |
179 | oop* _base; // Bottom of stack |
180 | size_t _index; // One more than last occupied index |
181 | size_t _capacity; // Max #elements |
182 | Mutex _par_lock; // An advisory lock used in case of parallel access |
183 | NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run |
184 | |
185 | protected: |
186 | size_t _hit_limit; // We hit max stack size limit |
187 | size_t _failed_double; // We failed expansion before hitting limit |
188 | |
189 | public: |
190 | CMSMarkStack(): |
_par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
192 | Monitor::_safepoint_check_never), |
193 | _hit_limit(0), |
194 | _failed_double(0) {} |
195 | |
196 | bool allocate(size_t size); |
197 | |
198 | size_t capacity() const { return _capacity; } |
199 | |
200 | oop pop() { |
201 | if (!isEmpty()) { |
return _base[--_index];
203 | } |
204 | return NULL; |
205 | } |
206 | |
207 | bool push(oop ptr) { |
208 | if (isFull()) { |
209 | return false; |
210 | } else { |
211 | _base[_index++] = ptr; |
212 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index)); |
213 | return true; |
214 | } |
215 | } |
216 | |
217 | bool isEmpty() const { return _index == 0; } |
218 | bool isFull() const { |
assert(_index <= _capacity, "buffer overflow");
220 | return _index == _capacity; |
221 | } |
222 | |
223 | size_t length() { return _index; } |
224 | |
225 | // "Parallel versions" of some of the above |
226 | oop par_pop() { |
227 | // lock and pop |
228 | MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag); |
229 | return pop(); |
230 | } |
231 | |
232 | bool par_push(oop ptr) { |
233 | // lock and push |
234 | MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag); |
235 | return push(ptr); |
236 | } |
237 | |
238 | // Forcibly reset the stack, losing all of its contents. |
239 | void reset() { |
240 | _index = 0; |
241 | } |
242 | |
243 | // Expand the stack, typically in response to an overflow condition. |
244 | void expand(); |
245 | |
246 | // Compute the least valued stack element. |
247 | oop least_value(HeapWord* low) { |
248 | HeapWord* least = low; |
249 | for (size_t i = 0; i < _index; i++) { |
250 | least = MIN2(least, (HeapWord*)_base[i]); |
251 | } |
252 | return (oop)least; |
253 | } |
254 | |
// Exposed here to allow stack expansion in the parallel (||) case.
256 | Mutex* par_lock() { return &_par_lock; } |
257 | }; |
258 | |
259 | class CardTableRS; |
260 | class CMSParGCThreadState; |
261 | |
262 | class ModUnionClosure: public MemRegionClosure { |
263 | protected: |
264 | CMSBitMap* _t; |
265 | public: |
266 | ModUnionClosure(CMSBitMap* t): _t(t) { } |
267 | void do_MemRegion(MemRegion mr); |
268 | }; |
269 | |
270 | class ModUnionClosurePar: public ModUnionClosure { |
271 | public: |
272 | ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { } |
273 | void do_MemRegion(MemRegion mr); |
274 | }; |
275 | |
276 | // Survivor Chunk Array in support of parallelization of |
277 | // Survivor Space rescan. |
278 | class ChunkArray: public CHeapObj<mtGC> { |
279 | size_t _index; |
280 | size_t _capacity; |
281 | size_t _overflows; |
282 | HeapWord** _array; // storage for array |
283 | |
284 | public: |
285 | ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {} |
286 | ChunkArray(HeapWord** a, size_t c): |
287 | _index(0), _capacity(c), _overflows(0), _array(a) {} |
288 | |
289 | HeapWord** array() { return _array; } |
290 | void set_array(HeapWord** a) { _array = a; } |
291 | |
292 | size_t capacity() { return _capacity; } |
293 | void set_capacity(size_t c) { _capacity = c; } |
294 | |
295 | size_t end() { |
296 | assert(_index <= capacity(), |
297 | "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds" , |
298 | _index, _capacity); |
299 | return _index; |
300 | } // exclusive |
301 | |
302 | HeapWord* nth(size_t n) { |
assert(n < end(), "Out of bounds access");
304 | return _array[n]; |
305 | } |
306 | |
307 | void reset() { |
308 | _index = 0; |
309 | if (_overflows > 0) { |
310 | log_trace(gc)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times" , _capacity, _overflows); |
311 | } |
312 | _overflows = 0; |
313 | } |
314 | |
315 | void record_sample(HeapWord* p, size_t sz) { |
316 | // For now we do not do anything with the size |
317 | if (_index < _capacity) { |
318 | _array[_index++] = p; |
319 | } else { |
320 | ++_overflows; |
321 | assert(_index == _capacity, |
322 | "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT |
323 | "): out of bounds at overflow#" SIZE_FORMAT, |
324 | _index, _capacity, _overflows); |
325 | } |
326 | } |
327 | }; |
328 | |
329 | // |
330 | // Timing, allocation and promotion statistics for gc scheduling and incremental |
331 | // mode pacing. Most statistics are exponential averages. |
332 | // |
333 | class CMSStats { |
334 | private: |
335 | ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen. |
336 | |
337 | // The following are exponential averages with factor alpha: |
338 | // avg = (100 - alpha) * avg + alpha * cur_sample |
339 | // |
340 | // The durations measure: end_time[n] - start_time[n] |
341 | // The periods measure: start_time[n] - start_time[n-1] |
342 | // |
343 | // The cms period and duration include only concurrent collections; time spent |
344 | // in foreground cms collections due to System.gc() or because of a failure to |
// keep up is not included.
346 | // |
347 | // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the |
348 | // real value, but is used only after the first period. A value of 100 is |
349 | // used for the first sample so it gets the entire weight. |
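// For example (a sketch, not the exact implementation): with alpha given as
// a percentage, each new sample is folded in as
//   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100;
// so alpha == 100 makes the first sample the entire average, and smaller
// values of alpha give progressively more weight to history.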
350 | unsigned int _saved_alpha; // 0-100 |
351 | unsigned int _gc0_alpha; |
352 | unsigned int _cms_alpha; |
353 | |
354 | double _gc0_duration; |
355 | double _gc0_period; |
356 | size_t _gc0_promoted; // bytes promoted per gc0 |
357 | double _cms_duration; |
358 | double _cms_duration_pre_sweep; // time from initiation to start of sweep |
359 | double _cms_period; |
360 | size_t _cms_allocated; // bytes of direct allocation per gc0 period |
361 | |
362 | // Timers. |
363 | elapsedTimer _cms_timer; |
364 | TimeStamp _gc0_begin_time; |
365 | TimeStamp _cms_begin_time; |
366 | TimeStamp _cms_end_time; |
367 | |
368 | // Snapshots of the amount used in the CMS generation. |
369 | size_t _cms_used_at_gc0_begin; |
370 | size_t _cms_used_at_gc0_end; |
371 | size_t _cms_used_at_cms_begin; |
372 | |
373 | // Used to prevent the duty cycle from being reduced in the middle of a cms |
374 | // cycle. |
375 | bool _allow_duty_cycle_reduction; |
376 | |
377 | enum { |
378 | _GC0_VALID = 0x1, |
379 | _CMS_VALID = 0x2, |
380 | _ALL_VALID = _GC0_VALID | _CMS_VALID |
381 | }; |
382 | |
383 | unsigned int _valid_bits; |
384 | |
385 | protected: |
386 | // In support of adjusting of cms trigger ratios based on history |
387 | // of concurrent mode failure. |
388 | double cms_free_adjustment_factor(size_t free) const; |
389 | void adjust_cms_free_adjustment_factor(bool fail, size_t free); |
390 | |
391 | public: |
392 | CMSStats(ConcurrentMarkSweepGeneration* cms_gen, |
393 | unsigned int alpha = CMSExpAvgFactor); |
394 | |
395 | // Whether or not the statistics contain valid data; higher level statistics |
396 | // cannot be called until this returns true (they require at least one young |
397 | // gen and one cms cycle to have completed). |
398 | bool valid() const; |
399 | |
400 | // Record statistics. |
401 | void record_gc0_begin(); |
402 | void record_gc0_end(size_t cms_gen_bytes_used); |
403 | void record_cms_begin(); |
404 | void record_cms_end(); |
405 | |
406 | // Allow management of the cms timer, which must be stopped/started around |
407 | // yield points. |
408 | elapsedTimer& cms_timer() { return _cms_timer; } |
409 | void start_cms_timer() { _cms_timer.start(); } |
410 | void stop_cms_timer() { _cms_timer.stop(); } |
411 | |
412 | // Basic statistics; units are seconds or bytes. |
413 | double gc0_period() const { return _gc0_period; } |
414 | double gc0_duration() const { return _gc0_duration; } |
415 | size_t gc0_promoted() const { return _gc0_promoted; } |
416 | double cms_period() const { return _cms_period; } |
417 | double cms_duration() const { return _cms_duration; } |
418 | size_t cms_allocated() const { return _cms_allocated; } |
419 | |
420 | size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} |
421 | |
422 | // Seconds since the last background cms cycle began or ended. |
423 | double cms_time_since_begin() const; |
424 | double cms_time_since_end() const; |
425 | |
426 | // Higher level statistics--caller must check that valid() returns true before |
427 | // calling. |
428 | |
429 | // Returns bytes promoted per second of wall clock time. |
430 | double promotion_rate() const; |
431 | |
432 | // Returns bytes directly allocated per second of wall clock time. |
433 | double cms_allocation_rate() const; |
434 | |
435 | // Rate at which space in the cms generation is being consumed (sum of the |
436 | // above two). |
437 | double cms_consumption_rate() const; |
438 | |
439 | // Returns an estimate of the number of seconds until the cms generation will |
440 | // fill up, assuming no collection work is done. |
441 | double time_until_cms_gen_full() const; |
442 | |
443 | // Returns an estimate of the number of seconds remaining until |
444 | // the cms generation collection should start. |
445 | double time_until_cms_start() const; |
446 | |
447 | // End of higher level statistics. |
448 | |
449 | // Debugging. |
450 | void print_on(outputStream* st) const PRODUCT_RETURN; |
451 | void print() const { print_on(tty); } |
452 | }; |
453 | |
454 | // A closure related to weak references processing which |
455 | // we embed in the CMSCollector, since we need to pass |
456 | // it to the reference processor for secondary filtering |
457 | // of references based on reachability of referent; |
458 | // see role of _is_alive_non_header closure in the |
459 | // ReferenceProcessor class. |
460 | // For objects in the CMS generation, this closure checks |
461 | // if the object is "live" (reachable). Used in weak |
462 | // reference processing. |
463 | class CMSIsAliveClosure: public BoolObjectClosure { |
464 | const MemRegion _span; |
465 | const CMSBitMap* _bit_map; |
466 | |
467 | friend class CMSCollector; |
468 | public: |
469 | CMSIsAliveClosure(MemRegion span, |
470 | CMSBitMap* bit_map): |
471 | _span(span), |
472 | _bit_map(bit_map) { |
assert(!span.is_empty(), "Empty span could spell trouble");
474 | } |
475 | |
476 | bool do_object_b(oop obj); |
477 | }; |
478 | |
479 | |
480 | // Implements AbstractRefProcTaskExecutor for CMS. |
481 | class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor { |
482 | public: |
483 | |
484 | CMSRefProcTaskExecutor(CMSCollector& collector) |
485 | : _collector(collector) |
486 | { } |
487 | |
488 | // Executes a task using worker threads. |
489 | virtual void execute(ProcessTask& task, uint ergo_workers); |
490 | private: |
491 | CMSCollector& _collector; |
492 | }; |
493 | |
494 | |
495 | class CMSCollector: public CHeapObj<mtGC> { |
496 | friend class VMStructs; |
497 | friend class ConcurrentMarkSweepThread; |
498 | friend class ConcurrentMarkSweepGeneration; |
499 | friend class CompactibleFreeListSpace; |
500 | friend class CMSParMarkTask; |
501 | friend class CMSParInitialMarkTask; |
502 | friend class CMSParRemarkTask; |
503 | friend class CMSConcMarkingTask; |
504 | friend class CMSRefProcTaskProxy; |
505 | friend class CMSRefProcTaskExecutor; |
506 | friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden |
507 | friend class SurvivorSpacePrecleanClosure; // --- ditto ------- |
508 | friend class PushOrMarkClosure; // to access _restart_addr |
509 | friend class ParPushOrMarkClosure; // to access _restart_addr |
510 | friend class MarkFromRootsClosure; // -- ditto -- |
511 | // ... and for clearing cards |
512 | friend class ParMarkFromRootsClosure; // to access _restart_addr |
513 | // ... and for clearing cards |
514 | friend class ParConcMarkingClosure; // to access _restart_addr etc. |
515 | friend class MarkFromRootsVerifyClosure; // to access _restart_addr |
516 | friend class PushAndMarkVerifyClosure; // -- ditto -- |
517 | friend class MarkRefsIntoAndScanClosure; // to access _overflow_list |
518 | friend class PushAndMarkClosure; // -- ditto -- |
519 | friend class ParPushAndMarkClosure; // -- ditto -- |
520 | friend class CMSKeepAliveClosure; // -- ditto -- |
521 | friend class CMSDrainMarkingStackClosure; // -- ditto -- |
522 | friend class CMSInnerParMarkAndPushClosure; // -- ditto -- |
523 | NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list |
524 | friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait |
525 | friend class VM_CMS_Operation; |
526 | friend class VM_CMS_Initial_Mark; |
527 | friend class VM_CMS_Final_Remark; |
528 | friend class TraceCMSMemoryManagerStats; |
529 | |
530 | private: |
531 | jlong _time_of_last_gc; |
532 | void update_time_of_last_gc(jlong now) { |
533 | _time_of_last_gc = now; |
534 | } |
535 | |
536 | OopTaskQueueSet* _task_queues; |
537 | |
538 | // Overflow list of grey objects, threaded through mark-word |
539 | // Manipulated with CAS in the parallel/multi-threaded case. |
540 | oopDesc* volatile _overflow_list; |
541 | // The following array-pair keeps track of mark words |
542 | // displaced for accommodating overflow list above. |
543 | // This code will likely be revisited under RFE#4922830. |
544 | Stack<oop, mtGC> _preserved_oop_stack; |
545 | Stack<markOop, mtGC> _preserved_mark_stack; |
546 | |
547 | // In support of multi-threaded concurrent phases |
548 | YieldingFlexibleWorkGang* _conc_workers; |
549 | |
550 | // Performance Counters |
551 | CollectorCounters* _gc_counters; |
552 | CollectorCounters* _cgc_counters; |
553 | |
554 | // Initialization Errors |
555 | bool _completed_initialization; |
556 | |
557 | // In support of ExplicitGCInvokesConcurrent |
558 | static bool _full_gc_requested; |
559 | static GCCause::Cause _full_gc_cause; |
560 | unsigned int _collection_count_start; |
561 | |
562 | // Should we unload classes this concurrent cycle? |
563 | bool _should_unload_classes; |
564 | unsigned int _concurrent_cycles_since_last_unload; |
565 | unsigned int concurrent_cycles_since_last_unload() const { |
566 | return _concurrent_cycles_since_last_unload; |
567 | } |
568 | // Did we (allow) unload classes in the previous concurrent cycle? |
569 | bool unloaded_classes_last_cycle() const { |
570 | return concurrent_cycles_since_last_unload() == 0; |
571 | } |
572 | // Root scanning options for perm gen |
573 | int _roots_scanning_options; |
574 | int roots_scanning_options() const { return _roots_scanning_options; } |
575 | void add_root_scanning_option(int o) { _roots_scanning_options |= o; } |
576 | void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; } |
577 | |
578 | // Verification support |
579 | CMSBitMap _verification_mark_bm; |
void verify_after_remark_work_1();
void verify_after_remark_work_2();
582 | |
583 | // True if any verification flag is on. |
584 | bool _verifying; |
585 | bool verifying() const { return _verifying; } |
586 | void set_verifying(bool v) { _verifying = v; } |
587 | |
588 | void set_did_compact(bool v); |
589 | |
590 | // XXX Move these to CMSStats ??? FIX ME !!! |
591 | elapsedTimer _inter_sweep_timer; // Time between sweeps |
592 | elapsedTimer _intra_sweep_timer; // Time _in_ sweeps |
593 | // Padded decaying average estimates of the above |
594 | AdaptivePaddedAverage _inter_sweep_estimate; |
595 | AdaptivePaddedAverage _intra_sweep_estimate; |
596 | |
597 | CMSTracer* _gc_tracer_cm; |
598 | ConcurrentGCTimer* _gc_timer_cm; |
599 | |
600 | bool _cms_start_registered; |
601 | |
602 | GCHeapSummary _last_heap_summary; |
603 | MetaspaceSummary _last_metaspace_summary; |
604 | |
605 | void register_gc_start(GCCause::Cause cause); |
606 | void register_gc_end(); |
607 | void save_heap_summary(); |
608 | void report_heap_summary(GCWhen::Type when); |
609 | |
610 | protected: |
611 | ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS) |
612 | MemRegion _span; // Span covering above |
613 | CardTableRS* _ct; // Card table |
614 | |
615 | // CMS marking support structures |
616 | CMSBitMap _markBitMap; |
617 | CMSBitMap _modUnionTable; |
618 | CMSMarkStack _markStack; |
619 | |
620 | HeapWord* _restart_addr; // In support of marking stack overflow |
621 | void lower_restart_addr(HeapWord* low); |
622 | |
623 | // Counters in support of marking stack / work queue overflow handling: |
624 | // a non-zero value indicates certain types of overflow events during |
625 | // the current CMS cycle and could lead to stack resizing efforts at |
626 | // an opportune future time. |
627 | size_t _ser_pmc_preclean_ovflw; |
size_t _ser_pmc_remark_ovflw;
size_t _par_pmc_remark_ovflw;
630 | size_t _ser_kac_preclean_ovflw; |
631 | size_t _ser_kac_ovflw; |
632 | size_t _par_kac_ovflw; |
633 | NOT_PRODUCT(ssize_t _num_par_pushes;) |
634 | |
635 | // ("Weak") Reference processing support. |
636 | SpanSubjectToDiscoveryClosure _span_based_discoverer; |
637 | ReferenceProcessor* _ref_processor; |
638 | CMSIsAliveClosure _is_alive_closure; |
639 | // Keep this textually after _markBitMap and _span; c'tor dependency. |
640 | |
641 | ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work |
642 | ModUnionClosurePar _modUnionClosurePar; |
643 | |
644 | // CMS abstract state machine |
645 | // initial_state: Idling |
646 | // next_state(Idling) = {Marking} |
647 | // next_state(Marking) = {Precleaning, Sweeping} |
648 | // next_state(Precleaning) = {AbortablePreclean, FinalMarking} |
649 | // next_state(AbortablePreclean) = {FinalMarking} |
650 | // next_state(FinalMarking) = {Sweeping} |
651 | // next_state(Sweeping) = {Resizing} |
652 | // next_state(Resizing) = {Resetting} |
653 | // next_state(Resetting) = {Idling} |
654 | // The numeric values below are chosen so that: |
655 | // . _collectorState <= Idling == post-sweep && pre-mark |
656 | // . _collectorState in (Idling, Sweeping) == {initial,final}marking || |
657 | // precleaning || abortablePrecleanb |
658 | public: |
659 | enum CollectorState { |
660 | Resizing = 0, |
661 | Resetting = 1, |
662 | Idling = 2, |
663 | InitialMarking = 3, |
664 | Marking = 4, |
665 | Precleaning = 5, |
666 | AbortablePreclean = 6, |
667 | FinalMarking = 7, |
668 | Sweeping = 8 |
669 | }; |
670 | protected: |
671 | static CollectorState _collectorState; |
672 | |
673 | // State related to prologue/epilogue invocation for my generations |
674 | bool _between_prologue_and_epilogue; |
675 | |
676 | // Signaling/State related to coordination between fore- and background GC |
677 | // Note: When the baton has been passed from background GC to foreground GC, |
678 | // _foregroundGCIsActive is true and _foregroundGCShouldWait is false. |
679 | static bool _foregroundGCIsActive; // true iff foreground collector is active or |
680 | // wants to go active |
681 | static bool _foregroundGCShouldWait; // true iff background GC is active and has not |
682 | // yet passed the baton to the foreground GC |
683 | |
684 | // Support for CMSScheduleRemark (abortable preclean) |
685 | bool _abort_preclean; |
686 | bool _start_sampling; |
687 | |
688 | int _numYields; |
689 | size_t _numDirtyCards; |
690 | size_t _sweep_count; |
691 | |
692 | // Occupancy used for bootstrapping stats |
693 | double _bootstrap_occupancy; |
694 | |
695 | // Timer |
696 | elapsedTimer _timer; |
697 | |
698 | // Timing, allocation and promotion statistics, used for scheduling. |
699 | CMSStats _stats; |
700 | |
701 | enum CMS_op_type { |
702 | CMS_op_checkpointRootsInitial, |
703 | CMS_op_checkpointRootsFinal |
704 | }; |
705 | |
706 | void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause); |
707 | bool stop_world_and_do(CMS_op_type op); |
708 | |
709 | OopTaskQueueSet* task_queues() { return _task_queues; } |
710 | YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; } |
711 | |
712 | // Support for parallelizing Eden rescan in CMS remark phase |
713 | void sample_eden(); // ... sample Eden space top |
714 | |
715 | private: |
716 | // Support for parallelizing young gen rescan in CMS remark phase |
717 | ParNewGeneration* _young_gen; |
718 | |
719 | HeapWord* volatile* _top_addr; // ... Top of Eden |
720 | HeapWord** _end_addr; // ... End of Eden |
721 | Mutex* _eden_chunk_lock; |
722 | HeapWord** _eden_chunk_array; // ... Eden partitioning array |
723 | size_t _eden_chunk_index; // ... top (exclusive) of array |
724 | size_t _eden_chunk_capacity; // ... max entries in array |
725 | |
726 | // Support for parallelizing survivor space rescan |
727 | HeapWord** _survivor_chunk_array; |
728 | size_t _survivor_chunk_index; |
729 | size_t _survivor_chunk_capacity; |
730 | size_t* _cursor; |
731 | ChunkArray* _survivor_plab_array; |
732 | |
733 | // Support for marking stack overflow handling |
734 | bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack); |
735 | bool par_take_from_overflow_list(size_t num, |
736 | OopTaskQueue* to_work_q, |
737 | int no_of_gc_threads); |
738 | void push_on_overflow_list(oop p); |
739 | void par_push_on_overflow_list(oop p); |
740 | // The following is, obviously, not, in general, "MT-stable" |
741 | bool overflow_list_is_empty() const; |
742 | |
743 | void preserve_mark_if_necessary(oop p); |
744 | void par_preserve_mark_if_necessary(oop p); |
745 | void preserve_mark_work(oop p, markOop m); |
746 | void restore_preserved_marks_if_any(); |
747 | NOT_PRODUCT(bool no_preserved_marks() const;) |
748 | // In support of testing overflow code |
749 | NOT_PRODUCT(int _overflow_counter;) |
750 | NOT_PRODUCT(bool simulate_overflow();) // Sequential |
751 | NOT_PRODUCT(bool par_simulate_overflow();) // MT version |
752 | |
753 | // CMS work methods |
754 | void checkpointRootsInitialWork(); // Initial checkpoint work |
755 | |
756 | // A return value of false indicates failure due to stack overflow |
757 | bool markFromRootsWork(); // Concurrent marking work |
758 | |
759 | public: // FIX ME!!! only for testing |
760 | bool do_marking_st(); // Single-threaded marking |
761 | bool do_marking_mt(); // Multi-threaded marking |
762 | |
763 | private: |
764 | |
765 | // Concurrent precleaning work |
766 | size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen, |
767 | ScanMarkedObjectsAgainCarefullyClosure* cl); |
768 | size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen, |
769 | ScanMarkedObjectsAgainCarefullyClosure* cl); |
770 | // Does precleaning work, returning a quantity indicative of |
771 | // the amount of "useful work" done. |
772 | size_t preclean_work(bool clean_refs, bool clean_survivors); |
773 | void preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock); |
774 | void abortable_preclean(); // Preclean while looking for possible abort |
775 | void initialize_sequential_subtasks_for_young_gen_rescan(int i); |
776 | // Helper function for above; merge-sorts the per-thread plab samples |
777 | void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads); |
778 | // Resets (i.e. clears) the per-thread plab sample vectors |
779 | void reset_survivor_plab_arrays(); |
780 | |
781 | // Final (second) checkpoint work |
782 | void checkpointRootsFinalWork(); |
783 | // Work routine for parallel version of remark |
void do_remark_parallel();
// Work routine for non-parallel version of remark
void do_remark_non_parallel();
787 | // Reference processing work routine (during second checkpoint) |
788 | void refProcessingWork(); |
789 | |
790 | // Concurrent sweeping work |
791 | void sweepWork(ConcurrentMarkSweepGeneration* old_gen); |
792 | |
793 | // Concurrent resetting of support data structures |
794 | void reset_concurrent(); |
795 | // Resetting of support data structures from a STW full GC |
796 | void reset_stw(); |
797 | |
798 | // Clear _expansion_cause fields of constituent generations |
799 | void clear_expansion_cause(); |
800 | |
801 | // An auxiliary method used to record the ends of |
802 | // used regions of each generation to limit the extent of sweep |
803 | void save_sweep_limits(); |
804 | |
805 | // A work method used by the foreground collector to do |
806 | // a mark-sweep-compact. |
807 | void do_compaction_work(bool clear_all_soft_refs); |
808 | |
809 | // Work methods for reporting concurrent mode interruption or failure |
810 | bool is_external_interruption(); |
811 | void report_concurrent_mode_interruption(); |
812 | |
813 | // If the background GC is active, acquire control from the background |
814 | // GC and do the collection. |
815 | void acquire_control_and_collect(bool full, bool clear_all_soft_refs); |
816 | |
817 | // For synchronizing passing of control from background to foreground |
818 | // GC. waitForForegroundGC() is called by the background |
// collector. If it had to wait for a foreground collection,
820 | // it returns true and the background collection should assume |
821 | // that the collection was finished by the foreground |
822 | // collector. |
823 | bool waitForForegroundGC(); |
824 | |
825 | size_t block_size_using_printezis_bits(HeapWord* addr) const; |
826 | size_t block_size_if_printezis_bits(HeapWord* addr) const; |
827 | HeapWord* next_card_start_after_block(HeapWord* addr) const; |
828 | |
829 | void setup_cms_unloading_and_verification_state(); |
830 | public: |
831 | CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, |
832 | CardTableRS* ct); |
833 | ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; } |
834 | |
835 | MemRegion ref_processor_span() const { return _span_based_discoverer.span(); } |
836 | ReferenceProcessor* ref_processor() { return _ref_processor; } |
837 | void ref_processor_init(); |
838 | |
839 | Mutex* bitMapLock() const { return _markBitMap.lock(); } |
840 | static CollectorState abstract_state() { return _collectorState; } |
841 | |
842 | bool should_abort_preclean() const; // Whether preclean should be aborted. |
843 | size_t get_eden_used() const; |
844 | size_t get_eden_capacity() const; |
845 | |
846 | ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; } |
847 | |
848 | // Locking checks |
849 | NOT_PRODUCT(static bool have_cms_token();) |
850 | |
851 | bool shouldConcurrentCollect(); |
852 | |
853 | void collect(bool full, |
854 | bool clear_all_soft_refs, |
855 | size_t size, |
856 | bool tlab); |
857 | void collect_in_background(GCCause::Cause cause); |
858 | |
859 | // In support of ExplicitGCInvokesConcurrent |
860 | static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); |
861 | // Should we unload classes in a particular concurrent cycle? |
862 | bool should_unload_classes() const { |
863 | return _should_unload_classes; |
864 | } |
865 | void update_should_unload_classes(); |
866 | |
867 | void direct_allocated(HeapWord* start, size_t size); |
868 | |
869 | // Object is dead if not marked and current phase is sweeping. |
870 | bool is_dead_obj(oop obj) const; |
871 | |
872 | // After a promotion (of "start"), do any necessary marking. |
873 | // If "par", then it's being done by a parallel GC thread. |
874 | // The last two args indicate if we need precise marking |
875 | // and if so the size of the object so it can be dirtied |
876 | // in its entirety. |
877 | void promoted(bool par, HeapWord* start, |
878 | bool is_obj_array, size_t obj_size); |
879 | |
880 | void getFreelistLocks() const; |
881 | void releaseFreelistLocks() const; |
882 | bool haveFreelistLocks() const; |
883 | |
884 | // Adjust size of underlying generation |
885 | void compute_new_size(); |
886 | |
887 | // GC prologue and epilogue |
888 | void gc_prologue(bool full); |
889 | void gc_epilogue(bool full); |
890 | |
891 | jlong time_of_last_gc(jlong now) { |
892 | if (_collectorState <= Idling) { |
893 | // gc not in progress |
894 | return _time_of_last_gc; |
895 | } else { |
896 | // collection in progress |
897 | return now; |
898 | } |
899 | } |
900 | |
901 | // Support for parallel remark of survivor space |
902 | void* get_data_recorder(int thr_num); |
903 | void sample_eden_chunk(); |
904 | |
905 | CMSBitMap* markBitMap() { return &_markBitMap; } |
906 | void directAllocated(HeapWord* start, size_t size); |
907 | |
908 | // Main CMS steps and related support |
909 | void checkpointRootsInitial(); |
910 | bool markFromRoots(); // a return value of false indicates failure |
911 | // due to stack overflow |
912 | void preclean(); |
913 | void checkpointRootsFinal(); |
914 | void sweep(); |
915 | |
916 | // Check that the currently executing thread is the expected |
917 | // one (foreground collector or background collector). |
918 | static void check_correct_thread_executing() PRODUCT_RETURN; |
919 | |
920 | NOT_PRODUCT(bool is_cms_reachable(HeapWord* addr);) |
921 | |
922 | // Performance Counter Support |
923 | CollectorCounters* counters() { return _gc_counters; } |
924 | CollectorCounters* cgc_counters() { return _cgc_counters; } |
925 | |
926 | // Timer stuff |
void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
jlong timerTicks() { assert(!_timer.is_active(), "Error"); return _timer.ticks(); }
931 | |
932 | int yields() { return _numYields; } |
933 | void resetYields() { _numYields = 0; } |
934 | void incrementYields() { _numYields++; } |
935 | void resetNumDirtyCards() { _numDirtyCards = 0; } |
936 | void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; } |
937 | size_t numDirtyCards() { return _numDirtyCards; } |
938 | |
939 | static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; } |
940 | static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; } |
941 | static bool foregroundGCIsActive() { return _foregroundGCIsActive; } |
942 | static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; } |
943 | size_t sweep_count() const { return _sweep_count; } |
944 | void increment_sweep_count() { _sweep_count++; } |
945 | |
946 | // Timers/stats for gc scheduling and incremental mode pacing. |
947 | CMSStats& stats() { return _stats; } |
948 | |
949 | // Adaptive size policy |
950 | AdaptiveSizePolicy* size_policy(); |
951 | |
952 | static void print_on_error(outputStream* st); |
953 | |
954 | // Debugging |
955 | void verify(); |
bool verify_after_remark();
957 | void verify_ok_to_terminate() const PRODUCT_RETURN; |
958 | void verify_work_stacks_empty() const PRODUCT_RETURN; |
959 | void verify_overflow_empty() const PRODUCT_RETURN; |
960 | |
961 | // Convenience methods in support of debugging |
static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
963 | HeapWord* block_start(const void* p) const PRODUCT_RETURN0; |
964 | |
965 | // Accessors |
966 | CMSMarkStack* verification_mark_stack() { return &_markStack; } |
967 | CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } |
968 | |
969 | // Initialization errors |
970 | bool completed_initialization() { return _completed_initialization; } |
971 | |
972 | void print_eden_and_survivor_chunk_arrays(); |
973 | |
974 | ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; } |
975 | }; |
976 | |
977 | class CMSExpansionCause : public AllStatic { |
978 | public: |
979 | enum Cause { |
980 | _no_expansion, |
981 | _satisfy_free_ratio, |
982 | _satisfy_promotion, |
983 | _satisfy_allocation, |
984 | _allocate_par_lab, |
985 | _allocate_par_spooling_space, |
986 | _adaptive_size_policy |
987 | }; |
988 | // Return a string describing the cause of the expansion. |
989 | static const char* to_string(CMSExpansionCause::Cause cause); |
990 | }; |
991 | |
992 | class ConcurrentMarkSweepGeneration: public CardGeneration { |
993 | friend class VMStructs; |
994 | friend class ConcurrentMarkSweepThread; |
995 | friend class ConcurrentMarkSweep; |
996 | friend class CMSCollector; |
997 | protected: |
998 | static CMSCollector* _collector; // the collector that collects us |
999 | CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now) |
1000 | |
1001 | // Performance Counters |
1002 | GenerationCounters* _gen_counters; |
1003 | GSpaceCounters* _space_counters; |
1004 | |
1005 | // Words directly allocated, used by CMSStats. |
1006 | size_t _direct_allocated_words; |
1007 | |
1008 | // Non-product stat counters |
1009 | NOT_PRODUCT( |
1010 | size_t _numObjectsPromoted; |
1011 | size_t _numWordsPromoted; |
1012 | size_t _numObjectsAllocated; |
1013 | size_t _numWordsAllocated; |
1014 | ) |
1015 | |
1016 | // Used for sizing decisions |
1017 | bool _incremental_collection_failed; |
1018 | bool incremental_collection_failed() { |
1019 | return _incremental_collection_failed; |
1020 | } |
1021 | void set_incremental_collection_failed() { |
1022 | _incremental_collection_failed = true; |
1023 | } |
1024 | void clear_incremental_collection_failed() { |
1025 | _incremental_collection_failed = false; |
1026 | } |
1027 | |
1028 | // accessors |
1029 | void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} |
1030 | CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; } |
1031 | |
1032 | // Accessing spaces |
1033 | CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; } |
1034 | |
1035 | private: |
1036 | // For parallel young-gen GC support. |
1037 | CMSParGCThreadState** _par_gc_thread_states; |
1038 | |
1039 | // Reason generation was expanded |
1040 | CMSExpansionCause::Cause _expansion_cause; |
1041 | |
1042 | // In support of MinChunkSize being larger than min object size |
1043 | const double _dilatation_factor; |
1044 | |
1045 | // True if a compacting collection was done. |
1046 | bool _did_compact; |
1047 | bool did_compact() { return _did_compact; } |
1048 | |
1049 | // Fraction of current occupancy at which to start a CMS collection which |
1050 | // will collect this generation (at least). |
1051 | double _initiating_occupancy; |
1052 | |
1053 | protected: |
1054 | // Shrink generation by specified size (returns false if unable to shrink) |
1055 | void shrink_free_list_by(size_t bytes); |
1056 | |
1057 | // Update statistics for GC |
1058 | virtual void update_gc_stats(Generation* current_generation, bool full); |
1059 | |
1060 | // Maximum available space in the generation (including uncommitted) |
1061 | // space. |
1062 | size_t max_available() const; |
1063 | |
1064 | // getter and initializer for _initiating_occupancy field. |
1065 | double initiating_occupancy() const { return _initiating_occupancy; } |
1066 | void init_initiating_occupancy(intx io, uintx tr); |
1067 | |
1068 | void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause); |
1069 | |
1070 | void assert_correct_size_change_locking(); |
1071 | |
1072 | public: |
1073 | ConcurrentMarkSweepGeneration(ReservedSpace rs, |
1074 | size_t initial_byte_size, |
1075 | size_t min_byte_size, |
1076 | size_t max_byte_size, |
1077 | CardTableRS* ct); |
1078 | |
1079 | // Accessors |
1080 | CMSCollector* collector() const { return _collector; } |
1081 | static void set_collector(CMSCollector* collector) { |
assert(_collector == NULL, "already set");
1083 | _collector = collector; |
1084 | } |
1085 | CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; } |
1086 | |
1087 | Mutex* freelistLock() const; |
1088 | |
1089 | virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; } |
1090 | |
1091 | void set_did_compact(bool v) { _did_compact = v; } |
1092 | |
1093 | bool refs_discovery_is_atomic() const { return false; } |
1094 | bool refs_discovery_is_mt() const { |
1095 | // Note: CMS does MT-discovery during the parallel-remark |
1096 | // phases. Use ReferenceProcessorMTMutator to make refs |
1097 | // discovery MT-safe during such phases or other parallel |
1098 | // discovery phases in the future. This may all go away |
1099 | // if/when we decide that refs discovery is sufficiently |
1100 | // rare that the cost of the CAS's involved is in the |
1101 | // noise. That's a measurement that should be done, and |
1102 | // the code simplified if that turns out to be the case. |
1103 | return ConcGCThreads > 1; |
1104 | } |
1105 | |
1106 | // Override |
1107 | virtual void ref_processor_init(); |
1108 | |
1109 | void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; } |
1110 | |
1111 | // Space enquiries |
1112 | double occupancy() const { return ((double)used())/((double)capacity()); } |
1113 | size_t contiguous_available() const; |
1114 | size_t unsafe_max_alloc_nogc() const; |
1115 | |
1116 | // over-rides |
1117 | MemRegion used_region_at_save_marks() const; |
1118 | |
1119 | // Adjust quantities in the generation affected by |
1120 | // the compaction. |
1121 | void reset_after_compaction(); |
1122 | |
1123 | // Allocation support |
1124 | HeapWord* allocate(size_t size, bool tlab); |
1125 | HeapWord* have_lock_and_allocate(size_t size, bool tlab); |
1126 | oop promote(oop obj, size_t obj_size); |
1127 | HeapWord* par_allocate(size_t size, bool tlab) { |
1128 | return allocate(size, tlab); |
1129 | } |
1130 | |
1131 | |
1132 | // Used by CMSStats to track direct allocation. The value is sampled and |
1133 | // reset after each young gen collection. |
1134 | size_t direct_allocated_words() const { return _direct_allocated_words; } |
1135 | void reset_direct_allocated_words() { _direct_allocated_words = 0; } |
1136 | |
1137 | // Overrides for parallel promotion. |
1138 | virtual oop par_promote(int thread_num, |
1139 | oop obj, markOop m, size_t word_sz); |
1140 | virtual void par_promote_alloc_done(int thread_num); |
1141 | virtual void par_oop_since_save_marks_iterate_done(int thread_num); |
1142 | |
1143 | virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const; |
1144 | |
1145 | // Inform this (old) generation that a promotion failure was |
1146 | // encountered during a collection of the young generation. |
1147 | virtual void promotion_failure_occurred(); |
1148 | |
1149 | bool should_collect(bool full, size_t size, bool tlab); |
1150 | virtual bool should_concurrent_collect() const; |
1151 | virtual bool is_too_full() const; |
1152 | void collect(bool full, |
1153 | bool clear_all_soft_refs, |
1154 | size_t size, |
1155 | bool tlab); |
1156 | |
1157 | HeapWord* expand_and_allocate(size_t word_size, |
1158 | bool tlab, |
1159 | bool parallel = false); |
1160 | |
1161 | // GC prologue and epilogue |
1162 | void gc_prologue(bool full); |
1163 | void gc_prologue_work(bool full, bool registerClosure, |
1164 | ModUnionClosure* modUnionClosure); |
1165 | void gc_epilogue(bool full); |
1166 | void gc_epilogue_work(bool full); |
1167 | |
1168 | // Time since last GC of this generation |
1169 | jlong time_of_last_gc(jlong now) { |
1170 | return collector()->time_of_last_gc(now); |
1171 | } |
1172 | void update_time_of_last_gc(jlong now) { |
collector()->update_time_of_last_gc(now);
1174 | } |
1175 | |
1176 | // Allocation failure |
1177 | void shrink(size_t bytes); |
1178 | HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); |
1179 | bool expand_and_ensure_spooling_space(PromotionInfo* promo); |
1180 | |
1181 | // Iteration support and related enquiries |
1182 | void save_marks(); |
1183 | bool no_allocs_since_save_marks(); |
1184 | |
1185 | // Iteration support specific to CMS generations |
1186 | void save_sweep_limit(); |
1187 | |
1188 | // More iteration support |
1189 | virtual void oop_iterate(OopIterateClosure* cl); |
1190 | virtual void safe_object_iterate(ObjectClosure* cl); |
1191 | virtual void object_iterate(ObjectClosure* cl); |
1192 | |
1193 | template <typename OopClosureType> |
1194 | void oop_since_save_marks_iterate(OopClosureType* cl); |
1195 | |
1196 | // Smart allocation XXX -- move to CFLSpace? |
1197 | void setNearLargestChunk(); |
1198 | bool isNearLargestChunk(HeapWord* addr); |
1199 | |
1200 | // Get the chunk at the end of the space. Delegates to |
1201 | // the space. |
1202 | FreeChunk* find_chunk_at_end(); |
1203 | |
1204 | void post_compact(); |
1205 | |
1206 | // Debugging |
1207 | void prepare_for_verify(); |
1208 | void verify(); |
1209 | void print_statistics() PRODUCT_RETURN; |
1210 | |
1211 | // Performance Counters support |
1212 | virtual void update_counters(); |
1213 | virtual void update_counters(size_t used); |
1214 | void initialize_performance_counters(size_t min_old_size, size_t max_old_size); |
1215 | CollectorCounters* counters() { return collector()->counters(); } |
1216 | |
1217 | // Support for parallel remark of survivor space |
1218 | void* get_data_recorder(int thr_num) { |
// Delegate to collector
1220 | return collector()->get_data_recorder(thr_num); |
1221 | } |
1222 | void sample_eden_chunk() { |
// Delegate to collector
1224 | return collector()->sample_eden_chunk(); |
1225 | } |
1226 | |
1227 | // Printing |
1228 | const char* name() const; |
virtual const char* short_name() const { return "CMS"; }
1230 | void print() const; |
1231 | |
1232 | // Resize the generation after a compacting GC. The |
1233 | // generation can be treated as a contiguous space |
1234 | // after the compaction. |
1235 | virtual void compute_new_size(); |
1236 | // Resize the generation after a non-compacting |
1237 | // collection. |
1238 | void compute_new_size_free_list(); |
1239 | }; |
1240 | |
1241 | // |
1242 | // Closures of various sorts used by CMS to accomplish its work |
1243 | // |
1244 | |
1245 | // This closure is used to do concurrent marking from the roots |
1246 | // following the first checkpoint. |
1247 | class MarkFromRootsClosure: public BitMapClosure { |
1248 | CMSCollector* _collector; |
1249 | MemRegion _span; |
1250 | CMSBitMap* _bitMap; |
1251 | CMSBitMap* _mut; |
1252 | CMSMarkStack* _markStack; |
1253 | bool _yield; |
1254 | int _skipBits; |
1255 | HeapWord* _finger; |
1256 | HeapWord* _threshold; |
1257 | DEBUG_ONLY(bool _verifying;) |
1258 | |
1259 | public: |
1260 | MarkFromRootsClosure(CMSCollector* collector, MemRegion span, |
1261 | CMSBitMap* bitMap, |
1262 | CMSMarkStack* markStack, |
1263 | bool should_yield, bool verifying = false); |
1264 | bool do_bit(size_t offset); |
1265 | void reset(HeapWord* addr); |
1266 | inline void do_yield_check(); |
1267 | |
1268 | private: |
1269 | void scanOopsInOop(HeapWord* ptr); |
1270 | void do_yield_work(); |
1271 | }; |
1272 | |
1273 | // This closure is used to do concurrent multi-threaded |
1274 | // marking from the roots following the first checkpoint. |
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
1277 | class ParMarkFromRootsClosure: public BitMapClosure { |
1278 | CMSCollector* _collector; |
1279 | MemRegion _whole_span; |
1280 | MemRegion _span; |
1281 | CMSBitMap* _bit_map; |
1282 | CMSBitMap* _mut; |
1283 | OopTaskQueue* _work_queue; |
1284 | CMSMarkStack* _overflow_stack; |
1285 | int _skip_bits; |
1286 | HeapWord* _finger; |
1287 | HeapWord* _threshold; |
1288 | CMSConcMarkingTask* _task; |
1289 | public: |
1290 | ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector, |
1291 | MemRegion span, |
1292 | CMSBitMap* bit_map, |
1293 | OopTaskQueue* work_queue, |
1294 | CMSMarkStack* overflow_stack); |
1295 | bool do_bit(size_t offset); |
1296 | inline void do_yield_check(); |
1297 | |
1298 | private: |
1299 | void scan_oops_in_oop(HeapWord* ptr); |
1300 | void do_yield_work(); |
1301 | bool get_work_from_overflow_stack(); |
1302 | }; |
1303 | |
1304 | // The following closures are used to do certain kinds of verification of |
1305 | // CMS marking. |
1306 | class PushAndMarkVerifyClosure: public MetadataVisitingOopIterateClosure { |
1307 | CMSCollector* _collector; |
1308 | MemRegion _span; |
1309 | CMSBitMap* _verification_bm; |
1310 | CMSBitMap* _cms_bm; |
1311 | CMSMarkStack* _mark_stack; |
1312 | protected: |
1313 | void do_oop(oop p); |
1314 | template <class T> void do_oop_work(T *p); |
1315 | |
1316 | public: |
1317 | PushAndMarkVerifyClosure(CMSCollector* cms_collector, |
1318 | MemRegion span, |
1319 | CMSBitMap* verification_bm, |
1320 | CMSBitMap* cms_bm, |
1321 | CMSMarkStack* mark_stack); |
1322 | void do_oop(oop* p); |
1323 | void do_oop(narrowOop* p); |
1324 | |
1325 | // Deal with a stack overflow condition |
1326 | void handle_stack_overflow(HeapWord* lost); |
1327 | }; |
1328 | |
1329 | class MarkFromRootsVerifyClosure: public BitMapClosure { |
1330 | CMSCollector* _collector; |
1331 | MemRegion _span; |
1332 | CMSBitMap* _verification_bm; |
1333 | CMSBitMap* _cms_bm; |
1334 | CMSMarkStack* _mark_stack; |
1335 | HeapWord* _finger; |
1336 | PushAndMarkVerifyClosure _pam_verify_closure; |
1337 | public: |
1338 | MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span, |
1339 | CMSBitMap* verification_bm, |
1340 | CMSBitMap* cms_bm, |
1341 | CMSMarkStack* mark_stack); |
1342 | bool do_bit(size_t offset); |
1343 | void reset(HeapWord* addr); |
1344 | }; |
1345 | |
1346 | |
1347 | // This closure is used to check that a certain set of bits is |
1348 | // "empty" (i.e. the bit vector doesn't have any 1-bits). |
1349 | class FalseBitMapClosure: public BitMapClosure { |
1350 | public: |
1351 | bool do_bit(size_t offset) { |
1352 | guarantee(false, "Should not have a 1 bit" ); |
1353 | return true; |
1354 | } |
1355 | }; |
1356 | |
1357 | // A version of ObjectClosure with "memory" (see _previous_address below) |
1358 | class UpwardsObjectClosure: public BoolObjectClosure { |
1359 | HeapWord* _previous_address; |
1360 | public: |
1361 | UpwardsObjectClosure() : _previous_address(NULL) { } |
1362 | void set_previous(HeapWord* addr) { _previous_address = addr; } |
1363 | HeapWord* previous() { return _previous_address; } |
1364 | // A return value of "true" can be used by the caller to decide |
1365 | // if this object's end should *NOT* be recorded in |
1366 | // _previous_address above. |
1367 | virtual bool do_object_bm(oop obj, MemRegion mr) = 0; |
1368 | }; |
1369 | |
1370 | // This closure is used during the second checkpointing phase |
1371 | // to rescan the marked objects on the dirty cards in the mod |
1372 | // union table and the card table proper. It's invoked via |
1373 | // MarkFromDirtyCardsClosure below. It uses either |
1374 | // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) |
1375 | // declared in genOopClosures.hpp to accomplish some of its work. |
1376 | // In the parallel case the bitMap is shared, so access to |
1377 | // it needs to be suitably synchronized for updates by embedded |
1378 | // closures that update it; however, this closure itself only |
1379 | // reads the bit_map and because it is idempotent, is immune to |
1380 | // reading stale values. |
1381 | class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { |
1382 | #ifdef ASSERT |
1383 | CMSCollector* _collector; |
1384 | MemRegion _span; |
1385 | union { |
1386 | CMSMarkStack* _mark_stack; |
1387 | OopTaskQueue* _work_queue; |
1388 | }; |
1389 | #endif // ASSERT |
1390 | bool _parallel; |
1391 | CMSBitMap* _bit_map; |
1392 | union { |
1393 | MarkRefsIntoAndScanClosure* _scan_closure; |
1394 | ParMarkRefsIntoAndScanClosure* _par_scan_closure; |
1395 | }; |
1396 | |
1397 | public: |
1398 | ScanMarkedObjectsAgainClosure(CMSCollector* collector, |
1399 | MemRegion span, |
1400 | ReferenceProcessor* rp, |
1401 | CMSBitMap* bit_map, |
1402 | CMSMarkStack* mark_stack, |
1403 | MarkRefsIntoAndScanClosure* cl): |
1404 | #ifdef ASSERT |
1405 | _collector(collector), |
1406 | _span(span), |
1407 | _mark_stack(mark_stack), |
1408 | #endif // ASSERT |
1409 | _parallel(false), |
1410 | _bit_map(bit_map), |
1411 | _scan_closure(cl) { } |
1412 | |
1413 | ScanMarkedObjectsAgainClosure(CMSCollector* collector, |
1414 | MemRegion span, |
1415 | ReferenceProcessor* rp, |
1416 | CMSBitMap* bit_map, |
1417 | OopTaskQueue* work_queue, |
1418 | ParMarkRefsIntoAndScanClosure* cl): |
1419 | #ifdef ASSERT |
1420 | _collector(collector), |
1421 | _span(span), |
1422 | _work_queue(work_queue), |
1423 | #endif // ASSERT |
1424 | _parallel(true), |
1425 | _bit_map(bit_map), |
1426 | _par_scan_closure(cl) { } |
1427 | |
1428 | bool do_object_b(oop obj) { |
1429 | guarantee(false, "Call do_object_b(oop, MemRegion) form instead" ); |
1430 | return false; |
1431 | } |
1432 | bool do_object_bm(oop p, MemRegion mr); |
1433 | }; |
1434 | |
1435 | // This closure is used during the second checkpointing phase |
1436 | // to rescan the marked objects on the dirty cards in the mod |
1437 | // union table and the card table proper. It invokes |
1438 | // ScanMarkedObjectsAgainClosure above to accomplish much of its work. |
1439 | // In the parallel case, the bit map is shared and requires |
1440 | // synchronized access. |
1441 | class MarkFromDirtyCardsClosure: public MemRegionClosure { |
1442 | CompactibleFreeListSpace* _space; |
1443 | ScanMarkedObjectsAgainClosure _scan_cl; |
1444 | size_t _num_dirty_cards; |
1445 | |
1446 | public: |
1447 | MarkFromDirtyCardsClosure(CMSCollector* collector, |
1448 | MemRegion span, |
1449 | CompactibleFreeListSpace* space, |
1450 | CMSBitMap* bit_map, |
1451 | CMSMarkStack* mark_stack, |
1452 | MarkRefsIntoAndScanClosure* cl): |
1453 | _space(space), |
1454 | _scan_cl(collector, span, collector->ref_processor(), bit_map, |
1455 | mark_stack, cl), |
1456 | _num_dirty_cards(0) { } |
1457 | |
1458 | MarkFromDirtyCardsClosure(CMSCollector* collector, |
1459 | MemRegion span, |
1460 | CompactibleFreeListSpace* space, |
1461 | CMSBitMap* bit_map, |
1462 | OopTaskQueue* work_queue, |
1463 | ParMarkRefsIntoAndScanClosure* cl): |
1464 | _space(space), |
1465 | _scan_cl(collector, span, collector->ref_processor(), bit_map, |
1466 | work_queue, cl), |
1467 | _num_dirty_cards(0) { } |
1468 | |
1469 | void do_MemRegion(MemRegion mr); |
1470 | void set_space(CompactibleFreeListSpace* space) { _space = space; } |
1471 | size_t num_dirty_cards() { return _num_dirty_cards; } |
1472 | }; |
1473 | |
// This closure is used in the non-product build to verify that
// an iteration over MemRegions yields no regions at all: any call
// into it fires a guarantee.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};
1482 | |
1483 | // This closure is used during the precleaning phase |
1484 | // to "carefully" rescan marked objects on dirty cards. |
// It uses MarkRefsIntoAndScanClosure declared in cmsOopClosures.hpp
1486 | // to accomplish some of its work. |
1487 | class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful { |
1488 | CMSCollector* _collector; |
1489 | MemRegion _span; |
1490 | bool _yield; |
1491 | Mutex* _freelistLock; |
1492 | CMSBitMap* _bitMap; |
1493 | CMSMarkStack* _markStack; |
1494 | MarkRefsIntoAndScanClosure* _scanningClosure; |
1495 | DEBUG_ONLY(HeapWord* _last_scanned_object;) |
1496 | |
1497 | public: |
1498 | ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector, |
1499 | MemRegion span, |
1500 | CMSBitMap* bitMap, |
1501 | CMSMarkStack* markStack, |
1502 | MarkRefsIntoAndScanClosure* cl, |
1503 | bool should_yield): |
1504 | _collector(collector), |
1505 | _span(span), |
1506 | _yield(should_yield), |
1507 | _bitMap(bitMap), |
1508 | _markStack(markStack), |
1509 | _scanningClosure(cl) |
1510 | DEBUG_ONLY(COMMA _last_scanned_object(NULL)) |
1511 | { } |
1512 | |
  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }
1521 | |
1522 | size_t do_object_careful_m(oop p, MemRegion mr); |
1523 | |
1524 | void setFreelistLock(Mutex* m) { |
1525 | _freelistLock = m; |
1526 | _scanningClosure->set_freelistLock(m); |
1527 | } |
1528 | |
1529 | private: |
1530 | inline bool do_yield_check(); |
1531 | |
1532 | void do_yield_work(); |
1533 | }; |
1534 | |
1535 | class SurvivorSpacePrecleanClosure: public ObjectClosureCareful { |
1536 | CMSCollector* _collector; |
1537 | MemRegion _span; |
1538 | bool _yield; |
1539 | CMSBitMap* _bit_map; |
1540 | CMSMarkStack* _mark_stack; |
1541 | PushAndMarkClosure* _scanning_closure; |
1542 | unsigned int _before_count; |
1543 | |
1544 | public: |
1545 | SurvivorSpacePrecleanClosure(CMSCollector* collector, |
1546 | MemRegion span, |
1547 | CMSBitMap* bit_map, |
1548 | CMSMarkStack* mark_stack, |
1549 | PushAndMarkClosure* cl, |
1550 | unsigned int before_count, |
1551 | bool should_yield): |
1552 | _collector(collector), |
1553 | _span(span), |
1554 | _yield(should_yield), |
1555 | _bit_map(bit_map), |
1556 | _mark_stack(mark_stack), |
1557 | _scanning_closure(cl), |
1558 | _before_count(before_count) |
1559 | { } |
1560 | |
  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }
1571 | |
1572 | private: |
1573 | inline void do_yield_check(); |
1574 | void do_yield_work(); |
1575 | }; |
1576 | |
1577 | // This closure is used to accomplish the sweeping work |
1578 | // after the second checkpoint but before the concurrent reset |
1579 | // phase. |
1580 | // |
1581 | // Terminology |
1582 | // left hand chunk (LHC) - block of one or more chunks currently being |
1583 | // coalesced. The LHC is available for coalescing with a new chunk. |
// right hand chunk (RHC) - the block currently being swept, which is either
//   free or garbage and can therefore be coalesced with the LHC.
1586 | // _inFreeRange is true if there is currently a LHC |
1587 | // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk. |
1588 | // _freeRangeInFreeLists is true if the LHC is in the free lists. |
1589 | // _freeFinger is the address of the current LHC |
1590 | class SweepClosure: public BlkClosureCareful { |
1591 | CMSCollector* _collector; // collector doing the work |
1592 | ConcurrentMarkSweepGeneration* _g; // Generation being swept |
1593 | CompactibleFreeListSpace* _sp; // Space being swept |
  HeapWord* _limit;  // The address at or above which the sweep should stop,
                     // because we do not expect newly-garbage blocks
                     // eligible for sweeping past that address.
1597 | Mutex* _freelistLock; // Free list lock (in space) |
1598 | CMSBitMap* _bitMap; // Marking bit map (in |
1599 | // generation) |
1600 | bool _inFreeRange; // Indicates if we are in the |
1601 | // midst of a free run |
1602 | bool _freeRangeInFreeLists; |
1603 | // Often, we have just found |
1604 | // a free chunk and started |
1605 | // a new free range; we do not |
1606 | // eagerly remove this chunk from |
1607 | // the free lists unless there is |
1608 | // a possibility of coalescing. |
1609 | // When true, this flag indicates |
1610 | // that the _freeFinger below |
1611 | // points to a potentially free chunk |
1612 | // that may still be in the free lists |
1613 | bool _lastFreeRangeCoalesced; |
                     // True if the current free range
                     // has had chunks coalesced into it.
1616 | bool _yield; |
1617 | // Whether sweeping should be |
1618 | // done with yields. For instance |
1619 | // when done by the foreground |
1620 | // collector we shouldn't yield. |
1621 | HeapWord* _freeFinger; // When _inFreeRange is set, the |
1622 | // pointer to the "left hand |
1623 | // chunk" |
1624 | size_t _freeRangeSize; |
1625 | // When _inFreeRange is set, this |
1626 | // indicates the accumulated size |
1627 | // of the "left hand chunk" |
1628 | NOT_PRODUCT( |
1629 | size_t _numObjectsFreed; |
1630 | size_t _numWordsFreed; |
1631 | size_t _numObjectsLive; |
1632 | size_t _numWordsLive; |
1633 | size_t _numObjectsAlreadyFree; |
1634 | size_t _numWordsAlreadyFree; |
1635 | FreeChunk* _last_fc; |
1636 | ) |
1637 | private: |
1638 | // Code that is common to a free chunk or garbage when |
1639 | // encountered during sweeping. |
  void do_post_free_or_garbage_chunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk* fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk* fc);
1649 | // Process a live chunk during sweeping. |
1650 | size_t do_live_chunk(FreeChunk* fc); |
1651 | |
1652 | // Accessors. |
1653 | HeapWord* freeFinger() const { return _freeFinger; } |
1654 | void set_freeFinger(HeapWord* v) { _freeFinger = v; } |
1655 | bool inFreeRange() const { return _inFreeRange; } |
1656 | void set_inFreeRange(bool v) { _inFreeRange = v; } |
1657 | bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; } |
1658 | void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; } |
1659 | bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; } |
1660 | void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; } |
1661 | |
1662 | // Initialize a free range. |
1663 | void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists); |
1664 | // Return this chunk to the free lists. |
1665 | void flush_cur_free_chunk(HeapWord* chunk, size_t size); |
1666 | |
1667 | // Check if we should yield and do so when necessary. |
1668 | inline void do_yield_check(HeapWord* addr); |
1669 | |
1670 | // Yield |
1671 | void do_yield_work(HeapWord* addr); |
1672 | |
1673 | // Debugging/Printing |
1674 | void print_free_block_coalesced(FreeChunk* fc) const; |
1675 | |
1676 | public: |
1677 | SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, |
1678 | CMSBitMap* bitMap, bool should_yield); |
1679 | ~SweepClosure() PRODUCT_RETURN; |
1680 | |
1681 | size_t do_blk_careful(HeapWord* addr); |
1682 | void print() const { print_on(tty); } |
  void print_on(outputStream* st) const;
1684 | }; |
1685 | |
1686 | // Closures related to weak references processing |
1687 | |
1688 | // During CMS' weak reference processing, this is a |
1689 | // work-routine/closure used to complete transitive |
// marking of objects as live past the point at which
// an initial set has been completely accumulated.
1692 | // This closure is currently used both during the final |
1693 | // remark stop-world phase, as well as during the concurrent |
1694 | // precleaning of the discovered reference lists. |
1695 | class CMSDrainMarkingStackClosure: public VoidClosure { |
1696 | CMSCollector* _collector; |
1697 | MemRegion _span; |
1698 | CMSMarkStack* _mark_stack; |
1699 | CMSBitMap* _bit_map; |
1700 | CMSKeepAliveClosure* _keep_alive; |
1701 | bool _concurrent_precleaning; |
1702 | public: |
1703 | CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, |
1704 | CMSBitMap* bit_map, CMSMarkStack* mark_stack, |
1705 | CMSKeepAliveClosure* keep_alive, |
1706 | bool cpc): |
1707 | _collector(collector), |
1708 | _span(span), |
1709 | _mark_stack(mark_stack), |
1710 | _bit_map(bit_map), |
1711 | _keep_alive(keep_alive), |
1712 | _concurrent_precleaning(cpc) { |
1713 | assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), |
1714 | "Mismatch" ); |
1715 | } |
1716 | |
1717 | void do_void(); |
1718 | }; |
1719 | |
1720 | // A parallel version of CMSDrainMarkingStackClosure above. |
1721 | class CMSParDrainMarkingStackClosure: public VoidClosure { |
1722 | CMSCollector* _collector; |
1723 | MemRegion _span; |
1724 | OopTaskQueue* _work_queue; |
1725 | CMSBitMap* _bit_map; |
1726 | CMSInnerParMarkAndPushClosure _mark_and_push; |
1727 | |
1728 | public: |
1729 | CMSParDrainMarkingStackClosure(CMSCollector* collector, |
1730 | MemRegion span, CMSBitMap* bit_map, |
1731 | OopTaskQueue* work_queue): |
1732 | _collector(collector), |
1733 | _span(span), |
1734 | _work_queue(work_queue), |
1735 | _bit_map(bit_map), |
1736 | _mark_and_push(collector, span, bit_map, work_queue) { } |
1737 | |
1738 | public: |
1739 | void trim_queue(uint max); |
1740 | void do_void(); |
1741 | }; |
1742 | |
1743 | // Allow yielding or short-circuiting of reference list |
1744 | // precleaning work. |
1745 | class CMSPrecleanRefsYieldClosure: public YieldClosure { |
1746 | CMSCollector* _collector; |
1747 | void do_yield_work(); |
1748 | public: |
1749 | CMSPrecleanRefsYieldClosure(CMSCollector* collector): |
1750 | _collector(collector) {} |
1751 | virtual bool should_return(); |
1752 | }; |
1753 | |
1754 | |
// Convenience RAII class that acquires, and on destruction releases,
// the free list locks for a given CMS collector.
1756 | class FreelistLocker: public StackObj { |
1757 | private: |
1758 | CMSCollector* _collector; |
1759 | public: |
1760 | FreelistLocker(CMSCollector* collector): |
1761 | _collector(collector) { |
1762 | _collector->getFreelistLocks(); |
1763 | } |
1764 | |
1765 | ~FreelistLocker() { |
1766 | _collector->releaseFreelistLocks(); |
1767 | } |
1768 | }; |
1769 | |
1770 | // Mark all dead objects in a given space. |
1771 | class MarkDeadObjectsClosure: public BlkClosure { |
1772 | const CMSCollector* _collector; |
1773 | const CompactibleFreeListSpace* _sp; |
1774 | CMSBitMap* _live_bit_map; |
1775 | CMSBitMap* _dead_bit_map; |
1776 | public: |
1777 | MarkDeadObjectsClosure(const CMSCollector* collector, |
1778 | const CompactibleFreeListSpace* sp, |
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
1781 | _collector(collector), |
1782 | _sp(sp), |
1783 | _live_bit_map(live_bit_map), |
1784 | _dead_bit_map(dead_bit_map) {} |
1785 | size_t do_blk(HeapWord* addr); |
1786 | }; |
1787 | |
1788 | class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats { |
1789 | |
1790 | public: |
1791 | TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause); |
1792 | }; |
1793 | |
1794 | |
1795 | #endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP |
1796 | |