/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_GC_G1_G1CONCURRENTMARK_HPP

#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1RegionMarkStatsCache.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"
#include "utilities/compilerWarnings.hpp"

class ConcurrentGCTimer;
class G1ConcurrentMarkThread;
class G1CollectedHeap;
class G1CMOopClosure;
class G1CMTask;
class G1ConcurrentMark;
class G1OldTracer;
class G1RegionToSpaceMapper;
class G1SurvivorRegions;

PRAGMA_DIAG_PUSH
// warning C4522: multiple assignment operators specified
PRAGMA_DISABLE_MSVC_WARNING(4522)

|---|
// This is a container class for either an oop or a continuation address for
// mark stack entries. Both are pushed onto the mark stack.
class G1TaskQueueEntry {
private:
  void* _holder;

  static const uintptr_t ArraySliceBit = 1;

  G1TaskQueueEntry(oop obj) : _holder(obj) {
    assert(_holder != NULL, "Not allowed to set NULL task queue element");
  }
  G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
public:
  G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
  G1TaskQueueEntry() : _holder(NULL) { }

  static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
  static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }

  G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
    _holder = t._holder;
    return *this;
  }

  volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
    _holder = t._holder;
    return *this;
  }

  oop obj() const {
    assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
    return (oop)_holder;
  }

  HeapWord* slice() const {
    assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
    return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
  }

  bool is_oop() const { return !is_array_slice(); }
  bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
  bool is_null() const { return _holder == NULL; }
};
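
// Illustrative usage (a hedged sketch, not part of the original sources): the
// low ArraySliceBit tag distinguishes oops from objArray-slice continuation
// addresses, so a single queue entry type can carry both kinds of work:
//   G1TaskQueueEntry e = G1TaskQueueEntry::from_slice(addr);
//   if (e.is_array_slice()) {
//     HeapWord* s = e.slice();   // strips ArraySliceBit before use
//   } else {
//     oop o = e.obj();           // plain object entry
//   }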
|---|

PRAGMA_DIAG_POP

typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1CMIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
  bool do_object_b(oop obj);
};

class G1CMSubjectToDiscoveryClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1CMSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
  bool do_object_b(oop obj);
};

|---|
// Represents the overflow mark stack used by concurrent marking.
//
// Stores oops in a huge buffer in virtual memory that is always fully committed.
// Resizing may only happen during a STW pause when the stack is empty.
//
// Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
// stack memory is split into evenly sized chunks of oops. Users can only
// add or remove entries on that basis.
// Chunks are filled in increasing address order. Chunks that are not completely
// filled are terminated by a NULL element.
//
// Every chunk has a header containing a single pointer element used for memory
// management. This wastes some space, but is negligible (< .1% with current sizing).
//
// Memory management is done using a mix of tracking a high water mark indicating
// that all chunks at a lower address are valid chunks, and a singly linked free
// list connecting all empty chunks.
|---|
class G1CMMarkStack {
public:
  // Number of TaskQueueEntries that can fit in a single chunk.
  static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
private:
  struct TaskQueueEntryChunk {
    TaskQueueEntryChunk* next;
    G1TaskQueueEntry data[EntriesPerChunk];
  };

  size_t _max_chunk_capacity;    // Maximum number of TaskQueueEntryChunk elements on the stack.

  TaskQueueEntryChunk* _base;    // Bottom address of allocated memory area.
  size_t _chunk_capacity;        // Current maximum number of TaskQueueEntryChunk elements.

  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  TaskQueueEntryChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
  TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
  volatile size_t _chunks_in_chunk_list;
  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];

  volatile size_t _hwm;          // High water mark within the reserved space.
  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];

  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
  // NULL if out of memory.
  TaskQueueEntryChunk* allocate_new_chunk();

  // Atomically add the given chunk to the list.
  void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
  // Atomically remove and return a chunk from the given list. Returns NULL if the
  // list is empty.
  TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);

  void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
  void add_chunk_to_free_list(TaskQueueEntryChunk* elem);

  TaskQueueEntryChunk* remove_chunk_from_chunk_list();
  TaskQueueEntryChunk* remove_chunk_from_free_list();

  // Resizes the mark stack to the given new capacity. Releases any previous
  // memory if successful.
  bool resize(size_t new_capacity);

|---|
public:
  G1CMMarkStack();
  ~G1CMMarkStack();

  // Alignment and minimum capacity of this mark stack in number of oops.
  static size_t capacity_alignment();

  // Allocate and initialize the mark stack with the given number of oops.
  bool initialize(size_t initial_capacity, size_t max_capacity);

  // Pushes the given buffer containing at most EntriesPerChunk elements on the mark
  // stack. If fewer than EntriesPerChunk elements are to be pushed, the array must
  // be terminated with a NULL.
  // Returns whether the buffer contents were successfully pushed to the global mark
  // stack.
  bool par_push_chunk(G1TaskQueueEntry* buffer);

  // Pops a chunk from this mark stack, copying its entries into the given buffer. The
  // chunk may contain up to EntriesPerChunk elements. If there are fewer, the last
  // element in the array is a NULL pointer.
  bool par_pop_chunk(G1TaskQueueEntry* buffer);

  // Return whether the chunk list is empty. Racy due to unsynchronized access to
  // _chunk_list.
  bool is_empty() const { return _chunk_list == NULL; }

  size_t capacity() const  { return _chunk_capacity; }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Return the approximate number of oops on this mark stack. Racy due to
  // unsynchronized access to _chunks_in_chunk_list.
  size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }

  void set_empty();

  // Apply Fn to every oop on the mark stack. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};
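
// A hedged usage sketch (buffer handling inferred from the comments above, not
// copied from the original sources): callers exchange whole chunks with the
// stack, never single entries, and a partially filled buffer is NULL-terminated:
//   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
//   // fill the first n entries, then terminate if n < EntriesPerChunk:
//   buffer[n] = G1TaskQueueEntry();   // a default-constructed entry is NULL
//   if (!stack.par_push_chunk(buffer)) {
//     // allocation failed: callers treat this as mark stack overflow
//   }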
|---|

// Root MemRegions are memory areas that contain objects whose references are
// roots with respect to marking. They must be scanned before marking to
// maintain the SATB invariant.
// Typically they contain the areas from nTAMS to top of the regions.
// We could scan and mark through these objects during the initial-mark pause, but for
// pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
// Root MemRegions comprise the contents of survivor regions at the end
// of the GC, and any objects copied into the old gen during GC.
class G1CMRootMemRegions {
  // The set of root MemRegions.
  MemRegion*   _root_regions;
  size_t const _max_regions;

  volatile size_t _num_root_regions; // Actual number of root regions.

  volatile size_t _claimed_root_regions; // Number of root regions currently claimed.

  volatile bool _scan_in_progress;
  volatile bool _should_abort;

  void notify_scan_done();

public:
  G1CMRootMemRegions(uint const max_regions);
  ~G1CMRootMemRegions();

  // Reset the data structure to allow addition of new root regions.
  void reset();

  void add(HeapWord* start, HeapWord* end);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root MemRegion to scan atomically, or return NULL if
  // all have been claimed.
  const MemRegion* claim_next();

  // The number of root regions to scan.
  uint num_root_regions() const;

  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
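
// A hedged sketch of the claim protocol implied by the API above (the worker
// loop shape is an assumption, not copied from the original sources):
//   const MemRegion* region;
//   while ((region = root_regions->claim_next()) != NULL) {
//     // scan and mark everything reachable from objects in *region
//   }
//   // a coordinator calls scan_finished() once all regions have been claimed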
|---|

// This class manages data structures and methods for doing liveness analysis in
// G1's concurrent cycle.
class G1ConcurrentMark : public CHeapObj<mtGC> {
  friend class G1ConcurrentMarkThread;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;
  friend class G1CMBitMapClosure;
  friend class G1CMConcurrentMarkingTask;
  friend class G1CMRemarkTask;
  friend class G1CMTask;

  G1ConcurrentMarkThread* _cm_thread;     // The thread doing the work
  G1CollectedHeap*        _g1h;           // The heap
  bool                    _completed_initialization; // Set to true when initialization is complete

  // Concurrent marking support structures
  G1CMBitMap              _mark_bitmap_1;
  G1CMBitMap              _mark_bitmap_2;
  G1CMBitMap*             _prev_mark_bitmap; // Completed mark bitmap
  G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap

  // Heap bounds
  MemRegion const         _heap;

  // Root region tracking and claiming
  G1CMRootMemRegions      _root_regions;

  // For grey objects
  G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
  HeapWord* volatile      _finger;            // The global finger, region aligned,
                                              // always pointing to the end of the
                                              // last claimed region

  uint                    _worker_id_offset;
  uint                    _max_num_tasks;    // Maximum number of marking tasks
  uint                    _num_active_tasks; // Number of tasks currently active
  G1CMTask**              _tasks;            // Task queue array (max_worker_id length)

  G1CMTaskQueueSet*       _task_queues; // Task queue set
  TaskTerminator          _terminator;  // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
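
  // A hedged sketch of the overflow protocol described above (shape only; the
  // real entry points are enter_first_sync_barrier() and
  // enter_second_sync_barrier() declared further below):
  //   enter_first_sync_barrier(worker_id);  // all tasks stop touching global state
  //   // each task resets its local structures; task 0 also resets the global ones
  //   enter_second_sync_barrier(worker_id); // nobody resumes until resets are done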
|---|

  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  ConcurrentGCTimer*      _gc_timer_cm;

  G1OldTracer*            _gc_tracer_cm;

  // Timing statistics. All of them are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_cleanup_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  WorkGang* _concurrent_workers;
  uint      _num_concurrent_workers; // The number of marking worker threads we're using
  uint      _max_concurrent_workers; // Maximum number of marking worker threads
|---|

  void verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller);

  void finalize_marking();

  void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
  void weak_refs_work(bool clear_all_soft_refs);

  void report_object_count(bool mark_completed);

  void swap_mark_bitmaps();

  void reclaim_empty_regions();

  // After reclaiming empty regions, update heap sizes.
  void compute_new_sizes();

  // Clear statistics gathered during the concurrent cycle for the given region after
  // it has been reclaimed.
  void clear_statistics(HeapRegion* r);

  // Resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via reset_at_marking_complete() below).
  void reset_marking_for_restart();

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void reset_at_marking_complete();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // Should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  HeapWord*               finger()           { return _finger;   }
  bool                    concurrent()       { return _concurrent; }
  uint                    active_tasks()     { return _num_active_tasks; }
  ParallelTaskTerminator* terminator() const { return _terminator.terminator(); }
|---|

  // Claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);

  // Determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap.end(); }

  // Returns the task with the given id
  G1CMTask* task(uint id) {
    // During initial mark we use the parallel gc threads to do some work, so
    // we can only compare against _max_num_tasks.
    assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
    return _tasks[id];
  }
|---|

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()           { return _has_overflown; }
  void set_has_overflown()       { _has_overflown = true; }
  void clear_has_overflown()     { _has_overflown = false; }
  bool restart_for_overflow()    { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);

  // Region statistics gathered during marking.
  G1RegionMarkStats* _region_mark_stats;
  // Top pointer for each region at the start of the rebuild remembered set process
  // for regions whose remembered sets need to be rebuilt. A NULL for a given region
  // means that this region need not be scanned during the remembered set
  // rebuilding phase at all.
  HeapWord* volatile* _top_at_rebuild_starts;
|---|
public:
  void add_to_liveness(uint worker_id, oop const obj, size_t size);
  // Liveness of the given region as determined by concurrent marking, i.e. the number
  // of live words between bottom and nTAMS.
  size_t liveness(uint region) const { return _region_mark_stats[region]._live_words; }

  // Sets the internal top_at_region_start for the given region to current top of the region.
  inline void update_top_at_rebuild_start(HeapRegion* r);
  // TARS for the given region during remembered set rebuilding.
  inline HeapWord* top_at_rebuild_start(uint region) const;

  // Clear statistics gathered during the concurrent cycle for the given region after
  // it has been reclaimed.
  void clear_statistics_in_region(uint region_idx);
  // Notification for eagerly reclaimed regions to clean up.
  void humongous_object_eagerly_reclaimed(HeapRegion* r);
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack.
  bool mark_stack_push(G1TaskQueueEntry* arr) {
    if (!_global_mark_stack.par_push_chunk(arr)) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_pop(G1TaskQueueEntry* arr) {
    return _global_mark_stack.par_pop_chunk(arr);
  }
  size_t mark_stack_size() const                { return _global_mark_stack.size(); }
  size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
  bool mark_stack_empty() const                 { return _global_mark_stack.is_empty(); }
|---|

  G1CMRootMemRegions* root_regions() { return &_root_regions; }

  void concurrent_cycle_start();
  // Abandon current marking iteration due to a Full GC.
  void concurrent_cycle_abort();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_num_tasks; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  G1ConcurrentMarkThread* cm_thread() { return _cm_thread; }

  const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
  G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }

  // Calculates the number of concurrent GC threads to be used in the marking phase.
  uint calc_active_marking_workers();

  // Moves all per-task cached data into global state.
  void flush_all_task_caches();
  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently to the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // These two methods do the work that needs to be done at the start and end of the
  // initial mark pause.
  void pre_initial_mark();
  void post_initial_mark();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root MemRegion to mark everything reachable from it.
  void scan_root_region(const MemRegion* region, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  // Do concurrent preclean work.
  void preclean();

  void remark();

  void cleanup();
|---|
  // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
  // this carefully.
  inline void mark_in_prev_bitmap(oop p);

  // Clears marks for all objects in the given range, for the prev or
  // next bitmaps.  Caution: the previous bitmap is usually
  // read-only, so use this carefully!
  void clear_range_in_prev_bitmap(MemRegion mr);

  inline bool is_marked_in_prev_bitmap(oop p) const;

  // Verify that there are no collection set oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_collection_set_oops() PRODUCT_RETURN;

  inline bool do_yield_check();

  bool has_aborted()      { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;
  void threads_do(ThreadClosure* tc) const;

  void print_on_error(outputStream* st) const;

  // Mark the given object on the next bitmap if it is below nTAMS.
  inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj);
  inline bool mark_in_next_bitmap(uint worker_id, oop const obj);

  inline bool is_marked_in_next_bitmap(oop p) const;

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

private:
  // Rebuilds the remembered sets for chosen regions in parallel and concurrently to the application.
  void rebuild_rem_set_concurrently();
};
|---|

// A class representing a marking task.
class G1CMTask : public TerminatorTerminator {
private:
  enum PrivateConstants {
    // The regular clock call is called once the number of scanned words
    // reaches this limit
    words_scanned_period          = 12*1024,
    // The regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 1024,
    // Initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17
  };

  // Number of entries in the per-task stats entry. This seems enough to have a very
  // low cache miss rate.
  static const uint RegionMarkStatsCacheSize = 1024;

  G1CMObjArrayProcessor       _objArray_processor;

  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  G1ConcurrentMark*           _cm;
  G1CMBitMap*                 _next_mark_bitmap;
  // the task queue of this task
  G1CMTaskQueue*              _task_queue;

  G1RegionMarkStatsCache      _mark_stats_cache;
  // Number of calls to this task
  uint                        _calls;

  // When the virtual timer reaches this time, the marking step should exit
  double                      _time_target_ms;
  // Start time of the current marking step
  double                      _start_time_ms;

  // Oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // Region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // Local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // Limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // Number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // Initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;
|---|
| 674 | // Number of references this task has visited | 
|---|
| 675 | size_t                      _refs_reached; | 
|---|
| 676 | // When _refs_reached reaches this limit, the regular clock is | 
|---|
| 677 | // called. Notice this this might be decreased under certain | 
|---|
| 678 | // circumstances (i.e. when we believe that we did an expensive | 
|---|
| 679 | // operation). | 
|---|
| 680 | size_t                      _refs_reached_limit; | 
|---|
| 681 | // Initial value of _refs_reached_limit (i.e. what it was before | 
|---|
| 682 | // it was decreased). | 
|---|
| 683 | size_t                      _real_refs_reached_limit; | 
|---|
| 684 |  | 
|---|
| 685 | // If true, then the task has aborted for some reason | 
|---|
| 686 | bool                        _has_aborted; | 
|---|
| 687 | // Set when the task aborts because it has met its time quota | 
|---|
| 688 | bool                        _has_timed_out; | 
|---|
| 689 | // True when we're draining SATB buffers; this avoids the task | 
|---|
| 690 | // aborting due to SATB buffers being available (as we're already | 
|---|
| 691 | // dealing with them) | 
|---|
| 692 | bool                        _draining_satb_buffers; | 
|---|
| 693 |  | 
|---|
| 694 | // Number sequence of past step times | 
|---|
| 695 | NumberSeq                   _step_times_ms; | 
|---|
| 696 | // Elapsed time of this task | 
|---|
| 697 | double                      _elapsed_time_ms; | 
|---|
| 698 | // Termination time of this task | 
|---|
| 699 | double                      _termination_time_ms; | 
|---|
| 700 | // When this task got into the termination protocol | 
|---|
| 701 | double                      _termination_start_time_ms; | 
|---|
| 702 |  | 
|---|
| 703 | TruncatedSeq                _marking_step_diffs_ms; | 
|---|
| 704 |  | 
|---|
| 705 | // Updates the local fields after this task has claimed | 
|---|
| 706 | // a new region to scan | 
|---|
| 707 | void setup_for_region(HeapRegion* hr); | 
|---|
| 708 | // Makes the limit of the region up-to-date | 
|---|
| 709 | void update_region_limit(); | 
|---|
| 710 |  | 
|---|
| 711 | // Called when either the words scanned or the refs visited limit | 
|---|
| 712 | // has been reached | 
|---|
| 713 | void reached_limit(); | 
|---|
| 714 | // Recalculates the words scanned and refs visited limits | 
|---|
| 715 | void recalculate_limits(); | 
|---|
| 716 | // Decreases the words scanned and refs visited limits when we reach | 
|---|
| 717 | // an expensive operation | 
|---|
| 718 | void decrease_limits(); | 
|---|
| 719 | // Checks whether the words scanned or refs visited reached their | 
|---|
| 720 | // respective limit and calls reached_limit() if they have | 
|---|
| 721 | void check_limits() { | 
|---|
| 722 | if (_words_scanned >= _words_scanned_limit || | 
|---|
| 723 | _refs_reached >= _refs_reached_limit) { | 
|---|
| 724 | reached_limit(); | 
|---|
| 725 | } | 
|---|
| 726 | } | 
|---|
| 727 | // Supposed to be called regularly during a marking step as | 
|---|
| 728 | // it checks a bunch of conditions that might cause the marking step | 
|---|
| 729 | // to abort | 
|---|
| 730 | // Return true if the marking step should continue. Otherwise, return false to abort | 
|---|
| 731 | bool regular_clock_call(); | 
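
  // A hedged sketch of how the limits drive the clock (inferred from the
  // declarations above; the scan-loop shape is an assumption):
  //   _words_scanned += obj->size();
  //   check_limits();   // at a limit: reached_limit() -> regular_clock_call();
  //                     // a false return makes the step abort (see below)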
|---|

  // Set abort flag if regular_clock_call() check fails
  inline void abort_marking_if_regular_check_fail();

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
public:
  // Apply the closure on the given area of the objArray. Return the number of words
  // scanned.
  inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
  // Resets the task; should be called right at the beginning of a marking phase.
  void reset(G1CMBitMap* next_mark_bitmap);
  // Clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);
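
  // A hedged sketch of a typical invocation (shape inferred from this API;
  // G1ConcMarkStepDurationMillis is assumed here as the step budget):
  //   task->record_start_time();
  //   task->do_marking_step(G1ConcMarkStepDurationMillis,
  //                         true  /* do_termination */,
  //                         false /* is_serial */);
  //   task->record_end_time();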
|---|

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // Returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()            { return _finger; }

  bool has_aborted()            { return _has_aborted; }
  void set_has_aborted()        { _has_aborted = true; }
  void clear_has_aborted()      { _has_aborted = false; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }

  // Grey the object by marking it.  If not already marked, push it on
  // the local queue if below the finger. obj is required to be below its region's NTAMS.
  // Returns whether there has been a mark to the bitmap.
  inline bool make_reference_grey(oop obj);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  // Returns true if the reference caused a mark to be set in the next bitmap.
  template <class T>
  inline bool deal_with_reference(T* p);

  // Scans an object and visits its children.
  inline void scan_task_entry(G1TaskQueueEntry task_entry);

  // Pushes an object on the local queue.
  inline void push(G1TaskQueueEntry task_entry);

  // Move entries to the global stack.
  void move_entries_to_global_stack();
  // Move entries from the global stack; returns true if successful.
  bool get_entries_from_global_stack();

  // Pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size reaches a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // Moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // Keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // Moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  G1CMTask(uint worker_id,
           G1ConcurrentMark *cm,
           G1CMTaskQueue* task_queue,
           G1RegionMarkStats* mark_stats,
           uint max_regions);

  inline void update_liveness(oop const obj, size_t const obj_size);

  // Clear (without flushing) the mark cache entry for the given region.
  void clear_mark_stats_cache(uint region_idx);
  // Evict the whole statistics cache into the global statistics. Returns the
  // number of cache hits and misses so far.
  Pair<size_t, size_t> flush_mark_stats_cache();
  // Prints statistics associated with this task
  void print_stats();
};
|---|

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure : public HeapRegionClosure {
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool do_heap_region(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_GC_G1_G1CONCURRENTMARK_HPP