| 1 | /* | 
| 2 |  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. | 
| 3 |  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 
| 4 |  * | 
| 5 |  * This code is free software; you can redistribute it and/or modify it | 
| 6 |  * under the terms of the GNU General Public License version 2 only, as | 
| 7 |  * published by the Free Software Foundation. | 
| 8 |  * | 
| 9 |  * This code is distributed in the hope that it will be useful, but WITHOUT | 
| 10 |  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
| 11 |  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License | 
| 12 |  * version 2 for more details (a copy is included in the LICENSE file that | 
| 13 |  * accompanied this code). | 
| 14 |  * | 
| 15 |  * You should have received a copy of the GNU General Public License version | 
| 16 |  * 2 along with this work; if not, write to the Free Software Foundation, | 
| 17 |  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | 
| 18 |  * | 
| 19 |  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | 
| 20 |  * or visit www.oracle.com if you need additional information or have any | 
| 21 |  * questions. | 
| 22 |  * | 
| 23 |  */ | 
| 24 |  | 
| 25 | #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP | 
| 26 | #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP | 
| 27 |  | 
| 28 | #include "gc/g1/g1BarrierSet.hpp" | 
| 29 | #include "gc/g1/g1BiasedArray.hpp" | 
| 30 | #include "gc/g1/g1CardTable.hpp" | 
| 31 | #include "gc/g1/g1CollectionSet.hpp" | 
| 32 | #include "gc/g1/g1CollectorState.hpp" | 
| 33 | #include "gc/g1/g1ConcurrentMark.hpp" | 
| 34 | #include "gc/g1/g1DirtyCardQueue.hpp" | 
| 35 | #include "gc/g1/g1EdenRegions.hpp" | 
| 36 | #include "gc/g1/g1EvacFailure.hpp" | 
| 37 | #include "gc/g1/g1EvacStats.hpp" | 
| 38 | #include "gc/g1/g1EvacuationInfo.hpp" | 
| 39 | #include "gc/g1/g1GCPhaseTimes.hpp" | 
| 40 | #include "gc/g1/g1HeapTransition.hpp" | 
| 41 | #include "gc/g1/g1HeapVerifier.hpp" | 
| 42 | #include "gc/g1/g1HRPrinter.hpp" | 
| 43 | #include "gc/g1/g1HeapRegionAttr.hpp" | 
| 44 | #include "gc/g1/g1MonitoringSupport.hpp" | 
| 45 | #include "gc/g1/g1SurvivorRegions.hpp" | 
| 46 | #include "gc/g1/g1YCTypes.hpp" | 
| 47 | #include "gc/g1/heapRegionManager.hpp" | 
| 48 | #include "gc/g1/heapRegionSet.hpp" | 
| 49 | #include "gc/g1/heterogeneousHeapRegionManager.hpp" | 
| 50 | #include "gc/shared/barrierSet.hpp" | 
| 51 | #include "gc/shared/collectedHeap.hpp" | 
| 52 | #include "gc/shared/gcHeapSummary.hpp" | 
| 53 | #include "gc/shared/plab.hpp" | 
| 54 | #include "gc/shared/preservedMarks.hpp" | 
| 55 | #include "gc/shared/softRefPolicy.hpp" | 
| 56 | #include "memory/memRegion.hpp" | 
| 57 | #include "utilities/stack.hpp" | 
| 58 |  | 
| 59 | // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. | 
| 60 | // It uses the "Garbage First" heap organization and algorithm, which | 
| 61 | // may combine concurrent marking with parallel, incremental compaction of | 
| 62 | // heap subsets that will yield large amounts of garbage. | 
| 63 |  | 
| 64 | // Forward declarations | 
| 65 | class HeapRegion; | 
| 66 | class GenerationSpec; | 
| 67 | class G1ParScanThreadState; | 
| 68 | class G1ParScanThreadStateSet; | 
| 70 | class MemoryPool; | 
| 71 | class MemoryManager; | 
| 72 | class ObjectClosure; | 
| 73 | class SpaceClosure; | 
| 74 | class CompactibleSpaceClosure; | 
| 75 | class Space; | 
| 76 | class G1CollectionSet; | 
| 77 | class G1Policy; | 
| 78 | class G1HotCardCache; | 
| 79 | class G1RemSet; | 
| 80 | class G1YoungRemSetSamplingThread; | 
| 81 | class HeapRegionRemSetIterator; | 
| 82 | class G1ConcurrentMark; | 
| 83 | class G1ConcurrentMarkThread; | 
| 84 | class G1ConcurrentRefine; | 
| 85 | class GenerationCounters; | 
| 86 | class STWGCTimer; | 
| 87 | class G1NewTracer; | 
| 88 | class EvacuationFailedInfo; | 
| 89 | class nmethod; | 
| 90 | class WorkGang; | 
| 91 | class G1Allocator; | 
| 92 | class G1ArchiveAllocator; | 
| 93 | class G1FullGCScope; | 
| 94 | class G1HeapVerifier; | 
| 95 | class G1HeapSizingPolicy; | 
| 96 | class G1HeapSummary; | 
| 97 | class G1EvacSummary; | 
| 98 |  | 
| 99 | typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue; | 
| 100 | typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; | 
| 101 |  | 
| 102 | typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() ) | 
| 103 | typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion ) | 
| 104 |  | 
| 105 | // The G1 STW is-alive closure. |
| 106 | // An instance is embedded into the G1CH and used as the |
| 107 | // (optional) _is_alive_non_header closure in the STW |
| 108 | // reference processor. It is also used extensively for |
| 109 | // reference processing during STW evacuation pauses. |
| 110 | class G1STWIsAliveClosure : public BoolObjectClosure { | 
| 111 |   G1CollectedHeap* _g1h; | 
| 112 | public: | 
| 113 |   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | 
| 114 |   bool do_object_b(oop p); | 
| 115 | }; | 
| 116 |  | 
| 117 | class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure { | 
| 118 |   G1CollectedHeap* _g1h; | 
| 119 | public: | 
| 120 |   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} | 
| 121 |   bool do_object_b(oop p); | 
| 122 | }; | 
| 123 |  | 
| 124 | class G1RegionMappingChangedListener : public G1MappingChangedListener { | 
| 125 |  private: | 
| 126 |   void reset_from_card_cache(uint start_idx, size_t num_regions); | 
| 127 |  public: | 
| 128 |   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled); | 
| 129 | }; | 
| 130 |  | 
| 131 | class G1CollectedHeap : public CollectedHeap { | 
| 132 |   friend class G1FreeCollectionSetTask; | 
| 133 |   friend class VM_CollectForMetadataAllocation; | 
| 134 |   friend class VM_G1CollectForAllocation; | 
| 135 |   friend class VM_G1CollectFull; | 
| 136 |   friend class VMStructs; | 
| 137 |   friend class MutatorAllocRegion; | 
| 138 |   friend class G1FullCollector; | 
| 139 |   friend class G1GCAllocRegion; | 
| 140 |   friend class G1HeapVerifier; | 
| 141 |  | 
| 142 |   // Closures used in implementation. | 
| 143 |   friend class G1ParScanThreadState; | 
| 144 |   friend class G1ParScanThreadStateSet; | 
| 145 |   friend class G1EvacuateRegionsTask; | 
| 146 |   friend class G1PLABAllocator; | 
| 147 |  | 
| 148 |   // Other related classes. | 
| 149 |   friend class HeapRegionClaimer; | 
| 150 |  | 
| 151 |   // Testing classes. | 
| 152 |   friend class G1CheckRegionAttrTableClosure; | 
| 153 |  | 
| 154 | private: | 
| 155 |   G1YoungRemSetSamplingThread* _young_gen_sampling_thread; | 
| 156 |  | 
| 157 |   WorkGang* _workers; | 
| 158 |   G1CardTable* _card_table; | 
| 159 |  | 
| 160 |   SoftRefPolicy      _soft_ref_policy; | 
| 161 |  | 
| 162 |   static size_t _humongous_object_threshold_in_words; | 
| 163 |  | 
| 164 |   // These sets keep track of old, archive and humongous regions respectively. | 
| 165 |   HeapRegionSet _old_set; | 
| 166 |   HeapRegionSet _archive_set; | 
| 167 |   HeapRegionSet _humongous_set; | 
| 168 |  | 
| 169 |   void eagerly_reclaim_humongous_regions(); | 
| 170 |   // Start a new incremental collection set for the next pause. | 
| 171 |   void start_new_collection_set(); | 
| 172 |  | 
| 173 |   // The block offset table for the G1 heap. | 
| 174 |   G1BlockOffsetTable* _bot; | 
| 175 |  | 
| 176 |   // Tears down the region sets / lists so that they are empty and the | 
| 177 |   // regions on the heap do not belong to a region set / list. The | 
| 178 |   // only exception is the humongous set which we leave unaltered. If | 
| 179 |   // free_list_only is true, it will only tear down the master free | 
| 180 |   // list. It is called before a Full GC (free_list_only == false) or | 
| 181 |   // before heap shrinking (free_list_only == true). | 
| 182 |   void tear_down_region_sets(bool free_list_only); | 
| 183 |  | 
| 184 |   // Rebuilds the region sets / lists so that they are repopulated to | 
| 185 |   // reflect the contents of the heap. The only exception is the | 
| 186 |   // humongous set which was not torn down in the first place. If | 
| 187 |   // free_list_only is true, it will only rebuild the master free | 
| 188 |   // list. It is called after a Full GC (free_list_only == false) or | 
| 189 |   // after heap shrinking (free_list_only == true). | 
| 190 |   void rebuild_region_sets(bool free_list_only); | 
| 191 |  | 
| 192 |   // Callback for region mapping changed events. | 
| 193 |   G1RegionMappingChangedListener _listener; | 
| 194 |  | 
| 195 |   // The sequence of all heap regions in the heap. | 
| 196 |   HeapRegionManager* _hrm; | 
| 197 |  | 
| 198 |   // Manages all allocations with regions except humongous object allocations. | 
| 199 |   G1Allocator* _allocator; | 
| 200 |  | 
| 201 |   // Manages all heap verification. | 
| 202 |   G1HeapVerifier* _verifier; | 
| 203 |  | 
| 204 |   // Outside of GC pauses, the number of bytes used in all regions other | 
| 205 |   // than the current allocation region(s). | 
| 206 |   volatile size_t _summary_bytes_used; | 
| 207 |  | 
| 208 |   void increase_used(size_t bytes); | 
| 209 |   void decrease_used(size_t bytes); | 
| 210 |  | 
| 211 |   void set_used(size_t bytes); | 
| 212 |  | 
| 213 |   // Class that handles archive allocation ranges. | 
| 214 |   G1ArchiveAllocator* _archive_allocator; | 
| 215 |  | 
| 216 |   // GC allocation statistics policy for survivors. | 
| 217 |   G1EvacStats _survivor_evac_stats; | 
| 218 |  | 
| 219 |   // GC allocation statistics policy for tenured objects. | 
| 220 |   G1EvacStats _old_evac_stats; | 
| 221 |  | 
| 222 |   // It specifies whether we should attempt to expand the heap after a | 
| 223 |   // region allocation failure. If heap expansion fails we set this to | 
| 224 |   // false so that we don't re-attempt the heap expansion (it's likely | 
| 225 |   // that subsequent expansion attempts will also fail if one fails). | 
| 226 |   // Currently, it is only consulted during GC and it's reset at the | 
| 227 |   // start of each GC. | 
| 228 |   bool _expand_heap_after_alloc_failure; | 
| 229 |  | 
| 230 |   // Helper for monitoring and management support. | 
| 231 |   G1MonitoringSupport* _g1mm; | 
| 232 |  | 
| 233 |   // Records whether the region at the given index is (still) a | 
| 234 |   // candidate for eager reclaim.  Only valid for humongous start | 
| 235 |   // regions; other regions have unspecified values.  Humongous start | 
| 236 |   // regions are initialized at start of collection pause, with | 
| 237 |   // candidates removed from the set as they are found reachable from | 
| 238 |   // roots or the young generation. | 
| 239 |   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> { | 
| 240 |    protected: | 
| 241 |     bool default_value() const { return false; } | 
| 242 |    public: | 
| 243 |     void clear() { G1BiasedMappedArray<bool>::clear(); } | 
| 244 |     void set_candidate(uint region, bool value) { | 
| 245 |       set_by_index(region, value); | 
| 246 |     } | 
| 247 |     bool is_candidate(uint region) { | 
| 248 |       return get_by_index(region); | 
| 249 |     } | 
| 250 |   }; | 
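
  // Illustrative sketch only (not part of this header): how the candidate
  // table above is typically used across a pause. "candidates" and
  // "region_idx" are illustrative names; region_idx is the index of a
  // humongous start region.
  //
  //   candidates.set_candidate(region_idx, true);   // registered at pause start
  //   ...
  //   if (candidates.is_candidate(region_idx)) {
  //     // Still not found reachable from roots or the young gen, so the
  //     // corresponding humongous object may be eagerly reclaimed.
  //   }
  //   candidates.clear();                           // reset for the next pause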
| 251 |  | 
| 252 |   HumongousReclaimCandidates _humongous_reclaim_candidates; | 
| 253 |   // Stores whether during humongous object registration we found candidate regions. | 
| 254 |   // If not, we can skip a few steps. | 
| 255 |   bool _has_humongous_reclaim_candidates; | 
| 256 |  | 
| 257 |   G1HRPrinter _hr_printer; | 
| 258 |  | 
| 259 |   // It decides whether an explicit GC should start a concurrent cycle | 
| 260 |   // instead of doing a STW GC. Currently, a concurrent cycle is | 
| 261 |   // explicitly started if: | 
| 262 |   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, |
| 263 |   // (b) cause == _g1_humongous_allocation, |
| 264 |   // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, |
| 265 |   // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or |
| 266 |   // (e) cause == _wb_conc_mark. |
| 267 |   bool should_do_concurrent_full_gc(GCCause::Cause cause); | 
| 268 |  | 
| 269 |   // Return true if we should upgrade to a full GC after an incremental one. |
| 270 |   bool should_upgrade_to_full_gc(GCCause::Cause cause); | 
| 271 |  | 
| 272 |   // Indicates whether we are in young or mixed GC mode. |
| 273 |   G1CollectorState _collector_state; | 
| 274 |  | 
| 275 |   // Keeps track of how many "old marking cycles" (i.e., Full GCs or | 
| 276 |   // concurrent cycles) we have started. | 
| 277 |   volatile uint _old_marking_cycles_started; | 
| 278 |  | 
| 279 |   // Keeps track of how many "old marking cycles" (i.e., Full GCs or | 
| 280 |   // concurrent cycles) we have completed. | 
| 281 |   volatile uint _old_marking_cycles_completed; | 
| 282 |  | 
| 283 |   // This is a non-product method that is helpful for testing. It is | 
| 284 |   // called at the end of a GC and artificially expands the heap by | 
| 285 |   // allocating a number of dead regions. This way we can induce very | 
| 286 |   // frequent marking cycles and stress the cleanup / concurrent | 
| 287 |   // cleanup code more (as all the regions that will be allocated by | 
| 288 |   // this method will be found dead by the marking cycle). | 
| 289 |   void allocate_dummy_regions() PRODUCT_RETURN; | 
| 290 |  | 
| 291 |   // If the HR printer is active, dump the state of the regions in the | 
| 292 |   // heap after a compaction. | 
| 293 |   void print_hrm_post_compaction(); | 
| 294 |  | 
| 295 |   // Create a memory mapper for auxiliary data structures of the given size and | 
| 296 |   // translation factor. | 
| 297 |   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description, | 
| 298 |                                                          size_t size, | 
| 299 |                                                          size_t translation_factor); | 
| 300 |  | 
| 301 |   void trace_heap(GCWhen::Type when, const GCTracer* tracer); | 
| 302 |  | 
| 303 |   // These are macros so that, if the assert fires, we get the correct | 
| 304 |   // line number, file, etc. | 
| 305 |  | 
| 306 | #define heap_locking_asserts_params(_extra_message_)                          \ | 
| 307 |   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \ | 
| 308 |   (_extra_message_),                                                          \ | 
| 309 |   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \ | 
| 310 |   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \ | 
| 311 |   BOOL_TO_STR(Thread::current()->is_VM_thread()) | 
| 312 |  | 
| 313 | #define assert_heap_locked()                                                  \ | 
| 314 |   do {                                                                        \ | 
| 315 |     assert(Heap_lock->owned_by_self(),                                        \ | 
| 316 |            heap_locking_asserts_params("should be holding the Heap_lock"));   \ | 
| 317 |   } while (0) | 
| 318 |  | 
| 319 | #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \ | 
| 320 |   do {                                                                        \ | 
| 321 |     assert(Heap_lock->owned_by_self() ||                                      \ | 
| 322 |            (SafepointSynchronize::is_at_safepoint() &&                        \ | 
| 323 |              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \ | 
| 324 |            heap_locking_asserts_params("should be holding the Heap_lock or "  \ | 
| 325 |                                         "should be at a safepoint"));         \ | 
| 326 |   } while (0) | 
| 327 |  | 
| 328 | #define assert_heap_locked_and_not_at_safepoint()                             \ | 
| 329 |   do {                                                                        \ | 
| 330 |     assert(Heap_lock->owned_by_self() &&                                      \ | 
| 331 |                                     !SafepointSynchronize::is_at_safepoint(), \ | 
| 332 |           heap_locking_asserts_params("should be holding the Heap_lock and "  \ | 
| 333 |                                        "should not be at a safepoint"));      \ | 
| 334 |   } while (0) | 
| 335 |  | 
| 336 | #define assert_heap_not_locked()                                              \ | 
| 337 |   do {                                                                        \ | 
| 338 |     assert(!Heap_lock->owned_by_self(),                                       \ | 
| 339 |         heap_locking_asserts_params("should not be holding the Heap_lock"));  \ | 
| 340 |   } while (0) | 
| 341 |  | 
| 342 | #define assert_heap_not_locked_and_not_at_safepoint()                         \ | 
| 343 |   do {                                                                        \ | 
| 344 |     assert(!Heap_lock->owned_by_self() &&                                     \ | 
| 345 |                                     !SafepointSynchronize::is_at_safepoint(), \ | 
| 346 |       heap_locking_asserts_params("should not be holding the Heap_lock and "  \ | 
| 347 |                                    "should not be at a safepoint"));          \ | 
| 348 |   } while (0) | 
| 349 |  | 
| 350 | #define assert_at_safepoint_on_vm_thread()                                    \ | 
| 351 |   do {                                                                        \ | 
| 352 |     assert_at_safepoint();                                                    \ | 
| 353 |     assert(Thread::current_or_null() != NULL, "no current thread");           \ | 
| 354 |     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \ | 
| 355 |   } while (0) | 
| 356 |  | 
| 357 | #define assert_used_and_recalculate_used_equal(g1h)                           \ | 
| 358 |   do {                                                                        \ | 
| 359 |     size_t cur_used_bytes = g1h->used();                                      \ | 
| 360 |     size_t recal_used_bytes = g1h->recalculate_used();                        \ | 
| 361 |     assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \ | 
| 362 |            " the same as recalculated used(" SIZE_FORMAT ").",                \ |
| 363 |            cur_used_bytes, recal_used_bytes);                                 \ | 
| 364 |   } while (0) | 
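
  // Illustrative sketch only (not part of this header): how an allocation
  // path might use the locking asserts above (the method name is
  // hypothetical):
  //
  //   HeapWord* G1CollectedHeap::example_slow_alloc(size_t word_size) {
  //     assert_heap_not_locked_and_not_at_safepoint();
  //     MutexLocker ml(Heap_lock);
  //     assert_heap_locked();
  //     // ... allocate while holding the Heap_lock ...
  //   }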
| 365 |  | 
| 366 |   const char* young_gc_name() const; | 
| 367 |  | 
| 368 |   // The young region list. | 
| 369 |   G1EdenRegions _eden; | 
| 370 |   G1SurvivorRegions _survivor; | 
| 371 |  | 
| 372 |   STWGCTimer* _gc_timer_stw; | 
| 373 |  | 
| 374 |   G1NewTracer* _gc_tracer_stw; | 
| 375 |  | 
| 376 |   // The current policy object for the collector. | 
| 377 |   G1Policy* _policy; | 
| 378 |   G1HeapSizingPolicy* _heap_sizing_policy; | 
| 379 |  | 
| 380 |   G1CollectionSet _collection_set; | 
| 381 |  | 
| 382 |   // Try to allocate a single non-humongous HeapRegion sufficient for | 
| 383 |   // an allocation of the given word_size. If do_expand is true, | 
| 384 |   // attempt to expand the heap if necessary to satisfy the allocation | 
| 385 |   // request. 'type' specifies the type of region to be allocated. (Use constants |
| 386 |   // Old, Eden, Humongous, Survivor defined in HeapRegionType.) | 
| 387 |   HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand); | 
| 388 |  | 
| 389 |   // Initialize a contiguous set of free regions of length num_regions | 
| 390 |   // and starting at index first so that they appear as a single | 
| 391 |   // humongous region. | 
| 392 |   HeapWord* humongous_obj_allocate_initialize_regions(uint first, | 
| 393 |                                                       uint num_regions, | 
| 394 |                                                       size_t word_size); | 
| 395 |  | 
| 396 |   // Attempt to allocate a humongous object of the given size. Return | 
| 397 |   // NULL if unsuccessful. | 
| 398 |   HeapWord* humongous_obj_allocate(size_t word_size); | 
| 399 |  | 
| 400 |   // The following two methods, allocate_new_tlab() and | 
| 401 |   // mem_allocate(), are the two main entry points from the runtime | 
| 402 |   // into the G1's allocation routines. They have the following | 
| 403 |   // assumptions: | 
| 404 |   // | 
| 405 |   // * They should both be called outside safepoints. | 
| 406 |   // | 
| 407 |   // * They should both be called without holding the Heap_lock. | 
| 408 |   // | 
| 409 |   // * All allocation requests for new TLABs should go to | 
| 410 |   //   allocate_new_tlab(). | 
| 411 |   // | 
| 412 |   // * All non-TLAB allocation requests should go to mem_allocate(). | 
| 413 |   // | 
| 414 |   // * If either call cannot satisfy the allocation request using the | 
| 415 |   //   current allocating region, they will try to get a new one. If | 
| 416 |   //   this fails, they will attempt to do an evacuation pause and | 
| 417 |   //   retry the allocation. | 
| 418 |   // | 
| 419 |   // * If all allocation attempts fail, even after trying to schedule | 
| 420 |   //   an evacuation pause, allocate_new_tlab() will return NULL, | 
| 421 |   //   whereas mem_allocate() will attempt a heap expansion and/or | 
| 422 |   //   schedule a Full GC. | 
| 423 |   // | 
| 424 |   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab | 
| 425 |   //   should never be called with word_size being humongous. All | 
| 426 |   //   humongous allocation requests should go to mem_allocate() which | 
| 427 |   //   will satisfy them with a special path. | 
| 428 |  | 
| 429 |   virtual HeapWord* allocate_new_tlab(size_t min_size, | 
| 430 |                                       size_t requested_size, | 
| 431 |                                       size_t* actual_size); | 
| 432 |  | 
| 433 |   virtual HeapWord* mem_allocate(size_t word_size, | 
| 434 |                                  bool*  gc_overhead_limit_was_exceeded); | 
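
  // Illustrative sketch only (not part of this header): the contract described
  // above, seen from a caller's perspective (control flow simplified,
  // "request_is_for_a_new_tlab" is an illustrative name):
  //
  //   if (is_humongous(word_size)) {
  //     // Humongous requests never use TLABs; they go through mem_allocate().
  //     result = mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   } else if (request_is_for_a_new_tlab) {
  //     result = allocate_new_tlab(min_size, requested_size, &actual_size);
  //   } else {
  //     result = mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   }
  //   // Both entry points are called outside a safepoint and without the
  //   // Heap_lock held.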
| 435 |  | 
| 436 |   // First-level mutator allocation attempt: try to allocate out of | 
| 437 |   // the mutator alloc region without taking the Heap_lock. This | 
| 438 |   // should only be used for non-humongous allocations. | 
| 439 |   inline HeapWord* attempt_allocation(size_t min_word_size, | 
| 440 |                                       size_t desired_word_size, | 
| 441 |                                       size_t* actual_word_size); | 
| 442 |  | 
| 443 |   // Second-level mutator allocation attempt: take the Heap_lock and | 
| 444 |   // retry the allocation attempt, potentially scheduling a GC | 
| 445 |   // pause. This should only be used for non-humongous allocations. | 
| 446 |   HeapWord* attempt_allocation_slow(size_t word_size); | 
| 447 |  | 
| 448 |   // Takes the Heap_lock and attempts a humongous allocation. It can | 
| 449 |   // potentially schedule a GC pause. | 
| 450 |   HeapWord* attempt_allocation_humongous(size_t word_size); | 
| 451 |  | 
| 452 |   // Allocation attempt that should be called during safepoints (e.g., | 
| 453 |   // at the end of a successful GC). expect_null_mutator_alloc_region | 
| 454 |   // specifies whether the mutator alloc region is expected to be NULL | 
| 455 |   // or not. | 
| 456 |   HeapWord* attempt_allocation_at_safepoint(size_t word_size, | 
| 457 |                                             bool expect_null_mutator_alloc_region); | 
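
  // Illustrative sketch only (not part of this header): the intended order of
  // the attempts above for a non-humongous mutator allocation (the actual
  // wiring lives in the inline/implementation files; this only shows the
  // order in which the levels are tried):
  //
  //   HeapWord* result = attempt_allocation(min, desired, &actual); // lock-free fast path
  //   if (result == NULL) {
  //     result = attempt_allocation_slow(desired); // takes Heap_lock, may schedule a GC pause
  //   }
  //   // Humongous requests use attempt_allocation_humongous() instead, and
  //   // attempt_allocation_at_safepoint() is only used from within safepoints.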
| 458 |  | 
| 459 |   // These methods are the "callbacks" from the G1AllocRegion class. | 
| 460 |  | 
| 461 |   // For mutator alloc regions. | 
| 462 |   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force); | 
| 463 |   void retire_mutator_alloc_region(HeapRegion* alloc_region, | 
| 464 |                                    size_t allocated_bytes); | 
| 465 |  | 
| 466 |   // For GC alloc regions. | 
| 467 |   bool has_more_regions(G1HeapRegionAttr dest); | 
| 468 |   HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest); | 
| 469 |   void retire_gc_alloc_region(HeapRegion* alloc_region, | 
| 470 |                               size_t allocated_bytes, G1HeapRegionAttr dest); | 
| 471 |  | 
| 472 |   // - if explicit_gc is true, the GC is for a System.gc() or similar request, |
| 473 |   //   otherwise it's for a failed allocation. | 
| 474 |   // - if clear_all_soft_refs is true, all soft references should be | 
| 475 |   //   cleared during the GC. | 
| 476 |   // - it returns false if it is unable to do the collection due to the | 
| 477 |   //   GC locker being active, true otherwise. | 
| 478 |   bool do_full_collection(bool explicit_gc, | 
| 479 |                           bool clear_all_soft_refs); | 
| 480 |  | 
| 481 |   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread. | 
| 482 |   virtual void do_full_collection(bool clear_all_soft_refs); | 
| 483 |  | 
| 484 |   // Callback from VM_G1CollectForAllocation operation. | 
| 485 |   // This function does everything necessary/possible to satisfy a | 
| 486 |   // failed allocation request (including collection, expansion, etc.) | 
| 487 |   HeapWord* satisfy_failed_allocation(size_t word_size, | 
| 488 |                                       bool* succeeded); | 
| 489 |   // Internal helpers that split up the full GC code to |
| 490 |   // increase readability. |
| 491 |   void abort_concurrent_cycle(); | 
| 492 |   void verify_before_full_collection(bool explicit_gc); | 
| 493 |   void prepare_heap_for_full_collection(); | 
| 494 |   void prepare_heap_for_mutators(); | 
| 495 |   void abort_refinement(); | 
| 496 |   void verify_after_full_collection(); | 
| 497 |   void print_heap_after_full_collection(G1HeapTransition* heap_transition); | 
| 498 |  | 
| 499 |   // Helper method for satisfy_failed_allocation() | 
| 500 |   HeapWord* satisfy_failed_allocation_helper(size_t word_size, | 
| 501 |                                              bool do_gc, | 
| 502 |                                              bool clear_all_soft_refs, | 
| 503 |                                              bool expect_null_mutator_alloc_region, | 
| 504 |                                              bool* gc_succeeded); | 
| 505 |  | 
| 506 |   // Attempt to expand the heap sufficiently |
| 507 |   // to support an allocation of the given "word_size". If |
| 508 |   // successful, perform the allocation and return the address of the |
| 509 |   // allocated block, or else "NULL". |
| 510 |   HeapWord* expand_and_allocate(size_t word_size); | 
| 511 |  | 
| 512 |   // Process any reference objects discovered. | 
| 513 |   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states); | 
| 514 |  | 
| 515 |   // During an initial mark pause we may install a pending list head which is not |
| 516 |   // otherwise reachable; ensure that it is marked in the bitmap for concurrent marking |
| 517 |   // to discover. |
| 518 |   void make_pending_list_reachable(); | 
| 519 |  | 
| 520 |   // Merges the information gathered on a per-thread basis for all worker threads | 
| 521 |   // during GC into global variables. | 
| 522 |   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states); | 
| 523 | public: | 
| 524 |   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; } | 
| 525 |  | 
| 526 |   WorkGang* workers() const { return _workers; } | 
| 527 |  | 
| 528 |   // Runs the given AbstractGangTask with the current active workers, returning the | 
| 529 |   // total time taken. | 
| 530 |   Tickspan run_task(AbstractGangTask* task); | 
| 531 |  | 
| 532 |   G1Allocator* allocator() { | 
| 533 |     return _allocator; | 
| 534 |   } | 
| 535 |  | 
| 536 |   G1HeapVerifier* verifier() { | 
| 537 |     return _verifier; | 
| 538 |   } | 
| 539 |  | 
| 540 |   G1MonitoringSupport* g1mm() { | 
| 541 |     assert(_g1mm != NULL, "should have been initialized"); |
| 542 |     return _g1mm; | 
| 543 |   } | 
| 544 |  | 
| 545 |   void resize_heap_if_necessary(); | 
| 546 |  | 
| 547 |   // Expand the garbage-first heap by at least the given size (in bytes!). | 
| 548 |   // Returns true if the heap was expanded by the requested amount; | 
| 549 |   // false otherwise. | 
| 550 |   // (Rounds up to a HeapRegion boundary.) | 
| 551 |   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL); | 
| 552 |  | 
| 553 |   // Returns the PLAB statistics for a given destination. | 
| 554 |   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest); | 
| 555 |  | 
| 556 |   // Determines PLAB size for a given destination. | 
| 557 |   inline size_t desired_plab_sz(G1HeapRegionAttr dest); | 
| 558 |  | 
| 559 |   // Do anything common to GCs. |
| 560 |   void gc_prologue(bool full); | 
| 561 |   void gc_epilogue(bool full); | 
| 562 |  | 
| 563 |   // Does the given region fulfill remembered set based eager reclaim candidate requirements? | 
| 564 |   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const; | 
| 565 |  | 
| 566 |   // Modify the reclaim candidate set and test for presence. | 
| 567 |   // These are only valid for starts_humongous regions. | 
| 568 |   inline void set_humongous_reclaim_candidate(uint region, bool value); | 
| 569 |   inline bool is_humongous_reclaim_candidate(uint region); | 
| 570 |  | 
| 571 |   // Remove from the reclaim candidate set.  Also remove from the | 
| 572 |   // collection set so that later encounters avoid the slow path. | 
| 573 |   inline void set_humongous_is_live(oop obj); | 
| 574 |  | 
| 575 |   // Register the given region to be part of the collection set. | 
| 576 |   inline void register_humongous_region_with_region_attr(uint index); | 
| 577 |   // Update region attributes table with information about all regions. | 
| 578 |   void register_regions_with_region_attr(); | 
| 579 |   // We register a region with the fast "in collection set" test. We | 
| 580 |   // simply set to true the array slot corresponding to this region. | 
| 581 |   void register_young_region_with_region_attr(HeapRegion* r) { | 
| 582 |     _region_attr.set_in_young(r->hrm_index()); | 
| 583 |   } | 
| 584 |   inline void register_region_with_region_attr(HeapRegion* r); | 
| 585 |   inline void register_old_region_with_region_attr(HeapRegion* r); | 
| 586 |   inline void register_optional_region_with_region_attr(HeapRegion* r); | 
| 587 |  | 
| 588 |   void clear_region_attr(const HeapRegion* hr) { | 
| 589 |     _region_attr.clear(hr); | 
| 590 |   } | 
| 591 |  | 
| 592 |   void clear_region_attr() { | 
| 593 |     _region_attr.clear(); | 
| 594 |   } | 
| 595 |  | 
| 596 |   // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking | 
| 597 |   // for all regions. | 
| 598 |   void verify_region_attr_remset_update() PRODUCT_RETURN; | 
| 599 |  | 
| 600 |   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause); | 
| 601 |  | 
| 602 |   // This is called at the start of either a concurrent cycle or a Full | 
| 603 |   // GC to update the number of old marking cycles started. | 
| 604 |   void increment_old_marking_cycles_started(); | 
| 605 |  | 
| 606 |   // This is called at the end of either a concurrent cycle or a Full | 
| 607 |   // GC to update the number of old marking cycles completed. Those two | 
| 608 |   // can happen in a nested fashion, i.e., we start a concurrent | 
| 609 |   // cycle, a Full GC happens half-way through it which ends first, | 
| 610 |   // and then the cycle notices that a Full GC happened and ends | 
| 611 |   // too. The concurrent parameter is a boolean to help us do a bit | 
| 612 |   // tighter consistency checking in the method. If concurrent is | 
| 613 |   // false, the caller is the inner caller in the nesting (i.e., the | 
| 614 |   // Full GC). If concurrent is true, the caller is the outer caller | 
| 615 |   // in this nesting (i.e., the concurrent cycle). Further nesting is | 
| 616 |   // not currently supported. The end of this call also notifies | 
| 617 |   // the FullGCCount_lock in case a Java thread is waiting for a full | 
| 618 |   // GC to happen (e.g., it called System.gc() with | 
| 619 |   // +ExplicitGCInvokesConcurrent). | 
| 620 |   void increment_old_marking_cycles_completed(bool concurrent); | 
| 621 |  | 
| 622 |   uint old_marking_cycles_completed() { | 
| 623 |     return _old_marking_cycles_completed; | 
| 624 |   } | 
| 625 |  | 
| 626 |   G1HRPrinter* hr_printer() { return &_hr_printer; } | 
| 627 |  | 
| 628 |   // Allocates a new heap region instance. | 
| 629 |   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr); | 
| 630 |  | 
| 631 |   // Allocate the highest free region in the reserved heap. This will commit | 
| 632 |   // regions as necessary. | 
| 633 |   HeapRegion* alloc_highest_free_region(); | 
| 634 |  | 
| 635 |   // Frees a non-humongous region by initializing its contents and | 
| 636 |   // adding it to the free list that's passed as a parameter (this is | 
| 637 |   // usually a local list which will be appended to the master free | 
| 638 |   // list later). If skip_remset is true, the region's RSet will not be freed |
| 640 |   // up. If skip_hot_card_cache is true, the region's hot card cache will not | 
| 641 |   // be freed up. The assumption is that this will be done later. | 
| 642 |   // The locked parameter indicates if the caller has already taken | 
| 643 |   // care of proper synchronization. This may allow some optimizations. | 
| 644 |   void free_region(HeapRegion* hr, | 
| 645 |                    FreeRegionList* free_list, | 
| 646 |                    bool skip_remset, | 
| 647 |                    bool skip_hot_card_cache = false, | 
| 648 |                    bool locked = false); | 
| 649 |  | 
| 650 |   // It dirties the cards that cover the block so that the post | 
| 651 |   // write barrier never queues anything when updating objects on this | 
| 652 |   // block. It is assumed (and in fact we assert) that the block | 
| 653 |   // belongs to a young region. | 
| 654 |   inline void dirty_young_block(HeapWord* start, size_t word_size); | 
| 655 |  | 
| 656 |   // Frees a humongous region by collapsing it into individual regions | 
| 657 |   // and calling free_region() for each of them. The freed regions | 
| 658 |   // will be added to the free list that's passed as a parameter (this | 
| 659 |   // is usually a local list which will be appended to the master free | 
| 660 |   // list later). | 
| 661 |   // The method assumes that only a single thread is ever calling | 
| 662 |   // this for a particular region at once. | 
| 663 |   void free_humongous_region(HeapRegion* hr, | 
| 664 |                              FreeRegionList* free_list); | 
| 665 |  | 
| 666 |   // Facility for allocating in 'archive' regions in high heap memory and | 
| 667 |   // recording the allocated ranges. These should all be called from the | 
| 668 |   // VM thread at safepoints, without the heap lock held. They can be used | 
| 669 |   // to create and archive a set of heap regions which can be mapped at the | 
| 670 |   // same fixed addresses in a subsequent JVM invocation. | 
| 671 |   void begin_archive_alloc_range(bool open = false); | 
| 672 |  | 
| 673 |   // Check if the requested size would be too large for an archive allocation. | 
| 674 |   bool is_archive_alloc_too_large(size_t word_size); | 
| 675 |  | 
| 676 |   // Allocate memory of the requested size from the archive region. This will | 
| 677 |   // return NULL if the size is too large or if no memory is available. It | 
| 678 |   // does not trigger a garbage collection. | 
| 679 |   HeapWord* archive_mem_allocate(size_t word_size); | 
| 680 |  | 
| 681 |   // Optionally aligns the end address and returns the allocated ranges in | 
| 682 |   // an array of MemRegions in order of ascending addresses. | 
| 683 |   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges, | 
| 684 |                                size_t end_alignment_in_bytes = 0); | 
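
  // Illustrative sketch only (not part of this header): the expected call
  // sequence when creating an archive range (on the VM thread, at a
  // safepoint); "ranges", "more_objects_to_archive" and the loop shape are
  // illustrative:
  //
  //   begin_archive_alloc_range(/* open */ false);
  //   while (more_objects_to_archive) {
  //     if (is_archive_alloc_too_large(word_size)) break;
  //     HeapWord* mem = archive_mem_allocate(word_size);
  //     // ... copy/initialize the archived object(s) at "mem" ...
  //   }
  //   end_archive_alloc_range(&ranges, end_alignment_in_bytes);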
| 685 |  | 
| 686 |   // Facility for allocating a fixed range within the heap and marking | 
| 687 |   // the containing regions as 'archive'. For use at JVM init time, when the | 
| 688 |   // caller may mmap archived heap data at the specified range(s). | 
| 689 |   // Verify that the MemRegions specified in the argument array are within the | 
| 690 |   // reserved heap. | 
| 691 |   bool check_archive_addresses(MemRegion* range, size_t count); | 
| 692 |  | 
| 693 |   // Commit the appropriate G1 regions containing the specified MemRegions | 
| 694 |   // and mark them as 'archive' regions. The regions in the array must be | 
| 695 |   // non-overlapping and in order of ascending address. | 
| 696 |   bool alloc_archive_regions(MemRegion* range, size_t count, bool open); | 
| 697 |  | 
| 698 |   // Insert any required filler objects in the G1 regions around the specified | 
| 699 |   // ranges to make the regions parseable. This must be called after | 
| 700 |   // alloc_archive_regions, and after class loading has occurred. | 
| 701 |   void fill_archive_regions(MemRegion* range, size_t count); | 
| 702 |  | 
| 703 |   // For each of the specified MemRegions, uncommit the containing G1 regions | 
| 704 |   // which had been allocated by alloc_archive_regions. This should be called | 
| 705 |   // rather than fill_archive_regions at JVM init time if the archive file | 
| 706 |   // mapping failed, with the same non-overlapping and sorted MemRegion array. | 
| 707 |   void dealloc_archive_regions(MemRegion* range, size_t count, bool is_open); | 
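
  // Illustrative sketch only (not part of this header): the expected order of
  // the calls above at JVM init time ("map_archived_data" stands in for the
  // caller's actual mapping step and is hypothetical):
  //
  //   if (check_archive_addresses(range, count) &&
  //       alloc_archive_regions(range, count, /* open */ false)) {
  //     if (map_archived_data(range, count)) {
  //       fill_archive_regions(range, count);   // after class loading
  //     } else {
  //       dealloc_archive_regions(range, count, /* is_open */ false);
  //     }
  //   }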
| 708 |  | 
| 709 |   oop materialize_archived_object(oop obj); | 
| 710 |  | 
| 711 | private: | 
| 712 |  | 
| 713 |   // Shrink the garbage-first heap by at most the given size (in bytes!). | 
| 714 |   // (Rounds down to a HeapRegion boundary.) | 
| 715 |   void shrink(size_t expand_bytes); | 
| 716 |   void shrink_helper(size_t expand_bytes); | 
| 717 |  | 
| 718 |   #if TASKQUEUE_STATS | 
| 719 |   static void print_taskqueue_stats_hdr(outputStream* const st); | 
| 720 |   void print_taskqueue_stats() const; | 
| 721 |   void reset_taskqueue_stats(); | 
| 722 |   #endif // TASKQUEUE_STATS | 
| 723 |  | 
| 724 |   // Schedule the VM operation that will do an evacuation pause to | 
| 725 |   // satisfy an allocation request of word_size. *succeeded is set |
| 726 |   // to whether the VM operation was successful (it did do an |
| 727 |   // evacuation pause) or not (another thread beat us to it or the GC | 
| 728 |   // locker was active). Given that we should not be holding the | 
| 729 |   // Heap_lock when we enter this method, we will pass the | 
| 730 |   // gc_count_before (i.e., total_collections()) as a parameter since | 
| 731 |   // it has to be read while holding the Heap_lock. Currently, both | 
| 732 |   // methods that call do_collection_pause() release the Heap_lock | 
| 733 |   // before the call, so it's easy to read gc_count_before just before. | 
| 734 |   HeapWord* do_collection_pause(size_t         word_size, | 
| 735 |                                 uint           gc_count_before, | 
| 736 |                                 bool*          succeeded, | 
| 737 |                                 GCCause::Cause gc_cause); | 
| 738 |  | 
| 739 |   void wait_for_root_region_scanning(); | 
| 740 |  | 
| 741 |   // The guts of the incremental collection pause, executed by the vm | 
| 742 |   // thread. It returns false if it is unable to do the collection due | 
| 743 |   // to the GC locker being active, true otherwise | 
| 744 |   bool do_collection_pause_at_safepoint(double target_pause_time_ms); | 
| 745 |  | 
| 746 |   G1HeapVerifier::G1VerifyType young_collection_verify_type() const; | 
| 747 |   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type); | 
| 748 |   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type); | 
| 749 |  | 
| 750 |   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms); | 
| 751 |  | 
| 752 |   // Actually do the work of evacuating the parts of the collection set. | 
| 753 |   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states); | 
| 754 |   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states); | 
| 755 | private: | 
| 756 |   // Evacuate the next set of optional regions. | 
| 757 |   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states); | 
| 758 |  | 
| 759 | public: | 
| 760 |   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info); | 
| 761 |   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss); | 
| 762 |  | 
| 763 |   void expand_heap_after_young_collection(); | 
| 764 |   // Update object copying statistics. | 
| 765 |   void record_obj_copy_mem_stats(); | 
| 766 |  | 
| 767 |   // The hot card cache for remembered set insertion optimization. | 
| 768 |   G1HotCardCache* _hot_card_cache; | 
| 769 |  | 
| 770 |   // The g1 remembered set of the heap. | 
| 771 |   G1RemSet* _rem_set; | 
| 772 |  | 
| 773 |   // A set of cards that cover the objects for which the Rsets should be updated | 
| 774 |   // concurrently after the collection. | 
| 775 |   G1DirtyCardQueueSet _dirty_card_queue_set; | 
| 776 |  | 
| 777 |   // After a collection pause, convert the regions in the collection set into free | 
| 778 |   // regions. | 
| 779 |   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words); | 
| 780 |  | 
| 781 |   // Abandon the current collection set without recording policy | 
| 782 |   // statistics or updating free lists. | 
| 783 |   void abandon_collection_set(G1CollectionSet* collection_set); | 
| 784 |  | 
| 785 |   // The concurrent marker (and the thread it runs in). |
| 786 |   G1ConcurrentMark* _cm; | 
| 787 |   G1ConcurrentMarkThread* _cm_thread; | 
| 788 |  | 
| 789 |   // The concurrent refiner. | 
| 790 |   G1ConcurrentRefine* _cr; | 
| 791 |  | 
| 792 |   // The parallel task queues | 
| 793 |   RefToScanQueueSet *_task_queues; | 
| 794 |  | 
| 795 |   // True iff an evacuation has failed in the current collection. |
| 796 |   bool _evacuation_failed; | 
| 797 |  | 
| 798 |   EvacuationFailedInfo* _evacuation_failed_info_array; | 
| 799 |  | 
| 800 |   // Failed evacuations cause some logical from-space objects to have | 
| 801 |   // forwarding pointers to themselves.  Reset them. | 
| 802 |   void remove_self_forwarding_pointers(); | 
| 803 |  | 
| 804 |   // Restore the objects in the regions in the collection set after an | 
| 805 |   // evacuation failure. | 
| 806 |   void restore_after_evac_failure(); | 
| 807 |  | 
| 808 |   PreservedMarksSet _preserved_marks_set; | 
| 809 |  | 
| 810 |   // Preserve the mark of "obj", if necessary, in preparation for its mark | 
| 811 |   // word being overwritten with a self-forwarding-pointer. | 
| 812 |   void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m); | 
| 813 |  | 
| 814 | #ifndef PRODUCT | 
| 815 |   // Support for forcing evacuation failures. Analogous to | 
| 816 |   // PromotionFailureALot for the other collectors. | 
| 817 |  | 
| 818 |   // Records whether G1EvacuationFailureALot should be in effect | 
| 819 |   // for the current GC | 
| 820 |   bool _evacuation_failure_alot_for_current_gc; | 
| 821 |  | 
| 822 |   // Used to record the GC number for interval checking when | 
| 823 |   // determining whether G1EvacuationFailureALot is in effect |
| 824 |   // for the current GC. | 
| 825 |   size_t _evacuation_failure_alot_gc_number; | 
| 826 |  | 
| 827 |   // Count of the number of evacuations between failures. | 
| 828 |   volatile size_t _evacuation_failure_alot_count; | 
| 829 |  | 
| 830 |   // Set whether G1EvacuationFailureALot should be in effect | 
| 831 |   // for the current GC (based upon the type of GC and which | 
| 832 |   // command line flags are set). |
| 833 |   inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc, | 
| 834 |                                                   bool during_initial_mark, | 
| 835 |                                                   bool mark_or_rebuild_in_progress); | 
| 836 |  | 
| 837 |   inline void set_evacuation_failure_alot_for_current_gc(); | 
| 838 |  | 
| 839 |   // Return true if it's time to cause an evacuation failure. | 
| 840 |   inline bool evacuation_should_fail(); | 
| 841 |  | 
| 842 |   // Reset the G1EvacuationFailureALot counters.  Should be called at | 
| 843 |   // the end of an evacuation pause in which an evacuation failure occurred. | 
| 844 |   inline void reset_evacuation_should_fail(); | 
| 845 | #endif // !PRODUCT | 
| 846 |  | 
| 847 |   // ("Weak") Reference processing support. | 
| 848 |   // | 
| 849 |   // G1 has 2 instances of the reference processor class. One | 
| 850 |   // (_ref_processor_cm) handles reference object discovery | 
| 851 |   // and subsequent processing during concurrent marking cycles. | 
| 852 |   // | 
| 853 |   // The other (_ref_processor_stw) handles reference object | 
| 854 |   // discovery and processing during full GCs and incremental | 
| 855 |   // evacuation pauses. | 
| 856 |   // | 
| 857 |   // During an incremental pause, reference discovery will be | 
| 858 |   // temporarily disabled for _ref_processor_cm and will be | 
| 859 |   // enabled for _ref_processor_stw. At the end of the evacuation | 
| 860 |   // pause references discovered by _ref_processor_stw will be | 
| 861 |   // processed and discovery will be disabled. The previous | 
| 862 |   // setting for reference object discovery for _ref_processor_cm | 
| 863 |   // will be re-instated. | 
| 864 |   // | 
| 865 |   // At the start of marking: | 
| 866 |   //  * Discovery by the CM ref processor is verified to be inactive | 
| 867 |   //    and its discovered lists are empty. |
| 868 |   //  * Discovery by the CM ref processor is then enabled. | 
| 869 |   // | 
| 870 |   // At the end of marking: | 
| 871 |   //  * Any references on the CM ref processor's discovered | 
| 872 |   //    lists are processed (possibly MT). | 
| 873 |   // | 
| 874 |   // At the start of full GC we: | 
| 875 |   //  * Disable discovery by the CM ref processor and | 
| 876 |   //    empty CM ref processor's discovered lists | 
| 877 |   //    (without processing any entries). | 
| 878 |   //  * Verify that the STW ref processor is inactive and its |
| 879 |   //    discovered lists are empty. | 
| 880 |   //  * Temporarily set STW ref processor discovery as single threaded. | 
| 881 |   //  * Temporarily clear the STW ref processor's _is_alive_non_header | 
| 882 |   //    field. | 
| 883 |   //  * Finally enable discovery by the STW ref processor. | 
| 884 |   // | 
| 885 |   // The STW ref processor is used to record any discovered | 
| 886 |   // references during the full GC. | 
| 887 |   // | 
| 888 |   // At the end of a full GC we: | 
| 889 |   //  * Enqueue any reference objects discovered by the STW ref processor | 
| 890 |   //    that have non-live referents. This has the side-effect of | 
| 891 |   //    making the STW ref processor inactive by disabling discovery. | 
| 892 |   //  * Verify that the CM ref processor is still inactive | 
| 893 |   //    and no references have been placed on its discovered |
| 894 |   //    lists (also checked as a precondition during initial marking). | 
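  //
  // Illustrative sketch only: the discovery toggling around an evacuation
  // pause described above, assuming the usual ReferenceProcessor interface
  // (see referenceProcessor.hpp):
  //
  //   // At the start of the pause:
  //   _ref_processor_cm->disable_discovery();
  //   _ref_processor_stw->enable_discovery();
  //   ...
  //   // At the end of the pause, after processing STW-discovered references:
  //   _ref_processor_stw->disable_discovery();
  //   // ... and restore the previous discovery setting for _ref_processor_cm.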
| 895 |  | 
| 896 |   // The (stw) reference processor... | 
| 897 |   ReferenceProcessor* _ref_processor_stw; | 
| 898 |  | 
| 899 |   // During reference object discovery, the _is_alive_non_header | 
| 900 |   // closure (if non-null) is applied to the referent object to | 
| 901 |   // determine whether the referent is live. If so then the | 
| 902 |   // reference object does not need to be 'discovered' and can | 
| 903 |   // be treated as a regular oop. This has the benefit of reducing | 
| 904 |   // the number of 'discovered' reference objects that need to | 
| 905 |   // be processed. | 
| 906 |   // | 
| 907 |   // Instance of the is_alive closure for embedding into the | 
| 908 |   // STW reference processor as the _is_alive_non_header field. | 
| 909 |   // Supplying a value for the _is_alive_non_header field is | 
| 910 |   // optional but doing so prevents unnecessary additions to | 
| 911 |   // the discovered lists during reference discovery. | 
| 912 |   G1STWIsAliveClosure _is_alive_closure_stw; | 
| 913 |  | 
| 914 |   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw; | 
| 915 |  | 
| 916 |   // The (concurrent marking) reference processor... | 
| 917 |   ReferenceProcessor* _ref_processor_cm; | 
| 918 |  | 
| 919 |   // Instance of the concurrent mark is_alive closure for embedding | 
| 920 |   // into the Concurrent Marking reference processor as the | 
| 921 |   // _is_alive_non_header field. Supplying a value for the | 
| 922 |   // _is_alive_non_header field is optional but doing so prevents | 
| 923 |   // unnecessary additions to the discovered lists during reference | 
| 924 |   // discovery. | 
| 925 |   G1CMIsAliveClosure _is_alive_closure_cm; | 
| 926 |  | 
| 927 |   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm; | 
| 928 | public: | 
| 929 |  | 
| 930 |   RefToScanQueue *task_queue(uint i) const; | 
| 931 |  | 
| 932 |   uint num_task_queues() const; | 
| 933 |  | 
| 934 |   // A set of cards where updates happened during the GC | 
| 935 |   G1DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } | 
| 936 |  | 
| 937 |   // Create a G1CollectedHeap. | 
| 938 |   // Must call the initialize method afterwards. | 
| 939 |   // May not return if something goes wrong. | 
| 940 |   G1CollectedHeap(); | 
| 941 |  | 
| 942 | private: | 
| 943 |   jint initialize_concurrent_refinement(); | 
| 944 |   jint initialize_young_gen_sampling_thread(); | 
| 945 | public: | 
| 946 |   // Initialize the G1CollectedHeap to have the initial and | 
| 947 |   // maximum sizes and remembered and barrier sets | 
| 948 |   // specified by the policy object. | 
| 949 |   jint initialize(); | 
| 950 |  | 
| 951 |   virtual void stop(); | 
| 952 |   virtual void safepoint_synchronize_begin(); | 
| 953 |   virtual void safepoint_synchronize_end(); | 
| 954 |  | 
| 955 |   // Does operations required after initialization has been done. | 
| 956 |   void post_initialize(); | 
| 957 |  | 
| 958 |   // Initialize weak reference processing. | 
| 959 |   void ref_processing_init(); | 
| 960 |  | 
| 961 |   virtual Name kind() const { | 
| 962 |     return CollectedHeap::G1; | 
| 963 |   } | 
| 964 |  | 
| 965 |   virtual const char* name() const { | 
| 966 |     return "G1"; |
| 967 |   } | 
| 968 |  | 
| 969 |   const G1CollectorState* collector_state() const { return &_collector_state; } | 
| 970 |   G1CollectorState* collector_state() { return &_collector_state; } | 
| 971 |  | 
| 972 |   // The current policy object for the collector. | 
| 973 |   G1Policy* policy() const { return _policy; } | 
| 974 |   // The remembered set. | 
| 975 |   G1RemSet* rem_set() const { return _rem_set; } | 
| 976 |  | 
| 977 |   inline G1GCPhaseTimes* phase_times() const; | 
| 978 |  | 
| 979 |   HeapRegionManager* hrm() const { return _hrm; } | 
| 980 |  | 
| 981 |   const G1CollectionSet* collection_set() const { return &_collection_set; } | 
| 982 |   G1CollectionSet* collection_set() { return &_collection_set; } | 
| 983 |  | 
| 984 |   virtual SoftRefPolicy* soft_ref_policy(); | 
| 985 |  | 
| 986 |   virtual void initialize_serviceability(); | 
| 987 |   virtual MemoryUsage memory_usage(); | 
| 988 |   virtual GrowableArray<GCMemoryManager*> memory_managers(); | 
| 989 |   virtual GrowableArray<MemoryPool*> memory_pools(); | 
| 990 |  | 
| 991 |   // Try to minimize the remembered set. | 
| 992 |   void scrub_rem_set(); | 
| 993 |  | 
| 994 |   // Apply the given closure on all cards in the Hot Card Cache, emptying it. | 
| 995 |   void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i); | 
| 996 |  | 
| 997 |   // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it. | 
| 998 |   void iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i); | 
| 999 |  | 
| 1000 |   // The shared block offset table array. | 
| 1001 |   G1BlockOffsetTable* bot() const { return _bot; } | 
| 1002 |  | 
| 1003 |   // Reference Processing accessors | 
| 1004 |  | 
| 1005 |   // The STW reference processor.... | 
| 1006 |   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; } | 
| 1007 |  | 
| 1008 |   G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; } | 
| 1009 |  | 
| 1010 |   // The Concurrent Marking reference processor... | 
| 1011 |   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; } | 
| 1012 |  | 
| 1013 |   size_t unused_committed_regions_in_bytes() const; | 
| 1014 |  | 
| 1015 |   virtual size_t capacity() const; | 
| 1016 |   virtual size_t used() const; | 
| 1017 |   // This should be called when we're not holding the heap lock. The | 
| 1018 |   // result might be a bit inaccurate. | 
| 1019 |   size_t used_unlocked() const; | 
| 1020 |   size_t recalculate_used() const; | 
| 1021 |  | 
| 1022 |   // These virtual functions do the actual allocation. | 
| 1023 |   // Some heaps may offer a contiguous region for shared non-blocking | 
| 1024 |   // allocation, via inlined code (by exporting the address of the top and | 
| 1025 |   // end fields defining the extent of the contiguous allocation region.) | 
| 1026 |   // But G1CollectedHeap doesn't yet support this. | 
| 1027 |  | 
| 1028 |   virtual bool is_maximal_no_gc() const { | 
| 1029 |     return _hrm->available() == 0; | 
| 1030 |   } | 
| 1031 |  | 
| 1032 |   // Returns whether there are any regions left in the heap for allocation. | 
| 1033 |   bool has_regions_left_for_allocation() const { | 
| 1034 |     return !is_maximal_no_gc() || num_free_regions() != 0; | 
| 1035 |   } | 
| 1036 |  | 
| 1037 |   // The current number of regions in the heap. | 
| 1038 |   uint num_regions() const { return _hrm->length(); } | 
| 1039 |  | 
| 1040 |   // The max number of regions in the heap. | 
| 1041 |   uint max_regions() const { return _hrm->max_length(); } | 
| 1042 |  | 
| 1043 |   // Max number of regions that can be committed. |
| 1044 |   uint max_expandable_regions() const { return _hrm->max_expandable_length(); } | 
| 1045 |  | 
| 1046 |   // The number of regions that are completely free. | 
| 1047 |   uint num_free_regions() const { return _hrm->num_free_regions(); } | 
| 1048 |  | 
| 1049 |   // The number of regions that can be allocated into. | 
| 1050 |   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); } | 
| 1051 |  | 
| 1052 |   MemoryUsage get_auxiliary_data_memory_usage() const { | 
| 1053 |     return _hrm->get_auxiliary_data_memory_usage(); | 
| 1054 |   } | 
| 1055 |  | 
| 1056 |   // The number of regions that are not completely free. | 
| 1057 |   uint num_used_regions() const { return num_regions() - num_free_regions(); } | 
| 1058 |  | 
| 1059 | #ifdef ASSERT | 
| 1060 |   bool is_on_master_free_list(HeapRegion* hr) { | 
| 1061 |     return _hrm->is_free(hr); | 
| 1062 |   } | 
| 1063 | #endif // ASSERT | 
| 1064 |  | 
| 1065 |   inline void old_set_add(HeapRegion* hr); | 
| 1066 |   inline void old_set_remove(HeapRegion* hr); | 
| 1067 |  | 
| 1068 |   inline void archive_set_add(HeapRegion* hr); | 
| 1069 |  | 
| 1070 |   size_t non_young_capacity_bytes() { | 
| 1071 |     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes; | 
| 1072 |   } | 
| 1073 |  | 
| 1074 |   // Determine whether the given region is one that we are using as an | 
| 1075 |   // old GC alloc region. | 
| 1076 |   bool is_old_gc_alloc_region(HeapRegion* hr); | 
| 1077 |  | 
| 1078 |   // Perform a collection of the heap; intended for use in implementing | 
| 1079 |   // "System.gc".  This probably implies as full a collection as the | 
| 1080 |   // "CollectedHeap" supports. | 
| 1081 |   virtual void collect(GCCause::Cause cause); | 
| 1082 |  | 
| 1083 |   // Perform a collection of the heap with the given cause; if the VM operation | 
| 1084 |   // fails to execute for any reason, retry only if retry_on_gc_failure is set. | 
| 1085 |   // Returns whether this collection actually executed. | 
| 1086 |   bool try_collect(GCCause::Cause cause, bool retry_on_gc_failure); | 
| 1087 |  | 
| 1088 |   // True iff an evacuation has failed in the most-recent collection. | 
| 1089 |   bool evacuation_failed() { return _evacuation_failed; } | 
| 1090 |  | 
| 1091 |   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed); | 
| 1092 |   void prepend_to_freelist(FreeRegionList* list); | 
| 1093 |   void decrement_summary_bytes(size_t bytes); | 
| 1094 |  | 
| 1095 |   virtual bool is_in(const void* p) const; | 
| 1096 | #ifdef ASSERT | 
| 1097 |   // Returns whether p is in one of the available areas of the heap. Slow but | 
| 1098 |   // extensive version. | 
| 1099 |   bool is_in_exact(const void* p) const; | 
| 1100 | #endif | 
| 1101 |  | 
| 1102 |   // Return "TRUE" iff the given object address is within the collection | 
| 1103 |   // set. Assumes that the reference points into the heap. | 
| 1104 |   inline bool is_in_cset(const HeapRegion *hr); | 
| 1105 |   inline bool is_in_cset(oop obj); | 
| 1106 |   inline bool is_in_cset(HeapWord* addr); | 
| 1107 |  | 
| 1108 |   inline bool is_in_cset_or_humongous(const oop obj); | 
| 1109 |  | 
| 1110 |  private: | 
| 1111 |   // This array is used for a quick test on whether a reference points into | 
| 1112 |   // the collection set or not. Each of the array's elements denotes whether the | 
| 1113 |   // corresponding region is in the collection set or not. | 
| 1114 |   G1HeapRegionAttrBiasedMappedArray _region_attr; | 
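
  // Conceptually the check is a constant-time, per-region table lookup,
  // roughly (illustrative only, not the actual field layout):
  //   attr = region_table[(addr - heap_base) >> HeapRegion::LogOfHRGrainBytes]
  // so classifying a reference does not require locating its HeapRegion object.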
| 1115 |  | 
| 1116 |  public: | 
| 1117 |  | 
| 1118 |   inline G1HeapRegionAttr region_attr(const void* obj); | 
| 1119 |  | 
| 1120 |   // Return "TRUE" iff the given object address is in the reserved | 
| 1121 |   // region of g1. | 
| 1122 |   bool is_in_g1_reserved(const void* p) const { | 
| 1123 |     return _hrm->reserved().contains(p); | 
| 1124 |   } | 
| 1125 |  | 
| 1126 |   // Returns a MemRegion that corresponds to the space that has been | 
| 1127 |   // reserved for the heap | 
| 1128 |   MemRegion g1_reserved() const { | 
| 1129 |     return _hrm->reserved(); | 
| 1130 |   } | 
| 1131 |  | 
| 1132 |   G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; } | 
| 1133 |  | 
| 1134 |   G1CardTable* card_table() const { | 
| 1135 |     return _card_table; | 
| 1136 |   } | 
| 1137 |  | 
| 1138 |   // Iteration functions. | 
| 1139 |  | 
| 1140 |   // Iterate over all objects, calling "cl.do_object" on each. | 
| 1141 |   virtual void object_iterate(ObjectClosure* cl); | 
| 1142 |  | 
| 1143 |   virtual void safe_object_iterate(ObjectClosure* cl) { | 
| 1144 |     object_iterate(cl); | 
| 1145 |   } | 
| 1146 |  | 
| 1147 |   // Iterate over heap regions, in address order, terminating the | 
| 1148 |   // iteration early if the "do_heap_region" method returns "true". | 
| 1149 |   void heap_region_iterate(HeapRegionClosure* blk) const; | 
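
  // Minimal sketch of the closure pattern (illustrative only): count the
  // completely free regions by visiting every region in address order.
  //   class CountFreeClosure : public HeapRegionClosure {
  //     uint _free;
  //    public:
  //     CountFreeClosure() : _free(0) {}
  //     bool do_heap_region(HeapRegion* hr) {
  //       if (hr->is_free()) {
  //         _free++;
  //       }
  //       return false;  // false == continue the iteration
  //     }
  //     uint free_regions() const { return _free; }
  //   };
  //   CountFreeClosure cl;
  //   G1CollectedHeap::heap()->heap_region_iterate(&cl);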
| 1150 |  | 
| 1151 |   // Return the region with the given index. It assumes the index is valid. | 
| 1152 |   inline HeapRegion* region_at(uint index) const; | 
| 1153 |   inline HeapRegion* region_at_or_null(uint index) const; | 
| 1154 |  | 
| 1155 |   // Return the next region (by index) that is part of the same | 
| 1156 |   // humongous object that hr is part of. | 
| 1157 |   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const; | 
| 1158 |  | 
| 1159 |   // Calculate the region index of the given address. Given address must be | 
| 1160 |   // within the heap. | 
| 1161 |   inline uint addr_to_region(HeapWord* addr) const; | 
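
  // Conceptually (illustrative only): the region index is the byte offset of
  // addr from the start of the reserved heap divided by the region size, i.e.
  //   index = pointer_delta(addr, g1_reserved().start(), 1) >> HeapRegion::LogOfHRGrainBytes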
| 1162 |  | 
| 1163 |   inline HeapWord* bottom_addr_for_region(uint index) const; | 
| 1164 |  | 
| 1165 |   // Two functions to iterate over the heap regions in parallel. Threads | 
| 1166 |   // compete using the HeapRegionClaimer to claim the regions before | 
| 1167 |   // applying the closure on them. | 
  // The _from_worker_offset version uses the HeapRegionClaimer and
  // the worker id to calculate a start offset so that not all workers
  // start from the same point.
| 1171 |   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl, | 
| 1172 |                                                   HeapRegionClaimer* hrclaimer, | 
| 1173 |                                                   uint worker_id) const; | 
| 1174 |  | 
| 1175 |   void heap_region_par_iterate_from_start(HeapRegionClosure* cl, | 
| 1176 |                                           HeapRegionClaimer* hrclaimer) const; | 
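
  // Typical use (illustrative sketch; MyTask and MyRegionClosure are
  // hypothetical): a gang task owns one HeapRegionClaimer sized for the
  // number of workers, and each worker passes its id so the claimed start
  // regions are spread out.
  //   HeapRegionClaimer _claimer(workers()->active_workers());
  //   void MyTask::work(uint worker_id) {
  //     MyRegionClosure cl;
  //     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
  //   }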
| 1177 |  | 
| 1178 |   // Iterate over all regions currently in the current collection set. | 
| 1179 |   void collection_set_iterate_all(HeapRegionClosure* blk); | 
| 1180 |  | 
  // Iterate over the regions in the current increment of the collection set.
  // The start region for each worker id is chosen so that, across the set of
  // active workers, the start regions are evenly spread over the collection
  // set regions to be iterated.
| 1185 |   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id); | 
| 1186 |  | 
| 1187 |   // Returns the HeapRegion that contains addr. addr must not be NULL. | 
| 1188 |   template <class T> | 
| 1189 |   inline HeapRegion* heap_region_containing(const T addr) const; | 
| 1190 |  | 
| 1191 |   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted | 
| 1192 |   // region. addr must not be NULL. | 
| 1193 |   template <class T> | 
| 1194 |   inline HeapRegion* heap_region_containing_or_null(const T addr) const; | 
| 1195 |  | 
| 1196 |   // A CollectedHeap is divided into a dense sequence of "blocks"; that is, | 
| 1197 |   // each address in the (reserved) heap is a member of exactly | 
| 1198 |   // one block.  The defining characteristic of a block is that it is | 
| 1199 |   // possible to find its size, and thus to progress forward to the next | 
| 1200 |   // block.  (Blocks may be of different sizes.)  Thus, blocks may | 
| 1201 |   // represent Java objects, or they might be free blocks in a | 
| 1202 |   // free-list-based heap (or subheap), as long as the two kinds are | 
| 1203 |   // distinguishable and the size of each is determinable. | 
| 1204 |  | 
| 1205 |   // Returns the address of the start of the "block" that contains the | 
  // address "addr".  We say "block" instead of "object" since some heaps
  // may not pack objects densely; a block may be either an object or a
  // non-object.
| 1209 |   virtual HeapWord* block_start(const void* addr) const; | 
| 1210 |  | 
| 1211 |   // Requires "addr" to be the start of a block, and returns "TRUE" iff | 
| 1212 |   // the block is an object. | 
| 1213 |   virtual bool block_is_obj(const HeapWord* addr) const; | 
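
  // Illustrative use of the block interface (sketch only; g1h and
  // interior_addr are placeholders): find the enclosing block of an address
  // and check whether it is a parseable object.
  //   HeapWord* start = g1h->block_start(interior_addr);
  //   if (g1h->block_is_obj(start)) {
  //     oop obj = oop(start);
  //     // ... use obj ...
  //   }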
| 1214 |  | 
| 1215 |   // Section on thread-local allocation buffers (TLABs) | 
| 1216 |   // See CollectedHeap for semantics. | 
| 1217 |  | 
| 1218 |   bool supports_tlab_allocation() const; | 
| 1219 |   size_t tlab_capacity(Thread* ignored) const; | 
| 1220 |   size_t tlab_used(Thread* ignored) const; | 
| 1221 |   size_t max_tlab_size() const; | 
| 1222 |   size_t unsafe_max_tlab_alloc(Thread* ignored) const; | 
| 1223 |  | 
| 1224 |   inline bool is_in_young(const oop obj); | 
| 1225 |  | 
| 1226 |   // Returns "true" iff the given word_size is "very large". | 
| 1227 |   static bool is_humongous(size_t word_size) { | 
| 1228 |     // Note this has to be strictly greater-than as the TLABs | 
| 1229 |     // are capped at the humongous threshold and we want to | 
| 1230 |     // ensure that we don't try to allocate a TLAB as | 
| 1231 |     // humongous and that we don't allocate a humongous | 
| 1232 |     // object in a TLAB. | 
| 1233 |     return word_size > _humongous_object_threshold_in_words; | 
| 1234 |   } | 
| 1235 |  | 
| 1236 |   // Returns the humongous threshold for a specific region size | 
| 1237 |   static size_t humongous_threshold_for(size_t region_size) { | 
| 1238 |     return (region_size / 2); | 
| 1239 |   } | 
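
  // Worked example, assuming 2 MB regions on a 64-bit VM (8-byte HeapWords):
  // HeapRegion::GrainWords == 2*M / 8 == 262144, so the humongous threshold
  // is 131072 words (1 MB); is_humongous(131072) is false (exactly at the
  // threshold), while is_humongous(131073) is true.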
| 1240 |  | 
| 1241 |   // Returns the number of regions the humongous object of the given word size | 
| 1242 |   // requires. | 
| 1243 |   static size_t humongous_obj_size_in_regions(size_t word_size); | 
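
  // Minimal sketch of the computation (illustrative, not necessarily the
  // exact implementation): round the object size up to a whole number of
  // regions.
  //   size_t regions = align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
  // E.g. with 2 MB regions, a 5 MB object (655360 words) needs 3 regions.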
| 1244 |  | 
  // Return the maximum heap capacity.
| 1246 |   virtual size_t max_capacity() const; | 
| 1247 |  | 
  // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
| 1249 |   virtual size_t max_reserved_capacity() const; | 
| 1250 |  | 
| 1251 |   virtual jlong millis_since_last_gc(); | 
| 1252 |  | 
| 1253 |  | 
| 1254 |   // Convenience function to be used in situations where the heap type can be | 
| 1255 |   // asserted to be this type. | 
| 1256 |   static G1CollectedHeap* heap(); | 
| 1257 |  | 
| 1258 |   void set_region_short_lived_locked(HeapRegion* hr); | 
| 1259 |   // add appropriate methods for any other surv rate groups | 
| 1260 |  | 
| 1261 |   const G1SurvivorRegions* survivor() const { return &_survivor; } | 
| 1262 |  | 
| 1263 |   uint eden_regions_count() const { return _eden.length(); } | 
| 1264 |   uint survivor_regions_count() const { return _survivor.length(); } | 
| 1265 |   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); } | 
| 1266 |   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); } | 
| 1267 |   uint young_regions_count() const { return _eden.length() + _survivor.length(); } | 
| 1268 |   uint old_regions_count() const { return _old_set.length(); } | 
| 1269 |   uint archive_regions_count() const { return _archive_set.length(); } | 
| 1270 |   uint humongous_regions_count() const { return _humongous_set.length(); } | 
| 1271 |  | 
| 1272 | #ifdef ASSERT | 
| 1273 |   bool check_young_list_empty(); | 
| 1274 | #endif | 
| 1275 |  | 
  // *** Support for concurrent marking. It is not clear that so
  // many of these need to be public.
| 1278 |  | 
| 1279 |   // The functions below are helper functions that a subclass of | 
| 1280 |   // "CollectedHeap" can use in the implementation of its virtual | 
| 1281 |   // functions. | 
  // This performs a concurrent marking of the live objects, recording
  // them in a bitmap kept off to the side.
| 1284 |   void do_concurrent_mark(); | 
| 1285 |  | 
| 1286 |   bool is_marked_next(oop obj) const; | 
| 1287 |  | 
| 1288 |   // Determine if an object is dead, given the object and also | 
| 1289 |   // the region to which the object belongs. An object is dead | 
| 1290 |   // iff a) it was not allocated since the last mark, b) it | 
| 1291 |   // is not marked, and c) it is not in an archive region. | 
| 1292 |   bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | 
| 1293 |     return | 
| 1294 |       hr->is_obj_dead(obj, _cm->prev_mark_bitmap()) && | 
| 1295 |       !hr->is_archive(); | 
| 1296 |   } | 
| 1297 |  | 
| 1298 |   // This function returns true when an object has been | 
| 1299 |   // around since the previous marking and hasn't yet | 
| 1300 |   // been marked during this marking, and is not in an archive region. | 
| 1301 |   bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | 
| 1302 |     return | 
| 1303 |       !hr->obj_allocated_since_next_marking(obj) && | 
| 1304 |       !is_marked_next(obj) && | 
| 1305 |       !hr->is_archive(); | 
| 1306 |   } | 
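
  // Illustrative use (sketch only; g1h is a placeholder): callers such as
  // verification code first locate the region and then use the region-aware
  // variant.
  //   HeapRegion* hr = g1h->heap_region_containing(obj);
  //   if (g1h->is_obj_dead(obj, hr)) {
  //     // obj is dead w.r.t. the previous (completed) marking
  //   }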
| 1307 |  | 
| 1308 |   // Determine if an object is dead, given only the object itself. | 
| 1309 |   // This will find the region to which the object belongs and | 
| 1310 |   // then call the region version of the same function. | 
| 1311 |  | 
  // Note: a NULL reference is never considered dead.
| 1313 |  | 
| 1314 |   inline bool is_obj_dead(const oop obj) const; | 
| 1315 |  | 
| 1316 |   inline bool is_obj_ill(const oop obj) const; | 
| 1317 |  | 
| 1318 |   inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const; | 
| 1319 |   inline bool is_obj_dead_full(const oop obj) const; | 
| 1320 |  | 
| 1321 |   G1ConcurrentMark* concurrent_mark() const { return _cm; } | 
| 1322 |  | 
| 1323 |   // Refinement | 
| 1324 |  | 
| 1325 |   G1ConcurrentRefine* concurrent_refine() const { return _cr; } | 
| 1326 |  | 
| 1327 |   // Optimized nmethod scanning support routines | 
| 1328 |  | 
| 1329 |   // Register the given nmethod with the G1 heap. | 
| 1330 |   virtual void register_nmethod(nmethod* nm); | 
| 1331 |  | 
| 1332 |   // Unregister the given nmethod from the G1 heap. | 
| 1333 |   virtual void unregister_nmethod(nmethod* nm); | 
| 1334 |  | 
| 1335 |   // No nmethod flushing needed. | 
| 1336 |   virtual void flush_nmethod(nmethod* nm) {} | 
| 1337 |  | 
| 1338 |   // No nmethod verification implemented. | 
| 1339 |   virtual void verify_nmethod(nmethod* nm) {} | 
| 1340 |  | 
| 1341 |   // Free up superfluous code root memory. | 
| 1342 |   void purge_code_root_memory(); | 
| 1343 |  | 
| 1344 |   // Rebuild the strong code root lists for each region | 
| 1345 |   // after a full GC. | 
| 1346 |   void rebuild_strong_code_roots(); | 
| 1347 |  | 
| 1348 |   // Partial cleaning of VM internal data structures. | 
| 1349 |   void string_dedup_cleaning(BoolObjectClosure* is_alive, | 
| 1350 |                              OopClosure* keep_alive, | 
| 1351 |                              G1GCPhaseTimes* phase_times = NULL); | 
| 1352 |  | 
| 1353 |   // Performs cleaning of data structures after class unloading. | 
| 1354 |   void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred); | 
| 1355 |  | 
| 1356 |   // Redirty logged cards in the refinement queue. | 
  void redirty_logged_cards();

  // Verification
| 1359 |  | 
| 1360 |   // Deduplicate the string | 
| 1361 |   virtual void deduplicate_string(oop str); | 
| 1362 |  | 
| 1363 |   // Perform any cleanup actions necessary before allowing a verification. | 
| 1364 |   virtual void prepare_for_verify(); | 
| 1365 |  | 
| 1366 |   // Perform verification. | 
| 1367 |  | 
| 1368 |   // vo == UsePrevMarking -> use "prev" marking information, | 
| 1369 |   // vo == UseNextMarking -> use "next" marking information | 
| 1370 |   // vo == UseFullMarking -> use "next" marking bitmap but no TAMS | 
| 1371 |   // | 
| 1372 |   // NOTE: Only the "prev" marking information is guaranteed to be | 
| 1373 |   // consistent most of the time, so most calls to this should use | 
| 1374 |   // vo == UsePrevMarking. | 
| 1375 |   // Currently, there is only one case where this is called with | 
| 1376 |   // vo == UseNextMarking, which is to verify the "next" marking | 
| 1377 |   // information at the end of remark. | 
| 1378 |   // Currently there is only one place where this is called with | 
| 1379 |   // vo == UseFullMarking, which is to verify the marking during a | 
| 1380 |   // full GC. | 
| 1381 |   void verify(VerifyOption vo); | 
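
  // Illustrative call (assuming the VerifyOption_G1UsePrevMarking constant
  // of the shared VerifyOption enum):
  //   G1CollectedHeap::heap()->verify(VerifyOption_G1UsePrevMarking);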
| 1382 |  | 
| 1383 |   // WhiteBox testing support. | 
| 1384 |   virtual bool supports_concurrent_phase_control() const; | 
| 1385 |   virtual bool request_concurrent_phase(const char* phase); | 
| 1386 |   bool is_heterogeneous_heap() const; | 
| 1387 |  | 
| 1388 |   virtual WorkGang* get_safepoint_workers() { return _workers; } | 
| 1389 |  | 
| 1390 |   // The methods below are here for convenience and dispatch the | 
  // appropriate method depending on the value of the given VerifyOption
| 1392 |   // parameter. The values for that parameter, and their meanings, | 
| 1393 |   // are the same as those above. | 
| 1394 |  | 
| 1395 |   bool is_obj_dead_cond(const oop obj, | 
| 1396 |                         const HeapRegion* hr, | 
| 1397 |                         const VerifyOption vo) const; | 
| 1398 |  | 
| 1399 |   bool is_obj_dead_cond(const oop obj, | 
| 1400 |                         const VerifyOption vo) const; | 
| 1401 |  | 
| 1402 |   G1HeapSummary create_g1_heap_summary(); | 
| 1403 |   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats); | 
| 1404 |  | 
| 1405 |   // Printing | 
| 1406 | private: | 
| 1407 |   void print_heap_regions() const; | 
| 1408 |   void print_regions_on(outputStream* st) const; | 
| 1409 |  | 
| 1410 | public: | 
| 1411 |   virtual void print_on(outputStream* st) const; | 
| 1412 |   virtual void print_extended_on(outputStream* st) const; | 
| 1413 |   virtual void print_on_error(outputStream* st) const; | 
| 1414 |  | 
| 1415 |   virtual void print_gc_threads_on(outputStream* st) const; | 
| 1416 |   virtual void gc_threads_do(ThreadClosure* tc) const; | 
| 1417 |  | 
| 1418 |   // Override | 
| 1419 |   void print_tracing_info() const; | 
| 1420 |  | 
| 1421 |   // The following two methods are helpful for debugging RSet issues. | 
| 1422 |   void print_cset_rsets() PRODUCT_RETURN; | 
| 1423 |   void print_all_rsets() PRODUCT_RETURN; | 
| 1424 |  | 
| 1425 |   size_t pending_card_num(); | 
| 1426 | }; | 
| 1427 |  | 
| 1428 | class G1ParEvacuateFollowersClosure : public VoidClosure { | 
| 1429 | private: | 
| 1430 |   double _start_term; | 
| 1431 |   double _term_time; | 
| 1432 |   size_t _term_attempts; | 
| 1433 |  | 
| 1434 |   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); } | 
| 1435 |   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); } | 
| 1436 | protected: | 
| 1437 |   G1CollectedHeap*              _g1h; | 
| 1438 |   G1ParScanThreadState*         _par_scan_state; | 
| 1439 |   RefToScanQueueSet*            _queues; | 
| 1440 |   ParallelTaskTerminator*       _terminator; | 
| 1441 |   G1GCPhaseTimes::GCParPhases   _phase; | 
| 1442 |  | 
| 1443 |   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; } | 
| 1444 |   RefToScanQueueSet*      queues()         { return _queues; } | 
| 1445 |   ParallelTaskTerminator* terminator()     { return _terminator; } | 
| 1446 |  | 
| 1447 | public: | 
| 1448 |   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, | 
| 1449 |                                 G1ParScanThreadState* par_scan_state, | 
| 1450 |                                 RefToScanQueueSet* queues, | 
| 1451 |                                 ParallelTaskTerminator* terminator, | 
| 1452 |                                 G1GCPhaseTimes::GCParPhases phase) | 
| 1453 |     : _start_term(0.0), _term_time(0.0), _term_attempts(0), | 
| 1454 |       _g1h(g1h), _par_scan_state(par_scan_state), | 
| 1455 |       _queues(queues), _terminator(terminator), _phase(phase) {} | 
| 1456 |  | 
| 1457 |   void do_void(); | 
| 1458 |  | 
| 1459 |   double term_time() const { return _term_time; } | 
| 1460 |   size_t term_attempts() const { return _term_attempts; } | 
| 1461 |  | 
| 1462 | private: | 
| 1463 |   inline bool offer_termination(); | 
| 1464 | }; | 
| 1465 |  | 
| 1466 | #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP | 
| 1467 |  |