/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1OOPCLOSURES_HPP
#define SHARE_GC_G1_G1OOPCLOSURES_HPP

#include "gc/g1/g1HeapRegionAttr.hpp"
#include "memory/iterator.hpp"
#include "oops/markOop.hpp"

class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class G1ConcurrentMark;
class DirtyCardToOopClosure;
class G1CMBitMap;
class G1ParScanThreadState;
class G1ScanEvacuatedObjClosure;
class G1CMTask;
class ReferenceProcessor;

class G1ScanClosureBase : public BasicOopIterateClosure {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadState* _par_scan_state;

  G1ScanClosureBase(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state);
  ~G1ScanClosureBase() { }

  template <class T>
  inline void prefetch_and_push(T* p, oop const obj);

  template <class T>
  inline void handle_non_cset_obj_common(G1HeapRegionAttr const region_attr, T* p, oop const obj);
public:
  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }

  inline void trim_queue_partially();
};

// Used to scan cards from the DCQS or the remembered sets during garbage collection.
class G1ScanCardClosure : public G1ScanClosureBase {
public:
  G1ScanCardClosure(G1CollectedHeap* g1h,
                    G1ParScanThreadState* pss) :
    G1ScanClosureBase(g1h, pss) { }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_work(p); }
};

// Used during Optional RS scanning to make sure we trim the queues in a timely manner.
class G1ScanRSForOptionalClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  G1ScanCardClosure* _scan_cl;
public:
  G1ScanRSForOptionalClosure(G1CollectedHeap* g1h, G1ScanCardClosure* cl) : _g1h(g1h), _scan_cl(cl) { }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(oop* p)          { do_oop_work(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_work(p); }
};

// This closure is applied to the fields of the objects that have just been copied during evacuation.
class G1ScanEvacuatedObjClosure : public G1ScanClosureBase {
  friend class G1ScanInYoungSetter;

  enum ScanningInYoungValues {
    False = 0,
    True,
    Uninitialized
  };

  ScanningInYoungValues _scanning_in_young;

public:
  G1ScanEvacuatedObjClosure(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state) :
    G1ScanClosureBase(g1h, par_scan_state), _scanning_in_young(Uninitialized) { }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(oop* p)          { do_oop_work(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_work(p); }

  // We need to do reference discovery while processing evacuated objects.
  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERED_AND_DISCOVERY; }

  void set_ref_discoverer(ReferenceDiscoverer* rd) {
    set_ref_discoverer_internal(rd);
  }
};

// RAII object to properly set the _scanning_in_young field in G1ScanEvacuatedObjClosure.
class G1ScanInYoungSetter : public StackObj {
  G1ScanEvacuatedObjClosure* _closure;

public:
  G1ScanInYoungSetter(G1ScanEvacuatedObjClosure* closure, bool new_value) : _closure(closure) {
    assert(_closure->_scanning_in_young == G1ScanEvacuatedObjClosure::Uninitialized, "Must not be set");
    _closure->_scanning_in_young = new_value ? G1ScanEvacuatedObjClosure::True : G1ScanEvacuatedObjClosure::False;
  }

  ~G1ScanInYoungSetter() {
    DEBUG_ONLY(_closure->_scanning_in_young = G1ScanEvacuatedObjClosure::Uninitialized;)
  }
};

// Add back base class for metadata
class G1ParCopyHelper : public OopClosure {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadState* _par_scan_state;
  uint _worker_id;              // Cache value from par_scan_state.
  ClassLoaderData* _scanned_cld;
  G1ConcurrentMark* _cm;

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that are guaranteed not to move
  // during the GC (i.e., non-CSet objects). It is MT-safe.
  inline void mark_object(oop obj);

  G1ParCopyHelper(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state);
  ~G1ParCopyHelper() { }

public:
  void set_scanned_cld(ClassLoaderData* cld) { _scanned_cld = cld; }
  inline void do_cld_barrier(oop new_obj);

  inline void trim_queue_partially();
};

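// Kind of additional barrier, if any, a G1ParCopyClosure applies after an object
// has been copied; currently only the class loader data (CLD) barrier is needed.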
enum G1Barrier {
  G1BarrierNone,
  G1BarrierCLD
};

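// Whether and how objects reached from roots should be marked for concurrent marking.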
enum G1Mark {
  G1MarkNone,
  G1MarkFromRoot,
  G1MarkPromotedFromRoot
};

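// Closure applied to root references during evacuation: objects in the collection
// set are copied to their new location and the reference is updated. The template
// parameters select the barrier and marking behavior (see G1Barrier and G1Mark above).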
template <G1Barrier barrier, G1Mark do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
public:
  G1ParCopyClosure(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state) :
    G1ParCopyHelper(g1h, par_scan_state) { }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

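// Applies the given copy closure to the oops embedded in a ClassLoaderData during
// root scanning, optionally restricting the walk to CLDs with modified (dirty) oops.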
class G1CLDScanClosure : public CLDClosure {
  G1ParCopyHelper* _closure;
  bool             _process_only_dirty;
  int              _claim;
  int              _count;
public:
  G1CLDScanClosure(G1ParCopyHelper* closure,
                   bool process_only_dirty, int claim_value)
  : _closure(closure), _process_only_dirty(process_only_dirty), _claim(claim_value), _count(0) {}
  void do_cld(ClassLoaderData* cld);
};

// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public MetadataVisitingOopIterateClosure {
  G1CollectedHeap*   _g1h;
  G1CMTask*          _task;
public:
  G1CMOopClosure(G1CollectedHeap* g1h, G1CMTask* task);
  template <class T> void do_oop_work(T* p);
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public MetadataVisitingOopIterateClosure {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  uint _worker_id;
public:
  G1RootRegionScanClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint worker_id) :
    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
  template <class T> void do_oop_work(T* p);
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

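// Closure applied to references found while refining dirty cards concurrently with
// the application, adding remembered set entries for interesting references.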
class G1ConcurrentRefineOopClosure : public BasicOopIterateClosure {
  G1CollectedHeap* _g1h;
  uint _worker_i;

public:
  G1ConcurrentRefineOopClosure(G1CollectedHeap* g1h, uint worker_i) :
    _g1h(g1h),
    _worker_i(worker_i) {
  }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_work(p); }
};

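// Closure applied to object fields during the concurrent rebuild of remembered sets
// after marking has finished.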
class G1RebuildRemSetClosure : public BasicOopIterateClosure {
  G1CollectedHeap* _g1h;
  uint _worker_id;
public:
  G1RebuildRemSetClosure(G1CollectedHeap* g1h, uint worker_id) : _g1h(g1h), _worker_id(worker_id) {
  }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};

#endif // SHARE_GC_G1_G1OOPCLOSURES_HPP