/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"

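// An object is live unless it is "ill": allocated before the start of the
// current marking and not marked in the next bitmap. Objects allocated since
// the start of marking are implicitly live.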
inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
  return !_g1h->is_obj_ill(obj);
}

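// Restrict concurrent reference discovery to objects in regions whose
// liveness is determined by this marking, i.e. old, humongous and archive
// regions.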
inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
  // Re-check whether the passed object is null. With ReferentBasedDiscovery the
  // mutator may have changed the referent's value (i.e. cleared it) between the
  // time the referent was determined to be potentially alive and calling this
  // method.
  if (obj == NULL) {
    return false;
  }
  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
  return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive();
}

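// Convenience variant that first looks up the region containing obj.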
inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj) {
  HeapRegion* const hr = _g1h->heap_region_containing(obj);
  return mark_in_next_bitmap(worker_id, hr, obj);
}

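// Marks the given object in the next bitmap if it was allocated before nTAMS;
// objects above nTAMS are implicitly live and need no mark. Returns whether
// this call actually set the mark, in which case the object's size is also
// added to the per-worker liveness statistics.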
inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj) {
  assert(hr != NULL, "just checking");
  assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());

  if (hr->obj_allocated_since_next_marking(obj)) {
    return false;
  }

  // Some callers may have stale objects to mark above nTAMS after humongous reclaim.
  // Can't assert that this is a valid object at this point, since it might be in the process of being copied by another thread.
  assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));

  HeapWord* const obj_addr = (HeapWord*)obj;

  bool success = _next_mark_bitmap->par_mark(obj_addr);
  if (success) {
    add_to_liveness(worker_id, obj, obj->size());
  }
  return success;
}

#ifndef PRODUCT
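// Debug-only iteration over all entries of the global mark stack. Walks the
// chunk list without locking, so it must run at a safepoint on the VM thread.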
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
  assert_at_safepoint_on_vm_thread();

  size_t num_chunks = 0;

  TaskQueueEntryChunk* cur = _chunk_list;
  while (cur != NULL) {
    guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);

    for (size_t i = 0; i < EntriesPerChunk; ++i) {
      if (cur->data[i].is_null()) {
        break;
      }
      fn(cur->data[i]);
    }
    cur = cur->next;
    num_chunks++;
  }
}
#endif

// Scans the object or array slice in the given task entry and visits its references.
inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }

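// Pushes the given entry onto the local task queue, first moving some entries
// to the global mark stack if the local queue is full.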
inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
  assert(task_entry.is_array_slice() || _g1h->is_in_g1_reserved(task_entry.obj()), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
              _g1h->heap_region_containing(task_entry.obj())), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant"); // FIXME!!!
  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()), "invariant");

  if (!_task_queue->push(task_entry)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // This should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(task_entry);
    assert(success, "invariant");
  }
}

inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed. Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed. The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = (HeapWord*)obj;
  if (_finger != NULL) {
    // We have a current region.

    // Finger and region values are all NULL or all non-NULL. We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != NULL, "invariant");
    assert(_region_limit != NULL, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}

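// Processes a grey task entry, adding to the number of words scanned. If the
// template parameter scan is false (used for typeArrays, which contain no
// references), the scan itself is skipped and only the periodic limit check
// is performed.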
template<bool scan>
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
  assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()),
         "Any stolen object should be a slice or marked");

  if (scan) {
    if (task_entry.is_array_slice()) {
      _words_scanned += _objArray_processor.process_slice(task_entry.slice());
    } else {
      oop obj = task_entry.obj();
      if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
        _words_scanned += _objArray_processor.process_obj(obj);
      } else {
        _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
      }
    }
  }
  check_limits();
}

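// Scans the part of the given objArray within mr and returns the number of
// words scanned.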
inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
  obj->oop_iterate(_cm_oop_closure, mr);
  return mr.word_size();
}

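// Returns the top-at-rebuild-start (TARS) value for the given region; NULL
// means the region need not be scanned during remembered set rebuild.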
inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(uint region) const {
  assert(region < _g1h->max_regions(), "Tried to access TARS for region %u out of bounds", region);
  return _top_at_rebuild_starts[region];
}

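// Records the region's current top as its TARS if the remembered set tracking
// policy determines that the region needs scanning during the rebuild phase.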
inline void G1ConcurrentMark::update_top_at_rebuild_start(HeapRegion* r) {
  uint const region = r->hrm_index();
  assert(region < _g1h->max_regions(), "Tried to access TARS for region %u out of bounds", region);
  assert(_top_at_rebuild_starts[region] == NULL,
         "TARS for region %u has already been set to " PTR_FORMAT " should be NULL",
         region, p2i(_top_at_rebuild_starts[region]));
  G1RemSetTrackingPolicy* tracker = _g1h->policy()->remset_tracker();
  if (tracker->needs_scan_for_rebuild(r)) {
    _top_at_rebuild_starts[region] = r->top();
  } else {
    // Leave TARS at NULL.
  }
}

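// Accumulates obj_size live words for the region containing obj in this
// task's region mark statistics cache.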
inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
  _mark_stats_cache.add_live_words(_g1h->addr_to_region((HeapWord*)obj), obj_size);
}

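// Forwards the liveness update to the G1CMTask of the given worker.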
inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
  task(worker_id)->update_liveness(obj, size);
}

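// Calls the regular clock and aborts marking for this task if it indicates
// that marking should stop.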
inline void G1CMTask::abort_marking_if_regular_check_fail() {
  if (!regular_clock_call()) {
    set_has_aborted();
  }
}

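// Attempts to mark the given object and, if this task set the mark, decides
// whether the object also needs to be pushed for later scanning (see the
// comments below). Returns whether the mark was set by this call.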
inline bool G1CMTask::make_reference_grey(oop obj) {
  if (!_cm->mark_in_next_bitmap(_worker_id, obj)) {
    return false;
  }

  // No OrderAccess::storeload() is needed. It is implicit in the
  // CAS done in G1CMBitMap::par_mark() in the call above.
  HeapWord* global_finger = _cm->finger();

  // We only need to push a newly grey object on the mark
  // stack if it is in a section of memory the mark bitmap
  // scan has already examined. Mark bitmap scanning
  // maintains progress "fingers" for determining that.
  //
  // Notice that the global finger might be moving forward
  // concurrently. This is not a problem. In the worst case, we
  // mark the object while it is above the global finger and, by
  // the time we read the global finger, it has moved forward
  // past this object. In this case, the object will probably
  // be visited when a task is scanning the region and will also
  // be pushed on the stack. So, some duplicate work, but no
  // correctness problems.
  if (is_below_finger(obj, global_finger)) {
    G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
    if (obj->is_typeArray()) {
      // Immediately process arrays of primitive types, rather
      // than pushing on the mark stack. This keeps us from
      // adding humongous objects to the mark stack that might
      // be reclaimed before the entry is processed - see
      // selection of candidates for eager reclaim of humongous
      // objects. The cost of the additional type test is
      // mitigated by avoiding a trip through the mark stack,
      // by only doing a bookkeeping update and avoiding the
      // actual scan of the object - a typeArray contains no
      // references, and the metadata is built-in.
      process_grey_task_entry<false>(entry);
    } else {
      push(entry);
    }
  }
  return true;
}

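// Loads the oop at p and, if it is non-NULL, makes it grey. Returns whether
// this task set the mark on the object.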
template <class T>
inline bool G1CMTask::deal_with_reference(T* p) {
  increment_refs_reached();
  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
  if (obj == NULL) {
    return false;
  }
  return make_reference_grey(obj);
}

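// Marks the given object in the previous bitmap; used to keep objects live in
// the previous marking (e.g. when handling evacuation failures).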
inline void G1ConcurrentMark::mark_in_prev_bitmap(oop p) {
  assert(!_prev_mark_bitmap->is_marked((HeapWord*) p), "sanity");
  _prev_mark_bitmap->mark((HeapWord*) p);
}

inline bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {
  assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
  return _prev_mark_bitmap->is_marked((HeapWord*)p);
}

inline bool G1ConcurrentMark::is_marked_in_next_bitmap(oop p) const {
  assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
  return _next_mark_bitmap->is_marked((HeapWord*)p);
}

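// Yields to a pending suspendible thread set request (e.g. for a safepoint);
// returns whether we actually yielded.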
inline bool G1ConcurrentMark::do_yield_check() {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#endif // SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP