/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
#define SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1OopStarChunkedList.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
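// Evacuate the (non-NULL) object referenced by p if it is still in the collection
// set, then update the reference to the object's new (or already forwarded)
// location. Cross-region references out of non-young regions additionally get
// their card enqueued via enqueue_card_if_tracked().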
template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should never be NULL here, as such references are never
  // pushed onto the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, (benign) races in the claim mechanism during RSet scanning mean that
  // more than one thread might claim the same card. The same card may therefore
  // be processed multiple times, so we might still get references into the old
  // gen here, which is why we need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous
  // region, as such regions are never added to the collection set per the
  // precondition above.
  assert(!region_attr.is_humongous(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region((HeapWord*)obj), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markOop m = obj->mark_raw();
  if (m->is_marked()) {
    obj = (oop) m->decode_pointer();
  } else {
    obj = copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  assert(obj != NULL, "Must be");
  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }
  HeapRegion* from = _g1h->heap_region_containing(p);
  if (!from->is_young()) {
    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
  }
}

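// Push a (verified) reference onto this thread's scan queue.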
template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
  assert(verify_ref(ref), "sanity");
  _refs->push(ref);
}

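// Process one chunk of a large object array whose scanning has been split up.
// The queue entry carries a partial-array mask, and the to-space copy's length
// field is (ab)used to record the next chunk's start index, as explained in the
// comments below.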
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         "invariant, next index: %d, length: %d", next_index, length);

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }

  HeapRegion* hr = _g1h->heap_region_containing(to_obj);
  G1ScanInYoungSetter x(&_scanner, hr->is_young());
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object, which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

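// Queue entries are either regular references or partial-array markers; only
// full-width oop* entries can carry the partial-array mask, so the narrowOop*
// overload always evacuates directly.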
inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    do_oop_evac(ref_to_scan);
  } else {
    do_oop_partial_array(ref_to_scan);
  }
}

inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
  assert(!has_partial_array_mask(ref_to_scan), "NarrowOop* elements should never be partial arrays.");
  do_oop_evac(ref_to_scan);
}

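// Unpack a StarTask and hand it to the matching deal_with_reference() overload.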
inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

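// Repeatedly steal tasks from other workers' queues, fully draining our own
// queue after each stolen task, until no more work can be stolen.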
void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    dispatch_reference(stolen_task);

    // We've just processed a reference and we might have made
    // available new entries on the queues. So we have to make sure
    // we drain the queues as necessary.
    trim_queue();
  }
}

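// Partial trimming keeps the queue size between two thresholds: trimming starts
// once the queue has spilled into the overflow stack or grown past the upper
// threshold, and stops once it is back below the lower threshold.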
inline bool G1ParScanThreadState::needs_partial_trimming() const {
  return !_refs->overflow_empty() || _refs->size() > _stack_trim_upper_threshold;
}

inline bool G1ParScanThreadState::is_partially_trimmed() const {
  return _refs->overflow_empty() && _refs->size() <= _stack_trim_lower_threshold;
}

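// Process queue entries until the local queue holds at most threshold elements.
// Overflowed entries are pushed back onto the task queue where possible so other
// threads can steal them; otherwise they are processed directly.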
inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  StarTask ref;
  // Drain the overflow stack first, so other threads can potentially steal.
  while (_refs->pop_overflow(ref)) {
    if (!_refs->try_push_to_taskqueue(ref)) {
      dispatch_reference(ref);
    }
  }

  while (_refs->pop_local(ref, threshold)) {
    dispatch_reference(ref);
  }
}

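// Trim the queue down to the lower threshold, repeating because processing the
// drained entries may push new work. The time spent is accumulated in _trim_ticks.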
inline void G1ParScanThreadState::trim_queue_partially() {
  if (!needs_partial_trimming()) {
    return;
  }

  const Ticks start = Ticks::now();
  do {
    trim_queue_to_threshold(_stack_trim_lower_threshold);
  } while (!is_partially_trimmed());
  _trim_ticks += Ticks::now() - start;
}

inline Tickspan G1ParScanThreadState::trim_ticks() const {
  return _trim_ticks;
}

inline void G1ParScanThreadState::reset_trim_ticks() {
  _trim_ticks = Tickspan();
}

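// References into optional collection set regions are not processed right away;
// they are recorded in the per-region G1OopStarChunkedList for later use.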
template <typename T>
inline void G1ParScanThreadState::remember_root_into_optional_region(T* p) {
  oop o = RawAccess<IS_NOT_NULL>::oop_load(p);
  uint index = _g1h->heap_region_containing(o)->index_in_opt_cset();
  assert(index < _num_optional_regions,
         "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _num_optional_regions);
  _oops_into_optional_regions[index].push_root(p);
}

template <typename T>
inline void G1ParScanThreadState::remember_reference_into_optional_region(T* p) {
  oop o = RawAccess<IS_NOT_NULL>::oop_load(p);
  uint index = _g1h->heap_region_containing(o)->index_in_opt_cset();
  assert(index < _num_optional_regions,
         "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _num_optional_regions);
  _oops_into_optional_regions[index].push_oop(p);
  DEBUG_ONLY(verify_ref(p);)
}

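// Return the chunked list of oops recorded for the given optional region.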
G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const HeapRegion* hr) {
  assert(hr->index_in_opt_cset() < _num_optional_regions,
         "Trying to access optional region idx %u beyond " SIZE_FORMAT " " HR_FORMAT,
         hr->index_in_opt_cset(), _num_optional_regions, HR_FORMAT_PARAMS(hr));
  return &_oops_into_optional_regions[hr->index_in_opt_cset()];
}

#endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP