/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

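// A mark task is either a plain object to scan, or a chunk of an object array.
// For chunked tasks, the (chunk, pow) pair encodes the element range
// [ (chunk - 1) << pow, chunk << pow ) of the array: "pow" is the log2 of the
// chunk size, and "chunk" is the 1-based index of the chunk at that size.
// See do_chunked_array_start() and do_chunked_array() below for how tasks
// are split and processed.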
template <class T>
void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded_except(NULL, obj, _heap->is_concurrent_traversal_in_progress() && _heap->cancelled_gc());
  shenandoah_assert_marked(NULL, obj);
  shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc());

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      obj->oop_iterate(cl);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak that TypeArrayKlass::oop_oop_iterate_impl uses:
      // we skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert(obj->is_typeArray(), "should be type array");
    }
    // Count liveness last: push the outstanding work to the queues first.
    count_liveness(live_data, obj);
  } else {
    // Case 4: Array chunk, has sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow());
  }
}

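// Liveness counting: the per-worker live_data array holds one 16-bit counter
// per heap region. Small updates accumulate in this local cache and are only
// flushed to the region itself when a counter would overflow, which keeps
// updates to the shared per-region counters infrequent.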
inline void ShenandoahConcurrentMark::count_liveness(jushort* live_data, oop obj) {
  size_t region_idx = _heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* region = _heap->get_region(region_idx);
  size_t size = obj->size();

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    size_t max = (1 << (sizeof(jushort) * 8)) - 1;
    if (size >= max) {
      // Too big to fit into the 16-bit counter, add to region data directly.
      region->increase_live_data_gc_words(size);
    } else {
      jushort cur = live_data[region_idx];
      size_t new_val = cur + size;
      if (new_val >= max) {
        // Overflow, flush to region data.
        region->increase_live_data_gc_words(new_val);
        live_data[region_idx] = 0;
      } else {
        // Still fits, remember in the local cache.
        live_data[region_idx] = (jushort) new_val;
      }
    }
  } else {
    shenandoah_assert_in_correct_region(NULL, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    // A humongous object is the only live object in its regions: credit each
    // region with its used space directly, bypassing the 16-bit cache.
    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = _heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}

template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly.
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2_long((size_t) len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array does not split evenly into full chunks, there will be an irregular tail,
    // which we process separately below.
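    //
    // For illustration, assume ObjArrayMarkingStride == 128 and len == 1000. Then
    // bits == 10 and we start with chunk == 1, pow == 10, nominally covering [0, 1024).
    // The loop below pushes (chunk == 1, pow == 9) for [0, 512), then (3, 8) for
    // [512, 768), then (7, 7) for [768, 896); the irregular tail [896, 1000) is
    // scanned in place. Pushed chunks are split further when popped, in do_chunked_array().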

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow: for pow == 31, (1 << pow) would overflow a signed int.
    // Push the bottom half [0, 2^30) as one task, and continue with the top
    // half at pow == 30.
    if (pow >= 31) {
      assert(pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ObjArrayChunkedTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int)ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert(ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ObjArrayChunkedTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
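  //
  // Each split halves the current chunk: the left half (chunk*2 - 1) goes onto
  // the queue and we continue with the right half (chunk*2) at pow - 1. For
  // example, a popped task (chunk == 2, pow == 9) covering [512, 1024) pushes
  // (3, 8), i.e. [512, 768), and continues with (4, 8), i.e. [768, 1024).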
  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert(0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert(0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}

class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context())
  {
  }

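  // Dispatch to the matching do_buffer_impl specialization: references must be
  // resolved (RESOLVE) while the heap may still contain forwarded objects, and
  // string dedup candidates are enqueued only when dedup is enabled.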
  void do_buffer(void **buffer, size_t size) {
    if (_heap->has_forwarded_objects()) {
      if (ShenandoahStringDedup::is_enabled()) {
        do_buffer_impl<RESOLVE, ENQUEUE_DEDUP>(buffer, size);
      } else {
        do_buffer_impl<RESOLVE, NO_DEDUP>(buffer, size);
      }
    } else {
      if (ShenandoahStringDedup::is_enabled()) {
        do_buffer_impl<NONE, ENQUEUE_DEDUP>(buffer, size);
      } else {
        do_buffer_impl<NONE, NO_DEDUP>(buffer, size);
      }
    }
  }

  template<UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
  void do_buffer_impl(void **buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahConcurrentMark::mark_through_ref<oop, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
    }
  }
};

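// Marks the object referenced from p, optionally updating the reference to
// point to the forwardee first, depending on UPDATE_REFS: NONE leaves the
// reference untouched, RESOLVE resolves the forwardee without writing back,
// SIMPLE writes the forwardee back unconditionally, and CONCURRENT writes it
// back while guarding against racing mutator updates.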
template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    switch (UPDATE_REFS) {
    case NONE:
      break;
    case RESOLVE:
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      break;
    case SIMPLE:
      // We piggy-back reference updating to the marking tasks.
      obj = heap->update_with_forwarded_not_null(p, obj);
      break;
    case CONCURRENT:
      obj = heap->maybe_update_with_forwarded_not_null(p, obj);
      break;
    default:
      ShouldNotReachHere();
    }

    // Note: Only when concurrently updating references can obj be different
    // (that is, really different, not just different from-/to-space copies of the same object)
    // from the one we originally loaded. A mutator thread can beat us by writing something
    // else into the location. In that case, we would mark through that updated value,
    // on the off-chance it is not handled by other means (e.g. via SATB). However,
    // if that write was NULL, we don't need to do anything else.
    if (UPDATE_REFS != CONCURRENT || !CompressedOops::is_null(obj)) {
      shenandoah_assert_not_forwarded(p, obj);
      shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());

      if (mark_context->mark(obj)) {
        bool pushed = q->push(ShenandoahMarkTask(obj));
        assert(pushed, "overflow queue should always succeed pushing");

        if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) {
          assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
          ShenandoahStringDedup::enqueue_candidate(obj);
        }
      }

      shenandoah_assert_marked(p, obj);
    }
  }
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP