/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

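// Closure used by the clone and array-copy barriers below: it visits each oop
// field in the copied range and updates references that still point into the
// collection set to their forwarded copies. With STOREVAL_EVAC_BARRIER (the
// traversal GC flavor, where marking and evacuation are coalesced), the
// referent is also evacuated on the fly and enqueued into the SATB queues.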
template <bool STOREVAL_EVAC_BARRIER>
class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahBarrierSet* _bs;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_EVAC_BARRIER) {
      o = _heap->evac_update_with_forwarded(p);
      if (!CompressedOops::is_null(o)) {
        _bs->enqueue(o);
      }
    } else {
      _heap->maybe_update_with_forwarded(p);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             NULL /* barrier_set_nmethod */,
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_set()
{
}

ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::ShenandoahBarrierSet;
}

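// Shenandoah keeps no card table or similar side structure, so there is no
// alignment constraint for barriers to honor: every address is "aligned".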
bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}

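// Helper for the array post-barrier: walk the destination range slot by slot
// and apply the update closure. T selects the slot width (narrowOop vs. oop);
// STOREVAL_EVAC_BARRIER selects the evacuating (traversal) closure flavor.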
template <class T, bool STOREVAL_EVAC_BARRIER>
void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
  assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  ShenandoahUpdateRefsForOopClosure<STOREVAL_EVAC_BARRIER> cl;
  T* dst = (T*) start;
  for (size_t i = 0; i < count; i++) {
    cl.do_oop(dst++);
  }
}

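// Post-barrier for bulk reference-array copies. During traversal GC the
// evacuating flavor is used, and it must run inside a ShenandoahEvacOOMScope
// so that out-of-memory during evacuation is handled by the evac-OOM protocol.
// An arraycopy of Object[] elements reaches this barrier pair roughly as
// (illustrative sketch only; the actual call sites are in the arraycopy
// runtime paths):
//
//   bs->write_ref_array_pre(dst, count, dest_uninitialized); // SATB snapshot
//   ... copy the elements ...
//   bs->write_ref_array((HeapWord*) dst, count);             // fix forwarded refs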
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(_heap->is_update_refs_in_progress() || _heap->is_concurrent_traversal_in_progress(),
         "should not be here otherwise");
  assert(count > 0, "Should have been filtered before");

  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* evac = */ true>(start, count);
    } else {
      write_ref_array_loop<oop, /* evac = */ true>(start, count);
    }
  } else {
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* evac = */ false>(start, count);
    } else {
      write_ref_array_loop<oop, /* evac = */ false>(start, count);
    }
  }
}

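// SATB pre-barrier for reference-array stores: enqueue the previous (about to
// be overwritten) referents that are not yet marked, preserving the
// snapshot-at-the-beginning invariant. If the heap may contain forwarded
// objects, each referent is first resolved through its forwarding pointer.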
template <class T>
void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
  shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
  assert(ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).is_active(), "Shouldn't be here otherwise");
  assert(ShenandoahSATBBarrier, "Shouldn't be here otherwise");
  assert(count > 0, "Should have been filtered before");

  Thread* thread = Thread::current();
  ShenandoahMarkingContext* ctx = _heap->marking_context();
  bool has_forwarded = _heap->has_forwarded_objects();
  T* elem_ptr = dst;
  for (size_t i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = RawAccess<>::oop_load(elem_ptr);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      if (has_forwarded) {
        obj = resolve_forwarded_not_null(obj);
      }
      if (!ctx->is_marked(obj)) {
        ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(obj);
      }
    }
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

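// SATB pre-barrier for a single reference field: while concurrent marking is
// in progress, enqueue the previous value of the field so the snapshot stays
// complete.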
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
}

// These are the more general virtual versions.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}

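// The post-write barrier proper is empty for Shenandoah: all the real work
// happens in the pre-barriers and load-reference barriers elsewhere in this
// file, so only verification asserts remain here.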
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}

void ShenandoahBarrierSet::write_region(MemRegion mr) {
  if (!ShenandoahCloneBarrier) return;
  if (!_heap->is_update_refs_in_progress() && !_heap->is_concurrent_traversal_in_progress()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value' because
  // it would be NULL in any case. But we *are* interested in any oop*
  // that potentially needs to be updated.

  oop obj = oop(mr.start());
  shenandoah_assert_correct(NULL, obj);
  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsForOopClosure</* evac = */ true> cl;
    obj->oop_iterate(&cl);
  } else {
    ShenandoahUpdateRefsForOopClosure</* evac = */ false> cl;
    obj->oop_iterate(&cl);
  }
}

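// Load-reference barrier: given an oop just loaded from the heap, return its
// canonical copy. This degenerates to a no-op unless the barrier is enabled
// and the heap may contain forwarded objects.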
oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
  if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
    return load_reference_barrier_impl(obj);
  } else {
    return obj;
  }
}

oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
  if (obj != NULL) {
    return load_reference_barrier_not_null(obj);
  } else {
    return obj;
  }
}

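// Slow path of the load-reference barrier for mutator threads during
// evacuation: if the object is in the collection set and has no forwarded
// copy yet, evacuate it here, and opportunistically evacuate a few of its
// neighbors (see the evac-assist comment inside).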
oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
  shenandoah_assert_in_cset(NULL, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (oopDesc::equals_raw(obj, fwd)) {
    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan the _nearest_ N objects, regardless of whether they are eligible for evac
    // or not. The scan itself should also avoid touching the non-marked objects below TAMS,
    // because their metadata (notably, klasses) may be incorrect already.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses an incomplete marking context, because it coalesces evac with mark.
      // Other code uses the complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
              _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      HeapWord* cur = (HeapWord*)obj + obj->size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size();
      }
    }

    return res_oop;
  }
  return fwd;
}

oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::equals_raw(obj, fwd)) {
      Thread* t = Thread::current();
      if (t->is_GC_task_thread()) {
        return _heap->evacuate_object(obj, t);
      } else {
        ShenandoahEvacOOMScope oom_evac_scope;
        return _heap->evacuate_object(obj, t);
      }
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}

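// Storeval and keep-alive barriers both funnel into enqueue(): the storeval
// barrier applies during traversal GC, where newly stored values must be kept
// in the marking wavefront; the keep-alive barrier applies during concurrent
// mark, typically for referents revealed by Reference.get() and similar
// weak-reference loads.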
void ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
    enqueue(obj);
  }
}

void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
    enqueue(obj);
  }
}

void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking<false>(obj)) return;

  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
}

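// Thread lifecycle hooks. On attach, a thread picks up the current SATB
// activation state; Java threads also pick up the GC state and set up a
// GCLAB. On detach, the SATB queue is flushed and any GCLAB retired, since
// the thread no longer participates in the barrier protocol.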
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

void ShenandoahBarrierSet::on_thread_attach(Thread* thread) {
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert(queue.is_empty(), "SATB queue should be empty");
  queue.set_active(_satb_mark_queue_set.is_active());
  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
}

void ShenandoahBarrierSet::on_thread_detach(Thread* thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  queue.flush();
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != NULL) {
      gclab->retire();
    }
  }
}