/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

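// Atomically claims the next region index and returns the corresponding region,
// or NULL once the iterator has run past the last region.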
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add((size_t) 1, &_index);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::get_safepoint_workers() {
  return _safepoint_workers;
}

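// Maps a heap address to its region index by shifting the offset from the heap
// base by the region size shift; only an assert checks that the index is in bounds.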
inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

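// If obj is in the collection set, stores its to-space copy back into p and
// returns that copy; otherwise returns obj unchanged.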
template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  }
#ifdef ASSERT
  else {
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}

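// NULL-checking wrapper: loads the (possibly narrow) reference at p and, if it
// is non-NULL, delegates to maybe_update_with_forwarded_not_null().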
template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    return maybe_update_with_forwarded_not_null(p, obj);
  } else {
    return NULL;
  }
}

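// Evacuation-time variant: if the referent is in the collection set, makes sure a
// to-space copy exists (evacuating it here if necessary), then tries to CAS the
// updated reference into p. Returns the to-space copy on CAS success and NULL on
// failure; references outside the collection set are returned as-is, and a NULL
// slot yields NULL.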
template <class T>
inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop heap_oop = CompressedOops::decode_not_null(o);
    if (in_collection_set(heap_oop)) {
      oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
      if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
        forwarded_oop = evacuate_object(heap_oop, Thread::current());
      }
      oop prev = cas_oop(forwarded_oop, p, heap_oop);
      if (oopDesc::equals_raw(prev, heap_oop)) {
        return forwarded_oop;
      } else {
        return NULL;
      }
    }
    return heap_oop;
  } else {
    return NULL;
  }
}

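// Atomically installs n at addr if the slot still holds c, returning whatever
// value was found there. The narrowOop overload encodes/decodes compressed oops
// around the same compare-and-swap.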
inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg(n, addr, c);
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}

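// Update-references variant: unlike evac_update_with_forwarded() above, this never
// evacuates. A cset object that has no to-space copy yet is returned as-is;
// otherwise the to-space copy is CAS-ed into p, with CAS failures handled as
// described in the comments below.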
template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
  shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
  shenandoah_assert_correct(p, heap_oop);

  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
    if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
      // E.g. during evacuation.
      return forwarded_oop;
    }

    shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
    shenandoah_assert_not_forwarded(p, forwarded_oop);
    shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());

    // If this fails, another thread wrote to p before us; it will be logged in SATB
    // and the reference will be updated later.
    oop witness = cas_oop(forwarded_oop, p, heap_oop);

    if (!oopDesc::equals_raw(witness, heap_oop)) {
      // CAS failed, someone beat us to it. Normally, we would return the failure witness,
      // because that would be the proper write of the to-space object, enforced by strong barriers.
      // However, there is a corner case with arraycopy. A Java thread can beat us with an
      // arraycopy that first copies the array, which potentially contains from-space refs,
      // and only afterwards updates all from-space refs to to-space refs. That leaves a
      // short window where the new array elements can still be from-space.
      // In this case, we can just resolve the result again. As we resolve, we need to consider
      // that the contended write might have been NULL.
      oop result = ShenandoahBarrierSet::resolve_forwarded(witness);
      shenandoah_assert_not_forwarded_except(p, result, (result == NULL));
      shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc());
      return result;
    } else {
      // Success! We have updated with the known to-space copy. We have already asserted it is sane.
      return forwarded_oop;
    }
  } else {
    shenandoah_assert_not_forwarded(p, heap_oop);
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

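// Checks whether the GC has been cancelled; when suspendible workers are in use,
// also yields to the suspendible thread set if a safepoint is pending. Returns
// true iff the GC is cancelled.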
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_gc();
  }

  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_gc.set(CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

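// Fast-path allocation from the thread's GCLAB. Falls back to the slow path
// (which may refill the GCLAB) when the bump allocation fails, and returns NULL
// for threads that have no GCLAB at all.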
inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fallback to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

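// Copies p into to-space and races to install the forwarding pointer. The losing
// threads roll back (or fill) their private copies and return the winning copy;
// allocation failure enters the evacuation OOM protocol and returns the current
// forwardee.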
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words((HeapWord*) p, copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (oopDesc::equals_raw(result, copy_val)) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}

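// Returns true if the entry still needs to be marked. With RESOLVE, the entry is
// first resolved through the forwarding pointer before consulting the marking context.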
template<bool RESOLVE>
inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = oop(entry);
  if (RESOLVE) {
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  }
  return !_marking_context->is_marked(obj);
}

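// Collection set membership test; p may be an oop or a raw in-heap address.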
template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(collection_set() != NULL, "Sanity");
  assert(is_in(obj), "should be in heap");

  return collection_set()->is_in(obj);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const {
  return _gc_state.is_set(TRAVERSAL);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

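// Iterates over all objects marked in the complete marking context, applying cl
// to each. Below TAMS the mark bitmap drives the walk; above TAMS every object is
// implicitly live and the walk proceeds by object size.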
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, so that the
    // prefetch gets enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the forwarding
    // pointer precedes the object, we can skip over it. Once we cannot trust the
    // bitmap, there is no point in prefetching the oop contents, as oop->size()
    // will touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}

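// Adapters that turn an oop closure into an ObjectClosure by iterating the oop
// fields of each visited object; the bounded variant restricts field iteration
// to a given memory range.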
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

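// Applies the oop closure to the fields of all marked objects in the region, up
// to top. Humongous regions are walked from their start region, with field
// iteration bounded to the [bottom, top) range of the given region.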
template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

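// Bounds-checked region lookup: returns NULL for an out-of-range index.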
inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP