/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
    _workers(workers),
    _page_table(page_table),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
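  // For example, 6 workers are rounded down to 4 stripes, while 8 workers
  // give 8 stripes (subject to the ZMarkStripesMax cap below).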
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }

  ~ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Retire TLAB
    ZThreadLocalAllocBuffer::retire(thread);
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::from_address(addr)->is_objArray();
}

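// Pushes a chunk of an object array onto the marking stripe for addr. Note
// that the stack entry does not hold a raw pointer; it encodes the chunk's
// heap offset in ZMarkPartialArrayMinSize units and its length in oops,
// which is why the address must be aligned.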
void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
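  //
  // For example, assuming a 4K ZMarkPartialArrayMinSize, an array occupying
  // [0x100100, 0x110100) gets middle_start = 0x101000 and middle_end = 0x110000.
  // The sub-4K leading part [0x100100, 0x101000) is followed inline below,
  // while the trailing part [0x110000, 0x110100) and the aligned middle parts
  // are pushed as partial array entries to be followed (and possibly split
  // again) later.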
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
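  // Each loop iteration pushes (roughly) the upper half of the remaining
  // middle range and continues with the lower half, so the middle is
  // published as O(log n) partial entries of decreasing size rather than
  // as a single entry processed by one worker.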
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

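// For object arrays the klass is marked through the barrier closure, while
// the element range is followed (and possibly split) via follow_array().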
void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _page_table->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
  } else {
    follow_object(ZOop::from_address(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

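  // Termination stage 1: rendezvous until either all workers have entered
  // stage 1 (terminate) or some worker has found more work and backed out
  // of stage 0 (continue marking). Idling between attempts keeps this spin
  // from burning CPU while the other workers finish up.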
  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

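  // Keep draining the stripe and stealing from other stripes until no
  // work can be found anywhere and the termination protocol succeeds.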
  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

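  // Sampling the clock on every call would add overhead to the mark loop,
  // so the time is only checked every _check_interval (200) calls.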
  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkConcurrentRootsTask : public ZTask {
private:
  SuspendibleThreadSetJoiner          _sts_joiner;
  ZConcurrentRootsIterator            _roots;
  ZMarkConcurrentRootsIteratorClosure _cl;

public:
  ZMarkConcurrentRootsTask(ZMark* mark) :
      ZTask("ZMarkConcurrentRootsTask"),
      _sts_joiner(true /* active */),
      _roots(ClassLoaderData::_claim_strong),
      _cl() {
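    // Hold the ClassLoaderDataGraph_lock while clearing the claimed marks
    // and scanning the concurrent roots, so that the set of class loaders
    // stays stable for the duration of the task. The STS joiner above
    // allows this thread to yield at safepoints while doing so.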
    ClassLoaderDataGraph_lock->lock();
    ClassLoaderDataGraph::clear_claimed_marks();
  }

  ~ZMarkConcurrentRootsTask() {
    ClassLoaderDataGraph_lock->unlock();
  }

  virtual void work() {
    _roots.oops_do(&_cl);
  }
};

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

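// Concurrent roots are only scanned in the initial mark pass; when marking
// is continued after an unsuccessful mark end, only the remaining stripe
// work and mark stacks need to be processed.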
void ZMark::mark(bool initial) {
  if (initial) {
    ZMarkConcurrentRootsTask task(this);
    _workers->run_concurrent(&task);
  }

  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}