// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/marker.h"

#include "platform/atomic.h"
#include "vm/allocation.h"
#include "vm/dart_api_state.h"
#include "vm/heap/pages.h"
#include "vm/heap/pointer_block.h"
#include "vm/isolate.h"
#include "vm/log.h"
#include "vm/object_id_ring.h"
#include "vm/raw_object.h"
#include "vm/stack_frame.h"
#include "vm/thread_barrier.h"
#include "vm/thread_pool.h"
#include "vm/thread_registry.h"
#include "vm/timeline.h"
#include "vm/visitor.h"

namespace dart {

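// Visits object pointers during marking and pushes newly marked objects onto
// the marking stack. The 'sync' template parameter selects between atomic
// mark-bit acquisition (used by the parallel and concurrent markers) and
// unsynchronized marking (used when marking on a single thread).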
template <bool sync>
class MarkingVisitorBase : public ObjectPointerVisitor {
 public:
  MarkingVisitorBase(IsolateGroup* isolate_group,
                     PageSpace* page_space,
                     MarkingStack* marking_stack,
                     MarkingStack* deferred_marking_stack)
      : ObjectPointerVisitor(isolate_group),
        thread_(Thread::Current()),
        page_space_(page_space),
        work_list_(marking_stack),
        deferred_work_list_(deferred_marking_stack),
        delayed_weak_properties_(nullptr),
        marked_bytes_(0),
        marked_micros_(0) {
    ASSERT(thread_->isolate_group() == isolate_group);
  }
  ~MarkingVisitorBase() {}

  uintptr_t marked_bytes() const { return marked_bytes_; }
  int64_t marked_micros() const { return marked_micros_; }
  void AddMicros(int64_t micros) { marked_micros_ += micros; }

  bool ProcessPendingWeakProperties() {
    bool marked = false;
    WeakPropertyPtr cur_weak = delayed_weak_properties_;
    delayed_weak_properties_ = nullptr;
    while (cur_weak != nullptr) {
      uword next_weak = cur_weak->ptr()->next_;
      ObjectPtr raw_key = cur_weak->ptr()->key_;
      // Reset the next pointer in the weak property.
      cur_weak->ptr()->next_ = 0;
      if (raw_key->ptr()->IsMarked()) {
        ObjectPtr raw_val = cur_weak->ptr()->value_;
        marked =
            marked || (raw_val->IsHeapObject() && !raw_val->ptr()->IsMarked());

        // The key is marked so we make sure to properly visit all pointers
        // originating from this weak property.
        cur_weak->ptr()->VisitPointersNonvirtual(this);
      } else {
        // Requeue this weak property to be handled later.
        EnqueueWeakProperty(cur_weak);
      }
      // Advance to next weak property in the queue.
      cur_weak = static_cast<WeakPropertyPtr>(next_weak);
    }
    return marked;
  }

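  // Pops objects from the marking stack and visits them until both the stack
  // and the locally delayed weak properties are exhausted. Weak properties
  // are handled specially because their fate depends on whether the key is
  // marked.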
  void DrainMarkingStack() {
    ObjectPtr raw_obj = work_list_.Pop();
    if ((raw_obj == nullptr) && ProcessPendingWeakProperties()) {
      raw_obj = work_list_.Pop();
    }

    if (raw_obj == nullptr) {
      return;
    }

    do {
      do {
        // First drain the marking stacks.
        const intptr_t class_id = raw_obj->GetClassId();

        intptr_t size;
        if (class_id != kWeakPropertyCid) {
          size = raw_obj->ptr()->VisitPointersNonvirtual(this);
        } else {
          WeakPropertyPtr raw_weak = static_cast<WeakPropertyPtr>(raw_obj);
          size = ProcessWeakProperty(raw_weak, /* did_mark */ true);
        }
        marked_bytes_ += size;

        raw_obj = work_list_.Pop();
      } while (raw_obj != nullptr);

      // Marking stack is empty.
      ProcessPendingWeakProperties();

      // Check whether any further work was pushed either by other markers or
      // by the handling of weak properties.
      raw_obj = work_list_.Pop();
    } while (raw_obj != nullptr);
  }

  // Races: The concurrent marker is racing with the mutator, but this race is
  // harmless. The concurrent marker will only visit objects that were created
  // before the marker started. It will ignore all new-space objects based on
  // pointer alignment, and it will ignore old-space objects created after the
  // marker started because old-space objects allocated while marking is in
  // progress are allocated black (mark bit set). When visiting object slots,
  // the marker can see either the value it had when marking started (because
  // spawning the marker task creates acq-rel ordering) or any value later
  // stored into that slot. Because pointer slots always contain pointers
  // (i.e., we don't do any in-place unboxing like V8), any value we read from
  // the slot is safe.
  NO_SANITIZE_THREAD
  ObjectPtr LoadPointerIgnoreRace(ObjectPtr* ptr) { return *ptr; }

  void VisitPointers(ObjectPtr* first, ObjectPtr* last) {
    for (ObjectPtr* current = first; current <= last; current++) {
      MarkObject(LoadPointerIgnoreRace(current));
    }
  }

  void EnqueueWeakProperty(WeakPropertyPtr raw_weak) {
    ASSERT(raw_weak->IsHeapObject());
    ASSERT(raw_weak->IsOldObject());
    ASSERT(raw_weak->IsWeakProperty());
    ASSERT(raw_weak->ptr()->IsMarked());
    ASSERT(raw_weak->ptr()->next_ == 0);
    raw_weak->ptr()->next_ = static_cast<uword>(delayed_weak_properties_);
    delayed_weak_properties_ = raw_weak;
  }

  intptr_t ProcessWeakProperty(WeakPropertyPtr raw_weak, bool did_mark) {
    // The fate of the weak property is determined by its key.
    ObjectPtr raw_key = LoadPointerIgnoreRace(&raw_weak->ptr()->key_);
    if (raw_key->IsHeapObject() && raw_key->IsOldObject() &&
        !raw_key->ptr()->IsMarked()) {
      // Key was white. Enqueue the weak property.
      if (did_mark) {
        EnqueueWeakProperty(raw_weak);
      }
      return raw_weak->ptr()->HeapSize();
    }
    // Key is gray or black. Make the weak property black.
    return raw_weak->ptr()->VisitPointersNonvirtual(this);
  }

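  // Processes objects whose marking was deferred, e.g. Instructions objects
  // that may be write-protected under --write-protect-code. Deferred objects
  // are scanned even if already marked; their size is only counted when this
  // visitor wins the marking race.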
  void ProcessDeferredMarking() {
    ObjectPtr raw_obj;
    while ((raw_obj = deferred_work_list_.Pop()) != nullptr) {
      ASSERT(raw_obj->IsHeapObject() && raw_obj->IsOldObject());
      // N.B. We are scanning the object even if it is already marked.
      bool did_mark = TryAcquireMarkBit(raw_obj);
      const intptr_t class_id = raw_obj->GetClassId();
      intptr_t size;
      if (class_id != kWeakPropertyCid) {
        size = raw_obj->ptr()->VisitPointersNonvirtual(this);
      } else {
        WeakPropertyPtr raw_weak = static_cast<WeakPropertyPtr>(raw_obj);
        size = ProcessWeakProperty(raw_weak, did_mark);
      }
      // Add the size only if we win the marking race to prevent
      // double-counting.
      if (did_mark) {
        marked_bytes_ += size;
      }
    }
  }

  void FinalizeDeferredMarking() {
    ProcessDeferredMarking();
    deferred_work_list_.Finalize();
  }

  // Called when all marking is complete.
  void Finalize() {
    work_list_.Finalize();
    // Clear pending weak properties.
    WeakPropertyPtr cur_weak = delayed_weak_properties_;
    delayed_weak_properties_ = nullptr;
    intptr_t weak_properties_cleared = 0;
    while (cur_weak != nullptr) {
      uword next_weak = cur_weak->ptr()->next_;
      cur_weak->ptr()->next_ = 0;
      RELEASE_ASSERT(!cur_weak->ptr()->key_->ptr()->IsMarked());
      WeakProperty::Clear(cur_weak);
      weak_properties_cleared++;
      // Advance to next weak property in the queue.
      cur_weak = static_cast<WeakPropertyPtr>(next_weak);
    }
  }

  void AbandonWork() {
    work_list_.AbandonWork();
    deferred_work_list_.AbandonWork();
  }

 private:
  void PushMarked(ObjectPtr raw_obj) {
    ASSERT(raw_obj->IsHeapObject());
    ASSERT(raw_obj->IsOldObject());

    // Push the marked object on the marking stack.
    ASSERT(raw_obj->ptr()->IsMarked());
    work_list_.Push(raw_obj);
  }

  static bool TryAcquireMarkBit(ObjectPtr raw_obj) {
    if (FLAG_write_protect_code && raw_obj->IsInstructions()) {
      // A non-writable alias mapping may exist for instruction pages.
      raw_obj = OldPage::ToWritable(raw_obj);
    }
    if (!sync) {
      raw_obj->ptr()->SetMarkBitUnsynchronized();
      return true;
    } else {
      return raw_obj->ptr()->TryAcquireMarkBit();
    }
  }

  DART_FORCE_INLINE
  void MarkObject(ObjectPtr raw_obj) {
    // Fast exit if the raw object is immediate or in new space. No memory
    // access.
    if (raw_obj->IsSmiOrNewObject()) {
      return;
    }

    // While it might seem this is redundant with TryAcquireMarkBit, we must
    // do this check first to avoid attempting an atomic::fetch_and on the
    // read-only vm-isolate or image pages, which can fault even if there is
    // no change in the value.
    // Doing this before checking for an Instructions object avoids
    // unnecessary queueing of pre-marked objects.
    if (raw_obj->ptr()->IsMarked()) {
      return;
    }

    intptr_t class_id = raw_obj->GetClassId();
    ASSERT(class_id != kFreeListElement);

    if (sync && UNLIKELY(class_id == kInstructionsCid)) {
      // If this is the concurrent marker, this object may be non-writable due
      // to W^X (--write-protect-code).
      deferred_work_list_.Push(raw_obj);
      return;
    }

    if (!TryAcquireMarkBit(raw_obj)) {
      // Already marked.
      return;
    }

    PushMarked(raw_obj);
  }

  Thread* thread_;
  PageSpace* page_space_;
  MarkerWorkList work_list_;
  MarkerWorkList deferred_work_list_;
  WeakPropertyPtr delayed_weak_properties_;
  uintptr_t marked_bytes_;
  int64_t marked_micros_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MarkingVisitorBase);
};

typedef MarkingVisitorBase<false> UnsyncMarkingVisitor;
typedef MarkingVisitorBase<true> SyncMarkingVisitor;

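// An object is unreachable if it is null or an old-space object that was not
// marked; immediates and new-space objects are always treated as reachable.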
static bool IsUnreachable(const ObjectPtr raw_obj) {
  if (!raw_obj->IsHeapObject()) {
    return false;
  }
  if (raw_obj == Object::null()) {
    return true;
  }
  if (!raw_obj->IsOldObject()) {
    return false;
  }
  return !raw_obj->ptr()->IsMarked();
}

class MarkingWeakVisitor : public HandleVisitor {
 public:
  explicit MarkingWeakVisitor(Thread* thread)
      : HandleVisitor(thread),
        class_table_(thread->isolate_group()->shared_class_table()) {}

  void VisitHandle(uword addr) {
    FinalizablePersistentHandle* handle =
        reinterpret_cast<FinalizablePersistentHandle*>(addr);
    ObjectPtr raw_obj = handle->raw();
    if (IsUnreachable(raw_obj)) {
      handle->UpdateUnreachable(thread()->isolate_group());
    }
  }

 private:
  SharedClassTable* class_table_;

  DISALLOW_COPY_AND_ASSIGN(MarkingWeakVisitor);
};

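// Runs before marking starts: releases the isolate group's store buffers and,
// when the interpreter is in use, clears its lookup caches.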
void GCMarker::Prologue() {
  isolate_group_->ReleaseStoreBuffers();

#ifndef DART_PRECOMPILED_RUNTIME
  isolate_group_->ForEachIsolate(
      [&](Isolate* isolate) {
        Thread* mutator_thread = isolate->mutator_thread();
        if (mutator_thread != NULL) {
          Interpreter* interpreter = mutator_thread->interpreter();
          if (interpreter != NULL) {
            interpreter->ClearLookupCache();
          }
        }
      },
      /*at_safepoint=*/true);
#endif
}

void GCMarker::Epilogue() {}

enum RootSlices {
  kIsolate = 0,
  kNumFixedRootSlices = 1,
};

void GCMarker::ResetSlices() {
  ASSERT(Thread::Current()->IsAtSafepoint());

  root_slices_started_ = 0;
  root_slices_finished_ = 0;
  root_slices_count_ = kNumFixedRootSlices;
  new_page_ = heap_->new_space()->head();
  for (NewPage* p = new_page_; p != nullptr; p = p->next()) {
    root_slices_count_++;
  }

  weak_slices_started_ = 0;
}

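// Claims root slices one at a time via an atomic counter so that several
// marker tasks can divide the root set (the isolate-group roots plus one
// slice per new-space page) among themselves without further coordination.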
void GCMarker::IterateRoots(ObjectPointerVisitor* visitor) {
  for (;;) {
    intptr_t slice = root_slices_started_.fetch_add(1);
    if (slice >= root_slices_count_) {
      break;  // No more slices.
    }

    switch (slice) {
      case kIsolate: {
        TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
                                      "ProcessIsolateGroupRoots");
        isolate_group_->VisitObjectPointers(
            visitor, ValidationPolicy::kDontValidateFrames);
        break;
      }
      default: {
        NewPage* page;
        {
          MonitorLocker ml(&root_slices_monitor_);
          page = new_page_;
          ASSERT(page != nullptr);
          new_page_ = page->next();
        }
        TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ProcessNewSpace");
        page->VisitObjectPointers(visitor);
      }
    }

    MonitorLocker ml(&root_slices_monitor_);
    root_slices_finished_++;
    if (root_slices_finished_ == root_slices_count_) {
      ml.Notify();
    }
  }
}

enum WeakSlices {
  kWeakHandles = 0,
  kWeakTables,
  kObjectIdRing,
  kRememberedSet,
  kNumWeakSlices,
};

void GCMarker::IterateWeakRoots(Thread* thread) {
  for (;;) {
    intptr_t slice = weak_slices_started_.fetch_add(1);
    if (slice >= kNumWeakSlices) {
      return;  // No more slices.
    }

    switch (slice) {
      case kWeakHandles:
        ProcessWeakHandles(thread);
        break;
      case kWeakTables:
        ProcessWeakTables(thread);
        break;
      case kObjectIdRing:
        ProcessObjectIdTable(thread);
        break;
      case kRememberedSet:
        ProcessRememberedSet(thread);
        break;
      default:
        UNREACHABLE();
    }
  }
}

void GCMarker::ProcessWeakHandles(Thread* thread) {
  TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessWeakHandles");
  MarkingWeakVisitor visitor(thread);
  ApiState* state = isolate_group_->api_state();
  ASSERT(state != NULL);
  isolate_group_->VisitWeakPersistentHandles(&visitor);
}

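// Invalidates entries in the old-space weak tables whose keys were not
// marked, so the tables do not retain associations for dead objects.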
void GCMarker::ProcessWeakTables(Thread* thread) {
  TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessWeakTables");
  for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
    WeakTable* table =
        heap_->GetWeakTable(Heap::kOld, static_cast<Heap::WeakSelector>(sel));
    intptr_t size = table->size();
    for (intptr_t i = 0; i < size; i++) {
      if (table->IsValidEntryAtExclusive(i)) {
        ObjectPtr raw_obj = table->ObjectAtExclusive(i);
        ASSERT(raw_obj->IsHeapObject());
        if (!raw_obj->ptr()->IsMarked()) {
          table->InvalidateAtExclusive(i);
        }
      }
    }
  }
}

void GCMarker::ProcessRememberedSet(Thread* thread) {
  TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessRememberedSet");
  // Filter collected objects from the remembered set.
  StoreBuffer* store_buffer = isolate_group_->store_buffer();
  StoreBufferBlock* reading = store_buffer->TakeBlocks();
  StoreBufferBlock* writing = store_buffer->PopNonFullBlock();
  while (reading != NULL) {
    StoreBufferBlock* next = reading->next();
    // Generated code appends to store buffers; tell MemorySanitizer.
    MSAN_UNPOISON(reading, sizeof(*reading));
    while (!reading->IsEmpty()) {
      ObjectPtr raw_object = reading->Pop();
      ASSERT(!raw_object->IsForwardingCorpse());
      ASSERT(raw_object->ptr()->IsRemembered());
      if (raw_object->ptr()->IsMarked()) {
        writing->Push(raw_object);
        if (writing->IsFull()) {
          store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
          writing = store_buffer->PopNonFullBlock();
        }
      }
    }
    reading->Reset();
    // Return the emptied block for recycling (no need to check threshold).
    store_buffer->PushBlock(reading, StoreBuffer::kIgnoreThreshold);
    reading = next;
  }
  store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
}

class ObjectIdRingClearPointerVisitor : public ObjectPointerVisitor {
 public:
  explicit ObjectIdRingClearPointerVisitor(IsolateGroup* isolate_group)
      : ObjectPointerVisitor(isolate_group) {}

  void VisitPointers(ObjectPtr* first, ObjectPtr* last) {
    for (ObjectPtr* current = first; current <= last; current++) {
      ObjectPtr raw_obj = *current;
      ASSERT(raw_obj->IsHeapObject());
      if (raw_obj->IsOldObject() && !raw_obj->ptr()->IsMarked()) {
        // Object has become garbage. Replace it with null.
        *current = Object::null();
      }
    }
  }
};

void GCMarker::ProcessObjectIdTable(Thread* thread) {
#ifndef PRODUCT
  TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessObjectIdTable");
  ObjectIdRingClearPointerVisitor visitor(isolate_group_);
  isolate_group_->VisitObjectIdRingPointers(&visitor);
#endif  // !PRODUCT
}

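// A task that performs one share of the parallel marking work driven by
// GCMarker::MarkObjects: it marks roots and drains the shared marking stack
// together with the other marker tasks, synchronizing with them through a
// ThreadBarrier and the shared 'num_busy' counter.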
class ParallelMarkTask : public ThreadPool::Task {
 public:
  ParallelMarkTask(GCMarker* marker,
                   IsolateGroup* isolate_group,
                   MarkingStack* marking_stack,
                   ThreadBarrier* barrier,
                   SyncMarkingVisitor* visitor,
                   RelaxedAtomic<uintptr_t>* num_busy)
      : marker_(marker),
        isolate_group_(isolate_group),
        marking_stack_(marking_stack),
        barrier_(barrier),
        visitor_(visitor),
        num_busy_(num_busy) {}

  virtual void Run() {
    bool result = Thread::EnterIsolateGroupAsHelper(
        isolate_group_, Thread::kMarkerTask, /*bypass_safepoint=*/true);
    ASSERT(result);

    RunEnteredIsolateGroup();

    Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);

    // This task is done. Notify the original thread.
    barrier_->Exit();
  }

  void RunEnteredIsolateGroup() {
    {
      Thread* thread = Thread::Current();
      TIMELINE_FUNCTION_GC_DURATION(thread, "ParallelMark");
      int64_t start = OS::GetCurrentMonotonicMicros();

      // Phase 1: Iterate over roots and drain marking stack in tasks.
      marker_->IterateRoots(visitor_);

      visitor_->ProcessDeferredMarking();

      bool more_to_mark = false;
      do {
        do {
          visitor_->DrainMarkingStack();

          // I can't find more work right now. If no other task is busy,
          // then there will never be more work (NB: 1 is *before* decrement).
          if (num_busy_->fetch_sub(1u) == 1) break;

          // Wait for some work to appear.
          // TODO(40695): Replace busy-waiting with a solution using Monitor,
          // and redraw the boundaries between stack/visitor/task as needed.
          while (marking_stack_->IsEmpty() && num_busy_->load() > 0) {
          }

          // If no tasks are busy, there will never be more work.
          if (num_busy_->load() == 0) break;

          // I saw some work; get busy and compete for it.
          num_busy_->fetch_add(1u);
        } while (true);
        // Wait for all markers to stop.
        barrier_->Sync();
#if defined(DEBUG)
        ASSERT(num_busy_->load() == 0);
        // Caveat: must not allow any marker to continue past the barrier
        // before we checked num_busy, otherwise one of them might rush
        // ahead and increment it.
        barrier_->Sync();
#endif
        // Check if we have any pending properties with marked keys.
        // Those might have been marked by another marker.
        more_to_mark = visitor_->ProcessPendingWeakProperties();
        if (more_to_mark) {
          // We have more work to do. Notify others.
          num_busy_->fetch_add(1u);
        }

        // Wait for all other markers to finish processing their pending
        // weak properties and decide if they need to continue marking.
        // Caveat: we need two barriers here to make this decision in lock
        // step between all markers and the main thread.
        barrier_->Sync();
        if (!more_to_mark && (num_busy_->load() > 0)) {
          // All markers continue to mark as long as any single marker has
          // some work to do.
          num_busy_->fetch_add(1u);
          more_to_mark = true;
        }
        barrier_->Sync();
      } while (more_to_mark);

      // Phase 2: deferred marking.
      visitor_->FinalizeDeferredMarking();
      barrier_->Sync();

      // Phase 3: Weak processing.
      marker_->IterateWeakRoots(thread);
      barrier_->Sync();

      // Phase 4: Gather statistics from all markers.
      int64_t stop = OS::GetCurrentMonotonicMicros();
      visitor_->AddMicros(stop - start);
      if (FLAG_log_marker_tasks) {
        THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
                  visitor_->marked_bytes(), visitor_->marked_micros());
      }
      marker_->FinalizeResultsFrom(visitor_);

      delete visitor_;
    }
  }

 private:
  GCMarker* marker_;
  IsolateGroup* isolate_group_;
  MarkingStack* marking_stack_;
  ThreadBarrier* barrier_;
  SyncMarkingVisitor* visitor_;
  RelaxedAtomic<uintptr_t>* num_busy_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMarkTask);
};

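// A task that performs the background portion of concurrent marking: it marks
// roots and drains the marking stack while the mutator keeps running, and the
// last such task to finish moves the page space into the
// kAwaitingFinalization phase.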
class ConcurrentMarkTask : public ThreadPool::Task {
 public:
  ConcurrentMarkTask(GCMarker* marker,
                     IsolateGroup* isolate_group,
                     PageSpace* page_space,
                     SyncMarkingVisitor* visitor)
      : marker_(marker),
        isolate_group_(isolate_group),
        page_space_(page_space),
        visitor_(visitor) {
#if defined(DEBUG)
    MonitorLocker ml(page_space_->tasks_lock());
    ASSERT(page_space_->phase() == PageSpace::kMarking);
#endif
  }

  virtual void Run() {
    bool result = Thread::EnterIsolateGroupAsHelper(
        isolate_group_, Thread::kMarkerTask, /*bypass_safepoint=*/true);
    ASSERT(result);
    {
      TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ConcurrentMark");
      int64_t start = OS::GetCurrentMonotonicMicros();

      marker_->IterateRoots(visitor_);

      visitor_->DrainMarkingStack();
      int64_t stop = OS::GetCurrentMonotonicMicros();
      visitor_->AddMicros(stop - start);
      if (FLAG_log_marker_tasks) {
        THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
                  visitor_->marked_bytes(), visitor_->marked_micros());
      }
    }

    // Exit isolate cleanly *before* notifying it, to avoid shutdown race.
    Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
    // This marker task is done. Notify the original isolate.
    {
      MonitorLocker ml(page_space_->tasks_lock());
      page_space_->set_tasks(page_space_->tasks() - 1);
      page_space_->set_concurrent_marker_tasks(
          page_space_->concurrent_marker_tasks() - 1);
      ASSERT(page_space_->phase() == PageSpace::kMarking);
      if (page_space_->concurrent_marker_tasks() == 0) {
        page_space_->set_phase(PageSpace::kAwaitingFinalization);
      }
      ml.NotifyAll();
    }
  }

 private:
  GCMarker* marker_;
  IsolateGroup* isolate_group_;
  PageSpace* page_space_;
  SyncMarkingVisitor* visitor_;

  DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkTask);
};

template <class MarkingVisitorType>
void GCMarker::FinalizeResultsFrom(MarkingVisitorType* visitor) {
  {
    MutexLocker ml(&stats_mutex_);
    marked_bytes_ += visitor->marked_bytes();
    marked_micros_ += visitor->marked_micros();
  }
  visitor->Finalize();
}

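// Estimates the aggregate marking throughput, in words per microsecond,
// across all marker jobs; the result is kept strictly positive.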
intptr_t GCMarker::MarkedWordsPerMicro() const {
  intptr_t marked_words_per_job_micro;
  if (marked_micros_ == 0) {
    marked_words_per_job_micro = marked_words();  // Prevent division by zero.
  } else {
    marked_words_per_job_micro = marked_words() / marked_micros_;
  }
  if (marked_words_per_job_micro == 0) {
    marked_words_per_job_micro = 1;  // Prevent division by zero.
  }
  intptr_t jobs = FLAG_marker_tasks;
  if (jobs == 0) {
    jobs = 1;  // Marking on main thread is still one job.
  }
  return marked_words_per_job_micro * jobs;
}

GCMarker::GCMarker(IsolateGroup* isolate_group, Heap* heap)
    : isolate_group_(isolate_group),
      heap_(heap),
      marking_stack_(),
      visitors_(),
      marked_bytes_(0),
      marked_micros_(0) {
  visitors_ = new SyncMarkingVisitor*[FLAG_marker_tasks];
  for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
    visitors_[i] = NULL;
  }
}

GCMarker::~GCMarker() {
  // Cleanup in case isolate shutdown happens after starting the concurrent
  // marker and before finalizing.
  if (isolate_group_->marking_stack() != NULL) {
    isolate_group_->DisableIncrementalBarrier();
    for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
      visitors_[i]->AbandonWork();
      delete visitors_[i];
    }
  }
  delete[] visitors_;
}

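// Starts concurrent marking: enables the incremental write barrier, spawns
// one ConcurrentMarkTask per FLAG_marker_tasks, and waits until all root
// slices have been marked before the caller leaves its safepoint.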
void GCMarker::StartConcurrentMark(PageSpace* page_space) {
  isolate_group_->EnableIncrementalBarrier(&marking_stack_,
                                           &deferred_marking_stack_);

  const intptr_t num_tasks = FLAG_marker_tasks;

  {
    // Bulk increase task count before starting any task, instead of
    // incrementing as each task is started, to prevent a task that races
    // ahead from falsely believing it was the last task to complete.
    MonitorLocker ml(page_space->tasks_lock());
    ASSERT(page_space->phase() == PageSpace::kDone);
    page_space->set_phase(PageSpace::kMarking);
    page_space->set_tasks(page_space->tasks() + num_tasks);
    page_space->set_concurrent_marker_tasks(
        page_space->concurrent_marker_tasks() + num_tasks);
  }

  ResetSlices();
  for (intptr_t i = 0; i < num_tasks; i++) {
    ASSERT(visitors_[i] == NULL);
    visitors_[i] = new SyncMarkingVisitor(
        isolate_group_, page_space, &marking_stack_, &deferred_marking_stack_);

    // Begin marking on a helper thread.
    bool result = Dart::thread_pool()->Run<ConcurrentMarkTask>(
        this, isolate_group_, page_space, visitors_[i]);
    ASSERT(result);
  }

  isolate_group_->DeferredMarkLiveTemporaries();

  // Wait for roots to be marked before exiting safepoint.
  MonitorLocker ml(&root_slices_monitor_);
  while (root_slices_finished_ != root_slices_count_) {
    ml.Wait();
  }
}

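// Finishes marking (or performs it entirely when no concurrent marking was
// started): disables the incremental barrier if it was enabled, then marks
// either on the main thread alone (FLAG_marker_tasks == 0) or with a group of
// ParallelMarkTasks, the last of which runs on the current thread.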
void GCMarker::MarkObjects(PageSpace* page_space) {
  if (isolate_group_->marking_stack() != NULL) {
    isolate_group_->DisableIncrementalBarrier();
  }

  Prologue();
  {
    Thread* thread = Thread::Current();
    const int num_tasks = FLAG_marker_tasks;
    if (num_tasks == 0) {
      TIMELINE_FUNCTION_GC_DURATION(thread, "Mark");
      int64_t start = OS::GetCurrentMonotonicMicros();
      // Mark everything on main thread.
      UnsyncMarkingVisitor mark(isolate_group_, page_space, &marking_stack_,
                                &deferred_marking_stack_);
      ResetSlices();
      IterateRoots(&mark);
      mark.ProcessDeferredMarking();
      mark.DrainMarkingStack();
      mark.FinalizeDeferredMarking();
      IterateWeakRoots(thread);
      // All marking done; detach code, etc.
      int64_t stop = OS::GetCurrentMonotonicMicros();
      mark.AddMicros(stop - start);
      FinalizeResultsFrom(&mark);
    } else {
      ThreadBarrier barrier(num_tasks, heap_->barrier(), heap_->barrier_done());
      ResetSlices();
      // Used to coordinate draining among tasks; all start out as 'busy'.
      RelaxedAtomic<uintptr_t> num_busy(num_tasks);
      // Phase 1: Iterate over roots and drain marking stack in tasks.
      for (intptr_t i = 0; i < num_tasks; ++i) {
        SyncMarkingVisitor* visitor;
        if (visitors_[i] != NULL) {
          visitor = visitors_[i];
          visitors_[i] = NULL;
        } else {
          visitor =
              new SyncMarkingVisitor(isolate_group_, page_space,
                                     &marking_stack_, &deferred_marking_stack_);
        }
        if (i < (num_tasks - 1)) {
          // Begin marking on a helper thread.
          bool result = Dart::thread_pool()->Run<ParallelMarkTask>(
              this, isolate_group_, &marking_stack_, &barrier, visitor,
              &num_busy);
          ASSERT(result);
        } else {
          // Last worker is the main thread.
          ParallelMarkTask task(this, isolate_group_, &marking_stack_,
                                &barrier, visitor, &num_busy);
          task.RunEnteredIsolateGroup();
          barrier.Exit();
        }
      }
    }
  }
  Epilogue();
}

}  // namespace dart