// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/sweeper.h"

#include "vm/globals.h"
#include "vm/heap/freelist.h"
#include "vm/heap/heap.h"
#include "vm/heap/pages.h"
#include "vm/heap/safepoint.h"
#include "vm/lockers.h"
#include "vm/thread_pool.h"
#include "vm/timeline.h"

namespace dart {

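// Sweeps a single page: clears the mark bits of surviving objects and
// coalesces runs of unmarked objects into free-list entries. Returns whether
// the page still contains live objects after the sweep.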
bool GCSweeper::SweepPage(OldPage* page, FreeList* freelist, bool locked) {
  ASSERT(!page->is_image_page());

  // Keep track of whether this page is still in use.
  intptr_t used_in_bytes = 0;

  bool is_executable = (page->type() == OldPage::kExecutable);
  uword start = page->object_start();
  uword end = page->object_end();
  uword current = start;

  while (current < end) {
    intptr_t obj_size;
    ObjectPtr raw_obj = ObjectLayout::FromAddr(current);
    ASSERT(OldPage::Of(raw_obj) == page);
    if (raw_obj->ptr()->IsMarked()) {
      // Found marked object. Clear the mark bit and update swept bytes.
      raw_obj->ptr()->ClearMarkBit();
      obj_size = raw_obj->ptr()->HeapSize();
      used_in_bytes += obj_size;
    } else {
      // Unmarked object: coalesce it with any following unmarked objects
      // into a single free block.
      uword free_end = current + raw_obj->ptr()->HeapSize();
      while (free_end < end) {
        ObjectPtr next_obj = ObjectLayout::FromAddr(free_end);
        if (next_obj->ptr()->IsMarked()) {
          // Reached the end of the free block.
          break;
        }
        // Expand the free block by the size of this object.
        free_end += next_obj->ptr()->HeapSize();
      }
      obj_size = free_end - current;
      if (is_executable) {
        // Fill the freed range of executable memory with break instructions.
        uword cursor = current;
        uword end = current + obj_size;
        while (cursor < end) {
          *reinterpret_cast<uword*>(cursor) = kBreakInstructionFiller;
          cursor += kWordSize;
        }
      } else {
#if defined(DEBUG)
        memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
#endif  // DEBUG
      }
      if ((current != start) || (free_end != end)) {
        // Only add to the free list if not covering the whole page.
        if (locked) {
          freelist->FreeLocked(current, obj_size);
        } else {
          freelist->Free(current, obj_size);
        }
      }
    }
    current += obj_size;
  }
  ASSERT(current == end);

  page->set_used_in_bytes(used_in_bytes);
  return used_in_bytes != 0;  // In use.
}

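// Sweeps a page containing a single large object. Returns the size of the
// surviving object in words, or 0 if the object is unmarked and the page can
// be released.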
intptr_t GCSweeper::SweepLargePage(OldPage* page) {
  ASSERT(!page->is_image_page());

  intptr_t words_to_end = 0;
  ObjectPtr raw_obj = ObjectLayout::FromAddr(page->object_start());
  ASSERT(OldPage::Of(raw_obj) == page);
  if (raw_obj->ptr()->IsMarked()) {
    raw_obj->ptr()->ClearMarkBit();
    words_to_end = (raw_obj->ptr()->HeapSize() >> kWordSizeLog2);
  }
#if defined(DEBUG)
  // Array::MakeFixedLength creates trailing filler objects,
  // but they are always unreachable. Verify that they are not marked.
  uword current = ObjectLayout::ToAddr(raw_obj) + raw_obj->ptr()->HeapSize();
  uword end = page->object_end();
  while (current < end) {
    ObjectPtr cur_obj = ObjectLayout::FromAddr(current);
    ASSERT(!cur_obj->ptr()->IsMarked());
    intptr_t obj_size = cur_obj->ptr()->HeapSize();
    memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
    current += obj_size;
  }
#endif  // DEBUG
  return words_to_end;
}

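// Background task that sweeps the large pages first and then the regular data
// pages. The page-space phase advances from kSweepingLarge to
// kSweepingRegular to kDone as each stage completes, and waiters are notified
// at every transition.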
class ConcurrentSweeperTask : public ThreadPool::Task {
 public:
  ConcurrentSweeperTask(IsolateGroup* isolate_group,
                        PageSpace* old_space,
                        OldPage* first,
                        OldPage* last,
                        OldPage* large_first,
                        OldPage* large_last)
      : task_isolate_group_(isolate_group),
        old_space_(old_space),
        first_(first),
        last_(last),
        large_first_(large_first),
        large_last_(large_last) {
    ASSERT(task_isolate_group_ != NULL);
    ASSERT(first_ != NULL);
    ASSERT(old_space_ != NULL);
    ASSERT(last_ != NULL);
    // Register this task with the page space and announce that large-object
    // sweeping is in progress.
    MonitorLocker ml(old_space_->tasks_lock());
    old_space_->set_tasks(old_space_->tasks() + 1);
    old_space_->set_phase(PageSpace::kSweepingLarge);
  }

  virtual void Run() {
    bool result = Thread::EnterIsolateGroupAsHelper(
        task_isolate_group_, Thread::kSweeperTask, /*bypass_safepoint=*/true);
    ASSERT(result);
    {
      Thread* thread = Thread::Current();
      ASSERT(thread->BypassSafepoints());  // Or we should be checking in.
      TIMELINE_FUNCTION_GC_DURATION(thread, "ConcurrentSweep");
      GCSweeper sweeper;

      // Sweep the large pages first.
      OldPage* page = large_first_;
      OldPage* prev_page = NULL;
      while (page != NULL) {
        OldPage* next_page;
        if (page == large_last_) {
          // Don't access page->next(), which would be a race with the mutator
          // allocating new pages.
          next_page = NULL;
        } else {
          next_page = page->next();
        }
        ASSERT(page->type() == OldPage::kData);
        const intptr_t words_to_end = sweeper.SweepLargePage(page);
        if (words_to_end == 0) {
          old_space_->FreeLargePage(page, prev_page);
        } else {
          old_space_->TruncateLargePage(page, words_to_end << kWordSizeLog2);
          prev_page = page;
        }
        page = next_page;
      }

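      // All large pages have been swept; advance the phase and wake any
      // thread waiting on the page space for large-object sweeping to finish.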
      {
        MonitorLocker ml(old_space_->tasks_lock());
        ASSERT(old_space_->phase() == PageSpace::kSweepingLarge);
        old_space_->set_phase(PageSpace::kSweepingRegular);
        ml.NotifyAll();
      }

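      // Now sweep the regular data pages, spreading recovered free blocks
      // across the sharded data free lists to reduce contention.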
      intptr_t shard = 0;
      const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1);
      page = first_;
      prev_page = NULL;
      while (page != NULL) {
        OldPage* next_page;
        if (page == last_) {
          // Don't access page->next(), which would be a race with the mutator
          // allocating new pages.
          next_page = NULL;
        } else {
          next_page = page->next();
        }
        ASSERT(page->type() == OldPage::kData);
        shard = (shard + 1) % num_shards;
        bool page_in_use =
            sweeper.SweepPage(page, old_space_->DataFreeList(shard), false);
        if (page_in_use) {
          prev_page = page;
        } else {
          old_space_->FreePage(page, prev_page);
        }
        {
          // Notify the mutator thread that we have added elements to the free
          // list or that more capacity is available.
          MonitorLocker ml(old_space_->tasks_lock());
          ml.Notify();
        }
        page = next_page;
      }
    }
    // Exit the isolate group cleanly *before* notifying, to avoid a shutdown
    // race.
    Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
    // This sweeper task is done. Notify waiters on the page space.
    {
      MonitorLocker ml(old_space_->tasks_lock());
      old_space_->set_tasks(old_space_->tasks() - 1);
      ASSERT(old_space_->phase() == PageSpace::kSweepingRegular);
      old_space_->set_phase(PageSpace::kDone);
      ml.NotifyAll();
    }
  }

 private:
  IsolateGroup* task_isolate_group_;
  PageSpace* old_space_;
  OldPage* first_;
  OldPage* last_;
  OldPage* large_first_;
  OldPage* large_last_;
};

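// Spawns a ConcurrentSweeperTask on the VM thread pool to sweep the given
// regular and large page lists in the background.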
void GCSweeper::SweepConcurrent(IsolateGroup* isolate_group,
                                OldPage* first,
                                OldPage* last,
                                OldPage* large_first,
                                OldPage* large_last,
                                FreeList* freelist) {
  bool result = Dart::thread_pool()->Run<ConcurrentSweeperTask>(
      isolate_group, isolate_group->heap()->old_space(), first, last,
      large_first, large_last);
  ASSERT(result);
}

}  // namespace dart