1 | // Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/heap/compactor.h" |
6 | |
7 | #include "platform/atomic.h" |
8 | #include "vm/globals.h" |
9 | #include "vm/heap/become.h" |
10 | #include "vm/heap/heap.h" |
11 | #include "vm/heap/pages.h" |
12 | #include "vm/thread_barrier.h" |
13 | #include "vm/timeline.h" |
14 | |
15 | namespace dart { |
16 | |
17 | DEFINE_FLAG(bool, |
18 | force_evacuation, |
19 | false, |
20 | "Force compaction to move every movable object" ); |
21 | |
22 | // Each OldPage is divided into blocks of size kBlockSize. Each object belongs |
23 | // to the block containing its header word (so up to kBlockSize + |
24 | // kAllocatablePageSize - 2 * kObjectAlignment bytes belong to the same block). |
25 | // During compaction, all live objects in the same block will slide such that |
26 | // they all end up on the same OldPage, and all gaps within the block will be |
// closed. During sliding, a bitvector is computed that indicates which
28 | // allocation units are live, so the new address of any object in the block can |
29 | // be found by adding the number of live allocation units before the object to |
30 | // the block's new start address. |
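// For example, if the live objects preceding an object within its block
// occupy three allocation units in total, the object's new address is the
// block's new_address_ plus 3 * kObjectAlignment bytes.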
31 | // Compare CountingBlock used for heap snapshot generation. |
32 | class ForwardingBlock { |
33 | public: |
34 | void Clear() { |
35 | new_address_ = 0; |
36 | live_bitvector_ = 0; |
37 | } |
38 | |
39 | uword Lookup(uword old_addr) const { |
40 | uword block_offset = old_addr & ~kBlockMask; |
41 | intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2; |
42 | ASSERT(first_unit_position < kBitsPerWord); |
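    // Each set bit below first_unit_position is one live allocation unit of
    // kObjectAlignment bytes that will be packed ahead of this object.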
43 | uword preceding_live_bitmask = |
44 | (static_cast<uword>(1) << first_unit_position) - 1; |
45 | uword preceding_live_bitset = live_bitvector_ & preceding_live_bitmask; |
46 | uword preceding_live_bytes = Utils::CountOneBitsWord(preceding_live_bitset) |
47 | << kObjectAlignmentLog2; |
48 | return new_address_ + preceding_live_bytes; |
49 | } |
50 | |
51 | // Marks a range of allocation units belonging to an object live by setting |
52 | // the corresponding bits in this ForwardingBlock. Does not update the |
  // new_address_ field; that is done after the total live size of the block
  // is known and the forwarding location is chosen. Does not mark words in
  // subsequent ForwardingBlocks live for objects that extend into the next
  // block.
56 | void RecordLive(uword old_addr, intptr_t size) { |
57 | intptr_t size_in_units = size >> kObjectAlignmentLog2; |
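    // Clamp so the shifts below stay within the word width; bits for the part
    // of an object that extends into following blocks are not recorded here
    // (see the comment above).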
58 | if (size_in_units >= kBitsPerWord) { |
59 | size_in_units = kBitsPerWord - 1; |
60 | } |
61 | uword block_offset = old_addr & ~kBlockMask; |
62 | intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2; |
63 | ASSERT(first_unit_position < kBitsPerWord); |
64 | live_bitvector_ |= ((static_cast<uword>(1) << size_in_units) - 1) |
65 | << first_unit_position; |
66 | } |
67 | |
68 | bool IsLive(uword old_addr) const { |
69 | uword block_offset = old_addr & ~kBlockMask; |
70 | intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2; |
71 | ASSERT(first_unit_position < kBitsPerWord); |
72 | return (live_bitvector_ & (static_cast<uword>(1) << first_unit_position)) != |
73 | 0; |
74 | } |
75 | |
76 | uword new_address() const { return new_address_; } |
77 | void set_new_address(uword value) { new_address_ = value; } |
78 | |
79 | private: |
80 | uword new_address_; |
81 | uword live_bitvector_; |
82 | COMPILE_ASSERT(kBitVectorWordsPerBlock == 1); |
83 | |
84 | DISALLOW_COPY_AND_ASSIGN(ForwardingBlock); |
85 | }; |
86 | |
87 | class ForwardingPage { |
88 | public: |
89 | void Clear() { |
90 | for (intptr_t i = 0; i < kBlocksPerPage; i++) { |
91 | blocks_[i].Clear(); |
92 | } |
93 | } |
94 | |
95 | uword Lookup(uword old_addr) { return BlockFor(old_addr)->Lookup(old_addr); } |
96 | |
97 | ForwardingBlock* BlockFor(uword old_addr) { |
98 | intptr_t page_offset = old_addr & ~kOldPageMask; |
99 | intptr_t block_number = page_offset / kBlockSize; |
100 | ASSERT(block_number >= 0); |
    ASSERT(block_number < kBlocksPerPage);
102 | return &blocks_[block_number]; |
103 | } |
104 | |
105 | private: |
106 | ForwardingBlock blocks_[kBlocksPerPage]; |
107 | |
108 | DISALLOW_ALLOCATION(); |
109 | DISALLOW_IMPLICIT_CONSTRUCTORS(ForwardingPage); |
110 | }; |
111 | |
112 | void OldPage::AllocateForwardingPage() { |
113 | ASSERT(forwarding_page_ == NULL); |
114 | ASSERT((object_start() + sizeof(ForwardingPage)) < object_end()); |
115 | ASSERT(Utils::IsAligned(sizeof(ForwardingPage), kObjectAlignment)); |
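  // Carve the ForwardingPage out of the tail of this page's object area; the
  // page's usable range shrinks accordingly.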
116 | object_end_ -= sizeof(ForwardingPage); |
117 | forwarding_page_ = reinterpret_cast<ForwardingPage*>(object_end_); |
118 | } |
119 | |
120 | class CompactorTask : public ThreadPool::Task { |
121 | public: |
122 | CompactorTask(IsolateGroup* isolate_group, |
123 | GCCompactor* compactor, |
124 | ThreadBarrier* barrier, |
125 | RelaxedAtomic<intptr_t>* next_forwarding_task, |
126 | OldPage* head, |
127 | OldPage** tail, |
128 | FreeList* freelist) |
129 | : isolate_group_(isolate_group), |
130 | compactor_(compactor), |
131 | barrier_(barrier), |
132 | next_forwarding_task_(next_forwarding_task), |
133 | head_(head), |
134 | tail_(tail), |
135 | freelist_(freelist), |
136 | free_page_(NULL), |
137 | free_current_(0), |
138 | free_end_(0) {} |
139 | |
140 | void Run(); |
141 | void RunEnteredIsolateGroup(); |
142 | |
143 | private: |
144 | void PlanPage(OldPage* page); |
145 | void SlidePage(OldPage* page); |
146 | uword PlanBlock(uword first_object, ForwardingPage* forwarding_page); |
147 | uword SlideBlock(uword first_object, ForwardingPage* forwarding_page); |
148 | void PlanMoveToContiguousSize(intptr_t size); |
149 | |
150 | IsolateGroup* isolate_group_; |
151 | GCCompactor* compactor_; |
152 | ThreadBarrier* barrier_; |
153 | RelaxedAtomic<intptr_t>* next_forwarding_task_; |
154 | OldPage* head_; |
155 | OldPage** tail_; |
156 | FreeList* freelist_; |
157 | OldPage* free_page_; |
158 | uword free_current_; |
159 | uword free_end_; |
160 | |
161 | DISALLOW_COPY_AND_ASSIGN(CompactorTask); |
162 | }; |
163 | |
164 | // Slides live objects down past free gaps, updates pointers and frees empty |
165 | // pages. Keeps cursors pointing to the next free and next live chunks, and |
166 | // repeatedly moves the next live chunk to the next free chunk, one block at a |
167 | // time, keeping blocks from spanning page boundaries (see ForwardingBlock). |
168 | // Free space at the end of a page that is too small for the next block is |
169 | // added to the freelist. |
170 | void GCCompactor::Compact(OldPage* pages, |
171 | FreeList* freelist, |
172 | Mutex* pages_lock) { |
173 | SetupImagePageBoundaries(); |
174 | |
175 | // Divide the heap. |
176 | // TODO(30978): Try to divide based on live bytes or with work stealing. |
177 | intptr_t num_pages = 0; |
178 | for (OldPage* page = pages; page != NULL; page = page->next()) { |
179 | num_pages++; |
180 | } |
181 | |
182 | intptr_t num_tasks = FLAG_compactor_tasks; |
183 | RELEASE_ASSERT(num_tasks >= 1); |
184 | if (num_pages < num_tasks) { |
185 | num_tasks = num_pages; |
186 | } |
187 | OldPage** heads = new OldPage*[num_tasks]; |
188 | OldPage** tails = new OldPage*[num_tasks]; |
189 | |
190 | { |
191 | const intptr_t pages_per_task = num_pages / num_tasks; |
192 | intptr_t task_index = 0; |
193 | intptr_t page_index = 0; |
194 | OldPage* page = pages; |
195 | OldPage* prev = NULL; |
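    // Cut the page list into num_tasks sublists of pages_per_task pages each;
    // any remainder pages stay attached to the last sublist.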
196 | while (task_index < num_tasks) { |
197 | if (page_index % pages_per_task == 0) { |
198 | heads[task_index] = page; |
199 | tails[task_index] = NULL; |
200 | if (prev != NULL) { |
201 | prev->set_next(NULL); |
202 | } |
203 | task_index++; |
204 | } |
205 | prev = page; |
206 | page = page->next(); |
207 | page_index++; |
208 | } |
209 | ASSERT(page_index <= num_pages); |
210 | ASSERT(task_index == num_tasks); |
211 | } |
212 | |
213 | if (FLAG_force_evacuation) { |
214 | // Inject empty pages at the beginning of each worker's list to ensure all |
215 | // objects move and all pages that used to have an object are released. |
216 | // This can be helpful for finding untracked pointers because it prevents |
217 | // an untracked pointer from getting lucky with its target not moving. |
218 | bool oom = false; |
219 | for (intptr_t task_index = 0; task_index < num_tasks && !oom; |
220 | task_index++) { |
221 | const intptr_t pages_per_task = num_pages / num_tasks; |
222 | for (intptr_t j = 0; j < pages_per_task; j++) { |
223 | OldPage* page = heap_->old_space()->AllocatePage(OldPage::kData, |
224 | /* link */ false); |
225 | |
226 | if (page == nullptr) { |
227 | oom = true; |
228 | break; |
229 | } |
230 | |
231 | FreeListElement::AsElement(page->object_start(), |
232 | page->object_end() - page->object_start()); |
233 | |
234 | // The compactor slides down: add the empty pages to the beginning. |
235 | page->set_next(heads[task_index]); |
236 | heads[task_index] = page; |
237 | } |
238 | } |
239 | } |
240 | |
241 | { |
242 | ThreadBarrier barrier(num_tasks, heap_->barrier(), heap_->barrier_done()); |
243 | RelaxedAtomic<intptr_t> next_forwarding_task = {0}; |
244 | |
245 | for (intptr_t task_index = 0; task_index < num_tasks; task_index++) { |
246 | if (task_index < (num_tasks - 1)) { |
247 | // Begin compacting on a helper thread. |
248 | Dart::thread_pool()->Run<CompactorTask>( |
249 | thread()->isolate_group(), this, &barrier, &next_forwarding_task, |
250 | heads[task_index], &tails[task_index], freelist); |
251 | } else { |
252 | // Last worker is the main thread. |
253 | CompactorTask task(thread()->isolate_group(), this, &barrier, |
254 | &next_forwarding_task, heads[task_index], |
255 | &tails[task_index], freelist); |
256 | task.RunEnteredIsolateGroup(); |
257 | barrier.Exit(); |
258 | } |
259 | } |
260 | } |
261 | |
  // Update inner pointers in typed data views (needs to be done after all
  // threads are done with sliding since we need to access fields of the
  // view's backing store).
  //
  // (If the sliding compactor were single-threaded we could do this during
  // the sliding phase: the class id of the backing store could be read from
  // either the already-slid or the not-yet-slid object. With parallel
  // sliding, however, there is no safe way to access the backing store's
  // object header.)
271 | { |
272 | TIMELINE_FUNCTION_GC_DURATION(thread(), |
273 | "ForwardTypedDataViewInternalPointers" ); |
274 | const intptr_t length = typed_data_views_.length(); |
275 | for (intptr_t i = 0; i < length; ++i) { |
276 | auto raw_view = typed_data_views_[i]; |
277 | const classid_t cid = raw_view->ptr()->typed_data_->GetClassIdMayBeSmi(); |
278 | |
      // If the backing store is external typed data, it lives in the C heap
      // and will not move, so there is nothing to do. Otherwise we have to
      // update the view's inner pointer.
282 | if (IsTypedDataClassId(cid)) { |
283 | raw_view->ptr()->RecomputeDataFieldForInternalTypedData(); |
284 | } else { |
285 | ASSERT(IsExternalTypedDataClassId(cid)); |
286 | } |
287 | } |
288 | } |
289 | |
290 | for (intptr_t task_index = 0; task_index < num_tasks; task_index++) { |
291 | ASSERT(tails[task_index] != NULL); |
292 | } |
293 | |
294 | { |
    TIMELINE_FUNCTION_GC_DURATION(thread(), "ForwardStackPointers");
296 | ForwardStackPointers(); |
297 | } |
298 | |
299 | { |
300 | MutexLocker ml(pages_lock); |
301 | |
302 | // Free empty pages. |
303 | for (intptr_t task_index = 0; task_index < num_tasks; task_index++) { |
304 | OldPage* page = tails[task_index]->next(); |
305 | while (page != NULL) { |
306 | OldPage* next = page->next(); |
307 | heap_->old_space()->IncreaseCapacityInWordsLocked( |
308 | -(page->memory_->size() >> kWordSizeLog2)); |
309 | page->Deallocate(); |
310 | page = next; |
311 | } |
312 | } |
313 | |
314 | // Re-join the heap. |
315 | for (intptr_t task_index = 0; task_index < num_tasks - 1; task_index++) { |
316 | tails[task_index]->set_next(heads[task_index + 1]); |
317 | } |
318 | tails[num_tasks - 1]->set_next(NULL); |
319 | heap_->old_space()->pages_ = pages = heads[0]; |
320 | heap_->old_space()->pages_tail_ = tails[num_tasks - 1]; |
321 | |
322 | delete[] heads; |
323 | delete[] tails; |
324 | } |
325 | } |
326 | |
327 | void CompactorTask::Run() { |
328 | bool result = |
329 | Thread::EnterIsolateGroupAsHelper(isolate_group_, Thread::kCompactorTask, |
330 | /*bypass_safepoint=*/true); |
331 | ASSERT(result); |
332 | |
333 | RunEnteredIsolateGroup(); |
334 | |
335 | Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true); |
336 | |
337 | // This task is done. Notify the original thread. |
338 | barrier_->Exit(); |
339 | } |
340 | |
341 | void CompactorTask::RunEnteredIsolateGroup() { |
342 | #ifdef SUPPORT_TIMELINE |
343 | Thread* thread = Thread::Current(); |
344 | #endif |
345 | { |
346 | { |
347 | TIMELINE_FUNCTION_GC_DURATION(thread, "Plan" ); |
348 | free_page_ = head_; |
349 | free_current_ = free_page_->object_start(); |
350 | free_end_ = free_page_->object_end(); |
351 | |
352 | for (OldPage* page = head_; page != NULL; page = page->next()) { |
353 | PlanPage(page); |
354 | } |
355 | } |
356 | |
357 | barrier_->Sync(); |
358 | |
359 | { |
360 | TIMELINE_FUNCTION_GC_DURATION(thread, "Slide" ); |
361 | free_page_ = head_; |
362 | free_current_ = free_page_->object_start(); |
363 | free_end_ = free_page_->object_end(); |
364 | |
365 | for (OldPage* page = head_; page != NULL; page = page->next()) { |
366 | SlidePage(page); |
367 | } |
368 | |
369 | // Add any leftover in the last used page to the freelist. This is |
370 | // required to make the page walkable during forwarding, etc. |
371 | intptr_t free_remaining = free_end_ - free_current_; |
372 | if (free_remaining != 0) { |
373 | freelist_->Free(free_current_, free_remaining); |
374 | } |
375 | |
376 | ASSERT(free_page_ != NULL); |
377 | *tail_ = free_page_; // Last live page. |
378 | } |
379 | |
380 | // Heap: Regular pages already visited during sliding. Code and image pages |
381 | // have no pointers to forward. Visit large pages and new-space. |
382 | |
383 | bool more_forwarding_tasks = true; |
384 | while (more_forwarding_tasks) { |
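      // Atomically claim forwarding subtasks until none remain; fetch_add
      // hands each case below to exactly one worker.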
385 | intptr_t forwarding_task = next_forwarding_task_->fetch_add(1u); |
386 | switch (forwarding_task) { |
387 | case 0: { |
388 | TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardLargePages" ); |
389 | for (OldPage* large_page = |
390 | isolate_group_->heap()->old_space()->large_pages_; |
391 | large_page != NULL; large_page = large_page->next()) { |
392 | large_page->VisitObjectPointers(compactor_); |
393 | } |
394 | break; |
395 | } |
396 | case 1: { |
397 | TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardNewSpace" ); |
398 | isolate_group_->heap()->new_space()->VisitObjectPointers(compactor_); |
399 | break; |
400 | } |
401 | case 2: { |
402 | TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardRememberedSet" ); |
403 | isolate_group_->store_buffer()->VisitObjectPointers(compactor_); |
404 | break; |
405 | } |
406 | case 3: { |
407 | TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardWeakTables" ); |
408 | isolate_group_->heap()->ForwardWeakTables(compactor_); |
409 | break; |
410 | } |
411 | case 4: { |
412 | TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardWeakHandles" ); |
413 | isolate_group_->VisitWeakPersistentHandles(compactor_); |
414 | break; |
415 | } |
416 | #ifndef PRODUCT |
417 | case 5: { |
418 | TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardObjectIdRing" ); |
419 | isolate_group_->ForEachIsolate( |
420 | [&](Isolate* isolate) { |
421 | ObjectIdRing* ring = isolate->object_id_ring(); |
422 | if (ring != nullptr) { |
423 | ring->VisitPointers(compactor_); |
424 | } |
425 | }, |
426 | /*at_safepoint=*/true); |
427 | break; |
428 | } |
429 | #endif // !PRODUCT |
430 | default: |
431 | more_forwarding_tasks = false; |
432 | } |
433 | } |
434 | |
435 | barrier_->Sync(); |
436 | } |
437 | } |
438 | |
439 | void CompactorTask::PlanPage(OldPage* page) { |
440 | uword current = page->object_start(); |
441 | uword end = page->object_end(); |
442 | |
443 | ForwardingPage* forwarding_page = page->forwarding_page(); |
444 | ASSERT(forwarding_page != nullptr); |
445 | forwarding_page->Clear(); |
446 | while (current < end) { |
447 | current = PlanBlock(current, forwarding_page); |
448 | } |
449 | } |
450 | |
451 | void CompactorTask::SlidePage(OldPage* page) { |
452 | uword current = page->object_start(); |
453 | uword end = page->object_end(); |
454 | |
455 | ForwardingPage* forwarding_page = page->forwarding_page(); |
456 | ASSERT(forwarding_page != nullptr); |
457 | while (current < end) { |
458 | current = SlideBlock(current, forwarding_page); |
459 | } |
460 | } |
461 | |
462 | // Plans the destination for a set of live objects starting with the first |
463 | // live object that starts in a block, up to and including the last live |
464 | // object that starts in that block. |
465 | uword CompactorTask::PlanBlock(uword first_object, |
466 | ForwardingPage* forwarding_page) { |
467 | uword block_start = first_object & kBlockMask; |
468 | uword block_end = block_start + kBlockSize; |
469 | ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object); |
470 | |
471 | // 1. Compute bitvector of surviving allocation units in the block. |
472 | intptr_t block_live_size = 0; |
473 | intptr_t block_dead_size = 0; |
474 | uword current = first_object; |
475 | while (current < block_end) { |
476 | ObjectPtr obj = ObjectLayout::FromAddr(current); |
477 | intptr_t size = obj->ptr()->HeapSize(); |
478 | if (obj->ptr()->IsMarked()) { |
479 | forwarding_block->RecordLive(current, size); |
480 | ASSERT(static_cast<intptr_t>(forwarding_block->Lookup(current)) == |
481 | block_live_size); |
482 | block_live_size += size; |
483 | } else { |
484 | block_dead_size += size; |
485 | } |
486 | current += size; |
487 | } |
488 | |
489 | // 2. Find the next contiguous space that can fit the live objects that |
490 | // start in the block. |
491 | PlanMoveToContiguousSize(block_live_size); |
492 | forwarding_block->set_new_address(free_current_); |
493 | free_current_ += block_live_size; |
494 | |
  return current;  // First object in the next block.
496 | } |
497 | |
498 | uword CompactorTask::SlideBlock(uword first_object, |
499 | ForwardingPage* forwarding_page) { |
500 | uword block_start = first_object & kBlockMask; |
501 | uword block_end = block_start + kBlockSize; |
502 | ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object); |
503 | |
504 | uword old_addr = first_object; |
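  // Walk every object whose header lies in this block, copying marked objects
  // to their forwarding addresses and skipping dead ones.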
505 | while (old_addr < block_end) { |
506 | ObjectPtr old_obj = ObjectLayout::FromAddr(old_addr); |
507 | intptr_t size = old_obj->ptr()->HeapSize(); |
508 | if (old_obj->ptr()->IsMarked()) { |
509 | uword new_addr = forwarding_block->Lookup(old_addr); |
510 | if (new_addr != free_current_) { |
        // The only situation where these two don't match is if we are moving
        // to a new page. But if we exactly hit the end of the previous page
        // then free_current_ could be at the start of the next page, so we
        // subtract 1.
515 | ASSERT(OldPage::Of(free_current_ - 1) != OldPage::Of(new_addr)); |
516 | intptr_t free_remaining = free_end_ - free_current_; |
517 | // Add any leftover at the end of a page to the free list. |
518 | if (free_remaining > 0) { |
519 | freelist_->Free(free_current_, free_remaining); |
520 | } |
521 | free_page_ = free_page_->next(); |
522 | ASSERT(free_page_ != NULL); |
523 | free_current_ = free_page_->object_start(); |
524 | free_end_ = free_page_->object_end(); |
525 | ASSERT(free_current_ == new_addr); |
526 | } |
527 | ObjectPtr new_obj = ObjectLayout::FromAddr(new_addr); |
528 | |
529 | // Fast path for no movement. There's often a large block of objects at |
530 | // the beginning that don't move. |
531 | if (new_addr != old_addr) { |
532 | // Slide the object down. |
533 | memmove(reinterpret_cast<void*>(new_addr), |
534 | reinterpret_cast<void*>(old_addr), size); |
535 | |
536 | if (IsTypedDataClassId(new_obj->GetClassId())) { |
537 | static_cast<TypedDataPtr>(new_obj)->ptr()->RecomputeDataField(); |
538 | } |
539 | } |
540 | new_obj->ptr()->ClearMarkBit(); |
541 | new_obj->ptr()->VisitPointers(compactor_); |
542 | |
543 | ASSERT(free_current_ == new_addr); |
544 | free_current_ += size; |
545 | } else { |
546 | ASSERT(!forwarding_block->IsLive(old_addr)); |
547 | } |
548 | old_addr += size; |
549 | } |
550 | |
551 | return old_addr; // First object in the next block. |
552 | } |
553 | |
554 | void CompactorTask::PlanMoveToContiguousSize(intptr_t size) { |
555 | // Move the free cursor to ensure 'size' bytes of contiguous space. |
556 | ASSERT(size <= kOldPageSize); |
557 | |
558 | // Check if the current free page has enough space. |
559 | intptr_t free_remaining = free_end_ - free_current_; |
560 | if (free_remaining < size) { |
561 | // Not enough; advance to the next free page. |
562 | free_page_ = free_page_->next(); |
563 | ASSERT(free_page_ != NULL); |
564 | free_current_ = free_page_->object_start(); |
565 | free_end_ = free_page_->object_end(); |
566 | free_remaining = free_end_ - free_current_; |
567 | ASSERT(free_remaining >= size); |
568 | } |
569 | } |
570 | |
571 | void GCCompactor::SetupImagePageBoundaries() { |
572 | MallocGrowableArray<ImagePageRange> ranges(4); |
573 | |
574 | OldPage* image_page = Dart::vm_isolate()->heap()->old_space()->image_pages_; |
575 | while (image_page != NULL) { |
576 | ImagePageRange range = {image_page->object_start(), |
577 | image_page->object_end()}; |
578 | ranges.Add(range); |
579 | image_page = image_page->next(); |
580 | } |
581 | image_page = heap_->old_space()->image_pages_; |
582 | while (image_page != NULL) { |
583 | ImagePageRange range = {image_page->object_start(), |
584 | image_page->object_end()}; |
585 | ranges.Add(range); |
586 | image_page = image_page->next(); |
587 | } |
588 | |
589 | ranges.Sort(CompareImagePageRanges); |
590 | intptr_t image_page_count; |
591 | ranges.StealBuffer(&image_page_ranges_, &image_page_count); |
592 | image_page_hi_ = image_page_count - 1; |
593 | } |
594 | |
595 | DART_FORCE_INLINE |
596 | void GCCompactor::ForwardPointer(ObjectPtr* ptr) { |
597 | ObjectPtr old_target = *ptr; |
598 | if (old_target->IsSmiOrNewObject()) { |
599 | return; // Not moved. |
600 | } |
601 | |
602 | uword old_addr = ObjectLayout::ToAddr(old_target); |
603 | intptr_t lo = 0; |
604 | intptr_t hi = image_page_hi_; |
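  // Binary search the sorted image page ranges; objects in image pages are
  // not moved by the compactor.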
605 | while (lo <= hi) { |
606 | intptr_t mid = (hi - lo + 1) / 2 + lo; |
607 | ASSERT(mid >= lo); |
608 | ASSERT(mid <= hi); |
609 | if (old_addr < image_page_ranges_[mid].start) { |
610 | hi = mid - 1; |
611 | } else if (old_addr >= image_page_ranges_[mid].end) { |
612 | lo = mid + 1; |
613 | } else { |
614 | return; // Not moved (unaligned image page). |
615 | } |
616 | } |
617 | |
618 | OldPage* page = OldPage::Of(old_target); |
619 | ForwardingPage* forwarding_page = page->forwarding_page(); |
620 | if (forwarding_page == NULL) { |
621 | return; // Not moved (VM isolate, large page, code page). |
622 | } |
623 | |
624 | ObjectPtr new_target = |
625 | ObjectLayout::FromAddr(forwarding_page->Lookup(old_addr)); |
626 | ASSERT(!new_target->IsSmiOrNewObject()); |
627 | *ptr = new_target; |
628 | } |
629 | |
630 | void GCCompactor::VisitTypedDataViewPointers(TypedDataViewPtr view, |
631 | ObjectPtr* first, |
632 | ObjectPtr* last) { |
633 | // First we forward all fields of the typed data view. |
634 | ObjectPtr old_backing = view->ptr()->typed_data_; |
635 | VisitPointers(first, last); |
636 | ObjectPtr new_backing = view->ptr()->typed_data_; |
637 | |
638 | const bool backing_moved = old_backing != new_backing; |
639 | if (backing_moved) { |
640 | // The backing store moved, so we *might* need to update the view's inner |
641 | // pointer. If the backing store is internal typed data we *have* to update |
642 | // it, otherwise (in case of external typed data) we don't have to. |
643 | // |
644 | // Unfortunately we cannot find out whether the backing store is internal |
645 | // or external during sliding phase: Even though we know the old and new |
646 | // location of the backing store another thread might be responsible for |
647 | // moving it and we have no way to tell when it got moved. |
648 | // |
649 | // So instead we queue all those views up and fix their inner pointer in a |
650 | // final phase after compaction. |
651 | MutexLocker ml(&typed_data_view_mutex_); |
652 | typed_data_views_.Add(view); |
653 | } else { |
654 | // The backing store didn't move, we therefore don't need to update the |
655 | // inner pointer. |
656 | if (view->ptr()->data_ == 0) { |
657 | ASSERT(RawSmiValue(view->ptr()->offset_in_bytes_) == 0 && |
658 | RawSmiValue(view->ptr()->length_) == 0 && |
659 | view->ptr()->typed_data_ == Object::null()); |
660 | } |
661 | } |
662 | } |
663 | |
664 | // N.B.: This pointer visitor is not idempotent. We must take care to visit |
665 | // each pointer exactly once. |
666 | void GCCompactor::VisitPointers(ObjectPtr* first, ObjectPtr* last) { |
667 | for (ObjectPtr* ptr = first; ptr <= last; ptr++) { |
668 | ForwardPointer(ptr); |
669 | } |
670 | } |
671 | |
672 | void GCCompactor::VisitHandle(uword addr) { |
673 | FinalizablePersistentHandle* handle = |
674 | reinterpret_cast<FinalizablePersistentHandle*>(addr); |
675 | ForwardPointer(handle->raw_addr()); |
676 | } |
677 | |
678 | void GCCompactor::ForwardStackPointers() { |
679 | // N.B.: Heap pointers have already been forwarded. We forward the heap before |
680 | // forwarding the stack to limit the number of places that need to be aware of |
681 | // forwarding when reading stack maps. |
682 | isolate_group()->VisitObjectPointers(this, |
683 | ValidationPolicy::kDontValidateFrames); |
684 | } |
685 | |
686 | } // namespace dart |
687 | |