1 | // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/heap/scavenger.h" |
6 | |
7 | #include "platform/leak_sanitizer.h" |
8 | #include "vm/dart.h" |
9 | #include "vm/dart_api_state.h" |
10 | #include "vm/flag_list.h" |
11 | #include "vm/heap/become.h" |
12 | #include "vm/heap/pointer_block.h" |
13 | #include "vm/heap/safepoint.h" |
14 | #include "vm/heap/verifier.h" |
15 | #include "vm/heap/weak_table.h" |
16 | #include "vm/isolate.h" |
17 | #include "vm/lockers.h" |
18 | #include "vm/longjump.h" |
19 | #include "vm/object.h" |
20 | #include "vm/object_id_ring.h" |
21 | #include "vm/object_set.h" |
22 | #include "vm/stack_frame.h" |
23 | #include "vm/thread_barrier.h" |
24 | #include "vm/thread_registry.h" |
25 | #include "vm/timeline.h" |
26 | #include "vm/visitor.h" |
27 | |
28 | namespace dart { |
29 | |
30 | DEFINE_FLAG(int, |
31 | early_tenuring_threshold, |
32 | 66, |
33 | "When more than this percentage of promotion candidates survive, " |
34 | "promote all survivors of next scavenge." ); |
35 | DEFINE_FLAG(int, |
36 | new_gen_garbage_threshold, |
37 | 90, |
38 | "Grow new gen when less than this percentage is garbage." ); |
39 | DEFINE_FLAG(int, new_gen_growth_factor, 2, "Grow new gen by this factor." ); |
40 | |
41 | // Scavenger uses the kCardRememberedBit to distinguish forwarded and |
42 | // non-forwarded objects. We must choose a bit that is clear for all new-space |
43 | // object headers, and which doesn't intersect with the target address because |
44 | // of object alignment. |
45 | enum { |
46 | kForwardingMask = 1 << ObjectLayout::kCardRememberedBit, |
47 | kNotForwarded = 0, |
48 | kForwarded = kForwardingMask, |
49 | }; |
50 | |
51 | // If the forwarded bit and pointer tag bit are the same, we can avoid a few |
52 | // conversions. |
53 | COMPILE_ASSERT(kForwarded == kHeapObjectTag); |
54 | |
static inline bool IsForwarding(uword header) {
56 | uword bits = header & kForwardingMask; |
57 | ASSERT((bits == kNotForwarded) || (bits == kForwarded)); |
58 | return bits == kForwarded; |
59 | } |
60 | |
static inline ObjectPtr ForwardedObj(uword header) {
62 | ASSERT(IsForwarding(header)); |
63 | return static_cast<ObjectPtr>(header); |
64 | } |
65 | |
static inline uword ForwardingHeader(ObjectPtr target) {
67 | uword result = static_cast<uword>(target); |
68 | ASSERT(IsForwarding(result)); |
69 | return result; |
70 | } |
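// Illustrative sketch of the encoding used by the helpers above (not part of
// the collector; the address and constants are assumptions for the example):
// with kHeapObjectTag == 1 and kCardRememberedBit == 0, as the COMPILE_ASSERT
// implies, a to-space copy at word-aligned address 0x10008 round-trips as
//
//   ObjectPtr copy = ObjectLayout::FromAddr(0x10008);  // hypothetical address
//   uword fwd = ForwardingHeader(copy);  // 0x10009: low bit set by the tag
//   ASSERT(IsForwarding(fwd));
//   ASSERT(ForwardedObj(fwd) == copy);
//
// whereas an ordinary new-space tags word keeps that bit clear, so a single
// word load is enough to tell "still needs copying" from "already forwarded".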
71 | |
72 | // Races: The first word in the copied region is a header word that may be |
73 | // updated by the scavenger worker in another thread, so we might copy either |
74 | // the original object header or an installed forwarding pointer. This race is |
75 | // harmless because if we copy the installed forwarding pointer, the scavenge |
76 | // worker in the current thread will abandon this copy. We do not mark the loads |
77 | // here as relaxed so the C++ compiler still has the freedom to reorder them. |
78 | NO_SANITIZE_THREAD |
79 | static inline void objcpy(void* dst, const void* src, size_t size) { |
80 | // A memcopy specialized for objects. We can assume: |
81 | // - dst and src do not overlap |
82 | ASSERT( |
83 | (reinterpret_cast<uword>(dst) + size <= reinterpret_cast<uword>(src)) || |
84 | (reinterpret_cast<uword>(src) + size <= reinterpret_cast<uword>(dst))); |
85 | // - dst and src are word aligned |
86 | ASSERT(Utils::IsAligned(reinterpret_cast<uword>(dst), sizeof(uword))); |
87 | ASSERT(Utils::IsAligned(reinterpret_cast<uword>(src), sizeof(uword))); |
88 | // - size is strictly positive |
89 | ASSERT(size > 0); |
90 | // - size is a multiple of double words |
91 | ASSERT(Utils::IsAligned(size, 2 * sizeof(uword))); |
92 | |
93 | uword* __restrict dst_cursor = reinterpret_cast<uword*>(dst); |
94 | const uword* __restrict src_cursor = reinterpret_cast<const uword*>(src); |
95 | do { |
96 | uword a = *src_cursor++; |
97 | uword b = *src_cursor++; |
98 | *dst_cursor++ = a; |
99 | *dst_cursor++ = b; |
100 | size -= (2 * sizeof(uword)); |
101 | } while (size > 0); |
102 | } |
103 | |
104 | template <bool parallel> |
105 | class ScavengerVisitorBase : public ObjectPointerVisitor { |
106 | public: |
107 | explicit ScavengerVisitorBase(IsolateGroup* isolate_group, |
108 | Scavenger* scavenger, |
109 | SemiSpace* from, |
110 | FreeList* freelist, |
111 | PromotionStack* promotion_stack) |
112 | : ObjectPointerVisitor(isolate_group), |
113 | thread_(nullptr), |
114 | scavenger_(scavenger), |
115 | from_(from), |
116 | page_space_(scavenger->heap_->old_space()), |
117 | freelist_(freelist), |
118 | bytes_promoted_(0), |
119 | visiting_old_object_(nullptr), |
120 | promoted_list_(promotion_stack) {} |
121 | |
122 | virtual void VisitTypedDataViewPointers(TypedDataViewPtr view, |
123 | ObjectPtr* first, |
124 | ObjectPtr* last) { |
125 | // First we forward all fields of the typed data view. |
126 | VisitPointers(first, last); |
127 | |
128 | if (view->ptr()->data_ == nullptr) { |
129 | ASSERT(RawSmiValue(view->ptr()->offset_in_bytes_) == 0 && |
130 | RawSmiValue(view->ptr()->length_) == 0); |
131 | return; |
132 | } |
133 | |
134 | // Validate 'this' is a typed data view. |
    const uword view_header =
        *reinterpret_cast<uword*>(ObjectLayout::ToAddr(view));
137 | ASSERT(!IsForwarding(view_header) || view->IsOldObject()); |
138 | ASSERT(IsTypedDataViewClassId(view->GetClassIdMayBeSmi())); |
139 | |
140 | // Validate that the backing store is not a forwarding word. |
141 | TypedDataBasePtr td = view->ptr()->typed_data_; |
142 | ASSERT(td->IsHeapObject()); |
    const uword td_header = *reinterpret_cast<uword*>(ObjectLayout::ToAddr(td));
144 | ASSERT(!IsForwarding(td_header) || td->IsOldObject()); |
145 | |
146 | // We can always obtain the class id from the forwarded backing store. |
147 | const classid_t cid = td->GetClassId(); |
148 | |
149 | // If we have external typed data we can simply return since the backing |
150 | // store lives in C-heap and will not move. |
151 | if (IsExternalTypedDataClassId(cid)) { |
152 | return; |
153 | } |
154 | |
155 | // Now we update the inner pointer. |
156 | ASSERT(IsTypedDataClassId(cid)); |
157 | view->ptr()->RecomputeDataFieldForInternalTypedData(); |
158 | } |
159 | |
160 | virtual void VisitPointers(ObjectPtr* first, ObjectPtr* last) { |
161 | ASSERT(Utils::IsAligned(first, sizeof(*first))); |
162 | ASSERT(Utils::IsAligned(last, sizeof(*last))); |
163 | for (ObjectPtr* current = first; current <= last; current++) { |
164 | ScavengePointer(current); |
165 | } |
166 | } |
167 | |
168 | void VisitingOldObject(ObjectPtr obj) { |
169 | ASSERT((obj == nullptr) || obj->IsOldObject()); |
170 | visiting_old_object_ = obj; |
171 | if (obj != nullptr) { |
172 | // Card update happens in OldPage::VisitRememberedCards. |
173 | ASSERT(!obj->ptr()->IsCardRemembered()); |
174 | } |
175 | } |
176 | |
177 | intptr_t bytes_promoted() const { return bytes_promoted_; } |
178 | |
179 | void ProcessRoots() { |
180 | thread_ = Thread::Current(); |
181 | page_space_->AcquireLock(freelist_); |
182 | |
183 | LongJumpScope jump; |
184 | if (setjmp(*jump.Set()) == 0) { |
185 | scavenger_->IterateRoots(this); |
186 | } else { |
187 | ASSERT(scavenger_->abort_); |
188 | thread_->ClearStickyError(); |
189 | } |
190 | } |
191 | |
192 | void ProcessSurvivors() { |
193 | LongJumpScope jump; |
194 | if (setjmp(*jump.Set()) == 0) { |
195 | // Iterate until all work has been drained. |
196 | do { |
197 | ProcessToSpace(); |
198 | ProcessPromotedList(); |
199 | } while (HasWork()); |
200 | } else { |
201 | ASSERT(scavenger_->abort_); |
202 | thread_->ClearStickyError(); |
203 | } |
204 | } |
205 | |
206 | void ProcessAll() { |
207 | LongJumpScope jump; |
208 | if (setjmp(*jump.Set()) == 0) { |
209 | do { |
210 | do { |
211 | ProcessToSpace(); |
212 | ProcessPromotedList(); |
213 | } while (HasWork()); |
214 | ProcessWeakProperties(); |
215 | } while (HasWork()); |
216 | } else { |
217 | ASSERT(scavenger_->abort_); |
218 | thread_->ClearStickyError(); |
219 | } |
220 | } |
221 | |
222 | inline void ProcessWeakProperties(); |
223 | |
224 | bool HasWork() { |
225 | if (scavenger_->abort_) return false; |
226 | return (scan_ != tail_) || (scan_ != nullptr && !scan_->IsResolved()) || |
227 | !promoted_list_.IsEmpty(); |
228 | } |
229 | |
230 | void Finalize() { |
231 | if (scavenger_->abort_) { |
232 | promoted_list_.AbandonWork(); |
233 | } else { |
234 | ASSERT(!HasWork()); |
235 | |
236 | for (NewPage* page = head_; page != nullptr; page = page->next()) { |
237 | ASSERT(page->IsResolved()); |
238 | page->RecordSurvivors(); |
239 | } |
240 | |
241 | promoted_list_.Finalize(); |
242 | |
243 | MournWeakProperties(); |
244 | } |
245 | page_space_->ReleaseLock(freelist_); |
246 | thread_ = nullptr; |
247 | } |
248 | |
249 | NewPage* head() const { return head_; } |
250 | NewPage* tail() const { return tail_; } |
251 | |
252 | private: |
253 | void UpdateStoreBuffer(ObjectPtr* p, ObjectPtr obj) { |
254 | ASSERT(obj->IsHeapObject()); |
255 | // If the newly written object is not a new object, drop it immediately. |
256 | if (!obj->IsNewObject() || visiting_old_object_->ptr()->IsRemembered()) { |
257 | return; |
258 | } |
259 | visiting_old_object_->ptr()->SetRememberedBit(); |
260 | thread_->StoreBufferAddObjectGC(visiting_old_object_); |
261 | } |
262 | |
263 | DART_FORCE_INLINE |
264 | void ScavengePointer(ObjectPtr* p) { |
265 | // ScavengePointer cannot be called recursively. |
266 | ObjectPtr raw_obj = *p; |
267 | |
268 | if (raw_obj->IsSmiOrOldObject()) { |
269 | return; |
270 | } |
271 | |
272 | uword raw_addr = ObjectLayout::ToAddr(raw_obj); |
    // The scavenger only expects objects located in the from space.
274 | ASSERT(from_->Contains(raw_addr)); |
275 | // Read the header word of the object and determine if the object has |
276 | // already been copied. |
    uword header = reinterpret_cast<std::atomic<uword>*>(raw_addr)->load(
        std::memory_order_relaxed);
279 | ObjectPtr new_obj; |
280 | if (IsForwarding(header)) { |
281 | // Get the new location of the object. |
282 | new_obj = ForwardedObj(header); |
283 | } else { |
284 | intptr_t size = raw_obj->ptr()->HeapSize(header); |
285 | uword new_addr = 0; |
286 | // Check whether object should be promoted. |
287 | if (!NewPage::Of(raw_obj)->IsSurvivor(raw_addr)) { |
288 | // Not a survivor of a previous scavenge. Just copy the object into the |
289 | // to space. |
290 | new_addr = TryAllocateCopy(size); |
291 | } |
292 | if (new_addr == 0) { |
293 | // This object is a survivor of a previous scavenge. Attempt to promote |
294 | // the object. (Or, unlikely, to-space was exhausted by fragmentation.) |
295 | new_addr = page_space_->TryAllocatePromoLocked(freelist_, size); |
296 | if (LIKELY(new_addr != 0)) { |
297 | // If promotion succeeded then we need to remember it so that it can |
298 | // be traversed later. |
299 | promoted_list_.Push(ObjectLayout::FromAddr(new_addr)); |
300 | bytes_promoted_ += size; |
301 | } else { |
302 | // Promotion did not succeed. Copy into the to space instead. |
303 | scavenger_->failed_to_promote_ = true; |
304 | new_addr = TryAllocateCopy(size); |
305 | // To-space was exhausted by fragmentation and old-space could not |
306 | // grow. |
307 | if (UNLIKELY(new_addr == 0)) { |
308 | AbortScavenge(); |
309 | } |
310 | } |
311 | } |
312 | ASSERT(new_addr != 0); |
313 | // Copy the object to the new location. |
314 | objcpy(reinterpret_cast<void*>(new_addr), |
315 | reinterpret_cast<void*>(raw_addr), size); |
316 | |
317 | new_obj = ObjectLayout::FromAddr(new_addr); |
318 | if (new_obj->IsOldObject()) { |
319 | // Promoted: update age/barrier tags. |
320 | uint32_t tags = static_cast<uint32_t>(header); |
321 | tags = ObjectLayout::OldBit::update(true, tags); |
322 | tags = ObjectLayout::OldAndNotRememberedBit::update(true, tags); |
323 | tags = ObjectLayout::NewBit::update(false, tags); |
324 | // Setting the forwarding pointer below will make this tenured object |
325 | // visible to the concurrent marker, but we haven't visited its slots |
326 | // yet. We mark the object here to prevent the concurrent marker from |
327 | // adding it to the mark stack and visiting its unprocessed slots. We |
328 | // push it to the mark stack after forwarding its slots. |
329 | tags = ObjectLayout::OldAndNotMarkedBit::update(!thread_->is_marking(), |
330 | tags); |
331 | new_obj->ptr()->tags_ = tags; |
332 | } |
333 | |
334 | intptr_t cid = ObjectLayout::ClassIdTag::decode(header); |
335 | if (IsTypedDataClassId(cid)) { |
336 | static_cast<TypedDataPtr>(new_obj)->ptr()->RecomputeDataField(); |
337 | } |
338 | |
339 | // Try to install forwarding address. |
    uword forwarding_header = ForwardingHeader(new_obj);
341 | if (!InstallForwardingPointer(raw_addr, &header, forwarding_header)) { |
342 | ASSERT(IsForwarding(header)); |
343 | if (new_obj->IsOldObject()) { |
344 | // Abandon as a free list element. |
345 | FreeListElement::AsElement(new_addr, size); |
346 | bytes_promoted_ -= size; |
347 | } else { |
348 | // Undo to-space allocation. |
349 | tail_->Unallocate(new_addr, size); |
350 | } |
351 | // Use the winner's forwarding target. |
352 | new_obj = ForwardedObj(header); |
353 | } |
354 | } |
355 | |
356 | // Update the reference. |
357 | if (!new_obj->IsNewObject()) { |
358 | // Setting the mark bit above must not be ordered after a publishing store |
359 | // of this object. Note this could be a publishing store even if the |
360 | // object was promoted by an early invocation of ScavengePointer. Compare |
361 | // Object::Allocate. |
362 | reinterpret_cast<std::atomic<ObjectPtr>*>(p)->store( |
363 | new_obj, std::memory_order_release); |
364 | } else { |
365 | *p = new_obj; |
366 | } |
367 | // Update the store buffer as needed. |
368 | if (visiting_old_object_ != nullptr) { |
369 | UpdateStoreBuffer(p, new_obj); |
370 | } |
371 | } |
372 | |
373 | DART_FORCE_INLINE |
374 | bool InstallForwardingPointer(uword addr, |
                                uword* old_header,
                                uword new_header) {
377 | if (parallel) { |
378 | return reinterpret_cast<std::atomic<uword>*>(addr) |
379 | ->compare_exchange_strong(*old_header, new_header, |
380 | std::memory_order_relaxed); |
381 | } else { |
382 | *reinterpret_cast<uword*>(addr) = new_header; |
383 | return true; |
384 | } |
385 | } |
386 | |
387 | DART_FORCE_INLINE |
388 | uword TryAllocateCopy(intptr_t size) { |
389 | ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
390 | // TODO(rmacnak): Allocate one to start? |
391 | if (tail_ != nullptr) { |
392 | uword result = tail_->top_; |
393 | ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset); |
394 | uword new_top = result + size; |
395 | if (LIKELY(new_top <= tail_->end_)) { |
396 | tail_->top_ = new_top; |
397 | return result; |
398 | } |
399 | } |
400 | return TryAllocateCopySlow(size); |
401 | } |
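  // A worked example of the fast path above (illustrative numbers, assuming a
  // 64-bit build where kObjectAlignment == 16 and kNewObjectAlignmentOffset ==
  // 8): a page whose top_ is 0x20008 satisfies the alignment ASSERT, so a
  // 32-byte copy bumps top_ to 0x20028 and returns 0x20008, provided the new
  // top does not pass end_. When it would, TryAllocateCopySlow acquires a
  // fresh to-space page, or returns 0 and the caller falls back to promoting
  // the object or, failing that, aborting the scavenge.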
402 | |
403 | DART_NOINLINE inline uword TryAllocateCopySlow(intptr_t size); |
404 | |
405 | DART_NOINLINE DART_NORETURN void AbortScavenge() { |
406 | if (FLAG_verbose_gc) { |
407 | OS::PrintErr("Aborting scavenge\n" ); |
408 | } |
409 | scavenger_->abort_ = true; |
410 | thread_->long_jump_base()->Jump(1, Object::out_of_memory_error()); |
411 | } |
412 | |
413 | inline void ProcessToSpace(); |
414 | DART_FORCE_INLINE intptr_t ProcessCopied(ObjectPtr raw_obj); |
415 | inline void ProcessPromotedList(); |
416 | inline void EnqueueWeakProperty(WeakPropertyPtr raw_weak); |
417 | inline void MournWeakProperties(); |
418 | |
419 | Thread* thread_; |
420 | Scavenger* scavenger_; |
421 | SemiSpace* from_; |
422 | PageSpace* page_space_; |
423 | FreeList* freelist_; |
424 | intptr_t bytes_promoted_; |
425 | ObjectPtr visiting_old_object_; |
426 | |
427 | PromotionWorkList promoted_list_; |
428 | WeakPropertyPtr delayed_weak_properties_ = nullptr; |
429 | |
430 | NewPage* head_ = nullptr; |
431 | NewPage* tail_ = nullptr; // Allocating from here. |
432 | NewPage* scan_ = nullptr; // Resolving from here. |
433 | |
434 | DISALLOW_COPY_AND_ASSIGN(ScavengerVisitorBase); |
435 | }; |
436 | |
437 | typedef ScavengerVisitorBase<false> SerialScavengerVisitor; |
438 | typedef ScavengerVisitorBase<true> ParallelScavengerVisitor; |
439 | |
440 | class ScavengerWeakVisitor : public HandleVisitor { |
441 | public: |
442 | ScavengerWeakVisitor(Thread* thread, Scavenger* scavenger) |
443 | : HandleVisitor(thread), |
444 | scavenger_(scavenger), |
445 | class_table_(thread->isolate_group()->shared_class_table()) { |
446 | ASSERT(scavenger->heap_->isolate_group() == thread->isolate_group()); |
447 | } |
448 | |
449 | void VisitHandle(uword addr) { |
450 | FinalizablePersistentHandle* handle = |
451 | reinterpret_cast<FinalizablePersistentHandle*>(addr); |
452 | ObjectPtr* p = handle->raw_addr(); |
453 | if (scavenger_->IsUnreachable(p)) { |
454 | handle->UpdateUnreachable(thread()->isolate_group()); |
455 | } else { |
456 | handle->UpdateRelocated(thread()->isolate_group()); |
457 | } |
458 | } |
459 | |
460 | private: |
461 | Scavenger* scavenger_; |
462 | SharedClassTable* class_table_; |
463 | |
464 | DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor); |
465 | }; |
466 | |
467 | class ParallelScavengerTask : public ThreadPool::Task { |
468 | public: |
469 | ParallelScavengerTask(IsolateGroup* isolate_group, |
470 | ThreadBarrier* barrier, |
471 | ParallelScavengerVisitor* visitor, |
472 | RelaxedAtomic<uintptr_t>* num_busy) |
473 | : isolate_group_(isolate_group), |
474 | barrier_(barrier), |
475 | visitor_(visitor), |
476 | num_busy_(num_busy) {} |
477 | |
478 | virtual void Run() { |
479 | bool result = Thread::EnterIsolateGroupAsHelper( |
480 | isolate_group_, Thread::kScavengerTask, /*bypass_safepoint=*/true); |
481 | ASSERT(result); |
482 | |
483 | RunEnteredIsolateGroup(); |
484 | |
485 | Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true); |
486 | |
487 | // This task is done. Notify the original thread. |
488 | barrier_->Exit(); |
489 | } |
490 | |
491 | void RunEnteredIsolateGroup() { |
492 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ParallelScavenge" ); |
493 | |
494 | visitor_->ProcessRoots(); |
495 | |
496 | // Phase 1: Copying. |
497 | bool more_to_scavenge = false; |
498 | do { |
499 | do { |
500 | visitor_->ProcessSurvivors(); |
501 | |
502 | // I can't find more work right now. If no other task is busy, |
503 | // then there will never be more work (NB: 1 is *before* decrement). |
504 | if (num_busy_->fetch_sub(1u) == 1) break; |
505 | |
506 | // Wait for some work to appear. |
507 | // TODO(iposva): Replace busy-waiting with a solution using Monitor, |
508 | // and redraw the boundaries between stack/visitor/task as needed. |
509 | while (!visitor_->HasWork() && num_busy_->load() > 0) { |
510 | } |
511 | |
512 | // If no tasks are busy, there will never be more work. |
513 | if (num_busy_->load() == 0) break; |
514 | |
515 | // I saw some work; get busy and compete for it. |
516 | num_busy_->fetch_add(1u); |
517 | } while (true); |
518 | // Wait for all scavengers to stop. |
519 | barrier_->Sync(); |
520 | #if defined(DEBUG) |
521 | ASSERT(num_busy_->load() == 0); |
522 | // Caveat: must not allow any marker to continue past the barrier |
523 | // before we checked num_busy, otherwise one of them might rush |
524 | // ahead and increment it. |
525 | barrier_->Sync(); |
526 | #endif |
527 | // Check if we have any pending properties with marked keys. |
528 | // Those might have been marked by another marker. |
529 | visitor_->ProcessWeakProperties(); |
530 | more_to_scavenge = visitor_->HasWork(); |
531 | if (more_to_scavenge) { |
532 | // We have more work to do. Notify others. |
533 | num_busy_->fetch_add(1u); |
534 | } |
535 | |
536 | // Wait for all other scavengers to finish processing their pending |
537 | // weak properties and decide if they need to continue marking. |
538 | // Caveat: we need two barriers here to make this decision in lock step |
539 | // between all scavengers and the main thread. |
540 | barrier_->Sync(); |
541 | if (!more_to_scavenge && (num_busy_->load() > 0)) { |
542 | // All scavengers continue to mark as long as any single marker has |
543 | // some work to do. |
544 | num_busy_->fetch_add(1u); |
545 | more_to_scavenge = true; |
546 | } |
547 | barrier_->Sync(); |
548 | } while (more_to_scavenge); |
549 | |
550 | // Phase 2: Weak processing, statistics. |
551 | visitor_->Finalize(); |
552 | barrier_->Sync(); |
553 | } |
554 | |
555 | private: |
556 | IsolateGroup* isolate_group_; |
557 | ThreadBarrier* barrier_; |
558 | ParallelScavengerVisitor* visitor_; |
559 | RelaxedAtomic<uintptr_t>* num_busy_; |
560 | |
561 | DISALLOW_COPY_AND_ASSIGN(ParallelScavengerTask); |
562 | }; |
563 | |
564 | SemiSpace::SemiSpace(intptr_t max_capacity_in_words) |
565 | : max_capacity_in_words_(max_capacity_in_words), head_(nullptr) {} |
566 | |
567 | SemiSpace::~SemiSpace() { |
568 | NewPage* page = head_; |
569 | while (page != nullptr) { |
570 | NewPage* next = page->next(); |
571 | page->Deallocate(); |
572 | page = next; |
573 | } |
574 | } |
575 | |
576 | // TODO(rmacnak): Unify this with old-space pages, and possibly zone segments. |
577 | // This cache needs to be at least as big as FLAG_new_gen_semi_max_size or |
// munmap will noticeably impact performance.
579 | static constexpr intptr_t kPageCacheCapacity = 8 * kWordSize; |
580 | static Mutex* page_cache_mutex = nullptr; |
581 | static VirtualMemory* page_cache[kPageCacheCapacity] = {nullptr}; |
582 | static intptr_t page_cache_size = 0; |
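// For a sense of scale (illustrative, assuming a 64-bit build where kWordSize
// is 8): kPageCacheCapacity then allows up to 64 spare page mappings to be
// retained, so a full cache keeps 64 * kNewPageSize bytes of address space
// ready for reuse instead of being munmapped and remapped.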
583 | |
584 | void SemiSpace::Init() { |
585 | ASSERT(page_cache_mutex == nullptr); |
586 | page_cache_mutex = new Mutex(NOT_IN_PRODUCT("page_cache_mutex" )); |
587 | } |
588 | |
589 | void SemiSpace::Cleanup() { |
590 | { |
591 | MutexLocker ml(page_cache_mutex); |
592 | ASSERT(page_cache_size >= 0); |
593 | ASSERT(page_cache_size <= kPageCacheCapacity); |
594 | while (page_cache_size > 0) { |
595 | delete page_cache[--page_cache_size]; |
596 | } |
597 | } |
598 | delete page_cache_mutex; |
599 | page_cache_mutex = nullptr; |
600 | } |
601 | |
602 | intptr_t SemiSpace::CachedSize() { |
603 | return page_cache_size * kNewPageSize; |
604 | } |
605 | |
606 | NewPage* NewPage::Allocate() { |
607 | const intptr_t size = kNewPageSize; |
608 | VirtualMemory* memory = nullptr; |
609 | { |
610 | MutexLocker ml(page_cache_mutex); |
611 | ASSERT(page_cache_size >= 0); |
612 | ASSERT(page_cache_size <= kPageCacheCapacity); |
613 | if (page_cache_size > 0) { |
614 | memory = page_cache[--page_cache_size]; |
615 | } |
616 | } |
617 | if (memory == nullptr) { |
618 | const intptr_t alignment = kNewPageSize; |
619 | const bool is_executable = false; |
620 | const char* const name = Heap::RegionName(Heap::kNew); |
621 | memory = |
622 | VirtualMemory::AllocateAligned(size, alignment, is_executable, name); |
623 | } |
624 | if (memory == nullptr) { |
625 | return nullptr; // Out of memory. |
626 | } |
627 | |
628 | #if defined(DEBUG) |
629 | memset(memory->address(), Heap::kZapByte, size); |
630 | #endif |
631 | // Initialized by generated code. |
632 | MSAN_UNPOISON(memory->address(), size); |
633 | |
634 | NewPage* result = reinterpret_cast<NewPage*>(memory->address()); |
635 | result->memory_ = memory; |
636 | result->next_ = nullptr; |
637 | result->owner_ = nullptr; |
638 | uword top = result->object_start(); |
639 | result->top_ = top; |
640 | result->end_ = memory->end() - kNewObjectAlignmentOffset; |
641 | result->survivor_end_ = top; |
642 | result->resolved_top_ = top; |
643 | |
644 | LSAN_REGISTER_ROOT_REGION(result, sizeof(*result)); |
645 | |
646 | return result; |
647 | } |
648 | |
649 | void NewPage::Deallocate() { |
650 | LSAN_UNREGISTER_ROOT_REGION(this, sizeof(*this)); |
651 | |
652 | VirtualMemory* memory = memory_; |
653 | { |
654 | MutexLocker ml(page_cache_mutex); |
655 | ASSERT(page_cache_size >= 0); |
656 | ASSERT(page_cache_size <= kPageCacheCapacity); |
657 | if (page_cache_size < kPageCacheCapacity) { |
658 | intptr_t size = memory->size(); |
659 | #if defined(DEBUG) |
660 | memset(memory->address(), Heap::kZapByte, size); |
661 | #endif |
662 | MSAN_POISON(memory->address(), size); |
663 | page_cache[page_cache_size++] = memory; |
664 | memory = nullptr; |
665 | } |
666 | } |
667 | delete memory; |
668 | } |
669 | |
670 | NewPage* SemiSpace::TryAllocatePageLocked(bool link) { |
671 | if (capacity_in_words_ >= max_capacity_in_words_) { |
672 | return nullptr; // Full. |
673 | } |
674 | NewPage* page = NewPage::Allocate(); |
675 | if (page == nullptr) { |
    return nullptr;  // Out of memory.
677 | } |
678 | capacity_in_words_ += kNewPageSizeInWords; |
679 | if (link) { |
680 | if (head_ == nullptr) { |
681 | head_ = tail_ = page; |
682 | } else { |
683 | tail_->set_next(page); |
684 | tail_ = page; |
685 | } |
686 | } |
687 | return page; |
688 | } |
689 | |
690 | bool SemiSpace::Contains(uword addr) const { |
691 | for (NewPage* page = head_; page != nullptr; page = page->next()) { |
692 | if (page->Contains(addr)) return true; |
693 | } |
694 | return false; |
695 | } |
696 | |
697 | void SemiSpace::WriteProtect(bool read_only) { |
698 | for (NewPage* page = head_; page != nullptr; page = page->next()) { |
699 | page->WriteProtect(read_only); |
700 | } |
701 | } |
702 | |
703 | void SemiSpace::AddList(NewPage* head, NewPage* tail) { |
704 | if (head == nullptr) { |
705 | return; |
706 | } |
707 | if (head_ == nullptr) { |
708 | head_ = head; |
709 | tail_ = tail; |
710 | return; |
711 | } |
712 | tail_->set_next(head); |
713 | tail_ = tail; |
714 | } |
715 | |
716 | void SemiSpace::MergeFrom(SemiSpace* donor) { |
717 | for (NewPage* page = donor->head_; page != nullptr; page = page->next()) { |
718 | page->Release(); |
719 | } |
720 | |
721 | AddList(donor->head_, donor->tail_); |
722 | capacity_in_words_ += donor->capacity_in_words_; |
723 | |
724 | donor->head_ = nullptr; |
725 | donor->tail_ = nullptr; |
726 | donor->capacity_in_words_ = 0; |
727 | } |
728 | |
729 | // The initial estimate of how many words we can scavenge per microsecond (usage |
730 | // before / scavenge time). This is a conservative value observed running |
731 | // Flutter on a Nexus 4. After the first scavenge, we instead use a value based |
732 | // on the device's actual speed. |
733 | static const intptr_t kConservativeInitialScavengeSpeed = 40; |
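// For instance (illustrative only): at 40 words per microsecond, scavenging a
// 2 MB new space on a 64-bit target (256K words) is estimated at roughly
// 6.5 ms; after the first scavenge the rate measured in stats_history_
// replaces this guess.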
734 | |
735 | Scavenger::Scavenger(Heap* heap, intptr_t max_semi_capacity_in_words) |
736 | : heap_(heap), |
737 | max_semi_capacity_in_words_(max_semi_capacity_in_words), |
738 | scavenging_(false), |
739 | gc_time_micros_(0), |
740 | collections_(0), |
741 | scavenge_words_per_micro_(kConservativeInitialScavengeSpeed), |
742 | idle_scavenge_threshold_in_words_(0), |
743 | external_size_(0), |
744 | failed_to_promote_(false), |
745 | abort_(false) { |
746 | // Verify assumptions about the first word in objects which the scavenger is |
747 | // going to use for forwarding pointers. |
748 | ASSERT(Object::tags_offset() == 0); |
749 | |
750 | // Set initial semi space size in words. |
751 | const intptr_t initial_semi_capacity_in_words = Utils::Minimum( |
752 | max_semi_capacity_in_words, FLAG_new_gen_semi_initial_size * MBInWords); |
753 | |
754 | to_ = new SemiSpace(initial_semi_capacity_in_words); |
755 | idle_scavenge_threshold_in_words_ = initial_semi_capacity_in_words; |
756 | |
757 | UpdateMaxHeapCapacity(); |
758 | UpdateMaxHeapUsage(); |
759 | } |
760 | |
761 | Scavenger::~Scavenger() { |
762 | ASSERT(!scavenging_); |
763 | delete to_; |
764 | } |
765 | |
766 | intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const { |
767 | if (stats_history_.Size() == 0) { |
768 | return old_size_in_words; |
769 | } |
770 | double garbage = stats_history_.Get(0).ExpectedGarbageFraction(); |
771 | if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) { |
772 | return Utils::Minimum(max_semi_capacity_in_words_, |
773 | old_size_in_words * FLAG_new_gen_growth_factor); |
774 | } else { |
775 | return old_size_in_words; |
776 | } |
777 | } |
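// A worked example of the growth policy above (using the default flag values
// new_gen_garbage_threshold=90 and new_gen_growth_factor=2): if the most
// recent ScavengeStats expect only 60% of the semispace to be garbage, the
// next semispace is doubled (capped at max_semi_capacity_in_words_); if 95%
// is expected to be garbage, the current size is kept.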
778 | |
779 | class CollectStoreBufferVisitor : public ObjectPointerVisitor { |
780 | public: |
781 | explicit CollectStoreBufferVisitor(ObjectSet* in_store_buffer) |
782 | : ObjectPointerVisitor(IsolateGroup::Current()), |
783 | in_store_buffer_(in_store_buffer) {} |
784 | |
785 | void VisitPointers(ObjectPtr* from, ObjectPtr* to) { |
786 | for (ObjectPtr* ptr = from; ptr <= to; ptr++) { |
787 | ObjectPtr raw_obj = *ptr; |
788 | RELEASE_ASSERT(!raw_obj->ptr()->IsCardRemembered()); |
789 | RELEASE_ASSERT(raw_obj->ptr()->IsRemembered()); |
790 | RELEASE_ASSERT(raw_obj->IsOldObject()); |
791 | in_store_buffer_->Add(raw_obj); |
792 | } |
793 | } |
794 | |
795 | private: |
796 | ObjectSet* const in_store_buffer_; |
797 | }; |
798 | |
799 | class CheckStoreBufferVisitor : public ObjectVisitor, |
800 | public ObjectPointerVisitor { |
801 | public: |
802 | CheckStoreBufferVisitor(ObjectSet* in_store_buffer, const SemiSpace* to) |
803 | : ObjectVisitor(), |
804 | ObjectPointerVisitor(IsolateGroup::Current()), |
805 | in_store_buffer_(in_store_buffer), |
806 | to_(to) {} |
807 | |
808 | void VisitObject(ObjectPtr raw_obj) { |
809 | if (raw_obj->IsPseudoObject()) return; |
810 | RELEASE_ASSERT(raw_obj->IsOldObject()); |
811 | |
812 | if (raw_obj->ptr()->IsCardRemembered()) { |
813 | RELEASE_ASSERT(!raw_obj->ptr()->IsRemembered()); |
814 | // TODO(rmacnak): Verify card tables. |
815 | return; |
816 | } |
817 | |
818 | RELEASE_ASSERT(raw_obj->ptr()->IsRemembered() == |
819 | in_store_buffer_->Contains(raw_obj)); |
820 | |
821 | visiting_ = raw_obj; |
822 | is_remembered_ = raw_obj->ptr()->IsRemembered(); |
823 | raw_obj->ptr()->VisitPointers(this); |
824 | } |
825 | |
826 | void VisitPointers(ObjectPtr* from, ObjectPtr* to) { |
827 | for (ObjectPtr* ptr = from; ptr <= to; ptr++) { |
828 | ObjectPtr raw_obj = *ptr; |
829 | if (raw_obj->IsHeapObject() && raw_obj->IsNewObject()) { |
830 | if (!is_remembered_) { |
831 | FATAL3( |
832 | "Old object %#" Px "references new object %#" Px |
833 | ", but it is not" |
834 | " in any store buffer. Consider using rr to watch the slot %p and" |
835 | " reverse-continue to find the store with a missing barrier.\n" , |
836 | static_cast<uword>(visiting_), static_cast<uword>(raw_obj), ptr); |
837 | } |
838 | RELEASE_ASSERT(to_->Contains(ObjectLayout::ToAddr(raw_obj))); |
839 | } |
840 | } |
841 | } |
842 | |
843 | private: |
844 | const ObjectSet* const in_store_buffer_; |
845 | const SemiSpace* const to_; |
846 | ObjectPtr visiting_; |
847 | bool is_remembered_; |
848 | }; |
849 | |
850 | void Scavenger::VerifyStoreBuffers() { |
851 | Thread* thread = Thread::Current(); |
852 | StackZone stack_zone(thread); |
853 | Zone* zone = stack_zone.GetZone(); |
854 | |
855 | ObjectSet* in_store_buffer = new (zone) ObjectSet(zone); |
856 | heap_->AddRegionsToObjectSet(in_store_buffer); |
857 | |
858 | { |
859 | CollectStoreBufferVisitor visitor(in_store_buffer); |
860 | heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor); |
861 | } |
862 | |
863 | { |
864 | CheckStoreBufferVisitor visitor(in_store_buffer, to_); |
865 | heap_->old_space()->VisitObjects(&visitor); |
866 | } |
867 | } |
868 | |
869 | SemiSpace* Scavenger::Prologue() { |
870 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Prologue" ); |
871 | |
872 | heap_->isolate_group()->ReleaseStoreBuffers(); |
873 | |
874 | if (FLAG_verify_store_buffer) { |
875 | OS::PrintErr("Verifying remembered set before Scavenge..." ); |
876 | heap_->WaitForSweeperTasksAtSafepoint(Thread::Current()); |
877 | VerifyStoreBuffers(); |
878 | OS::PrintErr(" done.\n" ); |
879 | } |
880 | |
881 | // Need to stash the old remembered set before any worker begins adding to the |
882 | // new remembered set. |
883 | blocks_ = heap_->isolate_group()->store_buffer()->TakeBlocks(); |
884 | |
885 | // Flip the two semi-spaces so that to_ is always the space for allocating |
886 | // objects. |
887 | SemiSpace* from = to_; |
888 | |
889 | to_ = new SemiSpace(NewSizeInWords(from->max_capacity_in_words())); |
890 | UpdateMaxHeapCapacity(); |
891 | |
892 | return from; |
893 | } |
894 | |
895 | void Scavenger::Epilogue(SemiSpace* from) { |
896 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Epilogue" ); |
897 | |
898 | // All objects in the to space have been copied from the from space at this |
899 | // moment. |
900 | |
  // Ensure the mutator thread will fail the next allocation. This will force
  // the mutator to allocate a new TLAB.
903 | #if defined(DEBUG) |
904 | heap_->isolate_group()->ForEachIsolate( |
905 | [&](Isolate* isolate) { |
906 | Thread* mutator_thread = isolate->mutator_thread(); |
907 | ASSERT(mutator_thread == nullptr || mutator_thread->top() == 0); |
908 | }, |
909 | /*at_safepoint=*/true); |
910 | #endif // DEBUG |
911 | |
912 | double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction(); |
913 | if (stats_history_.Size() >= 2) { |
914 | // Previous scavenge is only given half as much weight. |
915 | avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction(); |
916 | avg_frac /= 1.0 + 0.5; // Normalize. |
917 | } |
918 | |
919 | early_tenure_ = avg_frac >= (FLAG_early_tenuring_threshold / 100.0); |
920 | |
921 | // Update estimate of scavenger speed. This statistic assumes survivorship |
922 | // rates don't change much. |
923 | intptr_t history_used = 0; |
924 | intptr_t history_micros = 0; |
925 | ASSERT(stats_history_.Size() > 0); |
926 | for (intptr_t i = 0; i < stats_history_.Size(); i++) { |
927 | history_used += stats_history_.Get(i).UsedBeforeInWords(); |
928 | history_micros += stats_history_.Get(i).DurationMicros(); |
929 | } |
930 | if (history_micros == 0) { |
931 | history_micros = 1; |
932 | } |
933 | scavenge_words_per_micro_ = history_used / history_micros; |
934 | if (scavenge_words_per_micro_ == 0) { |
935 | scavenge_words_per_micro_ = 1; |
936 | } |
937 | |
938 | // Update amount of new-space we must allocate before performing an idle |
939 | // scavenge. This is based on the amount of work we expect to be able to |
940 | // complete in a typical idle period. |
941 | intptr_t average_idle_task_micros = 6000; |
942 | idle_scavenge_threshold_in_words_ = |
943 | scavenge_words_per_micro_ * average_idle_task_micros; |
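  // Putting this together (illustrative): at 300 words per microsecond and the
  // assumed 6000 microsecond idle task, the raw threshold is 1.8M words; the
  // clamps below keep it between 512 KB worth of words and 80% of the current
  // new-space capacity.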
944 | // Even if the scavenge speed is slow, make sure we don't scavenge too |
945 | // frequently, which just wastes power and falsely increases the promotion |
946 | // rate. |
947 | intptr_t lower_bound = 512 * KBInWords; |
948 | if (idle_scavenge_threshold_in_words_ < lower_bound) { |
949 | idle_scavenge_threshold_in_words_ = lower_bound; |
950 | } |
951 | // Even if the scavenge speed is very high, make sure we start considering |
952 | // idle scavenges before new space is full to avoid requiring a scavenge in |
953 | // the middle of a frame. |
954 | intptr_t upper_bound = 8 * CapacityInWords() / 10; |
955 | if (idle_scavenge_threshold_in_words_ > upper_bound) { |
956 | idle_scavenge_threshold_in_words_ = upper_bound; |
957 | } |
958 | |
959 | if (FLAG_verify_store_buffer) { |
    // Scavenging inserts into the store buffer block on the current thread
    // (later, with a parallel scavenge, on the worker threads). We need to
962 | // flush this thread-local block to the isolate group or we will incorrectly |
963 | // report some objects as absent from the store buffer. This might cause |
964 | // a program to hit a store buffer overflow a bit sooner than it might |
965 | // otherwise, since overflow is measured in blocks. Store buffer overflows |
966 | // are very rare. |
967 | heap_->isolate_group()->ReleaseStoreBuffers(); |
968 | |
969 | OS::PrintErr("Verifying remembered set after Scavenge..." ); |
970 | heap_->WaitForSweeperTasksAtSafepoint(Thread::Current()); |
971 | VerifyStoreBuffers(); |
972 | OS::PrintErr(" done.\n" ); |
973 | } |
974 | |
975 | delete from; |
976 | UpdateMaxHeapUsage(); |
977 | if (heap_ != NULL) { |
978 | heap_->UpdateGlobalMaxUsed(); |
979 | } |
980 | } |
981 | |
982 | bool Scavenger::ShouldPerformIdleScavenge(int64_t deadline) { |
983 | // To make a consistent decision, we should not yield for a safepoint in the |
984 | // middle of deciding whether to perform an idle GC. |
985 | NoSafepointScope no_safepoint; |
986 | |
987 | // TODO(rmacnak): Investigate collecting a history of idle period durations. |
988 | intptr_t used_in_words = UsedInWords(); |
989 | // Normal reason: new space is getting full. |
990 | bool for_new_space = used_in_words >= idle_scavenge_threshold_in_words_; |
991 | // New-space objects are roots during old-space GC. This means that even |
992 | // unreachable new-space objects prevent old-space objects they reference |
993 | // from being collected during an old-space GC. Normally this is not an |
994 | // issue because new-space GCs run much more frequently than old-space GCs. |
995 | // If new-space allocation is low and direct old-space allocation is high, |
996 | // which can happen in a program that allocates large objects and little |
997 | // else, old-space can fill up with unreachable objects until the next |
998 | // new-space GC. This check is the idle equivalent to the |
999 | // new-space GC before synchronous-marking in CollectMostGarbage. |
1000 | bool for_old_space = heap_->last_gc_was_old_space_ && |
1001 | heap_->old_space()->ReachedIdleThreshold(); |
1002 | if (!for_new_space && !for_old_space) { |
1003 | return false; |
1004 | } |
1005 | |
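  // Illustrative check of the estimate below: with 1M words currently in use
  // and a measured rate of 200 words per microsecond, the scavenge is
  // predicted to take about 5 ms, so an idle scavenge is started only if at
  // least that much time remains before the deadline.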
1006 | int64_t estimated_scavenge_completion = |
1007 | OS::GetCurrentMonotonicMicros() + |
1008 | used_in_words / scavenge_words_per_micro_; |
1009 | return estimated_scavenge_completion <= deadline; |
1010 | } |
1011 | |
1012 | void Scavenger::IterateIsolateRoots(ObjectPointerVisitor* visitor) { |
1013 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateIsolateRoots" ); |
1014 | heap_->isolate_group()->VisitObjectPointers( |
1015 | visitor, ValidationPolicy::kDontValidateFrames); |
1016 | } |
1017 | |
1018 | template <bool parallel> |
1019 | void Scavenger::IterateStoreBuffers(ScavengerVisitorBase<parallel>* visitor) { |
1020 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateStoreBuffers" ); |
1021 | |
1022 | // Iterating through the store buffers. |
1023 | // Grab the deduplication sets out of the isolate's consolidated store buffer. |
1024 | StoreBuffer* store_buffer = heap_->isolate_group()->store_buffer(); |
1025 | StoreBufferBlock* pending = blocks_; |
1026 | blocks_ = nullptr; |
1027 | intptr_t total_count = 0; |
1028 | while (pending != NULL) { |
1029 | StoreBufferBlock* next = pending->next(); |
1030 | // Generated code appends to store buffers; tell MemorySanitizer. |
1031 | MSAN_UNPOISON(pending, sizeof(*pending)); |
1032 | intptr_t count = pending->Count(); |
1033 | total_count += count; |
1034 | while (!pending->IsEmpty()) { |
1035 | ObjectPtr raw_object = pending->Pop(); |
1036 | ASSERT(!raw_object->IsForwardingCorpse()); |
1037 | ASSERT(raw_object->ptr()->IsRemembered()); |
1038 | raw_object->ptr()->ClearRememberedBit(); |
1039 | visitor->VisitingOldObject(raw_object); |
1040 | // Note that this treats old-space WeakProperties as strong. A dead key |
1041 | // won't be reclaimed until after the key is promoted. |
1042 | raw_object->ptr()->VisitPointersNonvirtual(visitor); |
1043 | } |
1044 | pending->Reset(); |
1045 | // Return the emptied block for recycling (no need to check threshold). |
1046 | store_buffer->PushBlock(pending, StoreBuffer::kIgnoreThreshold); |
1047 | pending = next; |
1048 | } |
1049 | // Done iterating through old objects remembered in the store buffers. |
1050 | visitor->VisitingOldObject(NULL); |
1051 | |
1052 | heap_->RecordData(kStoreBufferEntries, total_count); |
1053 | heap_->RecordData(kDataUnused1, 0); |
1054 | heap_->RecordData(kDataUnused2, 0); |
1055 | } |
1056 | |
1057 | template <bool parallel> |
1058 | void Scavenger::IterateRememberedCards( |
1059 | ScavengerVisitorBase<parallel>* visitor) { |
1060 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateRememberedCards" ); |
1061 | heap_->old_space()->VisitRememberedCards(visitor); |
1062 | visitor->VisitingOldObject(NULL); |
1063 | } |
1064 | |
1065 | void Scavenger::IterateObjectIdTable(ObjectPointerVisitor* visitor) { |
1066 | #ifndef PRODUCT |
1067 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateObjectIdTable" ); |
1068 | heap_->isolate_group()->VisitObjectIdRingPointers(visitor); |
1069 | #endif // !PRODUCT |
1070 | } |
1071 | |
1072 | enum RootSlices { |
1073 | kIsolate = 0, |
1074 | kObjectIdRing, |
1075 | kCards, |
1076 | kStoreBuffer, |
1077 | kNumRootSlices, |
1078 | }; |
1079 | |
1080 | template <bool parallel> |
1081 | void Scavenger::IterateRoots(ScavengerVisitorBase<parallel>* visitor) { |
1082 | for (;;) { |
1083 | intptr_t slice = root_slices_started_.fetch_add(1); |
1084 | if (slice >= kNumRootSlices) { |
1085 | return; // No more slices. |
1086 | } |
1087 | |
1088 | switch (slice) { |
1089 | case kIsolate: |
1090 | IterateIsolateRoots(visitor); |
1091 | break; |
1092 | case kObjectIdRing: |
1093 | IterateObjectIdTable(visitor); |
1094 | break; |
1095 | case kCards: |
1096 | IterateRememberedCards(visitor); |
1097 | break; |
1098 | case kStoreBuffer: |
1099 | IterateStoreBuffers(visitor); |
1100 | break; |
1101 | default: |
1102 | UNREACHABLE(); |
1103 | } |
1104 | } |
1105 | } |
1106 | |
1107 | bool Scavenger::IsUnreachable(ObjectPtr* p) { |
1108 | ObjectPtr raw_obj = *p; |
1109 | if (!raw_obj->IsHeapObject()) { |
1110 | return false; |
1111 | } |
1112 | if (!raw_obj->IsNewObject()) { |
1113 | return false; |
1114 | } |
1115 | uword raw_addr = ObjectLayout::ToAddr(raw_obj); |
1116 | if (to_->Contains(raw_addr)) { |
1117 | return false; |
1118 | } |
  uword header = *reinterpret_cast<uword*>(raw_addr);
1120 | if (IsForwarding(header)) { |
1121 | *p = ForwardedObj(header); |
1122 | return false; |
1123 | } |
1124 | return true; |
1125 | } |
1126 | |
1127 | void Scavenger::MournWeakHandles() { |
1128 | Thread* thread = Thread::Current(); |
1129 | TIMELINE_FUNCTION_GC_DURATION(thread, "MournWeakHandles" ); |
1130 | ScavengerWeakVisitor weak_visitor(thread, this); |
1131 | heap_->isolate_group()->VisitWeakPersistentHandles(&weak_visitor); |
1132 | } |
1133 | |
1134 | template <bool parallel> |
1135 | void ScavengerVisitorBase<parallel>::ProcessToSpace() { |
1136 | while (scan_ != nullptr) { |
1137 | uword resolved_top = scan_->resolved_top_; |
1138 | while (resolved_top < scan_->top_) { |
1139 | ObjectPtr raw_obj = ObjectLayout::FromAddr(resolved_top); |
1140 | resolved_top += ProcessCopied(raw_obj); |
1141 | } |
1142 | scan_->resolved_top_ = resolved_top; |
1143 | |
1144 | NewPage* next = scan_->next(); |
1145 | if (next == nullptr) { |
1146 | // Don't update scan_. More objects may yet be copied to this TLAB. |
1147 | return; |
1148 | } |
1149 | scan_ = next; |
1150 | } |
1151 | } |
1152 | |
1153 | template <bool parallel> |
1154 | void ScavengerVisitorBase<parallel>::ProcessPromotedList() { |
1155 | ObjectPtr raw_object; |
1156 | while ((raw_object = promoted_list_.Pop()) != nullptr) { |
1157 | // Resolve or copy all objects referred to by the current object. This |
1158 | // can potentially push more objects on this stack as well as add more |
1159 | // objects to be resolved in the to space. |
1160 | ASSERT(!raw_object->ptr()->IsRemembered()); |
1161 | VisitingOldObject(raw_object); |
1162 | raw_object->ptr()->VisitPointersNonvirtual(this); |
1163 | if (raw_object->ptr()->IsMarked()) { |
1164 | // Complete our promise from ScavengePointer. Note that marker cannot |
1165 | // visit this object until it pops a block from the mark stack, which |
1166 | // involves a memory fence from the mutex, so even on architectures |
1167 | // with a relaxed memory model, the marker will see the fully |
1168 | // forwarded contents of this object. |
1169 | thread_->MarkingStackAddObject(raw_object); |
1170 | } |
1171 | } |
1172 | VisitingOldObject(NULL); |
1173 | } |
1174 | |
1175 | template <bool parallel> |
1176 | void ScavengerVisitorBase<parallel>::ProcessWeakProperties() { |
1177 | if (scavenger_->abort_) return; |
1178 | |
1179 | // Finished this round of scavenging. Process the pending weak properties |
1180 | // for which the keys have become reachable. Potentially this adds more |
1181 | // objects to the to space. |
1182 | WeakPropertyPtr cur_weak = delayed_weak_properties_; |
1183 | delayed_weak_properties_ = nullptr; |
1184 | while (cur_weak != nullptr) { |
1185 | uword next_weak = cur_weak->ptr()->next_; |
1186 | // Promoted weak properties are not enqueued. So we can guarantee that |
1187 | // we do not need to think about store barriers here. |
1188 | ASSERT(cur_weak->IsNewObject()); |
1189 | ObjectPtr raw_key = cur_weak->ptr()->key_; |
1190 | ASSERT(raw_key->IsHeapObject()); |
1191 | // Key still points into from space even if the object has been |
1192 | // promoted to old space by now. The key will be updated accordingly |
1193 | // below when VisitPointers is run. |
1194 | ASSERT(raw_key->IsNewObject()); |
1195 | uword raw_addr = ObjectLayout::ToAddr(raw_key); |
1196 | ASSERT(from_->Contains(raw_addr)); |
    uword header = *reinterpret_cast<uword*>(raw_addr);
1198 | // Reset the next pointer in the weak property. |
1199 | cur_weak->ptr()->next_ = 0; |
1200 | if (IsForwarding(header)) { |
1201 | cur_weak->ptr()->VisitPointersNonvirtual(this); |
1202 | } else { |
1203 | EnqueueWeakProperty(cur_weak); |
1204 | } |
1205 | // Advance to next weak property in the queue. |
1206 | cur_weak = static_cast<WeakPropertyPtr>(next_weak); |
1207 | } |
1208 | } |
1209 | |
1210 | void Scavenger::UpdateMaxHeapCapacity() { |
1211 | if (heap_ == NULL) { |
1212 | // Some unit tests. |
1213 | return; |
1214 | } |
1215 | ASSERT(to_ != NULL); |
1216 | ASSERT(heap_ != NULL); |
1217 | auto isolate_group = heap_->isolate_group(); |
1218 | ASSERT(isolate_group != NULL); |
1219 | isolate_group->GetHeapNewCapacityMaxMetric()->SetValue( |
1220 | to_->max_capacity_in_words() * kWordSize); |
1221 | } |
1222 | |
1223 | void Scavenger::UpdateMaxHeapUsage() { |
1224 | if (heap_ == NULL) { |
1225 | // Some unit tests. |
1226 | return; |
1227 | } |
1228 | ASSERT(to_ != NULL); |
1229 | ASSERT(heap_ != NULL); |
1230 | auto isolate_group = heap_->isolate_group(); |
1231 | ASSERT(isolate_group != NULL); |
1232 | isolate_group->GetHeapNewUsedMaxMetric()->SetValue(UsedInWords() * kWordSize); |
1233 | } |
1234 | |
1235 | template <bool parallel> |
1236 | void ScavengerVisitorBase<parallel>::EnqueueWeakProperty( |
1237 | WeakPropertyPtr raw_weak) { |
1238 | ASSERT(raw_weak->IsHeapObject()); |
1239 | ASSERT(raw_weak->IsNewObject()); |
1240 | ASSERT(raw_weak->IsWeakProperty()); |
1241 | #if defined(DEBUG) |
1242 | uword raw_addr = ObjectLayout::ToAddr(raw_weak); |
1243 | uword header = *reinterpret_cast<uword*>(raw_addr); |
1244 | ASSERT(!IsForwarding(header)); |
1245 | #endif // defined(DEBUG) |
1246 | ASSERT(raw_weak->ptr()->next_ == 0); |
1247 | raw_weak->ptr()->next_ = static_cast<uword>(delayed_weak_properties_); |
1248 | delayed_weak_properties_ = raw_weak; |
1249 | } |
1250 | |
1251 | template <bool parallel> |
1252 | intptr_t ScavengerVisitorBase<parallel>::ProcessCopied(ObjectPtr raw_obj) { |
1253 | intptr_t class_id = raw_obj->GetClassId(); |
1254 | if (UNLIKELY(class_id == kWeakPropertyCid)) { |
1255 | WeakPropertyPtr raw_weak = static_cast<WeakPropertyPtr>(raw_obj); |
1256 | // The fate of the weak property is determined by its key. |
1257 | ObjectPtr raw_key = raw_weak->ptr()->key_; |
1258 | if (raw_key->IsHeapObject() && raw_key->IsNewObject()) { |
1259 | uword raw_addr = ObjectLayout::ToAddr(raw_key); |
      uword header = *reinterpret_cast<uword*>(raw_addr);
1261 | if (!IsForwarding(header)) { |
1262 | // Key is white. Enqueue the weak property. |
1263 | EnqueueWeakProperty(raw_weak); |
1264 | return raw_weak->ptr()->HeapSize(); |
1265 | } |
1266 | } |
1267 | // Key is gray or black. Make the weak property black. |
1268 | } |
1269 | return raw_obj->ptr()->VisitPointersNonvirtual(this); |
1270 | } |
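// Sketch of the weak-property handling implied by ProcessCopied,
// ProcessWeakProperties and MournWeakProperties (a summary, not new behavior):
// a WeakProperty copied to to-space whose key is an unforwarded new-space
// object is parked on delayed_weak_properties_. Each ProcessWeakProperties
// pass drains that queue; once a key has been forwarded (proven reachable) the
// property's fields are visited like any other survivor, updating its key and
// value. Properties whose keys never get forwarded are cleared by
// MournWeakProperties at the end of the cycle.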
1271 | |
1272 | void Scavenger::MournWeakTables() { |
1273 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "MournWeakTables" ); |
1274 | |
1275 | auto rehash_weak_table = [](WeakTable* table, WeakTable* replacement_new, |
1276 | WeakTable* replacement_old) { |
1277 | intptr_t size = table->size(); |
1278 | for (intptr_t i = 0; i < size; i++) { |
1279 | if (table->IsValidEntryAtExclusive(i)) { |
1280 | ObjectPtr raw_obj = table->ObjectAtExclusive(i); |
1281 | ASSERT(raw_obj->IsHeapObject()); |
1282 | uword raw_addr = ObjectLayout::ToAddr(raw_obj); |
        uword header = *reinterpret_cast<uword*>(raw_addr);
1284 | if (IsForwarding(header)) { |
1285 | // The object has survived. Preserve its record. |
1286 | raw_obj = ForwardedObj(header); |
1287 | auto replacement = |
1288 | raw_obj->IsNewObject() ? replacement_new : replacement_old; |
1289 | replacement->SetValueExclusive(raw_obj, table->ValueAtExclusive(i)); |
1290 | } |
1291 | } |
1292 | } |
1293 | }; |
1294 | |
1295 | // Rehash the weak tables now that we know which objects survive this cycle. |
1296 | for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) { |
1297 | const auto selector = static_cast<Heap::WeakSelector>(sel); |
1298 | auto table = heap_->GetWeakTable(Heap::kNew, selector); |
1299 | auto table_old = heap_->GetWeakTable(Heap::kOld, selector); |
1300 | |
1301 | // Create a new weak table for the new-space. |
1302 | auto table_new = WeakTable::NewFrom(table); |
1303 | rehash_weak_table(table, table_new, table_old); |
1304 | heap_->SetWeakTable(Heap::kNew, selector, table_new); |
1305 | |
1306 | // Remove the old table as it has been replaced with the newly allocated |
1307 | // table above. |
1308 | delete table; |
1309 | } |
1310 | |
1311 | // Each isolate might have a weak table used for fast snapshot writing (i.e. |
1312 | // isolate communication). Rehash those tables if need be. |
1313 | heap_->isolate_group()->ForEachIsolate( |
1314 | [&](Isolate* isolate) { |
1315 | auto table = isolate->forward_table_new(); |
1316 | if (table != nullptr) { |
1317 | auto replacement = WeakTable::NewFrom(table); |
1318 | rehash_weak_table(table, replacement, isolate->forward_table_old()); |
1319 | isolate->set_forward_table_new(replacement); |
1320 | } |
1321 | }, |
1322 | /*at_safepoint=*/true); |
1323 | } |
1324 | |
1325 | template <bool parallel> |
1326 | void ScavengerVisitorBase<parallel>::MournWeakProperties() { |
1327 | ASSERT(!scavenger_->abort_); |
1328 | |
1329 | // The queued weak properties at this point do not refer to reachable keys, |
1330 | // so we clear their key and value fields. |
1331 | WeakPropertyPtr cur_weak = delayed_weak_properties_; |
1332 | delayed_weak_properties_ = nullptr; |
1333 | while (cur_weak != nullptr) { |
1334 | uword next_weak = cur_weak->ptr()->next_; |
1335 | // Reset the next pointer in the weak property. |
1336 | cur_weak->ptr()->next_ = 0; |
1337 | |
1338 | #if defined(DEBUG) |
1339 | ObjectPtr raw_key = cur_weak->ptr()->key_; |
1340 | uword raw_addr = ObjectLayout::ToAddr(raw_key); |
1341 | uword header = *reinterpret_cast<uword*>(raw_addr); |
1342 | ASSERT(!IsForwarding(header)); |
1343 | ASSERT(raw_key->IsHeapObject()); |
1344 | ASSERT(raw_key->IsNewObject()); // Key still points into from space. |
1345 | #endif // defined(DEBUG) |
1346 | |
1347 | WeakProperty::Clear(cur_weak); |
1348 | |
1349 | // Advance to next weak property in the queue. |
1350 | cur_weak = static_cast<WeakPropertyPtr>(next_weak); |
1351 | } |
1352 | } |
1353 | |
1354 | void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
1355 | ASSERT(Thread::Current()->IsAtSafepoint() || |
1356 | (Thread::Current()->task_kind() == Thread::kMarkerTask) || |
1357 | (Thread::Current()->task_kind() == Thread::kCompactorTask)); |
1358 | for (NewPage* page = to_->head(); page != nullptr; page = page->next()) { |
1359 | page->VisitObjectPointers(visitor); |
1360 | } |
1361 | } |
1362 | |
1363 | void Scavenger::VisitObjects(ObjectVisitor* visitor) const { |
1364 | ASSERT(Thread::Current()->IsAtSafepoint() || |
1365 | (Thread::Current()->task_kind() == Thread::kMarkerTask)); |
1366 | for (NewPage* page = to_->head(); page != nullptr; page = page->next()) { |
1367 | page->VisitObjects(visitor); |
1368 | } |
1369 | } |
1370 | |
1371 | void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const { |
1372 | for (NewPage* page = to_->head(); page != nullptr; page = page->next()) { |
1373 | set->AddRegion(page->start(), page->end()); |
1374 | } |
1375 | } |
1376 | |
1377 | ObjectPtr Scavenger::FindObject(FindObjectVisitor* visitor) { |
1378 | ASSERT(!scavenging_); |
1379 | for (NewPage* page = to_->head(); page != nullptr; page = page->next()) { |
1380 | uword cur = page->object_start(); |
1381 | if (!visitor->VisitRange(cur, page->object_end())) continue; |
1382 | while (cur < page->object_end()) { |
1383 | ObjectPtr raw_obj = ObjectLayout::FromAddr(cur); |
1384 | uword next = cur + raw_obj->ptr()->HeapSize(); |
1385 | if (visitor->VisitRange(cur, next) && |
1386 | raw_obj->ptr()->FindObject(visitor)) { |
1387 | return raw_obj; // Found object, return it. |
1388 | } |
1389 | cur = next; |
1390 | } |
1391 | ASSERT(cur == page->object_end()); |
1392 | } |
1393 | return Object::null(); |
1394 | } |
1395 | |
1396 | void Scavenger::TryAllocateNewTLAB(Thread* thread, intptr_t min_size) { |
1397 | ASSERT(heap_ != Dart::vm_isolate()->heap()); |
1398 | ASSERT(!scavenging_); |
1399 | |
1400 | AbandonRemainingTLAB(thread); |
1401 | |
1402 | MutexLocker ml(&space_lock_); |
1403 | for (NewPage* page = to_->head(); page != nullptr; page = page->next()) { |
1404 | if (page->owner() != nullptr) continue; |
1405 | intptr_t available = page->end() - page->object_end(); |
1406 | if (available >= min_size) { |
1407 | page->Acquire(thread); |
1408 | return; |
1409 | } |
1410 | } |
1411 | |
1412 | NewPage* page = to_->TryAllocatePageLocked(true); |
1413 | if (page == nullptr) { |
1414 | return; |
1415 | } |
1416 | page->Acquire(thread); |
1417 | } |
1418 | |
1419 | void Scavenger::AbandonRemainingTLABForDebugging(Thread* thread) { |
1420 | // Allocate any remaining space so the TLAB won't be reused. Write a filler |
1421 | // object so it remains iterable. |
1422 | uword top = thread->top(); |
1423 | intptr_t size = thread->end() - thread->top(); |
1424 | if (size > 0) { |
1425 | thread->set_top(top + size); |
1426 | ForwardingCorpse::AsForwarder(top, size); |
1427 | } |
1428 | |
1429 | AbandonRemainingTLAB(thread); |
1430 | } |
1431 | |
1432 | void Scavenger::AbandonRemainingTLAB(Thread* thread) { |
1433 | if (thread->top() == 0) return; |
1434 | NewPage* page = NewPage::Of(thread->top() - 1); |
1435 | { |
1436 | MutexLocker ml(&space_lock_); |
1437 | page->Release(thread); |
1438 | } |
1439 | ASSERT(thread->top() == 0); |
1440 | } |
1441 | |
1442 | template <bool parallel> |
1443 | uword ScavengerVisitorBase<parallel>::TryAllocateCopySlow(intptr_t size) { |
1444 | NewPage* page; |
1445 | { |
1446 | MutexLocker ml(&scavenger_->space_lock_); |
1447 | page = scavenger_->to_->TryAllocatePageLocked(false); |
1448 | } |
1449 | if (page == nullptr) { |
1450 | return 0; |
1451 | } |
1452 | |
1453 | if (head_ == nullptr) { |
1454 | head_ = scan_ = page; |
1455 | } else { |
1456 | ASSERT(scan_ != nullptr); |
1457 | tail_->set_next(page); |
1458 | } |
1459 | tail_ = page; |
1460 | |
1461 | return tail_->TryAllocateGC(size); |
1462 | } |
1463 | |
1464 | void Scavenger::Scavenge() { |
1465 | int64_t start = OS::GetCurrentMonotonicMicros(); |
1466 | |
1467 | // Ensure that all threads for this isolate are at a safepoint (either stopped |
1468 | // or in native code). If two threads are racing at this point, the loser |
1469 | // will continue with its scavenge after waiting for the winner to complete. |
1470 | // TODO(koda): Consider moving SafepointThreads into allocation failure/retry |
1471 | // logic to avoid needless collections. |
1472 | Thread* thread = Thread::Current(); |
1473 | SafepointOperationScope safepoint_scope(thread); |
1474 | |
1475 | int64_t safe_point = OS::GetCurrentMonotonicMicros(); |
1476 | heap_->RecordTime(kSafePoint, safe_point - start); |
1477 | |
1478 | // Scavenging is not reentrant. Make sure that is the case. |
1479 | ASSERT(!scavenging_); |
1480 | scavenging_ = true; |
1481 | |
1482 | if (FLAG_verify_before_gc) { |
1483 | OS::PrintErr("Verifying before Scavenge..." ); |
1484 | heap_->WaitForSweeperTasksAtSafepoint(thread); |
1485 | heap_->VerifyGC(thread->is_marking() ? kAllowMarked : kForbidMarked); |
1486 | OS::PrintErr(" done.\n" ); |
1487 | } |
1488 | |
1489 | // Prepare for a scavenge. |
1490 | failed_to_promote_ = false; |
1491 | abort_ = false; |
1492 | root_slices_started_ = 0; |
1493 | intptr_t abandoned_bytes = 0; // TODO(rmacnak): Count fragmentation? |
1494 | SpaceUsage usage_before = GetCurrentUsage(); |
1495 | intptr_t promo_candidate_words = 0; |
1496 | for (NewPage* page = to_->head(); page != nullptr; page = page->next()) { |
1497 | page->Release(); |
1498 | if (early_tenure_) { |
1499 | page->EarlyTenure(); |
1500 | } |
1501 | promo_candidate_words += page->promo_candidate_words(); |
1502 | } |
1503 | SemiSpace* from = Prologue(); |
1504 | |
1505 | intptr_t bytes_promoted; |
1506 | if (FLAG_scavenger_tasks == 0) { |
1507 | bytes_promoted = SerialScavenge(from); |
1508 | } else { |
1509 | bytes_promoted = ParallelScavenge(from); |
1510 | } |
1511 | if (abort_) { |
1512 | ReverseScavenge(&from); |
1513 | bytes_promoted = 0; |
1514 | } |
1515 | ASSERT(promotion_stack_.IsEmpty()); |
1516 | MournWeakHandles(); |
1517 | MournWeakTables(); |
1518 | |
1519 | // Restore write-barrier assumptions. |
1520 | heap_->isolate_group()->RememberLiveTemporaries(); |
1521 | |
1522 | // Scavenge finished. Run accounting. |
1523 | int64_t end = OS::GetCurrentMonotonicMicros(); |
1524 | stats_history_.Add(ScavengeStats( |
1525 | start, end, usage_before, GetCurrentUsage(), promo_candidate_words, |
1526 | bytes_promoted >> kWordSizeLog2, abandoned_bytes >> kWordSizeLog2)); |
1527 | Epilogue(from); |
1528 | |
1529 | if (FLAG_verify_after_gc) { |
1530 | OS::PrintErr("Verifying after Scavenge..." ); |
1531 | heap_->WaitForSweeperTasksAtSafepoint(thread); |
1532 | heap_->VerifyGC(thread->is_marking() ? kAllowMarked : kForbidMarked); |
1533 | OS::PrintErr(" done.\n" ); |
1534 | } |
1535 | |
1536 | // Done scavenging. Reset the marker. |
1537 | ASSERT(scavenging_); |
1538 | scavenging_ = false; |
1539 | } |
1540 | |
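// Single-threaded scavenge: evacuate objects reachable from the roots, drain
// the to-space worklist, then hand the visitor's pages back to to-space.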
1541 | intptr_t Scavenger::SerialScavenge(SemiSpace* from) { |
1542 | FreeList* freelist = heap_->old_space()->DataFreeList(0); |
1543 | SerialScavengerVisitor visitor(heap_->isolate_group(), this, from, freelist, |
1544 | &promotion_stack_); |
1545 | visitor.ProcessRoots(); |
1546 | { |
1547 | TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ProcessToSpace" ); |
1548 | visitor.ProcessAll(); |
1549 | } |
1550 | visitor.Finalize(); |
1551 | |
1552 | to_->AddList(visitor.head(), visitor.tail()); |
1553 | return visitor.bytes_promoted(); |
1554 | } |
1555 | |
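// Multi-threaded scavenge: one visitor per task, each promoting into its own
// old-space freelist. All but the last visitor run on thread-pool workers;
// the last runs on the current thread. Afterwards each visitor's pages are
// appended to to-space and the promoted byte counts are summed.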
1556 | intptr_t Scavenger::ParallelScavenge(SemiSpace* from) { |
1557 | intptr_t bytes_promoted = 0; |
1558 | const intptr_t num_tasks = FLAG_scavenger_tasks; |
1559 | ASSERT(num_tasks > 0); |
1560 | |
1561 | ThreadBarrier barrier(num_tasks, heap_->barrier(), heap_->barrier_done()); |
1562 | RelaxedAtomic<uintptr_t> num_busy = num_tasks; |
1563 | |
1564 | ParallelScavengerVisitor** visitors = |
1565 | new ParallelScavengerVisitor*[num_tasks]; |
1566 | for (intptr_t i = 0; i < num_tasks; i++) { |
1567 | FreeList* freelist = heap_->old_space()->DataFreeList(i); |
1568 | visitors[i] = new ParallelScavengerVisitor( |
1569 | heap_->isolate_group(), this, from, freelist, &promotion_stack_); |
1570 | if (i < (num_tasks - 1)) { |
1571 | // Begin scavenging on a helper thread. |
1572 | bool result = Dart::thread_pool()->Run<ParallelScavengerTask>( |
1573 | heap_->isolate_group(), &barrier, visitors[i], &num_busy); |
1574 | ASSERT(result); |
1575 | } else { |
1576 | // Last worker is the main thread. |
1577 | ParallelScavengerTask task(heap_->isolate_group(), &barrier, visitors[i], |
1578 | &num_busy); |
1579 | task.RunEnteredIsolateGroup(); |
1580 | barrier.Exit(); |
1581 | } |
1582 | } |
1583 | |
1584 | for (intptr_t i = 0; i < num_tasks; i++) { |
1585 | to_->AddList(visitors[i]->head(), visitors[i]->tail()); |
1586 | bytes_promoted += visitors[i]->bytes_promoted(); |
1587 | delete visitors[i]; |
1588 | } |
1589 | |
1590 | delete[] visitors; |
1591 | return bytes_promoted; |
1592 | } |
1593 | |
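// Undoes an aborted scavenge. For every from-space object that was already
// forwarded, the header of its to-space copy (with its age bits reset) is
// written back into the original object, and the copy is turned into a
// forwarding corpse pointing back at the original. The semispaces are then
// swapped back, and Become::FollowForwardingPointers rewrites any pointers
// that already refer to the abandoned copies.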
1594 | void Scavenger::ReverseScavenge(SemiSpace** from) { |
1595 | Thread* thread = Thread::Current(); |
1596 | TIMELINE_FUNCTION_GC_DURATION(thread, "ReverseScavenge" ); |
1597 | |
1598 | class ReverseFromForwardingVisitor : public ObjectVisitor { |
    uword ReadHeader(ObjectPtr raw_obj) {
      return reinterpret_cast<std::atomic<uword>*>(
                 ObjectLayout::ToAddr(raw_obj))
          ->load(std::memory_order_relaxed);
    }
    void WriteHeader(ObjectPtr raw_obj, uword header) {
      reinterpret_cast<std::atomic<uword>*>(ObjectLayout::ToAddr(raw_obj))
          ->store(header, std::memory_order_relaxed);
    }
1608 | void VisitObject(ObjectPtr from_obj) { |
      uword from_header = ReadHeader(from_obj);
      if (IsForwarding(from_header)) {
        ObjectPtr to_obj = ForwardedObj(from_header);
        uword to_header = ReadHeader(to_obj);
        intptr_t size = to_obj->ptr()->HeapSize();

        // Reset the age bits in case this was a promotion.
        uint32_t tags = static_cast<uint32_t>(to_header);
        tags = ObjectLayout::OldBit::update(false, tags);
        tags = ObjectLayout::OldAndNotRememberedBit::update(false, tags);
        tags = ObjectLayout::NewBit::update(true, tags);
        tags = ObjectLayout::OldAndNotMarkedBit::update(false, tags);
        uword original_header =
            (to_header & ~static_cast<uword>(0xFFFFFFFF)) | tags;
1623 | |
1624 | WriteHeader(from_obj, original_header); |
1625 | |
1626 | ForwardingCorpse::AsForwarder(ObjectLayout::ToAddr(to_obj), size) |
1627 | ->set_target(from_obj); |
1628 | } |
1629 | } |
1630 | }; |
1631 | |
1632 | ReverseFromForwardingVisitor visitor; |
1633 | for (NewPage* page = (*from)->head(); page != nullptr; page = page->next()) { |
1634 | page->VisitObjects(&visitor); |
1635 | } |
1636 | |
1637 | // Swap from-space and to-space. The abandoned to-space will be deleted in |
1638 | // the epilogue. |
1639 | SemiSpace* temp = to_; |
1640 | to_ = *from; |
1641 | *from = temp; |
1642 | |
1643 | promotion_stack_.Reset(); |
1644 | |
1645 | // This also rebuilds the remembered set. |
1646 | Become::FollowForwardingPointers(thread); |
1647 | |
1648 | // Don't scavenge again until the next old-space GC has occurred. Prevents |
1649 | // performing one scavenge per allocation as the heap limit is approached. |
1650 | heap_->assume_scavenge_will_fail_ = true; |
1651 | } |
1652 | |
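// Toggles write protection for all of to-space. Only valid when no scavenge
// is in progress.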
1653 | void Scavenger::WriteProtect(bool read_only) { |
1654 | ASSERT(!scavenging_); |
1655 | to_->WriteProtect(read_only); |
1656 | } |
1657 | |
1658 | #ifndef PRODUCT |
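// Emits a JSON description of new space: collection count, average time
// between collections, used/capacity/external sizes, and cumulative GC time.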
1659 | void Scavenger::PrintToJSONObject(JSONObject* object) const { |
1660 | auto isolate_group = IsolateGroup::Current(); |
1661 | ASSERT(isolate_group != nullptr); |
1662 | JSONObject space(object, "new" ); |
1663 | space.AddProperty("type" , "HeapSpace" ); |
1664 | space.AddProperty("name" , "new" ); |
1665 | space.AddProperty("vmName" , "Scavenger" ); |
1666 | space.AddProperty("collections" , collections()); |
1667 | if (collections() > 0) { |
1668 | int64_t run_time = isolate_group->UptimeMicros(); |
1669 | run_time = Utils::Maximum(run_time, static_cast<int64_t>(0)); |
1670 | double run_time_millis = MicrosecondsToMilliseconds(run_time); |
1671 | double avg_time_between_collections = |
1672 | run_time_millis / static_cast<double>(collections()); |
1673 | space.AddProperty("avgCollectionPeriodMillis" , |
1674 | avg_time_between_collections); |
1675 | } else { |
1676 | space.AddProperty("avgCollectionPeriodMillis" , 0.0); |
1677 | } |
1678 | space.AddProperty64("used" , UsedInWords() * kWordSize); |
1679 | space.AddProperty64("capacity" , CapacityInWords() * kWordSize); |
1680 | space.AddProperty64("external" , ExternalInWords() * kWordSize); |
1681 | space.AddProperty("time" , MicrosecondsToSeconds(gc_time_micros())); |
1682 | } |
1683 | #endif // !PRODUCT |
1684 | |
1685 | void Scavenger::Evacuate() { |
  // We need a safepoint here to prevent allocation right before or right
  // after the scavenge. The former can introduce an object that we might
  // fail to collect; the latter means that even if the scavenge promotes
  // every object in the new space, a fresh allocation leaves the space
  // non-empty and trips the assertion below.
1692 | SafepointOperationScope scope(Thread::Current()); |
1693 | |
1694 | // Forces the next scavenge to promote all the objects in the new space. |
1695 | early_tenure_ = true; |
1696 | |
1697 | Scavenge(); |
1698 | |
1699 | // It is possible for objects to stay in the new space |
1700 | // if the VM cannot create more pages for these objects. |
1701 | ASSERT((UsedInWords() == 0) || failed_to_promote_); |
1702 | } |
1703 | |
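// Takes ownership of the donor scavenger's to-space pages and accumulated
// external size. Both space locks are held so the transfer is atomic with
// respect to concurrent allocation in either space.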
1704 | void Scavenger::MergeFrom(Scavenger* donor) { |
1705 | MutexLocker ml(&space_lock_); |
1706 | MutexLocker ml2(&donor->space_lock_); |
1707 | to_->MergeFrom(donor->to_); |
1708 | |
1709 | external_size_ += donor->external_size_; |
1710 | donor->external_size_ = 0; |
1711 | } |
1712 | |
1713 | } // namespace dart |
1714 | |