1 | // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #ifndef RUNTIME_VM_HEAP_SCAVENGER_H_ |
6 | #define RUNTIME_VM_HEAP_SCAVENGER_H_ |
7 | |
8 | #include "platform/assert.h" |
9 | #include "platform/utils.h" |
10 | |
11 | #include "vm/dart.h" |
12 | #include "vm/flags.h" |
13 | #include "vm/globals.h" |
14 | #include "vm/heap/spaces.h" |
15 | #include "vm/lockers.h" |
16 | #include "vm/raw_object.h" |
17 | #include "vm/ring_buffer.h" |
18 | #include "vm/virtual_memory.h" |
19 | #include "vm/visitor.h" |
20 | |
21 | namespace dart { |
22 | |
23 | // Forward declarations. |
24 | class Heap; |
25 | class Isolate; |
26 | class JSONObject; |
27 | class ObjectSet; |
28 | template <bool parallel> |
29 | class ScavengerVisitorBase; |
30 | |
31 | static constexpr intptr_t kNewPageSize = 512 * KB; |
32 | static constexpr intptr_t kNewPageSizeInWords = kNewPageSize / kWordSize; |
33 | static constexpr intptr_t kNewPageMask = ~(kNewPageSize - 1); |
34 | |
35 | // A page containing new generation objects. |
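// Pages are aligned to kNewPageSize and the NewPage header is embedded at the
// start of the page's own memory, so the page containing an object can be
// recovered from any interior address by masking (see Of()).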
36 | class NewPage { |
37 | public: |
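  // Allocates and initializes a new page of virtual memory; Deallocate()
  // releases its memory.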
38 | static NewPage* Allocate(); |
39 | void Deallocate(); |
40 | |
41 | uword start() const { return memory_->start(); } |
42 | uword end() const { return memory_->end(); } |
43 | bool Contains(uword addr) const { return memory_->Contains(addr); } |
44 | void WriteProtect(bool read_only) { |
45 | memory_->Protect(read_only ? VirtualMemory::kReadOnly |
46 | : VirtualMemory::kReadWrite); |
47 | } |
48 | |
49 | NewPage* next() const { return next_; } |
50 | void set_next(NewPage* next) { next_ = next; } |
51 | |
52 | Thread* owner() const { return owner_; } |
53 | |
54 | uword object_start() const { return start() + ObjectStartOffset(); } |
55 | uword object_end() const { return owner_ != nullptr ? owner_->top() : top_; } |
56 | void VisitObjects(ObjectVisitor* visitor) const { |
57 | uword addr = object_start(); |
58 | uword end = object_end(); |
59 | while (addr < end) { |
60 | ObjectPtr obj = ObjectLayout::FromAddr(addr); |
61 | visitor->VisitObject(obj); |
62 | addr += obj->ptr()->HeapSize(); |
63 | } |
64 | } |
65 | void VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
66 | uword addr = object_start(); |
67 | uword end = object_end(); |
68 | while (addr < end) { |
69 | ObjectPtr obj = ObjectLayout::FromAddr(addr); |
70 | intptr_t size = obj->ptr()->VisitPointers(visitor); |
71 | addr += size; |
72 | } |
73 | } |
74 | |
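  // Offset from the start of the page to its first object, rounded up so that
  // objects start at the new-space object alignment.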
75 | static intptr_t ObjectStartOffset() { |
76 | return Utils::RoundUp(sizeof(NewPage), kObjectAlignment) + |
77 | kNewObjectAlignmentOffset; |
78 | } |
79 | |
80 | static NewPage* Of(ObjectPtr obj) { |
81 | ASSERT(obj->IsHeapObject()); |
82 | ASSERT(obj->IsNewObject()); |
83 | return Of(static_cast<uword>(obj)); |
84 | } |
85 | static NewPage* Of(uword addr) { |
86 | return reinterpret_cast<NewPage*>(addr & kNewPageMask); |
87 | } |
88 | |
89 | // Remember the limit to which objects have been copied. |
90 | void RecordSurvivors() { survivor_end_ = object_end(); } |
91 | |
  // Move the survivor limit to the end of this page, making all surviving
  // objects candidates for promotion at the next scavenge.
  void EarlyTenure() { survivor_end_ = end_; }
95 | |
96 | uword promo_candidate_words() const { |
97 | return (survivor_end_ - object_start()) / kWordSize; |
98 | } |
99 | |
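  // Hands this page to |thread| as its TLAB: Acquire() publishes the cached
  // top_/end_ bounds into the thread, and Release() writes the thread's bump
  // pointer back before giving the page up.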
100 | void Acquire(Thread* thread) { |
101 | ASSERT(owner_ == nullptr); |
102 | owner_ = thread; |
103 | thread->set_top(top_); |
104 | thread->set_end(end_); |
105 | } |
106 | void Release(Thread* thread) { |
107 | ASSERT(owner_ == thread); |
108 | owner_ = nullptr; |
109 | top_ = thread->top(); |
110 | thread->set_top(0); |
111 | thread->set_end(0); |
112 | } |
113 | void Release() { |
114 | if (owner_ != nullptr) { |
115 | Release(owner_); |
116 | } |
117 | } |
118 | |
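  // Bump-allocates |size| bytes during a scavenge, while no mutator owns the
  // page. Returns 0 if the page lacks sufficient remaining space.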
119 | uword TryAllocateGC(intptr_t size) { |
120 | ASSERT(owner_ == nullptr); |
121 | uword result = top_; |
122 | uword new_top = result + size; |
    if (LIKELY(new_top <= end_)) {
124 | top_ = new_top; |
125 | return result; |
126 | } |
127 | return 0; |
128 | } |
129 | |
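  // Retracts the most recent allocation, which must end at the current top.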
130 | void Unallocate(uword addr, intptr_t size) { |
131 | ASSERT((addr + size) == top_); |
132 | top_ -= size; |
133 | } |
134 | |
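  // An address is a survivor if it lies below the recorded survivor limit;
  // the page is resolved once the scan pointer has caught up with the
  // allocation top (SCAN == NEXT in Cheney's terms).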
135 | bool IsSurvivor(uword raw_addr) const { return raw_addr < survivor_end_; } |
136 | bool IsResolved() const { return top_ == resolved_top_; } |
137 | |
138 | private: |
139 | VirtualMemory* memory_; |
140 | NewPage* next_; |
141 | |
  // The thread using this page for allocation, otherwise nullptr.
143 | Thread* owner_; |
144 | |
  // The address of the next allocation. If owner_ is non-null, this value is
  // stale and the current value is owner_->top(). Called "NEXT" in the
  // original Cheney paper.
148 | uword top_; |
149 | |
150 | // The address after the last allocatable byte in this page. |
151 | uword end_; |
152 | |
153 | // Objects below this address have survived a scavenge. |
154 | uword survivor_end_; |
155 | |
156 | // A pointer to the first unprocessed object. Resolution completes when this |
157 | // value meets the allocation top. Called "SCAN" in the original Cheney paper. |
158 | uword resolved_top_; |
159 | |
160 | template <bool> |
161 | friend class ScavengerVisitorBase; |
162 | |
163 | DISALLOW_ALLOCATION(); |
164 | DISALLOW_IMPLICIT_CONSTRUCTORS(NewPage); |
165 | }; |
166 | |
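// A linked list of NewPages that serves as the to-space or from-space of a
// semispace (Cheney-style) copying collector.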
167 | class SemiSpace { |
168 | public: |
169 | static void Init(); |
170 | static void Cleanup(); |
171 | static intptr_t CachedSize(); |
172 | |
173 | explicit SemiSpace(intptr_t max_capacity_in_words); |
174 | ~SemiSpace(); |
175 | |
176 | NewPage* TryAllocatePageLocked(bool link); |
177 | |
178 | bool Contains(uword addr) const; |
179 | void WriteProtect(bool read_only); |
180 | |
181 | intptr_t capacity_in_words() const { return capacity_in_words_; } |
182 | intptr_t max_capacity_in_words() const { return max_capacity_in_words_; } |
183 | |
184 | NewPage* head() const { return head_; } |
185 | |
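  // AddList appends an already-linked chain of pages; MergeFrom adopts all of
  // |donor|'s pages and capacity.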
186 | void AddList(NewPage* head, NewPage* tail); |
187 | void MergeFrom(SemiSpace* donor); |
188 | |
189 | private: |
  // Total size of the NewPages in this semi-space.
191 | intptr_t capacity_in_words_ = 0; |
192 | |
  // Limit on the total size of NewPages before we trigger a scavenge.
194 | intptr_t max_capacity_in_words_; |
195 | |
196 | NewPage* head_ = nullptr; |
197 | NewPage* tail_ = nullptr; |
198 | }; |
199 | |
200 | // Statistics for a particular scavenge. |
201 | class ScavengeStats { |
202 | public: |
203 | ScavengeStats() {} |
204 | ScavengeStats(int64_t start_micros, |
205 | int64_t end_micros, |
206 | SpaceUsage before, |
207 | SpaceUsage after, |
208 | intptr_t promo_candidates_in_words, |
209 | intptr_t promoted_in_words, |
210 | intptr_t abandoned_in_words) |
211 | : start_micros_(start_micros), |
212 | end_micros_(end_micros), |
213 | before_(before), |
214 | after_(after), |
215 | promo_candidates_in_words_(promo_candidates_in_words), |
216 | promoted_in_words_(promoted_in_words), |
217 | abandoned_in_words_(abandoned_in_words) {} |
218 | |
  // Of all data before the scavenge, what fraction was found to be garbage?
  // If this scavenge included growth, assume the extra capacity would have
  // become garbage, to give the scavenger a chance to stabilize at the new
  // capacity.
222 | double ExpectedGarbageFraction() const { |
223 | double work = |
224 | after_.used_in_words + promoted_in_words_ + abandoned_in_words_; |
225 | return 1.0 - (work / after_.capacity_in_words); |
226 | } |
227 | |
  // Fraction of promotion candidates that survived and were thereby promoted.
229 | // Returns zero if there were no promotion candidates. |
230 | double PromoCandidatesSuccessFraction() const { |
231 | return promo_candidates_in_words_ > 0 |
232 | ? promoted_in_words_ / |
233 | static_cast<double>(promo_candidates_in_words_) |
234 | : 0.0; |
235 | } |
236 | |
237 | intptr_t UsedBeforeInWords() const { return before_.used_in_words; } |
238 | |
239 | int64_t DurationMicros() const { return end_micros_ - start_micros_; } |
240 | |
241 | private: |
242 | int64_t start_micros_; |
243 | int64_t end_micros_; |
244 | SpaceUsage before_; |
245 | SpaceUsage after_; |
246 | intptr_t promo_candidates_in_words_; |
247 | intptr_t promoted_in_words_; |
248 | intptr_t abandoned_in_words_; |
249 | }; |
250 | |
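// The new-generation collector: a generational semispace scavenger. Mutator
// threads bump-allocate from thread-local allocation buffers (TLABs) carved
// out of to-space; during a scavenge, surviving objects are copied between
// semispaces and repeated survivors are promoted into the old generation.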
251 | class Scavenger { |
252 | private: |
253 | static const intptr_t kTLABSize = 512 * KB; |
254 | |
255 | public: |
256 | Scavenger(Heap* heap, intptr_t max_semi_capacity_in_words); |
257 | ~Scavenger(); |
258 | |
  // Check whether this Scavenger contains this address.
  // During a scavenge both to-space and from-space contain "legal" objects,
  // but this function only returns true for addresses that will be part of
  // the surviving objects, i.e. those in to-space.
263 | bool Contains(uword addr) const { return to_->Contains(addr); } |
264 | |
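  // Visits the objects in new space and returns the first one accepted by
  // |visitor|, if any.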
265 | ObjectPtr FindObject(FindObjectVisitor* visitor); |
266 | |
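  // The mutator allocation path: bump-allocate from the thread's current
  // TLAB, or acquire a new TLAB and retry once.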
267 | uword TryAllocate(Thread* thread, intptr_t size) { |
268 | uword addr = TryAllocateFromTLAB(thread, size); |
269 | if (LIKELY(addr != 0)) { |
270 | return addr; |
271 | } |
272 | TryAllocateNewTLAB(thread, size); |
273 | return TryAllocateFromTLAB(thread, size); |
274 | } |
275 | void AbandonRemainingTLAB(Thread* thread); |
276 | void AbandonRemainingTLABForDebugging(Thread* thread); |
277 | |
278 | // Collect the garbage in this scavenger. |
279 | void Scavenge(); |
280 | |
281 | // Promote all live objects. |
282 | void Evacuate(); |
283 | |
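  // Adopts all of |donor|'s pages and accounting, used when merging one heap
  // into another.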
284 | void MergeFrom(Scavenger* donor); |
285 | |
286 | int64_t UsedInWords() const { |
287 | MutexLocker ml(&space_lock_); |
288 | return to_->capacity_in_words(); |
289 | } |
290 | int64_t CapacityInWords() const { return to_->max_capacity_in_words(); } |
291 | int64_t ExternalInWords() const { return external_size_ >> kWordSizeLog2; } |
292 | SpaceUsage GetCurrentUsage() const { |
293 | SpaceUsage usage; |
294 | usage.used_in_words = UsedInWords(); |
295 | usage.capacity_in_words = CapacityInWords(); |
296 | usage.external_in_words = ExternalInWords(); |
297 | return usage; |
298 | } |
299 | |
300 | void VisitObjects(ObjectVisitor* visitor) const; |
301 | void VisitObjectPointers(ObjectPointerVisitor* visitor) const; |
302 | |
303 | void AddRegionsToObjectSet(ObjectSet* set) const; |
304 | |
305 | void WriteProtect(bool read_only); |
306 | |
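  // Returns whether an idle-time scavenge is worthwhile, i.e. whether, based
  // on the recent scavenge speed, one is expected to complete before
  // |deadline| (in monotonic microseconds).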
307 | bool ShouldPerformIdleScavenge(int64_t deadline); |
308 | |
309 | void AddGCTime(int64_t micros) { gc_time_micros_ += micros; } |
310 | |
311 | int64_t gc_time_micros() const { return gc_time_micros_; } |
312 | |
313 | void IncrementCollections() { collections_++; } |
314 | |
315 | intptr_t collections() const { return collections_; } |
316 | |
317 | #ifndef PRODUCT |
318 | void PrintToJSONObject(JSONObject* object) const; |
319 | #endif // !PRODUCT |
320 | |
321 | void AllocatedExternal(intptr_t size) { |
322 | ASSERT(size >= 0); |
323 | external_size_ += size; |
324 | ASSERT(external_size_ >= 0); |
325 | } |
326 | void FreedExternal(intptr_t size) { |
327 | ASSERT(size >= 0); |
328 | external_size_ -= size; |
329 | ASSERT(external_size_ >= 0); |
330 | } |
331 | |
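  // Flushes outstanding TLABs so that every object in new space can be
  // visited.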
332 | void MakeNewSpaceIterable(); |
333 | int64_t FreeSpaceInWords(Isolate* isolate) const; |
334 | |
  void InitGrowthControl() { growth_control_ = true; }

  void SetGrowthControlState(bool state) { growth_control_ = state; }
342 | |
343 | bool GrowthControlState() { return growth_control_; } |
344 | |
345 | bool scavenging() const { return scavenging_; } |
346 | |
347 | // The maximum number of Dart mutator threads we allow to execute at the same |
348 | // time. |
349 | static intptr_t MaxMutatorThreadCount() { |
    // With a max new-space of 16 MB and 512 KB TLABs we would allow up to 8
    // mutator threads to run at the same time.
352 | const intptr_t max_parallel_tlab_usage = |
353 | (FLAG_new_gen_semi_max_size * MB) / Scavenger::kTLABSize; |
354 | const intptr_t max_pool_size = max_parallel_tlab_usage / 4; |
355 | return max_pool_size > 0 ? max_pool_size : 1; |
356 | } |
357 | |
358 | NewPage* head() const { return to_->head(); } |
359 | |
360 | private: |
361 | // Ids for time and data records in Heap::GCStats. |
362 | enum { |
363 | // Time |
364 | kDummyScavengeTime = 0, |
365 | kSafePoint = 1, |
366 | kVisitIsolateRoots = 2, |
367 | kIterateStoreBuffers = 3, |
368 | kProcessToSpace = 4, |
369 | kIterateWeaks = 5, |
370 | // Data |
371 | kStoreBufferEntries = 0, |
372 | kDataUnused1 = 1, |
373 | kDataUnused2 = 2, |
374 | kToKBAfterStoreBuffer = 3 |
375 | }; |
376 | |
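  // Attempts to bump-allocate |size| bytes from the thread's current TLAB.
  // Returns 0 if the TLAB has insufficient space left.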
377 | uword TryAllocateFromTLAB(Thread* thread, intptr_t size) { |
378 | ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
379 | ASSERT(heap_ != Dart::vm_isolate()->heap()); |
380 | |
381 | const uword result = thread->top(); |
382 | const intptr_t remaining = thread->end() - result; |
383 | if (UNLIKELY(remaining < size)) { |
384 | return 0; |
385 | } |
386 | |
387 | ASSERT(to_->Contains(result)); |
388 | ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset); |
389 | thread->set_top(result + size); |
390 | return result; |
391 | } |
392 | void TryAllocateNewTLAB(Thread* thread, intptr_t size); |
393 | |
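  // The phases of a scavenge: Prologue flips the semispaces; the roots and
  // store-buffer entries are visited to copy reachable objects into to-space,
  // either serially or in parallel; weak handles and tables referring to dead
  // objects are mourned; and Epilogue releases from-space. ReverseScavenge
  // undoes a scavenge that was aborted because promotion failed.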
394 | SemiSpace* Prologue(); |
395 | intptr_t ParallelScavenge(SemiSpace* from); |
396 | intptr_t SerialScavenge(SemiSpace* from); |
397 | void ReverseScavenge(SemiSpace** from); |
398 | void IterateIsolateRoots(ObjectPointerVisitor* visitor); |
399 | template <bool parallel> |
400 | void IterateStoreBuffers(ScavengerVisitorBase<parallel>* visitor); |
401 | template <bool parallel> |
402 | void IterateRememberedCards(ScavengerVisitorBase<parallel>* visitor); |
403 | void IterateObjectIdTable(ObjectPointerVisitor* visitor); |
404 | template <bool parallel> |
405 | void IterateRoots(ScavengerVisitorBase<parallel>* visitor); |
406 | void MournWeakHandles(); |
407 | void Epilogue(SemiSpace* from); |
408 | |
409 | bool IsUnreachable(ObjectPtr* p); |
410 | |
411 | void VerifyStoreBuffers(); |
412 | |
413 | void UpdateMaxHeapCapacity(); |
414 | void UpdateMaxHeapUsage(); |
415 | |
416 | void MournWeakTables(); |
417 | |
418 | intptr_t NewSizeInWords(intptr_t old_size_in_words) const; |
419 | |
420 | Heap* heap_; |
421 | |
422 | SemiSpace* to_; |
423 | |
424 | PromotionStack promotion_stack_; |
425 | |
426 | intptr_t max_semi_capacity_in_words_; |
427 | |
428 | // Keep track whether a scavenge is currently running. |
429 | bool scavenging_; |
430 | bool early_tenure_ = false; |
431 | RelaxedAtomic<intptr_t> root_slices_started_; |
432 | StoreBufferBlock* blocks_; |
433 | |
434 | int64_t gc_time_micros_; |
435 | intptr_t collections_; |
436 | static const int kStatsHistoryCapacity = 4; |
437 | RingBuffer<ScavengeStats, kStatsHistoryCapacity> stats_history_; |
438 | |
439 | intptr_t scavenge_words_per_micro_; |
440 | intptr_t idle_scavenge_threshold_in_words_; |
441 | |
442 | // The total size of external data associated with objects in this scavenger. |
443 | RelaxedAtomic<intptr_t> external_size_; |
444 | |
445 | RelaxedAtomic<bool> failed_to_promote_; |
446 | RelaxedAtomic<bool> abort_; |
447 | |
448 | bool growth_control_; |
449 | |
  // Protects new space during the allocation of new TLABs.
451 | mutable Mutex space_lock_; |
452 | |
453 | template <bool> |
454 | friend class ScavengerVisitorBase; |
455 | friend class ScavengerWeakVisitor; |
456 | |
457 | DISALLOW_COPY_AND_ASSIGN(Scavenger); |
458 | }; |
459 | |
460 | } // namespace dart |
461 | |
462 | #endif // RUNTIME_VM_HEAP_SCAVENGER_H_ |
463 | |