// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_HEAP_PAGES_H_
#define RUNTIME_VM_HEAP_PAGES_H_

#include "platform/atomic.h"
#include "vm/globals.h"
#include "vm/heap/freelist.h"
#include "vm/heap/spaces.h"
#include "vm/lockers.h"
#include "vm/ring_buffer.h"
#include "vm/thread.h"
#include "vm/virtual_memory.h"

namespace dart {

DECLARE_FLAG(bool, write_protect_code);

// Forward declarations.
class Heap;
class JSONObject;
class ObjectPointerVisitor;
class ObjectSet;
class ForwardingPage;
class GCMarker;
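
// Old-generation pages are fixed-size, size-aligned chunks of virtual memory,
// so the page containing an object can be found by masking the object's
// address with kOldPageMask (image pages are an exception; see the warnings
// on OldPage::Of below).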
static constexpr intptr_t kOldPageSize = 512 * KB;
static constexpr intptr_t kOldPageSizeInWords = kOldPageSize / kWordSize;
static constexpr intptr_t kOldPageMask = ~(kOldPageSize - 1);

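// Blocks subdivide a page for the compactor's bookkeeping: one block covers
// as many object-aligned addresses as fit in a single bit-vector word (see
// ForwardingPage).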
static constexpr intptr_t kBitVectorWordsPerBlock = 1;
static constexpr intptr_t kBlockSize =
    kObjectAlignment * kBitsPerWord * kBitVectorWordsPerBlock;
static constexpr intptr_t kBlockMask = ~(kBlockSize - 1);
static constexpr intptr_t kBlocksPerPage = kOldPageSize / kBlockSize;

// A page containing old generation objects.
class OldPage {
 public:
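  // Each page holds objects of a single type: executable pages contain code
  // (instructions), data pages everything else. Executable pages can be
  // write-protected when FLAG_write_protect_code is enabled.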
  enum PageType { kExecutable = 0, kData };

  OldPage* next() const { return next_; }
  void set_next(OldPage* next) { next_ = next; }

  bool Contains(uword addr) const { return memory_->Contains(addr); }
  intptr_t AliasOffset() const { return memory_->AliasOffset(); }

  uword object_start() const { return memory_->start() + ObjectStartOffset(); }
  uword object_end() const { return object_end_; }
  uword used_in_bytes() const { return used_in_bytes_; }
  void set_used_in_bytes(uword value) {
    ASSERT(Utils::IsAligned(value, kObjectAlignment));
    used_in_bytes_ = value;
  }

  ForwardingPage* forwarding_page() const { return forwarding_page_; }
  void AllocateForwardingPage();

  PageType type() const { return type_; }

  bool is_image_page() const { return !memory_->vm_owns_region(); }

  void VisitObjects(ObjectVisitor* visitor) const;
  void VisitObjectPointers(ObjectPointerVisitor* visitor) const;

  ObjectPtr FindObject(FindObjectVisitor* visitor) const;

  void WriteProtect(bool read_only);

  static intptr_t ObjectStartOffset() {
    return Utils::RoundUp(sizeof(OldPage), kMaxObjectAlignment);
  }

  // Warning: This does not work for objects on image pages because image pages
  // are not aligned. However, it works for objects on large pages, because
  // only one object is allocated per large page.
  static OldPage* Of(ObjectPtr obj) {
    ASSERT(obj->IsHeapObject());
    ASSERT(obj->IsOldObject());
    return reinterpret_cast<OldPage*>(static_cast<uword>(obj) & kOldPageMask);
  }

  // Warning: This does not work for addresses on image pages or on large
  // pages.
  static OldPage* Of(uword addr) {
    return reinterpret_cast<OldPage*>(addr & kOldPageMask);
  }

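  // When code is write-protected (FLAG_write_protect_code), a code page may be
  // mapped at two addresses: a writable view and an executable view separated
  // by AliasOffset(). The helpers below translate an object pointer between
  // the two views; for a page that is not aliased they return the pointer
  // unchanged.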
  // Warning: This does not work for objects on image pages.
  static ObjectPtr ToExecutable(ObjectPtr obj) {
    OldPage* page = Of(obj);
    VirtualMemory* memory = page->memory_;
    const intptr_t alias_offset = memory->AliasOffset();
    if (alias_offset == 0) {
      return obj;  // Not aliased.
    }
    uword addr = ObjectLayout::ToAddr(obj);
    if (memory->Contains(addr)) {
      return ObjectLayout::FromAddr(addr + alias_offset);
    }
    // obj is executable.
    ASSERT(memory->ContainsAlias(addr));
    return obj;
  }

  // Warning: This does not work for objects on image pages.
  static ObjectPtr ToWritable(ObjectPtr obj) {
    OldPage* page = Of(obj);
    VirtualMemory* memory = page->memory_;
    const intptr_t alias_offset = memory->AliasOffset();
    if (alias_offset == 0) {
      return obj;  // Not aliased.
    }
    uword addr = ObjectLayout::ToAddr(obj);
    if (memory->ContainsAlias(addr)) {
      return ObjectLayout::FromAddr(addr - alias_offset);
    }
    // obj is writable.
    ASSERT(memory->Contains(addr));
    return obj;
  }

  // 1 card = 128 slots.
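  // With 8-byte words (64-bit targets), each card therefore covers
  // 128 * 8 = 1024 bytes of the page and maps to one byte in card_table_.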
  static const intptr_t kSlotsPerCardLog2 = 7;
  static const intptr_t kBytesPerCardLog2 = kWordSizeLog2 + kSlotsPerCardLog2;

  intptr_t card_table_size() const {
    return memory_->size() >> kBytesPerCardLog2;
  }

  static intptr_t card_table_offset() {
    return OFFSET_OF(OldPage, card_table_);
  }

  void RememberCard(ObjectPtr const* slot) {
    ASSERT(Contains(reinterpret_cast<uword>(slot)));
    if (card_table_ == NULL) {
      card_table_ = reinterpret_cast<uint8_t*>(
          calloc(card_table_size(), sizeof(uint8_t)));
    }
    intptr_t offset =
        reinterpret_cast<uword>(slot) - reinterpret_cast<uword>(this);
    intptr_t index = offset >> kBytesPerCardLog2;
    ASSERT((index >= 0) && (index < card_table_size()));
    card_table_[index] = 1;
  }
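  // Visits the object pointers covered by this page's remembered (dirty)
  // cards.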
  void VisitRememberedCards(ObjectPointerVisitor* visitor);

 private:
  void set_object_end(uword value) {
    ASSERT((value & kObjectAlignmentMask) == kOldObjectAlignmentOffset);
    object_end_ = value;
  }

  // Returns NULL on OOM.
  static OldPage* Allocate(intptr_t size_in_words,
                           PageType type,
                           const char* name);

  // Deallocates the virtual memory backing this page. The OldPage header lives
  // in that memory, so the page becomes immediately inaccessible.
  void Deallocate();

  VirtualMemory* memory_;
  OldPage* next_;
  uword object_end_;
  uword used_in_bytes_;
  ForwardingPage* forwarding_page_;
  uint8_t* card_table_;  // Remembered set, not marking.
  PageType type_;

  friend class PageSpace;
  friend class GCCompactor;

  DISALLOW_ALLOCATION();
  DISALLOW_IMPLICIT_CONSTRUCTORS(OldPage);
};

// The history holds timing information for the most recent garbage collection
// runs.
class PageSpaceGarbageCollectionHistory {
 public:
  PageSpaceGarbageCollectionHistory() {}
  ~PageSpaceGarbageCollectionHistory() {}

  void AddGarbageCollectionTime(int64_t start, int64_t end);

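  // Returns the percentage of recent time spent in garbage collection,
  // computed from the recorded history.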
  int GarbageCollectionTimeFraction();

  bool IsEmpty() const { return history_.Size() == 0; }

 private:
  struct Entry {
    int64_t start;
    int64_t end;
  };
  static const intptr_t kHistoryLength = 4;
  RingBuffer<Entry, kHistoryLength> history_;

  DISALLOW_ALLOCATION();
  DISALLOW_COPY_AND_ASSIGN(PageSpaceGarbageCollectionHistory);
};

// PageSpaceController controls the heap size.
class PageSpaceController {
 public:
  // The heap is passed in for recording stats only. The controller does not
  // invoke GC by itself.
  PageSpaceController(Heap* heap,
                      int heap_growth_ratio,
                      int heap_growth_max,
                      int garbage_collection_time_ratio);
  ~PageSpaceController();

  // Returns whether growing to 'after' should trigger a GC.
  // This method can be called before allocation (e.g., pretenuring) or after
  // (e.g., promotion), as it does not change the state of the controller.
  bool ReachedHardThreshold(SpaceUsage after) const;
  bool ReachedSoftThreshold(SpaceUsage after) const;

  // Returns whether an idle GC is worthwhile.
  bool ReachedIdleThreshold(SpaceUsage current) const;

  // Should be called after each collection to update the controller state.
  void EvaluateGarbageCollection(SpaceUsage before,
                                 SpaceUsage after,
                                 int64_t start,
                                 int64_t end);
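
  // Updates the controller outside of a full GC evaluation: after a program
  // load completes, or as a hint that roughly 'size' bytes have been freed.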
  void EvaluateAfterLoading(SpaceUsage after);
  void HintFreed(intptr_t size);

  void set_last_usage(SpaceUsage current) { last_usage_ = current; }

  void Enable() { is_enabled_ = true; }
  void Disable() { is_enabled_ = false; }
  bool is_enabled() { return is_enabled_; }

 private:
  friend class PageSpace;  // For MergeFrom.

  void RecordUpdate(SpaceUsage before, SpaceUsage after, const char* reason);
  void MergeFrom(PageSpaceController* donor);

  void RecordUpdate(SpaceUsage before,
                    SpaceUsage after,
                    intptr_t growth_in_pages,
                    const char* reason);

  Heap* heap_;

  bool is_enabled_;

  // Usage after last evaluated GC or last enabled.
  SpaceUsage last_usage_;

  // If the garbage collector was not able to free more than heap_growth_ratio_
  // percent of the allocated memory, the heap is grown; otherwise, garbage
  // collection is performed.
  const int heap_growth_ratio_;

  // The desired percent of heap in-use after a garbage collection.
  // Equivalent to (100 - heap_growth_ratio_) / 100.
  const double desired_utilization_;

  // Max number of pages we grow.
  const int heap_growth_max_;

  // If the relative GC time goes above garbage_collection_time_ratio_ %,
  // we grow the heap more aggressively.
  const int garbage_collection_time_ratio_;

  // Perform a stop-the-world GC when usage exceeds this amount.
  intptr_t hard_gc_threshold_in_words_;

  // Begin concurrent marking when usage exceeds this amount.
  intptr_t soft_gc_threshold_in_words_;

  // Run idle GC if time permits when usage exceeds this amount.
  intptr_t idle_gc_threshold_in_words_;

  PageSpaceGarbageCollectionHistory history_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(PageSpaceController);
};

class PageSpace {
 public:
  enum GrowthPolicy { kControlGrowth, kForceGrowth };
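  // Phase of the collector's concurrent/background work; kDone means no
  // collection is currently in progress.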
  enum Phase {
    kDone,
    kMarking,
    kAwaitingFinalization,
    kSweepingLarge,
    kSweepingRegular
  };

  PageSpace(Heap* heap, intptr_t max_capacity_in_words);
  ~PageSpace();

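  // Attempts to allocate 'size' bytes on a page of the given type, growing the
  // space if the growth policy permits; returns 0 on failure.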
  uword TryAllocate(intptr_t size,
                    OldPage::PageType type = OldPage::kData,
                    GrowthPolicy growth_policy = kControlGrowth) {
    bool is_protected =
        (type == OldPage::kExecutable) && FLAG_write_protect_code;
    bool is_locked = false;
    return TryAllocateInternal(size, &freelists_[type], type, growth_policy,
                               is_protected, is_locked);
  }

  bool ReachedHardThreshold() const {
    return page_space_controller_.ReachedHardThreshold(usage_);
  }
  bool ReachedSoftThreshold() const {
    return page_space_controller_.ReachedSoftThreshold(usage_);
  }
  bool ReachedIdleThreshold() const {
    return page_space_controller_.ReachedIdleThreshold(usage_);
  }
  void EvaluateAfterLoading() {
    page_space_controller_.EvaluateAfterLoading(usage_);
  }
  void HintFreed(intptr_t size) { page_space_controller_.HintFreed(size); }

  int64_t UsedInWords() const { return usage_.used_in_words; }
  int64_t CapacityInWords() const {
    MutexLocker ml(&pages_lock_);
    return usage_.capacity_in_words;
  }
  void IncreaseCapacityInWords(intptr_t increase_in_words) {
    MutexLocker ml(&pages_lock_);
    IncreaseCapacityInWordsLocked(increase_in_words);
  }
  void IncreaseCapacityInWordsLocked(intptr_t increase_in_words) {
    DEBUG_ASSERT(pages_lock_.IsOwnedByCurrentThread());
    usage_.capacity_in_words += increase_in_words;
    UpdateMaxCapacityLocked();
  }

  void UpdateMaxCapacityLocked();
  void UpdateMaxUsed();

  int64_t ExternalInWords() const { return usage_.external_in_words; }
  SpaceUsage GetCurrentUsage() const {
    MutexLocker ml(&pages_lock_);
    return usage_;
  }
  int64_t ImageInWords() const {
    int64_t size = 0;
    MutexLocker ml(&pages_lock_);
    for (OldPage* page = image_pages_; page != nullptr; page = page->next()) {
      size += page->memory_->size();
    }
    return size >> kWordSizeLog2;
  }

  bool Contains(uword addr) const;
  bool ContainsUnsafe(uword addr) const;
  bool Contains(uword addr, OldPage::PageType type) const;
  bool DataContains(uword addr) const;
  bool IsValidAddress(uword addr) const { return Contains(addr); }

  void VisitObjects(ObjectVisitor* visitor) const;
  void VisitObjectsNoImagePages(ObjectVisitor* visitor) const;
  void VisitObjectsImagePages(ObjectVisitor* visitor) const;
  void VisitObjectPointers(ObjectPointerVisitor* visitor) const;

  void VisitRememberedCards(ObjectPointerVisitor* visitor) const;

  ObjectPtr FindObject(FindObjectVisitor* visitor,
                       OldPage::PageType type) const;

  // Collect the garbage in the page space using mark-sweep or mark-compact.
  void CollectGarbage(bool compact, bool finalize);

  void AddRegionsToObjectSet(ObjectSet* set) const;

  void InitGrowthControl() {
    page_space_controller_.set_last_usage(usage_);
    page_space_controller_.Enable();
  }

  void SetGrowthControlState(bool state) {
    if (state) {
      page_space_controller_.Enable();
    } else {
      page_space_controller_.Disable();
    }
  }

  bool GrowthControlState() { return page_space_controller_.is_enabled(); }

  // Note: Code pages are made executable/non-executable when 'read_only' is
  // true/false, respectively.
  void WriteProtect(bool read_only);
  void WriteProtectCode(bool read_only);

  bool ShouldStartIdleMarkSweep(int64_t deadline);
  bool ShouldPerformIdleMarkCompact(int64_t deadline);

  void AddGCTime(int64_t micros) { gc_time_micros_ += micros; }

  int64_t gc_time_micros() const { return gc_time_micros_; }

  void IncrementCollections() { collections_++; }

  intptr_t collections() const { return collections_; }

#ifndef PRODUCT
  void PrintToJSONObject(JSONObject* object) const;
  void PrintHeapMapToJSONStream(Isolate* isolate, JSONStream* stream) const;
#endif  // PRODUCT

  void AllocateBlack(intptr_t size) {
    allocated_black_in_words_.fetch_add(size >> kWordSizeLog2);
  }

  void AllocatedExternal(intptr_t size) {
    ASSERT(size >= 0);
    intptr_t size_in_words = size >> kWordSizeLog2;
    usage_.external_in_words += size_in_words;
  }
  void FreedExternal(intptr_t size) {
    ASSERT(size >= 0);
    intptr_t size_in_words = size >> kWordSizeLog2;
    usage_.external_in_words -= size_in_words;
  }

  // Bulk data allocation.
  FreeList* DataFreeList(intptr_t i = 0) {
    return &freelists_[OldPage::kData + i];
  }
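  // Takes/releases the given freelist's lock so the caller can issue a series
  // of *Locked allocations (e.g. TryAllocateDataLocked) under a single lock
  // acquisition.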
  void AcquireLock(FreeList* freelist);
  void ReleaseLock(FreeList* freelist);

  uword TryAllocateDataLocked(FreeList* freelist,
                              intptr_t size,
                              GrowthPolicy growth_policy) {
    bool is_protected = false;
    bool is_locked = true;
    return TryAllocateInternal(size, freelist, OldPage::kData, growth_policy,
                               is_protected, is_locked);
  }

  Monitor* tasks_lock() const { return &tasks_lock_; }
  intptr_t tasks() const { return tasks_; }
  void set_tasks(intptr_t val) {
    ASSERT(val >= 0);
    tasks_ = val;
  }
  intptr_t concurrent_marker_tasks() const { return concurrent_marker_tasks_; }
  void set_concurrent_marker_tasks(intptr_t val) {
    ASSERT(val >= 0);
    concurrent_marker_tasks_ = val;
  }
  Phase phase() const { return phase_; }
  void set_phase(Phase val) { phase_ = val; }

  // Attempt to allocate from bump block rather than normal freelist.
  uword TryAllocateDataBumpLocked(intptr_t size) {
    return TryAllocateDataBumpLocked(&freelists_[OldPage::kData], size);
  }
  uword TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size);
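  // Promotion fast path used during scavenges: bump-allocates from the
  // freelist's current bump region and falls back to the slow path when the
  // region is exhausted.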
  DART_FORCE_INLINE
  uword TryAllocatePromoLocked(FreeList* freelist, intptr_t size) {
    uword result = freelist->TryAllocateBumpLocked(size);
    if (result != 0) {
      return result;
    }
    return TryAllocatePromoLockedSlow(freelist, size);
  }
  uword TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size);

  void SetupImagePage(void* pointer, uword size, bool is_executable);

  // Return any bump allocation block to the freelist.
  void AbandonBumpAllocation();
  // Have threads release marking stack blocks, etc.
  void AbandonMarkingForShutdown();

  bool enable_concurrent_mark() const { return enable_concurrent_mark_; }
  void set_enable_concurrent_mark(bool enable_concurrent_mark) {
    enable_concurrent_mark_ = enable_concurrent_mark;
  }

  bool IsObjectFromImagePages(ObjectPtr object);

  void MergeFrom(PageSpace* donor);

 private:
  // Ids for time and data records in Heap::GCStats.
  enum {
    // Time
    kConcurrentSweep = 0,
    kSafePoint = 1,
    kMarkObjects = 2,
    kResetFreeLists = 3,
    kSweepPages = 4,
    kSweepLargePages = 5,
    // Data
    kGarbageRatio = 0,
    kGCTimeFraction = 1,
    kPageGrowth = 2,
    kAllowedGrowth = 3
  };

  uword TryAllocateInternal(intptr_t size,
                            FreeList* freelist,
                            OldPage::PageType type,
                            GrowthPolicy growth_policy,
                            bool is_protected,
                            bool is_locked);
  uword TryAllocateInFreshPage(intptr_t size,
                               FreeList* freelist,
                               OldPage::PageType type,
                               GrowthPolicy growth_policy,
                               bool is_locked);
  uword TryAllocateInFreshLargePage(intptr_t size,
                                    OldPage::PageType type,
                                    GrowthPolicy growth_policy);

  void EvaluateConcurrentMarking(GrowthPolicy growth_policy);

  // Makes bump block walkable; do not call concurrently with mutator.
  void MakeIterable() const;

  void AddPageLocked(OldPage* page);
  void AddLargePageLocked(OldPage* page);
  void AddExecPageLocked(OldPage* page);
  void RemovePageLocked(OldPage* page, OldPage* previous_page);
  void RemoveLargePageLocked(OldPage* page, OldPage* previous_page);
  void RemoveExecPageLocked(OldPage* page, OldPage* previous_page);

  OldPage* AllocatePage(OldPage::PageType type, bool link = true);
  OldPage* AllocateLargePage(intptr_t size, OldPage::PageType type);

  void TruncateLargePage(OldPage* page, intptr_t new_object_size_in_bytes);
  void FreePage(OldPage* page, OldPage* previous_page);
  void FreeLargePage(OldPage* page, OldPage* previous_page);
  void FreePages(OldPage* pages);

  void CollectGarbageAtSafepoint(bool compact,
                                 bool finalize,
                                 int64_t pre_wait_for_sweepers,
                                 int64_t pre_safe_point);
  void SweepLarge();
  void Sweep();
  void ConcurrentSweep(IsolateGroup* isolate_group);
  void Compact(Thread* thread);

  static intptr_t LargePageSizeInWordsFor(intptr_t size);

  bool CanIncreaseCapacityInWordsLocked(intptr_t increase_in_words) {
    if (max_capacity_in_words_ == 0) {
      // Unlimited.
      return true;
    }
    intptr_t free_capacity_in_words =
        (max_capacity_in_words_ - usage_.capacity_in_words);
    return ((free_capacity_in_words > 0) &&
            (increase_in_words <= free_capacity_in_words));
  }

  Heap* const heap_;

  // One list for executable pages at freelists_[OldPage::kExecutable].
  // FLAG_scavenger_tasks count of lists for data pages starting at
  // freelists_[OldPage::kData]. The sweeper inserts into the data page
  // freelists round-robin. The scavenger workers each use one of the data
  // page freelists without locking.
  const intptr_t num_freelists_;
  FreeList* freelists_;

  // Use ExclusivePageIterator for safe access to these.
  mutable Mutex pages_lock_;
  OldPage* pages_ = nullptr;
  OldPage* pages_tail_ = nullptr;
  OldPage* exec_pages_ = nullptr;
  OldPage* exec_pages_tail_ = nullptr;
  OldPage* large_pages_ = nullptr;
  OldPage* large_pages_tail_ = nullptr;
  OldPage* image_pages_ = nullptr;

  // Various sizes being tracked for this generation.
  intptr_t max_capacity_in_words_;

  // NOTE: The capacity component of usage_ is updated by the concurrent
  // sweeper. Use (Increase)CapacityInWords(Locked) for thread-safe access.
  SpaceUsage usage_;
  RelaxedAtomic<intptr_t> allocated_black_in_words_;

  // Keep track of running MarkSweep tasks.
  mutable Monitor tasks_lock_;
  intptr_t tasks_;
  intptr_t concurrent_marker_tasks_;
  Phase phase_;

#if defined(DEBUG)
  Thread* iterating_thread_;
#endif
  PageSpaceController page_space_controller_;
  GCMarker* marker_;

  int64_t gc_time_micros_;
  intptr_t collections_;
  intptr_t mark_words_per_micro_;

  bool enable_concurrent_mark_;

  friend class BasePageIterator;
  friend class ExclusivePageIterator;
  friend class ExclusiveCodePageIterator;
  friend class ExclusiveLargePageIterator;
  friend class HeapIterationScope;
  friend class HeapSnapshotWriter;
  friend class PageSpaceController;
  friend class ConcurrentSweeperTask;
  friend class GCCompactor;
  friend class CompactorTask;

  DISALLOW_IMPLICIT_CONSTRUCTORS(PageSpace);
};

}  // namespace dart

#endif  // RUNTIME_VM_HEAP_PAGES_H_