1 | // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/heap/pages.h" |
6 | |
7 | #include "platform/assert.h" |
8 | #include "platform/leak_sanitizer.h" |
9 | #include "vm/dart.h" |
10 | #include "vm/heap/become.h" |
11 | #include "vm/heap/compactor.h" |
12 | #include "vm/heap/marker.h" |
13 | #include "vm/heap/safepoint.h" |
14 | #include "vm/heap/sweeper.h" |
15 | #include "vm/lockers.h" |
16 | #include "vm/log.h" |
17 | #include "vm/object.h" |
18 | #include "vm/object_set.h" |
19 | #include "vm/os_thread.h" |
20 | #include "vm/virtual_memory.h" |
21 | |
22 | namespace dart { |
23 | |
DEFINE_FLAG(int,
            old_gen_growth_space_ratio,
            20,
            "The desired maximum percentage of free space after old gen GC");
DEFINE_FLAG(int,
            old_gen_growth_time_ratio,
            3,
            "The desired maximum percentage of time spent in old gen GC");
DEFINE_FLAG(int,
            old_gen_growth_rate,
            280,
            "The max number of pages the old generation can grow at a time");
DEFINE_FLAG(bool,
            print_free_list_before_gc,
            false,
            "Print free list statistics before a GC");
DEFINE_FLAG(bool,
            print_free_list_after_gc,
            false,
            "Print free list statistics after a GC");
DEFINE_FLAG(bool, log_growth, false, "Log PageSpace growth policy decisions.");
45 | |
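// Reserves |size_in_words| of kOldPageSize-aligned virtual memory and places
// the OldPage header at the start of the region. Returns NULL if the
// reservation fails.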
46 | OldPage* OldPage::Allocate(intptr_t size_in_words, |
47 | PageType type, |
48 | const char* name) { |
49 | const bool executable = type == kExecutable; |
50 | |
51 | VirtualMemory* memory = VirtualMemory::AllocateAligned( |
52 | size_in_words << kWordSizeLog2, kOldPageSize, executable, name); |
53 | if (memory == NULL) { |
54 | return NULL; |
55 | } |
56 | |
57 | OldPage* result = reinterpret_cast<OldPage*>(memory->address()); |
58 | ASSERT(result != NULL); |
59 | result->memory_ = memory; |
60 | result->next_ = NULL; |
61 | result->used_in_bytes_ = 0; |
62 | result->forwarding_page_ = NULL; |
63 | result->card_table_ = NULL; |
64 | result->type_ = type; |
65 | |
66 | LSAN_REGISTER_ROOT_REGION(result, sizeof(*result)); |
67 | |
68 | return result; |
69 | } |
70 | |
71 | void OldPage::Deallocate() { |
72 | if (card_table_ != NULL) { |
73 | free(card_table_); |
74 | card_table_ = NULL; |
75 | } |
76 | |
77 | bool image_page = is_image_page(); |
78 | |
79 | if (!image_page) { |
80 | LSAN_UNREGISTER_ROOT_REGION(this, sizeof(*this)); |
81 | } |
82 | |
  // For regular heap pages, the memory for this object will become
  // unavailable after the delete below.
85 | delete memory_; |
86 | |
87 | // For a heap page from a snapshot, the OldPage object lives in the malloc |
88 | // heap rather than the page itself. |
89 | if (image_page) { |
90 | free(this); |
91 | } |
92 | } |
93 | |
94 | void OldPage::VisitObjects(ObjectVisitor* visitor) const { |
95 | ASSERT(Thread::Current()->IsAtSafepoint()); |
96 | NoSafepointScope no_safepoint; |
97 | uword obj_addr = object_start(); |
98 | uword end_addr = object_end(); |
99 | while (obj_addr < end_addr) { |
100 | ObjectPtr raw_obj = ObjectLayout::FromAddr(obj_addr); |
101 | visitor->VisitObject(raw_obj); |
102 | obj_addr += raw_obj->ptr()->HeapSize(); |
103 | } |
104 | ASSERT(obj_addr == end_addr); |
105 | } |
106 | |
107 | void OldPage::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
108 | ASSERT(Thread::Current()->IsAtSafepoint() || |
109 | (Thread::Current()->task_kind() == Thread::kCompactorTask)); |
110 | NoSafepointScope no_safepoint; |
111 | uword obj_addr = object_start(); |
112 | uword end_addr = object_end(); |
113 | while (obj_addr < end_addr) { |
114 | ObjectPtr raw_obj = ObjectLayout::FromAddr(obj_addr); |
115 | obj_addr += raw_obj->ptr()->VisitPointers(visitor); |
116 | } |
117 | ASSERT(obj_addr == end_addr); |
118 | } |
119 | |
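// Visits the object slots covered by dirty cards and clears any card that no
// longer refers to new-space. Only large arrays are card-remembered, so the
// page is expected to contain a single array object.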
120 | void OldPage::VisitRememberedCards(ObjectPointerVisitor* visitor) { |
121 | ASSERT(Thread::Current()->IsAtSafepoint() || |
122 | (Thread::Current()->task_kind() == Thread::kScavengerTask)); |
123 | NoSafepointScope no_safepoint; |
124 | |
125 | if (card_table_ == NULL) { |
126 | return; |
127 | } |
128 | |
  bool table_is_empty = true;
130 | |
131 | ArrayPtr obj = static_cast<ArrayPtr>(ObjectLayout::FromAddr(object_start())); |
132 | ASSERT(obj->IsArray()); |
133 | ASSERT(obj->ptr()->IsCardRemembered()); |
134 | ObjectPtr* obj_from = obj->ptr()->from(); |
135 | ObjectPtr* obj_to = obj->ptr()->to(Smi::Value(obj->ptr()->length_)); |
136 | |
137 | const intptr_t size = card_table_size(); |
138 | for (intptr_t i = 0; i < size; i++) { |
139 | if (card_table_[i] != 0) { |
140 | ObjectPtr* card_from = |
141 | reinterpret_cast<ObjectPtr*>(this) + (i << kSlotsPerCardLog2); |
142 | ObjectPtr* card_to = reinterpret_cast<ObjectPtr*>(card_from) + |
143 | (1 << kSlotsPerCardLog2) - 1; |
144 | // Minus 1 because to is inclusive. |
145 | |
146 | if (card_from < obj_from) { |
147 | // First card overlaps with header. |
148 | card_from = obj_from; |
149 | } |
150 | if (card_to > obj_to) { |
151 | // Last card(s) may extend past the object. Array truncation can make |
152 | // this happen for more than one card. |
153 | card_to = obj_to; |
154 | } |
155 | |
156 | visitor->VisitPointers(card_from, card_to); |
157 | |
158 | bool has_new_target = false; |
159 | for (ObjectPtr* slot = card_from; slot <= card_to; slot++) { |
160 | if ((*slot)->IsNewObjectMayBeSmi()) { |
161 | has_new_target = true; |
162 | break; |
163 | } |
164 | } |
165 | |
166 | if (has_new_target) { |
167 | // Card remains remembered. |
168 | table_is_empty = false; |
169 | } else { |
170 | card_table_[i] = 0; |
171 | } |
172 | } |
173 | } |
174 | |
175 | if (table_is_empty) { |
176 | free(card_table_); |
177 | card_table_ = NULL; |
178 | } |
179 | } |
180 | |
181 | ObjectPtr OldPage::FindObject(FindObjectVisitor* visitor) const { |
182 | uword obj_addr = object_start(); |
183 | uword end_addr = object_end(); |
184 | if (visitor->VisitRange(obj_addr, end_addr)) { |
185 | while (obj_addr < end_addr) { |
186 | ObjectPtr raw_obj = ObjectLayout::FromAddr(obj_addr); |
187 | uword next_obj_addr = obj_addr + raw_obj->ptr()->HeapSize(); |
188 | if (visitor->VisitRange(obj_addr, next_obj_addr) && |
189 | raw_obj->ptr()->FindObject(visitor)) { |
190 | return raw_obj; // Found object, return it. |
191 | } |
192 | obj_addr = next_obj_addr; |
193 | } |
194 | ASSERT(obj_addr == end_addr); |
195 | } |
196 | return Object::null(); |
197 | } |
198 | |
199 | void OldPage::WriteProtect(bool read_only) { |
200 | ASSERT(!is_image_page()); |
201 | |
202 | VirtualMemory::Protection prot; |
203 | if (read_only) { |
204 | if ((type_ == kExecutable) && (memory_->AliasOffset() == 0)) { |
205 | prot = VirtualMemory::kReadExecute; |
206 | } else { |
207 | prot = VirtualMemory::kReadOnly; |
208 | } |
209 | } else { |
210 | prot = VirtualMemory::kReadWrite; |
211 | } |
212 | memory_->Protect(prot); |
213 | } |
214 | |
215 | // The initial estimate of how many words we can mark per microsecond (usage |
216 | // before / mark-sweep time). This is a conservative value observed running |
217 | // Flutter on a Nexus 4. After the first mark-sweep, we instead use a value |
218 | // based on the device's actual speed. |
219 | static const intptr_t kConservativeInitialMarkSpeed = 20; |
220 | |
221 | PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words) |
222 | : heap_(heap), |
223 | num_freelists_(Utils::Maximum(FLAG_scavenger_tasks, 1) + 1), |
224 | freelists_(new FreeList[num_freelists_]), |
225 | pages_lock_(), |
226 | max_capacity_in_words_(max_capacity_in_words), |
227 | usage_(), |
228 | allocated_black_in_words_(0), |
229 | tasks_lock_(), |
230 | tasks_(0), |
231 | concurrent_marker_tasks_(0), |
232 | phase_(kDone), |
233 | #if defined(DEBUG) |
234 | iterating_thread_(NULL), |
235 | #endif |
236 | page_space_controller_(heap, |
237 | FLAG_old_gen_growth_space_ratio, |
238 | FLAG_old_gen_growth_rate, |
239 | FLAG_old_gen_growth_time_ratio), |
240 | marker_(NULL), |
241 | gc_time_micros_(0), |
242 | collections_(0), |
243 | mark_words_per_micro_(kConservativeInitialMarkSpeed), |
244 | enable_concurrent_mark_(FLAG_concurrent_mark) { |
245 | // We aren't holding the lock but no one can reference us yet. |
246 | UpdateMaxCapacityLocked(); |
247 | UpdateMaxUsed(); |
248 | |
249 | for (intptr_t i = 0; i < num_freelists_; i++) { |
250 | freelists_[i].Reset(); |
251 | } |
252 | } |
253 | |
254 | PageSpace::~PageSpace() { |
255 | { |
256 | MonitorLocker ml(tasks_lock()); |
257 | while (tasks() > 0) { |
258 | ml.Wait(); |
259 | } |
260 | } |
261 | FreePages(pages_); |
262 | FreePages(exec_pages_); |
263 | FreePages(large_pages_); |
264 | FreePages(image_pages_); |
265 | ASSERT(marker_ == NULL); |
266 | delete[] freelists_; |
267 | } |
268 | |
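// Returns the size, in words, of a large page that must hold an object of
// |size| bytes plus the page header, rounded up to the OS page size.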
269 | intptr_t PageSpace::LargePageSizeInWordsFor(intptr_t size) { |
270 | intptr_t page_size = Utils::RoundUp(size + OldPage::ObjectStartOffset(), |
271 | VirtualMemory::PageSize()); |
272 | return page_size >> kWordSizeLog2; |
273 | } |
274 | |
275 | void PageSpace::AddPageLocked(OldPage* page) { |
276 | if (pages_ == nullptr) { |
277 | pages_ = page; |
278 | } else { |
279 | pages_tail_->set_next(page); |
280 | } |
281 | pages_tail_ = page; |
282 | } |
283 | |
284 | void PageSpace::AddLargePageLocked(OldPage* page) { |
285 | if (large_pages_ == nullptr) { |
286 | large_pages_ = page; |
287 | } else { |
288 | large_pages_tail_->set_next(page); |
289 | } |
290 | large_pages_tail_ = page; |
291 | } |
292 | |
293 | void PageSpace::AddExecPageLocked(OldPage* page) { |
294 | if (exec_pages_ == nullptr) { |
295 | exec_pages_ = page; |
296 | } else { |
297 | if (FLAG_write_protect_code) { |
298 | exec_pages_tail_->WriteProtect(false); |
299 | } |
300 | exec_pages_tail_->set_next(page); |
301 | if (FLAG_write_protect_code) { |
302 | exec_pages_tail_->WriteProtect(true); |
303 | } |
304 | } |
305 | exec_pages_tail_ = page; |
306 | } |
307 | |
308 | void PageSpace::RemovePageLocked(OldPage* page, OldPage* previous_page) { |
309 | if (previous_page != NULL) { |
310 | previous_page->set_next(page->next()); |
311 | } else { |
312 | pages_ = page->next(); |
313 | } |
314 | if (page == pages_tail_) { |
315 | pages_tail_ = previous_page; |
316 | } |
317 | } |
318 | |
319 | void PageSpace::RemoveLargePageLocked(OldPage* page, OldPage* previous_page) { |
320 | if (previous_page != NULL) { |
321 | previous_page->set_next(page->next()); |
322 | } else { |
323 | large_pages_ = page->next(); |
324 | } |
325 | if (page == large_pages_tail_) { |
326 | large_pages_tail_ = previous_page; |
327 | } |
328 | } |
329 | |
330 | void PageSpace::RemoveExecPageLocked(OldPage* page, OldPage* previous_page) { |
331 | if (previous_page != NULL) { |
332 | previous_page->set_next(page->next()); |
333 | } else { |
334 | exec_pages_ = page->next(); |
335 | } |
336 | if (page == exec_pages_tail_) { |
337 | exec_pages_tail_ = previous_page; |
338 | } |
339 | } |
340 | |
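// Reserves capacity under the pages lock before mapping a new page, and rolls
// the reservation back if the mapping fails, so that capacity accounting
// never exceeds max_capacity_in_words_.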
341 | OldPage* PageSpace::AllocatePage(OldPage::PageType type, bool link) { |
342 | { |
343 | MutexLocker ml(&pages_lock_); |
344 | if (!CanIncreaseCapacityInWordsLocked(kOldPageSizeInWords)) { |
345 | return nullptr; |
346 | } |
347 | IncreaseCapacityInWordsLocked(kOldPageSizeInWords); |
348 | } |
349 | const bool is_exec = (type == OldPage::kExecutable); |
350 | const char* name = Heap::RegionName(is_exec ? Heap::kCode : Heap::kOld); |
351 | OldPage* page = OldPage::Allocate(kOldPageSizeInWords, type, name); |
352 | if (page == nullptr) { |
353 | RELEASE_ASSERT(!FLAG_abort_on_oom); |
354 | IncreaseCapacityInWords(-kOldPageSizeInWords); |
355 | return nullptr; |
356 | } |
357 | |
358 | MutexLocker ml(&pages_lock_); |
359 | if (link) { |
360 | if (is_exec) { |
361 | AddExecPageLocked(page); |
362 | } else { |
363 | AddPageLocked(page); |
364 | } |
365 | } |
366 | |
367 | page->set_object_end(page->memory_->end()); |
368 | if ((type != OldPage::kExecutable) && (heap_ != nullptr) && |
369 | (heap_->isolate_group() != Dart::vm_isolate()->group())) { |
370 | page->AllocateForwardingPage(); |
371 | } |
372 | return page; |
373 | } |
374 | |
375 | OldPage* PageSpace::AllocateLargePage(intptr_t size, OldPage::PageType type) { |
376 | const intptr_t page_size_in_words = LargePageSizeInWordsFor(size); |
377 | { |
378 | MutexLocker ml(&pages_lock_); |
379 | if (!CanIncreaseCapacityInWordsLocked(page_size_in_words)) { |
380 | return nullptr; |
381 | } |
382 | IncreaseCapacityInWordsLocked(page_size_in_words); |
383 | } |
384 | const bool is_exec = (type == OldPage::kExecutable); |
385 | const char* name = Heap::RegionName(is_exec ? Heap::kCode : Heap::kOld); |
386 | OldPage* page = OldPage::Allocate(page_size_in_words, type, name); |
387 | |
388 | MutexLocker ml(&pages_lock_); |
389 | if (page == nullptr) { |
390 | IncreaseCapacityInWordsLocked(-page_size_in_words); |
391 | return nullptr; |
392 | } |
393 | if (is_exec) { |
394 | AddExecPageLocked(page); |
395 | } else { |
396 | AddLargePageLocked(page); |
397 | } |
398 | |
399 | // Only one object in this page (at least until Array::MakeFixedLength |
400 | // is called). |
401 | page->set_object_end(page->object_start() + size); |
402 | return page; |
403 | } |
404 | |
405 | void PageSpace::TruncateLargePage(OldPage* page, |
406 | intptr_t new_object_size_in_bytes) { |
407 | const intptr_t old_object_size_in_bytes = |
408 | page->object_end() - page->object_start(); |
409 | ASSERT(new_object_size_in_bytes <= old_object_size_in_bytes); |
410 | const intptr_t new_page_size_in_words = |
411 | LargePageSizeInWordsFor(new_object_size_in_bytes); |
412 | VirtualMemory* memory = page->memory_; |
413 | const intptr_t old_page_size_in_words = (memory->size() >> kWordSizeLog2); |
414 | if (new_page_size_in_words < old_page_size_in_words) { |
415 | memory->Truncate(new_page_size_in_words << kWordSizeLog2); |
416 | IncreaseCapacityInWords(new_page_size_in_words - old_page_size_in_words); |
417 | page->set_object_end(page->object_start() + new_object_size_in_bytes); |
418 | } |
419 | } |
420 | |
421 | void PageSpace::FreePage(OldPage* page, OldPage* previous_page) { |
422 | bool is_exec = (page->type() == OldPage::kExecutable); |
423 | { |
424 | MutexLocker ml(&pages_lock_); |
425 | IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2)); |
426 | if (is_exec) { |
427 | RemoveExecPageLocked(page, previous_page); |
428 | } else { |
429 | RemovePageLocked(page, previous_page); |
430 | } |
431 | } |
432 | // TODO(iposva): Consider adding to a pool of empty pages. |
433 | page->Deallocate(); |
434 | } |
435 | |
436 | void PageSpace::FreeLargePage(OldPage* page, OldPage* previous_page) { |
437 | ASSERT(page->type() != OldPage::kExecutable); |
438 | MutexLocker ml(&pages_lock_); |
439 | IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2)); |
440 | RemoveLargePageLocked(page, previous_page); |
441 | page->Deallocate(); |
442 | } |
443 | |
444 | void PageSpace::FreePages(OldPage* pages) { |
445 | OldPage* page = pages; |
446 | while (page != NULL) { |
447 | OldPage* next = page->next(); |
448 | page->Deallocate(); |
449 | page = next; |
450 | } |
451 | } |
452 | |
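// Unless growth is being forced, gives the heap a chance to finish an
// in-progress concurrent mark, or to start one, before old-space grows.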
453 | void PageSpace::EvaluateConcurrentMarking(GrowthPolicy growth_policy) { |
454 | if (growth_policy != kForceGrowth) { |
455 | if (heap_ != NULL) { // Some unit tests. |
456 | Thread* thread = Thread::Current(); |
457 | if (thread->CanCollectGarbage()) { |
458 | heap_->CheckFinishConcurrentMarking(thread); |
459 | heap_->CheckStartConcurrentMarking(thread, Heap::kOldSpace); |
460 | } |
461 | } |
462 | } |
463 | } |
464 | |
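// Allocates |size| bytes at the start of a fresh page and returns the rest of
// the page to the free list. Returns 0 if the growth policy forbids another
// page or the OS cannot supply one.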
465 | uword PageSpace::TryAllocateInFreshPage(intptr_t size, |
466 | FreeList* freelist, |
467 | OldPage::PageType type, |
468 | GrowthPolicy growth_policy, |
469 | bool is_locked) { |
470 | ASSERT(Heap::IsAllocatableViaFreeLists(size)); |
471 | |
472 | EvaluateConcurrentMarking(growth_policy); |
473 | |
474 | uword result = 0; |
475 | SpaceUsage after_allocation = GetCurrentUsage(); |
476 | after_allocation.used_in_words += size >> kWordSizeLog2; |
477 | // Can we grow by one page? |
478 | after_allocation.capacity_in_words += kOldPageSizeInWords; |
479 | if (growth_policy == kForceGrowth || |
480 | !page_space_controller_.ReachedHardThreshold(after_allocation)) { |
481 | OldPage* page = AllocatePage(type); |
482 | if (page == NULL) { |
483 | return 0; |
484 | } |
485 | // Start of the newly allocated page is the allocated object. |
486 | result = page->object_start(); |
487 | // Note: usage_.capacity_in_words is increased by AllocatePage. |
488 | usage_.used_in_words += (size >> kWordSizeLog2); |
489 | // Enqueue the remainder in the free list. |
490 | uword free_start = result + size; |
491 | intptr_t free_size = page->object_end() - free_start; |
492 | if (free_size > 0) { |
493 | if (is_locked) { |
494 | freelist->FreeLocked(free_start, free_size); |
495 | } else { |
496 | freelist->Free(free_start, free_size); |
497 | } |
498 | } |
499 | } |
500 | return result; |
501 | } |
502 | |
503 | uword PageSpace::TryAllocateInFreshLargePage(intptr_t size, |
504 | OldPage::PageType type, |
505 | GrowthPolicy growth_policy) { |
506 | ASSERT(!Heap::IsAllocatableViaFreeLists(size)); |
507 | |
508 | EvaluateConcurrentMarking(growth_policy); |
509 | |
510 | intptr_t page_size_in_words = LargePageSizeInWordsFor(size); |
511 | if ((page_size_in_words << kWordSizeLog2) < size) { |
512 | // On overflow we fail to allocate. |
513 | return 0; |
514 | } |
515 | |
516 | uword result = 0; |
517 | SpaceUsage after_allocation = GetCurrentUsage(); |
518 | after_allocation.used_in_words += size >> kWordSizeLog2; |
519 | after_allocation.capacity_in_words += page_size_in_words; |
520 | if (growth_policy == kForceGrowth || |
521 | !page_space_controller_.ReachedHardThreshold(after_allocation)) { |
522 | OldPage* page = AllocateLargePage(size, type); |
523 | if (page != NULL) { |
524 | result = page->object_start(); |
525 | // Note: usage_.capacity_in_words is increased by AllocateLargePage. |
526 | usage_.used_in_words += (size >> kWordSizeLog2); |
527 | } |
528 | } |
529 | return result; |
530 | } |
531 | |
532 | uword PageSpace::TryAllocateInternal(intptr_t size, |
533 | FreeList* freelist, |
534 | OldPage::PageType type, |
535 | GrowthPolicy growth_policy, |
536 | bool is_protected, |
537 | bool is_locked) { |
538 | ASSERT(size >= kObjectAlignment); |
539 | ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
540 | uword result = 0; |
541 | if (Heap::IsAllocatableViaFreeLists(size)) { |
542 | if (is_locked) { |
543 | result = freelist->TryAllocateLocked(size, is_protected); |
544 | } else { |
545 | result = freelist->TryAllocate(size, is_protected); |
546 | } |
547 | if (result == 0) { |
548 | result = TryAllocateInFreshPage(size, freelist, type, growth_policy, |
549 | is_locked); |
550 | // usage_ is updated by the call above. |
551 | } else { |
552 | usage_.used_in_words += (size >> kWordSizeLog2); |
553 | } |
554 | } else { |
555 | result = TryAllocateInFreshLargePage(size, type, growth_policy); |
556 | // usage_ is updated by the call above. |
557 | } |
558 | ASSERT((result & kObjectAlignmentMask) == kOldObjectAlignmentOffset); |
559 | return result; |
560 | } |
561 | |
562 | void PageSpace::AcquireLock(FreeList* freelist) { |
563 | freelist->mutex()->Lock(); |
564 | } |
565 | |
566 | void PageSpace::ReleaseLock(FreeList* freelist) { |
567 | intptr_t size = freelist->TakeUnaccountedSizeLocked(); |
568 | usage_.used_in_words += (size >> kWordSizeLog2); |
569 | freelist->mutex()->Unlock(); |
570 | } |
571 | |
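// Visits the regular, executable, large, and image page lists, in that order.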
572 | class BasePageIterator : ValueObject { |
573 | public: |
574 | explicit BasePageIterator(const PageSpace* space) : space_(space) {} |
575 | |
576 | OldPage* page() const { return page_; } |
577 | |
578 | bool Done() const { return page_ == NULL; } |
579 | |
580 | void Advance() { |
581 | ASSERT(!Done()); |
582 | page_ = page_->next(); |
583 | if ((page_ == NULL) && (list_ == kRegular)) { |
584 | list_ = kExecutable; |
585 | page_ = space_->exec_pages_; |
586 | } |
587 | if ((page_ == NULL) && (list_ == kExecutable)) { |
588 | list_ = kLarge; |
589 | page_ = space_->large_pages_; |
590 | } |
591 | if ((page_ == NULL) && (list_ == kLarge)) { |
592 | list_ = kImage; |
593 | page_ = space_->image_pages_; |
594 | } |
595 | ASSERT((page_ != NULL) || (list_ == kImage)); |
596 | } |
597 | |
598 | protected: |
599 | enum List { kRegular, kExecutable, kLarge, kImage }; |
600 | |
601 | void Initialize() { |
602 | list_ = kRegular; |
603 | page_ = space_->pages_; |
604 | if (page_ == NULL) { |
605 | list_ = kExecutable; |
606 | page_ = space_->exec_pages_; |
607 | if (page_ == NULL) { |
608 | list_ = kLarge; |
609 | page_ = space_->large_pages_; |
610 | if (page_ == NULL) { |
611 | list_ = kImage; |
612 | page_ = space_->image_pages_; |
613 | } |
614 | } |
615 | } |
616 | } |
617 | |
618 | const PageSpace* space_ = nullptr; |
619 | List list_; |
620 | OldPage* page_ = nullptr; |
621 | }; |
622 | |
623 | // Provides unsafe access to all pages. Assumes pages are walkable. |
624 | class UnsafeExclusivePageIterator : public BasePageIterator { |
625 | public: |
626 | explicit UnsafeExclusivePageIterator(const PageSpace* space) |
627 | : BasePageIterator(space) { |
628 | Initialize(); |
629 | } |
630 | }; |
631 | |
632 | // Provides exclusive access to all pages, and ensures they are walkable. |
633 | class ExclusivePageIterator : public BasePageIterator { |
634 | public: |
635 | explicit ExclusivePageIterator(const PageSpace* space) |
636 | : BasePageIterator(space), ml_(&space->pages_lock_) { |
637 | space_->MakeIterable(); |
638 | Initialize(); |
639 | } |
640 | |
641 | private: |
642 | MutexLocker ml_; |
643 | NoSafepointScope no_safepoint; |
644 | }; |
645 | |
646 | // Provides exclusive access to code pages, and ensures they are walkable. |
647 | // NOTE: This does not iterate over large pages which can contain code. |
648 | class ExclusiveCodePageIterator : ValueObject { |
649 | public: |
650 | explicit ExclusiveCodePageIterator(const PageSpace* space) |
651 | : space_(space), ml_(&space->pages_lock_) { |
652 | space_->MakeIterable(); |
653 | page_ = space_->exec_pages_; |
654 | } |
655 | OldPage* page() const { return page_; } |
656 | bool Done() const { return page_ == NULL; } |
657 | void Advance() { |
658 | ASSERT(!Done()); |
659 | page_ = page_->next(); |
660 | } |
661 | |
662 | private: |
663 | const PageSpace* space_; |
664 | MutexLocker ml_; |
665 | NoSafepointScope no_safepoint; |
666 | OldPage* page_; |
667 | }; |
668 | |
669 | void PageSpace::MakeIterable() const { |
670 | // Assert not called from concurrent sweeper task. |
671 | // TODO(koda): Use thread/task identity when implemented. |
672 | ASSERT(IsolateGroup::Current()->heap() != NULL); |
673 | for (intptr_t i = 0; i < num_freelists_; i++) { |
674 | freelists_[i].MakeIterable(); |
675 | } |
676 | } |
677 | |
678 | void PageSpace::AbandonBumpAllocation() { |
679 | for (intptr_t i = 0; i < num_freelists_; i++) { |
680 | freelists_[i].AbandonBumpAllocation(); |
681 | } |
682 | } |
683 | |
684 | void PageSpace::AbandonMarkingForShutdown() { |
685 | delete marker_; |
686 | marker_ = NULL; |
687 | } |
688 | |
689 | void PageSpace::UpdateMaxCapacityLocked() { |
690 | if (heap_ == NULL) { |
691 | // Some unit tests. |
692 | return; |
693 | } |
694 | ASSERT(heap_ != NULL); |
695 | ASSERT(heap_->isolate_group() != NULL); |
696 | auto isolate_group = heap_->isolate_group(); |
697 | isolate_group->GetHeapOldCapacityMaxMetric()->SetValue( |
698 | static_cast<int64_t>(usage_.capacity_in_words) * kWordSize); |
699 | } |
700 | |
701 | void PageSpace::UpdateMaxUsed() { |
702 | if (heap_ == NULL) { |
703 | // Some unit tests. |
704 | return; |
705 | } |
706 | ASSERT(heap_ != NULL); |
707 | ASSERT(heap_->isolate_group() != NULL); |
708 | auto isolate_group = heap_->isolate_group(); |
709 | isolate_group->GetHeapOldUsedMaxMetric()->SetValue(UsedInWords() * kWordSize); |
710 | } |
711 | |
712 | bool PageSpace::Contains(uword addr) const { |
713 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
714 | if (it.page()->Contains(addr)) { |
715 | return true; |
716 | } |
717 | } |
718 | return false; |
719 | } |
720 | |
721 | bool PageSpace::ContainsUnsafe(uword addr) const { |
722 | for (UnsafeExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
723 | if (it.page()->Contains(addr)) { |
724 | return true; |
725 | } |
726 | } |
727 | return false; |
728 | } |
729 | |
730 | bool PageSpace::Contains(uword addr, OldPage::PageType type) const { |
731 | if (type == OldPage::kExecutable) { |
732 | // Fast path executable pages. |
733 | for (ExclusiveCodePageIterator it(this); !it.Done(); it.Advance()) { |
734 | if (it.page()->Contains(addr)) { |
735 | return true; |
736 | } |
737 | } |
738 | return false; |
739 | } |
740 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
741 | if ((it.page()->type() == type) && it.page()->Contains(addr)) { |
742 | return true; |
743 | } |
744 | } |
745 | return false; |
746 | } |
747 | |
748 | bool PageSpace::DataContains(uword addr) const { |
749 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
750 | if ((it.page()->type() != OldPage::kExecutable) && |
751 | it.page()->Contains(addr)) { |
752 | return true; |
753 | } |
754 | } |
755 | return false; |
756 | } |
757 | |
758 | void PageSpace::AddRegionsToObjectSet(ObjectSet* set) const { |
759 | ASSERT((pages_ != NULL) || (exec_pages_ != NULL) || (large_pages_ != NULL)); |
760 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
761 | set->AddRegion(it.page()->object_start(), it.page()->object_end()); |
762 | } |
763 | } |
764 | |
765 | void PageSpace::VisitObjects(ObjectVisitor* visitor) const { |
766 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
767 | it.page()->VisitObjects(visitor); |
768 | } |
769 | } |
770 | |
771 | void PageSpace::VisitObjectsNoImagePages(ObjectVisitor* visitor) const { |
772 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
773 | if (!it.page()->is_image_page()) { |
774 | it.page()->VisitObjects(visitor); |
775 | } |
776 | } |
777 | } |
778 | |
779 | void PageSpace::VisitObjectsImagePages(ObjectVisitor* visitor) const { |
780 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
781 | if (it.page()->is_image_page()) { |
782 | it.page()->VisitObjects(visitor); |
783 | } |
784 | } |
785 | } |
786 | |
787 | void PageSpace::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
788 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
789 | it.page()->VisitObjectPointers(visitor); |
790 | } |
791 | } |
792 | |
793 | void PageSpace::VisitRememberedCards(ObjectPointerVisitor* visitor) const { |
794 | ASSERT(Thread::Current()->IsAtSafepoint() || |
795 | (Thread::Current()->task_kind() == Thread::kScavengerTask)); |
796 | |
797 | // Wait for the sweeper to finish mutating the large page list. |
798 | MonitorLocker ml(tasks_lock()); |
799 | while (phase() == kSweepingLarge) { |
800 | ml.Wait(); // No safepoint check. |
801 | } |
802 | |
  // Large pages may be added concurrently due to promotion in another scavenge
  // worker, so terminate the traversal when we hit the tail we saw while
  // holding the pages lock, instead of at NULL; otherwise we would race on
  // reads of OldPage::next_ and OldPage::remembered_cards_.
807 | OldPage* page; |
808 | OldPage* tail; |
809 | { |
810 | MutexLocker ml(&pages_lock_); |
811 | page = large_pages_; |
812 | tail = large_pages_tail_; |
813 | } |
814 | while (page != nullptr) { |
815 | page->VisitRememberedCards(visitor); |
816 | if (page == tail) break; |
817 | page = page->next(); |
818 | } |
819 | } |
820 | |
821 | ObjectPtr PageSpace::FindObject(FindObjectVisitor* visitor, |
822 | OldPage::PageType type) const { |
823 | if (type == OldPage::kExecutable) { |
824 | // Fast path executable pages. |
825 | for (ExclusiveCodePageIterator it(this); !it.Done(); it.Advance()) { |
826 | ObjectPtr obj = it.page()->FindObject(visitor); |
827 | if (obj != Object::null()) { |
828 | return obj; |
829 | } |
830 | } |
831 | return Object::null(); |
832 | } |
833 | |
834 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
835 | if (it.page()->type() == type) { |
836 | ObjectPtr obj = it.page()->FindObject(visitor); |
837 | if (obj != Object::null()) { |
838 | return obj; |
839 | } |
840 | } |
841 | } |
842 | return Object::null(); |
843 | } |
844 | |
845 | void PageSpace::WriteProtect(bool read_only) { |
846 | if (read_only) { |
847 | // Avoid MakeIterable trying to write to the heap. |
848 | AbandonBumpAllocation(); |
849 | } |
850 | for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
851 | if (!it.page()->is_image_page()) { |
852 | it.page()->WriteProtect(read_only); |
853 | } |
854 | } |
855 | } |
856 | |
857 | #ifndef PRODUCT |
858 | void PageSpace::PrintToJSONObject(JSONObject* object) const { |
859 | auto isolate_group = IsolateGroup::Current(); |
860 | ASSERT(isolate_group != nullptr); |
  JSONObject space(object, "old");
  space.AddProperty("type", "HeapSpace");
  space.AddProperty("name", "old");
  space.AddProperty("vmName", "PageSpace");
  space.AddProperty("collections", collections());
  space.AddProperty64("used", UsedInWords() * kWordSize);
  space.AddProperty64("capacity", CapacityInWords() * kWordSize);
  space.AddProperty64("external", ExternalInWords() * kWordSize);
  space.AddProperty("time", MicrosecondsToSeconds(gc_time_micros()));
870 | if (collections() > 0) { |
871 | int64_t run_time = isolate_group->UptimeMicros(); |
872 | run_time = Utils::Maximum(run_time, static_cast<int64_t>(0)); |
873 | double run_time_millis = MicrosecondsToMilliseconds(run_time); |
874 | double avg_time_between_collections = |
875 | run_time_millis / static_cast<double>(collections()); |
    space.AddProperty("avgCollectionPeriodMillis",
                      avg_time_between_collections);
  } else {
    space.AddProperty("avgCollectionPeriodMillis", 0.0);
880 | } |
881 | } |
882 | |
883 | class HeapMapAsJSONVisitor : public ObjectVisitor { |
884 | public: |
885 | explicit HeapMapAsJSONVisitor(JSONArray* array) : array_(array) {} |
886 | virtual void VisitObject(ObjectPtr obj) { |
887 | array_->AddValue(obj->ptr()->HeapSize() / kObjectAlignment); |
888 | array_->AddValue(obj->GetClassId()); |
889 | } |
890 | |
891 | private: |
892 | JSONArray* array_; |
893 | }; |
894 | |
895 | void PageSpace::PrintHeapMapToJSONStream(Isolate* isolate, |
896 | JSONStream* stream) const { |
897 | JSONObject heap_map(stream); |
  heap_map.AddProperty("type", "HeapMap");
  heap_map.AddProperty("freeClassId", static_cast<intptr_t>(kFreeListElement));
  heap_map.AddProperty("unitSizeBytes",
                       static_cast<intptr_t>(kObjectAlignment));
  heap_map.AddProperty("pageSizeBytes", kOldPageSizeInWords * kWordSize);
  {
    JSONObject class_list(&heap_map, "classList");
905 | isolate->class_table()->PrintToJSONObject(&class_list); |
906 | } |
907 | { |
908 | // "pages" is an array [page0, page1, ..., pageN], each page of the form |
909 | // {"object_start": "0x...", "objects": [size, class id, size, ...]} |
910 | // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages. |
911 | HeapIterationScope iteration(Thread::Current()); |
912 | MutexLocker ml(&pages_lock_); |
913 | MakeIterable(); |
    JSONArray all_pages(&heap_map, "pages");
915 | for (OldPage* page = pages_; page != NULL; page = page->next()) { |
916 | JSONObject page_container(&all_pages); |
      page_container.AddPropertyF("objectStart", "0x%" Px "",
                                  page->object_start());
      JSONArray page_map(&page_container, "objects");
920 | HeapMapAsJSONVisitor printer(&page_map); |
921 | page->VisitObjects(&printer); |
922 | } |
923 | for (OldPage* page = exec_pages_; page != NULL; page = page->next()) { |
924 | JSONObject page_container(&all_pages); |
      page_container.AddPropertyF("objectStart", "0x%" Px "",
                                  page->object_start());
      JSONArray page_map(&page_container, "objects");
928 | HeapMapAsJSONVisitor printer(&page_map); |
929 | page->VisitObjects(&printer); |
930 | } |
931 | } |
932 | } |
933 | #endif // PRODUCT |
934 | |
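// Toggles write protection on every page that may contain code: the
// executable page list and any executable pages on the large page list.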
935 | void PageSpace::WriteProtectCode(bool read_only) { |
936 | if (FLAG_write_protect_code) { |
937 | MutexLocker ml(&pages_lock_); |
938 | NoSafepointScope no_safepoint; |
939 | // No need to go through all of the data pages first. |
940 | OldPage* page = exec_pages_; |
941 | while (page != NULL) { |
942 | ASSERT(page->type() == OldPage::kExecutable); |
943 | page->WriteProtect(read_only); |
944 | page = page->next(); |
945 | } |
946 | page = large_pages_; |
947 | while (page != NULL) { |
948 | if (page->type() == OldPage::kExecutable) { |
949 | page->WriteProtect(read_only); |
950 | } |
951 | page = page->next(); |
952 | } |
953 | } |
954 | } |
955 | |
956 | bool PageSpace::ShouldStartIdleMarkSweep(int64_t deadline) { |
957 | // To make a consistent decision, we should not yield for a safepoint in the |
958 | // middle of deciding whether to perform an idle GC. |
959 | NoSafepointScope no_safepoint; |
960 | |
961 | if (!page_space_controller_.ReachedIdleThreshold(usage_)) { |
962 | return false; |
963 | } |
964 | |
965 | { |
966 | MonitorLocker locker(tasks_lock()); |
967 | if (tasks() > 0) { |
968 | // A concurrent sweeper is running. If we start a mark sweep now |
969 | // we'll have to wait for it, and this wait time is not included in |
970 | // mark_words_per_micro_. |
971 | return false; |
972 | } |
973 | } |
974 | |
975 | // This uses the size of new-space because the pause time to start concurrent |
976 | // marking is related to the size of the root set, which is mostly new-space. |
977 | int64_t estimated_mark_completion = |
978 | OS::GetCurrentMonotonicMicros() + |
979 | heap_->new_space()->UsedInWords() / mark_words_per_micro_; |
980 | return estimated_mark_completion <= deadline; |
981 | } |
982 | |
983 | bool PageSpace::ShouldPerformIdleMarkCompact(int64_t deadline) { |
984 | // To make a consistent decision, we should not yield for a safepoint in the |
985 | // middle of deciding whether to perform an idle GC. |
986 | NoSafepointScope no_safepoint; |
987 | |
988 | // Discount two pages to account for the newest data and code pages, whose |
989 | // partial use doesn't indicate fragmentation. |
990 | const intptr_t excess_in_words = |
991 | usage_.capacity_in_words - usage_.used_in_words - 2 * kOldPageSizeInWords; |
992 | const double excess_ratio = static_cast<double>(excess_in_words) / |
993 | static_cast<double>(usage_.capacity_in_words); |
994 | const bool fragmented = excess_ratio > 0.05; |
995 | |
996 | if (!fragmented && !page_space_controller_.ReachedIdleThreshold(usage_)) { |
997 | return false; |
998 | } |
999 | |
1000 | { |
1001 | MonitorLocker locker(tasks_lock()); |
1002 | if (tasks() > 0) { |
1003 | // A concurrent sweeper is running. If we start a mark sweep now |
1004 | // we'll have to wait for it, and this wait time is not included in |
1005 | // mark_words_per_micro_. |
1006 | return false; |
1007 | } |
1008 | } |
1009 | |
1010 | // Assuming compaction takes as long as marking. |
1011 | intptr_t mark_compact_words_per_micro = mark_words_per_micro_ / 2; |
1012 | if (mark_compact_words_per_micro == 0) { |
1013 | mark_compact_words_per_micro = 1; // Prevent division by zero. |
1014 | } |
1015 | |
1016 | int64_t estimated_mark_compact_completion = |
1017 | OS::GetCurrentMonotonicMicros() + |
1018 | UsedInWords() / mark_compact_words_per_micro; |
1019 | return estimated_mark_compact_completion <= deadline; |
1020 | } |
1021 | |
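// Entry point for old-space GC. When |finalize| is false, this only starts
// concurrent marking (if enabled and not already running); when true, it
// finishes marking and sweeps, optionally compacting afterwards.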
1022 | void PageSpace::CollectGarbage(bool compact, bool finalize) { |
1023 | if (!finalize) { |
1024 | #if defined(TARGET_ARCH_IA32) |
1025 | return; // Barrier not implemented. |
1026 | #else |
1027 | if (!enable_concurrent_mark()) return; // Disabled. |
1028 | if (FLAG_marker_tasks == 0) return; // Disabled. |
1029 | #endif |
1030 | } |
1031 | |
1032 | Thread* thread = Thread::Current(); |
1033 | |
1034 | const int64_t pre_wait_for_sweepers = OS::GetCurrentMonotonicMicros(); |
1035 | |
1036 | // Wait for pending tasks to complete and then account for the driver task. |
1037 | Phase waited_for; |
1038 | { |
1039 | MonitorLocker locker(tasks_lock()); |
1040 | waited_for = phase(); |
1041 | if (!finalize && |
1042 | (phase() == kMarking || phase() == kAwaitingFinalization)) { |
1043 | // Concurrent mark is already running. |
1044 | return; |
1045 | } |
1046 | |
1047 | while (tasks() > 0) { |
1048 | locker.WaitWithSafepointCheck(thread); |
1049 | } |
1050 | ASSERT(phase() == kAwaitingFinalization || phase() == kDone); |
1051 | set_tasks(1); |
1052 | } |
1053 | |
1054 | const int64_t pre_safe_point = OS::GetCurrentMonotonicMicros(); |
1055 | if (FLAG_verbose_gc) { |
1056 | const int64_t wait = pre_safe_point - pre_wait_for_sweepers; |
1057 | if (waited_for == kMarking) { |
      THR_Print("Waited %" Pd64 " us for concurrent marking to finish.\n",
                wait);
    } else if (waited_for == kSweepingRegular || waited_for == kSweepingLarge) {
      THR_Print("Waited %" Pd64 " us for concurrent sweeping to finish.\n",
                wait);
1063 | } |
1064 | } |
1065 | |
1066 | // Ensure that all threads for this isolate are at a safepoint (either |
1067 | // stopped or in native code). We have guards around Newgen GC and oldgen GC |
1068 | // to ensure that if two threads are racing to collect at the same time the |
1069 | // loser skips collection and goes straight to allocation. |
1070 | { |
1071 | SafepointOperationScope safepoint_scope(thread); |
1072 | CollectGarbageAtSafepoint(compact, finalize, pre_wait_for_sweepers, |
1073 | pre_safe_point); |
1074 | } |
1075 | |
1076 | // Done, reset the task count. |
1077 | { |
1078 | MonitorLocker ml(tasks_lock()); |
1079 | set_tasks(tasks() - 1); |
1080 | ml.NotifyAll(); |
1081 | } |
1082 | } |
1083 | |
1084 | void PageSpace::CollectGarbageAtSafepoint(bool compact, |
1085 | bool finalize, |
1086 | int64_t pre_wait_for_sweepers, |
1087 | int64_t pre_safe_point) { |
1088 | Thread* thread = Thread::Current(); |
1089 | ASSERT(thread->IsAtSafepoint()); |
1090 | auto isolate_group = heap_->isolate_group(); |
1091 | ASSERT(isolate_group == IsolateGroup::Current()); |
1092 | |
1093 | const int64_t start = OS::GetCurrentMonotonicMicros(); |
1094 | |
1095 | // Perform various cleanup that relies on no tasks interfering. |
1096 | isolate_group->shared_class_table()->FreeOldTables(); |
1097 | isolate_group->ForEachIsolate( |
1098 | [&](Isolate* isolate) { isolate->field_table()->FreeOldTables(); }, |
1099 | /*at_safepoint=*/true); |
1100 | |
1101 | NoSafepointScope no_safepoints; |
1102 | |
1103 | if (FLAG_print_free_list_before_gc) { |
1104 | for (intptr_t i = 0; i < num_freelists_; i++) { |
      OS::PrintErr("Before GC: Freelist %" Pd "\n", i);
1106 | freelists_[i].Print(); |
1107 | } |
1108 | } |
1109 | |
1110 | if (FLAG_verify_before_gc) { |
    OS::PrintErr("Verifying before marking...");
    heap_->VerifyGC(phase() == kDone ? kForbidMarked : kAllowMarked);
    OS::PrintErr(" done.\n");
1114 | } |
1115 | |
1116 | // Make code pages writable. |
1117 | if (finalize) WriteProtectCode(false); |
1118 | |
1119 | // Save old value before GCMarker visits the weak persistent handles. |
1120 | SpaceUsage usage_before = GetCurrentUsage(); |
1121 | |
1122 | // Mark all reachable old-gen objects. |
1123 | if (marker_ == NULL) { |
1124 | ASSERT(phase() == kDone); |
1125 | marker_ = new GCMarker(isolate_group, heap_); |
1126 | } else { |
1127 | ASSERT(phase() == kAwaitingFinalization); |
1128 | } |
1129 | |
1130 | if (!finalize) { |
1131 | ASSERT(phase() == kDone); |
1132 | marker_->StartConcurrentMark(this); |
1133 | return; |
1134 | } |
1135 | |
1136 | marker_->MarkObjects(this); |
1137 | usage_.used_in_words = marker_->marked_words() + allocated_black_in_words_; |
1138 | allocated_black_in_words_ = 0; |
1139 | mark_words_per_micro_ = marker_->MarkedWordsPerMicro(); |
1140 | delete marker_; |
1141 | marker_ = NULL; |
1142 | |
1143 | int64_t mid1 = OS::GetCurrentMonotonicMicros(); |
1144 | |
1145 | // Abandon the remainder of the bump allocation block. |
1146 | AbandonBumpAllocation(); |
1147 | // Reset the freelists and setup sweeping. |
1148 | for (intptr_t i = 0; i < num_freelists_; i++) { |
1149 | freelists_[i].Reset(); |
1150 | } |
1151 | |
1152 | int64_t mid2 = OS::GetCurrentMonotonicMicros(); |
1153 | int64_t mid3 = 0; |
1154 | |
1155 | { |
1156 | if (FLAG_verify_before_gc) { |
      OS::PrintErr("Verifying before sweeping...");
      heap_->VerifyGC(kAllowMarked);
      OS::PrintErr(" done.\n");
1160 | } |
1161 | |
1162 | // Executable pages are always swept immediately to simplify |
1163 | // code protection. |
1164 | |
    TIMELINE_FUNCTION_GC_DURATION(thread, "SweepExecutable");
1166 | GCSweeper sweeper; |
1167 | OldPage* prev_page = NULL; |
1168 | OldPage* page = exec_pages_; |
1169 | FreeList* freelist = &freelists_[OldPage::kExecutable]; |
1170 | MutexLocker ml(freelist->mutex()); |
1171 | while (page != NULL) { |
1172 | OldPage* next_page = page->next(); |
1173 | bool page_in_use = sweeper.SweepPage(page, freelist, true /*is_locked*/); |
1174 | if (page_in_use) { |
1175 | prev_page = page; |
1176 | } else { |
1177 | FreePage(page, prev_page); |
1178 | } |
1179 | // Advance to the next page. |
1180 | page = next_page; |
1181 | } |
1182 | |
1183 | mid3 = OS::GetCurrentMonotonicMicros(); |
1184 | } |
1185 | |
1186 | if (compact) { |
1187 | SweepLarge(); |
1188 | Compact(thread); |
1189 | set_phase(kDone); |
1190 | } else if (FLAG_concurrent_sweep) { |
1191 | ConcurrentSweep(isolate_group); |
1192 | } else { |
1193 | SweepLarge(); |
1194 | Sweep(); |
1195 | set_phase(kDone); |
1196 | } |
1197 | |
1198 | // Make code pages read-only. |
1199 | if (finalize) WriteProtectCode(true); |
1200 | |
1201 | int64_t end = OS::GetCurrentMonotonicMicros(); |
1202 | |
1203 | // Record signals for growth control. Include size of external allocations. |
1204 | page_space_controller_.EvaluateGarbageCollection( |
1205 | usage_before, GetCurrentUsage(), start, end); |
1206 | |
1207 | heap_->RecordTime(kConcurrentSweep, pre_safe_point - pre_wait_for_sweepers); |
1208 | heap_->RecordTime(kSafePoint, start - pre_safe_point); |
1209 | heap_->RecordTime(kMarkObjects, mid1 - start); |
1210 | heap_->RecordTime(kResetFreeLists, mid2 - mid1); |
1211 | heap_->RecordTime(kSweepPages, mid3 - mid2); |
1212 | heap_->RecordTime(kSweepLargePages, end - mid3); |
1213 | |
1214 | if (FLAG_print_free_list_after_gc) { |
1215 | for (intptr_t i = 0; i < num_freelists_; i++) { |
      OS::PrintErr("After GC: Freelist %" Pd "\n", i);
1217 | freelists_[i].Print(); |
1218 | } |
1219 | } |
1220 | |
1221 | UpdateMaxUsed(); |
1222 | if (heap_ != NULL) { |
1223 | heap_->UpdateGlobalMaxUsed(); |
1224 | } |
1225 | } |
1226 | |
1227 | void PageSpace::SweepLarge() { |
  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "SweepLarge");
1229 | |
1230 | GCSweeper sweeper; |
1231 | OldPage* prev_page = nullptr; |
1232 | OldPage* page = large_pages_; |
1233 | while (page != nullptr) { |
1234 | OldPage* next_page = page->next(); |
1235 | const intptr_t words_to_end = sweeper.SweepLargePage(page); |
1236 | if (words_to_end == 0) { |
1237 | FreeLargePage(page, prev_page); |
1238 | } else { |
1239 | TruncateLargePage(page, words_to_end << kWordSizeLog2); |
1240 | prev_page = page; |
1241 | } |
1242 | // Advance to the next page. |
1243 | page = next_page; |
1244 | } |
1245 | } |
1246 | |
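// Sweeps all regular data pages on the caller's thread, distributing the
// recovered free blocks round-robin across the per-worker data free lists.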
1247 | void PageSpace::Sweep() { |
  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Sweep");
1249 | |
1250 | GCSweeper sweeper; |
1251 | |
1252 | intptr_t shard = 0; |
1253 | const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1); |
1254 | for (intptr_t i = 0; i < num_shards; i++) { |
1255 | DataFreeList(i)->mutex()->Lock(); |
1256 | } |
1257 | |
1258 | OldPage* prev_page = nullptr; |
1259 | OldPage* page = pages_; |
1260 | while (page != nullptr) { |
1261 | OldPage* next_page = page->next(); |
1262 | ASSERT(page->type() == OldPage::kData); |
1263 | shard = (shard + 1) % num_shards; |
1264 | bool page_in_use = |
1265 | sweeper.SweepPage(page, DataFreeList(shard), true /*is_locked*/); |
1266 | if (page_in_use) { |
1267 | prev_page = page; |
1268 | } else { |
1269 | FreePage(page, prev_page); |
1270 | } |
1271 | // Advance to the next page. |
1272 | page = next_page; |
1273 | } |
1274 | |
1275 | for (intptr_t i = 0; i < num_shards; i++) { |
1276 | DataFreeList(i)->mutex()->Unlock(); |
1277 | } |
1278 | |
1279 | if (FLAG_verify_after_gc) { |
    OS::PrintErr("Verifying after sweeping...");
    heap_->VerifyGC(kForbidMarked);
    OS::PrintErr(" done.\n");
1283 | } |
1284 | } |
1285 | |
1286 | void PageSpace::ConcurrentSweep(IsolateGroup* isolate_group) { |
1287 | // Start the concurrent sweeper task now. |
1288 | GCSweeper::SweepConcurrent(isolate_group, pages_, pages_tail_, large_pages_, |
1289 | large_pages_tail_, &freelists_[OldPage::kData]); |
1290 | } |
1291 | |
1292 | void PageSpace::Compact(Thread* thread) { |
1293 | thread->isolate_group()->set_compaction_in_progress(true); |
1294 | GCCompactor compactor(thread, heap_); |
1295 | compactor.Compact(pages_, &freelists_[OldPage::kData], &pages_lock_); |
1296 | thread->isolate_group()->set_compaction_in_progress(false); |
1297 | |
1298 | if (FLAG_verify_after_gc) { |
    OS::PrintErr("Verifying after compacting...");
    heap_->VerifyGC(kForbidMarked);
    OS::PrintErr(" done.\n");
1302 | } |
1303 | } |
1304 | |
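// Bump-allocates from the freelist's current contiguous block, refilling the
// block from the free list (or a fresh page) when it is exhausted. The bytes
// handed out are tracked as unaccounted size and folded into usage_ when the
// freelist lock is released.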
1305 | uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) { |
1306 | ASSERT(size >= kObjectAlignment); |
1307 | ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
1308 | |
1309 | intptr_t remaining = freelist->end() - freelist->top(); |
1310 | if (UNLIKELY(remaining < size)) { |
1311 | // Checking this first would be logical, but needlessly slow. |
1312 | if (!Heap::IsAllocatableViaFreeLists(size)) { |
1313 | return TryAllocateDataLocked(freelist, size, kForceGrowth); |
1314 | } |
1315 | FreeListElement* block = freelist->TryAllocateLargeLocked(size); |
1316 | if (block == NULL) { |
1317 | // Allocating from a new page (if growth policy allows) will have the |
1318 | // side-effect of populating the freelist with a large block. The next |
1319 | // bump allocation request will have a chance to consume that block. |
1320 | // TODO(koda): Could take freelist lock just once instead of twice. |
      return TryAllocateInFreshPage(size, freelist, OldPage::kData,
                                    kForceGrowth, true /*is_locked*/);
1323 | } |
1324 | intptr_t block_size = block->HeapSize(); |
1325 | if (remaining > 0) { |
1326 | freelist->FreeLocked(freelist->top(), remaining); |
1327 | } |
1328 | freelist->set_top(reinterpret_cast<uword>(block)); |
1329 | freelist->set_end(freelist->top() + block_size); |
1330 | remaining = block_size; |
1331 | } |
1332 | ASSERT(remaining >= size); |
1333 | uword result = freelist->top(); |
1334 | freelist->set_top(result + size); |
1335 | |
1336 | freelist->AddUnaccountedSize(size); |
1337 | |
1338 | // Note: Remaining block is unwalkable until MakeIterable is called. |
1339 | #ifdef DEBUG |
1340 | if (freelist->top() < freelist->end()) { |
1341 | // Fail fast if we try to walk the remaining block. |
1342 | COMPILE_ASSERT(kIllegalCid == 0); |
1343 | *reinterpret_cast<uword*>(freelist->top()) = 0; |
1344 | } |
1345 | #endif // DEBUG |
1346 | return result; |
1347 | } |
1348 | |
1349 | uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) { |
1350 | uword result = freelist->TryAllocateSmallLocked(size); |
1351 | if (result != 0) { |
1352 | freelist->AddUnaccountedSize(size); |
1353 | return result; |
1354 | } |
1355 | return TryAllocateDataBumpLocked(freelist, size); |
1356 | } |
1357 | |
1358 | void PageSpace::SetupImagePage(void* pointer, uword size, bool is_executable) { |
  // Set up an OldPage so precompiled Instructions can be traversed.
1360 | // Instructions are contiguous at [pointer, pointer + size). OldPage |
1361 | // expects to find objects at [memory->start() + ObjectStartOffset, |
1362 | // memory->end()). |
1363 | uword offset = OldPage::ObjectStartOffset(); |
1364 | pointer = reinterpret_cast<void*>(reinterpret_cast<uword>(pointer) - offset); |
1365 | ASSERT(Utils::IsAligned(pointer, kObjectAlignment)); |
1366 | size += offset; |
1367 | |
1368 | VirtualMemory* memory = VirtualMemory::ForImagePage(pointer, size); |
1369 | ASSERT(memory != NULL); |
1370 | OldPage* page = reinterpret_cast<OldPage*>(malloc(sizeof(OldPage))); |
1371 | page->memory_ = memory; |
1372 | page->next_ = NULL; |
1373 | page->object_end_ = memory->end(); |
1374 | page->used_in_bytes_ = page->object_end_ - page->object_start(); |
1375 | page->forwarding_page_ = NULL; |
1376 | page->card_table_ = NULL; |
1377 | if (is_executable) { |
1378 | page->type_ = OldPage::kExecutable; |
1379 | } else { |
1380 | page->type_ = OldPage::kData; |
1381 | } |
1382 | |
1383 | MutexLocker ml(&pages_lock_); |
1384 | page->next_ = image_pages_; |
1385 | image_pages_ = page; |
1386 | } |
1387 | |
1388 | bool PageSpace::IsObjectFromImagePages(dart::ObjectPtr object) { |
1389 | uword object_addr = ObjectLayout::ToAddr(object); |
1390 | OldPage* image_page = image_pages_; |
1391 | while (image_page != nullptr) { |
1392 | if (image_page->Contains(object_addr)) { |
1393 | return true; |
1394 | } |
1395 | image_page = image_page->next(); |
1396 | } |
1397 | return false; |
1398 | } |
1399 | |
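// Splices the |other_pages| list onto the tail of |pages|, temporarily
// lifting write protection when the tail is a protected executable page, and
// leaves the donor list empty.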
1400 | static void AppendList(OldPage** pages, |
1401 | OldPage** pages_tail, |
1402 | OldPage** other_pages, |
1403 | OldPage** other_pages_tail) { |
1404 | ASSERT((*pages == nullptr) == (*pages_tail == nullptr)); |
1405 | ASSERT((*other_pages == nullptr) == (*other_pages_tail == nullptr)); |
1406 | |
1407 | if (*other_pages != nullptr) { |
1408 | if (*pages_tail == nullptr) { |
1409 | *pages = *other_pages; |
1410 | *pages_tail = *other_pages_tail; |
1411 | } else { |
1412 | const bool is_execute = FLAG_write_protect_code && |
1413 | (*pages_tail)->type() == OldPage::kExecutable; |
1414 | if (is_execute) { |
1415 | (*pages_tail)->WriteProtect(false); |
1416 | } |
1417 | (*pages_tail)->set_next(*other_pages); |
1418 | if (is_execute) { |
1419 | (*pages_tail)->WriteProtect(true); |
1420 | } |
1421 | *pages_tail = *other_pages_tail; |
1422 | } |
1423 | *other_pages = nullptr; |
1424 | *other_pages_tail = nullptr; |
1425 | } |
1426 | } |
1427 | |
1428 | static void EnsureEqualImagePages(OldPage* pages, OldPage* other_pages) { |
1429 | #if defined(DEBUG) |
1430 | while (pages != nullptr) { |
1431 | ASSERT((pages == nullptr) == (other_pages == nullptr)); |
1432 | ASSERT(pages->object_start() == other_pages->object_start()); |
1433 | ASSERT(pages->object_end() == other_pages->object_end()); |
1434 | pages = pages->next(); |
1435 | other_pages = other_pages->next(); |
1436 | } |
1437 | #endif |
1438 | } |
1439 | |
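// Moves all pages, free-list contents, and accounting from |donor| into this
// space, leaving the donor empty. The donor must be idle: no running tasks,
// no marker, and no in-progress GC phase.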
1440 | void PageSpace::MergeFrom(PageSpace* donor) { |
1441 | donor->AbandonBumpAllocation(); |
1442 | |
1443 | ASSERT(donor->tasks_ == 0); |
1444 | ASSERT(donor->concurrent_marker_tasks_ == 0); |
1445 | ASSERT(donor->phase_ == kDone); |
1446 | DEBUG_ASSERT(donor->iterating_thread_ == nullptr); |
1447 | ASSERT(donor->marker_ == nullptr); |
1448 | |
1449 | for (intptr_t i = 0; i < num_freelists_; ++i) { |
1450 | ASSERT(donor->freelists_[i].top() == 0); |
1451 | ASSERT(donor->freelists_[i].end() == 0); |
1452 | const bool is_protected = |
1453 | FLAG_write_protect_code && i == OldPage::kExecutable; |
1454 | freelists_[i].MergeFrom(&donor->freelists_[i], is_protected); |
1455 | donor->freelists_[i].Reset(); |
1456 | } |
1457 | |
  // The freelist locks will be taken in FreeList::MergeFrom above, and the
  // locking order is that freelist locks are taken before page list locks, so
  // don't take the pages lock until all the free lists have been merged.
1461 | MutexLocker ml(&pages_lock_); |
1462 | MutexLocker ml2(&donor->pages_lock_); |
1463 | |
1464 | AppendList(&pages_, &pages_tail_, &donor->pages_, &donor->pages_tail_); |
1465 | AppendList(&exec_pages_, &exec_pages_tail_, &donor->exec_pages_, |
1466 | &donor->exec_pages_tail_); |
1467 | AppendList(&large_pages_, &large_pages_tail_, &donor->large_pages_, |
1468 | &donor->large_pages_tail_); |
  // We intentionally do not merge [image_pages_] because [this] and [donor]
  // have the same mmap()ed image page areas.
1471 | EnsureEqualImagePages(image_pages_, donor->image_pages_); |
1472 | |
  // We intentionally do not increase [max_capacity_in_words_] because this can
1474 | // lead [max_capacity_in_words_] to become larger and larger and eventually |
1475 | // wrap-around and become negative. |
1476 | allocated_black_in_words_ += donor->allocated_black_in_words_; |
1477 | gc_time_micros_ += donor->gc_time_micros_; |
1478 | collections_ += donor->collections_; |
1479 | |
1480 | usage_.capacity_in_words += donor->usage_.capacity_in_words; |
1481 | usage_.used_in_words += donor->usage_.used_in_words; |
1482 | usage_.external_in_words += donor->usage_.external_in_words; |
1483 | |
1484 | page_space_controller_.MergeFrom(&donor->page_space_controller_); |
1485 | |
1486 | ASSERT(FLAG_concurrent_mark || donor->enable_concurrent_mark_ == false); |
1487 | } |
1488 | |
1489 | PageSpaceController::PageSpaceController(Heap* heap, |
1490 | int heap_growth_ratio, |
1491 | int heap_growth_max, |
1492 | int garbage_collection_time_ratio) |
1493 | : heap_(heap), |
1494 | is_enabled_(false), |
1495 | heap_growth_ratio_(heap_growth_ratio), |
1496 | desired_utilization_((100.0 - heap_growth_ratio) / 100.0), |
1497 | heap_growth_max_(heap_growth_max), |
1498 | garbage_collection_time_ratio_(garbage_collection_time_ratio), |
1499 | idle_gc_threshold_in_words_(0) { |
1500 | const intptr_t growth_in_pages = heap_growth_max / 2; |
  RecordUpdate(last_usage_, last_usage_, growth_in_pages, "initial");
1502 | } |
1503 | |
1504 | PageSpaceController::~PageSpaceController() {} |
1505 | |
1506 | bool PageSpaceController::ReachedHardThreshold(SpaceUsage after) const { |
1507 | if (!is_enabled_) { |
1508 | return false; |
1509 | } |
1510 | if (heap_growth_ratio_ == 100) { |
1511 | return false; |
1512 | } |
1513 | return after.CombinedUsedInWords() > hard_gc_threshold_in_words_; |
1514 | } |
1515 | |
1516 | bool PageSpaceController::ReachedSoftThreshold(SpaceUsage after) const { |
1517 | if (!is_enabled_) { |
1518 | return false; |
1519 | } |
1520 | if (heap_growth_ratio_ == 100) { |
1521 | return false; |
1522 | } |
1523 | return after.CombinedUsedInWords() > soft_gc_threshold_in_words_; |
1524 | } |
1525 | |
1526 | bool PageSpaceController::ReachedIdleThreshold(SpaceUsage current) const { |
1527 | if (!is_enabled_) { |
1528 | return false; |
1529 | } |
1530 | if (heap_growth_ratio_ == 100) { |
1531 | return false; |
1532 | } |
1533 | return current.CombinedUsedInWords() > idle_gc_threshold_in_words_; |
1534 | } |
1535 | |
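// Updates the growth policy after a full GC. Estimates the garbage-per-
// allocation ratio k from this cycle, then picks the smallest growth (in
// pages) such that the next GC is still expected to be worthwhile.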
1536 | void PageSpaceController::EvaluateGarbageCollection(SpaceUsage before, |
1537 | SpaceUsage after, |
1538 | int64_t start, |
1539 | int64_t end) { |
1540 | ASSERT(end >= start); |
1541 | history_.AddGarbageCollectionTime(start, end); |
1542 | const int gc_time_fraction = history_.GarbageCollectionTimeFraction(); |
1543 | heap_->RecordData(PageSpace::kGCTimeFraction, gc_time_fraction); |
1544 | |
1545 | // Assume garbage increases linearly with allocation: |
1546 | // G = kA, and estimate k from the previous cycle. |
1547 | const intptr_t allocated_since_previous_gc = |
1548 | before.CombinedUsedInWords() - last_usage_.CombinedUsedInWords(); |
1549 | intptr_t grow_heap; |
1550 | if (allocated_since_previous_gc > 0) { |
1551 | const intptr_t garbage = |
1552 | before.CombinedUsedInWords() - after.CombinedUsedInWords(); |
1553 | ASSERT(garbage >= 0); |
1554 | // It makes no sense to expect that each kb allocated will cause more than |
1555 | // one kb of garbage, so we clamp k at 1.0. |
1556 | const double k = Utils::Minimum( |
1557 | 1.0, garbage / static_cast<double>(allocated_since_previous_gc)); |
1558 | |
1559 | const int garbage_ratio = static_cast<int>(k * 100); |
1560 | heap_->RecordData(PageSpace::kGarbageRatio, garbage_ratio); |
1561 | |
1562 | // Define GC to be 'worthwhile' iff at least fraction t of heap is garbage. |
1563 | double t = 1.0 - desired_utilization_; |
1564 | // If we spend too much time in GC, strive for even more free space. |
1565 | if (gc_time_fraction > garbage_collection_time_ratio_) { |
1566 | t += (gc_time_fraction - garbage_collection_time_ratio_) / 100.0; |
1567 | } |
1568 | |
    // Number of pages we can allocate and still be within the desired growth
    // ratio, i.e. the growth G that solves
    //   used / (used + G) == desired_utilization_.
1571 | const intptr_t grow_pages = |
1572 | (static_cast<intptr_t>(after.CombinedUsedInWords() / |
1573 | desired_utilization_) - |
1574 | (after.CombinedUsedInWords())) / |
1575 | kOldPageSizeInWords; |
1576 | if (garbage_ratio == 0) { |
      // The previous cycle produced no garbage, so there is no basis for
      // estimating grow_heap from garbage; fall back to the growth-ratio
      // heuristic instead.
1580 | grow_heap = |
1581 | Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages); |
1582 | } else { |
1583 | // Find minimum 'grow_heap' such that after increasing capacity by |
1584 | // 'grow_heap' pages and filling them, we expect a GC to be worthwhile. |
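      // A GC after filling G = grow_heap * kOldPageSizeInWords words is
      // worthwhile when t <= k * G / (used + G). The right-hand side rises
      // monotonically with G, so a binary search over the page count finds
      // the minimum.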
1585 | intptr_t max = heap_growth_max_; |
1586 | intptr_t min = 0; |
1587 | intptr_t local_grow_heap = 0; |
1588 | while (min < max) { |
1589 | local_grow_heap = (max + min) / 2; |
1590 | const intptr_t limit = after.CombinedUsedInWords() + |
1591 | (local_grow_heap * kOldPageSizeInWords); |
1592 | const intptr_t allocated_before_next_gc = |
1593 | limit - (after.CombinedUsedInWords()); |
1594 | const double estimated_garbage = k * allocated_before_next_gc; |
1595 | if (t <= estimated_garbage / limit) { |
1596 | max = local_grow_heap - 1; |
1597 | } else { |
1598 | min = local_grow_heap + 1; |
1599 | } |
1600 | } |
1601 | local_grow_heap = (max + min) / 2; |
1602 | grow_heap = local_grow_heap; |
1603 | ASSERT(grow_heap >= 0); |
      // If we are going to grow by heap_growth_max_, ensure that the heap
      // grows by at least as much as the growth-ratio heuristic suggests.
1606 | if (grow_heap >= heap_growth_max_) { |
1607 | grow_heap = Utils::Maximum(grow_pages, grow_heap); |
1608 | } |
1609 | } |
1610 | } else { |
1611 | heap_->RecordData(PageSpace::kGarbageRatio, 100); |
1612 | grow_heap = 0; |
1613 | } |
1614 | heap_->RecordData(PageSpace::kPageGrowth, grow_heap); |
1615 | last_usage_ = after; |
1616 | |
1617 | intptr_t max_capacity_in_words = heap_->old_space()->max_capacity_in_words_; |
1618 | if (max_capacity_in_words != 0) { |
1619 | ASSERT(grow_heap >= 0); |
1620 | // Fraction of asymptote used. |
1621 | double f = static_cast<double>(after.CombinedUsedInWords() + |
1622 | (kOldPageSizeInWords * grow_heap)) / |
1623 | static_cast<double>(max_capacity_in_words); |
1624 | ASSERT(f >= 0.0); |
1625 | // Increase weight at the high end. |
1626 | f = f * f; |
1627 | // Fraction of asymptote available. |
1628 | f = 1.0 - f; |
1629 | ASSERT(f <= 1.0); |
1630 | // Discount growth more the closer we get to the desired asymptote. |
1631 | grow_heap = static_cast<intptr_t>(grow_heap * f); |
    // Enforce a minimum growth step so progress remains possible as the heap
    // approaches the asymptote.
1633 | intptr_t min_step = (2 * MB) / kOldPageSize; |
1634 | grow_heap = Utils::Maximum(min_step, grow_heap); |
1635 | } |
1636 | |
  RecordUpdate(before, after, grow_heap, "gc");
1638 | } |
1639 | |
1640 | void PageSpaceController::EvaluateAfterLoading(SpaceUsage after) { |
1641 | // Number of pages we can allocate and still be within the desired growth |
1642 | // ratio. |
1643 | intptr_t growth_in_pages; |
1644 | if (desired_utilization_ == 0.0) { |
1645 | growth_in_pages = heap_growth_max_; |
1646 | } else { |
1647 | growth_in_pages = (static_cast<intptr_t>(after.CombinedUsedInWords() / |
1648 | desired_utilization_) - |
1649 | (after.CombinedUsedInWords())) / |
1650 | kOldPageSizeInWords; |
1651 | } |
1652 | |
1653 | // Apply growth cap. |
1654 | growth_in_pages = |
1655 | Utils::Minimum(static_cast<intptr_t>(heap_growth_max_), growth_in_pages); |
1656 | |
  RecordUpdate(after, after, growth_in_pages, "loaded");
1658 | } |
1659 | |
1660 | void PageSpaceController::RecordUpdate(SpaceUsage before, |
1661 | SpaceUsage after, |
1662 | intptr_t growth_in_pages, |
1663 | const char* reason) { |
  // The hard threshold, compared against usage before growing: current usage
  // plus the allowed growth.
1665 | hard_gc_threshold_in_words_ = |
1666 | after.CombinedUsedInWords() + (kOldPageSizeInWords * growth_in_pages); |
1667 | |
  // Start concurrent marking when the remaining headroom below the hard
  // threshold falls under half of new-space capacity or under 5% of the hard
  // threshold, whichever is larger.
1670 | #if defined(TARGET_ARCH_IA32) |
1671 | const intptr_t headroom = 0; // No concurrent marking. |
1672 | #else |
1673 | // Note that heap_ can be null in some unit tests. |
1674 | const intptr_t new_space = |
1675 | heap_ == nullptr ? 0 : heap_->new_space()->CapacityInWords(); |
1676 | const intptr_t headroom = |
1677 | Utils::Maximum(new_space / 2, hard_gc_threshold_in_words_ / 20); |
1678 | #endif |
1679 | soft_gc_threshold_in_words_ = hard_gc_threshold_in_words_ - headroom; |
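  // For example, with 16 MB of new-space capacity and a 400 MB hard
  // threshold, headroom = max(8 MB, 20 MB) = 20 MB, so marking starts 20 MB
  // shy of the hard threshold.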
1680 | |
  // Set a tight idle threshold: two pages above current usage, so that even
  // modest growth makes an idle-time GC worthwhile.
1682 | idle_gc_threshold_in_words_ = |
1683 | after.CombinedUsedInWords() + (2 * kOldPageSizeInWords); |
1684 | |
1685 | #if defined(SUPPORT_TIMELINE) |
1686 | Thread* thread = Thread::Current(); |
1687 | if (thread != nullptr) { |
    TIMELINE_FUNCTION_GC_DURATION(thread, "UpdateGrowthLimit");
1689 | tbes.SetNumArguments(6); |
    tbes.CopyArgument(0, "Reason", reason);
    tbes.FormatArgument(1, "Before.CombinedUsed (kB)", "%" Pd "",
                        RoundWordsToKB(before.CombinedUsedInWords()));
    tbes.FormatArgument(2, "After.CombinedUsed (kB)", "%" Pd "",
                        RoundWordsToKB(after.CombinedUsedInWords()));
    tbes.FormatArgument(3, "Hard Threshold (kB)", "%" Pd "",
                        RoundWordsToKB(hard_gc_threshold_in_words_));
    tbes.FormatArgument(4, "Soft Threshold (kB)", "%" Pd "",
                        RoundWordsToKB(soft_gc_threshold_in_words_));
    tbes.FormatArgument(5, "Idle Threshold (kB)", "%" Pd "",
                        RoundWordsToKB(idle_gc_threshold_in_words_));
1701 | } |
1702 | #endif |
1703 | |
1704 | if (FLAG_log_growth) { |
    THR_Print("%s: threshold=%" Pd "kB, idle_threshold=%" Pd "kB, reason=%s\n",
1706 | heap_->isolate_group()->source()->name, |
1707 | hard_gc_threshold_in_words_ / KBInWords, |
1708 | idle_gc_threshold_in_words_ / KBInWords, reason); |
1709 | } |
1710 | } |
1711 | |
void PageSpaceController::HintFreed(intptr_t size) {
  // 'size' is in bytes; convert to words to match the word-based threshold.
  intptr_t size_in_words = size >> kWordSizeLog2;
  if (size_in_words > idle_gc_threshold_in_words_) {
    idle_gc_threshold_in_words_ = 0;
  } else {
    idle_gc_threshold_in_words_ -= size_in_words;
  }
1719 | |
1720 | // TODO(rmacnak): Hasten the soft threshold at some discount? |
1721 | } |
1722 | |
1723 | void PageSpaceController::MergeFrom(PageSpaceController* donor) { |
1724 | last_usage_.capacity_in_words += donor->last_usage_.capacity_in_words; |
1725 | last_usage_.used_in_words += donor->last_usage_.used_in_words; |
1726 | last_usage_.external_in_words += donor->last_usage_.external_in_words; |
1727 | } |
1728 | |
1729 | void PageSpaceGarbageCollectionHistory::AddGarbageCollectionTime(int64_t start, |
1730 | int64_t end) { |
1731 | Entry entry; |
1732 | entry.start = start; |
1733 | entry.end = end; |
1734 | history_.Add(entry); |
1735 | } |
1736 | |
1737 | int PageSpaceGarbageCollectionHistory::GarbageCollectionTimeFraction() { |
1738 | int64_t gc_time = 0; |
1739 | int64_t total_time = 0; |
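  // Walk adjacent history entries, newest first (assuming Get(0) returns the
  // most recent): gc_time accumulates time spent inside each GC, while
  // total_time accumulates the interval from the end of one GC to the end of
  // the next, so the quotient approximates the recent GC duty cycle.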
1740 | for (int i = 0; i < history_.Size() - 1; i++) { |
1741 | Entry current = history_.Get(i); |
1742 | Entry previous = history_.Get(i + 1); |
1743 | gc_time += current.end - current.start; |
1744 | total_time += current.end - previous.end; |
1745 | } |
1746 | if (total_time == 0) { |
1747 | return 0; |
1748 | } else { |
1749 | ASSERT(total_time >= gc_time); |
1750 | int result = static_cast<int>( |
1751 | (static_cast<double>(gc_time) / static_cast<double>(total_time)) * 100); |
1752 | return result; |
1753 | } |
1754 | } |
1755 | |
1756 | } // namespace dart |
1757 | |