1 | // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include <memory> |
6 | #include <utility> |
7 | |
8 | #include "vm/heap/heap.h" |
9 | |
10 | #include "platform/assert.h" |
11 | #include "platform/utils.h" |
12 | #include "vm/compiler/jit/compiler.h" |
13 | #include "vm/flags.h" |
14 | #include "vm/heap/pages.h" |
15 | #include "vm/heap/safepoint.h" |
16 | #include "vm/heap/scavenger.h" |
17 | #include "vm/heap/verifier.h" |
18 | #include "vm/heap/weak_table.h" |
19 | #include "vm/isolate.h" |
20 | #include "vm/lockers.h" |
21 | #include "vm/object.h" |
22 | #include "vm/object_set.h" |
23 | #include "vm/os.h" |
24 | #include "vm/raw_object.h" |
25 | #include "vm/service.h" |
26 | #include "vm/service_event.h" |
27 | #include "vm/service_isolate.h" |
28 | #include "vm/stack_frame.h" |
29 | #include "vm/tags.h" |
30 | #include "vm/thread_pool.h" |
31 | #include "vm/timeline.h" |
32 | #include "vm/virtual_memory.h" |
33 | |
34 | namespace dart { |
35 | |
DEFINE_FLAG(bool, write_protect_vm_isolate, true, "Write protect vm_isolate.");
37 | DEFINE_FLAG(bool, |
38 | disable_heap_verification, |
39 | false, |
            "Explicitly disable heap verification.");
41 | |
// Ensures that the GC does not observe a current isolate while it runs.
43 | class NoActiveIsolateScope { |
44 | public: |
45 | NoActiveIsolateScope() : thread_(Thread::Current()) { |
46 | saved_isolate_ = thread_->isolate_; |
47 | thread_->isolate_ = nullptr; |
48 | } |
49 | ~NoActiveIsolateScope() { |
50 | ASSERT(thread_->isolate_ == nullptr); |
51 | thread_->isolate_ = saved_isolate_; |
52 | } |
53 | |
54 | private: |
55 | Thread* thread_; |
56 | Isolate* saved_isolate_; |
57 | }; |
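// Note: the GC entry points below (CollectNewSpaceGarbage,
// CollectOldSpaceGarbage) enter this scope so that code reached during a
// collection never sees an active isolate and must go through the isolate
// group instead.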
58 | |
59 | Heap::Heap(IsolateGroup* isolate_group, |
60 | intptr_t max_new_gen_semi_words, |
61 | intptr_t max_old_gen_words) |
62 | : isolate_group_(isolate_group), |
63 | new_space_(this, max_new_gen_semi_words), |
64 | old_space_(this, max_old_gen_words), |
65 | barrier_(), |
66 | barrier_done_(), |
67 | read_only_(false), |
68 | gc_new_space_in_progress_(false), |
69 | gc_old_space_in_progress_(false), |
70 | last_gc_was_old_space_(false), |
71 | assume_scavenge_will_fail_(false), |
72 | gc_on_nth_allocation_(kNoForcedGarbageCollection) { |
73 | UpdateGlobalMaxUsed(); |
74 | for (int sel = 0; sel < kNumWeakSelectors; sel++) { |
75 | new_weak_tables_[sel] = new WeakTable(); |
76 | old_weak_tables_[sel] = new WeakTable(); |
77 | } |
78 | stats_.num_ = 0; |
79 | } |
80 | |
81 | Heap::~Heap() { |
82 | for (int sel = 0; sel < kNumWeakSelectors; sel++) { |
83 | delete new_weak_tables_[sel]; |
84 | delete old_weak_tables_[sel]; |
85 | } |
86 | } |
87 | |
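// New-space allocation: try the scavenger's fast path first; if that fails
// (and growth control is enabled and a scavenge is not already expected to
// fail), collect new space and retry, then fall back to old space.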
88 | uword Heap::AllocateNew(intptr_t size) { |
89 | ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0); |
90 | CollectForDebugging(); |
91 | Thread* thread = Thread::Current(); |
92 | uword addr = new_space_.TryAllocate(thread, size); |
93 | if (LIKELY(addr != 0)) { |
94 | return addr; |
95 | } |
96 | if (!assume_scavenge_will_fail_ && new_space_.GrowthControlState()) { |
    // This call to CollectGarbage might end up "reusing" a collection spawned
    // by a different thread; once that collection finishes, this thread races
    // with the other released threads to allocate the requested memory.
100 | CollectGarbage(kNew); |
101 | |
102 | addr = new_space_.TryAllocate(thread, size); |
103 | if (LIKELY(addr != 0)) { |
104 | return addr; |
105 | } |
106 | } |
107 | |
108 | // It is possible a GC doesn't clear enough space. |
109 | // In that case, we must fall through and allocate into old space. |
110 | return AllocateOld(size, OldPage::kData); |
111 | } |
112 | |
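// Old-space allocation escalates gradually: retry after waiting for
// concurrent sweeper tasks, after collecting both generations, after forcing
// the heap to grow, and after a last-resort CollectAllGarbage(kLowMemory);
// only when all of that fails does it log a message and return 0.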
113 | uword Heap::AllocateOld(intptr_t size, OldPage::PageType type) { |
114 | ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0); |
115 | CollectForDebugging(); |
116 | uword addr = old_space_.TryAllocate(size, type); |
117 | if (addr != 0) { |
118 | return addr; |
119 | } |
120 | // If we are in the process of running a sweep, wait for the sweeper to free |
121 | // memory. |
122 | Thread* thread = Thread::Current(); |
123 | if (old_space_.GrowthControlState()) { |
124 | // Wait for any GC tasks that are in progress. |
125 | WaitForSweeperTasks(thread); |
126 | addr = old_space_.TryAllocate(size, type); |
127 | if (addr != 0) { |
128 | return addr; |
129 | } |
130 | // All GC tasks finished without allocating successfully. Collect both |
131 | // generations. |
132 | CollectMostGarbage(); |
133 | addr = old_space_.TryAllocate(size, type); |
134 | if (addr != 0) { |
135 | return addr; |
136 | } |
137 | // Wait for all of the concurrent tasks to finish before giving up. |
138 | WaitForSweeperTasks(thread); |
139 | addr = old_space_.TryAllocate(size, type); |
140 | if (addr != 0) { |
141 | return addr; |
142 | } |
143 | // Force growth before attempting another synchronous GC. |
144 | addr = old_space_.TryAllocate(size, type, PageSpace::kForceGrowth); |
145 | if (addr != 0) { |
146 | return addr; |
147 | } |
148 | // Before throwing an out-of-memory error try a synchronous GC. |
149 | CollectAllGarbage(kLowMemory); |
150 | WaitForSweeperTasks(thread); |
151 | } |
152 | addr = old_space_.TryAllocate(size, type, PageSpace::kForceGrowth); |
153 | if (addr != 0) { |
154 | return addr; |
155 | } |
156 | // Give up allocating this object. |
  OS::PrintErr("Exhausted heap space, trying to allocate %" Pd " bytes.\n",
158 | size); |
159 | return 0; |
160 | } |
161 | |
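// Accounts for external memory (owned outside the Dart heap but retained by
// Dart objects) against the given space. A large external total may trigger
// a scavenge, a mark-sweep, or the start of concurrent marking.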
162 | void Heap::AllocatedExternal(intptr_t size, Space space) { |
163 | ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0); |
164 | if (space == kNew) { |
165 | Isolate::Current()->AssertCurrentThreadIsMutator(); |
166 | new_space_.AllocatedExternal(size); |
167 | if (new_space_.ExternalInWords() <= (4 * new_space_.CapacityInWords())) { |
168 | return; |
169 | } |
170 | // Attempt to free some external allocation by a scavenge. (If the total |
171 | // remains above the limit, next external alloc will trigger another.) |
172 | CollectGarbage(kScavenge, kExternal); |
173 | // Promotion may have pushed old space over its limit. Fall through for old |
174 | // space GC check. |
175 | } else { |
176 | ASSERT(space == kOld); |
177 | old_space_.AllocatedExternal(size); |
178 | } |
179 | |
180 | if (old_space_.ReachedHardThreshold()) { |
181 | CollectGarbage(kMarkSweep, kExternal); |
182 | } else { |
183 | CheckStartConcurrentMarking(Thread::Current(), kExternal); |
184 | } |
185 | } |
186 | |
187 | void Heap::FreedExternal(intptr_t size, Space space) { |
188 | if (space == kNew) { |
189 | new_space_.FreedExternal(size); |
190 | } else { |
191 | ASSERT(space == kOld); |
192 | old_space_.FreedExternal(size); |
193 | } |
194 | } |
195 | |
196 | void Heap::PromotedExternal(intptr_t size) { |
197 | new_space_.FreedExternal(size); |
198 | old_space_.AllocatedExternal(size); |
199 | } |
200 | |
201 | bool Heap::Contains(uword addr) const { |
202 | return new_space_.Contains(addr) || old_space_.Contains(addr); |
203 | } |
204 | |
205 | bool Heap::NewContains(uword addr) const { |
206 | return new_space_.Contains(addr); |
207 | } |
208 | |
209 | bool Heap::OldContains(uword addr) const { |
210 | return old_space_.Contains(addr); |
211 | } |
212 | |
213 | bool Heap::CodeContains(uword addr) const { |
214 | return old_space_.Contains(addr, OldPage::kExecutable); |
215 | } |
216 | |
217 | bool Heap::DataContains(uword addr) const { |
218 | return old_space_.DataContains(addr); |
219 | } |
220 | |
221 | void Heap::VisitObjects(ObjectVisitor* visitor) { |
222 | new_space_.VisitObjects(visitor); |
223 | old_space_.VisitObjects(visitor); |
224 | } |
225 | |
226 | void Heap::VisitObjectsNoImagePages(ObjectVisitor* visitor) { |
227 | new_space_.VisitObjects(visitor); |
228 | old_space_.VisitObjectsNoImagePages(visitor); |
229 | } |
230 | |
231 | void Heap::VisitObjectsImagePages(ObjectVisitor* visitor) const { |
232 | old_space_.VisitObjectsImagePages(visitor); |
233 | } |
234 | |
235 | HeapIterationScope::HeapIterationScope(Thread* thread, bool writable) |
236 | : ThreadStackResource(thread), |
237 | heap_(isolate()->heap()), |
238 | old_space_(heap_->old_space()), |
239 | writable_(writable) { |
240 | { |
241 | // It's not safe to iterate over old space when concurrent marking or |
242 | // sweeping is in progress, or another thread is iterating the heap, so wait |
243 | // for any such task to complete first. |
244 | MonitorLocker ml(old_space_->tasks_lock()); |
245 | #if defined(DEBUG) |
246 | // We currently don't support nesting of HeapIterationScopes. |
247 | ASSERT(old_space_->iterating_thread_ != thread); |
248 | #endif |
249 | while ((old_space_->tasks() > 0) || |
250 | (old_space_->phase() != PageSpace::kDone)) { |
251 | if (old_space_->phase() == PageSpace::kAwaitingFinalization) { |
252 | ml.Exit(); |
253 | heap_->CollectOldSpaceGarbage(thread, Heap::kMarkSweep, |
254 | Heap::kFinalize); |
255 | ml.Enter(); |
256 | } |
257 | while (old_space_->tasks() > 0) { |
258 | ml.WaitWithSafepointCheck(thread); |
259 | } |
260 | } |
261 | #if defined(DEBUG) |
262 | ASSERT(old_space_->iterating_thread_ == NULL); |
263 | old_space_->iterating_thread_ = thread; |
264 | #endif |
265 | old_space_->set_tasks(1); |
266 | } |
267 | |
268 | isolate()->safepoint_handler()->SafepointThreads(thread); |
269 | |
270 | if (writable_) { |
271 | heap_->WriteProtectCode(false); |
272 | } |
273 | } |
274 | |
275 | HeapIterationScope::~HeapIterationScope() { |
276 | if (writable_) { |
277 | heap_->WriteProtectCode(true); |
278 | } |
279 | |
280 | isolate()->safepoint_handler()->ResumeThreads(thread()); |
281 | |
282 | MonitorLocker ml(old_space_->tasks_lock()); |
283 | #if defined(DEBUG) |
284 | ASSERT(old_space_->iterating_thread_ == thread()); |
285 | old_space_->iterating_thread_ = NULL; |
286 | #endif |
287 | ASSERT(old_space_->tasks() == 1); |
288 | old_space_->set_tasks(0); |
289 | ml.NotifyAll(); |
290 | } |
291 | |
292 | void HeapIterationScope::IterateObjects(ObjectVisitor* visitor) const { |
293 | heap_->VisitObjects(visitor); |
294 | } |
295 | |
296 | void HeapIterationScope::IterateObjectsNoImagePages( |
297 | ObjectVisitor* visitor) const { |
298 | heap_->new_space()->VisitObjects(visitor); |
299 | heap_->old_space()->VisitObjectsNoImagePages(visitor); |
300 | } |
301 | |
302 | void HeapIterationScope::IterateOldObjects(ObjectVisitor* visitor) const { |
303 | old_space_->VisitObjects(visitor); |
304 | } |
305 | |
306 | void HeapIterationScope::IterateOldObjectsNoImagePages( |
307 | ObjectVisitor* visitor) const { |
308 | old_space_->VisitObjectsNoImagePages(visitor); |
309 | } |
310 | |
311 | void HeapIterationScope::IterateVMIsolateObjects(ObjectVisitor* visitor) const { |
312 | Dart::vm_isolate()->heap()->VisitObjects(visitor); |
313 | } |
314 | |
315 | void HeapIterationScope::IterateObjectPointers( |
316 | ObjectPointerVisitor* visitor, |
317 | ValidationPolicy validate_frames) { |
318 | isolate_group()->VisitObjectPointers(visitor, validate_frames); |
319 | } |
320 | |
321 | void HeapIterationScope::IterateStackPointers( |
322 | ObjectPointerVisitor* visitor, |
323 | ValidationPolicy validate_frames) { |
324 | isolate_group()->VisitStackPointers(visitor, validate_frames); |
325 | } |
326 | |
327 | void Heap::VisitObjectPointers(ObjectPointerVisitor* visitor) { |
328 | new_space_.VisitObjectPointers(visitor); |
329 | old_space_.VisitObjectPointers(visitor); |
330 | } |
331 | |
332 | InstructionsPtr Heap::FindObjectInCodeSpace(FindObjectVisitor* visitor) const { |
333 | // Only executable pages can have RawInstructions objects. |
334 | ObjectPtr raw_obj = old_space_.FindObject(visitor, OldPage::kExecutable); |
335 | ASSERT((raw_obj == Object::null()) || |
336 | (raw_obj->GetClassId() == kInstructionsCid)); |
337 | return static_cast<InstructionsPtr>(raw_obj); |
338 | } |
339 | |
340 | ObjectPtr Heap::FindOldObject(FindObjectVisitor* visitor) const { |
341 | return old_space_.FindObject(visitor, OldPage::kData); |
342 | } |
343 | |
344 | ObjectPtr Heap::FindNewObject(FindObjectVisitor* visitor) { |
345 | return new_space_.FindObject(visitor); |
346 | } |
347 | |
348 | ObjectPtr Heap::FindObject(FindObjectVisitor* visitor) { |
349 | // The visitor must not allocate from the heap. |
350 | NoSafepointScope no_safepoint_scope; |
351 | ObjectPtr raw_obj = FindNewObject(visitor); |
352 | if (raw_obj != Object::null()) { |
353 | return raw_obj; |
354 | } |
355 | raw_obj = FindOldObject(visitor); |
356 | if (raw_obj != Object::null()) { |
357 | return raw_obj; |
358 | } |
359 | raw_obj = FindObjectInCodeSpace(visitor); |
360 | return raw_obj; |
361 | } |
362 | |
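// At most one new-space GC and one old-space GC may be in progress at a time.
// BeginNewSpaceGC/BeginOldSpaceGC return true if the calling thread should
// perform the collection itself, and false if the request was satisfied by a
// collection that another thread was already running.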
363 | bool Heap::BeginNewSpaceGC(Thread* thread) { |
364 | MonitorLocker ml(&gc_in_progress_monitor_); |
365 | bool start_gc_on_thread = true; |
366 | while (gc_new_space_in_progress_ || gc_old_space_in_progress_) { |
367 | start_gc_on_thread = !gc_new_space_in_progress_; |
368 | ml.WaitWithSafepointCheck(thread); |
369 | } |
370 | if (start_gc_on_thread) { |
371 | gc_new_space_in_progress_ = true; |
372 | return true; |
373 | } |
374 | return false; |
375 | } |
376 | |
377 | void Heap::EndNewSpaceGC() { |
378 | MonitorLocker ml(&gc_in_progress_monitor_); |
379 | ASSERT(gc_new_space_in_progress_); |
380 | gc_new_space_in_progress_ = false; |
381 | last_gc_was_old_space_ = false; |
382 | ml.NotifyAll(); |
383 | } |
384 | |
385 | bool Heap::BeginOldSpaceGC(Thread* thread) { |
386 | MonitorLocker ml(&gc_in_progress_monitor_); |
387 | bool start_gc_on_thread = true; |
388 | while (gc_new_space_in_progress_ || gc_old_space_in_progress_) { |
389 | start_gc_on_thread = !gc_old_space_in_progress_; |
390 | ml.WaitWithSafepointCheck(thread); |
391 | } |
392 | if (start_gc_on_thread) { |
393 | gc_old_space_in_progress_ = true; |
394 | return true; |
395 | } |
396 | return false; |
397 | } |
398 | |
399 | void Heap::EndOldSpaceGC() { |
400 | MonitorLocker ml(&gc_in_progress_monitor_); |
401 | ASSERT(gc_old_space_in_progress_); |
402 | gc_old_space_in_progress_ = false; |
403 | last_gc_was_old_space_ = true; |
404 | assume_scavenge_will_fail_ = false; |
405 | ml.NotifyAll(); |
406 | } |
407 | |
408 | void Heap::HintFreed(intptr_t size) { |
409 | old_space_.HintFreed(size); |
410 | } |
411 | |
412 | void Heap::NotifyIdle(int64_t deadline) { |
413 | Thread* thread = Thread::Current(); |
414 | // Check if we want to collect new-space first, because if we want to collect |
415 | // both new-space and old-space, the new-space collection should run first |
416 | // to shrink the root set (make old-space GC faster) and avoid |
417 | // intergenerational garbage (make old-space GC free more memory). |
418 | if (new_space_.ShouldPerformIdleScavenge(deadline)) { |
    TIMELINE_FUNCTION_GC_DURATION(thread, "IdleGC");
420 | CollectNewSpaceGarbage(thread, kIdle); |
421 | } |
422 | |
423 | // Check if we want to collect old-space, in decreasing order of cost. |
424 | // Because we use a deadline instead of a timeout, we automatically take any |
425 | // time used up by a scavenge into account when deciding if we can complete |
426 | // a mark-sweep on time. |
427 | if (old_space_.ShouldPerformIdleMarkCompact(deadline)) { |
428 | // We prefer mark-compact over other old space GCs if we have enough time, |
429 | // since it removes old space fragmentation and frees up most memory. |
    // Blocks for O(heap), roughly twice as costly as mark-sweep.
    TIMELINE_FUNCTION_GC_DURATION(thread, "IdleGC");
432 | CollectOldSpaceGarbage(thread, kMarkCompact, kIdle); |
433 | } else if (old_space_.ReachedHardThreshold()) { |
434 | // Even though the following GC may exceed our idle deadline, we need to |
    // ensure that promotions during idle scavenges do not lead to
436 | // unbounded growth of old space. If a program is allocating only in new |
437 | // space and all scavenges happen during idle time, then NotifyIdle will be |
438 | // the only place that checks the old space allocation limit. |
439 | // Compare the tail end of Heap::CollectNewSpaceGarbage. |
440 | // Blocks for O(heap). |
    TIMELINE_FUNCTION_GC_DURATION(thread, "IdleGC");
442 | CollectOldSpaceGarbage(thread, kMarkSweep, kIdle); |
443 | } else if (old_space_.ShouldStartIdleMarkSweep(deadline) || |
444 | old_space_.ReachedSoftThreshold()) { |
445 | // If we have both work to do and enough time, start or finish GC. |
446 | // If we have crossed the soft threshold, ignore time; the next old-space |
447 | // allocation will trigger this work anyway, so we try to pay at least some |
448 | // of that cost with idle time. |
449 | // Blocks for O(roots). |
450 | PageSpace::Phase phase; |
451 | { |
452 | MonitorLocker ml(old_space_.tasks_lock()); |
453 | phase = old_space_.phase(); |
454 | } |
455 | if (phase == PageSpace::kAwaitingFinalization) { |
      TIMELINE_FUNCTION_GC_DURATION(thread, "IdleGC");
457 | CollectOldSpaceGarbage(thread, Heap::kMarkSweep, Heap::kFinalize); |
458 | } else if (phase == PageSpace::kDone) { |
      TIMELINE_FUNCTION_GC_DURATION(thread, "IdleGC");
460 | StartConcurrentMarking(thread); |
461 | } |
462 | } |
463 | } |
464 | |
465 | void Heap::NotifyLowMemory() { |
466 | CollectMostGarbage(kLowMemory); |
467 | } |
468 | |
469 | void Heap::EvacuateNewSpace(Thread* thread, GCReason reason) { |
470 | ASSERT((reason != kOldSpace) && (reason != kPromotion)); |
471 | if (thread->isolate_group() == Dart::vm_isolate()->group()) { |
472 | // The vm isolate cannot safely collect garbage due to unvisited read-only |
473 | // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to |
474 | // trigger a nice out-of-memory message instead of a crash in the middle of |
475 | // visiting pointers. |
476 | return; |
477 | } |
478 | if (BeginNewSpaceGC(thread)) { |
479 | RecordBeforeGC(kScavenge, reason); |
480 | VMTagScope tagScope(thread, reason == kIdle ? VMTag::kGCIdleTagId |
481 | : VMTag::kGCNewSpaceTagId); |
    TIMELINE_FUNCTION_GC_DURATION(thread, "EvacuateNewGeneration");
483 | new_space_.Evacuate(); |
484 | RecordAfterGC(kScavenge); |
485 | PrintStats(); |
486 | NOT_IN_PRODUCT(PrintStatsToTimeline(&tbes, reason)); |
487 | EndNewSpaceGC(); |
488 | } |
489 | } |
490 | |
491 | void Heap::CollectNewSpaceGarbage(Thread* thread, GCReason reason) { |
492 | NoActiveIsolateScope no_active_isolate_scope; |
493 | ASSERT((reason != kOldSpace) && (reason != kPromotion)); |
494 | if (thread->isolate_group() == Dart::vm_isolate()->group()) { |
495 | // The vm isolate cannot safely collect garbage due to unvisited read-only |
496 | // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to |
497 | // trigger a nice out-of-memory message instead of a crash in the middle of |
498 | // visiting pointers. |
499 | return; |
500 | } |
501 | if (BeginNewSpaceGC(thread)) { |
502 | RecordBeforeGC(kScavenge, reason); |
503 | { |
504 | VMTagScope tagScope(thread, reason == kIdle ? VMTag::kGCIdleTagId |
505 | : VMTag::kGCNewSpaceTagId); |
      TIMELINE_FUNCTION_GC_DURATION_BASIC(thread, "CollectNewGeneration");
507 | new_space_.Scavenge(); |
508 | RecordAfterGC(kScavenge); |
509 | PrintStats(); |
510 | NOT_IN_PRODUCT(PrintStatsToTimeline(&tbes, reason)); |
511 | EndNewSpaceGC(); |
512 | } |
513 | if (reason == kNewSpace) { |
514 | if (old_space_.ReachedHardThreshold()) { |
515 | CollectOldSpaceGarbage(thread, kMarkSweep, kPromotion); |
516 | } else { |
517 | CheckStartConcurrentMarking(thread, kPromotion); |
518 | } |
519 | } |
520 | } |
521 | } |
522 | |
523 | void Heap::CollectOldSpaceGarbage(Thread* thread, |
524 | GCType type, |
525 | GCReason reason) { |
526 | NoActiveIsolateScope no_active_isolate_scope; |
527 | |
528 | ASSERT(reason != kNewSpace); |
529 | ASSERT(type != kScavenge); |
530 | if (FLAG_use_compactor) { |
531 | type = kMarkCompact; |
532 | } |
533 | if (thread->isolate_group() == Dart::vm_isolate()->group()) { |
534 | // The vm isolate cannot safely collect garbage due to unvisited read-only |
535 | // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to |
536 | // trigger a nice out-of-memory message instead of a crash in the middle of |
537 | // visiting pointers. |
538 | return; |
539 | } |
540 | if (BeginOldSpaceGC(thread)) { |
541 | thread->isolate_group()->ForEachIsolate([&](Isolate* isolate) { |
542 | // Discard regexp backtracking stacks to further reduce memory usage. |
543 | isolate->CacheRegexpBacktrackStack(nullptr); |
544 | }); |
545 | |
546 | RecordBeforeGC(type, reason); |
547 | VMTagScope tagScope(thread, reason == kIdle ? VMTag::kGCIdleTagId |
548 | : VMTag::kGCOldSpaceTagId); |
    TIMELINE_FUNCTION_GC_DURATION_BASIC(thread, "CollectOldGeneration");
550 | old_space_.CollectGarbage(type == kMarkCompact, true /* finish */); |
551 | RecordAfterGC(type); |
552 | PrintStats(); |
553 | NOT_IN_PRODUCT(PrintStatsToTimeline(&tbes, reason)); |
554 | |
555 | // Some Code objects may have been collected so invalidate handler cache. |
556 | thread->isolate_group()->ForEachIsolate([&](Isolate* isolate) { |
557 | isolate->handler_info_cache()->Clear(); |
558 | isolate->catch_entry_moves_cache()->Clear(); |
559 | }); |
560 | EndOldSpaceGC(); |
561 | } |
562 | } |
563 | |
564 | void Heap::CollectGarbage(GCType type, GCReason reason) { |
565 | Thread* thread = Thread::Current(); |
566 | switch (type) { |
567 | case kScavenge: |
568 | CollectNewSpaceGarbage(thread, reason); |
569 | break; |
570 | case kMarkSweep: |
571 | case kMarkCompact: |
572 | CollectOldSpaceGarbage(thread, type, reason); |
573 | break; |
574 | default: |
575 | UNREACHABLE(); |
576 | } |
577 | } |
578 | |
579 | void Heap::CollectGarbage(Space space) { |
580 | Thread* thread = Thread::Current(); |
581 | if (space == kOld) { |
582 | CollectOldSpaceGarbage(thread, kMarkSweep, kOldSpace); |
583 | } else { |
584 | ASSERT(space == kNew); |
585 | CollectNewSpaceGarbage(thread, kNewSpace); |
586 | } |
587 | } |
588 | |
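// Collects new space, then old space (mark-compact instead of mark-sweep when
// reacting to low memory). A lighter-weight variant of CollectAllGarbage: it
// does not evacuate new space or wait for the sweepers to finish.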
589 | void Heap::CollectMostGarbage(GCReason reason) { |
590 | Thread* thread = Thread::Current(); |
591 | CollectNewSpaceGarbage(thread, reason); |
592 | CollectOldSpaceGarbage( |
593 | thread, reason == kLowMemory ? kMarkCompact : kMarkSweep, reason); |
594 | } |
595 | |
596 | void Heap::CollectAllGarbage(GCReason reason) { |
597 | Thread* thread = Thread::Current(); |
598 | |
599 | // New space is evacuated so this GC will collect all dead objects |
600 | // kept alive by a cross-generational pointer. |
601 | EvacuateNewSpace(thread, reason); |
602 | if (thread->is_marking()) { |
603 | // If incremental marking is happening, we need to finish the GC cycle |
604 | // and perform a follow-up GC to purge any "floating garbage" that may be |
605 | // retained by the incremental barrier. |
606 | CollectOldSpaceGarbage(thread, kMarkSweep, reason); |
607 | } |
608 | CollectOldSpaceGarbage( |
609 | thread, reason == kLowMemory ? kMarkCompact : kMarkSweep, reason); |
610 | WaitForSweeperTasks(thread); |
611 | } |
612 | |
613 | void Heap::CheckStartConcurrentMarking(Thread* thread, GCReason reason) { |
614 | { |
615 | MonitorLocker ml(old_space_.tasks_lock()); |
616 | if (old_space_.phase() != PageSpace::kDone) { |
617 | return; // Busy. |
618 | } |
619 | } |
620 | |
621 | if (old_space_.ReachedSoftThreshold()) { |
622 | // New-space objects are roots during old-space GC. This means that even |
623 | // unreachable new-space objects prevent old-space objects they reference |
624 | // from being collected during an old-space GC. Normally this is not an |
625 | // issue because new-space GCs run much more frequently than old-space GCs. |
626 | // If new-space allocation is low and direct old-space allocation is high, |
627 | // which can happen in a program that allocates large objects and little |
628 | // else, old-space can fill up with unreachable objects until the next |
629 | // new-space GC. This check is the concurrent-marking equivalent to the |
630 | // new-space GC before synchronous-marking in CollectMostGarbage. |
631 | if (last_gc_was_old_space_) { |
632 | CollectNewSpaceGarbage(thread, kFull); |
633 | } |
634 | |
635 | StartConcurrentMarking(thread); |
636 | } |
637 | } |
638 | |
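// Kicks off old-space marking without finalizing it; the cycle is completed
// later by a kFinalize mark-sweep (see CheckFinishConcurrentMarking).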
639 | void Heap::StartConcurrentMarking(Thread* thread) { |
640 | if (BeginOldSpaceGC(thread)) { |
    TIMELINE_FUNCTION_GC_DURATION_BASIC(thread, "StartConcurrentMarking");
642 | old_space_.CollectGarbage(/*compact=*/false, /*finalize=*/false); |
643 | EndOldSpaceGC(); |
644 | } |
645 | } |
646 | |
647 | void Heap::CheckFinishConcurrentMarking(Thread* thread) { |
648 | bool ready; |
649 | { |
650 | MonitorLocker ml(old_space_.tasks_lock()); |
651 | ready = old_space_.phase() == PageSpace::kAwaitingFinalization; |
652 | } |
653 | if (ready) { |
654 | CollectOldSpaceGarbage(thread, Heap::kMarkSweep, Heap::kFinalize); |
655 | } |
656 | } |
657 | |
658 | void Heap::WaitForMarkerTasks(Thread* thread) { |
659 | MonitorLocker ml(old_space_.tasks_lock()); |
660 | while ((old_space_.phase() == PageSpace::kMarking) || |
661 | (old_space_.phase() == PageSpace::kAwaitingFinalization)) { |
662 | while (old_space_.phase() == PageSpace::kMarking) { |
663 | ml.WaitWithSafepointCheck(thread); |
664 | } |
665 | if (old_space_.phase() == PageSpace::kAwaitingFinalization) { |
666 | ml.Exit(); |
667 | CollectOldSpaceGarbage(thread, Heap::kMarkSweep, Heap::kFinalize); |
668 | ml.Enter(); |
669 | } |
670 | } |
671 | } |
672 | |
673 | void Heap::WaitForSweeperTasks(Thread* thread) { |
674 | ASSERT(!thread->IsAtSafepoint()); |
675 | MonitorLocker ml(old_space_.tasks_lock()); |
676 | while (old_space_.tasks() > 0) { |
677 | ml.WaitWithSafepointCheck(thread); |
678 | } |
679 | } |
680 | |
681 | void Heap::WaitForSweeperTasksAtSafepoint(Thread* thread) { |
682 | ASSERT(thread->IsAtSafepoint()); |
683 | MonitorLocker ml(old_space_.tasks_lock()); |
684 | while (old_space_.tasks() > 0) { |
685 | ml.Wait(); |
686 | } |
687 | } |
688 | |
689 | void Heap::UpdateGlobalMaxUsed() { |
690 | ASSERT(isolate_group_ != NULL); |
691 | // We are accessing the used in words count for both new and old space |
692 | // without synchronizing. The value of this metric is approximate. |
693 | isolate_group_->GetHeapGlobalUsedMaxMetric()->SetValue( |
694 | (UsedInWords(Heap::kNew) * kWordSize) + |
695 | (UsedInWords(Heap::kOld) * kWordSize)); |
696 | } |
697 | |
698 | void Heap::InitGrowthControl() { |
699 | new_space_.InitGrowthControl(); |
700 | old_space_.InitGrowthControl(); |
701 | } |
702 | |
703 | void Heap::SetGrowthControlState(bool state) { |
704 | new_space_.SetGrowthControlState(state); |
705 | old_space_.SetGrowthControlState(state); |
706 | } |
707 | |
708 | bool Heap::GrowthControlState() { |
709 | ASSERT(new_space_.GrowthControlState() == old_space_.GrowthControlState()); |
710 | return old_space_.GrowthControlState(); |
711 | } |
712 | |
713 | void Heap::WriteProtect(bool read_only) { |
714 | read_only_ = read_only; |
715 | new_space_.WriteProtect(read_only); |
716 | old_space_.WriteProtect(read_only); |
717 | } |
718 | |
719 | void Heap::Init(IsolateGroup* isolate_group, |
720 | intptr_t max_new_gen_words, |
721 | intptr_t max_old_gen_words) { |
722 | ASSERT(isolate_group->heap() == nullptr); |
723 | std::unique_ptr<Heap> heap( |
724 | new Heap(isolate_group, max_new_gen_words, max_old_gen_words)); |
725 | isolate_group->set_heap(std::move(heap)); |
726 | } |
727 | |
728 | const char* Heap::RegionName(Space space) { |
729 | switch (space) { |
730 | case kNew: |
      return "dart-newspace";
    case kOld:
      return "dart-oldspace";
    case kCode:
      return "dart-codespace";
736 | default: |
737 | UNREACHABLE(); |
738 | } |
739 | } |
740 | |
741 | void Heap::AddRegionsToObjectSet(ObjectSet* set) const { |
742 | new_space_.AddRegionsToObjectSet(set); |
743 | old_space_.AddRegionsToObjectSet(set); |
744 | set->SortRegions(); |
745 | } |
746 | |
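// Requests a full GC once |num_allocations| more allocations have happened;
// intended for debugging and testing. A hypothetical call site might look
// like:
//   isolate_group->heap()->CollectOnNthAllocation(1);  // GC on next allocation.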
747 | void Heap::CollectOnNthAllocation(intptr_t num_allocations) { |
748 | // Prevent generated code from using the TLAB fast path on next allocation. |
749 | new_space_.AbandonRemainingTLABForDebugging(Thread::Current()); |
750 | gc_on_nth_allocation_ = num_allocations; |
751 | } |
752 | |
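// Adopts the donor heap's contents (new space, old space, and weak tables)
// into this heap. The donor must be quiescent: no GC in progress, no
// old-space tasks, and not write-protected.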
753 | void Heap::MergeFrom(Heap* donor) { |
754 | ASSERT(!donor->gc_new_space_in_progress_); |
755 | ASSERT(!donor->gc_old_space_in_progress_); |
756 | ASSERT(!donor->read_only_); |
757 | ASSERT(donor->old_space()->tasks() == 0); |
758 | |
759 | new_space_.MergeFrom(donor->new_space()); |
760 | old_space_.MergeFrom(donor->old_space()); |
761 | |
762 | for (intptr_t i = 0; i < kNumWeakSelectors; ++i) { |
763 | // The new space rehashing should not be necessary. |
764 | new_weak_tables_[i]->MergeFrom(donor->new_weak_tables_[i]); |
765 | old_weak_tables_[i]->MergeFrom(donor->old_weak_tables_[i]); |
766 | } |
767 | } |
768 | |
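// Countdown used by CollectOnNthAllocation: each allocation decrements the
// counter and a full GC (kDebugging) runs when it reaches zero. Skipped when
// the thread is at a safepoint, where CollectAllGarbage is not supported.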
769 | void Heap::CollectForDebugging() { |
770 | if (gc_on_nth_allocation_ == kNoForcedGarbageCollection) return; |
771 | if (Thread::Current()->IsAtSafepoint()) { |
772 | // CollectAllGarbage is not supported when we are at a safepoint. |
773 | // Allocating when at a safepoint is not a common case. |
774 | return; |
775 | } |
776 | gc_on_nth_allocation_--; |
777 | if (gc_on_nth_allocation_ == 0) { |
778 | CollectAllGarbage(kDebugging); |
779 | gc_on_nth_allocation_ = kNoForcedGarbageCollection; |
780 | } else { |
781 | // Prevent generated code from using the TLAB fast path on next allocation. |
782 | new_space_.AbandonRemainingTLABForDebugging(Thread::Current()); |
783 | } |
784 | } |
785 | |
786 | ObjectSet* Heap::CreateAllocatedObjectSet(Zone* zone, |
787 | MarkExpectation mark_expectation) { |
788 | ObjectSet* allocated_set = new (zone) ObjectSet(zone); |
789 | |
790 | this->AddRegionsToObjectSet(allocated_set); |
791 | Isolate* vm_isolate = Dart::vm_isolate(); |
792 | vm_isolate->heap()->AddRegionsToObjectSet(allocated_set); |
793 | |
794 | { |
795 | VerifyObjectVisitor object_visitor(isolate_group(), allocated_set, |
796 | mark_expectation); |
797 | this->VisitObjectsNoImagePages(&object_visitor); |
798 | } |
799 | { |
800 | VerifyObjectVisitor object_visitor(isolate_group(), allocated_set, |
801 | kRequireMarked); |
802 | this->VisitObjectsImagePages(&object_visitor); |
803 | } |
804 | { |
805 | // VM isolate heap is premarked. |
806 | VerifyObjectVisitor vm_object_visitor(isolate_group(), allocated_set, |
807 | kRequireMarked); |
808 | vm_isolate->heap()->VisitObjects(&vm_object_visitor); |
809 | } |
810 | |
811 | return allocated_set; |
812 | } |
813 | |
814 | bool Heap::Verify(MarkExpectation mark_expectation) { |
815 | if (FLAG_disable_heap_verification) { |
816 | return true; |
817 | } |
818 | HeapIterationScope heap_iteration_scope(Thread::Current()); |
819 | return VerifyGC(mark_expectation); |
820 | } |
821 | |
822 | bool Heap::VerifyGC(MarkExpectation mark_expectation) { |
823 | auto thread = Thread::Current(); |
824 | StackZone stack_zone(thread); |
825 | |
826 | ObjectSet* allocated_set = |
827 | CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation); |
828 | VerifyPointersVisitor visitor(isolate_group(), allocated_set); |
829 | VisitObjectPointers(&visitor); |
830 | |
  // Only returning a value so that Heap::Verify can be called from an ASSERT.
832 | return true; |
833 | } |
834 | |
835 | void Heap::PrintSizes() const { |
836 | OS::PrintErr( |
837 | "New space (%" Pd64 "k of %" Pd64 |
838 | "k) " |
      "Old space (%" Pd64 "k of %" Pd64 "k)\n",
840 | (UsedInWords(kNew) / KBInWords), (CapacityInWords(kNew) / KBInWords), |
841 | (UsedInWords(kOld) / KBInWords), (CapacityInWords(kOld) / KBInWords)); |
842 | } |
843 | |
844 | int64_t Heap::UsedInWords(Space space) const { |
845 | return space == kNew ? new_space_.UsedInWords() : old_space_.UsedInWords(); |
846 | } |
847 | |
848 | int64_t Heap::CapacityInWords(Space space) const { |
849 | return space == kNew ? new_space_.CapacityInWords() |
850 | : old_space_.CapacityInWords(); |
851 | } |
852 | |
853 | int64_t Heap::ExternalInWords(Space space) const { |
854 | return space == kNew ? new_space_.ExternalInWords() |
855 | : old_space_.ExternalInWords(); |
856 | } |
857 | |
858 | int64_t Heap::TotalUsedInWords() const { |
859 | return UsedInWords(kNew) + UsedInWords(kOld); |
860 | } |
861 | |
862 | int64_t Heap::TotalCapacityInWords() const { |
863 | return CapacityInWords(kNew) + CapacityInWords(kOld); |
864 | } |
865 | |
866 | int64_t Heap::TotalExternalInWords() const { |
867 | return ExternalInWords(kNew) + ExternalInWords(kOld); |
868 | } |
869 | |
870 | int64_t Heap::GCTimeInMicros(Space space) const { |
871 | if (space == kNew) { |
872 | return new_space_.gc_time_micros(); |
873 | } |
874 | return old_space_.gc_time_micros(); |
875 | } |
876 | |
877 | intptr_t Heap::Collections(Space space) const { |
878 | if (space == kNew) { |
879 | return new_space_.collections(); |
880 | } |
881 | return old_space_.collections(); |
882 | } |
883 | |
884 | const char* Heap::GCTypeToString(GCType type) { |
885 | switch (type) { |
886 | case kScavenge: |
      return "Scavenge";
    case kMarkSweep:
      return "MarkSweep";
    case kMarkCompact:
      return "MarkCompact";
    default:
      UNREACHABLE();
      return "";
895 | } |
896 | } |
897 | |
898 | const char* Heap::GCReasonToString(GCReason gc_reason) { |
899 | switch (gc_reason) { |
900 | case kNewSpace: |
      return "new space";
    case kPromotion:
      return "promotion";
    case kOldSpace:
      return "old space";
    case kFinalize:
      return "finalize";
    case kFull:
      return "full";
    case kExternal:
      return "external";
    case kIdle:
      return "idle";
    case kLowMemory:
      return "low memory";
    case kDebugging:
      return "debugging";
    case kSendAndExit:
      return "send_and_exit";
    default:
      UNREACHABLE();
      return "";
923 | } |
924 | } |
925 | |
926 | int64_t Heap::PeerCount() const { |
927 | return new_weak_tables_[kPeers]->count() + old_weak_tables_[kPeers]->count(); |
928 | } |
929 | |
930 | void Heap::ResetCanonicalHashTable() { |
931 | new_weak_tables_[kCanonicalHashes]->Reset(); |
932 | old_weak_tables_[kCanonicalHashes]->Reset(); |
933 | } |
934 | |
935 | void Heap::ResetObjectIdTable() { |
936 | new_weak_tables_[kObjectIds]->Reset(); |
937 | old_weak_tables_[kObjectIds]->Reset(); |
938 | } |
939 | |
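// Weak tables attach side data (peers, canonical hashes, object ids) to
// individual objects without keeping those objects alive; the table is chosen
// according to the object's generation.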
940 | intptr_t Heap::GetWeakEntry(ObjectPtr raw_obj, WeakSelector sel) const { |
941 | if (raw_obj->IsNewObject()) { |
942 | return new_weak_tables_[sel]->GetValue(raw_obj); |
943 | } |
944 | ASSERT(raw_obj->IsOldObject()); |
945 | return old_weak_tables_[sel]->GetValue(raw_obj); |
946 | } |
947 | |
948 | void Heap::SetWeakEntry(ObjectPtr raw_obj, WeakSelector sel, intptr_t val) { |
949 | if (raw_obj->IsNewObject()) { |
950 | new_weak_tables_[sel]->SetValue(raw_obj, val); |
951 | } else { |
952 | ASSERT(raw_obj->IsOldObject()); |
953 | old_weak_tables_[sel]->SetValue(raw_obj, val); |
954 | } |
955 | } |
956 | |
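// Moves any weak-table entries associated with |before_object| over to
// |after_object|, switching tables if the two objects live in different
// generations. Only reached during hot reload (see the assertions below).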
957 | void Heap::ForwardWeakEntries(ObjectPtr before_object, ObjectPtr after_object) { |
958 | const auto before_space = |
959 | before_object->IsNewObject() ? Heap::kNew : Heap::kOld; |
960 | const auto after_space = |
961 | after_object->IsNewObject() ? Heap::kNew : Heap::kOld; |
962 | |
963 | for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) { |
964 | const auto selector = static_cast<Heap::WeakSelector>(sel); |
965 | auto before_table = GetWeakTable(before_space, selector); |
966 | intptr_t entry = before_table->RemoveValueExclusive(before_object); |
967 | if (entry != 0) { |
968 | auto after_table = GetWeakTable(after_space, selector); |
969 | after_table->SetValueExclusive(after_object, entry); |
970 | } |
971 | } |
972 | |
973 | // We only come here during hot reload, in which case we assume that none of |
974 | // the isolates is in the middle of sending messages. |
975 | isolate_group()->ForEachIsolate( |
976 | [&](Isolate* isolate) { |
977 | RELEASE_ASSERT(isolate->forward_table_new() == nullptr); |
978 | RELEASE_ASSERT(isolate->forward_table_old() == nullptr); |
979 | }, |
980 | /*at_safepoint=*/true); |
981 | } |
982 | |
983 | void Heap::ForwardWeakTables(ObjectPointerVisitor* visitor) { |
984 | // NOTE: This method is only used by the compactor, so there is no need to |
985 | // process the `Heap::kNew` tables. |
986 | for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) { |
987 | WeakSelector selector = static_cast<Heap::WeakSelector>(sel); |
988 | GetWeakTable(Heap::kOld, selector)->Forward(visitor); |
989 | } |
990 | |
  // Isolates might have forwarding tables (used during snapshotting for
  // isolate communication).
993 | isolate_group()->ForEachIsolate( |
994 | [&](Isolate* isolate) { |
995 | auto table_old = isolate->forward_table_old(); |
996 | if (table_old != nullptr) table_old->Forward(visitor); |
997 | }, |
998 | /*at_safepoint=*/true); |
999 | } |
1000 | |
1001 | #ifndef PRODUCT |
1002 | void Heap::PrintToJSONObject(Space space, JSONObject* object) const { |
1003 | if (space == kNew) { |
1004 | new_space_.PrintToJSONObject(object); |
1005 | } else { |
1006 | old_space_.PrintToJSONObject(object); |
1007 | } |
1008 | } |
1009 | |
1010 | void Heap::PrintMemoryUsageJSON(JSONStream* stream) const { |
1011 | JSONObject obj(stream); |
1012 | PrintMemoryUsageJSON(&obj); |
1013 | } |
1014 | |
1015 | void Heap::PrintMemoryUsageJSON(JSONObject* jsobj) const { |
  jsobj->AddProperty("type", "MemoryUsage");
  jsobj->AddProperty64("heapUsage", TotalUsedInWords() * kWordSize);
  jsobj->AddProperty64("heapCapacity", TotalCapacityInWords() * kWordSize);
  jsobj->AddProperty64("externalUsage", TotalExternalInWords() * kWordSize);
1020 | } |
1021 | #endif // PRODUCT |
1022 | |
1023 | void Heap::RecordBeforeGC(GCType type, GCReason reason) { |
1024 | ASSERT((type == kScavenge && gc_new_space_in_progress_) || |
1025 | (type == kMarkSweep && gc_old_space_in_progress_) || |
1026 | (type == kMarkCompact && gc_old_space_in_progress_)); |
1027 | stats_.num_++; |
1028 | stats_.type_ = type; |
1029 | stats_.reason_ = reason; |
1030 | stats_.before_.micros_ = OS::GetCurrentMonotonicMicros(); |
1031 | stats_.before_.new_ = new_space_.GetCurrentUsage(); |
1032 | stats_.before_.old_ = old_space_.GetCurrentUsage(); |
1033 | for (int i = 0; i < GCStats::kTimeEntries; i++) |
1034 | stats_.times_[i] = 0; |
1035 | for (int i = 0; i < GCStats::kDataEntries; i++) |
1036 | stats_.data_[i] = 0; |
1037 | } |
1038 | |
1039 | void Heap::RecordAfterGC(GCType type) { |
1040 | stats_.after_.micros_ = OS::GetCurrentMonotonicMicros(); |
1041 | int64_t delta = stats_.after_.micros_ - stats_.before_.micros_; |
1042 | if (stats_.type_ == kScavenge) { |
1043 | new_space_.AddGCTime(delta); |
1044 | new_space_.IncrementCollections(); |
1045 | } else { |
1046 | old_space_.AddGCTime(delta); |
1047 | old_space_.IncrementCollections(); |
1048 | } |
1049 | stats_.after_.new_ = new_space_.GetCurrentUsage(); |
1050 | stats_.after_.old_ = old_space_.GetCurrentUsage(); |
1051 | ASSERT((type == kScavenge && gc_new_space_in_progress_) || |
1052 | (type == kMarkSweep && gc_old_space_in_progress_) || |
1053 | (type == kMarkCompact && gc_old_space_in_progress_)); |
1054 | #ifndef PRODUCT |
1055 | // For now we'll emit the same GC events on all isolates. |
1056 | if (Service::gc_stream.enabled()) { |
1057 | isolate_group_->ForEachIsolate([&](Isolate* isolate) { |
1058 | if (!Isolate::IsVMInternalIsolate(isolate)) { |
1059 | ServiceEvent event(isolate, ServiceEvent::kGC); |
1060 | event.set_gc_stats(&stats_); |
1061 | Service::HandleEvent(&event); |
1062 | } |
1063 | }); |
1064 | } |
1065 | #endif // !PRODUCT |
1066 | } |
1067 | |
1068 | void Heap::PrintStats() { |
1069 | #if !defined(PRODUCT) |
1070 | if (!FLAG_verbose_gc) return; |
1071 | |
1072 | if ((FLAG_verbose_gc_hdr != 0) && |
1073 | (((stats_.num_ - 1) % FLAG_verbose_gc_hdr) == 0)) { |
1074 | OS::PrintErr( |
1075 | "[ | | | | " |
1076 | "| new gen | new gen | new gen " |
1077 | "| old gen | old gen | old gen " |
1078 | "| sweep | safe- | roots/| stbuf/| tospc/| weaks/| ]\n" |
1079 | "[ GC isolate | space (reason) | GC# | start | time " |
1080 | "| used (kB) | capacity kB | external" |
1081 | "| used (kB) | capacity (kB) | external kB " |
1082 | "| thread| point |marking| reset | sweep |swplrge| data ]\n" |
1083 | "[ | | | (s) | (ms) " |
1084 | "|before| after|before| after| b4 |aftr" |
1085 | "| before| after | before| after |before| after" |
        "| (ms) | (ms) | (ms) | (ms) | (ms) | (ms) | ]\n");
1087 | } |
1088 | |
1089 | // clang-format off |
1090 | OS::PrintErr( |
1091 | "[ %-13.13s, %10s(%9s), " // GC(isolate-group), type(reason) |
1092 | "%4" Pd ", " // count |
1093 | "%6.2f, " // start time |
1094 | "%5.1f, " // total time |
1095 | "%5" Pd ", %5" Pd ", " // new gen: in use before/after |
1096 | "%5" Pd ", %5" Pd ", " // new gen: capacity before/after |
1097 | "%3" Pd ", %3" Pd ", " // new gen: external before/after |
1098 | "%6" Pd ", %6" Pd ", " // old gen: in use before/after |
1099 | "%6" Pd ", %6" Pd ", " // old gen: capacity before/after |
1100 | "%5" Pd ", %5" Pd ", " // old gen: external before/after |
1101 | "%6.2f, %6.2f, %6.2f, %6.2f, %6.2f, %6.2f, " // times |
1102 | "%" Pd ", %" Pd ", %" Pd ", %" Pd ", " // data |
      "]\n",  // End with a comma to make it easier to import in spreadsheets.
1104 | isolate_group()->source()->name, |
1105 | GCTypeToString(stats_.type_), |
1106 | GCReasonToString(stats_.reason_), |
1107 | stats_.num_, |
1108 | MicrosecondsToSeconds(isolate_group_->UptimeMicros()), |
1109 | MicrosecondsToMilliseconds(stats_.after_.micros_ - |
1110 | stats_.before_.micros_), |
1111 | RoundWordsToKB(stats_.before_.new_.used_in_words), |
1112 | RoundWordsToKB(stats_.after_.new_.used_in_words), |
1113 | RoundWordsToKB(stats_.before_.new_.capacity_in_words), |
1114 | RoundWordsToKB(stats_.after_.new_.capacity_in_words), |
1115 | RoundWordsToKB(stats_.before_.new_.external_in_words), |
1116 | RoundWordsToKB(stats_.after_.new_.external_in_words), |
1117 | RoundWordsToKB(stats_.before_.old_.used_in_words), |
1118 | RoundWordsToKB(stats_.after_.old_.used_in_words), |
1119 | RoundWordsToKB(stats_.before_.old_.capacity_in_words), |
1120 | RoundWordsToKB(stats_.after_.old_.capacity_in_words), |
1121 | RoundWordsToKB(stats_.before_.old_.external_in_words), |
1122 | RoundWordsToKB(stats_.after_.old_.external_in_words), |
1123 | MicrosecondsToMilliseconds(stats_.times_[0]), |
1124 | MicrosecondsToMilliseconds(stats_.times_[1]), |
1125 | MicrosecondsToMilliseconds(stats_.times_[2]), |
1126 | MicrosecondsToMilliseconds(stats_.times_[3]), |
1127 | MicrosecondsToMilliseconds(stats_.times_[4]), |
1128 | MicrosecondsToMilliseconds(stats_.times_[5]), |
1129 | stats_.data_[0], |
1130 | stats_.data_[1], |
1131 | stats_.data_[2], |
1132 | stats_.data_[3]); |
1133 | // clang-format on |
1134 | #endif // !defined(PRODUCT) |
1135 | } |
1136 | |
1137 | void Heap::PrintStatsToTimeline(TimelineEventScope* event, GCReason reason) { |
1138 | #if !defined(PRODUCT) |
1139 | if ((event == NULL) || !event->enabled()) { |
1140 | return; |
1141 | } |
1142 | intptr_t arguments = event->GetNumArguments(); |
1143 | event->SetNumArguments(arguments + 13); |
  event->CopyArgument(arguments + 0, "Reason", GCReasonToString(reason));
  event->FormatArgument(arguments + 1, "Before.New.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.used_in_words));
  event->FormatArgument(arguments + 2, "After.New.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.used_in_words));
  event->FormatArgument(arguments + 3, "Before.Old.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.used_in_words));
  event->FormatArgument(arguments + 4, "After.Old.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.used_in_words));

  event->FormatArgument(arguments + 5, "Before.New.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.capacity_in_words));
  event->FormatArgument(arguments + 6, "After.New.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.capacity_in_words));
  event->FormatArgument(arguments + 7, "Before.Old.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.capacity_in_words));
  event->FormatArgument(arguments + 8, "After.Old.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.capacity_in_words));

  event->FormatArgument(arguments + 9, "Before.New.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.external_in_words));
  event->FormatArgument(arguments + 10, "After.New.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.external_in_words));
  event->FormatArgument(arguments + 11, "Before.Old.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.external_in_words));
  event->FormatArgument(arguments + 12, "After.Old.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.external_in_words));
1171 | #endif // !defined(PRODUCT) |
1172 | } |
1173 | |
1174 | Heap::Space Heap::SpaceForExternal(intptr_t size) const { |
1175 | // If 'size' would be a significant fraction of new space, then use old. |
1176 | static const int kExtNewRatio = 16; |
1177 | if (size > (CapacityInWords(Heap::kNew) * kWordSize) / kExtNewRatio) { |
1178 | return Heap::kOld; |
1179 | } else { |
1180 | return Heap::kNew; |
1181 | } |
1182 | } |
1183 | |
1184 | NoHeapGrowthControlScope::NoHeapGrowthControlScope() |
1185 | : ThreadStackResource(Thread::Current()) { |
1186 | Heap* heap = isolate()->heap(); |
1187 | current_growth_controller_state_ = heap->GrowthControlState(); |
1188 | heap->DisableGrowthControl(); |
1189 | } |
1190 | |
1191 | NoHeapGrowthControlScope::~NoHeapGrowthControlScope() { |
1192 | Heap* heap = isolate()->heap(); |
1193 | heap->SetGrowthControlState(current_growth_controller_state_); |
1194 | } |
1195 | |
1196 | WritableVMIsolateScope::WritableVMIsolateScope(Thread* thread) |
1197 | : ThreadStackResource(thread) { |
1198 | if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) { |
1199 | Dart::vm_isolate()->heap()->WriteProtect(false); |
1200 | } |
1201 | } |
1202 | |
1203 | WritableVMIsolateScope::~WritableVMIsolateScope() { |
1204 | ASSERT(Dart::vm_isolate()->heap()->UsedInWords(Heap::kNew) == 0); |
1205 | if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) { |
1206 | Dart::vm_isolate()->heap()->WriteProtect(true); |
1207 | } |
1208 | } |
1209 | |
1210 | WritableCodePages::WritableCodePages(Thread* thread, Isolate* isolate) |
1211 | : StackResource(thread), isolate_(isolate) { |
1212 | isolate_->heap()->WriteProtectCode(false); |
1213 | } |
1214 | |
1215 | WritableCodePages::~WritableCodePages() { |
1216 | isolate_->heap()->WriteProtectCode(true); |
1217 | } |
1218 | |
1219 | } // namespace dart |
1220 | |