// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_HEAP_HEAP_H_
#define RUNTIME_VM_HEAP_HEAP_H_

#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
#error "Should not include runtime"
#endif

#include "platform/assert.h"
#include "vm/allocation.h"
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/heap/pages.h"
#include "vm/heap/scavenger.h"
#include "vm/heap/spaces.h"
#include "vm/heap/weak_table.h"
#include "vm/isolate.h"

namespace dart {

// Forward declarations.
class Isolate;
class IsolateGroup;
class ObjectPointerVisitor;
class ObjectSet;
class ServiceEvent;
class TimelineEventScope;
class VirtualMemory;

class Heap {
 public:
  enum Space {
    kNew,
    kOld,
    kCode,
  };

  enum WeakSelector {
    kPeers = 0,
#if !defined(HASH_IN_OBJECT_HEADER)
    kIdentityHashes,
#endif
    kCanonicalHashes,
    kObjectIds,
    kLoadingUnits,
    kNumWeakSelectors
  };

  enum GCType {
    kScavenge,
    kMarkSweep,
    kMarkCompact,
  };

  enum GCReason {
    kNewSpace,     // New space is full.
    kPromotion,    // Old space limit crossed after a scavenge.
    kOldSpace,     // Old space limit crossed.
    kFinalize,     // Concurrent marking finished.
    kFull,         // Heap::CollectAllGarbage
    kExternal,     // Dart_NewFinalizableHandle, Dart_NewWeakPersistentHandle
    kIdle,         // Dart_NotifyIdle
    kLowMemory,    // Dart_NotifyLowMemory
    kDebugging,    // service request, etc.
    kSendAndExit,  // SendPort.sendAndExit
  };

  // Pattern for unused new space and swept old space.
  static const uint8_t kZapByte = 0xf3;

  ~Heap();

  Scavenger* new_space() { return &new_space_; }
  PageSpace* old_space() { return &old_space_; }

  uword Allocate(intptr_t size, Space space) {
    ASSERT(!read_only_);
    switch (space) {
      case kNew:
        // Do not attempt to allocate very large objects in new space.
        if (!IsAllocatableInNewSpace(size)) {
          return AllocateOld(size, OldPage::kData);
        }
        return AllocateNew(size);
      case kOld:
        return AllocateOld(size, OldPage::kData);
      case kCode:
        return AllocateOld(size, OldPage::kExecutable);
      default:
        UNREACHABLE();
    }
    return 0;
  }
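
  // Illustrative usage sketch (not part of the API surface; 'heap' stands for
  // any Heap*): a kNew request whose size exceeds kNewAllocatableSize is
  // transparently served from old space, so callers must not assume the
  // returned address lies in new space.
  //
  //   uword a = heap->Allocate(64, Heap::kNew);        // new-space allocation
  //   uword b = heap->Allocate(512 * KB, Heap::kNew);  // old space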

  // Track external data.
  void AllocatedExternal(intptr_t size, Space space);
  void FreedExternal(intptr_t size, Space space);
  // Move external size from new to old space. Does not by itself trigger GC.
  void PromotedExternal(intptr_t size);

  // Heap contains the specified address.
  bool Contains(uword addr) const;
  bool NewContains(uword addr) const;
  bool OldContains(uword addr) const;
  bool CodeContains(uword addr) const;
  bool DataContains(uword addr) const;

  // Find an object by visiting all pointers in the specified heap space;
  // the 'visitor' decides whether an object matches. The 'visitor' should
  // return true when the object is found, which stops the traversal, and
  // false otherwise, in which case the traversal continues. Returns the
  // null object if nothing is found. (See the usage sketch below the
  // declarations.)
  InstructionsPtr FindObjectInCodeSpace(FindObjectVisitor* visitor) const;
  ObjectPtr FindOldObject(FindObjectVisitor* visitor) const;
  ObjectPtr FindNewObject(FindObjectVisitor* visitor);
  ObjectPtr FindObject(FindObjectVisitor* visitor);
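
  // Usage sketch (illustrative only): 'MatchingVisitor' and its predicate are
  // hypothetical; the exact FindObjectVisitor hooks are declared elsewhere and
  // follow the contract described above.
  //
  //   class MatchingVisitor : public FindObjectVisitor {
  //    public:
  //     bool FindObject(ObjectPtr obj) const override {
  //       return Matches(obj);  // application-specific predicate
  //     }
  //   };
  //
  //   MatchingVisitor visitor;
  //   ObjectPtr found = heap->FindOldObject(&visitor);  // null if not found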

  void HintFreed(intptr_t size);
  void NotifyIdle(int64_t deadline);
  void NotifyLowMemory();

  // Collect a single generation.
  void CollectGarbage(Space space);
  void CollectGarbage(GCType type, GCReason reason);

  // Collect both generations by performing a scavenge followed by a
  // mark-sweep. This function may not collect all unreachable objects. Because
  // mark-sweep treats new space as roots, a cycle between unreachable old and
  // new objects will not be collected until the new objects are promoted.
  // Verification based on heap iteration should instead use CollectAllGarbage.
  void CollectMostGarbage(GCReason reason = kFull);

  // Collect both generations by performing an evacuation followed by a
  // mark-sweep. If incremental marking was in progress, perform another
  // mark-sweep. This function will collect all unreachable objects, including
  // those in inter-generational cycles or stored during incremental marking.
  void CollectAllGarbage(GCReason reason = kFull);
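
  // Illustrative sketch: heap-iteration-based verification should run after
  // CollectAllGarbage (not CollectMostGarbage) so that unreachable
  // inter-generational cycles have already been reclaimed.
  //
  //   heap->CollectAllGarbage(Heap::kDebugging);
  //   ASSERT(heap->Verify());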

  void CheckStartConcurrentMarking(Thread* thread, GCReason reason);
  void StartConcurrentMarking(Thread* thread);
  void CheckFinishConcurrentMarking(Thread* thread);
  void WaitForMarkerTasks(Thread* thread);
  void WaitForSweeperTasks(Thread* thread);
  void WaitForSweeperTasksAtSafepoint(Thread* thread);

  // Enables growth control on the page space heaps. This should be
  // called before any user code is executed.
  void InitGrowthControl();
  void DisableGrowthControl() { SetGrowthControlState(false); }
  void SetGrowthControlState(bool state);
  bool GrowthControlState();

  // Protect access to the heap. Note: Code pages are made
  // executable/non-executable when 'read_only' is true/false, respectively.
  void WriteProtect(bool read_only);
  void WriteProtectCode(bool read_only) {
    old_space_.WriteProtectCode(read_only);
  }

  // Initialize the heap and register it with the isolate.
  static void Init(IsolateGroup* isolate_group,
                   intptr_t max_new_gen_words,
                   intptr_t max_old_gen_words);

  // Returns a suitable name for a VM region in the heap.
  static const char* RegionName(Space space);

  // Verify that all pointers in the heap point to the heap.
  bool Verify(MarkExpectation mark_expectation = kForbidMarked);

  // Print heap sizes.
  void PrintSizes() const;

  // Return the amount of memory used and the capacity of a space (both
  // excluding external memory), and the externally allocated size, in words.
  int64_t UsedInWords(Space space) const;
  int64_t CapacityInWords(Space space) const;
  int64_t ExternalInWords(Space space) const;

  int64_t TotalUsedInWords() const;
  int64_t TotalCapacityInWords() const;
  int64_t TotalExternalInWords() const;
  // Return the cumulative time spent collecting garbage in a space, in
  // microseconds.
  int64_t GCTimeInMicros(Space space) const;

  intptr_t Collections(Space space) const;

  ObjectSet* CreateAllocatedObjectSet(Zone* zone,
                                      MarkExpectation mark_expectation);

  static const char* GCTypeToString(GCType type);
  static const char* GCReasonToString(GCReason reason);

  // Associate a peer with an object. A non-existent peer is equal to NULL.
  void SetPeer(ObjectPtr raw_obj, void* peer) {
    SetWeakEntry(raw_obj, kPeers, reinterpret_cast<intptr_t>(peer));
  }
  void* GetPeer(ObjectPtr raw_obj) const {
    return reinterpret_cast<void*>(GetWeakEntry(raw_obj, kPeers));
  }
  int64_t PeerCount() const;
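
  // Usage sketch (illustrative only; 'MyNativeData' is a hypothetical native
  // type): a peer is an arbitrary native pointer carried alongside an object
  // in a weak table and reads back as NULL until one is set.
  //
  //   MyNativeData* data = ...;
  //   heap->SetPeer(obj, data);
  //   auto* back = static_cast<MyNativeData*>(heap->GetPeer(obj));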

#if !defined(HASH_IN_OBJECT_HEADER)
  // Associate an identity hashCode with an object. A non-existent hashCode
  // is equal to 0.
  void SetHash(ObjectPtr raw_obj, intptr_t hash) {
    SetWeakEntry(raw_obj, kIdentityHashes, hash);
  }
  intptr_t GetHash(ObjectPtr raw_obj) const {
    return GetWeakEntry(raw_obj, kIdentityHashes);
  }
#endif

  void SetCanonicalHash(ObjectPtr raw_obj, intptr_t hash) {
    SetWeakEntry(raw_obj, kCanonicalHashes, hash);
  }
  intptr_t GetCanonicalHash(ObjectPtr raw_obj) const {
    return GetWeakEntry(raw_obj, kCanonicalHashes);
  }
  void ResetCanonicalHashTable();

  // Associate an id with an object (used when serializing an object).
  // A non-existent id is equal to 0.
  void SetObjectId(ObjectPtr raw_obj, intptr_t object_id) {
    ASSERT(Thread::Current()->IsMutatorThread());
    SetWeakEntry(raw_obj, kObjectIds, object_id);
  }
  intptr_t GetObjectId(ObjectPtr raw_obj) const {
    ASSERT(Thread::Current()->IsMutatorThread());
    return GetWeakEntry(raw_obj, kObjectIds);
  }
  void ResetObjectIdTable();

  void SetLoadingUnit(ObjectPtr raw_obj, intptr_t object_id) {
    ASSERT(Thread::Current()->IsMutatorThread());
    SetWeakEntry(raw_obj, kLoadingUnits, object_id);
  }
  intptr_t GetLoadingUnit(ObjectPtr raw_obj) const {
    ASSERT(Thread::Current()->IsMutatorThread());
    return GetWeakEntry(raw_obj, kLoadingUnits);
  }

  // Used by the GC algorithms to propagate weak entries.
  intptr_t GetWeakEntry(ObjectPtr raw_obj, WeakSelector sel) const;
  void SetWeakEntry(ObjectPtr raw_obj, WeakSelector sel, intptr_t val);

  WeakTable* GetWeakTable(Space space, WeakSelector selector) const {
    if (space == kNew) {
      return new_weak_tables_[selector];
    }
    ASSERT(space == kOld);
    return old_weak_tables_[selector];
  }
  void SetWeakTable(Space space, WeakSelector selector, WeakTable* value) {
    if (space == kNew) {
      new_weak_tables_[selector] = value;
    } else {
      ASSERT(space == kOld);
      old_weak_tables_[selector] = value;
    }
  }

  void ForwardWeakEntries(ObjectPtr before_object, ObjectPtr after_object);
  void ForwardWeakTables(ObjectPointerVisitor* visitor);

  // Stats collection.
  void RecordTime(int id, int64_t micros) {
    ASSERT((id >= 0) && (id < GCStats::kTimeEntries));
    stats_.times_[id] = micros;
  }

  void RecordData(int id, intptr_t value) {
    ASSERT((id >= 0) && (id < GCStats::kDataEntries));
    stats_.data_[id] = value;
  }

  void UpdateGlobalMaxUsed();

  static bool IsAllocatableInNewSpace(intptr_t size) {
    return size <= kNewAllocatableSize;
  }
  static bool IsAllocatableViaFreeLists(intptr_t size) {
    return size < kAllocatablePageSize;
  }
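
  // Illustrative sketch: these predicates partition allocations by size using
  // the constants declared below. For example, a 100 KB object may still be
  // allocated in new space, but in old space it is too large for the free
  // lists and is served outside them (e.g. by a dedicated large page).
  //
  //   Heap::IsAllocatableInNewSpace(100 * KB);    // true  (<= 256 KB)
  //   Heap::IsAllocatableViaFreeLists(100 * KB);  // false (not < 64 KB)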

#ifndef PRODUCT
  void PrintToJSONObject(Space space, JSONObject* object) const;

  // Returns a JSON object with total memory usage statistics for both new and
  // old space combined.
  void PrintMemoryUsageJSON(JSONStream* stream) const;
  void PrintMemoryUsageJSON(JSONObject* jsobj) const;

  // The heap map contains the sizes and class ids for the objects in each
  // page.
  void PrintHeapMapToJSONStream(Isolate* isolate, JSONStream* stream) {
    old_space_.PrintHeapMapToJSONStream(isolate, stream);
  }
#endif  // PRODUCT

  IsolateGroup* isolate_group() const { return isolate_group_; }

  Monitor* barrier() const { return &barrier_; }
  Monitor* barrier_done() const { return &barrier_done_; }

  void SetupImagePage(void* pointer, uword size, bool is_executable) {
    old_space_.SetupImagePage(pointer, size, is_executable);
  }

  static const intptr_t kNewAllocatableSize = 256 * KB;
  static const intptr_t kAllocatablePageSize = 64 * KB;

  Space SpaceForExternal(intptr_t size) const;

  void CollectOnNthAllocation(intptr_t num_allocations);
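
  // Usage sketch (illustrative only): per 'gc_on_nth_allocation_' below, this
  // arms a full collection on an upcoming allocation; tests use it to exercise
  // GC-sensitive code paths.
  //
  //   heap->CollectOnNthAllocation(1);  // a subsequent allocation triggers
  //                                     // CollectAllGarbage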

  void MergeFrom(Heap* donor);

 private:
  class GCStats : public ValueObject {
   public:
    GCStats() {}
    intptr_t num_;
    Heap::GCType type_;
    Heap::GCReason reason_;

    class Data : public ValueObject {
     public:
      Data() {}
      int64_t micros_;
      SpaceUsage new_;
      SpaceUsage old_;

     private:
      DISALLOW_COPY_AND_ASSIGN(Data);
    };

    enum { kTimeEntries = 6 };
    enum { kDataEntries = 4 };

    Data before_;
    Data after_;
    int64_t times_[kTimeEntries];
    intptr_t data_[kDataEntries];

   private:
    DISALLOW_COPY_AND_ASSIGN(GCStats);
  };

  Heap(IsolateGroup* isolate_group,
       intptr_t max_new_gen_semi_words,  // Max capacity of new semi-space.
       intptr_t max_old_gen_words);

  uword AllocateNew(intptr_t size);
  uword AllocateOld(intptr_t size, OldPage::PageType type);

  // Visit all pointers. Caller must ensure concurrent sweeper is not running,
  // and the visitor must not allocate.
  void VisitObjectPointers(ObjectPointerVisitor* visitor);

  // Visit all objects, including FreeListElement "objects". Caller must ensure
  // concurrent sweeper is not running, and the visitor must not allocate.
  void VisitObjects(ObjectVisitor* visitor);
  void VisitObjectsNoImagePages(ObjectVisitor* visitor);
  void VisitObjectsImagePages(ObjectVisitor* visitor) const;

  // Like Verify, but does not wait for concurrent sweeper, so caller must
  // ensure thread-safety.
  bool VerifyGC(MarkExpectation mark_expectation = kForbidMarked);

  // Helper functions for garbage collection.
  void CollectNewSpaceGarbage(Thread* thread, GCReason reason);
  void CollectOldSpaceGarbage(Thread* thread, GCType type, GCReason reason);
  void EvacuateNewSpace(Thread* thread, GCReason reason);

  // GC stats collection.
  void RecordBeforeGC(GCType type, GCReason reason);
  void RecordAfterGC(GCType type);
  void PrintStats();
  void PrintStatsToTimeline(TimelineEventScope* event, GCReason reason);

  // Updates the GC-in-progress flags.
  bool BeginNewSpaceGC(Thread* thread);
  void EndNewSpaceGC();
  bool BeginOldSpaceGC(Thread* thread);
  void EndOldSpaceGC();

  void AddRegionsToObjectSet(ObjectSet* set) const;

  // Trigger major GC if 'gc_on_nth_allocation_' is set.
  void CollectForDebugging();

  IsolateGroup* isolate_group_;

  // The different spaces used for allocation.
  Scavenger new_space_;
  PageSpace old_space_;

  WeakTable* new_weak_tables_[kNumWeakSelectors];
  WeakTable* old_weak_tables_[kNumWeakSelectors];

  mutable Monitor barrier_;
  mutable Monitor barrier_done_;

  // GC stats collection.
  GCStats stats_;

  // This heap is in read-only mode: No allocation is allowed.
  bool read_only_;

  // GC on the heap is in progress.
  Monitor gc_in_progress_monitor_;
  bool gc_new_space_in_progress_;
  bool gc_old_space_in_progress_;
  bool last_gc_was_old_space_;
  bool assume_scavenge_will_fail_;

  static const intptr_t kNoForcedGarbageCollection = -1;

  // Counts down the heap allocations (new or old) remaining before
  // CollectAllGarbage is triggered; kNoForcedGarbageCollection means no
  // collection is scheduled. Used within unit tests for testing GC on certain
  // sensitive codepaths.
  intptr_t gc_on_nth_allocation_;

  friend class Become;       // VisitObjectPointers
  friend class GCCompactor;  // VisitObjectPointers
  friend class Precompiler;  // VisitObjects
  friend class Unmarker;     // VisitObjects
  friend class ServiceEvent;
  friend class Scavenger;             // VerifyGC
  friend class PageSpace;             // VerifyGC
  friend class IsolateReloadContext;  // VisitObjects
  friend class ClassFinalizer;        // VisitObjects
  friend class HeapIterationScope;    // VisitObjects
  friend class ProgramVisitor;        // VisitObjectsImagePages
  friend class Serializer;            // VisitObjectsImagePages
  friend class HeapTestHelper;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};

class HeapIterationScope : public ThreadStackResource {
 public:
  explicit HeapIterationScope(Thread* thread, bool writable = false);
  ~HeapIterationScope();

  void IterateObjects(ObjectVisitor* visitor) const;
  void IterateObjectsNoImagePages(ObjectVisitor* visitor) const;
  void IterateOldObjects(ObjectVisitor* visitor) const;
  void IterateOldObjectsNoImagePages(ObjectVisitor* visitor) const;

  void IterateVMIsolateObjects(ObjectVisitor* visitor) const;

  void IterateObjectPointers(ObjectPointerVisitor* visitor,
                             ValidationPolicy validate_frames);
  void IterateStackPointers(ObjectPointerVisitor* visitor,
                            ValidationPolicy validate_frames);

 private:
  Heap* heap_;
  PageSpace* old_space_;
  bool writable_;

  DISALLOW_COPY_AND_ASSIGN(HeapIterationScope);
};
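
// Usage sketch (illustrative only; 'CountingVisitor' is a hypothetical
// ObjectVisitor subclass): the scope is intended to make object iteration safe
// while the current thread is in the VM.
//
//   CountingVisitor visitor;
//   {
//     HeapIterationScope iteration(Thread::Current());
//     iteration.IterateObjects(&visitor);
//   }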

class NoHeapGrowthControlScope : public ThreadStackResource {
 public:
  NoHeapGrowthControlScope();
  ~NoHeapGrowthControlScope();

 private:
  bool current_growth_controller_state_;
  DISALLOW_COPY_AND_ASSIGN(NoHeapGrowthControlScope);
};
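
// Usage sketch (illustrative only; the helper below is hypothetical): growth
// control is disabled for the dynamic extent of the scope and the previous
// state is restored on exit.
//
//   {
//     NoHeapGrowthControlScope no_growth_control;
//     AllocateManyVMInternalObjects();  // hypothetical allocation-heavy work
//   }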

// Note: During this scope all pages are writable and the code pages are
// non-executable.
class WritableVMIsolateScope : ThreadStackResource {
 public:
  explicit WritableVMIsolateScope(Thread* thread);
  ~WritableVMIsolateScope();
};

class WritableCodePages : StackResource {
 public:
  explicit WritableCodePages(Thread* thread, Isolate* isolate);
  ~WritableCodePages();

 private:
  Isolate* isolate_;
};

#if defined(TESTING)
class GCTestHelper : public AllStatic {
 public:
  // Collect new gen without triggering any side effects. The normal call to
  // CollectGarbage(Heap::kNew) could potentially trigger an old gen collection
  // if there is enough promotion, and this can perturb some tests.
  static void CollectNewSpace() {
    Thread* thread = Thread::Current();
    ASSERT(thread->execution_state() == Thread::kThreadInVM);
    thread->heap()->new_space()->Scavenge();
  }

  // Fully collect old gen and wait for the sweeper to finish. The normal call
  // to CollectGarbage(Heap::kOld) may leave so-called "floating garbage",
  // objects that were seen by the incremental barrier but later made
  // unreachable, and this can perturb some tests.
  static void CollectOldSpace() {
    Thread* thread = Thread::Current();
    ASSERT(thread->execution_state() == Thread::kThreadInVM);
    if (thread->is_marking()) {
      thread->heap()->CollectGarbage(Heap::kMarkSweep, Heap::kDebugging);
    }
    thread->heap()->CollectGarbage(Heap::kMarkSweep, Heap::kDebugging);
    WaitForGCTasks();
  }

  static void CollectAllGarbage() {
    Thread* thread = Thread::Current();
    ASSERT(thread->execution_state() == Thread::kThreadInVM);
    thread->heap()->CollectAllGarbage(Heap::kDebugging);
  }

  static void WaitForGCTasks() {
    Thread* thread = Thread::Current();
    ASSERT(thread->execution_state() == Thread::kThreadInVM);
    thread->heap()->WaitForMarkerTasks(thread);
    thread->heap()->WaitForSweeperTasks(thread);
  }
};
#endif  // TESTING

}  // namespace dart

#endif  // RUNTIME_VM_HEAP_HEAP_H_