1 | // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #ifndef RUNTIME_VM_HEAP_FREELIST_H_ |
6 | #define RUNTIME_VM_HEAP_FREELIST_H_ |
7 | |
8 | #include "platform/assert.h" |
9 | #include "platform/atomic.h" |
10 | #include "vm/allocation.h" |
11 | #include "vm/bit_set.h" |
12 | #include "vm/os_thread.h" |
13 | #include "vm/raw_object.h" |
14 | |
15 | namespace dart { |
16 | |
// FreeListElement describes a freelist element. The smallest FreeListElement
// is two words in size. The second word of the raw object holds a next_
// pointer that chains the elements of the list together. For elements larger
// than the maximum object size encodable in the tags field, the size of the
// element is embedded in the element at the address following the next_
// field. All words written by the freelist are guaranteed to look like Smis.
// A FreeListElement never has its header mark bit set.
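//
// Illustrative layout (assuming a 64-bit build with HASH_IN_OBJECT_HEADER;
// the fields are declared below):
//
//   2-word element:  [ tags_ | hash_ ][ next_ ]
//                      size encoded in the tags' size field
//   larger element:  [ tags_ | hash_ ][ next_ ][ embedded size ] ...
//                      size field in tags_ is 0; size is stored after next_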
24 | class FreeListElement { |
25 | public: |
26 | FreeListElement* next() const { return next_; } |
27 | uword next_address() const { return reinterpret_cast<uword>(&next_); } |
28 | |
29 | void set_next(FreeListElement* next) { next_ = next; } |
30 | |
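  // Returns this element's size in bytes: decoded from the tags when it fits
  // in the size tag, otherwise read from the word embedded after next_.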
31 | intptr_t HeapSize() { |
32 | intptr_t size = ObjectLayout::SizeTag::decode(tags_); |
33 | if (size != 0) return size; |
34 | return *SizeAddress(); |
35 | } |
36 | |
37 | static FreeListElement* AsElement(uword addr, intptr_t size); |
38 | |
39 | static void Init(); |
40 | |
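  // Returns the number of header bytes the freelist writes for an element of
  // the given size (tags and next_, plus the embedded size word for elements
  // too large for the size tag).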
  static intptr_t HeaderSizeFor(intptr_t size);
42 | |
  // Used to allocate the class for free list elements in Object::InitOnce.
44 | class FakeInstance { |
45 | public: |
46 | FakeInstance() {} |
47 | static cpp_vtable vtable() { return 0; } |
48 | static intptr_t InstanceSize() { return 0; } |
49 | static intptr_t NextFieldOffset() { return -kWordSize; } |
50 | static const ClassId kClassId = kFreeListElement; |
51 | static bool IsInstance() { return true; } |
52 | |
53 | private: |
54 | DISALLOW_ALLOCATION(); |
55 | DISALLOW_COPY_AND_ASSIGN(FakeInstance); |
56 | }; |
57 | |
58 | private: |
59 | // This layout mirrors the layout of RawObject. |
60 | RelaxedAtomic<uint32_t> tags_; |
61 | #if defined(HASH_IN_OBJECT_HEADER) |
62 | uint32_t hash_; |
63 | #endif |
64 | FreeListElement* next_; |
65 | |
66 | // Returns the address of the embedded size. |
67 | intptr_t* SizeAddress() const { |
68 | uword addr = reinterpret_cast<uword>(&next_) + kWordSize; |
69 | return reinterpret_cast<intptr_t*>(addr); |
70 | } |
71 | |
  // FreeListElements cannot be allocated. Instead, references to them are
  // created using the AsElement factory method.
74 | DISALLOW_ALLOCATION(); |
75 | DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListElement); |
76 | }; |
77 | |
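// FreeList maintains size-segregated lists of small elements, a single list
// of large elements, and an optional bump-allocation region installed by its
// owner via set_top()/set_end(). It backs allocation in the page space.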
78 | class FreeList { |
79 | public: |
80 | FreeList(); |
81 | ~FreeList(); |
82 | |
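  // When 'is_protected' is true, the underlying memory may be
  // write-protected (e.g., pages holding executable code), so writes to it
  // must be preceded by making the memory writable.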
83 | uword TryAllocate(intptr_t size, bool is_protected); |
84 | void Free(uword addr, intptr_t size); |
85 | |
86 | void Reset(); |
87 | |
88 | void Print() const; |
89 | |
90 | Mutex* mutex() { return &mutex_; } |
91 | uword TryAllocateLocked(intptr_t size, bool is_protected); |
92 | void FreeLocked(uword addr, intptr_t size); |
93 | |
94 | // Returns a large element, at least 'minimum_size', or NULL if none exists. |
95 | FreeListElement* TryAllocateLarge(intptr_t minimum_size); |
96 | FreeListElement* TryAllocateLargeLocked(intptr_t minimum_size); |
97 | |
  // Allocates locked and unprotected memory, but only from small elements
  // (i.e., the fixed-size lists).
100 | uword TryAllocateSmallLocked(intptr_t size) { |
101 | DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread()); |
102 | if (size > last_free_small_size_) { |
103 | return 0; |
104 | } |
105 | int index = IndexForSize(size); |
106 | if (index != kNumLists && free_map_.Test(index)) { |
107 | return reinterpret_cast<uword>(DequeueElement(index)); |
108 | } |
109 | if ((index + 1) < kNumLists) { |
110 | intptr_t next_index = free_map_.Next(index + 1); |
111 | if (next_index != -1) { |
112 | FreeListElement* element = DequeueElement(next_index); |
113 | SplitElementAfterAndEnqueue(element, size, false); |
114 | return reinterpret_cast<uword>(element); |
115 | } |
116 | } |
117 | return 0; |
118 | } |
119 | |
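  // Bump-allocates 'size' bytes from [top_, end_), returning 0 when the
  // region is exhausted. The allocated size accumulates in unaccounted_size_
  // so the caller can account for it in bulk later.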
120 | uword TryAllocateBumpLocked(intptr_t size) { |
121 | ASSERT(mutex_.IsOwnedByCurrentThread()); |
122 | uword result = top_; |
123 | uword new_top = result + size; |
124 | if (new_top <= end_) { |
125 | top_ = new_top; |
126 | unaccounted_size_ += size; |
127 | return result; |
128 | } |
129 | return 0; |
130 | } |
131 | intptr_t TakeUnaccountedSizeLocked() { |
132 | ASSERT(mutex_.IsOwnedByCurrentThread()); |
133 | intptr_t result = unaccounted_size_; |
134 | unaccounted_size_ = 0; |
135 | return result; |
136 | } |
137 | |
  // Ensures OldPage::VisitObjects can successfully walk over a partially
  // allocated bump region.
140 | void MakeIterable() { |
141 | if (top_ < end_) { |
142 | FreeListElement::AsElement(top_, end_ - top_); |
143 | } |
144 | } |
145 | // Returns the bump region to the free list. |
146 | void AbandonBumpAllocation() { |
147 | if (top_ < end_) { |
148 | Free(top_, end_ - top_); |
149 | top_ = 0; |
150 | end_ = 0; |
151 | } |
152 | } |
153 | |
154 | uword top() const { return top_; } |
155 | uword end() const { return end_; } |
156 | void set_top(uword value) { top_ = value; } |
157 | void set_end(uword value) { end_ = value; } |
158 | void AddUnaccountedSize(intptr_t size) { unaccounted_size_ += size; } |
159 | |
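  // Transfers all elements from 'donor' into this free list.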
160 | void MergeFrom(FreeList* donor, bool is_protected); |
161 | |
162 | private: |
163 | static const int kNumLists = 128; |
164 | static const intptr_t kInitialFreeListSearchBudget = 1000; |
165 | |
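  // Maps an element size to its list index; sizes of at least
  // kNumLists * kObjectAlignment map to kNumLists, the list of large
  // elements. For example, with kObjectAlignment == 16, a 32-byte element
  // maps to index 2.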
166 | static intptr_t IndexForSize(intptr_t size) { |
167 | ASSERT(size >= kObjectAlignment); |
168 | ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
169 | |
170 | intptr_t index = size >> kObjectAlignmentLog2; |
171 | if (index >= kNumLists) { |
172 | index = kNumLists; |
173 | } |
174 | return index; |
175 | } |
176 | |
177 | intptr_t LengthLocked(int index) const; |
178 | |
179 | void EnqueueElement(FreeListElement* element, intptr_t index); |
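  // Removes and returns the head of the list at 'index'. When this empties a
  // small list, its bit in free_map_ is cleared and, if it held the largest
  // free small size, last_free_small_size_ is recomputed.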
180 | FreeListElement* DequeueElement(intptr_t index) { |
181 | FreeListElement* result = free_lists_[index]; |
182 | FreeListElement* next = result->next(); |
183 | if (next == NULL && index != kNumLists) { |
184 | intptr_t size = index << kObjectAlignmentLog2; |
185 | if (size == last_free_small_size_) { |
186 | // Note: This is -1 * kObjectAlignment if no other small sizes remain. |
187 | last_free_small_size_ = |
188 | free_map_.ClearLastAndFindPrevious(index) * kObjectAlignment; |
189 | } else { |
190 | free_map_.Set(index, false); |
191 | } |
192 | } |
193 | free_lists_[index] = next; |
194 | return result; |
195 | } |
196 | |
197 | void SplitElementAfterAndEnqueue(FreeListElement* element, |
198 | intptr_t size, |
199 | bool is_protected); |
200 | |
201 | void PrintSmall() const; |
202 | void PrintLarge() const; |
203 | |
204 | // Bump pointer region. |
205 | uword top_ = 0; |
206 | uword end_ = 0; |
207 | |
208 | // Allocated from the bump pointer region, but not yet added to |
209 | // PageSpace::usage_. Used to avoid expensive atomic adds during parallel |
210 | // scavenge. |
211 | intptr_t unaccounted_size_ = 0; |
212 | |
213 | // Lock protecting the free list data structures. |
214 | mutable Mutex mutex_; |
215 | |
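  // Bit i is set when free_lists_[i] is non-empty (small lists only).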
216 | BitSet<kNumLists> free_map_; |
217 | |
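  // Size-segregated lists of small elements; free_lists_[kNumLists] holds
  // the large elements.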
218 | FreeListElement* free_lists_[kNumLists + 1]; |
219 | |
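  // Limits how far the list of large elements is searched on each
  // allocation attempt.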
220 | intptr_t freelist_search_budget_ = kInitialFreeListSearchBudget; |
221 | |
222 | // The largest available small size in bytes, or negative if there is none. |
223 | intptr_t last_free_small_size_; |
224 | |
225 | DISALLOW_COPY_AND_ASSIGN(FreeList); |
226 | }; |
227 | |
228 | } // namespace dart |
229 | |
230 | #endif // RUNTIME_VM_HEAP_FREELIST_H_ |
231 | |