// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/zone.h"

#include "platform/assert.h"
#include "platform/leak_sanitizer.h"
#include "platform/utils.h"
#include "vm/dart_api_state.h"
#include "vm/flags.h"
#include "vm/handles_impl.h"
#include "vm/heap/heap.h"
#include "vm/os.h"
#include "vm/virtual_memory.h"

namespace dart {

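// total_size_ tracks the virtual memory currently reserved for zone segments
// across the whole process. Segments parked in the cache below stay counted;
// the total only shrinks when a segment's VirtualMemory is actually deleted
// in Segment::DeleteSegmentList().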
RelaxedAtomic<intptr_t> Zone::total_size_ = {0};

// Zone segments represent chunks of memory: a segment's starting address is
// encoded in its this pointer and its size in bytes is stored in the header.
// Segments are chained together to form the backing storage for an expanding
// zone.
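// The Segment header occupies the first sizeof(Segment) bytes of the mapping;
// usable space runs from start() to end().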
class Zone::Segment {
 public:
  Segment* next() const { return next_; }
  intptr_t size() const { return size_; }
  VirtualMemory* memory() const { return memory_; }

  uword start() { return address(sizeof(Segment)); }
  uword end() { return address(size_); }

  // Allocate or delete individual segments.
  static Segment* New(intptr_t size, Segment* next);
  static void DeleteSegmentList(Segment* segment);
  static void IncrementMemoryCapacity(uintptr_t size);
  static void DecrementMemoryCapacity(uintptr_t size);

 private:
  Segment* next_;
  intptr_t size_;
  VirtualMemory* memory_;
  void* alignment_;

  // Computes the address of the nth byte in this segment.
  uword address(intptr_t n) { return reinterpret_cast<uword>(this) + n; }

  DISALLOW_IMPLICIT_CONSTRUCTORS(Segment);
};

// tcmalloc and jemalloc have both been observed to hold onto lots of free'd
// zone segments (jemalloc to the point of causing OOM), so instead of using
// malloc to allocate segments, we allocate directly from mmap/zx_vmo_create/
// VirtualAlloc, and cache a small number of the normal sized segments.
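// Cache hits are taken in Segment::New() and normal sized segments are
// returned to the cache (when there is room) in Segment::DeleteSegmentList().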
static constexpr intptr_t kSegmentCacheCapacity = 16;  // 1 MB of Segments
static Mutex* segment_cache_mutex = nullptr;
static VirtualMemory* segment_cache[kSegmentCacheCapacity] = {nullptr};
static intptr_t segment_cache_size = 0;

void Zone::Init() {
  ASSERT(segment_cache_mutex == nullptr);
  segment_cache_mutex = new Mutex(NOT_IN_PRODUCT("segment_cache_mutex"));
}

void Zone::Cleanup() {
  {
    MutexLocker ml(segment_cache_mutex);
    ASSERT(segment_cache_size >= 0);
    ASSERT(segment_cache_size <= kSegmentCacheCapacity);
    while (segment_cache_size > 0) {
      delete segment_cache[--segment_cache_size];
    }
  }
  delete segment_cache_mutex;
  segment_cache_mutex = nullptr;
}

Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) {
  size = Utils::RoundUp(size, VirtualMemory::PageSize());
  VirtualMemory* memory = nullptr;
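  // Fast path: reuse a cached normal-sized segment instead of asking the OS
  // for a fresh mapping.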
  if (size == kSegmentSize) {
    MutexLocker ml(segment_cache_mutex);
    ASSERT(segment_cache_size >= 0);
    ASSERT(segment_cache_size <= kSegmentCacheCapacity);
    if (segment_cache_size > 0) {
      memory = segment_cache[--segment_cache_size];
    }
  }
  if (memory == nullptr) {
    memory = VirtualMemory::Allocate(size, false, "dart-zone");
    total_size_.fetch_add(size);
  }
  if (memory == nullptr) {
    OUT_OF_MEMORY();
  }
  Segment* result = reinterpret_cast<Segment*>(memory->start());
#ifdef DEBUG
  // Zap the entire allocated segment (including the header).
  memset(reinterpret_cast<void*>(result), kZapUninitializedByte, size);
#endif
  result->next_ = next;
  result->size_ = size;
  result->memory_ = memory;
  result->alignment_ = nullptr;  // Avoid unused variable warnings.

  LSAN_REGISTER_ROOT_REGION(result, sizeof(*result));

  IncrementMemoryCapacity(size);
  return result;
}

void Zone::Segment::DeleteSegmentList(Segment* head) {
  Segment* current = head;
  while (current != NULL) {
    intptr_t size = current->size();
    DecrementMemoryCapacity(size);
    Segment* next = current->next();
    VirtualMemory* memory = current->memory();
#ifdef DEBUG
    // Zap the entire current segment (including the header).
    memset(reinterpret_cast<void*>(current), kZapDeletedByte, current->size());
#endif
    LSAN_UNREGISTER_ROOT_REGION(current, sizeof(*current));

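    // Return normal-sized segments to the cache while there is room;
    // otherwise (and for large segments) release the mapping back to the OS.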
    if (size == kSegmentSize) {
      MutexLocker ml(segment_cache_mutex);
      ASSERT(segment_cache_size >= 0);
      ASSERT(segment_cache_size <= kSegmentCacheCapacity);
      if (segment_cache_size < kSegmentCacheCapacity) {
        segment_cache[segment_cache_size++] = memory;
        memory = nullptr;
      }
    }
    if (memory != nullptr) {
      total_size_.fetch_sub(size);
      delete memory;
    }
    current = next;
  }
}

void Zone::Segment::IncrementMemoryCapacity(uintptr_t size) {
  ThreadState* current_thread = ThreadState::Current();
  if (current_thread != NULL) {
    current_thread->IncrementMemoryCapacity(size);
  } else if (ApiNativeScope::Current() != NULL) {
    // If there is no current thread, we might be inside of a native scope.
    ApiNativeScope::IncrementNativeScopeMemoryCapacity(size);
  }
}

void Zone::Segment::DecrementMemoryCapacity(uintptr_t size) {
  ThreadState* current_thread = ThreadState::Current();
  if (current_thread != NULL) {
    current_thread->DecrementMemoryCapacity(size);
  } else if (ApiNativeScope::Current() != NULL) {
    // If there is no current thread, we might be inside of a native scope.
    ApiNativeScope::DecrementNativeScopeMemoryCapacity(size);
  }
}

// TODO(bkonyi): We need to account for the initial chunk size when a new zone
// is created within a new thread or ApiNativeScope when calculating high
// watermarks or memory consumption.
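// A zone starts out with a small inline buffer (buffer_, kInitialChunkSize
// bytes, declared in zone.h), so zones that never outgrow it never touch the
// segment allocator at all.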
Zone::Zone()
    : initial_buffer_(buffer_, kInitialChunkSize),
      position_(initial_buffer_.start()),
      limit_(initial_buffer_.end()),
      head_(NULL),
      large_segments_(NULL),
      handles_(),
      previous_(NULL) {
  ASSERT(Utils::IsAligned(position_, kAlignment));
  Segment::IncrementMemoryCapacity(kInitialChunkSize);
#ifdef DEBUG
  // Zap the entire initial buffer.
  memset(initial_buffer_.pointer(), kZapUninitializedByte,
         initial_buffer_.size());
#endif
}

Zone::~Zone() {
  if (FLAG_trace_zones) {
    DumpZoneSizes();
  }
  DeleteAll();
  Segment::DecrementMemoryCapacity(kInitialChunkSize);
}

void Zone::DeleteAll() {
  // Traverse the chained list of segments, zapping (in debug mode)
  // and freeing every zone segment.
  if (head_ != NULL) {
    Segment::DeleteSegmentList(head_);
  }
  if (large_segments_ != NULL) {
    Segment::DeleteSegmentList(large_segments_);
  }
// Reset zone state.
#ifdef DEBUG
  memset(initial_buffer_.pointer(), kZapDeletedByte, initial_buffer_.size());
#endif
  position_ = initial_buffer_.start();
  limit_ = initial_buffer_.end();
  small_segment_capacity_ = 0;
  head_ = NULL;
  large_segments_ = NULL;
  previous_ = NULL;
  handles_.Reset();
}

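// SizeInBytes() counts bytes actually handed out from this zone, while
// CapacityInBytes() counts everything reserved for it, including the unused
// tail of the current head segment.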
uintptr_t Zone::SizeInBytes() const {
  uintptr_t size = 0;
  for (Segment* s = large_segments_; s != NULL; s = s->next()) {
    size += s->size();
  }
  if (head_ == NULL) {
    return size + (position_ - initial_buffer_.start());
  }
  size += initial_buffer_.size();
  for (Segment* s = head_->next(); s != NULL; s = s->next()) {
    size += s->size();
  }
  return size + (position_ - head_->start());
}

uintptr_t Zone::CapacityInBytes() const {
  uintptr_t size = 0;
  for (Segment* s = large_segments_; s != NULL; s = s->next()) {
    size += s->size();
  }
  if (head_ == NULL) {
    return size + initial_buffer_.size();
  }
  size += initial_buffer_.size();
  for (Segment* s = head_; s != NULL; s = s->next()) {
    size += s->size();
  }
  return size;
}

uword Zone::AllocateExpand(intptr_t size) {
  ASSERT(size >= 0);
  if (FLAG_trace_zones) {
    OS::PrintErr("*** Expanding zone 0x%" Px "\n",
                 reinterpret_cast<intptr_t>(this));
    DumpZoneSizes();
  }
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  ASSERT(Utils::IsAligned(size, kAlignment));
  intptr_t free_size = (limit_ - position_);
  ASSERT(free_size < size);

  // First check to see if we should just chain it as a large segment.
  intptr_t max_size =
      Utils::RoundDown(kSegmentSize - sizeof(Segment), kAlignment);
  ASSERT(max_size > 0);
  if (size > max_size) {
    return AllocateLargeSegment(size);
  }

  const intptr_t kSuperPageSize = 2 * MB;
  intptr_t next_size;
  if (small_segment_capacity_ < kSuperPageSize) {
    // When the Zone is small, grow linearly to limit memory overhead and use
    // the segment cache to avoid expensive mmap calls.
    next_size = kSegmentSize;
  } else {
    // When the Zone is large, grow geometrically (with a 1.125 ratio) to
    // avoid Page Table Entry exhaustion.
    next_size = Utils::RoundUp(small_segment_capacity_ >> 3, kSuperPageSize);
  }
  ASSERT(next_size >= kSegmentSize);
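  // For example, assuming kSegmentSize is 64 KB: the zone grows 64 KB at a
  // time until 2 MB of small-segment capacity has accumulated, then adds
  // capacity/8 rounded up to a 2 MB boundary, which approaches the 1.125x
  // ratio once the zone is larger than 16 MB.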

  // Allocate another segment and chain it up.
  head_ = Segment::New(next_size, head_);
  small_segment_capacity_ += next_size;

  // Recompute 'position' and 'limit' based on the new head segment.
  uword result = Utils::RoundUp(head_->start(), kAlignment);
  position_ = result + size;
  limit_ = head_->end();
  ASSERT(position_ <= limit_);
  return result;
}

uword Zone::AllocateLargeSegment(intptr_t size) {
  ASSERT(size >= 0);
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  ASSERT(Utils::IsAligned(size, kAlignment));
  intptr_t free_size = (limit_ - position_);
  ASSERT(free_size < size);

  // Create a new large segment and chain it up.
  // Account for bookkeeping fields in size.
  size += Utils::RoundUp(sizeof(Segment), kAlignment);
  large_segments_ = Segment::New(size, large_segments_);

  uword result = Utils::RoundUp(large_segments_->start(), kAlignment);
  return result;
}

char* Zone::MakeCopyOfString(const char* str) {
  intptr_t len = strlen(str) + 1;  // '\0'-terminated.
  char* copy = Alloc<char>(len);
  strncpy(copy, str, len);
  return copy;
}

char* Zone::MakeCopyOfStringN(const char* str, intptr_t len) {
  ASSERT(len >= 0);
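  // Clamp len to the first '\0', if any, so only the logical string is copied.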
  for (intptr_t i = 0; i < len; i++) {
    if (str[i] == '\0') {
      len = i;
      break;
    }
  }
  char* copy = Alloc<char>(len + 1);  // +1 for '\0'
  strncpy(copy, str, len);
  copy[len] = '\0';
  return copy;
}

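// Joins 'a' and 'b' with 'join' in between, e.g. ConcatStrings("foo", "bar",
// '/') returns "foo/bar"; a NULL 'a' simply returns a zone copy of 'b'.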
char* Zone::ConcatStrings(const char* a, const char* b, char join) {
  intptr_t a_len = (a == NULL) ? 0 : strlen(a);
  const intptr_t b_len = strlen(b) + 1;  // '\0'-terminated.
  // Reserve room for the join character that is inserted when 'a' is present.
  const intptr_t len = a_len + ((a_len > 0) ? 1 : 0) + b_len;
  char* copy = Alloc<char>(len);
  if (a_len > 0) {
    strncpy(copy, a, a_len);
    // Insert join character.
    copy[a_len++] = join;
  }
  strncpy(&copy[a_len], b, b_len);
  return copy;
}

void Zone::DumpZoneSizes() {
  intptr_t size = 0;
  for (Segment* s = large_segments_; s != NULL; s = s->next()) {
    size += s->size();
  }
  OS::PrintErr("*** Zone(0x%" Px
               ") size in bytes,"
               " Total = %" Pd " Large Segments = %" Pd "\n",
               reinterpret_cast<intptr_t>(this), SizeInBytes(), size);
}

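// Visits handles in this zone and in every zone chained below it through
// previous_ (see StackZone nesting).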
void Zone::VisitObjectPointers(ObjectPointerVisitor* visitor) {
  Zone* zone = this;
  while (zone != NULL) {
    zone->handles()->VisitObjectPointers(visitor);
    zone = zone->previous_;
  }
}

char* Zone::PrintToString(const char* format, ...) {
  va_list args;
  va_start(args, format);
  char* buffer = OS::VSCreate(this, format, args);
  va_end(args);
  return buffer;
}

char* Zone::VPrint(const char* format, va_list args) {
  return OS::VSCreate(this, format, args);
}

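// StackZone pushes a fresh Zone onto the current thread's zone chain and the
// destructor pops it again, so zone lifetime follows C++ scope.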
StackZone::StackZone(ThreadState* thread)
    : StackResource(thread), zone_(new Zone()) {
  if (FLAG_trace_zones) {
    OS::PrintErr("*** Starting a new Stack zone 0x%" Px "(0x%" Px ")\n",
                 reinterpret_cast<intptr_t>(this),
                 reinterpret_cast<intptr_t>(zone_));
  }

  // This thread must be preventing safepoints or the GC could be visiting the
  // chain of handle blocks we're about to mutate.
  ASSERT(Thread::Current()->MayAllocateHandles());

  zone_->Link(thread->zone());
  thread->set_zone(zone_);
}

StackZone::~StackZone() {
  // This thread must be preventing safepoints or the GC could be visiting the
  // chain of handle blocks we're about to mutate.
  ASSERT(Thread::Current()->MayAllocateHandles());

  ASSERT(thread()->zone() == zone_);
  thread()->set_zone(zone_->previous_);
  if (FLAG_trace_zones) {
    OS::PrintErr("*** Deleting Stack zone 0x%" Px "(0x%" Px ")\n",
                 reinterpret_cast<intptr_t>(this),
                 reinterpret_cast<intptr_t>(zone_));
  }

  delete zone_;
}

}  // namespace dart