// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#include "arena.h"
#include "debug.h"
#include <stdint.h>

namespace kj {

Arena::Arena(size_t chunkSizeHint): nextChunkSize(kj::max(sizeof(ChunkHeader), chunkSizeHint)) {}

Arena::Arena(ArrayPtr<byte> scratch)
    : nextChunkSize(kj::max(sizeof(ChunkHeader), scratch.size())) {
  if (scratch.size() > sizeof(ChunkHeader)) {
    ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(scratch.begin());
    chunk->end = scratch.end();
    chunk->pos = reinterpret_cast<byte*>(chunk + 1);
    chunk->next = nullptr;  // Never actually observed.
    // Don't place the chunk in the chunk list because it's not ours to delete. Just make it the
    // current chunk so that we'll allocate from it until its space is exhausted.
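    // cleanup() only frees chunks that are on chunkList, so this caller-provided scratch space
    // will never be deleted by the arena.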
    currentChunk = chunk;
  }
}

Arena::~Arena() noexcept(false) {
  // Run cleanup() explicitly, but if it throws an exception, make sure to run it again as part of
  // unwind. The second call will not throw because destructors are required to guard against
  // exceptions when already unwinding.
  KJ_ON_SCOPE_FAILURE(cleanup());
  cleanup();
}

void Arena::cleanup() {
  while (objectList != nullptr) {
    void* ptr = objectList + 1;
    auto destructor = objectList->destructor;
    objectList = objectList->next;
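    // The object has already been unlinked, so if this destructor throws, the cleanup() call
    // made during unwind will not attempt to destroy the same object a second time.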
    destructor(ptr);
  }

  while (chunkList != nullptr) {
    void* ptr = chunkList;
    chunkList = chunkList->next;
    operator delete(ptr);
  }
}

namespace {

constexpr bool KJ_UNUSED isPowerOfTwo(size_t value) {
  return (value & (value - 1)) == 0;
}

inline byte* alignTo(byte* p, uint alignment) {
  // Round the pointer up to the next aligned value.

  KJ_DASSERT(isPowerOfTwo(alignment), alignment);
  uintptr_t mask = alignment - 1;
  uintptr_t i = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<byte*>((i + mask) & ~mask);
}

inline size_t alignTo(size_t s, uint alignment) {
  // Round the size up to the next aligned value.

  KJ_DASSERT(isPowerOfTwo(alignment), alignment);
  size_t mask = alignment - 1;
  return (s + mask) & ~mask;
}

}  // namespace

void* Arena::allocateBytes(size_t amount, uint alignment, bool hasDisposer) {
  if (hasDisposer) {
    alignment = kj::max(alignment, alignof(ObjectHeader));
    amount += alignTo(sizeof(ObjectHeader), alignment);
  }

  void* result = allocateBytesInternal(amount, alignment);

  if (hasDisposer) {
    // Reserve space for the ObjectHeader, but don't add it to the object list yet.
    result = alignTo(reinterpret_cast<byte*>(result) + sizeof(ObjectHeader), alignment);
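    // Space for the header now sits immediately before `result`; setDestructor() recovers it
    // later by computing `reinterpret_cast<ObjectHeader*>(ptr) - 1` and only then adds it to
    // the object list.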
  }

  KJ_DASSERT(reinterpret_cast<uintptr_t>(result) % alignment == 0);
  return result;
}

void* Arena::allocateBytesInternal(size_t amount, uint alignment) {
  if (currentChunk != nullptr) {
    ChunkHeader* chunk = currentChunk;
    byte* alignedPos = alignTo(chunk->pos, alignment);

    // Careful about overflow here.
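    // The check below compares byte counts instead of forming `alignedPos + amount`, which for a
    // very large request could wrap past the end of the address space and falsely appear to fit.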
    if (amount + (alignedPos - chunk->pos) <= chunk->end - chunk->pos) {
      // There's enough space in this chunk.
      chunk->pos = alignedPos + amount;
      return alignedPos;
    }
  }

  // Not enough space in the current chunk. Allocate a new one.

  // We need to allocate at least enough space for the ChunkHeader and the requested allocation.

  // If the alignment is less than that of the chunk header, we'll need to increase it.
  alignment = kj::max(alignment, alignof(ChunkHeader));
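  // (Both alignments are powers of two, so the larger of the two is automatically a multiple of
  // the smaller; taking the max is sufficient and no LCM computation is needed.)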

  // If the ChunkHeader size does not match the alignment, we'll need to pad it up.
  amount += alignTo(sizeof(ChunkHeader), alignment);

  // Make sure we're going to allocate enough space.
  while (nextChunkSize < amount) {
    nextChunkSize *= 2;
  }

  // Allocate.
  byte* bytes = reinterpret_cast<byte*>(operator new(nextChunkSize));

  // Set up the ChunkHeader at the beginning of the allocation.
  ChunkHeader* newChunk = reinterpret_cast<ChunkHeader*>(bytes);
  newChunk->next = chunkList;
  newChunk->pos = bytes + amount;
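  // `amount` was increased above to include the padded ChunkHeader, so pos ends up past both the
  // header and the allocation being made right now.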
  newChunk->end = bytes + nextChunkSize;
  currentChunk = newChunk;
  chunkList = newChunk;
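  // Double the size for the next chunk so the number of heap allocations grows only
  // logarithmically with the total amount of memory allocated from the arena.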
  nextChunkSize *= 2;

  // Move past the ChunkHeader to find the position of the allocated object.
  return alignTo(bytes + sizeof(ChunkHeader), alignment);
}

StringPtr Arena::copyString(StringPtr content) {
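  // Allocate and copy content.size() + 1 bytes so the NUL terminator comes along, keeping the
  // copy usable as a C string.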
  char* data = reinterpret_cast<char*>(allocateBytes(content.size() + 1, 1, false));
  memcpy(data, content.cStr(), content.size() + 1);
  return StringPtr(data, content.size());
}

void Arena::setDestructor(void* ptr, void (*destructor)(void*)) {
  ObjectHeader* header = reinterpret_cast<ObjectHeader*>(ptr) - 1;
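  // allocateBytes() reserved space for this header immediately before the object when it was
  // called with hasDisposer = true.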
  KJ_DASSERT(reinterpret_cast<uintptr_t>(header) % alignof(ObjectHeader) == 0);
  header->destructor = destructor;
  header->next = objectList;
  objectList = header;
}

}  // namespace kj