/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkArenaAlloc_DEFINED
#define SkArenaAlloc_DEFINED

#include "include/private/SkTFitsIn.h"

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
#include <vector>

// SkArenaAlloc allocates objects and destroys them when the arena itself is destroyed. It's
// designed to minimize the number of underlying block allocations. SkArenaAlloc allocates first
// out of an (optional) user-provided block of memory, and when that's exhausted it allocates on
// the heap, starting with an allocation of firstHeapAllocation bytes. If your data (plus a small
// overhead) fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in
// firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for
// firstHeapAllocation, then blockSize is used unless that too is 0, in which case 1024 is used.
//
// Examples:
//
//     char block[mostCasesSize];
//     SkArenaAlloc arena(block, mostCasesSize, 0);
//
// If mostCasesSize is too large for the stack, you can use the following pattern.
//
//     std::unique_ptr<char[]> block{new char[mostCasesSize]};
//     SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
//
// If the program only sometimes allocates memory, use the following pattern.
//
//     SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
//
// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
// works.
//
//     class Foo {
//         char storage[mostCasesSize];
//         SkArenaAlloc arena{storage, mostCasesSize, 0};
//     };
//
// In addition, the system is optimized to handle POD data including arrays of PODs (where
// POD is really data with no destructors). For POD data it has zero overhead per item, and a
// typical per-block overhead of 8 bytes. For non-POD objects there is a per-item overhead of 4
// bytes. For arrays of non-POD objects there is a per-array overhead of typically 8 bytes. There
// is an additional overhead when switching from POD data to non-POD data of typically 8 bytes.
//
// If additional blocks are needed, they grow exponentially. This strategy bounds the recursion
// depth of RunDtorsOnBlock to O(log size-of-memory). Block sizes grow using the Fibonacci
// sequence, which means that for 2^32 bytes of memory there are 48 allocations, and for 2^48
// there are 71.
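//
// For example, a minimal usage sketch (Node is an illustrative type, not part of this header):
//
//     struct Node { Node* next = nullptr; std::vector<int> values; };  // non-trivial destructor
//
//     SkArenaAlloc arena(nullptr, 0, 1024);
//     int*  ints = arena.makeArrayDefault<int>(16);  // POD array: no per-item overhead
//     Node* node = arena.make<Node>();               // non-POD: a footer records the destructor
//     // When arena is reset or goes out of scope, node->~Node() runs and all blocks are freed;
//     // the ints need no destruction.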
class SkArenaAlloc {
public:
    SkArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAlloc(size_t firstHeapAllocation)
        : SkArenaAlloc(nullptr, 0, firstHeapAllocation)
    {}

    ~SkArenaAlloc();

    template <typename T, typename... Args>
    T* make(Args&&... args) {
        uint32_t size      = ToU32(sizeof(T));
        uint32_t alignment = ToU32(alignof(T));
        char* objStart;
        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(size, alignment);
            fCursor = objStart + size;
        } else {
            objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
            // Can never be UB because max value is alignof(T).
            uint32_t padding = ToU32(objStart - fCursor);

            // Advance to end of object to install footer.
            fCursor = objStart + size;
            FooterAction* releaser = [](char* objEnd) {
                char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
                ((T*)objStart)->~T();
                return objStart;
            };
            this->installFooter(releaser, padding);
        }

        // This must be last to make objects with nested use of this allocator work.
        return new(objStart) T(std::forward<Args>(args)...);
    }

    template <typename T>
    T* makeArrayDefault(size_t count) {
        AssertRelease(SkTFitsIn<uint32_t>(count));
        uint32_t safeCount = ToU32(count);
        T* array = (T*)this->commonArrayAlloc<T>(safeCount);

        // If T is primitive then no initialization takes place.
        for (size_t i = 0; i < safeCount; i++) {
            new (&array[i]) T;
        }
        return array;
    }

    template <typename T>
    T* makeArray(size_t count) {
        AssertRelease(SkTFitsIn<uint32_t>(count));
        uint32_t safeCount = ToU32(count);
        T* array = (T*)this->commonArrayAlloc<T>(safeCount);

        // If T is primitive then the memory is initialized. For example, an array of chars will
        // be zeroed.
        for (size_t i = 0; i < safeCount; i++) {
            new (&array[i]) T();
        }
        return array;
    }
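
    // For instance (illustrative), the two array methods above differ only in initialization:
    // makeArrayDefault<int>(n) leaves its ints uninitialized, while makeArray<int>(n)
    // value-initializes them to zero.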

    // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
    void* makeBytesAlignedTo(size_t size, size_t align) {
        AssertRelease(SkTFitsIn<uint32_t>(size));
        auto objStart = this->allocObject(ToU32(size), ToU32(align));
        fCursor = objStart + size;
        return objStart;
    }

    // Destroy all allocated objects, free any heap allocations.
    void reset();

private:
    static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }
    static uint32_t ToU32(size_t v) {
        assert(SkTFitsIn<uint32_t>(v));
        return (uint32_t)v;
    }

    using Footer       = int64_t;
    using FooterAction = char* (char*);

    static char* SkipPod(char* footerEnd);
    static void RunDtorsOnBlock(char* footerEnd);
    static char* NextBlock(char* footerEnd);

    void installFooter(FooterAction* releaser, uint32_t padding);
    void installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding);
    void installPtrFooter(FooterAction* action, char* ptr, uint32_t padding);

    void ensureSpace(uint32_t size, uint32_t alignment);

    char* allocObject(uint32_t size, uint32_t alignment) {
        uintptr_t mask = alignment - 1;
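        // (~fCursor + 1) is the two's-complement negation of the cursor address, so ANDing it
        // with mask yields how many padding bytes are needed to round fCursor up to the next
        // multiple of alignment (which must be a power of two).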
        uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        uintptr_t totalSize = size + alignedOffset;
        AssertRelease(totalSize >= size);
        if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
            this->ensureSpace(size, alignment);
            alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        }
        return fCursor + alignedOffset;
    }

    char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);

    template <typename T>
    char* commonArrayAlloc(uint32_t count) {
        char* objStart;
        AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
        uint32_t arraySize = ToU32(count * sizeof(T));
        uint32_t alignment = ToU32(alignof(T));

        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(arraySize, alignment);
            fCursor = objStart + arraySize;
        } else {
            constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
            AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
            uint32_t totalSize = arraySize + overhead;
            objStart = this->allocObjectWithFooter(totalSize, alignment);

            // Can never be UB because max value is alignof(T).
            uint32_t padding = ToU32(objStart - fCursor);

            // Advance to end of array to install footer.
            fCursor = objStart + arraySize;
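            // From objStart the layout is [array of T][uint32_t count][Footer]; at destruction
            // time the lambda below walks back from the end of the footer to recover the count
            // and destroy each element.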
            this->installUint32Footer(
                [](char* footerEnd) {
                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
                    uint32_t count;
                    memmove(&count, objEnd, sizeof(uint32_t));
                    char* objStart = objEnd - count * sizeof(T);
                    T* array = (T*) objStart;
                    for (uint32_t i = 0; i < count; i++) {
                        array[i].~T();
                    }
                    return objStart;
                },
                ToU32(count),
                padding);
        }

        return objStart;
    }

    char* fDtorCursor;
    char* fCursor;
    char* fEnd;
    char* const fFirstBlock;
    const uint32_t fFirstSize;
    const uint32_t fFirstHeapAllocationSize;

    // Use the Fibonacci sequence as the growth factor for block size. The size of the block
    // allocated is fFib0 * fFirstHeapAllocationSize. Using 2 ^ n * fFirstHeapAllocationSize
    // had too much slop for Android.
    uint32_t fFib0 {1}, fFib1 {1};
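    // For example, with fFirstHeapAllocationSize = 1024 successive heap blocks come out to
    // roughly 1024, 1024, 2048, 3072, 5120, 8192, ... bytes (approximate; the exact stepping
    // is an implementation detail of how ensureSpace advances fFib0/fFib1).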
};

// Helper for defining allocators with inline/reserved storage.
// For argument declarations, stick to the base type (SkArenaAlloc).
// Note: Inheriting from the storage first means the storage will outlive the
// SkArenaAlloc, letting ~SkArenaAlloc read it as it calls destructors.
// (This is mostly only relevant for strict tools like MSAN.)
template <size_t InlineStorageSize>
class SkSTArenaAlloc : private std::array<char, InlineStorageSize>, public SkArenaAlloc {
public:
    explicit SkSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
        : SkArenaAlloc{this->data(), this->size(), firstHeapAllocation} {}
};
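
// For example, a usage sketch (Widget is an illustrative type, not part of this header):
//
//     SkSTArenaAlloc<4096> arena;        // 4096 bytes of inline storage, no heap use yet
//     Widget* w = arena.make<Widget>();  // spills to the heap only once the inline bytes run out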

#endif // SkArenaAlloc_DEFINED