/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkArenaAlloc_DEFINED
#define SkArenaAlloc_DEFINED

#include "include/private/SkTFitsIn.h"

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
#include <vector>

// SkArenaAlloc allocates objects and destroys the allocated objects when it is itself destroyed.
// It's designed to minimize the number of underlying block allocations. SkArenaAlloc allocates
// first out of an (optional) user-provided block of memory, and when that's exhausted it
// allocates on the heap, starting with an allocation of firstHeapAllocation bytes. If your data
// (plus a small overhead) fits in the user-provided block, SkArenaAlloc never uses the heap, and
// if it fits in firstHeapAllocation bytes, it'll use the heap only once. If firstHeapAllocation
// is 0, then blockSize is used instead; if that too is 0, then 1024 is used.
//
// Examples:
//
//   char block[mostCasesSize];
//   SkArenaAlloc arena(block, mostCasesSize);
//
// If mostCasesSize is too large for the stack, you can use the following pattern.
//
//   std::unique_ptr<char[]> block{new char[mostCasesSize]};
//   SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
//
// If the program only sometimes allocates memory, use the following pattern.
//
//   SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
//
// The storage does not necessarily need to be on the stack. Embedding the storage in a class
// also works.
//
//   class Foo {
//       char storage[mostCasesSize];
//       SkArenaAlloc arena(storage, mostCasesSize);
//   };
//
// In addition, the system is optimized to handle POD data including arrays of PODs (where
// POD is really data with no destructors). For POD data it has zero overhead per item, and a
// typical per-block overhead of 8 bytes. For non-POD objects there is a per-item overhead of 4
// bytes. For arrays of non-POD objects there is a per-array overhead of typically 8 bytes. There
// is an additional overhead of typically 8 bytes when switching from POD data to non-POD data.
//
// If additional blocks are needed, their sizes grow exponentially. This strategy bounds the
// recursion depth of RunDtorsOnBlock to O(log size-of-memory). Block sizes grow following the
// Fibonacci sequence, which means that for 2^32 bytes of memory there are 48 allocations, and
// for 2^48 bytes there are 71 allocations.
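//
// A rough usage sketch (Widget and n are hypothetical, not part of this header):
//
//   SkArenaAlloc arena(nullptr, 0, 1024);
//   Widget* w   = arena.make<Widget>(/*ctor args*/);  // single object, dtor runs with the arena
//   int* counts = arena.makeArray<int>(n);            // zero-initialized array
//   // All memory is released (and Widget's destructor run) when arena is destroyed.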
class SkArenaAlloc {
public:
    SkArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAlloc(size_t firstHeapAllocation)
        : SkArenaAlloc(nullptr, 0, firstHeapAllocation) {}

    ~SkArenaAlloc();

    template <typename T, typename... Args>
    T* make(Args&&... args) {
        uint32_t size      = ToU32(sizeof(T));
        uint32_t alignment = ToU32(alignof(T));
        char* objStart;
        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(size, alignment);
            fCursor = objStart + size;
        } else {
            objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
            // Can never be UB because max value is alignof(T).
            uint32_t padding = ToU32(objStart - fCursor);

            // Advance to end of object to install footer.
            fCursor = objStart + size;
            FooterAction* releaser = [](char* objEnd) {
                char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
                ((T*)objStart)->~T();
                return objStart;
            };
            this->installFooter(releaser, padding);
        }

        // This must be last to make objects with nested use of this allocator work.
        return new(objStart) T(std::forward<Args>(args)...);
    }
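
    // For example (Node is a hypothetical type), arena.make<Node>(args...) placement-constructs
    // a Node inside the arena; if Node has a non-trivial destructor, a footer is recorded so
    // that ~SkArenaAlloc() can run ~Node() later.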

    template <typename T>
    T* makeArrayDefault(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Default initialization: if T is primitive then the value is left uninitialized.
            new (&array[i]) T;
        }
        return array;
    }

    template <typename T>
    T* makeArray(size_t count) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            // Value initialization: if T is primitive then the value is zero-initialized.
            new (&array[i]) T();
        }
        return array;
    }
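
    // For example (n is hypothetical):
    //   int* a = arena.makeArrayDefault<int>(n);  // values are indeterminate
    //   int* b = arena.makeArray<int>(n);         // values are all zero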

    template <typename T, typename Initializer>
    T* makeInitializedArray(size_t count, Initializer initializer) {
        T* array = this->allocUninitializedArray<T>(count);
        for (size_t i = 0; i < count; i++) {
            new (&array[i]) T(initializer(i));
        }
        return array;
    }
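
    // For example, a hypothetical table of squares:
    //   int* squares = arena.makeInitializedArray<int>(n, [](size_t i) { return int(i * i); });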

    // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
    void* makeBytesAlignedTo(size_t size, size_t align) {
        AssertRelease(SkTFitsIn<uint32_t>(size));
        auto objStart = this->allocObject(ToU32(size), ToU32(align));
        fCursor = objStart + size;
        return objStart;
    }
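
    // For example, carving out a raw, 16-byte-aligned scratch buffer (size is hypothetical):
    //   void* scratch = arena.makeBytesAlignedTo(size, 16);
    //   // No destructor will run for this memory; it is simply freed with the arena.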

private:
    static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }
    static uint32_t ToU32(size_t v) {
        assert(SkTFitsIn<uint32_t>(v));
        return (uint32_t)v;
    }

    using Footer       = int64_t;
    using FooterAction = char* (char*);

    static char* SkipPod(char* footerEnd);
    static void RunDtorsOnBlock(char* footerEnd);
    static char* NextBlock(char* footerEnd);

    void installFooter(FooterAction* releaser, uint32_t padding);
    void installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding);
    void installPtrFooter(FooterAction* action, char* ptr, uint32_t padding);

    void ensureSpace(uint32_t size, uint32_t alignment);

    char* allocObject(uint32_t size, uint32_t alignment) {
        uintptr_t mask = alignment - 1;
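        // (~fCursor + 1) & mask is the same as (-fCursor) & mask: the number of bytes needed to
        // round fCursor up to the next multiple of alignment (alignment is a power of two).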
        uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        uintptr_t totalSize = size + alignedOffset;
        AssertRelease(totalSize >= size);
        if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
            this->ensureSpace(size, alignment);
            alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
        }
        return fCursor + alignedOffset;
    }

    char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);

    template <typename T>
    T* allocUninitializedArray(size_t countZ) {
        AssertRelease(SkTFitsIn<uint32_t>(countZ));
        uint32_t count = ToU32(countZ);

        char* objStart;
        AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
        uint32_t arraySize = ToU32(count * sizeof(T));
        uint32_t alignment = ToU32(alignof(T));

        if (std::is_trivially_destructible<T>::value) {
            objStart = this->allocObject(arraySize, alignment);
            fCursor = objStart + arraySize;
        } else {
            constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
            AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
            uint32_t totalSize = arraySize + overhead;
            objStart = this->allocObjectWithFooter(totalSize, alignment);

            // Can never be UB because max value is alignof(T).
            uint32_t padding = ToU32(objStart - fCursor);

            // Advance to end of array to install footer.
            fCursor = objStart + arraySize;
            this->installUint32Footer(
                [](char* footerEnd) {
                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
                    uint32_t count;
                    memmove(&count, objEnd, sizeof(uint32_t));
                    char* objStart = objEnd - count * sizeof(T);
                    T* array = (T*) objStart;
                    for (uint32_t i = 0; i < count; i++) {
                        array[i].~T();
                    }
                    return objStart;
                },
                ToU32(count),
                padding);
        }

        return (T*)objStart;
    }

    char* fDtorCursor;
    char* fCursor;
    char* fEnd;

    // We found allocating strictly doubling amounts of memory from the heap left too
    // much unused slop, particularly on Android. Instead we'll follow a Fibonacci-like
    // progression that's simple to implement and grows with roughly a 1.6 exponent:
    //
    // To start,
    //     fNextHeapAlloc = fYetNextHeapAlloc = 1*fFirstHeapAllocationSize;
    //
    // And then when we do allocate, follow a Fibonacci f(n+2) = f(n+1) + f(n) rule:
    //     void* block = malloc(fNextHeapAlloc);
    //     std::swap(fNextHeapAlloc, fYetNextHeapAlloc);
    //     fYetNextHeapAlloc += fNextHeapAlloc;
    //
    // That makes the nth allocation fib(n) * fFirstHeapAllocationSize bytes.
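    //
    // Roughly speaking, with fFirstHeapAllocationSize = 1024 (a hypothetical value), the
    // successive heap blocks would be at least 1024, 1024, 2048, 3072, 5120, 8192, ... bytes.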
    uint32_t fNextHeapAlloc,     // How many bytes minimum will we allocate next from the heap?
             fYetNextHeapAlloc;  // And then how many the next allocation after that?
};

class SkArenaAllocWithReset : public SkArenaAlloc {
public:
    SkArenaAllocWithReset(char* block, size_t blockSize, size_t firstHeapAllocation);

    explicit SkArenaAllocWithReset(size_t firstHeapAllocation)
        : SkArenaAllocWithReset(nullptr, 0, firstHeapAllocation) {}

    // Destroy all allocated objects, free any heap allocations.
    void reset();

private:
    char* const    fFirstBlock;
    const uint32_t fFirstSize;
    const uint32_t fFirstHeapAllocationSize;
};
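
// A sketch of how reset() lets one arena be reused across iterations (the loop body is
// hypothetical):
//
//   SkArenaAllocWithReset arena(nullptr, 0, 1024);
//   for (const auto& task : tasks) {
//       // ... allocate per-task objects with arena.make<...>() ...
//       arena.reset();  // destroy this iteration's objects and free heap allocations
//   }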

// Helper for defining allocators with inline/reserved storage.
// For argument declarations, stick to the base type (SkArenaAlloc).
// Note: Inheriting from the storage first means the storage will outlive the
// SkArenaAlloc, letting ~SkArenaAlloc read it as it calls destructors.
// (This is mostly only relevant for strict tools like MSAN.)
template <size_t InlineStorageSize>
class SkSTArenaAlloc : private std::array<char, InlineStorageSize>, public SkArenaAlloc {
public:
    explicit SkSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
        : SkArenaAlloc{this->data(), this->size(), firstHeapAllocation} {}
};
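
// For example (Draw is a hypothetical function, not part of this header):
//
//   void Draw(SkArenaAlloc* alloc);
//
//   SkSTArenaAlloc<1024> arena;  // 1024 bytes of inline storage, heap only if that overflows
//   Draw(&arena);                // pass as the base type, per the note above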

template <size_t InlineStorageSize>
class SkSTArenaAllocWithReset
        : private std::array<char, InlineStorageSize>, public SkArenaAllocWithReset {
public:
    explicit SkSTArenaAllocWithReset(size_t firstHeapAllocation = InlineStorageSize)
        : SkArenaAllocWithReset{this->data(), this->size(), firstHeapAllocation} {}
};

#endif  // SkArenaAlloc_DEFINED