/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"

#ifdef SK_DEBUG
#include <vector>
#endif

GrBlockAllocator::GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockIncrement
        // can effectively fit higher byte counts in its 16 bits of storage
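        // (Illustrative: with a typical 16-byte kAddressAlign, blockIncrementBytes = 300 rounds
        //  up to 304 and is stored as fBlockIncrement = 19; 16 bits can then represent effective
        //  increments of up to ~1MB.)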
        , fBlockIncrement(SkTo<uint16_t>(GrAlignTo(blockIncrementBytes, kAddressAlign)
                                         / kAddressAlign))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // The head block always fills remaining space from GrBlockAllocator's size, because it's
        // inline, but can take over the specified number of bytes immediately after it.
        , fHead(nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}

GrBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0)
        , fAllocatorMetadata(0) {
    SkASSERT(allocationSize >= (int) sizeof(Block));
    SkDEBUGCODE(fSentinel = kAssignedMarker;)
}

GrBlockAllocator::Block::~Block() {
    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
}

size_t GrBlockAllocator::totalSize() const {
    // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
    size_t size = offsetof(GrBlockAllocator, fHead) + this->scratchBlockSize();
    for (const Block* b : this->blocks()) {
        size += b->fSize;
    }
    SkASSERT(size >= this->preallocSize());
    return size;
}

size_t GrBlockAllocator::totalUsableSpace() const {
    size_t size = this->scratchBlockSize();
    if (size > 0) {
        size -= kDataStart; // scratchBlockSize reports total block size, not usable size
    }
    for (const Block* b : this->blocks()) {
        size += (b->fSize - kDataStart);
    }
    SkASSERT(size >= this->preallocUsableSpace());
    return size;
}

size_t GrBlockAllocator::totalSpaceInUse() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fCursor - kDataStart);
    }
    SkASSERT(size <= this->totalUsableSpace());
    return size;
}

GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
    // When in doubt, search in reverse to find an overlapping block.
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    for (Block* b : this->rblocks()) {
        uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
        uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
        if (lowerBound <= ptr && ptr < upperBound) {
            SkASSERT(b->fSentinel == kAssignedMarker);
            return b;
        }
    }
    return nullptr;
}

void GrBlockAllocator::releaseBlock(Block* block) {
    if (block == &fHead) {
        // Reset the cursor of the head block so that it can be reused if it becomes the new tail
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    } else {
        SkASSERT(block->fPrev);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        // The released block becomes the new scratch block (if it's bigger); otherwise delete it
        if (this->scratchBlockSize() < block->fSize) {
            SkASSERT(block != fHead.fPrev); // shouldn't already be the scratch block
            if (fHead.fPrev) {
                delete fHead.fPrev;
            }
            block->markAsScratch();
            fHead.fPrev = block;
        } else {
            delete block;
        }
    }

    // Decrement growth policy (opposite of addBlock()'s increment operations)
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing since fN0 is always 0
        if (gp == GrowthPolicy::kLinear) {
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
            int temp = fN1 - fN0; // yields prior fN0
            fN1 = fN1 - temp;     // yields prior fN1
            fN0 = temp;
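            // e.g. (fN0, fN1) = (3, 5) reverts to (2, 3), the previous pair in the sequence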
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the 2N update from addBlock
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}

void GrBlockAllocator::stealHeapBlocks(GrBlockAllocator* other) {
    Block* toSteal = other->fHead.fNext;
    if (toSteal) {
        // The other's first heap block links back to this allocator's current tail, and this
        // allocator's new tail becomes the end of the other's block list.
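        // e.g. this: head <-> A <-> B(tail); other: head' <-> C <-> D(tail')
        //  becomes: head <-> A <-> B <-> C <-> D(tail); other: head'(tail')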
        SkASSERT(other->fTail != &other->fHead);
        toSteal->fPrev = fTail;
        fTail->fNext = toSteal;
        fTail = other->fTail;
        // The other allocator becomes just its inline head block
        other->fTail = &other->fHead;
        other->fHead.fNext = nullptr;
    } // else no block to steal
}

void GrBlockAllocator::reset() {
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {
            // Reset metadata and cursor, tail points to the head block again
            fTail = b;
            b->fNext = nullptr;
            b->fCursor = kDataStart;
            b->fMetadata = 0;
            // For reset(), but NOT releaseBlock(), the head allocatorMetadata and scratch block
            // are reset/destroyed.
            b->fAllocatorMetadata = 0;
            this->resetScratchSpace();
        } else {
            delete b;
        }
    }
    SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
             fHead.metadata() == 0 && fHead.fCursor == kDataStart);

    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}

void GrBlockAllocator::resetScratchSpace() {
    if (fHead.fPrev) {
        delete fHead.fPrev;
        fHead.fPrev = nullptr;
    }
}

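// For reference, starting from the constructor's (fN0, fN1) values, the growth-policy branch
// below scales 'sizeIncrement' by the following multipliers on successive addBlock() calls:
//   kFixed:       1, 1, 1, ...
//   kLinear:      2, 3, 4, ...
//   kFibonacci:   1, 2, 3, 5, 8, ...
//   kExponential: 2, 4, 8, 16, ...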
void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
    SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    auto alignAllocSize = [](int size) {
        // Round to a nice boundary since the block isn't maxing out:
        //   if allocSize > 32K, align on a 4K boundary; otherwise align on max_align_t, to play
        //   nicely with jemalloc (heuristic borrowed from SkArenaAlloc).
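        //   (Illustrative: with a 16-byte max_align_t, 500 rounds up to 512, while 33000 exceeds
        //   32K and rounds up to 36864 on a 4K boundary.)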
        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
        return (size + mask) & ~mask;
    };

    int allocSize;
    void* mem = nullptr;
    if (this->scratchBlockSize() >= minimumSize) {
        // Activate the scratch block instead of making a new block
        SkASSERT(fHead.fPrev->isScratch());
        allocSize = fHead.fPrev->fSize;
        mem = fHead.fPrev;
        fHead.fPrev = nullptr;
    } else if (minimumSize < maxSize) {
        // Calculate the 'next' size per growth policy sequence
        GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
        int nextN1 = fN0 + fN1;
        int nextN0;
        if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
            nextN0 = fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            nextN0 = fN1;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            nextN0 = nextN1;
        }
        fN0 = std::min(kMaxN, nextN0);
        fN1 = std::min(kMaxN, nextN1);

        // However, we must guard against overflow here, since the size-based asserts only prevent
        // alignment/addition overflows, while the multiplication requires 2x bits instead of x+1.
        int sizeIncrement = fBlockIncrement * kAddressAlign;
        if (maxSize / sizeIncrement < nextN1) {
            // The growth policy would overflow, so use the max. We've already confirmed that
            // maxSize will be sufficient for the requested minimumSize
            allocSize = maxSize;
        } else {
            allocSize = std::min(alignAllocSize(std::max(minimumSize, sizeIncrement * nextN1)),
                                 maxSize);
        }
    } else {
        SkASSERT(minimumSize == maxSize);
        // Still align on a nice boundary, no max clamping since that would just undo the alignment
        allocSize = alignAllocSize(minimumSize);
    }

    // Create new block and append to the linked list of blocks in this allocator
    if (!mem) {
        mem = operator new(allocSize);
    }
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}

#ifdef SK_DEBUG
void GrBlockAllocator::validate() const {
    std::vector<const Block*> blocks;
    const Block* prev = nullptr;
    for (const Block* block : this->blocks()) {
        blocks.push_back(block);

        SkASSERT(kAssignedMarker == block->fSentinel);
        if (block == &fHead) {
            // The head block's fPrev may be non-null if it holds a scratch block, but that's not
            // considered part of the linked list
            SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
        } else {
            SkASSERT(prev == block->fPrev);
        }
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        SkASSERT(block->fSize >= (int) sizeof(Block));
        SkASSERT(block->fCursor >= kDataStart);
        SkASSERT(block->fCursor <= block->fSize);

        prev = block;
    }
    SkASSERT(prev == fTail);
    SkASSERT(blocks.size() > 0);
    SkASSERT(blocks[0] == &fHead);

    // Confirm reverse iteration matches forward iteration
    size_t j = blocks.size();
    for (const Block* b : this->rblocks()) {
        SkASSERT(b == blocks[j - 1]);
        j--;
    }
    SkASSERT(j == 0);
}
#endif