/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBlockAllocator.h"

#include <algorithm>
#include <cstddef>
#include <limits>

#ifdef SK_DEBUG
#include <vector>
#endif

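// fBlockIncrement stores the increment in units of kBlockIncrementUnits so that its 16 bits can
// represent larger byte counts. As a worked sketch, assuming kBlockIncrementUnits equals
// alignof(std::max_align_t) (commonly 16): blockIncrementBytes = 1000 rounds up to 1008 and is
// stored as 1008 / 16 = 63; multiplying by kBlockIncrementUnits later recovers the rounded
// byte count.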
GrBlockAllocator::GrBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockIncrement
        // can effectively fit higher byte counts in its 16 bits of storage
        , fBlockIncrement(SkTo<uint16_t>(GrAlignTo(blockIncrementBytes, kBlockIncrementUnits)
                                         / kBlockIncrementUnits))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // The head block always fills the remaining space of the GrBlockAllocator itself, since
        // it's inline, but it can take over the specified number of bytes immediately after it.
        , fHead(nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}

GrBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0) {
    SkASSERT(allocationSize >= (int) sizeof(Block));
    SkDEBUGCODE(fSentinel = kAssignedMarker;)
}

GrBlockAllocator::Block::~Block() {
    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // overwrite the sentinel to help catch use-after-free
}

size_t GrBlockAllocator::totalSize() const {
    // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
    size_t size = offsetof(GrBlockAllocator, fHead);
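    // offsetof(GrBlockAllocator, fHead) counts the allocator's own fields that precede the
    // inline head block; the loop below then adds each block's full fSize, including fHead's.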
    for (const Block* b : this->blocks()) {
        size += b->fSize;
    }
    SkASSERT(size >= this->preallocSize());
    return size;
}

size_t GrBlockAllocator::totalUsableSpace() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fSize - kDataStart);
    }
    SkASSERT(size >= this->preallocUsableSpace());
    return size;
}

size_t GrBlockAllocator::totalSpaceInUse() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fCursor - kDataStart);
    }
    SkASSERT(size <= this->totalUsableSpace());
    return size;
}

GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
    // Search in reverse so the most recently added blocks are checked first for an
    // overlapping data range.
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    for (Block* b : this->rblocks()) {
        uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
        uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
        if (lowerBound <= ptr && ptr < upperBound) {
            SkASSERT(b->fSentinel == kAssignedMarker);
            return b;
        }
    }
    return nullptr;
}

void GrBlockAllocator::releaseBlock(Block* block) {
    if (block->fPrev) {
        // Unlink the block from the doubly-linked list of blocks
        SkASSERT(block != &fHead);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        delete block;
    } else {
        // Reset the cursor of the head block so that it can be reused
        SkASSERT(block == &fHead);
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    }

    // Decrement the growth policy (the opposite of addBlock()'s increment operations)
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
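    // Only roll the sequence back if it has advanced past its initial state: fN0 stays 0 for
    // kFixed (and for kFibonacci before any block is added), while fN1 == 1 under a
    // non-Fibonacci policy means the sequence is still at its start. Fibonacci needs the
    // exception because its first step moves from (fN0, fN1) = (0, 1) to (1, 1), leaving fN1
    // at 1 even though there is a step to undo.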
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing; its fN0 is always 0
        if (gp == GrowthPolicy::kLinear) {
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to recover the prior two terms of the Fibonacci sequence
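            // e.g. rolling back from (fN0, fN1) = (3, 5): temp = 5 - 3 = 2, then
            // fN1 = 5 - 2 = 3 and fN0 = 2, restoring the prior pair (2, 3)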
            int temp = fN1 - fN0; // yields the prior fN0
            fN1 = fN1 - temp;     // yields the prior fN1
            fN0 = temp;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the doubling performed in addBlock()
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}

void GrBlockAllocator::reset() {
    // We can't use the rblocks() for-range since we're destroying the linked list as we go
    Block* toFree = fTail;
    while (toFree) {
        Block* prev = toFree->fPrev;
        if (prev) {
            SkASSERT(toFree != &fHead);
            delete toFree;
        } else {
            SkASSERT(toFree == &fHead);
            fTail = toFree;
            // The head block's prev is already null; its next block was deleted in the prior
            // iteration, so clear the dangling pointer and reset the cursor for reuse
            fTail->fNext = nullptr;
            fTail->fCursor = kDataStart;
            fTail->fMetadata = 0;
        }

        toFree = prev;
    }

    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}

void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
    SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    // Calculate the 'next' size per growth policy sequence
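    // The resulting fN1 sequence (the multiplier applied to the block increment) per policy is:
    //   kFixed:       1, 1, 1, 1, ...
    //   kLinear:      1, 2, 3, 4, ...
    //   kFibonacci:   1, 1, 2, 3, 5, 8, ...
    //   kExponential: 1, 2, 4, 8, 16, ...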
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    int nextN1 = fN0 + fN1;
    int nextN0;
    if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
        nextN0 = fN0;
    } else if (gp == GrowthPolicy::kFibonacci) {
        nextN0 = fN1;
    } else {
        SkASSERT(gp == GrowthPolicy::kExponential);
        nextN0 = nextN1;
    }
    fN0 = std::min(kMaxN, nextN0);
    fN1 = std::min(kMaxN, nextN1);

    // However, we must guard against overflow here: the size-based asserts only prevented
    // alignment/addition overflows, while this multiplication requires 2x bits instead of x+1.
    int sizeIncrement = fBlockIncrement * kBlockIncrementUnits;
    int allocSize;
    if (maxSize / sizeIncrement < nextN1) {
        // The growth policy would overflow, so use the max. We've already asserted that maxSize
        // is sufficient for the requested minimumSize.
        allocSize = maxSize;
    } else {
        allocSize = std::max(minimumSize, sizeIncrement * nextN1);
        // Then round to a nice boundary since the block isn't maxing out: if allocSize > 32K,
        // align to a 4K boundary; otherwise align to max_align_t, to play nicely with jemalloc
        // (behavior borrowed from SkArenaAlloc).
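        // e.g. allocSize = 40000 exceeds 32K, so the 4K mask applies:
        // (40000 + 4095) & ~4095 = 40960, the next multiple of 4096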
        int mask = allocSize > (1 << 15) ? ((1 << 12) - 1) : (kBlockIncrementUnits - 1);
        allocSize = std::min((allocSize + mask) & ~mask, maxSize);
    }

    // Create the new block and append it to the linked list of blocks in this allocator
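    // Placement-new the Block header at the front of the raw allocation; the bytes beyond the
    // header (starting at kDataStart, where fCursor is initialized) become the usable region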
    void* mem = operator new(allocSize);
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}

#ifdef SK_DEBUG
void GrBlockAllocator::validate() const {
    std::vector<const Block*> blocks;
    const Block* prev = nullptr;
    for (const Block* block : this->blocks()) {
        blocks.push_back(block);

        SkASSERT(kAssignedMarker == block->fSentinel);
        SkASSERT(prev == block->fPrev);
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        SkASSERT(block->fSize >= (int) sizeof(Block));
        SkASSERT(block->fCursor >= kDataStart);
        SkASSERT(block->fCursor <= block->fSize);

        prev = block;
    }
    SkASSERT(prev == fTail);
    SkASSERT(blocks.size() > 0);
    SkASSERT(blocks[0] == &fHead);

    // Confirm reverse iteration matches forward iteration
    size_t j = blocks.size();
    for (const Block* b : this->rblocks()) {
        SkASSERT(b == blocks[j - 1]);
        j--;
    }
    SkASSERT(j == 0);
}
#endif