/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTBlockList_DEFINED
#define GrTBlockList_DEFINED

#include "src/gpu/GrBlockAllocator.h"

#include <type_traits>

// Forward declarations for the iterators used by GrTBlockList
using IndexFn = int (*)(const GrBlockAllocator::Block*);
using NextFn = int (*)(const GrBlockAllocator::Block*, int);
template<typename T, typename B> using ItemFn = T (*)(B*, int);
template <typename T, bool Forward, bool Const, IndexFn Start, IndexFn End, NextFn Next,
          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
                                                     GrBlockAllocator::Block>::type> Resolve>
class BlockIndexIterator;

/**
 * GrTBlockList manages dynamic storage for instances of T, reserving fixed blocks such that
 * allocation is amortized across every N instances. In this way it is a hybrid of an array-based
 * vector and a linked list. T can be any type and non-trivial destructors are automatically
 * invoked when the GrTBlockList is destructed. The addresses of instances are guaranteed
 * not to move except when a list is concatenated to another.
 *
 * The collection supports storing a templated number of elements inline before heap-allocated
 * blocks are made to hold additional instances. By default, the heap blocks are sized to hold the
 * same number of items as the inline block. A common pattern is to have the inline size hold only
 * a small number of items for the common case and then allocate larger blocks when needed.
 *
 * If the size of a collection is N, and its block size is B, the complexities of the common
 * operations are:
 *  - push_back()/emplace_back(): O(1), with malloc O(B)
 *  - pop_back(): O(1), with free O(B)
 *  - front()/back(): O(1)
 *  - reset(): O(N) for non-trivial types, O(N/B) for trivial types
 *  - concat(): O(B)
 *  - random access: O(N/B)
 *  - iteration: O(1) at each step
 *
 * These characteristics make it well suited to allocating items in LIFO order, acting as a
 * stack, or simply serving as a typed allocator.
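 *
 * A minimal usage sketch ('Vertex' is a hypothetical trivially-copyable struct with a
 * two-float constructor):
 *
 *   GrTBlockList<Vertex, 16> verts;         // 16 items inline; heap blocks also hold 16
 *   verts.emplace_back(1.f, 2.f);           // constructs a Vertex in place
 *   for (auto&& v : verts.items()) { ... }  // visits items oldest to newest
 *   verts.pop_back();                       // LIFO removal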
 */
template <typename T, int StartingItems = 1>
class GrTBlockList {
public:
    /**
     * Create an allocator that defaults to using StartingItems as the heap increment.
     */
    GrTBlockList() : GrTBlockList(StartingItems) {}

    /**
     * Create an allocator
     *
     * @param itemsPerBlock the number of items to allocate at once
     */
    explicit GrTBlockList(int itemsPerBlock)
            : fAllocator(GrBlockAllocator::GrowthPolicy::kFixed,
                         GrBlockAllocator::BlockOverhead<alignof(T)>() + sizeof(T)*itemsPerBlock) {}

    ~GrTBlockList() { this->reset(); }

    /**
     * Adds an item and returns it.
     *
     * @return the added item.
     */
    T& push_back() {
        return *new (this->pushItem()) T;
    }
    T& push_back(const T& t) {
        return *new (this->pushItem()) T(t);
    }
    T& push_back(T&& t) {
        return *new (this->pushItem()) T(std::move(t));
    }

    template <typename... Args>
    T& emplace_back(Args&&... args) {
        return *new (this->pushItem()) T(std::forward<Args>(args)...);
    }

    /**
     * Move all items from 'other' to the end of this collection. When this returns, 'other' will
     * be empty. Items in 'other' may be moved as part of compacting the pre-allocated start of
     * 'other' into this list (using T's move constructor or memcpy if T is trivially copyable), but
     * this is O(StartingItems) and not O(N). All other items are concatenated in O(1).
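     *
     * A usage sketch ('dst' and 'src' are hypothetical GrTBlockList<int> instances):
     *
     *   dst.concat(std::move(src));  // all of src's items now live at the end of dst
     *   SkASSERT(src.empty());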
     */
    template <int SI>
    void concat(GrTBlockList<T, SI>&& other);

    /**
     * Allocate, if needed, space to hold N more Ts before another malloc will occur.
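     *
     * For example (a sketch), calling reserve(10) and then pushing ten items performs no
     * further mallocs during those push_back() calls.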
     */
    void reserve(int n) {
        int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
        if (n > avail) {
            int reserved = n - avail;
            // Don't consider existing bytes since we've already determined how to split the N items
            fAllocator->template reserve<alignof(T)>(
                    reserved * sizeof(T), GrBlockAllocator::kIgnoreExistingBytes_Flag);
        }
    }

    /**
     * Remove the last item; only call if count() != 0.
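     *
     * This runs T's destructor and, when the popped item was the only one left in its block,
     * releases that block back to the allocator (the O(B) free noted above).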
     */
    void pop_back() {
        SkASSERT(this->count() > 0);

        GrBlockAllocator::Block* block = fAllocator->currentBlock();

        // Run dtor for the popped item
        int releaseIndex = Last(block);
        GetItem(block, releaseIndex).~T();

        if (releaseIndex == First(block)) {
            fAllocator->releaseBlock(block);
        } else {
            // Since this always follows LIFO, the block should always be able to release the memory
            SkAssertResult(block->release(releaseIndex, releaseIndex + sizeof(T)));
            block->setMetadata(Decrement(block, releaseIndex));
        }

        fAllocator->setMetadata(fAllocator->metadata() - 1);
    }

    /**
     * Removes all added items.
     */
    void reset() {
        // Invoke destructors in reverse order if not trivially destructible
        if /* constexpr */ (!std::is_trivially_destructible<T>::value) {
            for (T& t : this->ritems()) {
                t.~T();
            }
        }

        fAllocator->reset();
    }

    /**
     * Returns the item count.
     */
    int count() const {
#ifdef SK_DEBUG
        // Confirm total count matches sum of block counts
        int count = 0;
        for (const auto* b : fAllocator->blocks()) {
            if (b->metadata() == 0) {
                continue; // skip empty
            }
            count += (sizeof(T) + Last(b) - First(b)) / sizeof(T);
        }
        SkASSERT(count == fAllocator->metadata());
#endif
        return fAllocator->metadata();
    }

    /**
     * Is the count 0?
     */
    bool empty() const { return this->count() == 0; }

    /**
     * Access the first item; only call if count() != 0.
     */
    T& front() {
        // This assumes that the head block actually has room to store the first item.
        static_assert(StartingItems >= 1);
        SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
    }
    const T& front() const {
        SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
    }

    /**
     * Access the last item; only call if count() != 0.
     */
    T& back() {
        SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
    }
    const T& back() const {
        SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
    }

    /**
     * Access item by index. This is not an operator[] because it should not be considered
     * constant time; random access runs in O(N/B). Use items() or ritems() with a for-range
     * loop to visit all added items in order.
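     *
     * e.g. (a sketch) item(0) is equivalent to front(), and item(count() - 1) to back().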
     */
    T& item(int i) {
        SkASSERT(i >= 0 && i < this->count());

        // Iterate over blocks until we find the one that contains i.
        for (auto* b : fAllocator->blocks()) {
            if (b->metadata() == 0) {
                continue; // skip empty
            }

            int start = First(b);
            int end = Last(b) + sizeof(T); // exclusive
            int index = start + i * sizeof(T);
            if (index < end) {
                return GetItem(b, index);
            } else {
                i -= (end - start) / sizeof(T);
            }
        }
        SkUNREACHABLE;
    }
    const T& item(int i) const {
        return const_cast<GrTBlockList*>(this)->item(i);
    }

private:
    // Let other GrTBlockLists have access (only ever used when T and S are the same but you
    // cannot have partial specializations declared as a friend...)
    template<typename S, int N> friend class GrTBlockList;

    static constexpr size_t StartingSize =
            GrBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);

    static T& GetItem(GrBlockAllocator::Block* block, int index) {
        return *static_cast<T*>(block->ptr(index));
    }
    static const T& GetItem(const GrBlockAllocator::Block* block, int index) {
        return *static_cast<const T*>(block->ptr(index));
    }
    static int First(const GrBlockAllocator::Block* b) {
        return b->firstAlignedOffset<alignof(T)>();
    }
    static int Last(const GrBlockAllocator::Block* b) {
        return b->metadata();
    }
    static int Increment(const GrBlockAllocator::Block* b, int index) {
        return index + sizeof(T);
    }
    static int Decrement(const GrBlockAllocator::Block* b, int index) {
        return index - sizeof(T);
    }

    void* pushItem() {
        // The 'template' keyword is required because fAllocator's type depends on a template
        // parameter, which makes allocate<> a dependent template member.
        auto br = fAllocator->template allocate<alignof(T)>(sizeof(T));
        SkASSERT(br.fStart == br.fAlignedOffset ||
                 br.fAlignedOffset == First(fAllocator->currentBlock()));
        br.fBlock->setMetadata(br.fAlignedOffset);
        fAllocator->setMetadata(fAllocator->metadata() + 1);
        return br.fBlock->ptr(br.fAlignedOffset);
    }

    // StartingItems is a count of items, whereas GrSBlockAllocator is templated on total bytes,
    // so StartingSize must also account for the block allocator's overhead.
    //
    // This class uses the GrBlockAllocator's metadata to track total count of items, and per-block
    // metadata to track the index of the last allocated item within each block.
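    //
    // An illustrative example: with sizeof(T) == 4 and the first item aligned at byte offset 16,
    // three push_back() calls leave that block's metadata at 24 (the byte offset of the last
    // item) and the allocator's metadata at 3 (the total item count).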
    GrSBlockAllocator<StartingSize> fAllocator;

public:
    using Iter = BlockIndexIterator<T&, true, false, &First, &Last, &Increment, &GetItem>;
    using CIter = BlockIndexIterator<const T&, true, true, &First, &Last, &Increment, &GetItem>;
    using RIter = BlockIndexIterator<T&, false, false, &Last, &First, &Decrement, &GetItem>;
    using CRIter = BlockIndexIterator<const T&, false, true, &Last, &First, &Decrement, &GetItem>;

    /**
     * Iterate over all items in allocation order (oldest to newest) using a for-range loop:
     *
     *   for (auto&& t : this->items()) {}
     */
    Iter items() { return Iter(fAllocator.allocator()); }
    CIter items() const { return CIter(fAllocator.allocator()); }

    // Iterate from newest to oldest using a for-range loop.
    RIter ritems() { return RIter(fAllocator.allocator()); }
    CRIter ritems() const { return CRIter(fAllocator.allocator()); }

#if GR_TEST_UTILS
    // For introspection
    const GrBlockAllocator* allocator() const { return fAllocator.allocator(); }
#endif
};

template <typename T, int SI1>
template <int SI2>
void GrTBlockList<T, SI1>::concat(GrTBlockList<T, SI2>&& other) {
    // Manually move all items in other's head block into this list; all heap blocks from 'other'
    // will be appended to the block linked list (no per-item moves needed then).
    int headItemCount = 0;
    GrBlockAllocator::Block* headBlock = other.fAllocator->headBlock();
    SkDEBUGCODE(int oldCount = this->count();)
    if (headBlock->metadata() > 0) {
        int headStart = First(headBlock);
        int headEnd = Last(headBlock) + sizeof(T); // exclusive
        headItemCount = (headEnd - headStart) / sizeof(T);
        int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
        if (headItemCount > avail) {
            // Make sure there is extra room for the items beyond what's already avail. Use the
            // kIgnoreGrowthPolicy_Flag to make this reservation as tight as possible since
            // 'other's heap blocks will be appended after it and any extra space is wasted.
            fAllocator->template reserve<alignof(T)>((headItemCount - avail) * sizeof(T),
                                                     GrBlockAllocator::kIgnoreExistingBytes_Flag |
                                                     GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
        }

        if /*constexpr*/ (std::is_trivially_copyable<T>::value) {
            // memcpy all items at once, or in two chunks split between the current tail block
            // and the newly reserved space.
            SkASSERT(std::is_trivially_destructible<T>::value);
            auto copy = [](GrBlockAllocator::Block* src, int start, GrBlockAllocator* dst, int n) {
                auto target = dst->template allocate<alignof(T)>(n * sizeof(T));
                memcpy(target.fBlock->ptr(target.fAlignedOffset), src->ptr(start), n * sizeof(T));
                target.fBlock->setMetadata(target.fAlignedOffset + (n - 1) * sizeof(T));
            };

            if (avail > 0) {
                // Copy up to 'avail' items into the existing tail block
                copy(headBlock, headStart, fAllocator.allocator(), std::min(headItemCount, avail));
            }
            if (headItemCount > avail) {
                // Copy the remaining (headItemCount - avail) items into the extra reserved space
                copy(headBlock, headStart + avail * sizeof(T),
                     fAllocator.allocator(), headItemCount - avail);
            }
            fAllocator->setMetadata(fAllocator->metadata() + headItemCount);
        } else {
            // Move every item over one at a time
            for (int i = headStart; i < headEnd; i += sizeof(T)) {
                T& toMove = GetItem(headBlock, i);
                this->push_back(std::move(toMove));
                // Anything of interest should have been moved, but run this since T isn't
                // a trusted type.
                toMove.~T(); // NOLINT(bugprone-use-after-move): calling dtor always allowed
            }
        }

        other.fAllocator->releaseBlock(headBlock);
    }

    // other's head block must have been fully copied since it cannot be stolen
    SkASSERT(other.fAllocator->headBlock()->metadata() == 0 &&
             fAllocator->metadata() == oldCount + headItemCount);
    fAllocator->stealHeapBlocks(other.fAllocator.allocator());
    fAllocator->setMetadata(fAllocator->metadata() +
                            (other.fAllocator->metadata() - headItemCount));
    other.fAllocator->setMetadata(0);
}

/**
 * BlockIndexIterator provides a reusable iterator template for collections built on top of a
 * GrBlockAllocator, where each item is of the same type, and the index to an item can be iterated
 * over in a known manner. It supports const and non-const, and forward and reverse, assuming it's
 * provided with proper functions for starting, ending, and advancing.
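 *
 * For example, GrTBlockList's Iter alias above is a concrete instantiation: it walks each block
 * forward from First() to Last(), advancing by Increment() (a stride of sizeof(T) bytes).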
 */
template <typename T,    // The element type (including any modifiers)
          bool Forward,  // Are indices within a block increasing or decreasing with iteration?
          bool Const,    // Whether or not T is const
          IndexFn Start, // Returns the index of the first valid item in a block
          IndexFn End,   // Returns the index of the last valid item (so it is inclusive)
          NextFn Next,   // Returns the next index given the current index
          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
                                                     GrBlockAllocator::Block>::type> Resolve>
class BlockIndexIterator {
    using BlockIter = typename GrBlockAllocator::BlockIter<Forward, Const>;
public:
    BlockIndexIterator(BlockIter iter) : fBlockIter(iter) {}

    class Item {
    public:
        bool operator!=(const Item& other) const {
            return other.fBlock != fBlock || (SkToBool(*fBlock) && other.fIndex != fIndex);
        }

        T operator*() const {
            SkASSERT(*fBlock);
            return Resolve(*fBlock, fIndex);
        }

        Item& operator++() {
            const auto* block = *fBlock;
            SkASSERT(block && block->metadata() > 0);
            SkASSERT((Forward && Next(block, fIndex) > fIndex) ||
                     (!Forward && Next(block, fIndex) < fIndex));
            fIndex = Next(block, fIndex);
            if ((Forward && fIndex > fEndIndex) || (!Forward && fIndex < fEndIndex)) {
                ++fBlock;
                this->setIndices();
            }
            return *this;
        }

    private:
        friend BlockIndexIterator;
        using BlockItem = typename BlockIter::Item;

        Item(BlockItem block) : fBlock(block) {
            this->setIndices();
        }

        void setIndices() {
            // Skip empty blocks
            while (*fBlock && (*fBlock)->metadata() == 0) {
                ++fBlock;
            }
            if (*fBlock) {
                fIndex = Start(*fBlock);
                fEndIndex = End(*fBlock);
            } else {
                fIndex = 0;
                fEndIndex = 0;
            }

            SkASSERT((Forward && fIndex <= fEndIndex) || (!Forward && fIndex >= fEndIndex));
        }

        BlockItem fBlock;
        int fIndex;
        int fEndIndex;
    };

    Item begin() const { return Item(fBlockIter.begin()); }
    Item end() const { return Item(fBlockIter.end()); }

private:
    BlockIter fBlockIter;
};

#endif