// [Blend2D]
// 2D Vector Graphics Powered by a JIT Compiler.
//
// [License]
// Zlib - See LICENSE.md file in the package.

#include "./blapi-build_p.h"
#include "./blbitarray_p.h"
#include "./blrandom_p.h"
#include "./blruntime_p.h"
#include "./blsupport_p.h"
#include "./blthreading_p.h"
#include "./blzeroallocator_p.h"
#include "./blzoneallocator_p.h"
#include "./blzonelist_p.h"
#include "./blzonetree_p.h"

// ============================================================================
// [BLZeroAllocator - Implementation]
// ============================================================================

#ifdef BL_BUILD_DEBUG
static void blZeroAllocatorVerifyIfZeroed(uint8_t* ptr, size_t size) noexcept {
  // Must be aligned.
  BL_ASSERT(blIsAligned(ptr , sizeof(uintptr_t)));
  BL_ASSERT(blIsAligned(size, sizeof(uintptr_t)));

  uintptr_t* p = reinterpret_cast<uintptr_t*>(ptr);
  for (size_t i = 0; i < size / sizeof(uintptr_t); i++)
    BL_ASSERT(p[i] == 0);
}
#endif

//! Calculate the number of elements that would be required if `base` is
//! granularized by `granularity`. This function can be used to calculate
//! the number of BitWords to represent N bits, for example.
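//!
//! For example, with a 64-bit `BLBitWord`, `blZeroAllocatorNumGranularized(65u, 64u)`
//! evaluates to 2 BitWords, and `blZeroAllocatorNumGranularized(size_t(4096), 1024u)`
//! evaluates to 4 granules of 1024 bytes each (the allocator's `kBlockGranularity`).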
template<typename X, typename Y>
constexpr X blZeroAllocatorNumGranularized(X base, Y granularity) noexcept {
  typedef typename BLInternal::StdInt<sizeof(X), 1>::Type U;
  return X((U(base) + U(granularity) - 1) / U(granularity));
}

//! Based on asmjit's JitAllocator, but modified and enhanced for our own purposes.
class BLZeroAllocator {
public:
  BL_NONCOPYABLE(BLZeroAllocator)

  enum : uint32_t {
    kBlockAlignment = 64,
    kBlockGranularity = 1024,

    kMinBlockSize = 1048576, // 1MB.
    kMaxBlockSize = 8388608  // 8MB.
  };

  static constexpr size_t bitWordCountFromAreaSize(uint32_t areaSize) noexcept {
    return blAlignUp<size_t>(areaSize, blBitSizeOf<BLBitWord>()) / blBitSizeOf<BLBitWord>();
  }

  class Block : public BLZoneTreeNode<Block>,
                public BLZoneListNode<Block> {
  public:
    BL_NONCOPYABLE(Block)

    enum Flags : uint32_t {
      //! This is a statically allocated block.
      kFlagStatic = 0x00000001u,
      //! Block is dirty (some members need to be updated).
      kFlagDirty = 0x80000000u
    };

    //! Zeroed buffer managed by this block.
    uint8_t* _buffer;
    //! `_buffer` aligned to `kBlockAlignment`.
    uint8_t* _bufferAligned;
    //! Size of `_buffer` in bytes.
    size_t _blockSize;

    //! Block flags.
    uint32_t _flags;
    //! Size of the whole block area (bit-vector size).
    uint32_t _areaSize;
    //! Used area (number of bits set in the bit-vector).
    uint32_t _areaUsed;
    //! The largest unused continuous area in the bit-vector (or `_areaSize` to initiate rescan).
    uint32_t _largestUnusedArea;
    //! Start of a search range (for unused bits).
    uint32_t _searchStart;
    //! End of a search range (for unused bits).
    uint32_t _searchEnd;
    //! Bit vector representing all used areas (0 = unused, 1 = used).
    BLBitWord _bitVector[1];

    BL_INLINE Block(uint8_t* buffer, size_t blockSize, uint32_t areaSize) noexcept
      : BLZoneTreeNode(),
        _buffer(buffer),
        _bufferAligned(blAlignUp(buffer, kBlockAlignment)),
        _blockSize(blockSize),
        _flags(0),
        _areaSize(areaSize),
        _areaUsed(0),
        _largestUnusedArea(areaSize),
        _searchStart(0),
        _searchEnd(areaSize) {}

    BL_INLINE uint8_t* bufferAligned() const noexcept { return _bufferAligned; }
    BL_INLINE size_t blockSize() const noexcept { return _blockSize; }

    BL_INLINE size_t overheadSize() const noexcept {
      return sizeof(Block) - sizeof(BLBitWord) + blZeroAllocatorNumGranularized(areaSize(), blBitSizeOf<BLBitWord>()) * sizeof(BLBitWord);
    }

    BL_INLINE uint32_t flags() const noexcept { return _flags; }
    BL_INLINE bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
    BL_INLINE void addFlags(uint32_t flags) noexcept { _flags |= flags; }
    BL_INLINE void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }

    BL_INLINE uint32_t areaSize() const noexcept { return _areaSize; }
    BL_INLINE uint32_t areaUsed() const noexcept { return _areaUsed; }
    BL_INLINE uint32_t areaAvailable() const noexcept { return areaSize() - areaUsed(); }
    BL_INLINE uint32_t largestUnusedArea() const noexcept { return _largestUnusedArea; }

    BL_INLINE void resetBitVector() noexcept {
      memset(_bitVector, 0, blZeroAllocatorNumGranularized(_areaSize, blBitSizeOf<BLBitWord>()) * sizeof(BLBitWord));
    }

    // RBTree default CMP uses '<' and '>' operators.
    BL_INLINE bool operator<(const Block& other) const noexcept { return bufferAligned() < other.bufferAligned(); }
    BL_INLINE bool operator>(const Block& other) const noexcept { return bufferAligned() > other.bufferAligned(); }

    // Special implementation for querying blocks by `key`, which must be in `[buffer, buffer + blockSize)` range.
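    // For example (hypothetical addresses): a block with `bufferAligned() == 0x10000`
    // and `blockSize() == 0x1000` compares neither '<' nor '>' against any key in
    // [0x10000, 0x11000), so a tree lookup with such a key returns the block that
    // contains it.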
    BL_INLINE bool operator<(const uint8_t* key) const noexcept { return bufferAligned() + blockSize() <= key; }
    BL_INLINE bool operator>(const uint8_t* key) const noexcept { return bufferAligned() > key; }
  };

  //! Mutex for thread safety.
  mutable BLMutex _mutex;
  //! Tree that contains all blocks.
  BLZoneTree<Block> _tree;
  //! Doubly-linked list of blocks.
  BLZoneList<Block> _blocks;
  //! Allocated block count.
  size_t _blockCount;
  //! Area size of the base block.
  size_t _baseAreaSize;
  //! Number of bits reserved across all blocks.
  size_t _totalAreaSize;
  //! Number of bits used across all blocks.
  size_t _totalAreaUsed;
  //! A threshold to trigger auto-cleanup.
  size_t _cleanupThreshold;
  //! Memory overhead required to manage blocks.
  size_t _overheadSize;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  BL_INLINE BLZeroAllocator(Block* baseBlock) noexcept
    : _tree(),
      _blocks(),
      _blockCount(0),
      _baseAreaSize(0),
      _totalAreaSize(0),
      _totalAreaUsed(0),
      _cleanupThreshold(0),
      _overheadSize(0) {

    baseBlock->addFlags(Block::kFlagStatic);
    insertBlock(baseBlock);

    _baseAreaSize = _totalAreaSize;
    _cleanupThreshold = _totalAreaSize;
  }

  BL_INLINE ~BLZeroAllocator() noexcept {
    _cleanupInternal();
  }

  // --------------------------------------------------------------------------
  // [Block Management]
  // --------------------------------------------------------------------------

  // Allocate a new `BLZeroAllocator::Block` for the given `blockSize`.
  Block* newBlock(size_t blockSize) noexcept {
    uint32_t areaSize = uint32_t((blockSize + kBlockGranularity - 1) / kBlockGranularity);
    uint32_t numBitWords = (areaSize + blBitSizeOf<BLBitWord>() - 1u) / blBitSizeOf<BLBitWord>();

    size_t blockStructSize = sizeof(Block) + size_t(numBitWords - 1) * sizeof(BLBitWord);
    Block* block = static_cast<Block*>(malloc(blockStructSize));
    uint8_t* buffer = static_cast<uint8_t*>(::calloc(1, blockSize + kBlockAlignment));

    // Out of memory.
    if (BL_UNLIKELY(!block || !buffer)) {
      if (buffer)
        free(buffer);

      if (block)
        free(block);

      return nullptr;
    }

    block = new(block) Block(buffer, blockSize, areaSize);
    block->resetBitVector();
    return block;
  }
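
  // For example (assuming a 64-bit BLBitWord), a 1MB block maps to
  // `areaSize == 1048576 / 1024 == 1024` granules and `numBitWords == 1024 / 64 == 16`,
  // so `blockStructSize` is `sizeof(Block) + 15 * sizeof(BLBitWord)` because one
  // BLBitWord is already embedded in `Block::_bitVector`.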

  void deleteBlock(Block* block) noexcept {
    BL_ASSERT(!(block->hasFlag(Block::kFlagStatic)));

    free(block->_buffer);
    free(block);
  }

  void insertBlock(Block* block) noexcept {
    // Add to RBTree and List.
    _tree.insert(block);
    _blocks.append(block);

    // Update statistics.
    _blockCount++;
    _totalAreaSize += block->areaSize();
    _overheadSize += block->overheadSize();
  }

  void removeBlock(Block* block) noexcept {
    // Remove from RBTree and List.
    _tree.remove(block);
    _blocks.unlink(block);

    // Update statistics.
    _blockCount--;
    _totalAreaSize -= block->areaSize();
    _overheadSize -= block->overheadSize();
  }

  BL_INLINE size_t calculateIdealBlockSize(size_t allocationSize) noexcept {
    uint32_t kMaxSizeShift = blBitCtz(kMaxBlockSize) - blBitCtz(kMinBlockSize);

    size_t blockSize = size_t(kMinBlockSize) << blMin<size_t>(_blockCount, kMaxSizeShift);
    if (blockSize < allocationSize)
      blockSize = blAlignUp(allocationSize, blockSize);
    return blockSize;
  }
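
  // For example, with `kMinBlockSize == 1MB` and `kMaxBlockSize == 8MB` the shift
  // range is 3, so the preferred block size grows 1MB, 2MB, 4MB, 8MB, 8MB, ... as
  // blocks are added, and a larger `allocationSize` is rounded up to a multiple of
  // that preferred size.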

  BL_INLINE size_t calculateCleanupThreshold() const noexcept {
    if (_blockCount <= 6)
      return 0;

    size_t area = _totalAreaSize - _baseAreaSize;
    size_t threshold = area / 5u;
    return _baseAreaSize + threshold;
  }
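
  // For example, with 7 blocks, a base area of 1024 granules and a total area of
  // 11264 granules the threshold becomes `1024 + (11264 - 1024) / 5 == 3072`
  // granules; with 6 blocks or fewer the threshold is zero, which disables the
  // release-time auto-cleanup below.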

  // --------------------------------------------------------------------------
  // [Cleanup]
  // --------------------------------------------------------------------------

  void _cleanupInternal(size_t n = SIZE_MAX) noexcept {
    Block* block = _blocks.last();

    while (block && n) {
      Block* prev = block->prev();
      if (block->areaUsed() == 0 && !(block->hasFlag(Block::kFlagStatic))) {
        removeBlock(block);
        deleteBlock(block);
        n--;
      }
      block = prev;
    }

    _cleanupThreshold = calculateCleanupThreshold();
  }

  BL_INLINE void cleanup() noexcept {
    BLMutexGuard guard(_mutex);
    _cleanupInternal();
  }

  // --------------------------------------------------------------------------
  // [Memory Info]
  // --------------------------------------------------------------------------

  BL_INLINE void onMemoryInfo(BLRuntimeMemoryInfo* memoryInfo) noexcept {
    BLMutexGuard guard(_mutex);

    memoryInfo->zmUsed = _totalAreaUsed * kBlockGranularity;
    memoryInfo->zmReserved = _totalAreaSize * kBlockGranularity;
    memoryInfo->zmOverhead = _overheadSize;
    memoryInfo->zmBlockCount = _blockCount;
  }

  // --------------------------------------------------------------------------
  // [Alloc / Release]
  // --------------------------------------------------------------------------

  void* _allocInternal(size_t size, size_t* allocatedSize) noexcept {
    constexpr uint32_t kNoIndex = blMaxValue<uint32_t>();

    // Align to minimum granularity by default.
    size = blAlignUp<size_t>(size, kBlockGranularity);
    *allocatedSize = 0;

    if (BL_UNLIKELY(size == 0 || size > blMaxValue<uint32_t>() / 2))
      return nullptr;

    Block* block = _blocks.first();
    uint32_t areaIndex = kNoIndex;
    uint32_t areaSize = uint32_t(blZeroAllocatorNumGranularized(size, kBlockGranularity));

    // Try to find the requested memory area in existing blocks.
    if (block) {
      Block* initial = block;
      do {
        Block* next = block->hasNext() ? block->next() : _blocks.first();
        if (block->areaAvailable() >= areaSize) {
          if (block->hasFlag(Block::kFlagDirty) || block->largestUnusedArea() >= areaSize) {
            uint32_t blockAreaSize = block->areaSize();
            uint32_t searchStart = block->_searchStart;
            uint32_t searchEnd = block->_searchEnd;

            BLBitVectorFlipIterator<BLBitWord> it(
              block->_bitVector, blZeroAllocatorNumGranularized(searchEnd, blBitSizeOf<BLBitWord>()), searchStart, blBitOnes<BLBitWord>());

            // If there is unused area available then there has to be at least one match.
            BL_ASSERT(it.hasNext());

            uint32_t bestArea = blockAreaSize;
            uint32_t largestArea = 0;

            uint32_t holeIndex = uint32_t(it.peekNext());
            uint32_t holeEnd = holeIndex;

            searchStart = holeIndex;
            do {
              holeIndex = uint32_t(it.nextAndFlip());
              if (holeIndex >= searchEnd) break;

              holeEnd = it.hasNext() ? blMin(searchEnd, uint32_t(it.nextAndFlip())) : searchEnd;
              uint32_t holeSize = holeEnd - holeIndex;

              if (holeSize >= areaSize && bestArea >= holeSize) {
                largestArea = blMax(largestArea, bestArea);
                bestArea = holeSize;
                areaIndex = holeIndex;
              }
              else {
                largestArea = blMax(largestArea, holeSize);
              }
            } while (it.hasNext());
            searchEnd = holeEnd;

            // Because we have traversed the entire block, we can now cache the
            // largest unused area and the narrowed search range for the next traversal.
            block->_searchStart = searchStart;
            block->_searchEnd = searchEnd;
            block->_largestUnusedArea = largestArea;
            block->clearFlags(Block::kFlagDirty);

            if (areaIndex != kNoIndex) {
              if (searchStart == areaIndex)
                block->_searchStart += areaSize;
              break;
            }
          }
        }

        block = next;
      } while (block != initial);
    }

    // Allocate a new block if there is no existing region of the required size.
    if (areaIndex == kNoIndex) {
      size_t blockSize = calculateIdealBlockSize(size);
      block = newBlock(blockSize);

      if (BL_UNLIKELY(!block))
        return nullptr;

      insertBlock(block);
      _cleanupThreshold = calculateCleanupThreshold();

      areaIndex = 0;
      block->_searchStart = areaSize;
      block->_largestUnusedArea = block->areaSize() - areaSize;
    }

    // Update statistics.
    _totalAreaUsed += areaSize;
    block->_areaUsed += areaSize;

    // Handle special cases.
    if (block->areaAvailable() == 0) {
      // The whole block is filled.
      block->_searchStart = block->areaSize();
      block->_searchEnd = 0;
      block->_largestUnusedArea = 0;
      block->clearFlags(Block::kFlagDirty);
    }

    // Mark the newly allocated area as occupied in the bit-vector.
    blBitArrayFillInternal(block->_bitVector, areaIndex, areaSize);

    // Return a pointer to allocated memory.
    uint8_t* result = block->bufferAligned() + areaIndex * kBlockGranularity;
    BL_ASSERT(result >= block->bufferAligned());
    BL_ASSERT(result <= block->bufferAligned() + block->blockSize() - size);

    *allocatedSize = size;
    return result;
  }
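
  // A small worked example of the hole search above (illustrative bit pattern,
  // independent of the BLBitWord width): for used bits `1 1 0 0 1 0 0 0` and a
  // request of `areaSize == 2`, the flip iterator visits the holes [2, 4) and
  // [5, 8); both fit, the smaller fitting hole wins, so `areaIndex == 2` and the
  // two bits at [2, 4) are then set by `blBitArrayFillInternal()`.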

  void _releaseInternal(void* ptr, size_t size) noexcept {
    BL_ASSERT(ptr != nullptr);
    BL_ASSERT(size != 0);

    Block* block = _tree.get(static_cast<uint8_t*>(ptr));
    BL_ASSERT(block != nullptr);

  #ifdef BL_BUILD_DEBUG
    blZeroAllocatorVerifyIfZeroed(static_cast<uint8_t*>(ptr), size);
  #endif

    // Offset relative to the start of the block.
    size_t byteOffset = (size_t)((uint8_t*)ptr - block->bufferAligned());

    // The first bit representing the allocated area and the number of bits it spans.
    uint32_t areaIndex = uint32_t(byteOffset / kBlockGranularity);
    uint32_t areaSize = uint32_t(blZeroAllocatorNumGranularized(size, kBlockGranularity));

    // Update the search region and statistics.
    block->_searchStart = blMin(block->_searchStart, areaIndex);
    block->_searchEnd = blMax(block->_searchEnd, areaIndex + areaSize);
    block->addFlags(Block::kFlagDirty);

    block->_areaUsed -= areaSize;
    _totalAreaUsed -= areaSize;

    // Clear bits used to mark this area as occupied.
    blBitArrayClearInternal(block->_bitVector, areaIndex, areaSize);

    if (_totalAreaUsed < _cleanupThreshold)
      _cleanupInternal(1);
  }
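
  // For example, releasing a pointer at `bufferAligned() + 4096` with `size == 2048`
  // maps to `areaIndex == 4` and `areaSize == 2` (given the 1024-byte granularity),
  // which clears exactly the two bits set by the matching allocation.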

  BL_INLINE void* alloc(size_t size, size_t* allocatedSize) noexcept {
    BLMutexGuard guard(_mutex);
    return _allocInternal(size, allocatedSize);
  }

  BL_INLINE void* resize(void* prevPtr, size_t prevSize, size_t size, size_t* allocatedSize) noexcept {
    BLMutexGuard guard(_mutex);
    if (prevPtr != nullptr)
      _releaseInternal(prevPtr, prevSize);
    return _allocInternal(size, allocatedSize);
  }

  BL_INLINE void release(void* ptr, size_t size) noexcept {
    BLMutexGuard guard(_mutex);
    _releaseInternal(ptr, size);
  }
};

static BLWrap<BLZeroAllocator> blZeroMemAllocator;

// ============================================================================
// [BLZeroAllocator - Static Buffer]
// ============================================================================

// Base memory is zeroed memory reserved by the linker (static storage). By
// default we use a 1MB block as a base buffer and only obtain more memory
// from the system when that's not enough.

struct BLZeroAllocatorStaticBlock {
  enum : uint32_t {
    kBlockSize = 1024u * 1024u,
    kAreaSize = blZeroAllocatorNumGranularized(kBlockSize, BLZeroAllocator::kBlockGranularity),
    kBitWordCount = blZeroAllocatorNumGranularized(kAreaSize, blBitSizeOf<BLBitWord>())
  };

  BLWrap<BLZeroAllocator::Block> block;
  BLBitWord bitWords[kBitWordCount];
};
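
// With the values above and a 64-bit BLBitWord this evaluates to
// `kAreaSize == 1048576 / 1024 == 1024` granules and `kBitWordCount == 1024 / 64 == 16`,
// so the static block's metadata and bit-vector stay small while the actual payload
// lives in `BLZeroAllocatorStaticBuffer` below.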

struct alignas(64) BLZeroAllocatorStaticBuffer {
  uint8_t buffer[BLZeroAllocatorStaticBlock::kBlockSize];
};

static BLZeroAllocatorStaticBlock blZeroAllocatorStaticBlock;
static BLZeroAllocatorStaticBuffer blZeroAllocatorStaticBuffer;

// ============================================================================
// [BLZeroAllocator - API]
// ============================================================================

void* blZeroAllocatorAlloc(size_t size, size_t* allocatedSize) noexcept {
  return blZeroMemAllocator->alloc(size, allocatedSize);
}

void* blZeroAllocatorResize(void* prevPtr, size_t prevSize, size_t size, size_t* allocatedSize) noexcept {
  return blZeroMemAllocator->resize(prevPtr, prevSize, size, allocatedSize);
}

void blZeroAllocatorRelease(void* ptr, size_t size) noexcept {
  blZeroMemAllocator->release(ptr, size);
}
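
// A minimal usage sketch of the internal API above (the caller-side code is
// illustrative only). Note that the release path expects the memory to be zeroed
// again by the caller, which is verified in debug builds:
//
//   size_t allocatedSize;
//   void* p = blZeroAllocatorAlloc(4096, &allocatedSize);
//   if (p) {
//     // `p` points to `allocatedSize` (>= 4096) bytes of zeroed memory.
//     // ... use the buffer, then zero whatever was written ...
//     blZeroAllocatorRelease(p, allocatedSize);
//   }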

// ============================================================================
// [BLZeroAllocator - Runtime Init]
// ============================================================================

static void BL_CDECL blZeroAllocatorRtShutdown(BLRuntimeContext* rt) noexcept {
  BL_UNUSED(rt);
  blZeroMemAllocator.destroy();
}

static void BL_CDECL blZeroAllocatorRtCleanup(BLRuntimeContext* rt, uint32_t cleanupFlags) noexcept {
  BL_UNUSED(rt);
  if (cleanupFlags & BL_RUNTIME_CLEANUP_ZEROED_POOL)
    blZeroMemAllocator->cleanup();
}

static void BL_CDECL blZeroAllocatorRtMemoryInfo(BLRuntimeContext* rt, BLRuntimeMemoryInfo* memoryInfo) noexcept {
  BL_UNUSED(rt);
  blZeroMemAllocator->onMemoryInfo(memoryInfo);
}

void blZeroAllocatorRtInit(BLRuntimeContext* rt) noexcept {
  BLZeroAllocator::Block* block =
    blZeroAllocatorStaticBlock.block.init(
      blZeroAllocatorStaticBuffer.buffer,
      BLZeroAllocatorStaticBlock::kBlockSize,
      BLZeroAllocatorStaticBlock::kAreaSize);

  blZeroMemAllocator.init(block);

  rt->shutdownHandlers.add(blZeroAllocatorRtShutdown);
  rt->cleanupHandlers.add(blZeroAllocatorRtCleanup);
  rt->memoryInfoHandlers.add(blZeroAllocatorRtMemoryInfo);
}

// ============================================================================
// [BLZeroAllocator - Unit Tests]
// ============================================================================

#ifdef BL_TEST
// A helper class to verify that BLZeroAllocator doesn't return addresses that overlap.
class BLZeroAllocatorWrapper {
public:
  BL_INLINE BLZeroAllocatorWrapper() noexcept {}

  // Address and size of a memory region.
  class Range {
  public:
    BL_INLINE Range(uint8_t* addr, size_t size) noexcept
      : addr(addr),
        size(size) {}
    uint8_t* addr;
    size_t size;
  };

  // Based on BLZeroAllocator::Block, serves our purpose well...
  class Record : public BLZoneTreeNode<Record>,
                 public Range {
  public:
    BL_INLINE Record(uint8_t* addr, size_t size)
      : BLZoneTreeNode<Record>(),
        Range(addr, size) {}

    BL_INLINE bool operator<(const Record& other) const noexcept { return addr < other.addr; }
    BL_INLINE bool operator>(const Record& other) const noexcept { return addr > other.addr; }

    BL_INLINE bool operator<(const uint8_t* key) const noexcept { return addr + size <= key; }
    BL_INLINE bool operator>(const uint8_t* key) const noexcept { return addr > key; }
  };

  BLZoneTree<Record> _records;

  void _insert(void* p_, size_t size) noexcept {
    uint8_t* p = static_cast<uint8_t*>(p_);
    uint8_t* pEnd = p + size - 1;

    Record* record = _records.get(p);
    if (record)
      EXPECT(record == nullptr,
             "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);

    record = _records.get(pEnd);
    if (record)
      EXPECT(record == nullptr,
             "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);

    void* rPtr = malloc(sizeof(Record));
    EXPECT(rPtr != nullptr,
           "Out of memory, cannot allocate 'Record'");
    _records.insert(new(rPtr) Record(p, size));
  }

  void _remove(void* p) noexcept {
    Record* record = _records.get(static_cast<uint8_t*>(p));
    EXPECT(record != nullptr,
           "Address [%p] doesn't exist\n", p);

    _records.remove(record);
    free(record);
  }

  void* alloc(size_t size) noexcept {
    size_t allocatedSize = 0;
    void* p = blZeroAllocatorAlloc(size, &allocatedSize);
    EXPECT(p != nullptr,
           "BLZeroAllocator failed to allocate '%u' bytes\n", unsigned(size));

    for (size_t i = 0; i < allocatedSize; i++) {
      EXPECT(static_cast<const uint8_t*>(p)[i] == 0,
             "The returned pointer doesn't point to zeroed memory %p[%u]\n", p, unsigned(size));
    }

    _insert(p, allocatedSize);
    return p;
  }

  size_t getSizeOfPtr(void* p) noexcept {
    Record* record = _records.get(static_cast<uint8_t*>(p));
    return record ? record->size : size_t(0);
  }

  void release(void* p) noexcept {
    size_t size = getSizeOfPtr(p);
    _remove(p);
    blZeroAllocatorRelease(p, size);
  }
};

static void blZeroAllocatorTestShuffle(void** ptrArray, size_t count, BLRandom& prng) noexcept {
  for (size_t i = 0; i < count; ++i)
    std::swap(ptrArray[i], ptrArray[size_t(prng.nextUInt32() % count)]);
}

static void blZeroAllocatorTestUsage() noexcept {
  BLRuntimeMemoryInfo mi;
  BLRuntime::queryMemoryInfo(&mi);

  INFO("NumBlocks: %9llu"         , (unsigned long long)(mi.zmBlockCount));
  INFO("UsedSize : %9llu [Bytes]" , (unsigned long long)(mi.zmUsed));
  INFO("Reserved : %9llu [Bytes]" , (unsigned long long)(mi.zmReserved));
  INFO("Overhead : %9llu [Bytes]" , (unsigned long long)(mi.zmOverhead));
}

UNIT(blend2d_zero_allocator) {
  BLZeroAllocatorWrapper wrapper;
  BLRandom prng(0);

  size_t i;
  size_t kCount = 50000;

  INFO("Memory alloc/release test - %u allocations", unsigned(kCount));

  void** ptrArray = (void**)malloc(sizeof(void*) * size_t(kCount));
  EXPECT(ptrArray != nullptr,
         "Couldn't allocate '%u' bytes for pointer-array", unsigned(sizeof(void*) * size_t(kCount)));

  INFO("Allocating zeroed memory...");
  for (i = 0; i < kCount; i++)
    ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 8000) + 128);
  blZeroAllocatorTestUsage();

  INFO("Releasing zeroed memory...");
  for (i = 0; i < kCount; i++)
    wrapper.release(ptrArray[i]);
  blZeroAllocatorTestUsage();

  INFO("Submitting manual cleanup...");
  BLRuntime::cleanup(BL_RUNTIME_CLEANUP_ZEROED_POOL);
  blZeroAllocatorTestUsage();

  INFO("Allocating zeroed memory...");
  for (i = 0; i < kCount; i++)
    ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 8000) + 128);
  blZeroAllocatorTestUsage();

  INFO("Shuffling...");
  blZeroAllocatorTestShuffle(ptrArray, unsigned(kCount), prng);

  INFO("Releasing 50% blocks...");
  for (i = 0; i < kCount / 2; i++)
    wrapper.release(ptrArray[i]);
  blZeroAllocatorTestUsage();

  INFO("Allocating 50% blocks again...");
  for (i = 0; i < kCount / 2; i++)
    ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 8000) + 128);
  blZeroAllocatorTestUsage();

  INFO("Releasing zeroed memory...");
  for (i = 0; i < kCount; i++)
    wrapper.release(ptrArray[i]);
  blZeroAllocatorTestUsage();

  free(ptrArray);
}
#endif