#pragma once

#if __has_include(<sanitizer/asan_interface.h>)
# include <sanitizer/asan_interface.h>
#endif
#include <algorithm>

#include <boost/core/noncopyable.hpp>

#include <Core/Defines.h>
#include <Common/Allocator.h>
#include <Common/Arena.h>
#include <Common/BitHelpers.h>

#ifndef ASAN_POISON_MEMORY_REGION
/// No-op fallbacks, so that the code below still compiles if none of the headers
/// above provides the ASan poisoning macros.
# define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
# define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#endif


namespace DB
{


/** Unlike Arena, allows you to release previously allocated chunks of memory
 * for later reuse (not necessarily only the most recently allocated ones).
 * For this, the requested size is rounded up to a power of two
 * (or up to 8 if it is smaller; sizes greater than 65536 are allocated outside the Arena entirely).
 * For each size class (14 in total: 8, 16, ..., 65536), a singly linked
 * list of free blocks is maintained.
 * When allocating, we take the head of the corresponding free list,
 * or, if the list is empty, allocate a new block from the Arena.
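 *
 * For example, a request for 100 bytes is served by a 128-byte block
 * (free list index 6), while a request for 100000 bytes exceeds 65536
 * and goes to the Allocator directly, bypassing the free lists.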
 */
class ArenaWithFreeLists : private Allocator<false>, private boost::noncopyable
{
private:
    /// If the block is free, a pointer to the next free block (or nullptr, if there are
    /// no more free blocks) is stored at its beginning. If the block is used, it holds user data.
    union Block
    {
        Block * next;
        char data[0];   /// Zero-length array: a GNU extension that acts as a flexible array member.
    };

    /// The maximum size of a piece of memory that is allocated from the Arena.
    /// Larger pieces are allocated and freed directly through the Allocator.
    static constexpr size_t max_fixed_block_size = 65536;

    /// Get the index in the free_lists array for the specified size.
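    /// For example, sizes 1..8 map to index 2 (an 8-byte block, since 1ULL << (2 + 1) == 8),
    /// sizes 9..16 map to index 3 (a 16-byte block), and so on up to index 15 (a 65536-byte block).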
    static size_t findFreeListIndex(const size_t size)
    {
        /// The block size for index i is 1ULL << (i + 1), i.e. the requested size
        /// rounded up to a power of two (but not less than 8).
        return size <= 8 ? 2 : bitScanReverse(size - 1);
    }

    /// Arena is used to allocate blocks that are not too large.
    Arena pool;

    /// Lists of free blocks. Each element points to the head of the corresponding list, or is nullptr.
    /// The first two elements are not used, but exist to simplify the arithmetic.
    Block * free_lists[16] {};

public:
    explicit ArenaWithFreeLists(
        const size_t initial_size = 4096, const size_t growth_factor = 2,
        const size_t linear_growth_threshold = 128 * 1024 * 1024)
        : pool{initial_size, growth_factor, linear_growth_threshold}
    {
    }

    char * alloc(const size_t size)
    {
        if (size > max_fixed_block_size)
            return static_cast<char *>(Allocator<false>::alloc(size));

        /// Find the list of the required size.
        const auto list_idx = findFreeListIndex(size);

        /// If there is a free block.
        if (auto & free_block_ptr = free_lists[list_idx])
        {
            /// Take it and make the head of the list point to the next
            /// item in the list. We poisoned the free block before putting
            /// it into the free list, so we have to unpoison it before
            /// reading anything.
            ASAN_UNPOISON_MEMORY_REGION(free_block_ptr,
                std::max(size, sizeof(Block)));

            const auto res = free_block_ptr->data;
            free_block_ptr = free_block_ptr->next;
            return res;
        }

        /// There is no free block of the required size: allocate a new one
        /// of the rounded-up size from the Arena.
        return pool.alloc(1ULL << (list_idx + 1));
    }

    void free(char * ptr, const size_t size)
    {
        if (size > max_fixed_block_size)
            return Allocator<false>::free(ptr, size);

        /// Find the list of the required size.
        const auto list_idx = findFreeListIndex(size);

        /// Insert the released block at the head of the list.
        auto & free_block_ptr = free_lists[list_idx];
        const auto old_head = free_block_ptr;
        free_block_ptr = reinterpret_cast<Block *>(ptr);
        free_block_ptr->next = old_head;

        /// The requested size may be less than the size of the block, but
        /// we still want to poison the entire block.
        /// Strictly speaking, the free blocks would have to be unpoisoned in the
        /// destructor, to support an underlying allocator that does not
        /// integrate with ASan. We don't do that, and rely on the fact that
        /// our underlying allocator is Arena, which does have ASan integration.
        ASAN_POISON_MEMORY_REGION(ptr, 1ULL << (list_idx + 1));
    }

    /// The size of the memory allocated from the underlying pool, in bytes.
    /// Memory allocated directly through the Allocator (pieces larger than
    /// max_fixed_block_size) is not counted here.
    size_t size() const
    {
        return pool.size();
    }
};
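
/** Example of the intended usage (a minimal sketch; the variable names are made up
 * for illustration). Note that the caller must remember the requested size and pass
 * the same value to free() that was passed to alloc():
 *
 *     ArenaWithFreeLists arena;
 *
 *     char * buf = arena.alloc(100);     /// A 128-byte block is taken from the Arena.
 *     buf[0] = 'x';                      /// ... use up to 100 bytes of it ...
 *     arena.free(buf, 100);              /// The whole 128-byte block goes to free_lists[6].
 *
 *     char * buf2 = arena.alloc(120);    /// Same size class: reuses the block freed above.
 *     arena.free(buf2, 120);
 */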


}