// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_

#ifdef _DEBUG
#define GUARD_BLOCKS  // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//   typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
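// A minimal sketch of the class route ('TFoo' is a hypothetical class, not
// part of this header): operator new draws from the pool, and operator
// delete is a no-op because memory is reclaimed in bulk by pop()/popAll().
//
//     class TFoo {
//     public:
//         void* operator new(size_t size)
//         {
//             return GetGlobalPoolAllocator()->allocate(size);
//         }
//         void operator delete(void*) {}  // reclaimed in bulk by pop()
//     };
//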
#include <stddef.h>
#include <string.h>
#include <new>  // placement new, used to construct TAllocation headers in place
#include <vector>
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so they have no impact.
//
class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0-length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(),  guardBlockBeginVal, guardBlockSize);
        memset(data(),      userDataFill,       size);
        memset(postGuard(), guardBlockEndVal,   guardBlockSize);
#endif
    }

    void check() const {
        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
    }

    void checkAllocList() const;

    // Return the total size needed to accommodate a user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }
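    // For example, with hypothetical values guardBlockSize == 16 and
    // headerSize() == 8 (the real constants live in the .cpp), a request
    // for 100 user bytes occupies 100 + 2*16 + 8 = 140 bytes in total.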

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer.
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;            // size of the user data area
    unsigned char* mem;     // beginning of our allocation (pts to header)
    TAllocation* prevAlloc; // prior allocation in the chain

    // Support MSVC++ 6.0
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    inline static size_t headerSize() { return 0; }
#endif
};

//
// There are several stacks. One tracks the user's push() and pop() calls
// (not yet implemented). The others are simply repositories of free pages
// and used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used here is not required to match the underlying OS
// page size, but making it about that size, or a multiple of it, is
// likely most efficient.
//
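// A sketch of the allocation protocol (sizes are illustrative):
//
//     TPoolAllocator pool;
//     pool.push();                  // mark a new allocation scope
//     void* p = pool.allocate(64);  // trivial bump-pointer allocation
//     void* q = pool.allocate(128); // no per-object bookkeeping
//     pool.pop();                   // releases p and q together
//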
class TPoolAllocator {
public:
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop().
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to. Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() was never called, to free all memory since the first
    // allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory. Returns 0 if no memory is
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate. The point of this class is that deallocation
    // can be skipped by its user, as the model of use is to deallocate
    // everything at once by calling pop(), and to not have to solve memory
    // leak problems.
    //

private:
    size_t alignment;     // all returned allocations will be aligned at
                          // this granularity, which will be a power of 2
    size_t alignmentMask;

#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    friend struct tHeader;

    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
            nextPage(nextPage),
            pageCount(pageCount)
#ifdef GUARD_BLOCKS
            , lastAllocation(0)
#endif
        { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader* nextPage;
        size_t pageCount;
#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;
#endif
    };

    struct tAllocState {
        size_t offset;
        tHeader* page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks.
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
#ifdef GUARD_BLOCKS
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;          // granularity of allocation from the OS
    size_t headerSkip;        // amount of memory to skip to make room for the
                              // header (basically, the size of the header,
                              // rounded up to make it aligned)
    size_t currentPageOffset; // next offset in top of inUseList to allocate from
    tHeader* freeList;        // list of popped memory
    tHeader* inUseList;       // list of all memory currently being used
    tAllocStack mStack;       // stack of where to allocate from, to partition pool

    int numCalls;             // just an interesting statistic
    size_t totalBytes;        // just an interesting statistic

#else  // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
    std::vector<std::vector<void *>> mStack;
#endif

    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);             // don't allow default copy constructor
};


//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator* GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator);

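// A typical setup sketch: install a pool for the duration of some work,
// then release everything at once ('doWork' is a hypothetical function).
//
//     TPoolAllocator pool;
//     SetGlobalPoolAllocator(&pool);
//     doWork();       // allocations go through GetGlobalPoolAllocator()
//     pool.popAll();  // reclaim every allocation in one call
//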
//
// This STL-compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
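// A sketch of container usage ('TIntVector' is an illustrative alias):
//
//     typedef std::vector<int, pool_allocator<int> > TIntVector;
//     TIntVector v;    // default-constructs with GetGlobalPoolAllocator()
//     v.push_back(1);  // element memory comes from the pool
//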
template<class T>
class pool_allocator {
public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;

    template<class Other>
    struct rebind {
        typedef pool_allocator<Other> other;
    };
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pool_allocator() : allocator(GetGlobalPoolAllocator()) { }
    pool_allocator(TPoolAllocator& a) : allocator(&a) { }
    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }

    template<class Other>
    pool_allocator<T>& operator=(const pool_allocator<Other>& p) {
        allocator = &p.getAllocator();
        return *this;
    }

    template<class Other>
    pool_allocator(const pool_allocator<Other>& p) : allocator(&p.getAllocator()) { }

#if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
    // libCStd on some platforms has a different allocate/deallocate interface.
    // The caller pre-bakes sizeof(T) into 'n', which is the number of bytes to
    // be allocated, not the number of elements.
    void* allocate(size_type n) {
        return getAllocator().allocate(n);
    }
    void* allocate(size_type n, const void*) {
        return getAllocator().allocate(n);
    }
    void deallocate(void*, size_type) {}
#else
    pointer allocate(size_type n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    pointer allocate(size_type n, const void*) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    void deallocate(pointer, size_type) {}
#endif  // defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)

    void construct(pointer p, const T& val) { new ((void *)p) T(val); }
    void destroy(pointer p) { p->T::~T(); }

    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }

    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }

    void setAllocator(TPoolAllocator* a) { allocator = a; }
    TPoolAllocator& getAllocator() const { return *allocator; }

protected:
    TPoolAllocator* allocator;
};

#endif // _POOLALLOC_INCLUDED_