//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_

#ifdef _DEBUG
#  define GUARD_BLOCKS  // define to enable guard block sanity checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
//  * Makes each individual allocation much more efficient; the
//    typical allocation is trivial.
//  * Completely avoids the cost of doing individual deallocation.
//  * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//

#include <cstddef>
#include <cstring>
#include <new>      // placement new is used by initializeAllocation() and pool_allocator::construct()
#include <vector>

namespace glslang {

// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these tracking records
// never get instantiated, so they have no impact.
//

class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
# ifdef GUARD_BLOCKS
        memset(preGuard(),  guardBlockBeginVal, guardBlockSize);
        memset(data(),      userDataFill,       size);
        memset(postGuard(), guardBlockEndVal,   guardBlockSize);
# endif
    }

    void check() const {
        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;                  // size of the user data area
    unsigned char* mem;           // beginning of our allocation (pts to header)
    TAllocation* prevAlloc;       // prior allocation in the chain

    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
# ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
# else
    inline static size_t headerSize() { return 0; }
# endif
};

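// A worked sketch of the bracketed layout above, with guard blocks enabled.
// The concrete numbers are only an assumption for illustration; guardBlockSize
// and sizeof(TAllocation) are fixed by the implementation and the platform:
//
//     // if sizeof(TAllocation) == 32 and guardBlockSize == 16:
//     //   [header: 32][preGuard: 16][userData: size][postGuard: 16]
//     //   allocationSize(100) == 100 + 2*16 + 32 == 164
//     //   offsetAllocation(m) == m + 16 + 32     (start of userData)
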
//
// There are several stacks.  One tracks the user's push/pop scoping and
// is not yet implemented.  The others are simply repositories of free
// pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used here is not, and need not match, the underlying OS
// page size.  But making it about that size, or a small multiple of it,
// is likely most efficient.
//
class TPoolAllocator {
public:
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory; call pop().
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() was never called, to free all memory allocated since the
    // first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

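    // For illustration only, a hedged sketch of the intended lifetime model
    // (the pool object and sizes below are hypothetical, not part of this header):
    //
    //     TPoolAllocator pool;             // or, e.g., pool(64*1024, 16)
    //     pool.push();                     // mark a new allocation scope
    //     void* a = pool.allocate(24);     // many small, cheap allocations...
    //     void* b = pool.allocate(128);
    //     pool.pop();                      // ...all reclaimed in one step
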
protected:
    friend struct tHeader;

    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
            lastAllocation(0),
#endif
            nextPage(nextPage), pageCount(pageCount) { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;
#endif
        tHeader* nextPage;      // next page header in this list of pages
        size_t pageCount;       // number of pool pages spanned by this allocation
    };

    struct tAllocState {
        size_t offset;
        tHeader* page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
#ifndef GUARD_BLOCKS
    void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif

        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;           // granularity of allocation from the OS
    size_t alignment;          // all returned allocations will be aligned at
                               // this granularity, which will be a power of 2
    size_t alignmentMask;
    size_t headerSkip;         // amount of memory to skip to make room for the
                               // header (basically, size of header, rounded
                               // up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;         // list of popped memory
    tHeader* inUseList;        // list of all memory currently being used
    tAllocStack stack;         // stack of where to allocate from, to partition pool

    int numCalls;              // just an interesting statistic
    size_t totalBytes;         // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);             // don't allow default copy constructor
};

//
// There could potentially be many pools with pops happening at
// different times.  But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator& GetThreadPoolAllocator();
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
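
// For illustration only, a hedged sketch of that single-global-pool pattern
// (the pool object and the work done inside the scope are hypothetical):
//
//     TPoolAllocator myPool;
//     SetThreadPoolAllocator(&myPool);
//     GetThreadPoolAllocator().push();
//     // ... allocate via GetThreadPoolAllocator().allocate() or pool_allocator ...
//     GetThreadPoolAllocator().pop();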

//
// This STL-compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It uses the pools for allocation and never deallocates,
// but destructors are still run.
//
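
// A usage sketch: name pool_allocator as the container's allocator argument
// (the typedef below is illustrative, not part of this header):
//
//     typedef std::vector<int, pool_allocator<int> > TPoolVector;
//     TPoolVector v;       // default-constructs its allocator from GetThreadPoolAllocator()
//     v.push_back(42);     // element storage lives until the pool is popped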
template<class T>
class pool_allocator {
public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;
    template<class Other>
    struct rebind {
        typedef pool_allocator<Other> other;
    };
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pool_allocator() : allocator(GetThreadPoolAllocator()) { }
    pool_allocator(TPoolAllocator& a) : allocator(a) { }
    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }

    template<class Other>
    pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }

    pointer allocate(size_type n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
    pointer allocate(size_type n, const void*) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }

    void deallocate(void*, size_type) { }
    void deallocate(pointer, size_type) { }

    pointer _Charalloc(size_t n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n)); }

    void construct(pointer p, const T& val) { new ((void *)p) T(val); }
    void destroy(pointer p) { p->T::~T(); }

    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }

    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }

    TPoolAllocator& getAllocator() const { return allocator; }

protected:
    pool_allocator& operator=(const pool_allocator&) { return *this; }
    TPoolAllocator& allocator;
};

} // end namespace glslang

#endif // _POOLALLOC_INCLUDED_