1 | // Copyright 2016 The SwiftShader Authors. All Rights Reserved. |
2 | // |
3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | // you may not use this file except in compliance with the License. |
5 | // You may obtain a copy of the License at |
6 | // |
7 | // http://www.apache.org/licenses/LICENSE-2.0 |
8 | // |
9 | // Unless required by applicable law or agreed to in writing, software |
10 | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | // See the License for the specific language governing permissions and |
13 | // limitations under the License. |
14 | |
15 | #include "PoolAlloc.h" |
16 | |
17 | #ifndef _MSC_VER |
18 | #include <stdint.h> |
19 | #endif |
#include <new>
#include <stdio.h>
#include <stdlib.h>
22 | |
23 | #include "InitializeGlobals.h" |
24 | #include "osinclude.h" |
25 | |
26 | OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX; |
27 | |
28 | bool InitializePoolIndex() |
29 | { |
30 | assert(PoolIndex == OS_INVALID_TLS_INDEX); |
31 | |
32 | PoolIndex = OS_AllocTLSIndex(); |
33 | return PoolIndex != OS_INVALID_TLS_INDEX; |
34 | } |
35 | |
36 | void FreePoolIndex() |
37 | { |
38 | assert(PoolIndex != OS_INVALID_TLS_INDEX); |
39 | |
40 | OS_FreeTLSIndex(PoolIndex); |
41 | PoolIndex = OS_INVALID_TLS_INDEX; |
42 | } |
43 | |
44 | TPoolAllocator* GetGlobalPoolAllocator() |
45 | { |
46 | assert(PoolIndex != OS_INVALID_TLS_INDEX); |
47 | return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex)); |
48 | } |
49 | |
50 | void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator) |
51 | { |
52 | assert(PoolIndex != OS_INVALID_TLS_INDEX); |
53 | OS_SetTLSValue(PoolIndex, poolAllocator); |
54 | } |
55 | |
56 | // |
57 | // Implement the functionality of the TPoolAllocator class, which |
58 | // is documented in PoolAlloc.h. |
59 | // |
60 | TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) : |
61 | alignment(allocationAlignment) |
62 | #if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
	, pageSize(growthIncrement)
	, freeList(0)
	, inUseList(0)
	, numCalls(0)
	, totalBytes(0)
68 | #endif |
69 | { |
70 | // |
71 | // Adjust alignment to be at least pointer aligned and |
72 | // power of 2. |
73 | // |
74 | size_t minAlign = sizeof(void*); |
75 | alignment &= ~(minAlign - 1); |
76 | if (alignment < minAlign) |
77 | alignment = minAlign; |
78 | size_t a = 1; |
79 | while (a < alignment) |
80 | a <<= 1; |
81 | alignment = a; |
82 | alignmentMask = a - 1; |
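
	// For example, on a 64-bit build (minAlign == 8), a requested
	// alignment of 12 is masked down to 8 and kept, while a requested
	// alignment of 24 is rounded up to the power of two 32, giving
	// alignmentMask == 31.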
83 | |
84 | #if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
85 | // |
86 | // Don't allow page sizes we know are smaller than all common |
87 | // OS page sizes. |
88 | // |
89 | if (pageSize < 4*1024) |
90 | pageSize = 4*1024; |
91 | |
92 | // |
93 | // A large currentPageOffset indicates a new page needs to |
94 | // be obtained to allocate memory. |
95 | // |
96 | currentPageOffset = pageSize; |
97 | |
98 | // |
99 | // Align header skip |
100 | // |
101 | headerSkip = minAlign; |
102 | if (headerSkip < sizeof(tHeader)) { |
103 | headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask; |
104 | } |
105 | #else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
106 | mStack.push_back({}); |
107 | #endif |
108 | } |
109 | |
110 | TPoolAllocator::~TPoolAllocator() |
111 | { |
112 | #if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
113 | while (inUseList) { |
114 | tHeader* next = inUseList->nextPage; |
115 | inUseList->~tHeader(); |
116 | delete [] reinterpret_cast<char*>(inUseList); |
117 | inUseList = next; |
118 | } |
119 | |
	//
	// We should not check the guard blocks here, because we did it
	// already when the block was placed into the free list.
	//
124 | while (freeList) { |
125 | tHeader* next = freeList->nextPage; |
126 | delete [] reinterpret_cast<char*>(freeList); |
127 | freeList = next; |
128 | } |
129 | #else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
130 | for (auto& allocs : mStack) { |
131 | for (auto alloc : allocs) { |
132 | free(alloc); |
133 | } |
134 | } |
135 | mStack.clear(); |
136 | #endif |
137 | } |
138 | |
// These static members are defined out of class to support MSVC++ 6.0.
140 | const unsigned char TAllocation::guardBlockBeginVal = 0xfb; |
141 | const unsigned char TAllocation::guardBlockEndVal = 0xfe; |
142 | const unsigned char TAllocation::userDataFill = 0xcd; |
143 | |
144 | #ifdef GUARD_BLOCKS |
145 | const size_t TAllocation::guardBlockSize = 16; |
146 | #else |
147 | const size_t TAllocation::guardBlockSize = 0; |
148 | #endif |
149 | |
150 | // |
151 | // Check a single guard block for damage |
152 | // |
153 | void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const |
154 | { |
155 | #ifdef GUARD_BLOCKS |
156 | for (size_t x = 0; x < guardBlockSize; x++) { |
157 | if (blockMem[x] != val) { |
158 | char assertMsg[80]; |
159 | |
			// We don't print the assert message; it's formatted here just
			// to be helpful (e.g. visible in a debugger) when the assert fires.
161 | #if defined(_MSC_VER) |
			_snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
					locText, size, data());
#else
			snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
					locText, size, data());
#endif
			assert(0 && "PoolAlloc: Damage in guard block");
169 | } |
170 | } |
171 | #endif |
172 | } |
173 | |
174 | |
175 | void TPoolAllocator::push() |
176 | { |
177 | #if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
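	// Save the current page offset and the head of the in-use list so
	// that pop() can rewind the allocator to exactly this point.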
178 | tAllocState state = { currentPageOffset, inUseList }; |
179 | |
180 | mStack.push_back(state); |
181 | |
182 | // |
183 | // Indicate there is no current page to allocate from. |
184 | // |
185 | currentPageOffset = pageSize; |
186 | #else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
187 | mStack.push_back({}); |
188 | #endif |
189 | } |
190 | |
191 | // |
192 | // Do a mass-deallocation of all the individual allocations |
193 | // that have occurred since the last push(), or since the |
194 | // last pop(), or since the object's creation. |
195 | // |
196 | // The deallocated pages are saved for future allocations. |
197 | // |
198 | void TPoolAllocator::pop() |
199 | { |
	if (mStack.empty())
201 | return; |
202 | |
203 | #if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
204 | tHeader* page = mStack.back().page; |
205 | currentPageOffset = mStack.back().offset; |
206 | |
207 | while (inUseList != page) { |
208 | // invoke destructor to free allocation list |
209 | inUseList->~tHeader(); |
210 | |
211 | tHeader* nextInUse = inUseList->nextPage; |
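		// Multi-page allocations are returned to the OS; single pages
		// are kept on the free list for reuse by later allocations.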
212 | if (inUseList->pageCount > 1) |
213 | delete [] reinterpret_cast<char*>(inUseList); |
214 | else { |
215 | inUseList->nextPage = freeList; |
216 | freeList = inUseList; |
217 | } |
218 | inUseList = nextInUse; |
219 | } |
220 | |
221 | mStack.pop_back(); |
222 | #else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
223 | for (auto alloc : mStack.back()) { |
224 | free(alloc); |
225 | } |
226 | mStack.pop_back(); |
227 | #endif |
228 | } |
229 | |
230 | // |
231 | // Do a mass-deallocation of all the individual allocations |
232 | // that have occurred. |
233 | // |
234 | void TPoolAllocator::popAll() |
235 | { |
	while (!mStack.empty())
237 | pop(); |
238 | } |
239 | |
240 | void* TPoolAllocator::allocate(size_t numBytes) |
241 | { |
242 | #if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
243 | // |
244 | // Just keep some interesting statistics. |
245 | // |
246 | ++numCalls; |
247 | totalBytes += numBytes; |
248 | |
	// If we are using guard blocks, all allocations are bracketed by
	// them: [guardblock][allocation][guardblock].  numBytes is how
	// much memory the caller asked for.  allocationSize is the total
	// size including guard blocks.  In a release build,
	// guardBlockSize = 0 and this all gets optimized away.
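	// (allocationSize() is defined in PoolAlloc.h; it accounts for the
	// guard blocks and the TAllocation bookkeeping header.)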
254 | size_t allocationSize = TAllocation::allocationSize(numBytes); |
255 | // Detect integer overflow. |
256 | if (allocationSize < numBytes) |
257 | return 0; |
258 | |
259 | // |
260 | // Do the allocation, most likely case first, for efficiency. |
261 | // This step could be moved to be inline sometime. |
262 | // |
263 | if (allocationSize <= pageSize - currentPageOffset) { |
264 | // |
265 | // Safe to allocate from currentPageOffset. |
266 | // |
267 | unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset; |
268 | currentPageOffset += allocationSize; |
269 | currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask; |
270 | |
271 | return initializeAllocation(inUseList, memory, numBytes); |
272 | } |
273 | |
274 | if (allocationSize > pageSize - headerSkip) { |
275 | // |
276 | // Do a multi-page allocation. Don't mix these with the others. |
277 | // The OS is efficient and allocating and free-ing multiple pages. |
278 | // |
279 | size_t numBytesToAlloc = allocationSize + headerSkip; |
280 | // Detect integer overflow. |
281 | if (numBytesToAlloc < allocationSize) |
282 | return 0; |
283 | |
		tHeader* memory = reinterpret_cast<tHeader*>(::new(std::nothrow) char[numBytesToAlloc]);
		if (memory == 0)
			return 0;
287 | |
288 | // Use placement-new to initialize header |
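		// The second argument is the page count, rounded up to whole pages.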
289 | new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize); |
290 | inUseList = memory; |
291 | |
292 | currentPageOffset = pageSize; // make next allocation come from a new page |
293 | |
294 | // No guard blocks for multi-page allocations (yet) |
295 | return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip); |
296 | } |
297 | |
298 | // |
299 | // Need a simple page to allocate from. |
300 | // |
301 | tHeader* memory; |
302 | if (freeList) { |
303 | memory = freeList; |
304 | freeList = freeList->nextPage; |
305 | } else { |
		memory = reinterpret_cast<tHeader*>(::new(std::nothrow) char[pageSize]);
		if (memory == 0)
			return 0;
309 | } |
310 | |
311 | // Use placement-new to initialize header |
312 | new(memory) tHeader(inUseList, 1); |
313 | inUseList = memory; |
314 | |
315 | unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip; |
316 | currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask; |
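	// currentPageOffset now points just past this allocation, rounded up
	// to the next alignment boundary within the fresh page.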
317 | |
318 | return initializeAllocation(inUseList, ret, numBytes); |
319 | #else // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC) |
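	// Pool allocation is disabled: fall back to plain malloc(), tracked
	// per stack level.  Over-allocate by alignmentMask bytes so the
	// returned pointer can be rounded up to the requested alignment;
	// the raw pointer saved in mStack is what free() receives later.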
320 | void *alloc = malloc(numBytes + alignmentMask); |
321 | mStack.back().push_back(alloc); |
322 | |
323 | intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc); |
324 | intAlloc = (intAlloc + alignmentMask) & ~alignmentMask; |
325 | return reinterpret_cast<void *>(intAlloc); |
326 | #endif |
327 | } |
328 | |
329 | |
330 | // |
331 | // Check all allocations in a list for damage by calling check on each. |
332 | // |
333 | void TAllocation::checkAllocList() const |
334 | { |
335 | for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc) |
336 | alloc->check(); |
337 | } |
338 | |