// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// StackingAllocator.cpp - fast, non-thread-safe checkpoint/collapse allocator
//

//
// Non-thread-safe allocator designed for allocations with the following
// pattern:
//     allocate, allocate, allocate ... deallocate all
// There may also be recursive uses of this allocator (by the same thread), so
// the usage becomes:
//     mark checkpoint, allocate, allocate, ..., deallocate back to checkpoint
//
// Allocations come from a singly linked list of blocks with dynamically
// determined size (the goal is to have fewer block allocations than allocation
// requests).
//
// Allocations are very fast (in the case where a new block isn't allocated)
// since blocks are carved up into packets by simply moving a cursor through
// the block.
//
// Allocations are guaranteed to be quadword aligned.
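//
// Typical usage, sketched for illustration only (error handling and the
// contract machinery are omitted; the functions used are all defined in
// this file):
//
//     StackingAllocator alloc;
//     void *checkpoint = alloc.GetCheckpoint();    // mark
//     void *p1 = alloc.UnsafeAllocSafeThrow(64);   // fast cursor-bump alloc
//     void *p2 = alloc.UnsafeAllocSafeThrow(128);
//     // ... use p1 and p2 ...
//     alloc.Collapse(checkpoint);                  // frees everything since the mark
//
// Checkpoints may nest on the same thread; each Collapse restores the state
// captured by its matching GetCheckpoint.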



#include "common.h"
#include "excep.h"


#if 0
#define INC_COUNTER(_name, _amount) do { \
    unsigned _count = REGUTIL::GetLong(W("AllocCounter_") _name, 0, NULL, HKEY_CURRENT_USER); \
    REGUTIL::SetLong(W("AllocCounter_") _name, _count+(_amount), NULL, HKEY_CURRENT_USER); \
} while (0)
#define MAX_COUNTER(_name, _amount) do { \
    unsigned _count = REGUTIL::GetLong(W("AllocCounter_") _name, 0, NULL, HKEY_CURRENT_USER); \
    REGUTIL::SetLong(W("AllocCounter_") _name, max(_count, (_amount)), NULL, HKEY_CURRENT_USER); \
} while (0)
#else
#define INC_COUNTER(_name, _amount)
#define MAX_COUNTER(_name, _amount)
#endif


StackingAllocator::StackingAllocator()
{
    WRAPPER_NO_CONTRACT;

    _ASSERTE((sizeof(StackBlock) & 7) == 0);
    _ASSERTE((sizeof(Checkpoint) & 7) == 0);

    m_FirstBlock = NULL;
    m_FirstFree = NULL;
    m_InitialBlock = NULL;
    m_DeferredFreeBlock = NULL;

#ifdef _DEBUG
    m_CheckpointDepth = 0;
    m_Allocs = 0;
    m_Checkpoints = 0;
    m_Collapses = 0;
    m_BlockAllocs = 0;
    m_MaxAlloc = 0;
#endif

    Init(true);
}


StackingAllocator::~StackingAllocator()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_ANY;
    }
    CONTRACTL_END;

    Clear(NULL);

    if (m_DeferredFreeBlock)
    {
        delete [] (char*)m_DeferredFreeBlock;
        m_DeferredFreeBlock = NULL;
    }

#ifdef _DEBUG
    INC_COUNTER(W("Allocs"      ), m_Allocs);
    INC_COUNTER(W("Checkpoints" ), m_Checkpoints);
    INC_COUNTER(W("Collapses"   ), m_Collapses);
    INC_COUNTER(W("BlockAllocs" ), m_BlockAllocs);
    MAX_COUNTER(W("MaxAlloc"    ), m_MaxAlloc);
#endif
}

// Lightweight initial checkpoint
Checkpoint StackingAllocator::s_initialCheckpoint;

void *StackingAllocator::GetCheckpoint()
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    } CONTRACTL_END;

#ifdef _DEBUG
    m_CheckpointDepth++;
    m_Checkpoints++;
#endif

    // As an optimization, initial checkpoints are lightweight (they just
    // return a special marker, s_initialCheckpoint). This works because we
    // know how to restore the allocator state on a Collapse without having
    // to store any additional context info.
    if ((m_InitialBlock == NULL) || (m_FirstFree == m_InitialBlock->m_Data))
        return &s_initialCheckpoint;

    // Remember the current allocator state.
    StackBlock *pOldBlock = m_FirstBlock;
    unsigned iOldBytesLeft = m_BytesLeft;

    // Allocate a checkpoint block (just like a normal user request).
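    // (This goes through the placement operator new(size_t, StackingAllocator*)
    // defined at the end of this file, and can therefore throw on OOM.)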
    Checkpoint *c = new (this) Checkpoint();

    // Record previous allocator state in it.
    c->m_OldBlock = pOldBlock;
    c->m_OldBytesLeft = iOldBytesLeft;

    // Return the checkpoint marker.
    return c;
}


bool StackingAllocator::AllocNewBlockForBytes(unsigned n)
{
    CONTRACT (bool)
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(m_CheckpointDepth > 0);
    }
    CONTRACT_END;
    // The request is already quadword-aligned and does not fit in the
    // current block (the hard case).
    _ASSERTE(n % 8 == 0);
    _ASSERTE(n > m_BytesLeft);

    StackBlock* b = NULL;

    // We need a new block. Before asking the OS for one, check whether we
    // kept a block from a previous Collapse instead of returning it to the
    // OS. If such a block exists and is big enough, reuse it rather than
    // allocating again -- this helps when we repeatedly checkpoint across a
    // block seam, as in VSWhidbey #100462.
    if (m_DeferredFreeBlock != NULL && m_DeferredFreeBlock->m_Length >= n)
    {
        b = m_DeferredFreeBlock;
        m_DeferredFreeBlock = NULL;

        // b->m_Length doesn't need to be re-initialized; its value is still
        // valid from the original allocation.
    }
    else
    {
        // Allocate a block four times as large as the request but with a lower
        // limit of MinBlockSize and an upper limit of MaxBlockSize. If the
        // request is larger than MaxBlockSize then allocate exactly that
        // amount.
        // Additionally, if we don't have an initial block yet, use an increased
        // lower bound for the size, since we intend to cache this block.
        unsigned lower = m_InitialBlock ? MinBlockSize : InitBlockSize;
        size_t allocSize = sizeof(StackBlock) + max(n, min(max(n * 4, lower), MaxBlockSize));
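        // Worked example of the sizing policy (MaxBlockSize is 4096, per the
        // cast comment further below; MinBlockSize/InitBlockSize come from
        // the header, so the exact small-request figures depend on them):
        //   small n  -> at least 'lower' bytes, so tiny requests share a block
        //   n = 2000 -> 2000 * 4 = 8000, clamped to MaxBlockSize: 4096 bytes
        //   n = 9000 -> larger than MaxBlockSize: exactly 9000 bytes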

        // Allocate the block.
        // <TODO>@todo: Is it worth implementing a non-thread safe standard heap for
        // this allocator, to get even more MP scalability?</TODO>
        b = (StackBlock *)new (nothrow) char[allocSize];
        if (b == NULL)
            RETURN false;

        // Record the usable length (everything after the StackBlock header);
        // the block is linked into the chain below.
        b->m_Length = (unsigned) (allocSize - sizeof(StackBlock));

#ifdef _DEBUG
        m_BlockAllocs++;
#endif
    }

    // If this is the first block allocated, we record that fact since we
    // intend to cache it.
    if (m_InitialBlock == NULL)
    {
        _ASSERTE((m_FirstBlock == NULL) && (m_FirstFree == NULL) && (m_BytesLeft == 0));
        m_InitialBlock = b;
    }

    // Link new block to head of block chain and update internal state to
    // start allocating from this new block.
    b->m_Next = m_FirstBlock;
    m_FirstBlock = b;
    m_FirstFree = b->m_Data;
    // The cast below is safe: m_Length is always assigned from a value
    // derived from a UINT32 request (at most max(n, MaxBlockSize), where
    // MaxBlockSize is 4096), so it fits in an unsigned.
    m_BytesLeft = static_cast<unsigned>(b->m_Length);

    INDEBUG(b->m_Sentinal = 0);

    RETURN true;
}


void* StackingAllocator::UnsafeAllocSafeThrow(UINT32 Size)
{
    CONTRACT (void*)
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        SO_TOLERANT;
        INJECT_FAULT(ThrowOutOfMemory());
        PRECONDITION(m_CheckpointDepth > 0);
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;
    // OOM fault injection is handled inside UnsafeAllocNoThrow.

    void* retval = UnsafeAllocNoThrow(Size);
    if (retval == NULL)
        ENCLOSE_IN_EXCEPTION_HANDLER ( ThrowOutOfMemory );

    RETURN retval;
}

void *StackingAllocator::UnsafeAlloc(UINT32 Size)
{
    CONTRACT (void*)
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        SO_TOLERANT;
        INJECT_FAULT(ThrowOutOfMemory());
        PRECONDITION(m_CheckpointDepth > 0);
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;
    // OOM fault injection is handled inside UnsafeAllocNoThrow.

    void* retval = UnsafeAllocNoThrow(Size);
    if (retval == NULL)
        ThrowOutOfMemory();

    RETURN retval;
}


void StackingAllocator::Collapse(void *CheckpointMarker)
{
    LIMITED_METHOD_CONTRACT;

    _ASSERTE(m_CheckpointDepth > 0);

#ifdef _DEBUG
    m_CheckpointDepth--;
    m_Collapses++;
#endif

    Checkpoint *c = (Checkpoint *)CheckpointMarker;

    // Special case collapsing back to the initial checkpoint.
    if (c == &s_initialCheckpoint || c->m_OldBlock == NULL)
    {
        Clear(m_InitialBlock);
        Init(false);

        // confirm no buffer overruns
        INDEBUG(Validate(m_FirstBlock, m_FirstFree));

        return;
    }

    // Cache the contents of the checkpoint; the next step may deallocate it
    // (if a new block had to be allocated to accommodate the checkpoint).
    StackBlock *pOldBlock = c->m_OldBlock;
    unsigned iOldBytesLeft = c->m_OldBytesLeft;

    // Deallocate blocks until the block that was at the head of the chain
    // when the checkpoint was taken is at the head again.
    Clear(pOldBlock);

    // Restore former allocator state.
    m_FirstBlock = pOldBlock;
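    // The free cursor sits (m_Length - iOldBytesLeft) bytes into the data
    // area; everything before that point was already allocated when the
    // checkpoint was taken.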
    m_FirstFree = &pOldBlock->m_Data[pOldBlock->m_Length - iOldBytesLeft];
    m_BytesLeft = iOldBytesLeft;

    // confirm no buffer overruns
    INDEBUG(Validate(m_FirstBlock, m_FirstFree));
}


void * __cdecl operator new(size_t n, StackingAllocator * alloc)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_FAULT;
    STATIC_CONTRACT_SO_TOLERANT;

#ifdef _WIN64
    // On 64-bit platforms size_t is wider than the UINT32 the allocator
    // accepts, so reject sizes the cast below would otherwise truncate
    // (the shift is done in size_t to avoid overflowing a 32-bit int).
    if (n > ((size_t)1 << 31)) ThrowOutOfMemory();
#endif
    void *retval = alloc->UnsafeAllocNoThrow((unsigned)n);
    if (retval == NULL) ThrowOutOfMemory();

    return retval;
}

void * __cdecl operator new[](size_t n, StackingAllocator * alloc)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_FAULT;
    STATIC_CONTRACT_SO_TOLERANT;

#ifdef _WIN64
    // On 64-bit platforms size_t is wider than the UINT32 the allocator
    // accepts, so reject sizes the cast below would otherwise truncate
    // (the shift is done in size_t to avoid overflowing a 32-bit int).
    if (n > ((size_t)1 << 31)) ThrowOutOfMemory();
#else
    // The compiler passes (size_t)-1 when the new[] size computation overflows.
    if (n == (size_t)-1) ThrowOutOfMemory();
#endif

    void *retval = alloc->UnsafeAllocNoThrow((unsigned)n);
    if (retval == NULL)
        ThrowOutOfMemory();

    return retval;
}

void * __cdecl operator new(size_t n, StackingAllocator * alloc, const NoThrow&) throw()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_FAULT;
    STATIC_CONTRACT_SO_TOLERANT;

#ifdef _WIN64
    // On 64-bit platforms size_t is wider than the UINT32 the allocator
    // accepts, so reject sizes the cast below would otherwise truncate
    // (the shift is done in size_t to avoid overflowing a 32-bit int).
    if (n > ((size_t)1 << 31)) return NULL;
#endif

    return alloc->UnsafeAllocNoThrow((unsigned)n);
}

void * __cdecl operator new[](size_t n, StackingAllocator * alloc, const NoThrow&) throw()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_FAULT;
    STATIC_CONTRACT_SO_TOLERANT;

#ifdef _WIN64
    // On 64-bit platforms size_t is wider than the UINT32 the allocator
    // accepts, so reject sizes the cast below would otherwise truncate
    // (the shift is done in size_t to avoid overflowing a 32-bit int).
    if (n > ((size_t)1 << 31)) return NULL;
#else
    // The compiler passes (size_t)-1 when the new[] size computation overflows.
    if (n == (size_t)-1) return NULL;
#endif

    return alloc->UnsafeAllocNoThrow((unsigned)n);
}
