// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

// StackingAllocator.h -
//
// Non-thread-safe allocator for checkpoint/collapse (stack-like) allocation
// patterns; see the StackingAllocator class comment below for details.

#ifndef __stacking_allocator_h__
#define __stacking_allocator_h__

#include "util.hpp"
#include "eecontract.h"


// We use zero-sized arrays; disable the non-standard extension warning.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4200)
#endif

#ifdef _DEBUG
struct Sentinal
{
    enum { marker1Val = 0xBAD00BAD };
    Sentinal(Sentinal* next) : m_Marker1(marker1Val), m_Next(next) { LIMITED_METHOD_CONTRACT; }

    unsigned  m_Marker1;        // just some data bytes
    Sentinal* m_Next;           // linked list of these
};
#endif


// Blocks from which allocations are carved. Size is determined dynamically,
// with lower and upper bounds of MinBlockSize and MaxBlockSize respectively
// (though large allocation requests will cause a block of exactly the right
// size to be allocated).
struct StackBlock
{
    StackBlock *m_Next;             // Next oldest block in list
    DWORD_PTR   m_Length;           // Length of block excluding header (needs to be pointer-sized for alignment on IA64)
    INDEBUG(Sentinal*   m_Sentinal;)    // ensure that we don't fall off the end of the buffer
    INDEBUG(void**      m_Pad;)         // keep the size a multiple of 8
    char        m_Data[];           // Start of user allocation space
};

// Whenever a checkpoint is requested, a checkpoint structure is allocated
// (as a normal allocation) and is filled with information about the state
// of the allocator prior to the checkpoint. When a Collapse request comes
// in, we can therefore restore the state of the allocator.
// It is the address of the checkpoint structure that we hand out to the
// caller of GetCheckpoint as an opaque checkpoint marker.
struct Checkpoint
{
    StackBlock *m_OldBlock;     // Head of block list before checkpoint
    unsigned    m_OldBytesLeft; // Number of free bytes before checkpoint
};


// Non-thread-safe allocator designed for allocations with the following
// pattern:
//      allocate, allocate, allocate ... deallocate all
// There may also be recursive uses of this allocator (by the same thread), so
// the usage becomes:
//      mark checkpoint, allocate, allocate, ..., deallocate back to checkpoint
//
// Allocations come from a singly linked list of blocks with dynamically
// determined size (the goal is to have fewer block allocations than allocation
// requests).
//
// Allocations are very fast (in the case where a new block isn't allocated)
// since blocks are carved up into packets by simply moving a cursor through
// the block.
//
// Allocations are guaranteed to be quadword aligned.
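//
// A minimal usage sketch (illustrative only; Foo and the habit of wrapping
// every use in a checkpoint are assumptions for the example, not requirements
// stated elsewhere in this file):
//
//      StackingAllocator alloc;
//      void *checkpoint = alloc.GetCheckpoint();       // opaque marker for a later Collapse
//      void *buf = alloc.AllocNoThrow(S_UINT32(256));  // quadword-aligned packet; may return NULL
//      Foo  *obj = new (&alloc) Foo();                 // via the operator new overloads below
//      ...
//      alloc.Collapse(checkpoint);                     // release everything allocated since the checkpoint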
class StackingAllocator
{
public:

    enum
    {
        MinBlockSize    = 128,
        MaxBlockSize    = 4096,
        InitBlockSize   = 512
    };

#ifndef DACCESS_COMPILE
    StackingAllocator();
    ~StackingAllocator();
#else
    StackingAllocator() { LIMITED_METHOD_CONTRACT; }
#endif

    void StoreCheckpoint(Checkpoint *c)
    {
        LIMITED_METHOD_CONTRACT;

#ifdef _DEBUG
        m_CheckpointDepth++;
        m_Checkpoints++;
#endif

        // Record the previous allocator state in the supplied checkpoint.
        c->m_OldBlock = m_FirstBlock;
        c->m_OldBytesLeft = m_BytesLeft;
    }

    void* GetCheckpoint();

    // @todo move this into a .inl file as many users of this class don't need to include this body
    FORCEINLINE void * UnsafeAllocNoThrow(unsigned Size)
    {
        CONTRACT (void*)
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            SO_TOLERANT;
            INJECT_FAULT(CONTRACT_RETURN NULL;);
            PRECONDITION(m_CheckpointDepth > 0);
            POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        }
        CONTRACT_END;

#ifdef _DEBUG
        m_Allocs++;
        m_MaxAlloc = max(Size, m_MaxAlloc);
#endif

        // Special case: for a zero-sized alloc, return a non-null but invalid pointer.
        if (Size == 0)
        {
            RETURN (void*)-1;
        }

        // Round the size up to a multiple of 8 to ensure quadword alignment
        // (e.g. 13 becomes 16). If the rounding wrapped around, the request
        // was too large; fail it.
        unsigned n = (Size + 7) & ~7;
        if (n < Size)
        {
            return NULL;
        }

        // Leave room for a sentinel at the end of the packet.
        INDEBUG(n += sizeof(Sentinal));

        // Is the request too large for the current block?
        if (n > m_BytesLeft)
        {
            bool allocatedNewBlock = false;

            BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), RETURN NULL);
            allocatedNewBlock = AllocNewBlockForBytes(n);
            END_SO_INTOLERANT_CODE;

            if (!allocatedNewBlock)
            {
                RETURN NULL;
            }
        }

        // Once we get here we know we have enough bytes left in the block at the
        // head of the chain.
        _ASSERTE(n <= m_BytesLeft);

        void *ret = m_FirstFree;
        m_FirstFree += n;
        m_BytesLeft -= n;

#ifdef _DEBUG
        // Add a sentinel to the end of the packet.
        m_FirstBlock->m_Sentinal = new(m_FirstFree - sizeof(Sentinal)) Sentinal(m_FirstBlock->m_Sentinal);
#endif

        RETURN ret;
    }

    FORCEINLINE void * AllocNoThrow(S_UINT32 size)
    {
        // Reject sizes whose safe-integer computation overflowed;
        // UnsafeAllocNoThrow does the alignment rounding.
        if (size.IsOverflow()) return NULL;

        return UnsafeAllocNoThrow(size.Value());
    }

    FORCEINLINE void * AllocSafeThrow(S_UINT32 size)
    {
        WRAPPER_NO_CONTRACT;

        if (size.IsOverflow()) ThrowOutOfMemory();

        return UnsafeAllocSafeThrow(size.Value());
    }

    FORCEINLINE void * Alloc(S_UINT32 size)
    {
        WRAPPER_NO_CONTRACT;

        if (size.IsOverflow()) ThrowOutOfMemory();

        return UnsafeAlloc(size.Value());
    }

    void  Collapse(void* CheckpointMarker);

    void* UnsafeAllocSafeThrow(UINT32 size);
    void* UnsafeAlloc(UINT32 size);
private:

    bool AllocNewBlockForBytes(unsigned n);

    StackBlock *m_FirstBlock;           // Pointer to head of allocation block list
    char       *m_FirstFree;            // Pointer to first free byte in head block
    unsigned    m_BytesLeft;            // Number of free bytes left in head block
    StackBlock *m_InitialBlock;         // The first block is special; we never free it
    StackBlock *m_DeferredFreeBlock;    // Avoid going to the OS too often by deferring one free

#ifdef _DEBUG
    unsigned    m_CheckpointDepth;
    unsigned    m_Allocs;
    unsigned    m_Checkpoints;
    unsigned    m_Collapses;
    unsigned    m_BlockAllocs;
    unsigned    m_MaxAlloc;
#endif

    void Init(bool bResetInitBlock)
    {
        WRAPPER_NO_CONTRACT;

        if (bResetInitBlock || (m_InitialBlock == NULL))
        {
            Clear(NULL);
            m_FirstBlock = NULL;
            m_FirstFree = NULL;
            m_BytesLeft = 0;
            m_InitialBlock = NULL;
        }
        else
        {
            m_FirstBlock = m_InitialBlock;
            m_FirstFree = m_InitialBlock->m_Data;
            _ASSERTE(FitsIn<unsigned>(m_InitialBlock->m_Length));
            m_BytesLeft = static_cast<unsigned>(m_InitialBlock->m_Length);
        }
    }

#ifdef _DEBUG
    void Validate(StackBlock *block, void* spot)
    {
        LIMITED_METHOD_CONTRACT;

        if (!block)
            return;
        Sentinal* ptr = block->m_Sentinal;
        _ASSERTE(spot);
        while (ptr >= spot)
        {
            // If this assert fires then someone overwrote their buffer!
            // A common culprit is a PINVOKE buffer overrun. To confirm, look
            // up the stack for an NDirect.* frame and find the MethodDesc
            // associated with it. Be very suspicious if it is one that
            // has a return string buffer! That usually means the end
            // programmer did not allocate a big enough buffer before passing
            // it to the PINVOKE method.
            if (ptr->m_Marker1 != Sentinal::marker1Val)
                _ASSERTE(!"Memory overrun!! May be a bad buffer passed to PINVOKE. Turn on logging LF_STUBS level 6 to find the method");
            ptr = ptr->m_Next;
        }
        block->m_Sentinal = ptr;
    }
#endif


    void Clear(StackBlock *ToBlock)
    {
        LIMITED_METHOD_CONTRACT;

        StackBlock *p = m_FirstBlock;
        StackBlock *q;

        while (p != ToBlock)
        {
            PREFAST_ASSUME(p != NULL);

            q = p;
            p = p->m_Next;

            INDEBUG(Validate(q, q));

            // We don't give the tail block back to the OS because we can get
            // into situations where we're growing back and forth over a single
            // seam for a tiny alloc and the perf is a disaster -- VSWhidbey #100462.
            if (m_DeferredFreeBlock != NULL)
            {
                delete [] (char *)m_DeferredFreeBlock;
            }

            m_DeferredFreeBlock = q;
            m_DeferredFreeBlock->m_Next = NULL;
        }
    }

private:
    static Checkpoint s_initialCheckpoint;
};

void * __cdecl operator new(size_t n, StackingAllocator *alloc);
void * __cdecl operator new[](size_t n, StackingAllocator *alloc);
void * __cdecl operator new(size_t n, StackingAllocator *alloc, const NoThrow&) throw();
void * __cdecl operator new[](size_t n, StackingAllocator *alloc, const NoThrow&) throw();
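
// Usage sketch for the overloads above (illustrative only; 'Foo' and 'pAlloc'
// are assumed names, and the NoThrow overloads are presumed to return NULL
// rather than throw on failure, as their signatures suggest):
//
//      Foo *f = new (pAlloc) Foo();        // single object carved from the stacking allocator
//      Foo *a = new (pAlloc) Foo[10];      // array form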

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif // __stacking_allocator_h__