1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | //***************************************************************************** |
5 | // LoaderHeap.h |
6 | // |
7 | |
8 | // |
9 | // Utility functions for managing memory allocations that typically do not |
10 | // need releasing. |
11 | // |
12 | //***************************************************************************** |
13 | |
14 | |
15 | #ifndef __LoaderHeap_h__ |
16 | #define __LoaderHeap_h__ |
17 | |
18 | #include "utilcode.h" |
19 | #include "ex.h" |
20 | |
21 | //============================================================================== |
22 | // Interface used to back out loader heap allocations. |
23 | //============================================================================== |
class ILoaderHeapBackout
{
// Convenience macro: callers write BackoutMem(pMem, dwSize). In debug builds
// the caller's file/line are captured automatically; the original allocation
// callsite is unknown here, hence the "UNKNOWN"/-1 placeholders.
#ifdef _DEBUG
#define BackoutMem(pMem, dwSize) RealBackoutMem( (pMem), (dwSize), __FILE__, __LINE__, "UNKNOWN", -1 )
#else
#define BackoutMem(pMem, dwSize) RealBackoutMem( (pMem), (dwSize) )
#endif

public:
    // Returns a block previously handed out by this heap.
    //   pMem   - start of the block, as returned by the allocator
    //   dwSize - the size originally requested
    // Debug builds additionally record the backout callsite (szFile/lineNum)
    // and the allocation callsite (szAllocFile/allocLineNum) for diagnostics.
    virtual void RealBackoutMem(void *pMem
                                , size_t dwSize
#ifdef _DEBUG
                                , __in __in_z const char *szFile
                                , int lineNum
                                , __in __in_z const char *szAllocFile
                                , int allocLineNum
#endif
                                ) = 0;
};
43 | |
44 | //============================================================================== |
45 | // This structure packages up all the data needed to back out an AllocMem. |
46 | // It's mainly a short term parking place to get the data from the AllocMem |
47 | // to the AllocMemHolder while preserving the illusion that AllocMem() still |
48 | // returns just a pointer as it did in V1. |
49 | //============================================================================== |
50 | struct TaggedMemAllocPtr |
51 | { |
52 | // Note: For AllocAlignedMem blocks, m_pMem and m_dwRequestedSize are the actual values to pass |
53 | // to BackoutMem. Do not add "m_dwExtra" |
54 | void *m_pMem; //Pointer to AllocMem'd block (needed to pass back to BackoutMem) |
55 | size_t m_dwRequestedSize; //Requested allocation size (needed to pass back to BackoutMem) |
56 | |
57 | ILoaderHeapBackout *m_pHeap; //The heap that alloc'd the block (needed to know who to call BackoutMem on) |
58 | |
59 | //For AllocMem'd blocks, this is always 0. |
60 | //For AllocAlignedMem blocks, you have to add m_dwExtra to m_pMem to arrive |
61 | // at the actual aligned pointer. |
62 | size_t ; |
63 | |
64 | #ifdef _DEBUG |
65 | const char *m_szFile; //File that called AllocMem |
66 | int m_lineNum; //Line # of AllocMem callsite |
67 | #endif |
68 | |
69 | //! Note: this structure is copied around using bitwise copy ("="). |
70 | //! Don't get too fancy putting stuff in here. It's really just a temporary |
71 | //! holding place to get stuff from RealAllocMem() to the MemAllocHolder. |
72 | |
73 | |
74 | public: |
75 | |
76 | // |
77 | // This makes "void *ptr = pLoaderHeap->AllocMem()" work as in V1. |
78 | // |
79 | operator void*() const |
80 | { |
81 | LIMITED_METHOD_CONTRACT; |
82 | return (void*)(m_dwExtra + (BYTE*)m_pMem); |
83 | } |
84 | |
85 | template < typename T > |
86 | T cast() const |
87 | { |
88 | LIMITED_METHOD_CONTRACT; |
89 | return reinterpret_cast< T >( operator void *() ); |
90 | } |
91 | }; |
92 | |
93 | |
94 | |
95 | // # bytes to leave between allocations in debug mode |
96 | // Set to a > 0 boundary to debug problems - I've made this zero, otherwise a 1 byte allocation becomes |
97 | // a (1 + LOADER_HEAP_DEBUG_BOUNDARY) allocation |
98 | #define LOADER_HEAP_DEBUG_BOUNDARY 0 |
99 | |
100 | #define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB) |
101 | |
102 | typedef DPTR(struct LoaderHeapBlock) PTR_LoaderHeapBlock; |
103 | |
104 | struct LoaderHeapBlock |
105 | { |
106 | PTR_LoaderHeapBlock pNext; |
107 | PTR_VOID pVirtualAddress; |
108 | size_t dwVirtualSize; |
109 | BOOL m_fReleaseMemory; |
110 | |
111 | #ifndef DACCESS_COMPILE |
112 | // pVirtualMemory - the start address of the virtual memory |
113 | // cbVirtualMemory - the length in bytes of the virtual memory |
114 | // fReleaseMemory - should LoaderHeap be responsible for releasing this memory |
115 | void Init(void *pVirtualMemory, |
116 | size_t cbVirtualMemory, |
117 | BOOL fReleaseMemory) |
118 | { |
119 | LIMITED_METHOD_CONTRACT; |
120 | this->pNext = NULL; |
121 | this->pVirtualAddress = pVirtualMemory; |
122 | this->dwVirtualSize = cbVirtualMemory; |
123 | this->m_fReleaseMemory = fReleaseMemory; |
124 | } |
125 | |
126 | // Just calls LoaderHeapBlock::Init |
127 | LoaderHeapBlock(void *pVirtualMemory, |
128 | size_t cbVirtualMemory, |
129 | BOOL fReleaseMemory) |
130 | { |
131 | WRAPPER_NO_CONTRACT; |
132 | Init(pVirtualMemory, cbVirtualMemory, fReleaseMemory); |
133 | } |
134 | |
135 | LoaderHeapBlock() |
136 | { |
137 | WRAPPER_NO_CONTRACT; |
138 | Init(NULL, 0, FALSE); |
139 | } |
140 | #else |
141 | // No ctors in DAC builds |
142 | LoaderHeapBlock() {} |
143 | #endif |
144 | }; |
145 | |
146 | struct LoaderHeapFreeBlock; |
147 | |
148 | // Collection of methods for helping in debugging heap corruptions |
149 | #ifdef _DEBUG |
150 | class LoaderHeapSniffer; |
151 | struct LoaderHeapEvent; |
152 | #endif |
153 | |
154 | |
155 | |
156 | |
157 | |
158 | |
159 | |
160 | |
161 | //=============================================================================== |
162 | // This is the base class for LoaderHeap and ExplicitControlLoaderHeap. Unfortunately, |
163 | // this class has become schizophrenic. Sometimes, it's used as a simple |
164 | // allocator that's semantically (but not perfwise!) equivalent to a blackbox |
// alloc/free heap. Other times, it's used by callers who are actually aware
166 | // of how it reserves addresses and want direct control over the range over which |
167 | // this thing allocates. These two types of allocations are handed out |
168 | // from two independent pools inside the heap. |
169 | // |
170 | // The backout strategy we use for the simple heap probably isn't |
171 | // directly applicable to the more advanced uses. |
172 | // |
173 | // We don't have time to refactor this so as a second-best measure, |
174 | // we make most of UnlockedLoaderHeap's methods protected and force everyone |
// to use them through two public derived classes that are mutual siblings.
176 | // |
177 | // The LoaderHeap is the black-box heap and has a Backout() method but none |
178 | // of the advanced features that let you control address ranges. |
179 | // |
180 | // The ExplicitControlLoaderHeap exposes all the advanced features but |
181 | // has no Backout() feature. (If someone wants a backout feature, they need |
182 | // to design an appropriate one into this class.) |
183 | //=============================================================================== |
184 | class UnlockedLoaderHeap |
185 | { |
186 | #ifdef _DEBUG |
187 | friend class LoaderHeapSniffer; |
188 | #endif |
189 | |
190 | #ifdef DACCESS_COMPILE |
191 | friend class ClrDataAccess; |
192 | #endif |
193 | |
194 | private: |
195 | // Linked list of ClrVirtualAlloc'd pages |
196 | PTR_LoaderHeapBlock m_pFirstBlock; |
197 | |
198 | // Allocation pointer in current block |
199 | PTR_BYTE m_pAllocPtr; |
200 | |
201 | // Points to the end of the committed region in the current block |
202 | PTR_BYTE m_pPtrToEndOfCommittedRegion; |
203 | PTR_BYTE m_pEndReservedRegion; |
204 | |
205 | PTR_LoaderHeapBlock m_pCurBlock; |
206 | |
207 | // When we need to ClrVirtualAlloc() MEM_RESERVE a new set of pages, number of bytes to reserve |
208 | DWORD m_dwReserveBlockSize; |
209 | |
210 | // When we need to commit pages from our reserved list, number of bytes to commit at a time |
211 | DWORD m_dwCommitBlockSize; |
212 | |
213 | // Range list to record memory ranges in |
214 | RangeList * m_pRangeList; |
215 | |
216 | size_t m_dwTotalAlloc; |
217 | |
218 | size_t * m_pPrivatePerfCounter_LoaderBytes; |
219 | |
220 | DWORD m_Options; |
221 | |
222 | LoaderHeapFreeBlock *m_pFirstFreeBlock; |
223 | |
224 | // This is used to hold on to a block of reserved memory provided to the |
225 | // constructor. We do this instead of adding it as the first block because |
226 | // that requires comitting the first page of the reserved block, and for |
227 | // startup working set reasons we want to delay that as long as possible. |
228 | LoaderHeapBlock m_reservedBlock; |
229 | |
230 | public: |
231 | |
232 | #ifdef _DEBUG |
233 | enum |
234 | { |
235 | kCallTracing = 0x00000001, // Keep a permanent log of all callers |
236 | |
237 | kEncounteredOOM = 0x80000000, // One time flag to record that an OOM interrupted call tracing |
238 | } |
239 | LoaderHeapDebugFlags; |
240 | |
241 | DWORD m_dwDebugFlags; |
242 | |
243 | LoaderHeapEvent *m_pEventList; // Linked list of events (in reverse time order) |
244 | #endif |
245 | |
246 | |
247 | |
248 | #ifdef _DEBUG |
249 | size_t m_dwDebugWastedBytes; |
250 | static DWORD s_dwNumInstancesOfLoaderHeaps; |
251 | #endif |
252 | |
253 | #ifdef _DEBUG |
254 | size_t DebugGetWastedBytes() |
255 | { |
256 | WRAPPER_NO_CONTRACT; |
257 | return m_dwDebugWastedBytes + GetBytesAvailCommittedRegion(); |
258 | } |
259 | #endif |
260 | |
261 | #ifdef _DEBUG |
262 | // Stubs allocated from a LoaderHeap will have unwind info registered with NT. |
263 | // The info must be unregistered when the heap is destroyed. |
264 | BOOL m_fPermitStubsWithUnwindInfo; |
265 | BOOL m_fStubUnwindInfoUnregistered; |
266 | #endif |
267 | |
268 | public: |
269 | BOOL m_fExplicitControl; // Am I a LoaderHeap or an ExplicitControlLoaderHeap? |
270 | |
271 | #ifdef DACCESS_COMPILE |
272 | public: |
273 | void EnumMemoryRegions(enum CLRDataEnumMemoryFlags flags); |
274 | #endif |
275 | |
276 | public: |
277 | typedef bool (PTR_VOID pvArgs, PTR_VOID pvAllocationBase, SIZE_T cbReserved); |
278 | void (EnumPageRegionsCallback *pCallback, PTR_VOID pvArgs); |
279 | |
280 | #ifndef DACCESS_COMPILE |
281 | protected: |
282 | // Use this version if dwReservedRegionAddress already points to a |
283 | // blob of reserved memory. This will set up internal data structures, |
284 | // using the provided, reserved memory. |
285 | UnlockedLoaderHeap(DWORD dwReserveBlockSize, |
286 | DWORD dwCommitBlockSize, |
287 | const BYTE* dwReservedRegionAddress, |
288 | SIZE_T dwReservedRegionSize, |
289 | size_t *pPrivatePerfCounter_LoaderBytes = NULL, |
290 | RangeList *pRangeList = NULL, |
291 | BOOL fMakeExecutable = FALSE); |
292 | |
293 | ~UnlockedLoaderHeap(); |
294 | #endif |
295 | |
296 | private: |
297 | size_t GetBytesAvailCommittedRegion(); |
298 | size_t GetBytesAvailReservedRegion(); |
299 | |
300 | protected: |
301 | // number of bytes available in region |
302 | size_t UnlockedGetReservedBytesFree() |
303 | { |
304 | LIMITED_METHOD_CONTRACT; |
305 | return m_pEndReservedRegion - m_pAllocPtr; |
306 | } |
307 | |
308 | private: |
309 | // Get some more committed pages - either commit some more in the current reserved region, or, if it |
310 | // has run out, reserve another set of pages |
311 | BOOL GetMoreCommittedPages(size_t dwMinSize); |
312 | |
313 | protected: |
314 | // Reserve some pages at any address |
315 | BOOL UnlockedReservePages(size_t dwCommitBlockSize); |
316 | |
317 | protected: |
318 | // In debug mode, allocate an extra LOADER_HEAP_DEBUG_BOUNDARY bytes and fill it with invalid data. The reason we |
319 | // do this is that when we're allocating vtables out of the heap, it is very easy for code to |
320 | // get careless, and end up reading from memory that it doesn't own - but since it will be |
321 | // reading some other allocation's vtable, no crash will occur. By keeping a gap between |
322 | // allocations, it is more likely that these errors will be encountered. |
323 | void *UnlockedAllocMem(size_t dwSize |
324 | #ifdef _DEBUG |
325 | ,__in __in_z const char *szFile |
326 | ,int lineNum |
327 | #endif |
328 | ); |
329 | void *UnlockedAllocMem_NoThrow(size_t dwSize |
330 | #ifdef _DEBUG |
331 | ,__in __in_z const char *szFile |
332 | ,int lineNum |
333 | #endif |
334 | ); |
335 | |
336 | |
337 | |
338 | |
339 | |
340 | protected: |
341 | // Allocates memory aligned on power-of-2 boundary. |
342 | // |
343 | // The return value is a pointer that's guaranteed to be aligned. |
344 | // |
345 | // FREEING THIS BLOCK: Underneath, the actual block allocated may |
346 | // be larger and start at an address prior to the one you got back. |
347 | // It is this adjusted size and pointer that you pass to BackoutMem. |
348 | // The required adjustment is passed back thru the pdwExtra pointer. |
349 | // |
350 | // Here is how to properly backout the memory: |
351 | // |
352 | // size_t dwExtra; |
353 | // void *pMem = UnlockedAllocAlignedMem(dwRequestedSize, alignment, &dwExtra); |
354 | // _ASSERTE( 0 == (pMem & (alignment - 1)) ); |
355 | // UnlockedBackoutMem( ((BYTE*)pMem) - dExtra, dwRequestedSize + dwExtra ); |
356 | // |
357 | // If you use the AllocMemHolder or AllocMemTracker, all this is taken care of |
358 | // behind the scenes. |
359 | // |
360 | // |
361 | void *UnlockedAllocAlignedMem(size_t dwRequestedSize |
362 | ,size_t dwAlignment |
363 | ,size_t * |
364 | #ifdef _DEBUG |
365 | ,__in __in_z const char *szFile |
366 | ,int lineNum |
367 | #endif |
368 | ); |
369 | |
370 | void *UnlockedAllocAlignedMem_NoThrow(size_t dwRequestedSize |
371 | ,size_t dwAlignment |
372 | ,size_t * |
373 | #ifdef _DEBUG |
374 | ,__in __in_z const char *szFile |
375 | ,int lineNum |
376 | #endif |
377 | ); |
378 | |
379 | protected: |
380 | // This frees memory allocated by UnlockAllocMem. It's given this horrible name to emphasize |
381 | // that it's purpose is for error path leak prevention purposes. You shouldn't |
382 | // use LoaderHeap's as general-purpose alloc-free heaps. |
383 | void UnlockedBackoutMem(void *pMem |
384 | , size_t dwSize |
385 | #ifdef _DEBUG |
386 | , __in __in_z const char *szFile |
387 | , int lineNum |
388 | , __in __in_z const char *szAllocFile |
389 | , int AllocLineNum |
390 | #endif |
391 | ); |
392 | |
393 | public: |
394 | // Perf Counter reports the size of the heap |
395 | size_t GetSize () |
396 | { |
397 | LIMITED_METHOD_CONTRACT; |
398 | return m_dwTotalAlloc; |
399 | } |
400 | |
401 | BOOL IsExecutable(); |
402 | |
403 | public: |
404 | #ifdef _DEBUG |
405 | void DumpFreeList(); |
406 | #endif |
407 | |
408 | public: |
409 | // Extra CallTracing support |
410 | #ifdef _DEBUG |
411 | void UnlockedClearEvents(); //Discard saved events |
412 | void UnlockedCompactEvents(); //Discard matching alloc/free events |
413 | void UnlockedPrintEvents(); //Print event list |
414 | #endif |
415 | |
416 | protected: |
417 | void *UnlockedAllocMemForCode_NoThrow(size_t , size_t dwCodeSize, DWORD dwCodeAlignment, size_t dwReserveForJumpStubs); |
418 | |
419 | void UnlockedSetReservedRegion(BYTE* dwReservedRegionAddress, SIZE_T dwReservedRegionSize, BOOL fReleaseMemory); |
420 | }; |
421 | |
422 | //=============================================================================== |
423 | // Create the LoaderHeap lock. It's the same lock for several different Heaps. |
424 | //=============================================================================== |
425 | inline CRITSEC_COOKIE CreateLoaderHeapLock() |
426 | { |
427 | return ClrCreateCriticalSection(CrstLoaderHeap,CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)); |
428 | } |
429 | |
430 | //=============================================================================== |
431 | // The LoaderHeap is the black-box heap and has a Backout() method but none |
432 | // of the advanced features that let you control address ranges. |
433 | //=============================================================================== |
434 | typedef DPTR(class LoaderHeap) PTR_LoaderHeap; |
435 | class LoaderHeap : public UnlockedLoaderHeap, public ILoaderHeapBackout |
436 | { |
437 | private: |
438 | CRITSEC_COOKIE m_CriticalSection; |
439 | |
440 | #ifndef DACCESS_COMPILE |
441 | public: |
442 | LoaderHeap(DWORD dwReserveBlockSize, |
443 | DWORD dwCommitBlockSize, |
444 | size_t *pPrivatePerfCounter_LoaderBytes = NULL, |
445 | RangeList *pRangeList = NULL, |
446 | BOOL fMakeExecutable = FALSE |
447 | ) |
448 | : UnlockedLoaderHeap(dwReserveBlockSize, |
449 | dwCommitBlockSize, |
450 | NULL, 0, |
451 | pPrivatePerfCounter_LoaderBytes, |
452 | pRangeList, |
453 | fMakeExecutable) |
454 | { |
455 | WRAPPER_NO_CONTRACT; |
456 | m_CriticalSection = NULL; |
457 | m_CriticalSection = CreateLoaderHeapLock(); |
458 | m_fExplicitControl = FALSE; |
459 | } |
460 | |
461 | public: |
462 | LoaderHeap(DWORD dwReserveBlockSize, |
463 | DWORD dwCommitBlockSize, |
464 | const BYTE* dwReservedRegionAddress, |
465 | SIZE_T dwReservedRegionSize, |
466 | size_t *pPrivatePerfCounter_LoaderBytes = NULL, |
467 | RangeList *pRangeList = NULL, |
468 | BOOL fMakeExecutable = FALSE |
469 | ) |
470 | : UnlockedLoaderHeap(dwReserveBlockSize, |
471 | dwCommitBlockSize, |
472 | dwReservedRegionAddress, |
473 | dwReservedRegionSize, |
474 | pPrivatePerfCounter_LoaderBytes, |
475 | pRangeList, |
476 | fMakeExecutable) |
477 | { |
478 | WRAPPER_NO_CONTRACT; |
479 | m_CriticalSection = NULL; |
480 | m_CriticalSection = CreateLoaderHeapLock(); |
481 | m_fExplicitControl = FALSE; |
482 | } |
483 | |
484 | #endif // DACCESS_COMPILE |
485 | |
486 | virtual ~LoaderHeap() |
487 | { |
488 | WRAPPER_NO_CONTRACT; |
489 | |
490 | #ifndef DACCESS_COMPILE |
491 | if (m_CriticalSection != NULL) |
492 | { |
493 | ClrDeleteCriticalSection(m_CriticalSection); |
494 | } |
495 | #endif // DACCESS_COMPILE |
496 | } |
497 | |
498 | |
499 | |
500 | #ifdef _DEBUG |
501 | #define AllocMem(dwSize) RealAllocMem( (dwSize), __FILE__, __LINE__ ) |
502 | #define AllocMem_NoThrow(dwSize) RealAllocMem_NoThrow( (dwSize), __FILE__, __LINE__ ) |
503 | #else |
504 | #define AllocMem(dwSize) RealAllocMem( (dwSize) ) |
505 | #define AllocMem_NoThrow(dwSize) RealAllocMem_NoThrow( (dwSize) ) |
506 | #endif |
507 | |
508 | public: |
509 | FORCEINLINE TaggedMemAllocPtr RealAllocMem(S_SIZE_T dwSize |
510 | #ifdef _DEBUG |
511 | ,__in __in_z const char *szFile |
512 | ,int lineNum |
513 | #endif |
514 | ) |
515 | { |
516 | WRAPPER_NO_CONTRACT; |
517 | |
518 | if(dwSize.IsOverflow()) ThrowOutOfMemory(); |
519 | |
520 | return RealAllocMemUnsafe(dwSize.Value() COMMA_INDEBUG(szFile) COMMA_INDEBUG(lineNum)); |
521 | |
522 | } |
523 | |
524 | FORCEINLINE TaggedMemAllocPtr RealAllocMem_NoThrow(S_SIZE_T dwSize |
525 | #ifdef _DEBUG |
526 | ,__in __in_z const char *szFile |
527 | ,int lineNum |
528 | #endif |
529 | ) |
530 | { |
531 | WRAPPER_NO_CONTRACT; |
532 | |
533 | if(dwSize.IsOverflow()) { |
534 | TaggedMemAllocPtr tmap; |
535 | tmap.m_pMem = NULL; |
536 | tmap.m_dwRequestedSize = dwSize.Value(); |
537 | tmap.m_pHeap = this; |
538 | tmap.m_dwExtra = 0; |
539 | #ifdef _DEBUG |
540 | tmap.m_szFile = szFile; |
541 | tmap.m_lineNum = lineNum; |
542 | #endif |
543 | |
544 | return tmap; |
545 | } |
546 | |
547 | return RealAllocMemUnsafe_NoThrow(dwSize.Value() COMMA_INDEBUG(szFile) COMMA_INDEBUG(lineNum)); |
548 | } |
549 | private: |
550 | |
551 | TaggedMemAllocPtr RealAllocMemUnsafe(size_t dwSize |
552 | #ifdef _DEBUG |
553 | ,__in __in_z const char *szFile |
554 | ,int lineNum |
555 | #endif |
556 | ) |
557 | { |
558 | WRAPPER_NO_CONTRACT; |
559 | |
560 | |
561 | void *pResult; |
562 | TaggedMemAllocPtr tmap; |
563 | |
564 | CRITSEC_Holder csh(m_CriticalSection); |
565 | pResult = UnlockedAllocMem(dwSize |
566 | #ifdef _DEBUG |
567 | , szFile |
568 | , lineNum |
569 | #endif |
570 | ); |
571 | tmap.m_pMem = pResult; |
572 | tmap.m_dwRequestedSize = dwSize; |
573 | tmap.m_pHeap = this; |
574 | tmap.m_dwExtra = 0; |
575 | #ifdef _DEBUG |
576 | tmap.m_szFile = szFile; |
577 | tmap.m_lineNum = lineNum; |
578 | #endif |
579 | return tmap; |
580 | } |
581 | |
582 | TaggedMemAllocPtr RealAllocMemUnsafe_NoThrow(size_t dwSize |
583 | #ifdef _DEBUG |
584 | ,__in __in_z const char *szFile |
585 | ,int lineNum |
586 | #endif |
587 | ) |
588 | { |
589 | WRAPPER_NO_CONTRACT; |
590 | |
591 | void *pResult; |
592 | TaggedMemAllocPtr tmap; |
593 | |
594 | CRITSEC_Holder csh(m_CriticalSection); |
595 | |
596 | pResult = UnlockedAllocMem_NoThrow(dwSize |
597 | #ifdef _DEBUG |
598 | , szFile |
599 | , lineNum |
600 | #endif |
601 | ); |
602 | |
603 | tmap.m_pMem = pResult; |
604 | tmap.m_dwRequestedSize = dwSize; |
605 | tmap.m_pHeap = this; |
606 | tmap.m_dwExtra = 0; |
607 | #ifdef _DEBUG |
608 | tmap.m_szFile = szFile; |
609 | tmap.m_lineNum = lineNum; |
610 | #endif |
611 | |
612 | return tmap; |
613 | } |
614 | |
615 | |
616 | |
617 | #ifdef _DEBUG |
618 | #define AllocAlignedMem(dwSize, dwAlign) RealAllocAlignedMem( (dwSize), (dwAlign), __FILE__, __LINE__) |
619 | #define AllocAlignedMem_NoThrow(dwSize, dwAlign) RealAllocAlignedMem_NoThrow( (dwSize), (dwAlign), __FILE__, __LINE__) |
620 | #else |
621 | #define AllocAlignedMem(dwSize, dwAlign) RealAllocAlignedMem( (dwSize), (dwAlign) ) |
622 | #define AllocAlignedMem_NoThrow(dwSize, dwAlign) RealAllocAlignedMem_NoThrow( (dwSize), (dwAlign) ) |
623 | #endif |
624 | |
625 | public: |
626 | TaggedMemAllocPtr RealAllocAlignedMem(size_t dwRequestedSize |
627 | ,size_t dwAlignment |
628 | #ifdef _DEBUG |
629 | ,__in __in_z const char *szFile |
630 | ,int lineNum |
631 | #endif |
632 | ) |
633 | { |
634 | WRAPPER_NO_CONTRACT; |
635 | |
636 | CRITSEC_Holder csh(m_CriticalSection); |
637 | |
638 | |
639 | TaggedMemAllocPtr tmap; |
640 | void *pResult; |
641 | size_t ; |
642 | |
643 | pResult = UnlockedAllocAlignedMem(dwRequestedSize |
644 | ,dwAlignment |
645 | ,&dwExtra |
646 | #ifdef _DEBUG |
647 | ,szFile |
648 | ,lineNum |
649 | #endif |
650 | ); |
651 | |
652 | tmap.m_pMem = (void*)(((BYTE*)pResult) - dwExtra); |
653 | tmap.m_dwRequestedSize = dwRequestedSize + dwExtra; |
654 | tmap.m_pHeap = this; |
655 | tmap.m_dwExtra = dwExtra; |
656 | #ifdef _DEBUG |
657 | tmap.m_szFile = szFile; |
658 | tmap.m_lineNum = lineNum; |
659 | #endif |
660 | |
661 | return tmap; |
662 | } |
663 | |
664 | |
665 | TaggedMemAllocPtr RealAllocAlignedMem_NoThrow(size_t dwRequestedSize |
666 | ,size_t dwAlignment |
667 | #ifdef _DEBUG |
668 | ,__in __in_z const char *szFile |
669 | ,int lineNum |
670 | #endif |
671 | ) |
672 | { |
673 | WRAPPER_NO_CONTRACT; |
674 | |
675 | CRITSEC_Holder csh(m_CriticalSection); |
676 | |
677 | |
678 | TaggedMemAllocPtr tmap; |
679 | void *pResult; |
680 | size_t ; |
681 | |
682 | pResult = UnlockedAllocAlignedMem_NoThrow(dwRequestedSize |
683 | ,dwAlignment |
684 | ,&dwExtra |
685 | #ifdef _DEBUG |
686 | ,szFile |
687 | ,lineNum |
688 | #endif |
689 | ); |
690 | |
691 | _ASSERTE(!(pResult == NULL && dwExtra != 0)); |
692 | |
693 | tmap.m_pMem = (void*)(((BYTE*)pResult) - dwExtra); |
694 | tmap.m_dwRequestedSize = dwRequestedSize + dwExtra; |
695 | tmap.m_pHeap = this; |
696 | tmap.m_dwExtra = dwExtra; |
697 | #ifdef _DEBUG |
698 | tmap.m_szFile = szFile; |
699 | tmap.m_lineNum = lineNum; |
700 | #endif |
701 | |
702 | return tmap; |
703 | } |
704 | |
705 | |
706 | public: |
707 | // This frees memory allocated by AllocMem. It's given this horrible name to emphasize |
708 | // that it's purpose is for error path leak prevention purposes. You shouldn't |
709 | // use LoaderHeap's as general-purpose alloc-free heaps. |
710 | void RealBackoutMem(void *pMem |
711 | , size_t dwSize |
712 | #ifdef _DEBUG |
713 | , __in __in_z const char *szFile |
714 | , int lineNum |
715 | , __in __in_z const char *szAllocFile |
716 | , int allocLineNum |
717 | #endif |
718 | ) |
719 | { |
720 | WRAPPER_NO_CONTRACT; |
721 | CRITSEC_Holder csh(m_CriticalSection); |
722 | UnlockedBackoutMem(pMem |
723 | , dwSize |
724 | #ifdef _DEBUG |
725 | , szFile |
726 | , lineNum |
727 | , szAllocFile |
728 | , allocLineNum |
729 | #endif |
730 | ); |
731 | } |
732 | |
733 | public: |
734 | // Extra CallTracing support |
735 | #ifdef _DEBUG |
736 | void ClearEvents() |
737 | { |
738 | WRAPPER_NO_CONTRACT; |
739 | CRITSEC_Holder csh(m_CriticalSection); |
740 | UnlockedClearEvents(); |
741 | } |
742 | |
743 | void CompactEvents() |
744 | { |
745 | WRAPPER_NO_CONTRACT; |
746 | CRITSEC_Holder csh(m_CriticalSection); |
747 | UnlockedCompactEvents(); |
748 | } |
749 | |
750 | void PrintEvents() |
751 | { |
752 | WRAPPER_NO_CONTRACT; |
753 | CRITSEC_Holder csh(m_CriticalSection); |
754 | UnlockedPrintEvents(); |
755 | } |
756 | #endif |
757 | |
758 | }; |
759 | |
760 | |
761 | |
762 | |
763 | |
764 | //=============================================================================== |
765 | // The ExplicitControlLoaderHeap exposes all the advanced features but |
766 | // has no Backout() feature. (If someone wants a backout feature, they need |
767 | // to design an appropriate one into this class.) |
768 | // |
769 | // Caller is responsible for synchronization. ExplicitControlLoaderHeap is |
770 | // not multithread safe. |
771 | //=============================================================================== |
772 | typedef DPTR(class ExplicitControlLoaderHeap) PTR_ExplicitControlLoaderHeap; |
773 | class ExplicitControlLoaderHeap : public UnlockedLoaderHeap |
774 | { |
775 | #ifndef DACCESS_COMPILE |
776 | public: |
777 | ExplicitControlLoaderHeap(size_t *pPrivatePerfCounter_LoaderBytes = NULL, |
778 | RangeList *pRangeList = NULL, |
779 | BOOL fMakeExecutable = FALSE |
780 | ) |
781 | : UnlockedLoaderHeap(0, 0, NULL, 0, |
782 | pPrivatePerfCounter_LoaderBytes, |
783 | pRangeList, |
784 | fMakeExecutable) |
785 | { |
786 | WRAPPER_NO_CONTRACT; |
787 | m_fExplicitControl = TRUE; |
788 | } |
789 | #endif // DACCESS_COMPILE |
790 | |
791 | public: |
792 | void *RealAllocMem(size_t dwSize |
793 | #ifdef _DEBUG |
794 | ,__in __in_z const char *szFile |
795 | ,int lineNum |
796 | #endif |
797 | ) |
798 | { |
799 | WRAPPER_NO_CONTRACT; |
800 | |
801 | void *pResult; |
802 | |
803 | pResult = UnlockedAllocMem(dwSize |
804 | #ifdef _DEBUG |
805 | , szFile |
806 | , lineNum |
807 | #endif |
808 | ); |
809 | return pResult; |
810 | } |
811 | |
812 | void *RealAllocMem_NoThrow(size_t dwSize |
813 | #ifdef _DEBUG |
814 | ,__in __in_z const char *szFile |
815 | ,int lineNum |
816 | #endif |
817 | ) |
818 | { |
819 | WRAPPER_NO_CONTRACT; |
820 | |
821 | void *pResult; |
822 | |
823 | pResult = UnlockedAllocMem_NoThrow(dwSize |
824 | #ifdef _DEBUG |
825 | , szFile |
826 | , lineNum |
827 | #endif |
828 | ); |
829 | return pResult; |
830 | } |
831 | |
832 | |
833 | public: |
834 | void *AllocMemForCode_NoThrow(size_t , size_t dwCodeSize, DWORD dwCodeAlignment, size_t dwReserveForJumpStubs) |
835 | { |
836 | WRAPPER_NO_CONTRACT; |
837 | return UnlockedAllocMemForCode_NoThrow(dwHeaderSize, dwCodeSize, dwCodeAlignment, dwReserveForJumpStubs); |
838 | } |
839 | |
840 | void SetReservedRegion(BYTE* dwReservedRegionAddress, SIZE_T dwReservedRegionSize, BOOL fReleaseMemory) |
841 | { |
842 | WRAPPER_NO_CONTRACT; |
843 | return UnlockedSetReservedRegion(dwReservedRegionAddress, dwReservedRegionSize, fReleaseMemory); |
844 | } |
845 | |
846 | public: |
847 | // number of bytes available in region |
848 | size_t GetReservedBytesFree() |
849 | { |
850 | WRAPPER_NO_CONTRACT; |
851 | return UnlockedGetReservedBytesFree(); |
852 | } |
853 | }; |
854 | |
855 | |
856 | |
857 | //============================================================================== |
858 | // AllocMemHolder : Allocated memory from LoaderHeap |
859 | // |
860 | // Old: |
861 | // |
862 | // Foo* pFoo = (Foo*)pLoaderHeap->AllocMem(size); |
//  pLoaderHeap->BackoutMem(pFoo, size)
864 | // |
865 | // |
866 | // New: |
867 | // |
868 | // { |
869 | // AllocMemHolder<Foo> pfoo = pLoaderHeap->AllocMem(); |
870 | // } // BackoutMem on out of scope |
871 | // |
872 | //============================================================================== |
873 | template <typename TYPE> |
874 | class AllocMemHolder |
875 | { |
876 | private: |
877 | TaggedMemAllocPtr m_value; |
878 | BOOL m_fAcquired; |
879 | |
880 | |
881 | //-------------------------------------------------------------------- |
882 | // All allowed (and disallowed) ctors here. |
883 | //-------------------------------------------------------------------- |
884 | public: |
885 | // Allow the construction "Holder h;" |
886 | AllocMemHolder() |
887 | { |
888 | LIMITED_METHOD_CONTRACT; |
889 | |
890 | m_value.m_pMem = NULL; |
891 | m_value.m_dwRequestedSize = 0; |
892 | m_value.m_pHeap = 0; |
893 | m_value.m_dwExtra = 0; |
894 | #ifdef _DEBUG |
895 | m_value.m_szFile = NULL; |
896 | m_value.m_lineNum = 0; |
897 | #endif |
898 | m_fAcquired = FALSE; |
899 | } |
900 | |
901 | public: |
902 | // Allow the construction "Holder h = pHeap->AllocMem()" |
903 | AllocMemHolder(const TaggedMemAllocPtr value) |
904 | { |
905 | LIMITED_METHOD_CONTRACT; |
906 | m_value = value; |
907 | m_fAcquired = TRUE; |
908 | } |
909 | |
910 | private: |
911 | // Disallow "Holder holder1 = holder2" |
912 | AllocMemHolder(const AllocMemHolder<TYPE> &); |
913 | |
914 | |
915 | private: |
916 | // Disallow "Holder holder1 = void*" |
917 | AllocMemHolder(const LPVOID &); |
918 | |
919 | //-------------------------------------------------------------------- |
920 | // Destructor (and the whole point of AllocMemHolder) |
921 | //-------------------------------------------------------------------- |
922 | public: |
923 | ~AllocMemHolder() |
924 | { |
925 | WRAPPER_NO_CONTRACT; |
926 | if (m_fAcquired && m_value.m_pMem) |
927 | { |
928 | m_value.m_pHeap->RealBackoutMem(m_value.m_pMem, |
929 | m_value.m_dwRequestedSize |
930 | #ifdef _DEBUG |
931 | ,__FILE__ |
932 | ,__LINE__ |
933 | ,m_value.m_szFile |
934 | ,m_value.m_lineNum |
935 | #endif |
936 | ); |
937 | } |
938 | } |
939 | |
940 | |
941 | //-------------------------------------------------------------------- |
942 | // All allowed (and disallowed) assignment operators here. |
943 | //-------------------------------------------------------------------- |
944 | public: |
945 | // Reluctantly allow "AllocMemHolder h; ... h = pheap->AllocMem()" |
946 | void operator=(const TaggedMemAllocPtr & value) |
947 | { |
948 | WRAPPER_NO_CONTRACT; |
949 | // However, prevent repeated assignments as that would leak. |
950 | _ASSERTE(m_value.m_pMem == NULL && !m_fAcquired); |
951 | m_value = value; |
952 | m_fAcquired = TRUE; |
953 | } |
954 | |
955 | private: |
956 | // Disallow "holder == holder2" |
957 | const AllocMemHolder<TYPE> & operator=(const AllocMemHolder<TYPE> &); |
958 | |
959 | private: |
960 | // Disallow "holder = void*" |
961 | const AllocMemHolder<TYPE> & operator=(const LPVOID &); |
962 | |
963 | |
964 | //-------------------------------------------------------------------- |
965 | // Operations on the holder itself |
966 | //-------------------------------------------------------------------- |
967 | public: |
968 | // Call this when you're ready to take ownership away from the holder. |
969 | void SuppressRelease() |
970 | { |
971 | LIMITED_METHOD_CONTRACT; |
972 | m_fAcquired = FALSE; |
973 | } |
974 | |
975 | |
976 | |
977 | //-------------------------------------------------------------------- |
978 | // ... And the smart-pointer stuff so we can drop holders on top |
979 | // of former pointer variables (mostly) |
980 | //-------------------------------------------------------------------- |
981 | public: |
982 | // Allow holder to be treated as the underlying pointer type |
983 | operator TYPE* () |
984 | { |
985 | LIMITED_METHOD_CONTRACT; |
986 | return (TYPE*)(void*)m_value; |
987 | } |
988 | |
989 | public: |
990 | // Allow holder to be treated as the underlying pointer type |
991 | TYPE* operator->() |
992 | { |
993 | LIMITED_METHOD_CONTRACT; |
994 | return (TYPE*)(void*)m_value; |
995 | } |
996 | public: |
997 | int operator==(TYPE* value) |
998 | { |
999 | LIMITED_METHOD_CONTRACT; |
1000 | return ((void*)m_value) == ((void*)value); |
1001 | } |
1002 | |
1003 | public: |
1004 | int operator!=(TYPE* value) |
1005 | { |
1006 | LIMITED_METHOD_CONTRACT; |
1007 | return ((void*)m_value) != ((void*)value); |
1008 | } |
1009 | |
1010 | public: |
1011 | int operator!() const |
1012 | { |
1013 | LIMITED_METHOD_CONTRACT; |
1014 | return m_value.m_pMem == NULL; |
1015 | } |
1016 | |
1017 | |
1018 | }; |
1019 | |
1020 | |
1021 | |
1022 | // This utility helps track loaderheap allocations. Its main purpose |
1023 | // is to backout allocations in case of an exception. |
class AllocMemTracker
{
public:
    AllocMemTracker();
    // NOTE(review): per the class comment, the destructor presumably backs out
    // all tracked allocations unless SuppressRelease() was called - confirm in the .cpp.
    ~AllocMemTracker();

    // Tells tracker to store an allocated loaderheap block.
    //
    // Returns the pointer address of block for convenience.
    //
    // Ok to call on failed loaderheap allocation (will just do nothing and propagate the OOM as apropos).
    //
    // If Track fails due to an OOM allocating node space, it will backout the loaderheap block before returning.
    void *Track(TaggedMemAllocPtr tmap);
    void *Track_NoThrow(TaggedMemAllocPtr tmap);

    // Relinquishes ownership of the tracked blocks (they will not be backed out).
    void SuppressRelease();

private:
    // One record per tracked allocation - everything needed to call
    // RealBackoutMem on the owning heap later.
    struct AllocMemTrackerNode
    {
        ILoaderHeapBackout *m_pHeap;            // heap the block came from
        void               *m_pMem;             // start of the allocated block
        size_t              m_dwRequestedSize;  // size to pass back to RealBackoutMem
#ifdef _DEBUG
        const char         *m_szAllocFile;      // file of the original allocation callsite
        int                 m_allocLineNum;     // line # of the original allocation callsite
#endif
    };

    // Nodes per block: deliberately tiny in debug builds (presumably to
    // exercise the block-chaining path more often - confirm).
    enum
    {
        kAllocMemTrackerBlockSize =
#ifdef _DEBUG
                                    3
#else
                                    20
#endif
    };

    // A chunk of nodes; chunks are chained into a singly-linked list as needed.
    struct AllocMemTrackerBlock
    {
        AllocMemTrackerBlock    *m_pNext;       // next block in the chain
        int                      m_nextFree;    // index of the next unused slot in m_Node
        AllocMemTrackerNode      m_Node[kAllocMemTrackerBlockSize];
    };


    AllocMemTrackerBlock        *m_pFirstBlock; // head of the block chain
    AllocMemTrackerBlock         m_FirstBlock;  // Stack-allocate the first block - "new" the rest.

protected:
    BOOL                         m_fReleased;   // NOTE(review): presumably set by SuppressRelease() - confirm in the .cpp

};
1079 | |
1080 | #endif // __LoaderHeap_h__ |
1081 | |
1082 | |