// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#include "stdafx.h" // Precompiled header key.
#include "loaderheap.h"
#include "perfcounters.h"
#include "ex.h"
#include "pedecoder.h"
#define DONOT_DEFINE_ETW_CALLBACK
#include "eventtracebase.h"

#define LHF_EXECUTABLE 0x1

#ifndef DACCESS_COMPILE

INDEBUG(DWORD UnlockedLoaderHeap::s_dwNumInstancesOfLoaderHeaps = 0;)

#ifdef RANDOMIZE_ALLOC
#include <time.h>
static class Random
{
public:
    Random() { seed = (unsigned int)time(NULL); }
    unsigned int Next()
    {
        return ((seed = seed * 214013L + 2531011L) >> 16) & 0x7fff;
    }
private:
    unsigned int seed;
} s_random;
#endif

namespace
{
#if !defined(SELF_NO_HOST) // ETW available only in the runtime
    inline void EtwAllocRequest(UnlockedLoaderHeap * const pHeap, void* ptr, size_t dwSize)
    {
        FireEtwAllocRequest(pHeap, ptr, static_cast<unsigned int>(dwSize), 0, 0, GetClrInstanceId());
    }
#else
#define EtwAllocRequest(pHeap, ptr, dwSize) ((void)0)
#endif // SELF_NO_HOST
}

//
// RangeLists are constructed so they can be searched from multiple
// threads without locking. They do require locking in order to
// be safely modified, though.
//

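// A minimal usage sketch (illustrative only - pStart, cbSize, pOwner and pAddr
// are hypothetical names, not part of this file):
//
//      RangeList rangeList;
//      if (rangeList.AddRange(pStart, pStart + cbSize, pOwner))
//      {
//          TADDR id;
//          if (rangeList.IsInRange((TADDR)pAddr, &id))   // lock-free probe
//          {
//              // pAddr falls inside a range tagged with 'id'
//          }
//          rangeList.RemoveRanges(pOwner);   // writers must serialize
//      }
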
RangeList::RangeList()
{
    WRAPPER_NO_CONTRACT;

    InitBlock(&m_starterBlock);

    m_firstEmptyBlock = &m_starterBlock;
    m_firstEmptyRange = 0;
}

RangeList::~RangeList()
{
    LIMITED_METHOD_CONTRACT;

    RangeListBlock *b = m_starterBlock.next;

    while (b != NULL)
    {
        RangeListBlock *bNext = b->next;
        delete b;
        b = bNext;
    }
}

void RangeList::InitBlock(RangeListBlock *b)
{
    LIMITED_METHOD_CONTRACT;

    Range *r = b->ranges;
    Range *rEnd = r + RANGE_COUNT;
    while (r < rEnd)
        r++->id = NULL;

    b->next = NULL;
}

BOOL RangeList::AddRangeWorker(const BYTE *start, const BYTE *end, void *id)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        INJECT_FAULT(return FALSE;);
    }
    CONTRACTL_END

    _ASSERTE(id != NULL);

    RangeListBlock *b = m_firstEmptyBlock;
    Range *r = b->ranges + m_firstEmptyRange;
    Range *rEnd = b->ranges + RANGE_COUNT;

    while (TRUE)
    {
        while (r < rEnd)
        {
            if (r->id == NULL)
            {
                r->start = (TADDR)start;
                r->end = (TADDR)end;
                r->id = (TADDR)id;

                r++;

                m_firstEmptyBlock = b;
                m_firstEmptyRange = r - b->ranges;

                return TRUE;
            }
            r++;
        }

        //
        // If there are no more blocks, allocate a
        // new one.
        //

        if (b->next == NULL)
        {
            RangeListBlock *newBlock = new (nothrow) RangeListBlock;

            if (newBlock == NULL)
            {
                m_firstEmptyBlock = b;
                m_firstEmptyRange = r - b->ranges;
                return FALSE;
            }

            InitBlock(newBlock);

            newBlock->next = NULL;
            b->next = newBlock;
        }

        //
        // Next block
        //

        b = b->next;
        r = b->ranges;
        rEnd = r + RANGE_COUNT;
    }
}

void RangeList::RemoveRangesWorker(void *id, const BYTE* start, const BYTE* end)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    }
    CONTRACTL_END

    RangeListBlock *b = &m_starterBlock;
    Range *r = b->ranges;
    Range *rEnd = r + RANGE_COUNT;

    //
    // Walk all blocks, clearing any entries that match.
    //

    while (TRUE)
    {
        //
        // Clear entries in this block.
        //

        while (r < rEnd)
        {
            if (r->id != NULL)
            {
                if (start != NULL)
                {
                    _ASSERTE(end != NULL);

                    if (r->start >= (TADDR)start && r->start < (TADDR)end)
                    {
                        CONSISTENCY_CHECK_MSGF(r->end >= (TADDR)start &&
                                               r->end <= (TADDR)end,
                                               ("r: %p start: %p end: %p", r, start, end));
                        r->id = NULL;
                    }
                }
                else if (r->id == (TADDR)id)
                {
                    r->id = NULL;
                }
            }

            r++;
        }

        //
        // If there are no more blocks, we're done.
        //

        if (b->next == NULL)
        {
            m_firstEmptyRange = 0;
            m_firstEmptyBlock = &m_starterBlock;

            return;
        }

        //
        // Next block.
        //

        b = b->next;
        r = b->ranges;
        rEnd = r + RANGE_COUNT;
    }
}

#endif // #ifndef DACCESS_COMPILE

BOOL RangeList::IsInRangeWorker(TADDR address, TADDR *pID /* = NULL */)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        FORBID_FAULT;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END

    SUPPORTS_DAC;

    RangeListBlock* b = &m_starterBlock;
    Range* r = b->ranges;
    Range* rEnd = r + RANGE_COUNT;

    //
    // Look for a matching element
    //

    while (TRUE)
    {
        while (r < rEnd)
        {
            if (r->id != NULL &&
                address >= r->start &&
                address < r->end)
            {
                if (pID != NULL)
                {
                    *pID = r->id;
                }
                return TRUE;
            }
            r++;
        }

        //
        // If there are no more blocks, we're done.
        //

        if (b->next == NULL)
            return FALSE;

        //
        // Next block.
        //

        b = b->next;
        r = b->ranges;
        rEnd = r + RANGE_COUNT;
    }
}

#ifdef DACCESS_COMPILE

void
RangeList::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    SUPPORTS_DAC;
    WRAPPER_NO_CONTRACT;

    // This class is almost always contained in something
    // else so there's no enumeration of 'this'.

    RangeListBlock* block = &m_starterBlock;
    block->EnumMemoryRegions(flags);

    while (block->next.IsValid())
    {
        block->next.EnumMem();
        block = block->next;

        block->EnumMemoryRegions(flags);
    }
}

void
RangeList::RangeListBlock::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    WRAPPER_NO_CONTRACT;

    Range* range;
    TADDR BADFOOD;
    TSIZE_T size;
    int i;

    // The code below iterates each range stored in the RangeListBlock and
    // dumps the memory region represented by each range.
    // It is too much memory for a mini-dump, so we just bail out for mini-dumps.
    if (flags == CLRDATA_ENUM_MEM_MINI || flags == CLRDATA_ENUM_MEM_TRIAGE)
    {
        return;
    }

    WIN64_ONLY( BADFOOD = 0xbaadf00dbaadf00d; );
    NOT_WIN64(  BADFOOD = 0xbaadf00d; );

    for (i=0; i<RANGE_COUNT; i++)
    {
        range = &(this->ranges[i]);
        if (range->id == NULL || range->start == NULL || range->end == NULL ||
            // just looking at the lower 4 bytes is good enough on WIN64
            range->start == BADFOOD || range->end == BADFOOD)
        {
            break;
        }

        size = range->end - range->start;
        _ASSERTE( size < ULONG_MAX );    // ranges should be less than 4gig!

        // We can't be sure this entire range is mapped. For example, the code:StubLinkStubManager
        // keeps track of all ranges in the code:BaseDomain::m_pStubHeap LoaderHeap, and
        // code:LoaderHeap::UnlockedReservePages adds a range for the entire reserved region, instead
        // of updating the RangeList when pages are committed. But in that case, the committed region of
        // memory will be enumerated by the LoaderHeap anyway, so it's OK if this fails
        DacEnumMemoryRegion(range->start, size, false);
    }
}

#endif // #ifdef DACCESS_COMPILE


//=====================================================================================
// In DEBUG builds only, we tag live blocks with the requested size and the type of
// allocation (AllocMem, AllocAlignedMem, AllocateOntoReservedMem). This is strictly
// to validate that those who call Backout* are passing in the right values.
//
// For simplicity, we'll use one LoaderHeapValidationTag structure for all types even
// though not all fields are applicable to all types.
//=====================================================================================
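// For instance (hypothetical values): if a DEBUG caller does AllocMem(0x40) and
// later passes 0x48 to BackoutMem, the tag's m_dwRequestedSize (0x40) won't match
// and the heap raises the "Invalid BackoutMem() call" assert below, rather than
// silently corrupting the free list.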
#ifdef _DEBUG
enum AllocationType
{
    kAllocMem = 1,
    kFreedMem = 4,
};

struct LoaderHeapValidationTag
{
    size_t m_dwRequestedSize;        // What the caller requested (not what was actually allocated)
    AllocationType m_allocationType; // Which api allocated this block.
    const char * m_szFile;           // Who allocated me
    int m_lineNum;                   // Who allocated me

};
#endif //_DEBUG





//=====================================================================================
// These classes do detailed loaderheap sniffing to help in debugging heap crashes
//=====================================================================================
#ifdef _DEBUG

// This structure logs the results of an Alloc or Free call. They are stored in reverse time order
// with UnlockedLoaderHeap::m_pEventList pointing to the most recent event.
struct LoaderHeapEvent
{
    LoaderHeapEvent *m_pNext;
    AllocationType   m_allocationType;  //Which api was called
    const char      *m_szFile;          //Caller Id
    int              m_lineNum;         //Caller Id
    const char      *m_szAllocFile;     //(BackoutEvents): Who allocated the block?
    int              m_allocLineNum;    //(BackoutEvents): Who allocated the block?
    void            *m_pMem;            //Starting address of block
    size_t           m_dwRequestedSize; //Requested size of block
    size_t           m_dwSize;          //Actual size of block (including validation tags, padding, everything)


    void Describe(SString *pSString)
    {
        CONTRACTL
        {
            INSTANCE_CHECK;
            DISABLED(NOTHROW);
            GC_NOTRIGGER;
        }
        CONTRACTL_END

        pSString->AppendASCII("\n");

        {
            StackSString buf;
            if (m_allocationType == kFreedMem)
            {
                buf.Printf(" Freed at: %s (line %d)\n", m_szFile, m_lineNum);
                buf.Printf(" (block originally allocated at %s (line %d)\n", m_szAllocFile, m_allocLineNum);
            }
            else
            {
                buf.Printf(" Allocated at: %s (line %d)\n", m_szFile, m_lineNum);
            }
            pSString->Append(buf);
        }

        if (!QuietValidate())
        {
            pSString->AppendASCII(" *** THIS BLOCK HAS BEEN CORRUPTED ***\n");
        }



        {
            StackSString buf;
            buf.Printf(" Type: ");
            switch (m_allocationType)
            {
                case kAllocMem:
                    buf.AppendASCII("AllocMem()\n");
                    break;
                case kFreedMem:
                    buf.AppendASCII("Free\n");
                    break;
                default:
                    break;
            }
            pSString->Append(buf);
        }


        {
            StackSString buf;
            buf.Printf(" Start of block: 0x%p\n", m_pMem);
            pSString->Append(buf);
        }

        {
            StackSString buf;
            buf.Printf(" End of block: 0x%p\n", ((BYTE*)m_pMem) + m_dwSize - 1);
            pSString->Append(buf);
        }

        {
            StackSString buf;
            buf.Printf(" Requested size: %lu (0x%lx)\n", (ULONG)m_dwRequestedSize, (ULONG)m_dwRequestedSize);
            pSString->Append(buf);
        }

        {
            StackSString buf;
            buf.Printf(" Actual size: %lu (0x%lx)\n", (ULONG)m_dwSize, (ULONG)m_dwSize);
            pSString->Append(buf);
        }

        pSString->AppendASCII("\n");
    }



    BOOL QuietValidate();

};


class LoaderHeapSniffer
{
    public:
        static DWORD InitDebugFlags()
        {
            WRAPPER_NO_CONTRACT;

            DWORD dwDebugFlags = 0;
            if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_LoaderHeapCallTracing))
            {
                dwDebugFlags |= UnlockedLoaderHeap::kCallTracing;
            }
            return dwDebugFlags;
        }


        static VOID RecordEvent(UnlockedLoaderHeap *pHeap,
                                AllocationType allocationType,
                                __in const char *szFile,
                                int lineNum,
                                __in const char *szAllocFile,
                                int allocLineNum,
                                void *pMem,
                                size_t dwRequestedSize,
                                size_t dwSize
                                );

        static VOID ClearEvents(UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_FORBID_FAULT;

            LoaderHeapEvent *pEvent = pHeap->m_pEventList;
            while (pEvent)
            {
                LoaderHeapEvent *pNext = pEvent->m_pNext;
                delete pEvent;
                pEvent = pNext;
            }
            pHeap->m_pEventList = NULL;
        }


        static VOID CompactEvents(UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_FORBID_FAULT;

            LoaderHeapEvent **ppEvent = &(pHeap->m_pEventList);
            while (*ppEvent)
            {
                LoaderHeapEvent *pEvent = *ppEvent;
                if (pEvent->m_allocationType != kFreedMem)
                {
                    ppEvent = &(pEvent->m_pNext);
                }
                else
                {
                    LoaderHeapEvent **ppWalk = &(pEvent->m_pNext);
                    BOOL fMatchFound = FALSE;
                    while (*ppWalk && !fMatchFound)
                    {
                        LoaderHeapEvent *pWalk = *ppWalk;
                        if (pWalk->m_allocationType != kFreedMem &&
                            pWalk->m_pMem == pEvent->m_pMem &&
                            pWalk->m_dwRequestedSize == pEvent->m_dwRequestedSize)
                        {
                            // Delete matched pairs

                            // Order is important here - updating *ppWalk may change pEvent->m_pNext, and we want
                            // to get the updated value when we unlink pEvent.
                            *ppWalk = pWalk->m_pNext;
                            *ppEvent = pEvent->m_pNext;

                            delete pEvent;
                            delete pWalk;
                            fMatchFound = TRUE;
                        }
                        else
                        {
                            ppWalk = &(pWalk->m_pNext);
                        }
                    }

                    if (!fMatchFound)
                    {
                        ppEvent = &(pEvent->m_pNext);
                    }
                }
            }
        }
        static VOID PrintEvents(UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_FORBID_FAULT;

            printf("\n------------- LoaderHeapEvents (in reverse time order!) --------------------");

            LoaderHeapEvent *pEvent = pHeap->m_pEventList;
            while (pEvent)
            {
                printf("\n");
                switch (pEvent->m_allocationType)
                {
                    case kAllocMem: printf("AllocMem "); break;
                    case kFreedMem: printf("BackoutMem "); break;

                }
                printf(" ptr = 0x%-8p", pEvent->m_pMem);
                printf(" rqsize = 0x%-8x", (unsigned int)pEvent->m_dwRequestedSize);
                printf(" actsize = 0x%-8x", (unsigned int)pEvent->m_dwSize);
                printf(" (at %s@%d)", pEvent->m_szFile, pEvent->m_lineNum);
                if (pEvent->m_allocationType == kFreedMem)
                {
                    printf(" (original allocation at %s@%d)", pEvent->m_szAllocFile, pEvent->m_allocLineNum);
                }

                pEvent = pEvent->m_pNext;

            }
            printf("\n------------- End of LoaderHeapEvents --------------------------------------");
            printf("\n");

        }


        static VOID PitchSniffer(SString *pSString)
        {
            WRAPPER_NO_CONTRACT;
            pSString->AppendASCII("\n"
                "\nBecause call-tracing wasn't turned on, we couldn't provide details about who last owned the affected memory block. To get more precise diagnostics,"
                "\nset the following registry DWORD value:"
                "\n"
                "\n HKLM\\Software\\Microsoft\\.NETFramework\\LoaderHeapCallTracing = 1"
                "\n"
                "\nand rerun the scenario that crashed."
                "\n"
                "\n");
        }

        static LoaderHeapEvent *FindEvent(UnlockedLoaderHeap *pHeap, void *pAddr)
        {
            LIMITED_METHOD_CONTRACT;

            LoaderHeapEvent *pEvent = pHeap->m_pEventList;
            while (pEvent)
            {
                if (pAddr >= pEvent->m_pMem && pAddr <= ( ((BYTE*)pEvent->m_pMem) + pEvent->m_dwSize - 1))
                {
                    return pEvent;
                }
                pEvent = pEvent->m_pNext;
            }
            return NULL;

        }


        static void ValidateFreeList(UnlockedLoaderHeap *pHeap);

        static void WeGotAFaultNowWhat(UnlockedLoaderHeap *pHeap)
        {
            WRAPPER_NO_CONTRACT;
            ValidateFreeList(pHeap);

            //If none of the above popped up an assert, pop up a generic one.
            _ASSERTE(!("Unexpected AV inside LoaderHeap. The usual reason is that someone overwrote the end of a block or wrote into a freed block.\n"));

        }

};


#endif


#ifdef _DEBUG
#define LOADER_HEAP_BEGIN_TRAP_FAULT BOOL __faulted = FALSE; EX_TRY {
#define LOADER_HEAP_END_TRAP_FAULT   } EX_CATCH {__faulted = TRUE; } EX_END_CATCH(SwallowAllExceptions) if (__faulted) LoaderHeapSniffer::WeGotAFaultNowWhat(pHeap);
#else
#define LOADER_HEAP_BEGIN_TRAP_FAULT
#define LOADER_HEAP_END_TRAP_FAULT
#endif


size_t AllocMem_TotalSize(size_t dwRequestedSize, UnlockedLoaderHeap *pHeap);

//=====================================================================================
// This freelist implementation is a first cut and probably needs to be tuned.
// It should be tuned with the following assumptions:
//
//    - Freeing LoaderHeap memory is done primarily for OOM backout. LoaderHeaps
//      weren't designed to be general purpose heaps and shouldn't be used that way.
//
//    - And hence, when memory is freed, expect it to be freed in large clumps and in a
//      LIFO order. Since the LoaderHeap normally hands out memory with sequentially
//      increasing addresses, blocks will typically be freed with sequentially decreasing
//      addresses.
//
// The first cut of the freelist is a single-linked list of free blocks using first-fit.
// Assuming the above alloc-free pattern holds, the list will end up mostly sorted
// in increasing address order. When a block is freed, we'll attempt to coalesce it
// with the first block in the list. We could also choose to be more aggressive about
// sorting and coalescing but this should probably catch most cases in practice.
//=====================================================================================
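//
// As an illustration of the intended pattern (hypothetical sizes; assumes a
// non-DEBUG build where the calls carry no extra file/line arguments): if three
// blocks are handed out at increasing addresses and backed out in LIFO order,
//
//      void *p1 = pHeap->UnlockedAllocMem(0x100);   // lowest address
//      void *p2 = pHeap->UnlockedAllocMem(0x100);
//      void *p3 = pHeap->UnlockedAllocMem(0x100);   // highest address
//
//      pHeap->UnlockedBackoutMem(p3, 0x100);
//      pHeap->UnlockedBackoutMem(p2, 0x100);
//      pHeap->UnlockedBackoutMem(p1, 0x100);
//
// then no free list entries are created at all: each block is the most recent
// allocation at the moment it is freed, so m_pAllocPtr simply rewinds each time.
// The free list only comes into play when blocks are freed out of order.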

// When a block is freed, we place this structure on the first bytes of the freed block (Allocations
// are bumped in size if necessary to make sure there's room.)
struct LoaderHeapFreeBlock
{
    public:
        LoaderHeapFreeBlock *m_pNext;   // Pointer to next block on free list
        size_t               m_dwSize;  // Total size of this block (including this header)
        //! Try not to grow the size of this structure. It places a minimum size on LoaderHeap allocations.

        static void InsertFreeBlock(LoaderHeapFreeBlock **ppHead, void *pMem, size_t dwTotalSize, UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_GC_NOTRIGGER;

            LOADER_HEAP_BEGIN_TRAP_FAULT

            // It's illegal to insert a free block that's smaller than the minimum sized allocation -
            // it may stay stranded on the freelist forever.
#ifdef _DEBUG
            if (!(dwTotalSize >= AllocMem_TotalSize(1, pHeap)))
            {
                LoaderHeapSniffer::ValidateFreeList(pHeap);
                _ASSERTE(dwTotalSize >= AllocMem_TotalSize(1, pHeap));
            }

            if (!(0 == (dwTotalSize & ALLOC_ALIGN_CONSTANT)))
            {
                LoaderHeapSniffer::ValidateFreeList(pHeap);
                _ASSERTE(0 == (dwTotalSize & ALLOC_ALIGN_CONSTANT));
            }
#endif

            INDEBUG(memset(pMem, 0xcc, dwTotalSize);)
            LoaderHeapFreeBlock *pNewBlock = (LoaderHeapFreeBlock*)pMem;
            pNewBlock->m_pNext = *ppHead;
            pNewBlock->m_dwSize = dwTotalSize;
            *ppHead = pNewBlock;

            MergeBlock(pNewBlock, pHeap);

            LOADER_HEAP_END_TRAP_FAULT
        }


        static void *AllocFromFreeList(LoaderHeapFreeBlock **ppHead, size_t dwSize, BOOL fRemoveFromFreeList, UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_GC_NOTRIGGER;

            INCONTRACT(_ASSERTE_IMPL(!ARE_FAULTS_FORBIDDEN()));

            void *pResult = NULL;
            LOADER_HEAP_BEGIN_TRAP_FAULT

            LoaderHeapFreeBlock **ppWalk = ppHead;
            while (*ppWalk)
            {
                LoaderHeapFreeBlock *pCur = *ppWalk;
                size_t dwCurSize = pCur->m_dwSize;
                if (dwCurSize == dwSize)
                {
                    pResult = pCur;
                    // Exact match. Hooray!
                    if (fRemoveFromFreeList)
                    {
                        *ppWalk = pCur->m_pNext;
                    }
                    break;
                }
                else if (dwCurSize > dwSize && (dwCurSize - dwSize) >= AllocMem_TotalSize(1, pHeap))
                {
                    // Partial match. Ok...
                    pResult = pCur;
                    if (fRemoveFromFreeList)
                    {
                        *ppWalk = pCur->m_pNext;
                        InsertFreeBlock(ppWalk, ((BYTE*)pCur) + dwSize, dwCurSize - dwSize, pHeap );
                    }
                    break;
                }

                // Either block is too small or splitting the block would leave a remainder that's smaller than
                // the minimum block size. Onto next one.

                ppWalk = &( pCur->m_pNext );
            }

            if (pResult && fRemoveFromFreeList)
            {
                // Callers of loaderheap assume allocated memory is zero-inited so we must preserve this invariant!
                memset(pResult, 0, dwSize);
            }
            LOADER_HEAP_END_TRAP_FAULT
            return pResult;

        }


    private:
        // Try to merge pFreeBlock with its immediate successor. Return TRUE if a merge happened. FALSE if no merge happened.
        static BOOL MergeBlock(LoaderHeapFreeBlock *pFreeBlock, UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;

            BOOL result = FALSE;

            LOADER_HEAP_BEGIN_TRAP_FAULT

            LoaderHeapFreeBlock *pNextBlock = pFreeBlock->m_pNext;
            size_t dwSize = pFreeBlock->m_dwSize;

            if (pNextBlock == NULL || ((BYTE*)pNextBlock) != (((BYTE*)pFreeBlock) + dwSize))
            {
                result = FALSE;
            }
            else
            {
                size_t dwCombinedSize = dwSize + pNextBlock->m_dwSize;
                LoaderHeapFreeBlock *pNextNextBlock = pNextBlock->m_pNext;
                INDEBUG(memset(pFreeBlock, 0xcc, dwCombinedSize);)
                pFreeBlock->m_pNext = pNextNextBlock;
                pFreeBlock->m_dwSize = dwCombinedSize;

                result = TRUE;
            }

            LOADER_HEAP_END_TRAP_FAULT
            return result;

        }

};




//=====================================================================================
// These helpers encapsulate the actual layout of a block allocated by AllocMem
// and UnlockedAllocMem():
//
// ==> Starting address is always pointer-aligned.
//
//   - x bytes of user bytes (where "x" is the actual dwSize passed into AllocMem)
//
//   - y bytes of "EE" (DEBUG-ONLY) (where "y" == LOADER_HEAP_DEBUG_BOUNDARY (normally 0))
//   - z bytes of pad  (DEBUG-ONLY) (where "z" is just enough to pointer-align the following byte)
//   - a bytes of tag  (DEBUG-ONLY) (where "a" is sizeof(LoaderHeapValidationTag)
//
//   - b bytes of pad (if total size after all this < sizeof(LoaderHeapFreeBlock), pad enough to make it the size of LoaderHeapFreeBlock)
//   - c bytes of pad (where "c" is just enough to pointer-align the following byte)
//
// ==> Following address is always pointer-aligned
//=====================================================================================

// Convert the requested size into the total # of bytes we'll actually allocate (including padding)
inline size_t AllocMem_TotalSize(size_t dwRequestedSize, UnlockedLoaderHeap *pHeap)
{
    LIMITED_METHOD_CONTRACT;

    size_t dwSize = dwRequestedSize;
#ifdef _DEBUG
    dwSize += LOADER_HEAP_DEBUG_BOUNDARY;
    dwSize = ((dwSize + ALLOC_ALIGN_CONSTANT) & (~ALLOC_ALIGN_CONSTANT));

    if (!pHeap->m_fExplicitControl)
    {
        dwSize += sizeof(LoaderHeapValidationTag);
    }
#endif
    if (!pHeap->m_fExplicitControl)
    {
        if (dwSize < sizeof(LoaderHeapFreeBlock))
        {
            dwSize = sizeof(LoaderHeapFreeBlock);
        }
    }
    dwSize = ((dwSize + ALLOC_ALIGN_CONSTANT) & (~ALLOC_ALIGN_CONSTANT));

    return dwSize;
}
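
// A worked example of the computation above (a sketch under stated assumptions:
// a retail (non-DEBUG) build, pointer alignment with ALLOC_ALIGN_CONSTANT == 7,
// and a 16-byte LoaderHeapFreeBlock - both values are platform-dependent):
// a request for 10 bytes on a normal (non-explicit-control) heap is first bumped
// to sizeof(LoaderHeapFreeBlock), i.e. 16, so the block can live on the free
// list later; the final round-up ((16 + 7) & ~7) leaves it at 16. A request for
// 17 bytes already exceeds the free-list header size and simply rounds up to 24.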


#ifdef _DEBUG
LoaderHeapValidationTag *AllocMem_GetTag(LPVOID pBlock, size_t dwRequestedSize)
{
    LIMITED_METHOD_CONTRACT;

    size_t dwSize = dwRequestedSize;
    dwSize += LOADER_HEAP_DEBUG_BOUNDARY;
    dwSize = ((dwSize + ALLOC_ALIGN_CONSTANT) & (~ALLOC_ALIGN_CONSTANT));
    return (LoaderHeapValidationTag *)( ((BYTE*)pBlock) + dwSize );
}
#endif




//=====================================================================================
// UnlockedLoaderHeap methods
//=====================================================================================

#ifndef DACCESS_COMPILE

UnlockedLoaderHeap::UnlockedLoaderHeap(DWORD dwReserveBlockSize,
                                       DWORD dwCommitBlockSize,
                                       const BYTE* dwReservedRegionAddress,
                                       SIZE_T dwReservedRegionSize,
                                       size_t *pPrivatePerfCounter_LoaderBytes,
                                       RangeList *pRangeList,
                                       BOOL fMakeExecutable)
{
    CONTRACTL
    {
        CONSTRUCTOR_CHECK;
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END;

    m_pCurBlock = NULL;
    m_pFirstBlock = NULL;

    m_dwReserveBlockSize = dwReserveBlockSize;
    m_dwCommitBlockSize = dwCommitBlockSize;

    m_pPtrToEndOfCommittedRegion = NULL;
    m_pEndReservedRegion = NULL;
    m_pAllocPtr = NULL;

    m_pRangeList = pRangeList;

    // Round to VIRTUAL_ALLOC_RESERVE_GRANULARITY
    m_dwTotalAlloc = 0;

#ifdef _DEBUG
    m_dwDebugWastedBytes = 0;
    s_dwNumInstancesOfLoaderHeaps++;
    m_pEventList = NULL;
    m_dwDebugFlags = LoaderHeapSniffer::InitDebugFlags();
    m_fPermitStubsWithUnwindInfo = FALSE;
    m_fStubUnwindInfoUnregistered = FALSE;
#endif

    m_pPrivatePerfCounter_LoaderBytes = pPrivatePerfCounter_LoaderBytes;

    m_Options = 0;

#ifndef CROSSGEN_COMPILE
    if (fMakeExecutable)
        m_Options |= LHF_EXECUTABLE;
#endif // CROSSGEN_COMPILE

    m_pFirstFreeBlock = NULL;

    if (dwReservedRegionAddress != NULL && dwReservedRegionSize > 0)
    {
        m_reservedBlock.Init((void *)dwReservedRegionAddress, dwReservedRegionSize, FALSE);
    }
}

// ~LoaderHeap is not synchronised (obviously)
UnlockedLoaderHeap::~UnlockedLoaderHeap()
{
    CONTRACTL
    {
        DESTRUCTOR_CHECK;
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END

    _ASSERTE(!m_fPermitStubsWithUnwindInfo || m_fStubUnwindInfoUnregistered);

    if (m_pRangeList != NULL)
        m_pRangeList->RemoveRanges((void *) this);

    LoaderHeapBlock *pSearch, *pNext;

    for (pSearch = m_pFirstBlock; pSearch; pSearch = pNext)
    {
        void *pVirtualAddress;
        BOOL  fReleaseMemory;

        pVirtualAddress = pSearch->pVirtualAddress;
        fReleaseMemory = pSearch->m_fReleaseMemory;
        pNext = pSearch->pNext;

        if (fReleaseMemory)
        {
            BOOL fSuccess;
            fSuccess = ClrVirtualFree(pVirtualAddress, 0, MEM_RELEASE);
            _ASSERTE(fSuccess);
        }
    }

    if (m_reservedBlock.m_fReleaseMemory)
    {
        BOOL fSuccess;
        fSuccess = ClrVirtualFree(m_reservedBlock.pVirtualAddress, 0, MEM_RELEASE);
        _ASSERTE(fSuccess);
    }

    if (m_pPrivatePerfCounter_LoaderBytes)
        *m_pPrivatePerfCounter_LoaderBytes = *m_pPrivatePerfCounter_LoaderBytes - (DWORD) m_dwTotalAlloc;

    INDEBUG(s_dwNumInstancesOfLoaderHeaps --;)
}

void UnlockedLoaderHeap::UnlockedSetReservedRegion(BYTE* dwReservedRegionAddress, SIZE_T dwReservedRegionSize, BOOL fReleaseMemory)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(m_reservedBlock.pVirtualAddress == NULL);
    m_reservedBlock.Init((void *)dwReservedRegionAddress, dwReservedRegionSize, fReleaseMemory);
}

#endif // #ifndef DACCESS_COMPILE

#if 0
// Disables access to all pages in the heap - useful when trying to determine if someone is
// accessing something in the low frequency heap
void UnlockedLoaderHeap::DebugGuardHeap()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapBlock *pSearch, *pNext;

    for (pSearch = m_pFirstBlock; pSearch; pSearch = pNext)
    {
        void *pResult;
        void *pVirtualAddress;

        pVirtualAddress = pSearch->pVirtualAddress;
        pNext = pSearch->pNext;

        pResult = ClrVirtualAlloc(pVirtualAddress, pSearch->dwVirtualSize, MEM_COMMIT, PAGE_NOACCESS);
        _ASSERTE(pResult != NULL);
    }
}
#endif

size_t UnlockedLoaderHeap::GetBytesAvailCommittedRegion()
{
    LIMITED_METHOD_CONTRACT;

    if (m_pAllocPtr < m_pPtrToEndOfCommittedRegion)
        return (size_t)(m_pPtrToEndOfCommittedRegion - m_pAllocPtr);
    else
        return 0;
}

size_t UnlockedLoaderHeap::GetBytesAvailReservedRegion()
{
    LIMITED_METHOD_CONTRACT;

    if (m_pAllocPtr < m_pEndReservedRegion)
        return (size_t)(m_pEndReservedRegion - m_pAllocPtr);
    else
        return 0;
}

#define SETUP_NEW_BLOCK(pData, dwSizeToCommit, dwSizeToReserve)                    \
        m_pPtrToEndOfCommittedRegion = (BYTE *) (pData) + (dwSizeToCommit);        \
        m_pAllocPtr                  = (BYTE *) (pData) + sizeof(LoaderHeapBlock); \
        m_pEndReservedRegion         = (BYTE *) (pData) + (dwSizeToReserve);


#ifndef DACCESS_COMPILE

BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        INJECT_FAULT(return FALSE;);
    }
    CONTRACTL_END;

    size_t dwSizeToReserve;

    // Add sizeof(LoaderHeapBlock)
    dwSizeToCommit += sizeof(LoaderHeapBlock);

    // Round to page size again
    dwSizeToCommit = ALIGN_UP(dwSizeToCommit, GetOsPageSize());

    void *pData = NULL;
    BOOL fReleaseMemory = TRUE;

    // We were provided with a reserved memory block at instance creation time, so use it if it's big enough.
    if (m_reservedBlock.pVirtualAddress != NULL &&
        m_reservedBlock.dwVirtualSize >= dwSizeToCommit)
    {
        // Get the info out of the block.
        pData = m_reservedBlock.pVirtualAddress;
        dwSizeToReserve = m_reservedBlock.dwVirtualSize;
        fReleaseMemory = m_reservedBlock.m_fReleaseMemory;

        // Zero the block so this memory doesn't get used again.
        m_reservedBlock.Init(NULL, 0, FALSE);
    }
    // The caller is asking us to allocate the memory
    else
    {
        if (m_fExplicitControl)
        {
            return FALSE;
        }

        // Figure out how much to reserve
        dwSizeToReserve = max(dwSizeToCommit, m_dwReserveBlockSize);

        // Round to VIRTUAL_ALLOC_RESERVE_GRANULARITY
        dwSizeToReserve = ALIGN_UP(dwSizeToReserve, VIRTUAL_ALLOC_RESERVE_GRANULARITY);

        _ASSERTE(dwSizeToCommit <= dwSizeToReserve);

        //
        // Reserve pages
        //

        pData = ClrVirtualAllocExecutable(dwSizeToReserve, MEM_RESERVE, PAGE_NOACCESS);
        if (pData == NULL)
        {
            return FALSE;
        }
    }

    // When the user passes in reserved memory, the commit size starts at 0 and is adjusted to be
    // sizeof(LoaderHeapBlock). If for some reason this is not true, we just catch it via the assertion
    // below, and the dev who changed the code would have to add logic here to handle the case when the
    // committed size exceeds the reserved size. One option could be to leak the user's memory and
    // reserve+commit a new block; another would be to fail the allocation and notify the user to
    // provide more reserved memory.
    _ASSERTE((dwSizeToCommit <= dwSizeToReserve) && "Loaderheap tried to commit more memory than reserved by user");

    if (pData == NULL)
    {
        //_ASSERTE(!"Unable to ClrVirtualAlloc reserve in a loaderheap");
        return FALSE;
    }

    // Commit first set of pages, since it will contain the LoaderHeapBlock
    void *pTemp = ClrVirtualAlloc(pData, dwSizeToCommit, MEM_COMMIT, (m_Options & LHF_EXECUTABLE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
    if (pTemp == NULL)
    {
        //_ASSERTE(!"Unable to ClrVirtualAlloc commit in a loaderheap");

        // Unable to commit - release pages
        if (fReleaseMemory)
            ClrVirtualFree(pData, 0, MEM_RELEASE);

        return FALSE;
    }

    if (m_pPrivatePerfCounter_LoaderBytes)
        *m_pPrivatePerfCounter_LoaderBytes = *m_pPrivatePerfCounter_LoaderBytes + (DWORD) dwSizeToCommit;

    // Record reserved range in range list, if one is specified
    // Do this AFTER the commit - otherwise we'll have bogus ranges included.
    if (m_pRangeList != NULL)
    {
        if (!m_pRangeList->AddRange((const BYTE *) pData,
                                    ((const BYTE *) pData) + dwSizeToReserve,
                                    (void *) this))
        {

            if (fReleaseMemory)
                ClrVirtualFree(pData, 0, MEM_RELEASE);

            return FALSE;
        }
    }

    m_dwTotalAlloc += dwSizeToCommit;

    LoaderHeapBlock *pNewBlock;

    pNewBlock = (LoaderHeapBlock *) pData;

    pNewBlock->dwVirtualSize    = dwSizeToReserve;
    pNewBlock->pVirtualAddress  = pData;
    pNewBlock->pNext            = NULL;
    pNewBlock->m_fReleaseMemory = fReleaseMemory;

    LoaderHeapBlock *pCurBlock = m_pCurBlock;

    // Add to linked list
    while (pCurBlock != NULL &&
           pCurBlock->pNext != NULL)
        pCurBlock = pCurBlock->pNext;

    if (pCurBlock != NULL)
        pCurBlock->pNext = pNewBlock;
    else
        m_pFirstBlock = pNewBlock;

    // If we want to use the memory immediately...
    m_pCurBlock = pNewBlock;

    SETUP_NEW_BLOCK(pData, dwSizeToCommit, dwSizeToReserve);

    return TRUE;
}

// Get some more committed pages - either commit some more in the current reserved region, or, if it
// has run out, reserve another set of pages.
// Returns: FALSE if we can't get any more memory
//          TRUE: We can/did get some more memory - check to see if it's sufficient for
//                the caller's needs (see UnlockedAllocMem for example of use)
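//
// A sketch of the intended call pattern (this mirrors what the allocators below
// actually do; dwSize stands for whatever total the caller computed):
//
//      if (dwSize > GetBytesAvailCommittedRegion())
//      {
//          if (!GetMoreCommittedPages(dwSize))
//              return NULL;   // out of both committed and reserved space
//      }
//      // now carve dwSize bytes off m_pAllocPtr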
BOOL UnlockedLoaderHeap::GetMoreCommittedPages(size_t dwMinSize)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        INJECT_FAULT(return FALSE;);
    }
    CONTRACTL_END;

    // If we have memory we can use, what are you doing here!
    _ASSERTE(dwMinSize > (SIZE_T)(m_pPtrToEndOfCommittedRegion - m_pAllocPtr));

    // Does this fit in the reserved region?
    if (dwMinSize <= (size_t)(m_pEndReservedRegion - m_pAllocPtr))
    {
        SIZE_T dwSizeToCommit = (m_pAllocPtr + dwMinSize) - m_pPtrToEndOfCommittedRegion;

        if (dwSizeToCommit < m_dwCommitBlockSize)
            dwSizeToCommit = min((SIZE_T)(m_pEndReservedRegion - m_pPtrToEndOfCommittedRegion), (SIZE_T)m_dwCommitBlockSize);

        // Round to page size
        dwSizeToCommit = ALIGN_UP(dwSizeToCommit, GetOsPageSize());

        // Yes, so commit the desired number of reserved pages
        void *pData = ClrVirtualAlloc(m_pPtrToEndOfCommittedRegion, dwSizeToCommit, MEM_COMMIT, (m_Options & LHF_EXECUTABLE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
        if (pData == NULL)
            return FALSE;

        if (m_pPrivatePerfCounter_LoaderBytes)
            *m_pPrivatePerfCounter_LoaderBytes = *m_pPrivatePerfCounter_LoaderBytes + (DWORD) dwSizeToCommit;

        m_dwTotalAlloc += dwSizeToCommit;

        m_pPtrToEndOfCommittedRegion += dwSizeToCommit;
        return TRUE;
    }

    // Need to allocate a new set of reserved pages
    INDEBUG(m_dwDebugWastedBytes += (size_t)(m_pPtrToEndOfCommittedRegion - m_pAllocPtr);)

    // Note: there are unused reserved pages at the end of the current region - can't do much about that.
    // Provide dwMinSize here since UnlockedReservePages will round up the commit size again
    // after adding in the size of the LoaderHeapBlock header.
    return UnlockedReservePages(dwMinSize);
}

void *UnlockedLoaderHeap::UnlockedAllocMem(size_t dwSize
                                           COMMA_INDEBUG(__in const char *szFile)
                                           COMMA_INDEBUG(int lineNum))
{
    CONTRACT(void*)
    {
        INSTANCE_CHECK;
        THROWS;
        GC_NOTRIGGER;
        INJECT_FAULT(ThrowOutOfMemory(););
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;

    void *pResult = UnlockedAllocMem_NoThrow(
        dwSize COMMA_INDEBUG(szFile) COMMA_INDEBUG(lineNum));

    if (pResult == NULL)
        ThrowOutOfMemory();

    RETURN pResult;
}

#ifdef _DEBUG
static DWORD ShouldInjectFault()
{
    static DWORD fInjectFault = 99;

    if (fInjectFault == 99)
        fInjectFault = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_InjectFault) != 0);
    return fInjectFault;
}

#define SHOULD_INJECT_FAULT(return_statement)   \
    do {                                        \
        if (ShouldInjectFault() & 0x1)          \
        {                                       \
            char *a = new (nothrow) char;       \
            if (a == NULL)                      \
            {                                   \
                return_statement;               \
            }                                   \
            delete a;                           \
        }                                       \
    } while (FALSE)

#else

#define SHOULD_INJECT_FAULT(return_statement) do { (void)((void *)0); } while (FALSE)

#endif

void *UnlockedLoaderHeap::UnlockedAllocMem_NoThrow(size_t dwSize
                                                   COMMA_INDEBUG(__in const char *szFile)
                                                   COMMA_INDEBUG(int lineNum))
{
    CONTRACT(void*)
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        INJECT_FAULT(CONTRACT_RETURN NULL;);
        PRECONDITION(dwSize != 0);
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
    }
    CONTRACT_END;

    SHOULD_INJECT_FAULT(RETURN NULL);

    INDEBUG(size_t dwRequestedSize = dwSize;)

    INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));

#ifdef RANDOMIZE_ALLOC
    if (!m_fExplicitControl)
        dwSize += s_random.Next() % 256;
#endif

    dwSize = AllocMem_TotalSize(dwSize, this);

again:

    {
        // Any memory available on the free list?
        void *pData = LoaderHeapFreeBlock::AllocFromFreeList(&m_pFirstFreeBlock, dwSize, TRUE /*fRemoveFromFreeList*/, this);
        if (!pData)
        {
            // Enough bytes available in committed region?
            if (dwSize <= GetBytesAvailCommittedRegion())
            {
                pData = m_pAllocPtr;
                m_pAllocPtr += dwSize;
            }
        }

        if (pData)
        {
#ifdef _DEBUG

            BYTE *pAllocatedBytes = (BYTE *)pData;
#if LOADER_HEAP_DEBUG_BOUNDARY > 0
            // Don't fill the memory we allocated - it is assumed to be zeroed - fill the memory after it
            memset(pAllocatedBytes + dwRequestedSize, 0xEE, LOADER_HEAP_DEBUG_BOUNDARY);
#endif
            if (dwRequestedSize > 0)
            {
                _ASSERTE_MSG(pAllocatedBytes[0] == 0 && memcmp(pAllocatedBytes, pAllocatedBytes + 1, dwRequestedSize - 1) == 0,
                    "LoaderHeap must return zero-initialized memory");
            }

            if (!m_fExplicitControl)
            {
                LoaderHeapValidationTag *pTag = AllocMem_GetTag(pData, dwRequestedSize);
                pTag->m_allocationType  = kAllocMem;
                pTag->m_dwRequestedSize = dwRequestedSize;
                pTag->m_szFile          = szFile;
                pTag->m_lineNum         = lineNum;
            }

            if (m_dwDebugFlags & kCallTracing)
            {
                LoaderHeapSniffer::RecordEvent(this,
                                               kAllocMem,
                                               szFile,
                                               lineNum,
                                               szFile,
                                               lineNum,
                                               pData,
                                               dwRequestedSize,
                                               dwSize
                                               );
            }

#endif

            EtwAllocRequest(this, pData, dwSize);
            RETURN pData;
        }
    }

    // Need to commit some more pages in reserved region.
    // If we run out of pages in the reserved region, ClrVirtualAlloc some more pages
    if (GetMoreCommittedPages(dwSize))
        goto again;

    // We could not satisfy this allocation request
    RETURN NULL;
}

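// The alloc/backout contract in a nutshell (a sketch with a hypothetical size;
// in DEBUG builds the real calls also carry file/line arguments via macros):
// a block must be backed out with the exact size that was requested from
// AllocMem, never the padded total:
//
//      void *pMem = pHeap->UnlockedAllocMem_NoThrow(0x40);
//      if (pMem != NULL && !SomeLaterStepSucceeded())   // hypothetical failure
//      {
//          pHeap->UnlockedBackoutMem(pMem, 0x40);       // same 0x40 as requested
//      }
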
void UnlockedLoaderHeap::UnlockedBackoutMem(void *pMem,
                                            size_t dwRequestedSize
                                            COMMA_INDEBUG(__in const char *szFile)
                                            COMMA_INDEBUG(int lineNum)
                                            COMMA_INDEBUG(__in const char *szAllocFile)
                                            COMMA_INDEBUG(int allocLineNum))
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END;

    // Because the primary use of this function is backout, we'll be nice and
    // define Backout(NULL) to be a legal NOP.
    if (pMem == NULL)
    {
        return;
    }

#ifdef _DEBUG
    {
        DEBUG_ONLY_REGION();

        LoaderHeapValidationTag *pTag = AllocMem_GetTag(pMem, dwRequestedSize);

        if (pTag->m_dwRequestedSize != dwRequestedSize || pTag->m_allocationType != kAllocMem)
        {
            CONTRACT_VIOLATION(ThrowsViolation|FaultViolation); // We're reporting a heap corruption - who cares about violations

            StackSString message;
            message.Printf("HEAP VIOLATION: Invalid BackoutMem() call made at:\n"
                           "\n"
                           " File: %s\n"
                           " Line: %d\n"
                           "\n"
                           "Attempting to free block originally allocated at:\n"
                           "\n"
                           " File: %s\n"
                           " Line: %d\n"
                           "\n"
                           "The arguments to BackoutMem() were:\n"
                           "\n"
                           " Pointer: 0x%p\n"
                           " Size: %lu (0x%lx)\n"
                           "\n"
                           ,szFile
                           ,lineNum
                           ,szAllocFile
                           ,allocLineNum
                           ,pMem
                           ,(ULONG)dwRequestedSize
                           ,(ULONG)dwRequestedSize
                           );


            if (m_dwDebugFlags & kCallTracing)
            {
                message.AppendASCII("*** CALLTRACING ENABLED ***\n");
                LoaderHeapEvent *pEvent = LoaderHeapSniffer::FindEvent(this, pMem);
                if (!pEvent)
                {
                    message.AppendASCII("This pointer doesn't appear to have come from this LoaderHeap.\n");
                }
                else
                {
                    message.AppendASCII(pMem == pEvent->m_pMem ? "We have the following data about this pointer:" : "This pointer points to the middle of the following block:");
                    pEvent->Describe(&message);
                }
            }

            if (pTag->m_dwRequestedSize != dwRequestedSize)
            {
                StackSString buf;
                buf.Printf(
                    "Possible causes:\n"
                    "\n"
                    " - This pointer wasn't allocated from this loaderheap.\n"
                    " - This pointer was allocated by AllocAlignedMem and you didn't adjust for the \"extra.\"\n"
                    " - This pointer has already been freed.\n"
                    " - You passed in the wrong size. You must pass the exact same size you passed to AllocMem().\n"
                    " - Someone wrote past the end of this block making it appear as if one of the above were true.\n"
                    );
                message.Append(buf);

            }
            else
            {
                message.AppendASCII("This memory block is completely unrecognizable.\n");
            }


            if (!(m_dwDebugFlags & kCallTracing))
            {
                LoaderHeapSniffer::PitchSniffer(&message);
            }

            StackScratchBuffer scratch;
            DbgAssertDialog(szFile, lineNum, (char*) message.GetANSI(scratch));

        }
    }
#endif

    size_t dwSize = AllocMem_TotalSize(dwRequestedSize, this);

#ifdef _DEBUG
    if (m_dwDebugFlags & kCallTracing)
    {
        DEBUG_ONLY_REGION();

        LoaderHeapValidationTag *pTag = m_fExplicitControl ? NULL : AllocMem_GetTag(pMem, dwRequestedSize);


        LoaderHeapSniffer::RecordEvent(this,
                                       kFreedMem,
                                       szFile,
                                       lineNum,
                                       (pTag && (allocLineNum < 0)) ? pTag->m_szFile : szAllocFile,
                                       (pTag && (allocLineNum < 0)) ? pTag->m_lineNum : allocLineNum,
                                       pMem,
                                       dwRequestedSize,
                                       dwSize
                                       );
    }
#endif

    if (m_pAllocPtr == ( ((BYTE*)pMem) + dwSize ))
    {
        // Cool. This was the last block allocated. We can just undo the allocation instead
        // of going to the freelist.
        memset(pMem, 0x00, dwSize); // Fill freed region with 0
        m_pAllocPtr = (BYTE*)pMem;
    }
    else
    {
        LoaderHeapFreeBlock::InsertFreeBlock(&m_pFirstFreeBlock, pMem, dwSize, this);
    }

}


// Allocates memory aligned on power-of-2 boundary.
//
// The return value is a pointer that's guaranteed to be aligned.
//
// FREEING THIS BLOCK: Underneath, the actual block allocated may
// be larger and start at an address prior to the one you got back.
// It is this adjusted size and pointer that you pass to BackoutMem.
// The required adjustment is passed back thru the pdwExtra pointer.
//
// Here is how to properly backout the memory:
//
//    size_t dwExtra;
//    void *pMem = UnlockedAllocAlignedMem(dwRequestedSize, alignment, &dwExtra);
//    _ASSERTE( 0 == (pMem & (alignment - 1)) );
//    UnlockedBackoutMem( ((BYTE*)pMem) - dwExtra, dwRequestedSize + dwExtra );
//
// If you use the AllocMemHolder or AllocMemTracker, all this is taken care of
// behind the scenes.
//
//
void *UnlockedLoaderHeap::UnlockedAllocAlignedMem_NoThrow(size_t dwRequestedSize,
                                                          size_t alignment,
                                                          size_t *pdwExtra
                                                          COMMA_INDEBUG(__in const char *szFile)
                                                          COMMA_INDEBUG(int lineNum))
{
    CONTRACT(void*)
    {
        NOTHROW;

        // Macro syntax can't handle this INJECT_FAULT expression - we'll use a precondition instead
        //INJECT_FAULT( do{ if (*pdwExtra) {*pdwExtra = 0} RETURN NULL; } while(0) );

        PRECONDITION( alignment != 0 );
        PRECONDITION(0 == (alignment & (alignment - 1))); // require power of 2
        POSTCONDITION( (RETVAL) ?
                       (0 == ( ((UINT_PTR)(RETVAL)) & (alignment - 1))) :  // If non-null, pointer must be aligned
                       (pdwExtra == NULL || 0 == *pdwExtra)                // or else *pdwExtra must be set to 0
                     );
    }
    CONTRACT_END

    STATIC_CONTRACT_FAULT;

    // Set default value
    if (pdwExtra)
    {
        *pdwExtra = 0;
    }

    SHOULD_INJECT_FAULT(RETURN NULL);

    void *pResult;

    INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));

    // Check for overflow if we align the allocation
    if (dwRequestedSize + alignment < dwRequestedSize)
    {
        RETURN NULL;
    }

    // We don't know how much "extra" we need to satisfy the alignment until we know
    // which address will be handed out which in turn we don't know because we don't
    // know whether the allocation will fit within the current reserved range.
    //
    // Thus, we'll request as much heap growth as is needed for the worst case (extra == alignment)
    size_t dwRoomSize = AllocMem_TotalSize(dwRequestedSize + alignment, this);
    if (dwRoomSize > GetBytesAvailCommittedRegion())
    {
        if (!GetMoreCommittedPages(dwRoomSize))
        {
            RETURN NULL;
        }
    }

    pResult = m_pAllocPtr;

    size_t extra = alignment - ((size_t)pResult & ((size_t)alignment - 1));

// On DEBUG, we force a non-zero extra so people don't forget to adjust for it on backout
#ifndef _DEBUG
    if (extra == alignment)
    {
        extra = 0;
    }
#endif

    S_SIZE_T cbAllocSize = S_SIZE_T( dwRequestedSize ) + S_SIZE_T( extra );
    if( cbAllocSize.IsOverflow() )
    {
        RETURN NULL;
    }

    size_t dwSize = AllocMem_TotalSize( cbAllocSize.Value(), this);
    m_pAllocPtr += dwSize;


    ((BYTE*&)pResult) += extra;

#ifdef _DEBUG
    BYTE *pAllocatedBytes = (BYTE *)pResult;
#if LOADER_HEAP_DEBUG_BOUNDARY > 0
    // Don't fill the entire memory - we assume it is all zeroed - just the memory after our alloc
    memset(pAllocatedBytes + dwRequestedSize, 0xee, LOADER_HEAP_DEBUG_BOUNDARY);
#endif

    if (dwRequestedSize != 0)
    {
        _ASSERTE_MSG(pAllocatedBytes[0] == 0 && memcmp(pAllocatedBytes, pAllocatedBytes + 1, dwRequestedSize - 1) == 0,
            "LoaderHeap must return zero-initialized memory");
    }

    if (m_dwDebugFlags & kCallTracing)
    {
        LoaderHeapSniffer::RecordEvent(this,
                                       kAllocMem,
                                       szFile,
                                       lineNum,
                                       szFile,
                                       lineNum,
                                       ((BYTE*)pResult) - extra,
                                       dwRequestedSize + extra,
                                       dwSize
                                       );
    }

    EtwAllocRequest(this, pResult, dwSize);

    if (!m_fExplicitControl)
    {
        LoaderHeapValidationTag *pTag = AllocMem_GetTag(((BYTE*)pResult) - extra, dwRequestedSize + extra);
        pTag->m_allocationType  = kAllocMem;
        pTag->m_dwRequestedSize = dwRequestedSize + extra;
        pTag->m_szFile          = szFile;
        pTag->m_lineNum         = lineNum;
    }
#endif //_DEBUG

    if (pdwExtra)
    {
        *pdwExtra = extra;
    }

    RETURN pResult;

}



void *UnlockedLoaderHeap::UnlockedAllocAlignedMem(size_t dwRequestedSize,
                                                  size_t dwAlignment,
                                                  size_t *pdwExtra
                                                  COMMA_INDEBUG(__in const char *szFile)
                                                  COMMA_INDEBUG(int lineNum))
{
    CONTRACTL
    {
        THROWS;
        INJECT_FAULT(ThrowOutOfMemory());
    }
    CONTRACTL_END

    void *pResult = UnlockedAllocAlignedMem_NoThrow(dwRequestedSize,
                                                    dwAlignment,
                                                    pdwExtra
                                                    COMMA_INDEBUG(szFile)
                                                    COMMA_INDEBUG(lineNum));

    if (!pResult)
    {
        ThrowOutOfMemory();
    }

    return pResult;

}



void *UnlockedLoaderHeap::UnlockedAllocMemForCode_NoThrow(size_t dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment, size_t dwReserveForJumpStubs)
{
    CONTRACT(void*)
    {
        INSTANCE_CHECK;
        NOTHROW;
        INJECT_FAULT(CONTRACT_RETURN NULL;);
        PRECONDITION(0 == (dwCodeAlignment & (dwCodeAlignment - 1))); // require power of 2
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
    }
    CONTRACT_END;

    _ASSERTE(m_fExplicitControl);

    INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));

    // We don't know how much "extra" we need to satisfy the alignment until we know
    // which address will be handed out which in turn we don't know because we don't
    // know whether the allocation will fit within the current reserved range.
    //
    // Thus, we'll request as much heap growth as is needed for the worst case (we request an extra dwCodeAlignment - 1 bytes)

    S_SIZE_T cbAllocSize = S_SIZE_T(dwHeaderSize) + S_SIZE_T(dwCodeSize) + S_SIZE_T(dwCodeAlignment - 1) + S_SIZE_T(dwReserveForJumpStubs);
    if( cbAllocSize.IsOverflow() )
    {
        RETURN NULL;
    }

    if (cbAllocSize.Value() > GetBytesAvailCommittedRegion())
    {
        if (GetMoreCommittedPages(cbAllocSize.Value()) == FALSE)
        {
            RETURN NULL;
        }
    }

    BYTE *pResult = (BYTE *)ALIGN_UP(m_pAllocPtr + dwHeaderSize, dwCodeAlignment);
    EtwAllocRequest(this, pResult, (pResult + dwCodeSize) - m_pAllocPtr);
    m_pAllocPtr = pResult + dwCodeSize;

    RETURN pResult;
}


#endif // #ifndef DACCESS_COMPILE

BOOL UnlockedLoaderHeap::IsExecutable()
{
    return (m_Options & LHF_EXECUTABLE);
}

#ifdef DACCESS_COMPILE

void UnlockedLoaderHeap::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    WRAPPER_NO_CONTRACT;

    DAC_ENUM_DTHIS();

    PTR_LoaderHeapBlock block = m_pFirstBlock;
    while (block.IsValid())
    {
        // All we know is the virtual size of this block. We don't have any way to tell how
        // much of this space was actually committed, so don't expect that this will always
        // succeed.
        // @dbgtodo : Ideally we'd reduce the risk of corruption causing problems here.
        //            We could extend LoaderHeapBlock to track a commit size,
        //            but it seems wasteful (eg. makes each AppDomain objects 32 bytes larger on x64).
        TADDR addr = dac_cast<TADDR>(block->pVirtualAddress);
        TSIZE_T size = block->dwVirtualSize;
        DacEnumMemoryRegion(addr, size, false);

        block = block->pNext;
    }
}

#endif // #ifdef DACCESS_COMPILE


void UnlockedLoaderHeap::EnumPageRegions (EnumPageRegionsCallback *pCallback, PTR_VOID pvArgs)
{
    WRAPPER_NO_CONTRACT;

    PTR_LoaderHeapBlock block = m_pFirstBlock;
    while (block)
    {
        if ((*pCallback)(pvArgs, block->pVirtualAddress, block->dwVirtualSize))
        {
            break;
        }

        block = block->pNext;
    }
}


#ifdef _DEBUG

void UnlockedLoaderHeap::DumpFreeList()
{
    LIMITED_METHOD_CONTRACT;
    if (m_pFirstFreeBlock == NULL)
    {
        printf("FREEDUMP: FreeList is empty\n");
    }
    else
    {
        LoaderHeapFreeBlock *pBlock = m_pFirstFreeBlock;
        while (pBlock != NULL)
        {
            size_t dwsize = pBlock->m_dwSize;
            BOOL ccbad = FALSE;
            BOOL sizeunaligned = FALSE;
            BOOL sizesmall = FALSE;

            if ( 0 != (dwsize & ALLOC_ALIGN_CONSTANT) )
            {
                sizeunaligned = TRUE;
            }
            if ( dwsize < sizeof(LoaderHeapFreeBlock))
            {
                sizesmall = TRUE;
            }

            for (size_t i = sizeof(LoaderHeapFreeBlock); i < dwsize; i++)
            {
                if ( ((BYTE*)pBlock)[i] != 0xcc )
                {
                    ccbad = TRUE;
                    break;
                }
            }

            printf("Addr = %pxh, Size = %lxh", pBlock, ((ULONG)dwsize));
            if (ccbad) printf(" *** ERROR: NOT CC'd ***");
            if (sizeunaligned) printf(" *** ERROR: size not a multiple of ALLOC_ALIGN_CONSTANT ***");
            if (sizesmall) printf(" *** ERROR: size smaller than sizeof(LoaderHeapFreeBlock) ***");
            printf("\n");

            pBlock = pBlock->m_pNext;
        }
    }
}


void UnlockedLoaderHeap::UnlockedClearEvents()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapSniffer::ClearEvents(this);
}

void UnlockedLoaderHeap::UnlockedCompactEvents()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapSniffer::CompactEvents(this);
}

void UnlockedLoaderHeap::UnlockedPrintEvents()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapSniffer::PrintEvents(this);
}


#endif //_DEBUG

//************************************************************************************
// LOADERHEAP SNIFFER METHODS
//************************************************************************************
#ifdef _DEBUG

/*static*/ VOID LoaderHeapSniffer::RecordEvent(UnlockedLoaderHeap *pHeap,
                                               AllocationType allocationType,
                                               __in const char *szFile,
                                               int lineNum,
                                               __in const char *szAllocFile,
                                               int allocLineNum,
                                               void *pMem,
                                               size_t dwRequestedSize,
                                               size_t dwSize
                                               )
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT; //If we OOM in here, we just throw the event away.
    }
    CONTRACTL_END

    LoaderHeapEvent *pNewEvent;
    {
        {
            FAULT_NOT_FATAL();
            pNewEvent = new (nothrow) LoaderHeapEvent;
        }
        if (!pNewEvent)
        {
            if (!(pHeap->m_dwDebugFlags & pHeap->kEncounteredOOM))
            {
                pHeap->m_dwDebugFlags |= pHeap->kEncounteredOOM;
                _ASSERTE(!"LOADERHEAPSNIFFER: Failed allocation of LoaderHeapEvent. Call tracing information will be incomplete.");
            }
        }
        else
        {
            pNewEvent->m_allocationType  = allocationType;
            pNewEvent->m_szFile          = szFile;
            pNewEvent->m_lineNum         = lineNum;
            pNewEvent->m_szAllocFile     = szAllocFile;
            pNewEvent->m_allocLineNum    = allocLineNum;
            pNewEvent->m_pMem            = pMem;
            pNewEvent->m_dwRequestedSize = dwRequestedSize;
            pNewEvent->m_dwSize          = dwSize;

            pNewEvent->m_pNext           = pHeap->m_pEventList;
            pHeap->m_pEventList          = pNewEvent;
        }
    }
}



/*static*/
void LoaderHeapSniffer::ValidateFreeList(UnlockedLoaderHeap *pHeap)
{
    CANNOT_HAVE_CONTRACT;

    // No contract. This routine is only called if we've AV'd inside the
    // loaderheap. The system is already toast. We're trying to be a hero
    // and produce the best diagnostic info we can. Last thing we need here
    // is a secondary assert inside the contract stuff.
    //
    // This contract violation is permanent.
    CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation); // This violation won't be removed

    LoaderHeapFreeBlock *pFree = pHeap->m_pFirstFreeBlock;
    LoaderHeapFreeBlock *pPrev = NULL;


    void                *pBadAddr   = NULL;
    LoaderHeapFreeBlock *pProbeThis = NULL;
    const char          *pExpected  = NULL;

    while (pFree != NULL)
    {
        if ( 0 != ( ((ULONG_PTR)pFree) & ALLOC_ALIGN_CONSTANT ))
        {
            // Not aligned - can't be a valid freeblock. Most likely we followed a bad pointer from the previous block.
            pProbeThis = pPrev;
            pBadAddr = pPrev ? &(pPrev->m_pNext) : &(pHeap->m_pFirstFreeBlock);
            pExpected = "a pointer to a valid LoaderHeapFreeBlock";
            break;
        }

        size_t dwSize = pFree->m_dwSize;
        if (dwSize < AllocMem_TotalSize(1, pHeap) ||
            0 != (dwSize & ALLOC_ALIGN_CONSTANT))
        {
            // Size is not a valid value (out of range or unaligned.)
            pProbeThis = pFree;
            pBadAddr = &(pFree->m_dwSize);
            pExpected = "a valid block size (multiple of pointer size)";
            break;
        }

        size_t i;
        for (i = sizeof(LoaderHeapFreeBlock); i < dwSize; i++)
        {
            if ( ((BYTE*)pFree)[i] != 0xcc )
            {
                pProbeThis = pFree;
                pBadAddr = i + ((BYTE*)pFree);
                pExpected = "0xcc (our fill value for free blocks)";
                break;
            }
        }
        if (i != dwSize)
        {
            break;
        }



        pPrev = pFree;
        pFree = pFree->m_pNext;
    }

    if (pFree == NULL)
    {
        return; // No problems found
    }

    {
        StackSString message;

        message.Printf("A loaderheap freelist has been corrupted. The bytes at or near address 0x%p appear to have been overwritten. We expected to see %s here.\n"
                       "\n"
                       " LoaderHeap: 0x%p\n"
                       " Suspect address at: 0x%p\n"
                       " Start of suspect freeblock: 0x%p\n"
                       "\n"
                       , pBadAddr
                       , pExpected
                       , pHeap
                       , pBadAddr
                       , pProbeThis
                       );

        if (!(pHeap->m_dwDebugFlags & pHeap->kCallTracing))
        {
            message.AppendASCII("\nThe usual reason is that someone wrote past the end of a block or wrote into a block after freeing it."
                                "\nOf course, the culprit is long gone so it's probably too late to debug this now. Try turning on call-tracing"
                                "\nand reproing. We can attempt to find out who last owned the surrounding pieces of memory."
                                "\n"
                                "\nTo turn on call-tracing, set the following registry DWORD value:"
                                "\n"
                                "\n HKLM\\Software\\Microsoft\\.NETFramework\\LoaderHeapCallTracing = 1"
                                "\n"
                                );

        }
        else
        {
            LoaderHeapEvent *pBadAddrEvent = FindEvent(pHeap, pBadAddr);

            message.AppendASCII("*** CALL TRACING ENABLED ***\n\n");

            if (pBadAddrEvent)
            {
                message.AppendASCII("\nThe last known owner of the corrupted address was:\n");
                pBadAddrEvent->Describe(&message);
            }
            else
            {
                message.AppendASCII("\nNo known owner of last corrupted address.\n");
            }

            LoaderHeapEvent *pPrevEvent = FindEvent(pHeap, ((BYTE*)pProbeThis) - 1);

            int count = 3;
            while (count-- &&
                   pPrevEvent != NULL &&
                   ( ((UINT_PTR)pProbeThis) - ((UINT_PTR)(pPrevEvent->m_pMem)) + pPrevEvent->m_dwSize ) < 1024)
            {
                message.AppendASCII("\nThis block is located close to the corruption point. ");
                if (pPrevEvent->QuietValidate())
                {
                    message.AppendASCII("If it was overrun, it might have caused this.");
                }
                else
                {
                    message.AppendASCII("*** CORRUPTION DETECTED IN THIS BLOCK ***");
                }
                pPrevEvent->Describe(&message);
                pPrevEvent = FindEvent(pHeap, ((BYTE*)(pPrevEvent->m_pMem)) - 1);
            }


        }

        StackScratchBuffer scratch;
        DbgAssertDialog(__FILE__, __LINE__, (char*) message.GetANSI(scratch));

    }



}


BOOL LoaderHeapEvent::QuietValidate()
{
    WRAPPER_NO_CONTRACT;

    if (m_allocationType == kAllocMem)
    {
        LoaderHeapValidationTag *pTag = AllocMem_GetTag(m_pMem, m_dwRequestedSize);
        return (pTag->m_allocationType == m_allocationType && pTag->m_dwRequestedSize == m_dwRequestedSize);
    }
    else
    {
        // We can't easily validate freed blocks.
        return TRUE;
    }
}


#endif //_DEBUG

#ifndef DACCESS_COMPILE

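// A minimal sketch of how AllocMemTracker is typically used (the AllocMem
// wrapper and its S_SIZE_T argument shown here live in loaderheap.h and are
// assumptions of this illustration, as are pHeap and cbSize): every tracked
// allocation is automatically backed out by the destructor unless the caller
// reaches SuppressRelease().
//
//      {
//          AllocMemTracker amTracker;
//          void *pMem = amTracker.Track(pHeap->AllocMem(S_SIZE_T(cbSize)));
//          ... further steps that may throw or fail ...
//          amTracker.SuppressRelease();   // success: keep all tracked memory
//      }   // on early exit, ~AllocMemTracker backs out pMem in LIFO order
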
AllocMemTracker::AllocMemTracker()
{
    CONTRACTL
    {
        NOTHROW;
        FORBID_FAULT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END

    m_FirstBlock.m_pNext = NULL;
    m_FirstBlock.m_nextFree = 0;
    m_pFirstBlock = &m_FirstBlock;

    m_fReleased = FALSE;
}

AllocMemTracker::~AllocMemTracker()
{
    CONTRACTL
    {
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END

    if (!m_fReleased)
    {
        AllocMemTrackerBlock *pBlock = m_pFirstBlock;
        while (pBlock)
        {
            // Do the loop in reverse - loaderheaps work best if
            // we allocate and backout in LIFO order.
            for (int i = pBlock->m_nextFree - 1; i >= 0; i--)
            {
                AllocMemTrackerNode *pNode = &(pBlock->m_Node[i]);
                pNode->m_pHeap->RealBackoutMem(pNode->m_pMem
                                               ,pNode->m_dwRequestedSize
#ifdef _DEBUG
                                               ,__FILE__
                                               ,__LINE__
                                               ,pNode->m_szAllocFile
                                               ,pNode->m_allocLineNum
#endif
                                               );

            }

            pBlock = pBlock->m_pNext;
        }
    }


    AllocMemTrackerBlock *pBlock = m_pFirstBlock;
    while (pBlock != &m_FirstBlock)
    {
        AllocMemTrackerBlock *pNext = pBlock->m_pNext;
        delete pBlock;
        pBlock = pNext;
    }

    INDEBUG(memset(this, 0xcc, sizeof(*this));)
}

void *AllocMemTracker::Track(TaggedMemAllocPtr tmap)
{
    CONTRACTL
    {
        THROWS;
        INJECT_FAULT(ThrowOutOfMemory(););
    }
    CONTRACTL_END

    _ASSERTE(this);

    void *pv = Track_NoThrow(tmap);
    if (!pv)
    {
        ThrowOutOfMemory();
    }
    return pv;
}

void *AllocMemTracker::Track_NoThrow(TaggedMemAllocPtr tmap)
{
    CONTRACTL
    {
        NOTHROW;
        INJECT_FAULT(return NULL;);
    }
    CONTRACTL_END

    _ASSERTE(this);

    // Calling Track() after calling SuppressRelease() is almost certainly a bug. You're supposed to call SuppressRelease() only after you're
    // sure no subsequent failure will force you to backout the memory.
    _ASSERTE( (!m_fReleased) && "You've already called SuppressRelease on this AllocMemTracker which implies you've passed your point of no failure. Why are you still doing allocations?");


    if (tmap.m_pMem != NULL)
    {
        AllocMemHolder<void*> holder(tmap); // If anything goes wrong in here, this holder will backout the allocation for the caller.
        if (m_fReleased)
        {
            holder.SuppressRelease();
        }
        AllocMemTrackerBlock *pBlock = m_pFirstBlock;
        if (pBlock->m_nextFree == kAllocMemTrackerBlockSize)
        {
            AllocMemTrackerBlock *pNewBlock = new (nothrow) AllocMemTrackerBlock;
            if (!pNewBlock)
            {
                return NULL;
            }

            pNewBlock->m_pNext = m_pFirstBlock;
            pNewBlock->m_nextFree = 0;

            m_pFirstBlock = pNewBlock;

            pBlock = pNewBlock;
        }

        // From here on, we can't fail
        pBlock->m_Node[pBlock->m_nextFree].m_pHeap           = tmap.m_pHeap;
        pBlock->m_Node[pBlock->m_nextFree].m_pMem            = tmap.m_pMem;
        pBlock->m_Node[pBlock->m_nextFree].m_dwRequestedSize = tmap.m_dwRequestedSize;
#ifdef _DEBUG
        pBlock->m_Node[pBlock->m_nextFree].m_szAllocFile     = tmap.m_szFile;
        pBlock->m_Node[pBlock->m_nextFree].m_allocLineNum    = tmap.m_lineNum;
#endif

        pBlock->m_nextFree++;

        holder.SuppressRelease();

    }
    return (void *)tmap;

}


void AllocMemTracker::SuppressRelease()
{
    LIMITED_METHOD_CONTRACT;

    _ASSERTE(this);

    m_fReleased = TRUE;
}

#endif //#ifndef DACCESS_COMPILE