// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// stublink.cpp
//



#include "common.h"

#include "threads.h"
#include "excep.h"
#include "stublink.h"
#include "perfcounters.h"
#include "stubgen.h"
#include "stublink.inl"

#include "rtlfunctions.h"

#define S_BYTEPTR(x)    S_SIZE_T((SIZE_T)(x))

#ifndef DACCESS_COMPILE


//************************************************************************
// CodeElement
//
// There are two types of CodeElements: CodeRuns (a stream of uninterpreted
// code bytes) and LabelRefs (an instruction containing
// a fixup.)
//************************************************************************
struct CodeElement
{
    enum CodeElementType {
        kCodeRun  = 0,
        kLabelRef = 1,
    };


    CodeElementType     m_type;     // kCodeRun or kLabelRef
    CodeElement        *m_next;     // ptr to next CodeElement

    // Used as workspace during Link(): holds the offset relative to
    // the start of the final stub.
    UINT                m_globaloffset;
    UINT                m_dataoffset;
};


//************************************************************************
// CodeRun: A run of uninterrupted code bytes.
//************************************************************************

#ifdef _DEBUG
#define CODERUNSIZE 3
#else
#define CODERUNSIZE 32
#endif

struct CodeRun : public CodeElement
{
    UINT    m_numcodebytes;             // how many bytes are actually used
    BYTE    m_codebytes[CODERUNSIZE];
};

//************************************************************************
// LabelRef: An instruction containing an embedded label reference
//************************************************************************
struct LabelRef : public CodeElement
{
    // provides platform-specific information about the instruction
    InstructionFormat   *m_pInstructionFormat;

    // a variation code (interpretation is specific to the InstructionFormat)
    // typically used to customize an instruction (e.g. with a condition
    // code.)
    UINT                 m_variationCode;


    CodeLabel           *m_target;

    // Workspace during the link phase
    UINT                 m_refsize;


    // Pointer to next LabelRef
    LabelRef            *m_nextLabelRef;
};


//************************************************************************
// IntermediateUnwindInfo
//************************************************************************

#ifdef STUBLINKER_GENERATES_UNWIND_INFO


#ifdef _TARGET_AMD64_
// List of unwind operations, queued in StubLinker::m_pUnwindInfoList.
struct IntermediateUnwindInfo
{
    IntermediateUnwindInfo *pNext;
    CodeRun *pCodeRun;
    UINT LocalOffset;
    UNWIND_CODE rgUnwindCode[1];    // variable length, depends on first entry's UnwindOp
};
#endif // _TARGET_AMD64_


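// Global list of stub heap segments with registered unwind info,
// protected by g_StubUnwindInfoHeapSegmentsCrst.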
StubUnwindInfoHeapSegment *g_StubHeapSegments;
CrstStatic g_StubUnwindInfoHeapSegmentsCrst;
#ifdef _DEBUG  // for unit test
void *__DEBUG__g_StubHeapSegments = &g_StubHeapSegments;
#endif


//
// Callback registered via RtlInstallFunctionTableCallback.  Called by
// RtlpLookupDynamicFunctionEntry to locate RUNTIME_FUNCTION entry for a PC
// found within a portion of a heap that contains stub code.
//
T_RUNTIME_FUNCTION*
FindStubFunctionEntry (
    WIN64_ONLY(IN ULONG64    ControlPc)
    NOT_WIN64(IN ULONG       ControlPc),
    IN PVOID                 Context
    )
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
        SO_TOLERANT;
    }
    CONTRACTL_END

    CONSISTENCY_CHECK(DYNFNTABLE_STUB == IdentifyDynamicFunctionTableTypeFromContext(Context));

    StubUnwindInfoHeapSegment *pStubHeapSegment = (StubUnwindInfoHeapSegment*)DecodeDynamicFunctionTableContext(Context);

    //
    // The RUNTIME_FUNCTION entry contains ULONG offsets relative to the
    // segment base.  Stub::EmitUnwindInfo ensures that this cast is valid.
    //
    ULONG RelativeAddress = (ULONG)((BYTE*)ControlPc - pStubHeapSegment->pbBaseAddress);

    LOG((LF_STUBS, LL_INFO100000, "ControlPc %p, RelativeAddress 0x%x, pStubHeapSegment %p, pStubHeapSegment->pbBaseAddress %p\n",
         ControlPc,
         RelativeAddress,
         pStubHeapSegment,
         pStubHeapSegment->pbBaseAddress));

    //
    // Search this segment's list of stubs for an entry that includes the
    // segment-relative offset.
    //
    for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList;
         pHeader;
         pHeader = pHeader->pNext)
    {
        // The entry points are in increasing address order.
        if (RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(&pHeader->FunctionEntry))
        {
            T_RUNTIME_FUNCTION *pCurFunction = &pHeader->FunctionEntry;
            T_RUNTIME_FUNCTION *pPrevFunction = NULL;

            LOG((LF_STUBS, LL_INFO100000, "pCurFunction %p, pCurFunction->BeginAddress 0x%x, pCurFunction->EndAddress 0x%x\n",
                 pCurFunction,
                 RUNTIME_FUNCTION__BeginAddress(pCurFunction),
                 RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress)));

            CONSISTENCY_CHECK((RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress) > RUNTIME_FUNCTION__BeginAddress(pCurFunction)));
            CONSISTENCY_CHECK((!pPrevFunction || RUNTIME_FUNCTION__EndAddress(pPrevFunction, (TADDR)pStubHeapSegment->pbBaseAddress) <= RUNTIME_FUNCTION__BeginAddress(pCurFunction)));

            // The entry points are in increasing address order.  They're
            // also contiguous, so after we're sure it's after the start of
            // the first function (checked above), we only need to test
            // the end address.
            if (RelativeAddress < RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress))
            {
                CONSISTENCY_CHECK((RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(pCurFunction)));

                return pCurFunction;
            }
        }
    }

    //
    // Return NULL to indicate that there is no RUNTIME_FUNCTION/unwind
    // information for this offset.
    //
    return NULL;
}


bool UnregisterUnwindInfoInLoaderHeapCallback (PVOID pvArgs, PVOID pvAllocationBase, SIZE_T cbReserved)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    //
    // There may be multiple StubUnwindInfoHeapSegments associated with a region.
    //

    LOG((LF_STUBS, LL_INFO1000, "Looking for stub unwind info for LoaderHeap segment %p size %p\n", pvAllocationBase, cbReserved));

    CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);

    StubUnwindInfoHeapSegment *pStubHeapSegment;
    for (StubUnwindInfoHeapSegment **ppPrevStubHeapSegment = &g_StubHeapSegments;
         (pStubHeapSegment = *ppPrevStubHeapSegment); )
    {
        LOG((LF_STUBS, LL_INFO10000, "    have unwind info for address %p size %p\n", pStubHeapSegment->pbBaseAddress, pStubHeapSegment->cbSegment));

        // If the heap region ends before the stub segment
        if ((BYTE*)pvAllocationBase + cbReserved <= pStubHeapSegment->pbBaseAddress)
        {
            // The list is ordered, so the address range is between segments
            break;
        }

        // The given heap segment base address may fall within a prereserved
        // region that was given to the heap when the heap was constructed, so
        // pvAllocationBase may be > pbBaseAddress.  Also, there could be
        // multiple segments for each heap region, so pvAllocationBase may be
        // < pbBaseAddress.  So...there is no meaningful relationship between
        // pvAllocationBase and pbBaseAddress.

        // If the heap region starts before the end of the stub segment
        if ((BYTE*)pvAllocationBase < pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment)
        {
            _ASSERTE((BYTE*)pvAllocationBase + cbReserved <= pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);

            DeleteEEFunctionTable(pStubHeapSegment);
#ifdef _TARGET_AMD64_
            if (pStubHeapSegment->pUnwindInfoTable != 0)
                delete pStubHeapSegment->pUnwindInfoTable;
#endif
            *ppPrevStubHeapSegment = pStubHeapSegment->pNext;

            delete pStubHeapSegment;
        }
        else
        {
            ppPrevStubHeapSegment = &pStubHeapSegment->pNext;
        }
    }

    return false; // Keep enumerating
}


VOID UnregisterUnwindInfoInLoaderHeap (UnlockedLoaderHeap *pHeap)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        PRECONDITION(pHeap->m_fPermitStubsWithUnwindInfo);
    }
    CONTRACTL_END;

    pHeap->EnumPageRegions(&UnregisterUnwindInfoInLoaderHeapCallback, NULL /* pvArgs */);

#ifdef _DEBUG
    pHeap->m_fStubUnwindInfoUnregistered = TRUE;
#endif // _DEBUG
}


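//
// Keeps alive stubs whose allocations were rejected by EmitUnwindInfo
// because their code or unwind info would have straddled a segment
// boundary.  Holding the rejected allocations reserved forces the next
// attempt to land elsewhere; the stubs are released when this list is
// destructed.
//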
class StubUnwindInfoSegmentBoundaryReservationList
{
    struct ReservationList
    {
        ReservationList *pNext;

        static ReservationList *FromStub (Stub *pStub)
        {
            return (ReservationList*)(pStub+1);
        }

        Stub *GetStub ()
        {
            return (Stub*)this - 1;
        }
    };

    ReservationList *m_pList;

public:

    StubUnwindInfoSegmentBoundaryReservationList ()
    {
        LIMITED_METHOD_CONTRACT;

        m_pList = NULL;
    }

    ~StubUnwindInfoSegmentBoundaryReservationList ()
    {
        LIMITED_METHOD_CONTRACT;

        ReservationList *pList = m_pList;
        while (pList)
        {
            ReservationList *pNext = pList->pNext;

            pList->GetStub()->DecRef();

            pList = pNext;
        }
    }

    void AddStub (Stub *pStub)
    {
        LIMITED_METHOD_CONTRACT;

        ReservationList *pList = ReservationList::FromStub(pStub);

        pList->pNext = m_pList;
        m_pList = pList;
    }
};


#endif // STUBLINKER_GENERATES_UNWIND_INFO


//************************************************************************
// StubLinker
//************************************************************************

//---------------------------------------------------------------
// Construction
//---------------------------------------------------------------
StubLinker::StubLinker()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    m_pCodeElements = NULL;
    m_pFirstCodeLabel = NULL;
    m_pFirstLabelRef = NULL;
    m_pPatchLabel = NULL;
    m_stackSize = 0;
    m_fDataOnly = FALSE;
#ifdef _TARGET_ARM_
    m_fProlog = FALSE;
    m_cCalleeSavedRegs = 0;
    m_cbStackFrame = 0;
    m_fPushArgRegs = FALSE;
#endif
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
#ifdef _DEBUG
    m_pUnwindInfoCheckLabel = NULL;
#endif
#ifdef _TARGET_AMD64_
    m_pUnwindInfoList = NULL;
    m_nUnwindSlots = 0;
    m_fHaveFramePointer = FALSE;
#endif
#ifdef _TARGET_ARM64_
    m_fProlog = FALSE;
    m_cIntRegArgs = 0;
    m_cVecRegArgs = 0;
    m_cCalleeSavedRegs = 0;
    m_cbStackSpace = 0;
#endif
#endif // STUBLINKER_GENERATES_UNWIND_INFO
}



//---------------------------------------------------------------
// Append code bytes.
//---------------------------------------------------------------
VOID StubLinker::EmitBytes(const BYTE *pBytes, UINT numBytes)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CodeElement *pLastCodeElement = GetLastCodeElement();
    while (numBytes != 0) {

        if (pLastCodeElement != NULL &&
            pLastCodeElement->m_type == CodeElement::kCodeRun) {
            CodeRun *pCodeRun = (CodeRun*)pLastCodeElement;
            UINT numbytessrc  = numBytes;
            UINT numbytesdst  = CODERUNSIZE - pCodeRun->m_numcodebytes;
            if (numbytesdst <= numbytessrc) {
                CopyMemory(&(pCodeRun->m_codebytes[pCodeRun->m_numcodebytes]),
                           pBytes,
                           numbytesdst);
                pCodeRun->m_numcodebytes = CODERUNSIZE;
                pLastCodeElement = NULL;
                pBytes += numbytesdst;
                numBytes -= numbytesdst;
            } else {
                CopyMemory(&(pCodeRun->m_codebytes[pCodeRun->m_numcodebytes]),
                           pBytes,
                           numbytessrc);
                pCodeRun->m_numcodebytes += numbytessrc;
                pBytes += numbytessrc;
                numBytes = 0;
            }

        } else {
            pLastCodeElement = AppendNewEmptyCodeRun();
        }
    }
}


//---------------------------------------------------------------
// Append code bytes.
//---------------------------------------------------------------
VOID StubLinker::Emit8 (unsigned __int8 val)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CodeRun *pCodeRun = GetLastCodeRunIfAny();
    if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
        *((unsigned __int8 *)(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes)) = val;
        pCodeRun->m_numcodebytes += sizeof(val);
    } else {
        EmitBytes((BYTE*)&val, sizeof(val));
    }
}

//---------------------------------------------------------------
// Append code bytes.
//---------------------------------------------------------------
VOID StubLinker::Emit16(unsigned __int16 val)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CodeRun *pCodeRun = GetLastCodeRunIfAny();
    if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
        SET_UNALIGNED_16(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
        pCodeRun->m_numcodebytes += sizeof(val);
    } else {
        EmitBytes((BYTE*)&val, sizeof(val));
    }
}

//---------------------------------------------------------------
// Append code bytes.
//---------------------------------------------------------------
VOID StubLinker::Emit32(unsigned __int32 val)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CodeRun *pCodeRun = GetLastCodeRunIfAny();
    if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
        SET_UNALIGNED_32(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
        pCodeRun->m_numcodebytes += sizeof(val);
    } else {
        EmitBytes((BYTE*)&val, sizeof(val));
    }
}

//---------------------------------------------------------------
// Append code bytes.
//---------------------------------------------------------------
VOID StubLinker::Emit64(unsigned __int64 val)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CodeRun *pCodeRun = GetLastCodeRunIfAny();
    if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
        SET_UNALIGNED_64(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
        pCodeRun->m_numcodebytes += sizeof(val);
    } else {
        EmitBytes((BYTE*)&val, sizeof(val));
    }
}

//---------------------------------------------------------------
// Append pointer value.
//---------------------------------------------------------------
VOID StubLinker::EmitPtr(const VOID *val)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CodeRun *pCodeRun = GetLastCodeRunIfAny();
    if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
        SET_UNALIGNED_PTR(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, (UINT_PTR)val);
        pCodeRun->m_numcodebytes += sizeof(val);
    } else {
        EmitBytes((BYTE*)&val, sizeof(val));
    }
}


//---------------------------------------------------------------
// Create a new undefined label. Label must be assigned to a code
// location using EmitLabel() prior to final linking.
// Throws COM+ exception on failure.
//---------------------------------------------------------------
CodeLabel* StubLinker::NewCodeLabel()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CodeLabel *pCodeLabel = (CodeLabel*)(m_quickHeap.Alloc(sizeof(CodeLabel)));
    _ASSERTE(pCodeLabel); // QuickHeap throws exceptions rather than returning NULL
    pCodeLabel->m_next = m_pFirstCodeLabel;
    pCodeLabel->m_fExternal = FALSE;
    pCodeLabel->m_fAbsolute = FALSE;
    pCodeLabel->i.m_pCodeRun = NULL;
    m_pFirstCodeLabel = pCodeLabel;
    return pCodeLabel;


}

CodeLabel* StubLinker::NewAbsoluteCodeLabel()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CodeLabel *pCodeLabel = NewCodeLabel();
    pCodeLabel->m_fAbsolute = TRUE;
    return pCodeLabel;
}


//---------------------------------------------------------------
// Sets the label to point to the current "instruction pointer".
// It is invalid to call EmitLabel() twice on
// the same label.
//---------------------------------------------------------------
VOID StubLinker::EmitLabel(CodeLabel* pCodeLabel)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    _ASSERTE(!(pCodeLabel->m_fExternal));       //can't emit an external label
    _ASSERTE(pCodeLabel->i.m_pCodeRun == NULL); //must only emit label once
    CodeRun *pLastCodeRun = GetLastCodeRunIfAny();
    if (!pLastCodeRun) {
        pLastCodeRun = AppendNewEmptyCodeRun();
    }
    pCodeLabel->i.m_pCodeRun    = pLastCodeRun;
    pCodeLabel->i.m_localOffset = pLastCodeRun->m_numcodebytes;
}


//---------------------------------------------------------------
// Combines NewCodeLabel() and EmitLabel() for convenience.
// Throws COM+ exception on failure.
//---------------------------------------------------------------
CodeLabel* StubLinker::EmitNewCodeLabel()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CodeLabel* label = NewCodeLabel();
    EmitLabel(label);
    return label;
}


//---------------------------------------------------------------
// Creates & emits the patch offset label for the stub
//---------------------------------------------------------------
VOID StubLinker::EmitPatchLabel()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    //
    // Note that it's OK to re-emit the patch label;
    // just use the later one.
    //

    m_pPatchLabel = EmitNewCodeLabel();
}

//---------------------------------------------------------------
// Returns final location of label as an offset from the start
// of the stub. Can only be called after linkage.
//---------------------------------------------------------------
UINT32 StubLinker::GetLabelOffset(CodeLabel *pLabel)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    _ASSERTE(!(pLabel->m_fExternal));
    return pLabel->i.m_localOffset + pLabel->i.m_pCodeRun->m_globaloffset;
}


//---------------------------------------------------------------
// Create a new label to an external address.
// Throws COM+ exception on failure.
//---------------------------------------------------------------
CodeLabel* StubLinker::NewExternalCodeLabel(LPVOID pExternalAddress)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;

        PRECONDITION(CheckPointer(pExternalAddress));
    }
    CONTRACTL_END;

    CodeLabel *pCodeLabel = (CodeLabel*)(m_quickHeap.Alloc(sizeof(CodeLabel)));
    _ASSERTE(pCodeLabel); // QuickHeap throws exceptions rather than returning NULL
    pCodeLabel->m_next = m_pFirstCodeLabel;
    pCodeLabel->m_fExternal = TRUE;
    pCodeLabel->m_fAbsolute = FALSE;
    pCodeLabel->e.m_pExternalAddress = pExternalAddress;
    m_pFirstCodeLabel = pCodeLabel;
    return pCodeLabel;
}




//---------------------------------------------------------------
// Append an instruction containing a reference to a label.
//
//      target             - the label being referenced.
//      instructionFormat  - a platform-specific InstructionFormat object
//                           that gives properties about the reference.
//      variationCode      - uninterpreted data passed to the pInstructionFormat methods.
//---------------------------------------------------------------
VOID StubLinker::EmitLabelRef(CodeLabel* target, const InstructionFormat & instructionFormat, UINT variationCode)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    LabelRef *pLabelRef = (LabelRef *)(m_quickHeap.Alloc(sizeof(LabelRef)));
    _ASSERTE(pLabelRef); // m_quickHeap throws an exception rather than returning NULL
    pLabelRef->m_type = LabelRef::kLabelRef;
    pLabelRef->m_pInstructionFormat = (InstructionFormat*)&instructionFormat;
    pLabelRef->m_variationCode = variationCode;
    pLabelRef->m_target = target;

    pLabelRef->m_nextLabelRef = m_pFirstLabelRef;
    m_pFirstLabelRef = pLabelRef;

    AppendCodeElement(pLabelRef);


}





//---------------------------------------------------------------
// Internal helper routine.
//---------------------------------------------------------------
CodeRun *StubLinker::GetLastCodeRunIfAny()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CodeElement *pLastCodeElem = GetLastCodeElement();
    if (pLastCodeElem == NULL || pLastCodeElem->m_type != CodeElement::kCodeRun) {
        return NULL;
    } else {
        return (CodeRun*)pLastCodeElem;
    }
}


//---------------------------------------------------------------
// Internal helper routine.
//---------------------------------------------------------------
CodeRun *StubLinker::AppendNewEmptyCodeRun()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CodeRun *pNewCodeRun = (CodeRun*)(m_quickHeap.Alloc(sizeof(CodeRun)));
    _ASSERTE(pNewCodeRun); // QuickHeap throws exceptions rather than returning NULL
    pNewCodeRun->m_type = CodeElement::kCodeRun;
    pNewCodeRun->m_numcodebytes = 0;
    AppendCodeElement(pNewCodeRun);
    return pNewCodeRun;

}

//---------------------------------------------------------------
// Internal helper routine.
//---------------------------------------------------------------
VOID StubLinker::AppendCodeElement(CodeElement *pCodeElement)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    pCodeElement->m_next = m_pCodeElements;
    m_pCodeElements = pCodeElement;
}



//---------------------------------------------------------------
// Is the current LabelRef's size big enough to reach the target?
//---------------------------------------------------------------
static BOOL LabelCanReach(LabelRef *pLabelRef)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    InstructionFormat *pIF = pLabelRef->m_pInstructionFormat;

    if (pLabelRef->m_target->m_fExternal)
    {
        return pLabelRef->m_pInstructionFormat->CanReach(
                pLabelRef->m_refsize, pLabelRef->m_variationCode, TRUE, (INT_PTR)pLabelRef->m_target->e.m_pExternalAddress);
    }
    else
    {
        UINT targetglobaloffset = pLabelRef->m_target->i.m_pCodeRun->m_globaloffset +
                                  pLabelRef->m_target->i.m_localOffset;
        UINT srcglobaloffset = pLabelRef->m_globaloffset +
                               pIF->GetHotSpotOffset(pLabelRef->m_refsize,
                                                     pLabelRef->m_variationCode);
        INT offset = (INT)(targetglobaloffset - srcglobaloffset);

        return pLabelRef->m_pInstructionFormat->CanReach(
            pLabelRef->m_refsize, pLabelRef->m_variationCode, FALSE, offset);
    }
}

//---------------------------------------------------------------
// Generate the actual stub. The returned stub has a refcount of 1.
// No other methods (other than the destructor) should be called
// after calling Link().
//
// Throws COM+ exception on failure.
//---------------------------------------------------------------
Stub *StubLinker::LinkInterceptor(LoaderHeap *pHeap, Stub* interceptee, void *pRealAddr)
{
    STANDARD_VM_CONTRACT;

    int globalsize = 0;
    int size = CalculateSize(&globalsize);

    _ASSERTE(!pHeap || pHeap->IsExecutable());

    StubHolder<Stub> pStub;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    StubUnwindInfoSegmentBoundaryReservationList ReservedStubs;

    for (;;)
#endif
    {
        pStub = InterceptStub::NewInterceptedStub(pHeap, size, interceptee,
                                                  pRealAddr
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
                                                  , UnwindInfoSize(globalsize)
#endif
                                                  );
        bool fSuccess; fSuccess = EmitStub(pStub, globalsize, pHeap);

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
        if (fSuccess)
        {
            break;
        }
        else
        {
            ReservedStubs.AddStub(pStub);
            pStub.SuppressRelease();
        }
#else
        CONSISTENCY_CHECK_MSG(fSuccess, ("EmitStub should always return true"));
#endif
    }

    return pStub.Extract();
}

//---------------------------------------------------------------
// Generate the actual stub. The returned stub has a refcount of 1.
// No other methods (other than the destructor) should be called
// after calling Link().
//
// Throws COM+ exception on failure.
//---------------------------------------------------------------
Stub *StubLinker::Link(LoaderHeap *pHeap, DWORD flags)
{
    STANDARD_VM_CONTRACT;

    int globalsize = 0;
    int size = CalculateSize(&globalsize);

#ifndef CROSSGEN_COMPILE
    _ASSERTE(!pHeap || pHeap->IsExecutable());
#endif

    StubHolder<Stub> pStub;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    StubUnwindInfoSegmentBoundaryReservationList ReservedStubs;

    for (;;)
#endif
    {
        pStub = Stub::NewStub(
                pHeap,
                size,
                flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
                , UnwindInfoSize(globalsize)
#endif
                );
        ASSERT(pStub != NULL);

        bool fSuccess; fSuccess = EmitStub(pStub, globalsize, pHeap);

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
        if (fSuccess)
        {
            break;
        }
        else
        {
            ReservedStubs.AddStub(pStub);
            pStub.SuppressRelease();
        }
#else
        CONSISTENCY_CHECK_MSG(fSuccess, ("EmitStub should always return true"));
#endif
    }

    return pStub.Extract();
}

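//---------------------------------------------------------------
// Computes the size of the stub's code and data areas, iteratively
// growing label references until each one can reach its target.
// Returns the total size; *pGlobalSize receives the size of the
// (aligned) code portion alone.
//---------------------------------------------------------------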
int StubLinker::CalculateSize(int* pGlobalSize)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    _ASSERTE(pGlobalSize);

#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) && !defined(CROSSGEN_COMPILE)
    if (m_pUnwindInfoCheckLabel)
    {
        EmitLabel(m_pUnwindInfoCheckLabel);
        EmitUnwindInfoCheckSubfunction();
        m_pUnwindInfoCheckLabel = NULL;
    }
#endif

#ifdef _DEBUG
    // Don't want any undefined labels
    for (CodeLabel *pCodeLabel = m_pFirstCodeLabel;
         pCodeLabel != NULL;
         pCodeLabel = pCodeLabel->m_next) {
        if ((!(pCodeLabel->m_fExternal)) && pCodeLabel->i.m_pCodeRun == NULL) {
            _ASSERTE(!"Forgot to define a label before asking StubLinker to link.");
        }
    }
#endif //_DEBUG

    //-------------------------------------------------------------------
    // Tentatively set all of the labelref sizes to their smallest possible
    // value.
    //-------------------------------------------------------------------
    for (LabelRef *pLabelRef = m_pFirstLabelRef;
         pLabelRef != NULL;
         pLabelRef = pLabelRef->m_nextLabelRef) {

        for (UINT bitmask = 1; bitmask <= InstructionFormat::kMax; bitmask = bitmask << 1) {
            if (pLabelRef->m_pInstructionFormat->m_allowedSizes & bitmask) {
                pLabelRef->m_refsize = bitmask;
                break;
            }
        }

    }

    UINT globalsize;
    UINT datasize;
    BOOL fSomethingChanged;
    do {
        fSomethingChanged = FALSE;


        // Layout each code element.
        globalsize = 0;
        datasize = 0;
        CodeElement *pCodeElem;
        for (pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {

            switch (pCodeElem->m_type) {
                case CodeElement::kCodeRun:
                    globalsize += ((CodeRun*)pCodeElem)->m_numcodebytes;
                    break;

                case CodeElement::kLabelRef: {
                    LabelRef *pLabelRef = (LabelRef*)pCodeElem;
                    globalsize += pLabelRef->m_pInstructionFormat->GetSizeOfInstruction( pLabelRef->m_refsize,
                                                                                         pLabelRef->m_variationCode );
                    datasize += pLabelRef->m_pInstructionFormat->GetSizeOfData( pLabelRef->m_refsize,
                                                                                pLabelRef->m_variationCode );
                    }
                    break;

                default:
                    _ASSERTE(0);
            }

            // Record a temporary global offset; this is actually
            // wrong by a fixed value. We'll fix up after we know the
            // size of the entire stub.
            pCodeElem->m_globaloffset = 0 - globalsize;

            // Also record the data offset.  Note that the linked list we
            // walk is in *reverse* order, so we visit the last instruction
            // first; what we record now is in fact the offset from the
            // *end* of the data block.  We fix it up later.
            pCodeElem->m_dataoffset = 0 - datasize;
        }

        // Now fix up the global offsets.
        for (pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
            pCodeElem->m_globaloffset += globalsize;
            pCodeElem->m_dataoffset += datasize;
        }


        // Now, iterate through the LabelRefs and check if any of them
        // have to be resized.
        for (LabelRef *pLabelRef = m_pFirstLabelRef;
             pLabelRef != NULL;
             pLabelRef = pLabelRef->m_nextLabelRef) {


            if (!LabelCanReach(pLabelRef)) {
                fSomethingChanged = TRUE;

                UINT bitmask = pLabelRef->m_refsize << 1;
                // Find the next largest size.
                // (we could be smarter about this and eliminate intermediate
                // sizes based on the tentative offset.)
                for (; bitmask <= InstructionFormat::kMax; bitmask = bitmask << 1) {
                    if (pLabelRef->m_pInstructionFormat->m_allowedSizes & bitmask) {
                        pLabelRef->m_refsize = bitmask;
                        break;
                    }
                }
#ifdef _DEBUG
                if (bitmask > InstructionFormat::kMax) {
                    // CANNOT REACH target even with kMax
                    _ASSERTE(!"Stub instruction cannot reach target: must choose a different instruction!");
                }
#endif
            }
        }


    } while (fSomethingChanged); // Keep iterating until all LabelRefs can reach


    // We now have the correct layout; write out the stub.

    // Compute stub code+data size after aligning data correctly
    if(globalsize % DATA_ALIGNMENT)
        globalsize += (DATA_ALIGNMENT - (globalsize % DATA_ALIGNMENT));

    *pGlobalSize = globalsize;
    return globalsize + datasize;
}

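//---------------------------------------------------------------
// Writes the linked code and data into the stub's blob, resolving
// label references, and fills in the patch offset and unwind info.
// Returns false only if the unwind info cannot be registered for
// this particular allocation (see EmitUnwindInfo).
//---------------------------------------------------------------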
bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
{
    STANDARD_VM_CONTRACT;

    BYTE *pCode = (BYTE*)(pStub->GetBlob());
    BYTE *pData = pCode+globalsize; // start of data area
    {
        int lastCodeOffset = 0;

        // Write out each code element.
        for (CodeElement* pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
            int currOffset = 0;

            switch (pCodeElem->m_type) {
                case CodeElement::kCodeRun:
                    CopyMemory(pCode + pCodeElem->m_globaloffset,
                               ((CodeRun*)pCodeElem)->m_codebytes,
                               ((CodeRun*)pCodeElem)->m_numcodebytes);
                    currOffset = pCodeElem->m_globaloffset + ((CodeRun *)pCodeElem)->m_numcodebytes;
                    break;

                case CodeElement::kLabelRef: {
                    LabelRef *pLabelRef = (LabelRef*)pCodeElem;
                    InstructionFormat *pIF = pLabelRef->m_pInstructionFormat;
                    __int64 fixupval;

                    LPBYTE srcglobaladdr = pCode +
                                           pLabelRef->m_globaloffset +
                                           pIF->GetHotSpotOffset(pLabelRef->m_refsize,
                                                                 pLabelRef->m_variationCode);
                    LPBYTE targetglobaladdr;
                    if (!(pLabelRef->m_target->m_fExternal)) {
                        targetglobaladdr = pCode +
                                           pLabelRef->m_target->i.m_pCodeRun->m_globaloffset +
                                           pLabelRef->m_target->i.m_localOffset;
                    } else {
                        targetglobaladdr = (LPBYTE)(pLabelRef->m_target->e.m_pExternalAddress);
                    }
                    if ((pLabelRef->m_target->m_fAbsolute)) {
                        fixupval = (__int64)(size_t)targetglobaladdr;
                    } else
                        fixupval = (__int64)(targetglobaladdr - srcglobaladdr);

                    pLabelRef->m_pInstructionFormat->EmitInstruction(
                        pLabelRef->m_refsize,
                        fixupval,
                        pCode + pCodeElem->m_globaloffset,
                        pLabelRef->m_variationCode,
                        pData + pCodeElem->m_dataoffset);

                    currOffset =
                        pCodeElem->m_globaloffset +
                        pLabelRef->m_pInstructionFormat->GetSizeOfInstruction( pLabelRef->m_refsize,
                                                                               pLabelRef->m_variationCode );
                    }
                    break;

                default:
                    _ASSERTE(0);
            }
            lastCodeOffset = (currOffset > lastCodeOffset) ? currOffset : lastCodeOffset;
        }

        // Fill in zeros at the end, if necessary
        if (lastCodeOffset < globalsize)
            ZeroMemory(pCode + lastCodeOffset, globalsize - lastCodeOffset);
    }

    // Fill in patch offset, if we have one
    // Note that these offsets are relative to the start of the stub,
    // not the code, so you'll have to add sizeof(Stub) to get to the
    // right spot.
    if (m_pPatchLabel != NULL)
    {
        UINT32 uLabelOffset = GetLabelOffset(m_pPatchLabel);
        _ASSERTE(FitsIn<USHORT>(uLabelOffset));
        pStub->SetPatchOffset(static_cast<USHORT>(uLabelOffset));

        LOG((LF_CORDB, LL_INFO100, "SL::ES: patch offset:0x%x\n",
            pStub->GetPatchOffset()));
    }

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (pStub->HasUnwindInfo())
    {
        if (!EmitUnwindInfo(pStub, globalsize, pHeap))
            return false;
    }
#endif // STUBLINKER_GENERATES_UNWIND_INFO

    if (!m_fDataOnly)
    {
        FlushInstructionCache(GetCurrentProcess(), pCode, globalsize);
    }

    _ASSERTE(m_fDataOnly || DbgIsExecutable(pCode, globalsize));

    return true;
}


#ifdef STUBLINKER_GENERATES_UNWIND_INFO
#if defined(_TARGET_AMD64_)

// See RtlVirtualUnwind in base\ntos\rtl\amd64\exdsptch.c

static_assert_no_msg(kRAX == (FIELD_OFFSET(CONTEXT, Rax) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRCX == (FIELD_OFFSET(CONTEXT, Rcx) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRDX == (FIELD_OFFSET(CONTEXT, Rdx) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRBX == (FIELD_OFFSET(CONTEXT, Rbx) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRBP == (FIELD_OFFSET(CONTEXT, Rbp) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRSI == (FIELD_OFFSET(CONTEXT, Rsi) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRDI == (FIELD_OFFSET(CONTEXT, Rdi) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR8  == (FIELD_OFFSET(CONTEXT, R8 ) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR9  == (FIELD_OFFSET(CONTEXT, R9 ) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR10 == (FIELD_OFFSET(CONTEXT, R10) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR11 == (FIELD_OFFSET(CONTEXT, R11) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR12 == (FIELD_OFFSET(CONTEXT, R12) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR13 == (FIELD_OFFSET(CONTEXT, R13) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR14 == (FIELD_OFFSET(CONTEXT, R14) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR15 == (FIELD_OFFSET(CONTEXT, R15) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));

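// Records an unwind code for a nonvolatile register saved at an
// SP-relative offset: UWOP_SAVE_NONVOL if the scaled offset round-trips
// through the compact encoding, otherwise UWOP_SAVE_NONVOL_FAR with the
// full 32-bit offset.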
VOID StubLinker::UnwindSavedReg (UCHAR reg, ULONG SPRelativeOffset)
{
    USHORT FrameOffset = (USHORT)(SPRelativeOffset / 8);

    if ((ULONG)FrameOffset == SPRelativeOffset)
    {
        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SAVE_NONVOL);
        pUnwindCode->OpInfo = reg;
        pUnwindCode[1].FrameOffset = FrameOffset;
    }
    else
    {
        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SAVE_NONVOL_FAR);
        pUnwindCode->OpInfo = reg;
        pUnwindCode[1].FrameOffset = (USHORT)SPRelativeOffset;
        pUnwindCode[2].FrameOffset = (USHORT)(SPRelativeOffset >> 16);
    }
}

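// Accounts for a nonvolatile register push in the tracked stack size and
// records a UWOP_PUSH_NONVOL code (pushes need not be reported once a
// frame pointer has been established).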
VOID StubLinker::UnwindPushedReg (UCHAR reg)
{
    m_stackSize += sizeof(void*);

    if (m_fHaveFramePointer)
        return;

    UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_PUSH_NONVOL);
    pUnwindCode->OpInfo = reg;
}

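// Accounts for a stack allocation (sub rsp, FrameSizeIncrement), recording
// UWOP_ALLOC_SMALL when the size fits the compact encoding and
// UWOP_ALLOC_LARGE (with one or two extra slots) otherwise.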
VOID StubLinker::UnwindAllocStack (SHORT FrameSizeIncrement)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    } CONTRACTL_END;

    if (! ClrSafeInt<SHORT>::addition(m_stackSize, FrameSizeIncrement, m_stackSize))
        COMPlusThrowArithmetic();

    if (m_fHaveFramePointer)
        return;

    UCHAR OpInfo = (UCHAR)((FrameSizeIncrement - 8) / 8);

    if (OpInfo*8 + 8 == FrameSizeIncrement)
    {
        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_ALLOC_SMALL);
        pUnwindCode->OpInfo = OpInfo;
    }
    else
    {
        USHORT FrameOffset = (USHORT)FrameSizeIncrement;
        BOOL fNeedExtraSlot = ((ULONG)FrameOffset != (ULONG)FrameSizeIncrement);

        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_ALLOC_LARGE, fNeedExtraSlot);

        pUnwindCode->OpInfo = fNeedExtraSlot;

        pUnwindCode[1].FrameOffset = FrameOffset;

        if (fNeedExtraSlot)
            pUnwindCode[2].FrameOffset = (USHORT)(FrameSizeIncrement >> 16);
    }
}

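// Records that 'reg' was established as the frame pointer (UWOP_SET_FPREG);
// subsequent pushes and stack allocations no longer emit unwind codes.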
VOID StubLinker::UnwindSetFramePointer (UCHAR reg)
{
    _ASSERTE(!m_fHaveFramePointer);

    UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SET_FPREG);
    pUnwindCode->OpInfo = reg;

    m_fHaveFramePointer = TRUE;
}

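// Allocates an IntermediateUnwindInfo for unwind opcode 'Op' (plus any
// extra UNWIND_CODE slots), queues it on m_pUnwindInfoList, and associates
// it with the current offset in the last code run so that EmitUnwindInfo
// can later resolve the final code offsets.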
UNWIND_CODE *StubLinker::AllocUnwindInfo (UCHAR Op, UCHAR nExtraSlots /*= 0*/)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        SO_TOLERANT;
    } CONTRACTL_END;

    _ASSERTE(Op < sizeof(UnwindOpExtraSlotTable));

    UCHAR nSlotsAlloc = UnwindOpExtraSlotTable[Op] + nExtraSlots;

    IntermediateUnwindInfo *pUnwindInfo = (IntermediateUnwindInfo*)m_quickHeap.Alloc(  sizeof(IntermediateUnwindInfo)
                                                                                     + nSlotsAlloc * sizeof(UNWIND_CODE));
    m_nUnwindSlots += 1 + nSlotsAlloc;

    pUnwindInfo->pNext = m_pUnwindInfoList;
    m_pUnwindInfoList = pUnwindInfo;

    UNWIND_CODE *pUnwindCode = &pUnwindInfo->rgUnwindCode[0];

    pUnwindCode->UnwindOp = Op;

    CodeRun *pCodeRun = GetLastCodeRunIfAny();
    _ASSERTE(pCodeRun != NULL);

    pUnwindInfo->pCodeRun = pCodeRun;
    pUnwindInfo->LocalOffset = pCodeRun->m_numcodebytes;

    EmitUnwindInfoCheck();

    return pUnwindCode;
}
#endif // defined(_TARGET_AMD64_)

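// Arguments for FindBlockCallback, used with LoaderHeap::EnumPageRegions
// to locate the reserved region that contains a given code address.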
struct FindBlockArgs
{
    BYTE *pCode;
    BYTE *pBlockBase;
    SIZE_T cbBlockSize;
};

bool FindBlockCallback (PTR_VOID pvArgs, PTR_VOID pvAllocationBase, SIZE_T cbReserved)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    FindBlockArgs* pArgs = (FindBlockArgs*)pvArgs;
    if (pArgs->pCode >= pvAllocationBase && (pArgs->pCode < ((BYTE *)pvAllocationBase + cbReserved)))
    {
        pArgs->pBlockBase = (BYTE*)pvAllocationBase;
        pArgs->cbBlockSize = cbReserved;
        return true;
    }

    return false;
}

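//
// Registers unwind info for the stub with the OS via a dynamic function
// table callback.  Returns false if the stub straddles a segment boundary,
// signaling the caller to retry with a different allocation (see the
// reservation list used in Link/LinkInterceptor).
//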
bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap)
{
    STANDARD_VM_CONTRACT;

    BYTE *pCode = (BYTE*)(pStub->GetEntryPoint());

    //
    // Determine the lower bound of the address space containing the stub.
    //

    FindBlockArgs findBlockArgs;
    findBlockArgs.pCode = pCode;
    findBlockArgs.pBlockBase = NULL;

    pHeap->EnumPageRegions(&FindBlockCallback, &findBlockArgs);

    if (findBlockArgs.pBlockBase == NULL)
    {
        // REVISIT_TODO better exception
        COMPlusThrowOM();
    }

    BYTE *pbRegionBaseAddress = findBlockArgs.pBlockBase;

#ifdef _DEBUG
    static SIZE_T MaxSegmentSize = -1;
    if (MaxSegmentSize == (SIZE_T)-1)
        MaxSegmentSize = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_MaxStubUnwindInfoSegmentSize, DYNAMIC_FUNCTION_TABLE_MAX_RANGE);
#else
    const SIZE_T MaxSegmentSize = DYNAMIC_FUNCTION_TABLE_MAX_RANGE;
#endif

    //
    // The RUNTIME_FUNCTION offsets are ULONGs.  If the region size is >
    // ULONG_MAX, then we'll shift the base address to the next 4gb and
    // register a separate function table.
    //
    // But...RtlInstallFunctionTableCallback has a 2gb restriction...so
    // make that LONG_MAX.
    //

    StubUnwindInfoHeader *pHeader = pStub->GetUnwindInfoHeader();
    _ASSERTE(IS_ALIGNED(pHeader, sizeof(void*)));

    BYTE *pbBaseAddress = pbRegionBaseAddress;

    while ((size_t)((BYTE*)pHeader - pbBaseAddress) > MaxSegmentSize)
    {
        pbBaseAddress += MaxSegmentSize;
    }

    //
    // If the unwind info/code straddle a 2gb boundary, then we're stuck.
    // Rather than add a lot more bit twiddling code to deal with this
    // exceptionally rare case, we'll signal the caller to keep this allocation
    // temporarily and allocate another.  This repeats until we eventually get
    // an allocation that doesn't straddle a 2gb boundary.  Afterwards the old
    // allocations are freed.
    //

    if ((size_t)(pCode + globalsize - pbBaseAddress) > MaxSegmentSize)
    {
        return false;
    }

    // Ensure that the first RUNTIME_FUNCTION struct ends up pointer aligned,
    // so that the StubUnwindInfoHeader struct is aligned.  UNWIND_INFO
    // includes one UNWIND_CODE.
    _ASSERTE(IS_ALIGNED(pStub, sizeof(void*)));
    _ASSERTE(0 == (FIELD_OFFSET(StubUnwindInfoHeader, FunctionEntry) % sizeof(void*)));

    StubUnwindInfoHeader * pUnwindInfoHeader = pStub->GetUnwindInfoHeader();

#ifdef _TARGET_AMD64_

    UNWIND_CODE *pDestUnwindCode = &pUnwindInfoHeader->UnwindInfo.UnwindCode[0];
#ifdef _DEBUG
    UNWIND_CODE *pDestUnwindCodeLimit = (UNWIND_CODE*)pStub->GetUnwindInfoHeaderSuffix();
#endif

    UINT FrameRegister = 0;

    //
    // Resolve the unwind operation offsets, and fill in the UNWIND_INFO and
    // RUNTIME_FUNCTION structs preceding the stub.  The unwind codes are recorded
    // in decreasing address order.
    //

    for (IntermediateUnwindInfo *pUnwindInfoList = m_pUnwindInfoList; pUnwindInfoList != NULL; pUnwindInfoList = pUnwindInfoList->pNext)
    {
        UNWIND_CODE *pUnwindCode = &pUnwindInfoList->rgUnwindCode[0];
        UCHAR op = pUnwindCode[0].UnwindOp;

        if (UWOP_SET_FPREG == op)
        {
            FrameRegister = pUnwindCode[0].OpInfo;
        }

        //
        // Compute number of slots used by this encoding.
        //

        UINT nSlots;

        if (UWOP_ALLOC_LARGE == op)
        {
            nSlots = 2 + pUnwindCode[0].OpInfo;
        }
        else
        {
            _ASSERTE(UnwindOpExtraSlotTable[op] != (UCHAR)-1);
            nSlots = 1 + UnwindOpExtraSlotTable[op];
        }

        //
        // Compute offset and ensure that it will fit in the encoding.
        //

        SIZE_T CodeOffset =   pUnwindInfoList->pCodeRun->m_globaloffset
                            + pUnwindInfoList->LocalOffset;

        if (CodeOffset != (SIZE_T)(UCHAR)CodeOffset)
        {
            // REVISIT_TODO better exception
            COMPlusThrowOM();
        }

        //
        // Copy the encoding data, overwrite the new offset, and advance
        // to the next encoding.
        //

        _ASSERTE(pDestUnwindCode + nSlots <= pDestUnwindCodeLimit);

        CopyMemory(pDestUnwindCode, pUnwindCode, nSlots * sizeof(UNWIND_CODE));

        pDestUnwindCode->CodeOffset = (UCHAR)CodeOffset;

        pDestUnwindCode += nSlots;
    }

    //
    // Fill in the UNWIND_INFO struct
    //
    UNWIND_INFO *pUnwindInfo = &pUnwindInfoHeader->UnwindInfo;
    _ASSERTE(IS_ALIGNED(pUnwindInfo, sizeof(ULONG)));

    // PrologueSize may be 0 if all unwind directives are at offset 0.
    SIZE_T PrologueSize =   m_pUnwindInfoList->pCodeRun->m_globaloffset
                          + m_pUnwindInfoList->LocalOffset;

    UINT nEntryPointSlots = m_nUnwindSlots;

    if (   PrologueSize != (SIZE_T)(UCHAR)PrologueSize
        || nEntryPointSlots > UCHAR_MAX)
    {
        // REVISIT_TODO better exception
        COMPlusThrowOM();
    }

    _ASSERTE(nEntryPointSlots);

    pUnwindInfo->Version = 1;
    pUnwindInfo->Flags = 0;
    pUnwindInfo->SizeOfProlog = (UCHAR)PrologueSize;
    pUnwindInfo->CountOfUnwindCodes = (UCHAR)nEntryPointSlots;
    pUnwindInfo->FrameRegister = FrameRegister;
    pUnwindInfo->FrameOffset = 0;

    //
    // Fill in the RUNTIME_FUNCTION struct for this prologue.
    //
    PT_RUNTIME_FUNCTION pCurFunction = &pUnwindInfoHeader->FunctionEntry;
    _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(ULONG)));

    S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
    if (sBeginAddress.IsOverflow())
        COMPlusThrowArithmetic();
    pCurFunction->BeginAddress = sBeginAddress.Value();

    S_UINT32 sEndAddress = S_BYTEPTR(pCode) + S_BYTEPTR(globalsize) - S_BYTEPTR(pbBaseAddress);
    if (sEndAddress.IsOverflow())
        COMPlusThrowArithmetic();
    pCurFunction->EndAddress = sEndAddress.Value();

    S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
    if (sTemp.IsOverflow())
        COMPlusThrowArithmetic();
    RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());
#elif defined(_TARGET_ARM_)
    //
    // Fill in the RUNTIME_FUNCTION struct for this prologue.
    //
    UNWIND_INFO *pUnwindInfo = &pUnwindInfoHeader->UnwindInfo;

    PT_RUNTIME_FUNCTION pCurFunction = &pUnwindInfoHeader->FunctionEntry;
    _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(ULONG)));

    S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
    if (sBeginAddress.IsOverflow())
        COMPlusThrowArithmetic();
    RUNTIME_FUNCTION__SetBeginAddress(pCurFunction, sBeginAddress.Value());

    S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
    if (sTemp.IsOverflow())
        COMPlusThrowArithmetic();
    RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());

    // Get the exact function length.  Cannot use globalsize, as it is
    // explicitly made to be 4-byte aligned.
    CodeRun *pLastCodeElem = GetLastCodeRunIfAny();
    _ASSERTE(pLastCodeElem != NULL);

    int functionLength = pLastCodeElem->m_numcodebytes + pLastCodeElem->m_globaloffset;

    // cannot encode functionLength greater than (2 * 0xFFFFF)
    if (functionLength > 2 * 0xFFFFF)
        COMPlusThrowArithmetic();

    _ASSERTE(functionLength <= globalsize);

    BYTE * pUnwindCodes = (BYTE *)pUnwindInfo + sizeof(DWORD);

    // Not emitting compact unwind info as there are very few (4) dynamic stubs with unwind info.
    // Benefit of the optimization does not outweigh the cost of adding the code for it.

    // Unwind info for prolog
    if (m_cbStackFrame != 0)
    {
        if(m_cbStackFrame < 512)
        {
            *pUnwindCodes++ = (BYTE)0xF8;                     // 16-bit sub/add sp,#x
            *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
            *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
            *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
        }
        else
        {
            *pUnwindCodes++ = (BYTE)0xFA;                     // 32-bit sub/add sp,#x
            *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
            *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
            *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
        }

        if(m_cbStackFrame >= 4096)
        {
            // The r4 register is used as a param to the checkStack function and must have been saved in the prolog
            _ASSERTE(m_cCalleeSavedRegs > 0);
            *pUnwindCodes++ = (BYTE)0xFB; // nop 16 bit for bl r12
            *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for movt r12, checkStack
            *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for movw r12, checkStack

            // Ensure that mov r4, m_cbStackFrame fits in a 32-bit instruction
            if(m_cbStackFrame > 65535)
                COMPlusThrow(kNotSupportedException);
            *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for mov r4, m_cbStackFrame
        }
    }

    // The unwind info generated would be incorrect when m_cCalleeSavedRegs == 0:
    // the unwind codes would claim that the push/pop instruction
    // is 16 bits wide, when the opcode actually generated by
    // ThumbEmitPop & ThumbEmitPush is 32 bits wide.
    // Currently no stub has m_cCalleeSavedRegs == 0,
    // so we just add the assert.
    _ASSERTE(m_cCalleeSavedRegs > 0);

    if (m_cCalleeSavedRegs <= 4)
    {
        *pUnwindCodes++ = (BYTE)(0xD4 + (m_cCalleeSavedRegs - 1)); // push/pop {r4-rX}
    }
    else
    {
        _ASSERTE(m_cCalleeSavedRegs <= 8);
        *pUnwindCodes++ = (BYTE)(0xDC + (m_cCalleeSavedRegs - 5)); // push/pop {r4-rX}
    }

    if (m_fPushArgRegs)
    {
        *pUnwindCodes++ = (BYTE)0x04; // push {r0-r3} / add sp,#16
        *pUnwindCodes++ = (BYTE)0xFD; // bx lr
    }
    else
    {
        *pUnwindCodes++ = (BYTE)0xFF; // end
    }

    ptrdiff_t epilogUnwindCodeIndex = 0;

    // The epilog differs from the prolog
    if(m_cbStackFrame >= 4096)
    {
        // Index of the first unwind code of the epilog
        epilogUnwindCodeIndex = pUnwindCodes - (BYTE *)pUnwindInfo - sizeof(DWORD);

        *pUnwindCodes++ = (BYTE)0xF8;  // sub/add sp,#x
        *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
        *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
        *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);

        if (m_cCalleeSavedRegs <= 4)
        {
            *pUnwindCodes++ = (BYTE)(0xD4 + (m_cCalleeSavedRegs - 1)); // push/pop {r4-rX}
        }
        else
        {
            *pUnwindCodes++ = (BYTE)(0xDC + (m_cCalleeSavedRegs - 5)); // push/pop {r4-rX}
        }

        if (m_fPushArgRegs)
        {
            *pUnwindCodes++ = (BYTE)0x04; // push {r0-r3} / add sp,#16
            *pUnwindCodes++ = (BYTE)0xFD; // bx lr
        }
        else
        {
            *pUnwindCodes++ = (BYTE)0xFF; // end
        }

    }

    // Number of 32-bit unwind codes
    size_t codeWordsCount = (ALIGN_UP((size_t)pUnwindCodes, sizeof(void*)) - (size_t)pUnwindInfo - sizeof(DWORD))/4;

    _ASSERTE(epilogUnwindCodeIndex < 32);

    // Check that MAX_UNWIND_CODE_WORDS is sufficient to store all unwind codes
    _ASSERTE(codeWordsCount <= MAX_UNWIND_CODE_WORDS);

    *(DWORD *)pUnwindInfo =
        ((functionLength) / 2) |
        (1 << 21) |
        ((int)epilogUnwindCodeIndex << 23)|
        ((int)codeWordsCount << 28);

#elif defined(_TARGET_ARM64_)
    if (!m_fProlog)
    {
        // If EmitProlog wasn't called, this is a leaf function that doesn't need any unwind info
        T_RUNTIME_FUNCTION *pCurFunction = NULL;
    }
    else
    {

        //
        // Fill in the RUNTIME_FUNCTION struct for this prologue.
        //
        UNWIND_INFO *pUnwindInfo = &(pUnwindInfoHeader->UnwindInfo);

        T_RUNTIME_FUNCTION *pCurFunction = &(pUnwindInfoHeader->FunctionEntry);

        _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(void*)));

        S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
        if (sBeginAddress.IsOverflow())
            COMPlusThrowArithmetic();

        S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
        if (sTemp.IsOverflow())
            COMPlusThrowArithmetic();

        RUNTIME_FUNCTION__SetBeginAddress(pCurFunction, sBeginAddress.Value());
        RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());

        CodeRun *pLastCodeElem = GetLastCodeRunIfAny();
        _ASSERTE(pLastCodeElem != NULL);

        int functionLength = pLastCodeElem->m_numcodebytes + pLastCodeElem->m_globaloffset;

        // .xdata has 18 bits for the function length; it stores the total length of the function in bytes, divided by 4.
        // If the function is larger than 1MB, multiple pdata and xdata records must be used, which we don't support right now.
        if (functionLength > 4 * 0x3FFFF)
            COMPlusThrowArithmetic();

        _ASSERTE(functionLength <= globalsize);

        // No support for extended code words and/or extended epilog.
        // ASSERTION: the first 10 bits of pUnwindInfo, which hold the #codewords and #epilogcount, cannot be 0.
        // The lack of space for exception scope data also means there is no support for exceptions
        // in the stubs generated with this stublinker.
        BYTE * pUnwindCodes = (BYTE *)pUnwindInfo + sizeof(DWORD);


        // Emitting the unwind codes:
        // The unwind codes are emitted in epilog order.
        //
        // 6. Integer argument registers
        //    Although we might be saving the argument registers in the prolog, we don't need
        //    to report them to the OS. (They are not expressible anyway.)

        // 5. Floating point argument registers:
        //    Similar to the integer argument registers, no reporting
        //

        // 4. Set the frame pointer
        //    ASSUMPTION: none of the stubs generated with this stublinker change the SP value outside of
        //    epilog and prolog; when that is the case we can skip reporting the setup of the frame pointer.

        // By skipping steps #4, #5, and #6, the prolog and epilog become reversible, so they can share the unwind codes.
        int epilogUnwindCodeIndex = 0;

        unsigned cStackFrameSizeInQWORDs = GetStackFrameSize()/16;
        // 3. Store FP/LR
        // save_fplr
        *pUnwindCodes++ = (BYTE)(0x40 | (m_cbStackSpace>>3));

        // 2. Callee-saved registers
        //
        if (m_cCalleeSavedRegs > 0)
        {
            unsigned offset = 2 + m_cbStackSpace/8; // 2 is for fp,lr
            if ((m_cCalleeSavedRegs %2) ==1)
            {
                // save_reg
                *pUnwindCodes++ = (BYTE) (0xD0 | ((m_cCalleeSavedRegs-1)>>2));
                *pUnwindCodes++ = (BYTE) ((BYTE)((m_cCalleeSavedRegs-1) << 6) | ((offset + m_cCalleeSavedRegs - 1) & 0x3F));
            }
            for (int i=(m_cCalleeSavedRegs/2)*2-2; i>=0; i-=2)
            {
                if (i!=0)
                {
                    // save_next
                    *pUnwindCodes++ = 0xE6;
                }
                else
                {
                    // save_regp
                    *pUnwindCodes++ = 0xC8;
                    *pUnwindCodes++ = (BYTE)(offset & 0x3F);
                }
            }
        }

        // 1. SP Relocation
        //
        // EmitProlog is supposed to reject frames larger than 504 bytes.
        // Assert that here.
        _ASSERTE(cStackFrameSizeInQWORDs <= 0x3F);
        if (cStackFrameSizeInQWORDs <= 0x1F)
        {
            // alloc_s
            *pUnwindCodes++ = (BYTE)(cStackFrameSizeInQWORDs);
        }
        else
        {
            // alloc_m
            *pUnwindCodes++ = (BYTE)(0xC0 | (cStackFrameSizeInQWORDs >> 8));
            *pUnwindCodes++ = (BYTE)(cStackFrameSizeInQWORDs);
        }

        // End
        *pUnwindCodes++ = 0xE4;

        // Number of 32-bit unwind codes
        int codeWordsCount = (int)(ALIGN_UP((size_t)pUnwindCodes, sizeof(DWORD)) - (size_t)pUnwindInfo - sizeof(DWORD))/4;

        // Check that MAX_UNWIND_CODE_WORDS is sufficient to store all unwind codes
        _ASSERTE(codeWordsCount <= MAX_UNWIND_CODE_WORDS);

        *(DWORD *)pUnwindInfo =
            ((functionLength) / 4) |
            (1 << 21) |            // E bit
            (epilogUnwindCodeIndex << 22)|
            (codeWordsCount << 27);
    } // end else (!m_fProlog)
#else
    PORTABILITY_ASSERT("StubLinker::EmitUnwindInfo");
    T_RUNTIME_FUNCTION *pCurFunction = NULL;
#endif

    //
    // Get a StubUnwindInfoHeapSegment for this base address
    //

    CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);

    StubUnwindInfoHeapSegment *pStubHeapSegment;
    StubUnwindInfoHeapSegment **ppPrevStubHeapSegment;
    for (ppPrevStubHeapSegment = &g_StubHeapSegments;
         (pStubHeapSegment = *ppPrevStubHeapSegment);
         (ppPrevStubHeapSegment = &pStubHeapSegment->pNext))
    {
        if (pbBaseAddress < pStubHeapSegment->pbBaseAddress)
        {
            // The list is ordered, so the address is between segments
            pStubHeapSegment = NULL;
            break;
        }

        if (pbBaseAddress == pStubHeapSegment->pbBaseAddress)
        {
            // Found an existing segment
            break;
        }
    }

    if (!pStubHeapSegment)
    {
        //
        // RtlInstallFunctionTableCallback will only accept a ULONG for the
        // region size.  We've already checked above that the RUNTIME_FUNCTION
        // offsets will work relative to pbBaseAddress.
        //

        SIZE_T cbSegment = findBlockArgs.cbBlockSize;

        if (cbSegment > MaxSegmentSize)
            cbSegment = MaxSegmentSize;

        NewHolder<StubUnwindInfoHeapSegment> pNewStubHeapSegment = new StubUnwindInfoHeapSegment();


        pNewStubHeapSegment->pbBaseAddress = pbBaseAddress;
        pNewStubHeapSegment->cbSegment = cbSegment;
        pNewStubHeapSegment->pUnwindHeaderList = NULL;
#ifdef _TARGET_AMD64_
        pNewStubHeapSegment->pUnwindInfoTable = NULL;
#endif

        // Insert the new stub into list
        pNewStubHeapSegment->pNext = *ppPrevStubHeapSegment;
        *ppPrevStubHeapSegment = pNewStubHeapSegment;
        pNewStubHeapSegment.SuppressRelease();

        // Use new segment for the stub
        pStubHeapSegment = pNewStubHeapSegment;

        InstallEEFunctionTable(
                pNewStubHeapSegment,
                pbBaseAddress,
                (ULONG)cbSegment,
                &FindStubFunctionEntry,
                pNewStubHeapSegment,
                DYNFNTABLE_STUB);
    }

    //
    // Link the new stub into the segment.
    //

    pHeader->pNext = pStubHeapSegment->pUnwindHeaderList;
    pStubHeapSegment->pUnwindHeaderList = pHeader;

#ifdef _TARGET_AMD64_
    // Publish Unwind info to ETW stack crawler
    UnwindInfoTable::AddToUnwindInfoTable(
        &pStubHeapSegment->pUnwindInfoTable, pCurFunction,
        (TADDR) pStubHeapSegment->pbBaseAddress,
        (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
#endif

#ifdef _DEBUG
    _ASSERTE(pHeader->IsRegistered());
    _ASSERTE(   &pHeader->FunctionEntry
             == FindStubFunctionEntry((ULONG64)pCode, EncodeDynamicFunctionTableContext(pStubHeapSegment, DYNFNTABLE_STUB)));
#endif

    return true;
}
#endif // STUBLINKER_GENERATES_UNWIND_INFO

1901#ifdef _TARGET_ARM_
1902void StubLinker::DescribeProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL fPushArgRegs)
1903{
1904 m_fProlog = TRUE;
1905 m_cCalleeSavedRegs = cCalleeSavedRegs;
1906 m_cbStackFrame = cbStackFrame;
1907 m_fPushArgRegs = fPushArgRegs;
1908}
1909#elif defined(_TARGET_ARM64_)
1910void StubLinker::DescribeProlog(UINT cIntRegArgs, UINT cVecRegArgs, UINT cCalleeSavedRegs, UINT cbStackSpace)
1911{
1912 m_fProlog = TRUE;
1913 m_cIntRegArgs = cIntRegArgs;
1914 m_cVecRegArgs = cVecRegArgs;
1915 m_cCalleeSavedRegs = cCalleeSavedRegs;
1916 m_cbStackSpace = cbStackSpace;
1917}
1918
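// Frame layout implied by the DescribeProlog parameters and assumed by the
// two helpers below (offsets from SP; the saved argument registers occupy
// the highest-addressed slots of the frame):
//
//     SP + 0                                         : m_cbStackSpace bytes of scratch space
//     SP + m_cbStackSpace                            : saved FP, LR (2 slots)
//     SP + m_cbStackSpace + 2*8                      : m_cCalleeSavedRegs saved registers
//     SP + m_cbStackSpace + (2+m_cCalleeSavedRegs)*8 : m_cIntRegArgs + m_cVecRegArgs saved arg registers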
UINT StubLinker::GetSavedRegArgsOffset()
{
    _ASSERTE(m_fProlog);
    // This is the offset from SP. We assume the stub saves the argument
    // registers above FP/LR and the callee-saved registers, i.e. in the
    // highest-addressed slots of the stack frame (see the layout above).
    return m_cbStackSpace + (2 + m_cCalleeSavedRegs) * sizeof(void*); // 2 is for FP and LR
}

UINT StubLinker::GetStackFrameSize()
{
    _ASSERTE(m_fProlog);
    return m_cbStackSpace + (2 + m_cCalleeSavedRegs + m_cIntRegArgs + m_cVecRegArgs) * sizeof(void*);
}

#endif // _TARGET_ARM_ / _TARGET_ARM64_

#endif // #ifndef DACCESS_COMPILE

#ifndef DACCESS_COMPILE

//-------------------------------------------------------------------
// Inc the refcount.
//-------------------------------------------------------------------
VOID Stub::IncRef()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    _ASSERTE(m_signature == kUsedStub);
    FastInterlockIncrement((LONG*)&m_refcount);
}

//-------------------------------------------------------------------
// Dec the refcount.
//-------------------------------------------------------------------
BOOL Stub::DecRef()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    _ASSERTE(m_signature == kUsedStub);
    int count = FastInterlockDecrement((LONG*)&m_refcount);
    if (count <= 0) {
        if (m_patchOffset & INTERCEPT_BIT)
        {
            ((InterceptStub*)this)->ReleaseInterceptedStub();
        }

        DeleteStub();
        return TRUE;
    }
    return FALSE;
}

VOID Stub::DeleteStub()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    COUNTER_ONLY(GetPerfCounters().m_Interop.cStubs--);

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (HasUnwindInfo())
    {
        StubUnwindInfoHeader *pHeader = GetUnwindInfoHeader();

        //
        // Check if the stub has been linked into a StubUnwindInfoHeapSegment.
        //
        if (pHeader->IsRegistered())
        {
            CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);

            //
            // Find the segment containing the stub.
            //
            StubUnwindInfoHeapSegment **ppPrevSegment = &g_StubHeapSegments;
            StubUnwindInfoHeapSegment *pSegment = *ppPrevSegment;

            if (pSegment)
            {
                PBYTE pbCode = (PBYTE)GetEntryPointInternal();
#ifdef _TARGET_AMD64_
                UnwindInfoTable::RemoveFromUnwindInfoTable(&pSegment->pUnwindInfoTable,
                    (TADDR) pSegment->pbBaseAddress, (TADDR) pbCode);
#endif
                for (StubUnwindInfoHeapSegment *pNextSegment = pSegment->pNext;
                     pNextSegment;
                     ppPrevSegment = &pSegment->pNext, pSegment = pNextSegment, pNextSegment = pSegment->pNext)
                {
                    // The segments are sorted by pbBaseAddress.
                    if (pbCode < pNextSegment->pbBaseAddress)
                        break;
                }
            }

            // The stub was marked as registered, so a segment should exist.
            _ASSERTE(pSegment);

            if (pSegment)
            {
                //
                // Find this stub's location in the segment's list.
                //
                StubUnwindInfoHeader *pCurHeader;
                StubUnwindInfoHeader **ppPrevHeaderList;
                for (ppPrevHeaderList = &pSegment->pUnwindHeaderList;
                     (pCurHeader = *ppPrevHeaderList);
                     (ppPrevHeaderList = &pCurHeader->pNext))
                {
                    if (pHeader == pCurHeader)
                        break;
                }

                // The stub was marked as registered, so we should find it in the segment's list.
                _ASSERTE(pCurHeader);

                if (pCurHeader)
                {
                    //
                    // Remove the stub from the segment's list.
                    //
                    *ppPrevHeaderList = pHeader->pNext;

                    //
                    // If the segment's list is now empty, delete the segment.
                    //
                    if (!pSegment->pUnwindHeaderList)
                    {
                        DeleteEEFunctionTable(pSegment);
#ifdef _TARGET_AMD64_
                        if (pSegment->pUnwindInfoTable != 0)
                            delete pSegment->pUnwindInfoTable;
#endif
                        *ppPrevSegment = pSegment->pNext;
                        delete pSegment;
                    }
                }
            }
        }
    }
#endif

    // a size of 0 is a signal to Nirvana to flush the entire cache
    //FlushInstructionCache(GetCurrentProcess(),0,0);

    if ((m_patchOffset & LOADER_HEAP_BIT) == 0)
    {
#ifdef _DEBUG
        m_signature = kFreedStub;
        FillMemory(this+1, m_numCodeBytes, 0xcc);
#endif

#ifndef FEATURE_PAL
        DeleteExecutable((BYTE*)GetAllocationBase());
#else
        delete [] (BYTE*)GetAllocationBase();
#endif
    }
}

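//-------------------------------------------------------------------
// Returns the start of the allocation containing this Stub, by walking
// backwards over the optional prefixes that NewStub lays down in front
// of the Stub header: the intercept fields, the unwind info header, and
// any alignment padding.
//-------------------------------------------------------------------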
TADDR Stub::GetAllocationBase()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    }
    CONTRACTL_END

    TADDR info = dac_cast<TADDR>(this);
    SIZE_T cbPrefix = 0;

    if (IsIntercept())
    {
        cbPrefix += 2 * sizeof(TADDR);
    }

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (HasUnwindInfo())
    {
        StubUnwindInfoHeaderSuffix *pSuffix =
            PTR_StubUnwindInfoHeaderSuffix(info - cbPrefix -
                                           sizeof(*pSuffix));

        cbPrefix += StubUnwindInfoHeader::ComputeSize(pSuffix->nUnwindInfoSize);
    }
#endif // STUBLINKER_GENERATES_UNWIND_INFO

    if (!HasExternalEntryPoint())
    {
        cbPrefix = ALIGN_UP(cbPrefix + sizeof(Stub), CODE_SIZE_ALIGN) - sizeof(Stub);
    }

    return info - cbPrefix;
}

Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    Stub* pStub = NewStub(NULL, 0, flags | NEWSTUB_FL_EXTERNAL);
    _ASSERTE(pStub->HasExternalEntryPoint());

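    // For an external stub there are no inline code bytes; the target code
    // pointer lives in the single pointer-sized slot that NewStub reserved
    // immediately after the Stub header.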
    *(PTR_VOID *)(pStub + 1) = pCode;

    return pStub;
}

//-------------------------------------------------------------------
// Stub allocation done here.
//-------------------------------------------------------------------
/*static*/ Stub* Stub::NewStub(
        LoaderHeap *pHeap,
        UINT numCodeBytes,
        DWORD flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
        , UINT nUnwindInfoSize
#endif
        )
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    _ASSERTE(!nUnwindInfoSize || !pHeap || pHeap->m_fPermitStubsWithUnwindInfo);
#endif // STUBLINKER_GENERATES_UNWIND_INFO

    COUNTER_ONLY(GetPerfCounters().m_Interop.cStubs++);

    S_SIZE_T size = S_SIZE_T(sizeof(Stub));

    if (flags & NEWSTUB_FL_INTERCEPT)
    {
        size += sizeof(Stub *) + sizeof(void*);
    }

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (nUnwindInfoSize != 0)
    {
        size += StubUnwindInfoHeader::ComputeSize(nUnwindInfoSize);
    }
#endif

    if (flags & NEWSTUB_FL_EXTERNAL)
    {
        _ASSERTE(numCodeBytes == 0);
        size += sizeof(PTR_PCODE);
    }
    else
    {
        size.AlignUp(CODE_SIZE_ALIGN);
        size += numCodeBytes;
    }

    if (size.IsOverflow())
        COMPlusThrowArithmetic();

    size_t totalSize = size.Value();

    BYTE *pBlock;
    if (pHeap == NULL)
    {
#ifndef FEATURE_PAL
        pBlock = new (executable) BYTE[totalSize];
#else
        pBlock = new BYTE[totalSize];
#endif
    }
    else
    {
        pBlock = (BYTE*)(void*) pHeap->AllocAlignedMem(totalSize, CODE_SIZE_ALIGN);
        flags |= NEWSTUB_FL_LOADERHEAP;
    }

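    // The Stub header is placed at the *end* of the allocation: the optional
    // unwind info header, intercept fields, and any alignment padding sit in
    // front of it, and the payload (the code bytes, or the external code
    // pointer) trails the header:
    //
    //     pBlock ... [prefixes][Stub][payload] ... pBlock + totalSize
    //
    // GetAllocationBase() reverses this layout computation.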
    // Make sure that the payload of the stub is aligned
    Stub* pStub = (Stub*)((pBlock + totalSize) -
        (sizeof(Stub) + ((flags & NEWSTUB_FL_EXTERNAL) ? sizeof(PTR_PCODE) : numCodeBytes)));

    pStub->SetupStub(
        numCodeBytes,
        flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
        , nUnwindInfoSize
#endif
        );

    _ASSERTE((BYTE *)pStub->GetAllocationBase() == pBlock);

    return pStub;
}

void Stub::SetupStub(int numCodeBytes, DWORD flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
                     , UINT nUnwindInfoSize
#endif
                     )
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef _DEBUG
    m_signature = kUsedStub;
#else
#ifdef _WIN64
    m_pad_code_bytes = 0;
#endif
#endif

    m_numCodeBytes = numCodeBytes;

    m_refcount = 1;
    m_patchOffset = 0;
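
    // m_patchOffset doubles as a flag word: the NEWSTUB_FL_* flags below are
    // recorded as dedicated bits in it, alongside the patch offset itself.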
    if ((flags & NEWSTUB_FL_INTERCEPT) != 0)
        m_patchOffset |= INTERCEPT_BIT;
    if ((flags & NEWSTUB_FL_LOADERHEAP) != 0)
        m_patchOffset |= LOADER_HEAP_BIT;
    if ((flags & NEWSTUB_FL_MULTICAST) != 0)
        m_patchOffset |= MULTICAST_DELEGATE_BIT;
    if ((flags & NEWSTUB_FL_EXTERNAL) != 0)
        m_patchOffset |= EXTERNAL_ENTRY_BIT;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (nUnwindInfoSize)
    {
        m_patchOffset |= UNWIND_INFO_BIT;

        StubUnwindInfoHeaderSuffix * pSuffix = GetUnwindInfoHeaderSuffix();
        pSuffix->nUnwindInfoSize = (BYTE)nUnwindInfoSize;

        StubUnwindInfoHeader * pHeader = GetUnwindInfoHeader();
        pHeader->Init();
    }
#endif
}

//-------------------------------------------------------------------
// One-time init
//-------------------------------------------------------------------
/*static*/ void Stub::Init()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    g_StubUnwindInfoHeapSegmentsCrst.Init(CrstStubUnwindInfoHeapSegments);
#endif
}

/*static*/ Stub* InterceptStub::NewInterceptedStub(void* pCode,
                                                   Stub* interceptee,
                                                   void* pRealAddr)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    InterceptStub *pStub = (InterceptStub *) NewStub(pCode, NEWSTUB_FL_INTERCEPT);

    *pStub->GetInterceptedStub() = interceptee;
    *pStub->GetRealAddr() = (TADDR)pRealAddr;

    LOG((LF_CORDB, LL_INFO10000, "For Stub %p, set intercepted stub to %p\n",
         pStub, interceptee));

    return pStub;
}

//-------------------------------------------------------------------
// Stub allocation done here.
//-------------------------------------------------------------------
/*static*/ Stub* InterceptStub::NewInterceptedStub(LoaderHeap *pHeap,
                                                   UINT numCodeBytes,
                                                   Stub* interceptee,
                                                   void* pRealAddr
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
                                                   , UINT nUnwindInfoSize
#endif
                                                   )
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    InterceptStub *pStub = (InterceptStub *) NewStub(
            pHeap,
            numCodeBytes,
            NEWSTUB_FL_INTERCEPT
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
            , nUnwindInfoSize
#endif
            );

    *pStub->GetInterceptedStub() = interceptee;
    *pStub->GetRealAddr() = (TADDR)pRealAddr;

    LOG((LF_CORDB, LL_INFO10000, "For Stub %p, set intercepted stub to %p\n",
         pStub, interceptee));

    return pStub;
}

//-------------------------------------------------------------------
// Release the stub that is owned by this stub
//-------------------------------------------------------------------
void InterceptStub::ReleaseInterceptedStub()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    Stub** intercepted = GetInterceptedStub();
    // If we own the intercepted stub, release our reference to it. The
    // pointer can be null if the linked stub is actually a jitted stub.
    if (*intercepted)
        (*intercepted)->DecRef();
}

//-------------------------------------------------------------------
// Constructor
//-------------------------------------------------------------------
ArgBasedStubCache::ArgBasedStubCache(UINT fixedSlots)
    : m_numFixedSlots(fixedSlots),
      m_crst(CrstArgBasedStubCache)
{
    WRAPPER_NO_CONTRACT;

    m_aStub = new Stub * [m_numFixedSlots];
    _ASSERTE(m_aStub != NULL);

    for (unsigned __int32 i = 0; i < m_numFixedSlots; i++) {
        m_aStub[i] = NULL;
    }
    m_pSlotEntries = NULL;
}


//-------------------------------------------------------------------
// Destructor
//-------------------------------------------------------------------
ArgBasedStubCache::~ArgBasedStubCache()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    for (unsigned __int32 i = 0; i < m_numFixedSlots; i++) {
        Stub *pStub = m_aStub[i];
        if (pStub) {
            pStub->DecRef();
        }
    }
    // a size of 0 is a signal to Nirvana to flush the entire cache
    // not sure if this is needed, but should have no CLR perf impact since size is 0.
    FlushInstructionCache(GetCurrentProcess(), 0, 0);

    SlotEntry **ppSlotEntry = &m_pSlotEntries;
    SlotEntry *pCur;
    while (NULL != (pCur = *ppSlotEntry)) {
        Stub *pStub = pCur->m_pStub;
        pStub->DecRef();
        *ppSlotEntry = pCur->m_pNext;
        delete pCur;
    }
    delete [] m_aStub;
}


//-------------------------------------------------------------------
// Queries/retrieves a previously cached stub.
//
// If there is no stub corresponding to the given index,
// this function returns NULL.
//
// Otherwise, this function returns the stub after
// incrementing its refcount.
//-------------------------------------------------------------------
Stub *ArgBasedStubCache::GetStub(UINT_PTR key)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    Stub *pStub;

    CrstHolder ch(&m_crst);

    if (key < m_numFixedSlots) {
        pStub = m_aStub[key];
    } else {
        pStub = NULL;
        for (SlotEntry *pSlotEntry = m_pSlotEntries;
             pSlotEntry != NULL;
             pSlotEntry = pSlotEntry->m_pNext) {

            if (pSlotEntry->m_key == key) {
                pStub = pSlotEntry->m_pStub;
                break;
            }
        }
    }
    if (pStub) {
        pStub->IncRef();
    }
    return pStub;
}


//-------------------------------------------------------------------
// Tries to associate a stub with a given index. This association
// may fail because some other thread may have beaten you to it
// just before you make the call.
//
// If the association succeeds, "pStub" is installed, and it is
// returned back to the caller. The stub's refcount is incremented
// twice (one to reflect the cache's ownership, and one to reflect
// the caller's ownership.)
//
// If the association fails because another stub is already installed,
// then the incumbent stub is returned to the caller and its refcount
// is incremented once (to reflect the caller's ownership.)
//
// If the association fails due to lack of memory, NULL is returned
// and no one's refcount changes.
//
// This routine is intended to be called like this:
//
//      Stub *pCandidate = MakeStub();  // after this, pCandidate's rc is 1
//      Stub *pWinner = cache->AttemptToSetStub(idx, pCandidate);
//      pCandidate->DecRef();
//      pCandidate = 0xcccccccc;        // must not use pCandidate again.
//      if (!pWinner) {
//          OutOfMemoryError;
//      }
//      // If the association succeeded, pWinner's refcount is 2 and so
//      // is pCandidate's (because it *is* pWinner).
//      // If the association failed, pWinner's refcount is still 2
//      // and pCandidate got destroyed by the last DecRef().
//      // Either way, pWinner is now the official index holder. It
//      // has a refcount of 2 (one for the cache's ownership, and
//      // one belonging to this code.)
//-------------------------------------------------------------------
Stub* ArgBasedStubCache::AttemptToSetStub(UINT_PTR key, Stub *pStub)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    CrstHolder ch(&m_crst);

    if (key < m_numFixedSlots) {
        if (m_aStub[key]) {
            pStub = m_aStub[key];
        } else {
            m_aStub[key] = pStub;
            pStub->IncRef();   // IncRef on cache's behalf
        }
    } else {
        SlotEntry *pSlotEntry;
        for (pSlotEntry = m_pSlotEntries;
             pSlotEntry != NULL;
             pSlotEntry = pSlotEntry->m_pNext) {

            if (pSlotEntry->m_key == key) {
                pStub = pSlotEntry->m_pStub;
                break;
            }
        }
        if (!pSlotEntry) {
            pSlotEntry = new SlotEntry;
            pSlotEntry->m_pStub = pStub;
            pStub->IncRef();   // IncRef on cache's behalf
            pSlotEntry->m_key = key;
            pSlotEntry->m_pNext = m_pSlotEntries;
            m_pSlotEntries = pSlotEntry;
        }
    }
    if (pStub) {
        pStub->IncRef();   // IncRef because we're returning it to caller
    }
    return pStub;
}


#ifdef _DEBUG
// Diagnostic dump
VOID ArgBasedStubCache::Dump()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    printf("--------------------------------------------------------------\n");
2571 printf("ArgBasedStubCache dump (%lu fixed entries):\n", m_numFixedSlots);
    for (UINT32 i = 0; i < m_numFixedSlots; i++) {

        printf("  Fixed slot %lu: ", (ULONG)i);
        Stub *pStub = m_aStub[i];
        if (!pStub) {
            printf("empty\n");
        } else {
            printf("%lxh - refcount is %lu\n",
                   (size_t)(pStub->GetEntryPoint()),
                   (ULONG)( *( ( ((ULONG*)(pStub->GetEntryPoint())) - 1))));
        }
    }

    for (SlotEntry *pSlotEntry = m_pSlotEntries;
         pSlotEntry != NULL;
         pSlotEntry = pSlotEntry->m_pNext) {

        printf("  Dyna. slot %lu: ", (ULONG)(pSlotEntry->m_key));
        Stub *pStub = pSlotEntry->m_pStub;
        printf("%lxh - refcount is %lu\n",
               (size_t)(pStub->GetEntryPoint()),
               (ULONG)( *( ( ((ULONG*)(pStub->GetEntryPoint())) - 1))));

    }

    printf("--------------------------------------------------------------\n");
}
#endif

#endif // #ifndef DACCESS_COMPILE