1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
// codeman.cpp - a management class for handling multiple code managers
5//
6
7//
8
9#include "common.h"
10#include "jitinterface.h"
11#include "corjit.h"
12#include "jithost.h"
13#include "eetwain.h"
14#include "eeconfig.h"
15#include "excep.h"
16#include "appdomain.hpp"
17#include "codeman.h"
18#include "nibblemapmacros.h"
19#include "generics.h"
20#include "dynamicmethod.h"
21#include "eemessagebox.h"
22#include "eventtrace.h"
23#include "threadsuspend.h"
24
25#include "exceptionhandling.h"
26
27#include "rtlfunctions.h"
28
29#include "jitperf.h"
30#include "shimload.h"
31#include "debuginfostore.h"
32#include "strsafe.h"
33
34#include "configuration.h"
35
36#ifdef _WIN64
37#define CHECK_DUPLICATED_STRUCT_LAYOUTS
38#include "../debug/daccess/fntableaccess.h"
39#endif // _WIN64
40
41#ifdef FEATURE_PERFMAP
42#include "perfmap.h"
43#endif
44
45// Default number of jump stubs in a jump stub block
46#define DEFAULT_JUMPSTUBS_PER_BLOCK 32
47
48SPTR_IMPL(EECodeManager, ExecutionManager, m_pDefaultCodeMan);
49
50SPTR_IMPL(EEJitManager, ExecutionManager, m_pEEJitManager);
51#ifdef FEATURE_PREJIT
52SPTR_IMPL(NativeImageJitManager, ExecutionManager, m_pNativeImageJitManager);
53#endif
54#ifdef FEATURE_READYTORUN
55SPTR_IMPL(ReadyToRunJitManager, ExecutionManager, m_pReadyToRunJitManager);
56#endif
57
58#ifndef DACCESS_COMPILE
59Volatile<RangeSection *> ExecutionManager::m_CodeRangeList = NULL;
60Volatile<LONG> ExecutionManager::m_dwReaderCount = 0;
61Volatile<LONG> ExecutionManager::m_dwWriterLock = 0;
62#else
63SPTR_IMPL(RangeSection, ExecutionManager, m_CodeRangeList);
64SVAL_IMPL(LONG, ExecutionManager, m_dwReaderCount);
65SVAL_IMPL(LONG, ExecutionManager, m_dwWriterLock);
66#endif
67
68#ifndef DACCESS_COMPILE
69
70CrstStatic ExecutionManager::m_JumpStubCrst;
71CrstStatic ExecutionManager::m_RangeCrst;
72
73unsigned ExecutionManager::m_normal_JumpStubLookup;
74unsigned ExecutionManager::m_normal_JumpStubUnique;
75unsigned ExecutionManager::m_normal_JumpStubBlockAllocCount;
76unsigned ExecutionManager::m_normal_JumpStubBlockFullCount;
77
78unsigned ExecutionManager::m_LCG_JumpStubLookup;
79unsigned ExecutionManager::m_LCG_JumpStubUnique;
80unsigned ExecutionManager::m_LCG_JumpStubBlockAllocCount;
81unsigned ExecutionManager::m_LCG_JumpStubBlockFullCount;
82
83#endif // DACCESS_COMPILE
84
#if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) // We only do this on amd64, not ARM
86
87// Support for new style unwind information (to allow OS to stack crawl JIT compiled code).
88
89typedef NTSTATUS (WINAPI* RtlAddGrowableFunctionTableFnPtr) (
90 PVOID *DynamicTable, PRUNTIME_FUNCTION FunctionTable, ULONG EntryCount,
91 ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd);
92typedef VOID (WINAPI* RtlGrowFunctionTableFnPtr) (PVOID DynamicTable, ULONG NewEntryCount);
93typedef VOID (WINAPI* RtlDeleteGrowableFunctionTableFnPtr) (PVOID DynamicTable);
94
95// OS entry points (only exist on Win8 and above)
96static RtlAddGrowableFunctionTableFnPtr pRtlAddGrowableFunctionTable;
97static RtlGrowFunctionTableFnPtr pRtlGrowFunctionTable;
98static RtlDeleteGrowableFunctionTableFnPtr pRtlDeleteGrowableFunctionTable;
99static Volatile<bool> RtlUnwindFtnsInited;
100
101// statics for UnwindInfoTable
102Crst* UnwindInfoTable::s_pUnwindInfoTableLock = NULL;
103Volatile<bool> UnwindInfoTable::s_publishingActive = false;
104
105
106#if _DEBUG
// Fake functions on Win7 checked builds to exercise the code paths; they are no-ops
108NTSTATUS WINAPI FakeRtlAddGrowableFunctionTable (
109 PVOID *DynamicTable, PT_RUNTIME_FUNCTION FunctionTable, ULONG EntryCount,
110 ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd) { *DynamicTable = (PVOID) 1; return 0; }
111VOID WINAPI FakeRtlGrowFunctionTable (PVOID DynamicTable, ULONG NewEntryCount) { }
112VOID WINAPI FakeRtlDeleteGrowableFunctionTable (PVOID DynamicTable) {}
113#endif
114
115/****************************************************************************/
// Initialize the entry points for the new Win8 unwind info publishing functions.
// Returns true if initialization is successful (i.e., the functions exist).
118
119bool InitUnwindFtns()
120{
121 CONTRACTL {
122 NOTHROW;
123 } CONTRACTL_END;
124
125#ifndef FEATURE_PAL
126 if (!RtlUnwindFtnsInited)
127 {
128 HINSTANCE hNtdll = WszGetModuleHandle(W("ntdll.dll"));
129 if (hNtdll != NULL)
130 {
131 void* growFunctionTable = GetProcAddress(hNtdll, "RtlGrowFunctionTable");
132 void* deleteGrowableFunctionTable = GetProcAddress(hNtdll, "RtlDeleteGrowableFunctionTable");
133 void* addGrowableFunctionTable = GetProcAddress(hNtdll, "RtlAddGrowableFunctionTable");
134
            // All or nothing; RtlAddGrowableFunctionTable is last (the marker)
136 if (growFunctionTable != NULL &&
137 deleteGrowableFunctionTable != NULL &&
138 addGrowableFunctionTable != NULL)
139 {
140 pRtlGrowFunctionTable = (RtlGrowFunctionTableFnPtr) growFunctionTable;
141 pRtlDeleteGrowableFunctionTable = (RtlDeleteGrowableFunctionTableFnPtr) deleteGrowableFunctionTable;
142 pRtlAddGrowableFunctionTable = (RtlAddGrowableFunctionTableFnPtr) addGrowableFunctionTable;
143 }
144 // Don't call FreeLibrary(hNtdll) because GetModuleHandle did *NOT* increment the reference count!
145 }
146 else
147 {
148#if _DEBUG
149 pRtlGrowFunctionTable = FakeRtlGrowFunctionTable;
150 pRtlDeleteGrowableFunctionTable = FakeRtlDeleteGrowableFunctionTable;
151 pRtlAddGrowableFunctionTable = FakeRtlAddGrowableFunctionTable;
152#endif
153 }
154 RtlUnwindFtnsInited = true;
155 }
156 return (pRtlAddGrowableFunctionTable != NULL);
157#else // !FEATURE_PAL
158 return false;
159#endif // !FEATURE_PAL
160}
161
162/****************************************************************************/
163UnwindInfoTable::UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size)
164{
165 STANDARD_VM_CONTRACT;
166 _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
167 _ASSERTE((rangeEnd - rangeStart) <= 0x7FFFFFFF);
168
169 cTableCurCount = 0;
170 cTableMaxCount = size;
171 cDeletedEntries = 0;
172 iRangeStart = rangeStart;
173 iRangeEnd = rangeEnd;
174 hHandle = NULL;
175 pTable = new T_RUNTIME_FUNCTION[cTableMaxCount];
176}
177
178/****************************************************************************/
179UnwindInfoTable::~UnwindInfoTable()
180{
181 CONTRACTL {
182 NOTHROW;
183 GC_NOTRIGGER;
184 } CONTRACTL_END;
185 _ASSERTE(s_publishingActive);
186
    // We do this lock-free because too many places still want no-trigger. It should be OK.
    // It would be cleaner if we could take the lock (i.e., if we did not have to be GC_NOTRIGGER).
189 UnRegister();
190 delete[] pTable;
191}
192
193/*****************************************************************************/
194void UnwindInfoTable::Register()
195{
196 _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
197 EX_TRY
198 {
199 hHandle = NULL;
200 NTSTATUS ret = pRtlAddGrowableFunctionTable(&hHandle, pTable, cTableCurCount, cTableMaxCount, iRangeStart, iRangeEnd);
201 if (ret != STATUS_SUCCESS)
202 {
203 _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
204 hHandle = NULL;
205 STRESS_LOG3(LF_JIT, LL_ERROR, "UnwindInfoTable::Register ERROR %x creating table [%p, %p]\n", ret, iRangeStart, iRangeEnd);
206 }
207 else
208 {
209 STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::Register Handle: %p [%p, %p]\n", hHandle, iRangeStart, iRangeEnd);
210 }
211 }
212 EX_CATCH
213 {
214 hHandle = NULL;
215 STRESS_LOG2(LF_JIT, LL_ERROR, "UnwindInfoTable::Register Exception while creating table [%p, %p]\n",
216 iRangeStart, iRangeEnd);
217 _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
218 }
219 EX_END_CATCH(SwallowAllExceptions)
220}
221
222/*****************************************************************************/
223void UnwindInfoTable::UnRegister()
224{
225 PVOID handle = hHandle;
226 hHandle = 0;
227 if (handle != 0)
228 {
229 STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::UnRegister Handle: %p [%p, %p]\n", handle, iRangeStart, iRangeEnd);
230 pRtlDeleteGrowableFunctionTable(handle);
231 }
232}
233
234/*****************************************************************************/
// Add 'data' to the unwind info table whose address is pointed at by 'unwindInfoPtr'
236//
237/* static */
238void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_RUNTIME_FUNCTION data,
239 TADDR rangeStart, TADDR rangeEnd)
240{
241 CONTRACTL
242 {
243 THROWS;
244 GC_TRIGGERS;
245 }
246 CONTRACTL_END;
247 _ASSERTE(data->BeginAddress <= RUNTIME_FUNCTION__EndAddress(data, rangeStart));
248 _ASSERTE(RUNTIME_FUNCTION__EndAddress(data, rangeStart) <= (rangeEnd-rangeStart));
249 _ASSERTE(unwindInfoPtr != NULL);
250
251 if (!s_publishingActive)
252 return;
253
254 CrstHolder ch(s_pUnwindInfoTableLock);
255
256 UnwindInfoTable* unwindInfo = *unwindInfoPtr;
    // Was the original table null? If so, lazily initialize it.
258 if (unwindInfo == NULL)
259 {
260 // We can choose the average method size estimate dynamically based on past experience
261 // 128 is the estimated size of an average method, so we can accurately predict
262 // how many RUNTIME_FUNCTION entries are in each chunk we allocate.
263
264 ULONG size = (ULONG) ((rangeEnd - rangeStart) / 128) + 1;
265
        // To ensure we test the growing logic, make the size much smaller in debug builds.
267 INDEBUG(size = size / 4 + 1);
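        // For example, a hypothetical 1 MB range yields 8193 entries (2049 under INDEBUG).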
268 unwindInfo = (PTR_UnwindInfoTable)new UnwindInfoTable(rangeStart, rangeEnd, size);
269 unwindInfo->Register();
270 *unwindInfoPtr = unwindInfo;
271 }
272 _ASSERTE(unwindInfo != NULL); // If new had failed, we would have thrown OOM
273 _ASSERTE(unwindInfo->cTableCurCount <= unwindInfo->cTableMaxCount);
274 _ASSERTE(unwindInfo->iRangeStart == rangeStart);
275 _ASSERTE(unwindInfo->iRangeEnd == rangeEnd);
276
277 // Means we had a failure publishing to the OS, in this case we give up
278 if (unwindInfo->hHandle == NULL)
279 return;
280
    // Check for the fast path: we are adding to the end of an UnwindInfoTable with free space
282 if (unwindInfo->cTableCurCount < unwindInfo->cTableMaxCount)
283 {
284 if (unwindInfo->cTableCurCount == 0 ||
285 unwindInfo->pTable[unwindInfo->cTableCurCount-1].BeginAddress < data->BeginAddress)
286 {
            // We can simply add to the end of the table and we are done!
288 unwindInfo->pTable[unwindInfo->cTableCurCount] = *data;
289 unwindInfo->cTableCurCount++;
290
291 // Add to the function table
292 pRtlGrowFunctionTable(unwindInfo->hHandle, unwindInfo->cTableCurCount);
293
294 STRESS_LOG5(LF_JIT, LL_INFO1000, "AddToUnwindTable Handle: %p [%p, %p] ADDING 0x%xp TO END, now 0x%x entries\n",
295 unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd,
296 data->BeginAddress, unwindInfo->cTableCurCount);
297 return;
298 }
299 }
300
    // OK, we need to reallocate the table and re-register. First figure out our 'desiredSpace'.
302 // We could imagine being much more efficient for 'bulk' updates, but we don't try
303 // because we assume that this is rare and we want to keep the code simple
304
305 int usedSpace = unwindInfo->cTableCurCount - unwindInfo->cDeletedEntries;
306 int desiredSpace = usedSpace * 5 / 4 + 1; // Increase by 20%
    // Be more aggressive if we used all of our space.
308 if (usedSpace == unwindInfo->cTableMaxCount)
309 desiredSpace = usedSpace * 3 / 2 + 1; // Increase by 50%
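    // (For example, a hypothetical table with 100 live entries grows to 126 slots, or to
    // 151 slots if it was completely full.)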
310
311 STRESS_LOG7(LF_JIT, LL_INFO100, "AddToUnwindTable Handle: %p [%p, %p] SLOW Realloc Cnt 0x%x Max 0x%x NewMax 0x%x, Adding %x\n",
312 unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd,
313 unwindInfo->cTableCurCount, unwindInfo->cTableMaxCount, desiredSpace, data->BeginAddress);
314
315 UnwindInfoTable* newTab = new UnwindInfoTable(unwindInfo->iRangeStart, unwindInfo->iRangeEnd, desiredSpace);
316
317 // Copy in the entries, removing deleted entries and adding the new entry wherever it belongs
318 int toIdx = 0;
319 bool inserted = false; // Have we inserted 'data' into the table
320 for(ULONG fromIdx = 0; fromIdx < unwindInfo->cTableCurCount; fromIdx++)
321 {
322 if (!inserted && data->BeginAddress < unwindInfo->pTable[fromIdx].BeginAddress)
323 {
324 STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at MID position 0x%x\n", toIdx);
325 newTab->pTable[toIdx++] = *data;
326 inserted = true;
327 }
328 if (unwindInfo->pTable[fromIdx].UnwindData != 0) // A 'non-deleted' entry
329 newTab->pTable[toIdx++] = unwindInfo->pTable[fromIdx];
330 }
331 if (!inserted)
332 {
333 STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at END position 0x%x\n", toIdx);
334 newTab->pTable[toIdx++] = *data;
335 }
336 newTab->cTableCurCount = toIdx;
337 STRESS_LOG2(LF_JIT, LL_INFO100, "AddToUnwindTable New size 0x%x max 0x%x\n",
338 newTab->cTableCurCount, newTab->cTableMaxCount);
339 _ASSERTE(newTab->cTableCurCount <= newTab->cTableMaxCount);
340
341 // Unregister the old table
342 *unwindInfoPtr = 0;
343 unwindInfo->UnRegister();
344
345 // Note that there is a short time when we are not publishing...
346
347 // Register the new table
348 newTab->Register();
349 *unwindInfoPtr = newTab;
350
351 delete unwindInfo;
352}
353
354/*****************************************************************************/
355/* static */ void UnwindInfoTable::RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint)
356{
357 CONTRACTL {
358 NOTHROW;
359 GC_TRIGGERS;
360 } CONTRACTL_END;
361 _ASSERTE(unwindInfoPtr != NULL);
362
363 if (!s_publishingActive)
364 return;
365 CrstHolder ch(s_pUnwindInfoTableLock);
366
367 UnwindInfoTable* unwindInfo = *unwindInfoPtr;
368 if (unwindInfo != NULL)
369 {
370 DWORD relativeEntryPoint = (DWORD)(entryPoint - baseAddress);
371 STRESS_LOG3(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removing %p BaseAddress %p rel %x\n",
372 entryPoint, baseAddress, relativeEntryPoint);
373 for(ULONG i = 0; i < unwindInfo->cTableCurCount; i++)
374 {
375 if (unwindInfo->pTable[i].BeginAddress <= relativeEntryPoint &&
376 relativeEntryPoint < RUNTIME_FUNCTION__EndAddress(&unwindInfo->pTable[i], unwindInfo->iRangeStart))
377 {
378 if (unwindInfo->pTable[i].UnwindData != 0)
379 unwindInfo->cDeletedEntries++;
380 unwindInfo->pTable[i].UnwindData = 0; // Mark the entry for deletion
381 STRESS_LOG1(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removed entry 0x%x\n", i);
382 return;
383 }
384 }
385 }
386 STRESS_LOG2(LF_JIT, LL_WARNING, "RemoveFromUnwindInfoTable COULD NOT FIND %p BaseAddress %p\n",
387 entryPoint, baseAddress);
388}
389
390/****************************************************************************/
// Publish the stack unwind data 'unwindInfo' (which is relative to 'baseAddress')
// to the operating system in a way ETW stack tracing can use.
393
394/* static */ void UnwindInfoTable::PublishUnwindInfoForMethod(TADDR baseAddress, PT_RUNTIME_FUNCTION unwindInfo, int unwindInfoCount)
395{
396 STANDARD_VM_CONTRACT;
397 if (!s_publishingActive)
398 return;
399
400 TADDR entry = baseAddress + unwindInfo->BeginAddress;
401 RangeSection * pRS = ExecutionManager::FindCodeRange(entry, ExecutionManager::GetScanFlags());
402 _ASSERTE(pRS != NULL);
403 if (pRS != NULL)
404 {
405 for(int i = 0; i < unwindInfoCount; i++)
406 AddToUnwindInfoTable(&pRS->pUnwindInfoTable, &unwindInfo[i], pRS->LowAddress, pRS->HighAddress);
407 }
408}
409
410/*****************************************************************************/
411/* static */ void UnwindInfoTable::UnpublishUnwindInfoForMethod(TADDR entryPoint)
412{
413 CONTRACTL {
414 NOTHROW;
415 GC_TRIGGERS;
416 } CONTRACTL_END;
417 if (!s_publishingActive)
418 return;
419
420 RangeSection * pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags());
421 _ASSERTE(pRS != NULL);
422 if (pRS != NULL)
423 {
424 _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
425 if (pRS->pjit->GetCodeType() == (miManaged | miIL))
426 {
            // This cast is justified because only EEJitManagers have the code type above.
428 EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
429 CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(entryPoint);
430 for(ULONG i = 0; i < pHeader->GetNumberOfUnwindInfos(); i++)
431 RemoveFromUnwindInfoTable(&pRS->pUnwindInfoTable, pRS->LowAddress, pRS->LowAddress + pHeader->GetUnwindInfo(i)->BeginAddress);
432 }
433 }
434}
435
436#ifdef STUBLINKER_GENERATES_UNWIND_INFO
437extern StubUnwindInfoHeapSegment *g_StubHeapSegments;
438#endif // STUBLINKER_GENERATES_UNWIND_INFO
439
440extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst;
441/*****************************************************************************/
442// Publish all existing JIT compiled methods by iterating through the code heap
443// Note that because we need to keep the entries in order we have to hold
444// s_pUnwindInfoTableLock so that all entries get inserted in the correct order.
445// (we rely on heapIterator walking the methods in a heap section in order).
446
447/* static */ void UnwindInfoTable::PublishUnwindInfoForExistingMethods()
448{
449 STANDARD_VM_CONTRACT;
450 {
        // CodeHeapIterator holds the m_CodeHeapCritSec, which ensures code heaps don't get deallocated while being walked
452 EEJitManager::CodeHeapIterator heapIterator(NULL);
453
454 // Currently m_CodeHeapCritSec is given the CRST_UNSAFE_ANYMODE flag which allows it to be taken in a GC_NOTRIGGER
455 // region but also disallows GC_TRIGGERS. We need GC_TRIGGERS because we take another lock. Ideally we would
456 // fix m_CodeHeapCritSec to not have the CRST_UNSAFE_ANYMODE flag, but I currently reached my threshold for fixing
457 // contracts.
458 CONTRACT_VIOLATION(GCViolation);
459
460 while(heapIterator.Next())
461 {
462 MethodDesc *pMD = heapIterator.GetMethod();
463 if(pMD)
464 {
465 PCODE methodEntry =(PCODE) heapIterator.GetMethodCode();
466 RangeSection * pRS = ExecutionManager::FindCodeRange(methodEntry, ExecutionManager::GetScanFlags());
467 _ASSERTE(pRS != NULL);
468 _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
469 if (pRS != NULL && pRS->pjit->GetCodeType() == (miManaged | miIL))
470 {
                // This cast is justified because only EEJitManagers have the code type above.
472 EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
473 CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(methodEntry);
474 int unwindInfoCount = pHeader->GetNumberOfUnwindInfos();
475 for(int i = 0; i < unwindInfoCount; i++)
476 AddToUnwindInfoTable(&pRS->pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->LowAddress, pRS->HighAddress);
477 }
478 }
479 }
480 }
481
482#ifdef STUBLINKER_GENERATES_UNWIND_INFO
483 // Enumerate all existing stubs
484 CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
485 for (StubUnwindInfoHeapSegment* pStubHeapSegment = g_StubHeapSegments; pStubHeapSegment; pStubHeapSegment = pStubHeapSegment->pNext)
486 {
487 // The stubs are in reverse order, so we reverse them so they are in memory order
488 CQuickArrayList<StubUnwindInfoHeader*> list;
489 for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList; pHeader; pHeader = pHeader->pNext)
490 list.Push(pHeader);
491
492 for(int i = (int) list.Size()-1; i >= 0; --i)
493 {
494 StubUnwindInfoHeader *pHeader = list[i];
495 AddToUnwindInfoTable(&pStubHeapSegment->pUnwindInfoTable, &pHeader->FunctionEntry,
496 (TADDR) pStubHeapSegment->pbBaseAddress, (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
497 }
498 }
499#endif // STUBLINKER_GENERATES_UNWIND_INFO
500}
501
502/*****************************************************************************/
503// turn on the publishing of unwind info. Called when the ETW rundown provider
504// is turned on.
505
506/* static */ void UnwindInfoTable::PublishUnwindInfo(bool publishExisting)
507{
508 CONTRACTL {
509 NOTHROW;
510 GC_TRIGGERS;
511 } CONTRACTL_END;
512
513 if (s_publishingActive)
514 return;
515
516 // If we don't have the APIs we need, give up
517 if (!InitUnwindFtns())
518 return;
519
520 EX_TRY
521 {
522 // Create the lock
523 Crst* newCrst = new Crst(CrstUnwindInfoTableLock);
524 if (InterlockedCompareExchangeT(&s_pUnwindInfoTableLock, newCrst, NULL) == NULL)
525 {
526 s_publishingActive = true;
527 if (publishExisting)
528 PublishUnwindInfoForExistingMethods();
529 }
530 else
531 delete newCrst; // we were in a race and failed, throw away the Crst we made.
532
533 } EX_CATCH {
534 STRESS_LOG1(LF_JIT, LL_ERROR, "Exception happened when doing unwind Info rundown. EIP of last AV = %p\n", g_LastAccessViolationEIP);
535 _ASSERTE(!"Exception thrown while publishing 'catchup' ETW unwind information");
536 s_publishingActive = false; // Try to minimize damage.
537 } EX_END_CATCH(SwallowAllExceptions);
538}
539
540#endif // defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE)
541
542/*-----------------------------------------------------------------------------
 This is a listing of which methods use which synchronization mechanism
544 in the EEJitManager.
545//-----------------------------------------------------------------------------
546
547Setters of EEJitManager::m_CodeHeapCritSec
548-----------------------------------------------
549allocCode
550allocGCInfo
551allocEHInfo
552allocJumpStubBlock
553ResolveEHClause
554RemoveJitData
555Unload
556ReleaseReferenceToHeap
557JitCodeToMethodInfo
558
559
560Need EEJitManager::m_CodeHeapCritSec to be set
561-----------------------------------------------
562NewCodeHeap
563allocCodeRaw
564GetCodeHeapList
565RemoveCodeHeapFromDomainList
566DeleteCodeHeap
567AddRangeToJitHeapCache
568DeleteJitHeapCache
569
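(Illustrative note: the first group typically enters the lock with a holder, e.g.
    CrstHolder ch(&m_CodeHeapCritSec);
while the second group expects to be called with the lock already held.)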
570*/
571
572
573#if !defined(DACCESS_COMPILE)
574EEJitManager::CodeHeapIterator::CodeHeapIterator(LoaderAllocator *pLoaderAllocatorFilter)
575 : m_lockHolder(&(ExecutionManager::GetEEJitManager()->m_CodeHeapCritSec)), m_Iterator(NULL, 0, NULL, 0)
576{
577 CONTRACTL
578 {
579 NOTHROW;
580 GC_NOTRIGGER;
581 MODE_ANY;
582 }
583 CONTRACTL_END;
584
585 m_pHeapList = NULL;
586 m_pLoaderAllocator = pLoaderAllocatorFilter;
587 m_pHeapList = ExecutionManager::GetEEJitManager()->GetCodeHeapList();
588 if(m_pHeapList)
589 new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
590};
591
592EEJitManager::CodeHeapIterator::~CodeHeapIterator()
593{
594 CONTRACTL
595 {
596 NOTHROW;
597 GC_NOTRIGGER;
598 MODE_ANY;
599 }
600 CONTRACTL_END;
601}
602
603BOOL EEJitManager::CodeHeapIterator::Next()
604{
605 CONTRACTL
606 {
607 NOTHROW;
608 GC_NOTRIGGER;
609 MODE_ANY;
610 }
611 CONTRACTL_END;
612
613 if(!m_pHeapList)
614 return FALSE;
615
616 while(1)
617 {
618 if(!m_Iterator.Next())
619 {
620 m_pHeapList = m_pHeapList->GetNext();
621 if(!m_pHeapList)
622 return FALSE;
623 new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
624 }
625 else
626 {
627 BYTE * code = m_Iterator.GetMethodCode();
628 CodeHeader * pHdr = (CodeHeader *)(code - sizeof(CodeHeader));
629 m_pCurrent = !pHdr->IsStubCodeBlock() ? pHdr->GetMethodDesc() : NULL;
630
631 // LoaderAllocator filter
632 if (m_pLoaderAllocator && m_pCurrent)
633 {
634 LoaderAllocator *pCurrentLoaderAllocator = m_pCurrent->GetLoaderAllocatorForCode();
635 if(pCurrentLoaderAllocator != m_pLoaderAllocator)
636 continue;
637 }
638
639 return TRUE;
640 }
641 }
642}
643#endif // !DACCESS_COMPILE
644
645#ifndef DACCESS_COMPILE
646
647//---------------------------------------------------------------------------------------
648//
// ReaderLockHolder::ReaderLockHolder takes the reader lock and checks for the writer lock;
// if the writer lock is held, it either bails out (when host calls are disallowed) or yields
// until the writer lock is released, keeping the reader lock. This is normally done in the
// constructor for the ReaderLockHolder.
653//
654// The writer cannot be taken if there are any readers. The WriterLockHolder functions take the
655// writer lock and check for any readers. If there are any, the WriterLockHolder functions
656// release the writer and yield to wait for the readers to be done.
657
658ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
659{
660 CONTRACTL {
661 NOTHROW;
662 if (hostCallPreference == AllowHostCalls) { HOST_CALLS; } else { HOST_NOCALLS; }
663 GC_NOTRIGGER;
664 SO_TOLERANT;
665 CAN_TAKE_LOCK;
666 } CONTRACTL_END;
667
668 IncCantAllocCount();
669
670 FastInterlockIncrement(&m_dwReaderCount);
671
672 EE_LOCK_TAKEN(GetPtrForLockContract());
673
674 if (VolatileLoad(&m_dwWriterLock) != 0)
675 {
676 if (hostCallPreference != AllowHostCalls)
677 {
678 // Rats, writer lock is held. Gotta bail. Since the reader count was already
679 // incremented, we're technically still blocking writers at the moment. But
680 // the holder who called us is about to call DecrementReader in its
681 // destructor and unblock writers.
682 return;
683 }
684
685 YIELD_WHILE ((VolatileLoad(&m_dwWriterLock) != 0));
686 }
687}
688
689//---------------------------------------------------------------------------------------
690//
691// See code:ExecutionManager::ReaderLockHolder::ReaderLockHolder. This just decrements the reader count.
692
693ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
694{
695 CONTRACTL
696 {
697 NOTHROW;
698 GC_NOTRIGGER;
699 SO_TOLERANT;
700 MODE_ANY;
701 }
702 CONTRACTL_END;
703
704 FastInterlockDecrement(&m_dwReaderCount);
705 DecCantAllocCount();
706
707 EE_LOCK_RELEASED(GetPtrForLockContract());
708}
709
710//---------------------------------------------------------------------------------------
711//
712// Returns whether the reader lock is acquired
713
714BOOL ExecutionManager::ReaderLockHolder::Acquired()
715{
716 LIMITED_METHOD_CONTRACT;
717 return VolatileLoad(&m_dwWriterLock) == 0;
718}
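
// Illustrative usage sketch (comment only): callers that cannot wait for the writer
// (e.g. when host calls are disallowed) typically pair the holder with Acquired():
//
//     ReaderLockHolder rlh(hostCallPreference);
//     if (!rlh.Acquired())
//         return FALSE;   // writer lock held and we may not block; let the caller cope
//     // ... walk m_CodeRangeList while the reader lock is held ...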
719
720ExecutionManager::WriterLockHolder::WriterLockHolder()
721{
722 CONTRACTL {
723 NOTHROW;
724 GC_NOTRIGGER;
725 CAN_TAKE_LOCK;
726 } CONTRACTL_END;
727
728 _ASSERTE(m_dwWriterLock == 0);
729
730 // Signal to a debugger that this thread cannot stop now
731 IncCantStopCount();
732
733 IncCantAllocCount();
734
735 DWORD dwSwitchCount = 0;
736 while (TRUE)
737 {
738 // While this thread holds the writer lock, we must not try to suspend it
739 // or allow a profiler to walk its stack
740 Thread::IncForbidSuspendThread();
741
742 FastInterlockIncrement(&m_dwWriterLock);
743 if (m_dwReaderCount == 0)
744 break;
745 FastInterlockDecrement(&m_dwWriterLock);
746
747 // Before we loop and retry, it's safe to suspend or hijack and inspect
748 // this thread
749 Thread::DecForbidSuspendThread();
750
751 __SwitchToThread(0, ++dwSwitchCount);
752 }
753 EE_LOCK_TAKEN(GetPtrForLockContract());
754}
755
756ExecutionManager::WriterLockHolder::~WriterLockHolder()
757{
758 LIMITED_METHOD_CONTRACT;
759
760 FastInterlockDecrement(&m_dwWriterLock);
761
762 // Writer lock released, so it's safe again for this thread to be
763 // suspended or have its stack walked by a profiler
764 Thread::DecForbidSuspendThread();
765
766 DecCantAllocCount();
767
768 // Signal to a debugger that it's again safe to stop this thread
769 DecCantStopCount();
770
771 EE_LOCK_RELEASED(GetPtrForLockContract());
772}
773
774#else
775
776// For DAC builds, we only care whether the writer lock is held.
777// If it is, we will assume the locked data is in an inconsistent
778// state and throw. We never actually take the lock.
779// Note: Throws
780ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
781{
782 SUPPORTS_DAC;
783
784 if (m_dwWriterLock != 0)
785 {
786 ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
787 }
788}
789
790ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
791{
792}
793
794#endif // DACCESS_COMPILE
795
796/*-----------------------------------------------------------------------------
 This is a listing of which methods use which synchronization mechanism
798 in the ExecutionManager
799//-----------------------------------------------------------------------------
800
801==============================================================================
ExecutionManager::ReaderLockHolder and ExecutionManager::WriterLockHolder
Protects the callers of ExecutionManager::GetRangeSection from heap deletions
while walking RangeSections. You need to take a reader lock before reading the
value of m_CodeRangeList and hold it while walking the lists.

Uses ReaderLockHolder (allows multiple readers with no writers)
808-----------------------------------------
809ExecutionManager::FindCodeRange
810ExecutionManager::FindZapModule
811ExecutionManager::EnumMemoryRegions
812
813Uses WriterLockHolder (allows single writer and no readers)
814-----------------------------------------
815ExecutionManager::AddRangeHelper
816ExecutionManager::DeleteRangeHelper
817
818*/
819
820//-----------------------------------------------------------------------------
821
822#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
823#define EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
824#endif
825
826#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
// Function fragments can be used for Hot/Cold splitting, for expressing large functions, or for 'ShrinkWrapping', which
// delays saving and restoring some callee-saved registers until later inside the body of the method.
// (It's assumed that the JIT will not emit any ShrinkWrapping-style methods.)
// For these cases multiple RUNTIME_FUNCTION entries (a.k.a. function fragments) are used to define
// all the regions of the function or funclet. One of these function fragments covers the beginning of the function/funclet,
// including the prolog section, and is referred to as the 'Host Record'.
// This function returns TRUE if the inspected RUNTIME_FUNCTION entry is NOT a host record.
834
835BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry)
836{
837 LIMITED_METHOD_DAC_CONTRACT;
838
839 _ASSERTE((pFunctionEntry->UnwindData & 3) == 0); // The unwind data must be an RVA; we don't support packed unwind format
840 DWORD unwindHeader = *(PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
841 _ASSERTE((0 == ((unwindHeader >> 18) & 3)) || !"unknown unwind data format, version != 0");
842#if defined(_TARGET_ARM_)
843
    // On ARM, it's assumed that the prolog is always at the beginning of the function and cannot be split.
845 // Given that, there are 4 possible ways to fragment a function:
846 // 1. Prolog only:
847 // 2. Prolog and some epilogs:
848 // 3. Epilogs only:
849 // 4. No Prolog or epilog
850 //
851 // Function fragments describing 1 & 2 are host records, 3 & 4 are not.
    // For 3 & 4, the .xdata record's F bit is set to 1, clearly marking what is NOT a host record
853
854 _ASSERTE((pFunctionEntry->BeginAddress & THUMB_CODE) == THUMB_CODE); // Sanity check: it's a thumb address
855 DWORD Fbit = (unwindHeader >> 22) & 0x1; // F "fragment" bit
856 return (Fbit == 1);
857#elif defined(_TARGET_ARM64_)
858
    // ARM64 is a little more flexible, in the sense that it supports partial prologs. However, only one of the
    // prolog regions is allowed to alter SP, and that's the Host Record. Partial prologs are used in ShrinkWrapping
    // scenarios, which are not supported, so we don't need to worry about them; discarding partial prologs
    // simplifies identifying a host record a lot.
863 //
864 // 1. Prolog only: The host record. Epilog Count and E bit are all 0.
865 // 2. Prolog and some epilogs: The host record with accompanying epilog-only records
866 // 3. Epilogs only: First unwind code is Phantom prolog (Starting with an end_c, indicating an empty prolog)
867 // 4. No prologs or epilogs: First unwind code is Phantom prolog (Starting with an end_c, indicating an empty prolog)
868 //
869
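    // For reference, the .xdata header word is assumed to be laid out (low to high bits) as:
    // FunctionLength[17:0], Vers[19:18], X (personality routine present)[20], E[21],
    // EpilogCount[26:22], CodeWords[31:27] -- matching the shifts used below.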
870 int EpilogCount = (int)(unwindHeader >> 22) & 0x1F;
871 int CodeWords = unwindHeader >> 27;
872 PTR_DWORD pUnwindCodes = (PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
873 // Skip header.
874 pUnwindCodes++;
875
876 // Skip extended header.
877 if ((CodeWords == 0) && (EpilogCount == 0))
878 {
879 EpilogCount = (*pUnwindCodes) & 0xFFFF;
880 pUnwindCodes++;
881 }
882
883 // Skip epilog scopes.
884 BOOL Ebit = (unwindHeader >> 21) & 0x1;
885 if (!Ebit && (EpilogCount != 0))
886 {
        // EpilogCount is the number of epilog scopes defined right after the unwindHeader
888 pUnwindCodes += EpilogCount;
889 }
890
891 return ((*pUnwindCodes & 0xFF) == 0xE5);
892#else
    PORTABILITY_ASSERT("IsFunctionFragment - NYI on this platform");
894#endif
895}
896
897#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
898
899
900#ifndef DACCESS_COMPILE
901
902//**********************************************************************************
903// IJitManager
904//**********************************************************************************
905IJitManager::IJitManager()
906{
907 LIMITED_METHOD_CONTRACT;
908
909 m_runtimeSupport = ExecutionManager::GetDefaultCodeManager();
910}
911
912#endif // #ifndef DACCESS_COMPILE
913
914// When we unload an appdomain, we need to make sure that any threads that are crawling through
915// our heap or rangelist are out. For cooperative-mode threads, we know that they will have
916// been stopped when we suspend the EE so they won't be touching an element that is about to be deleted.
917// However for pre-emptive mode threads, they could be stalled right on top of the element we want
918// to delete, so we need to apply the reader lock to them and wait for them to drain.
919ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
920{
921 CONTRACTL {
922 NOTHROW;
923 GC_NOTRIGGER;
924 SO_TOLERANT;
925 HOST_NOCALLS;
926 SUPPORTS_DAC;
927 } CONTRACTL_END;
928
929#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
930 BEGIN_GETTHREAD_ALLOWED;
931
932 Thread *pThread = GetThread();
933
934 if (!pThread)
935 return ScanNoReaderLock;
936
937 // If this thread is hijacked by a profiler and crawling its own stack,
938 // we do need to take the lock
939 if (pThread->GetProfilerFilterContext() != NULL)
940 return ScanReaderLock;
941
942 if (pThread->PreemptiveGCDisabled() || (pThread == ThreadSuspend::GetSuspensionThread()))
943 return ScanNoReaderLock;
944
945 END_GETTHREAD_ALLOWED;
946
947 return ScanReaderLock;
948#else
949 return ScanNoReaderLock;
950#endif
951}
952
953#ifdef DACCESS_COMPILE
954
955void IJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
956{
957 DAC_ENUM_VTHIS();
958 if (m_runtimeSupport.IsValid())
959 {
960 m_runtimeSupport->EnumMemoryRegions(flags);
961 }
962}
963
964#endif // #ifdef DACCESS_COMPILE
965
966#if defined(WIN64EXCEPTIONS)
967
968PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
969{
970 LIMITED_METHOD_CONTRACT;
971
972#if defined(_TARGET_AMD64_)
973 PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));
974
975 *pSize = ALIGN_UP(offsetof(UNWIND_INFO, UnwindCode) +
976 sizeof(UNWIND_CODE) * pUnwindInfo->CountOfUnwindCodes +
977 sizeof(ULONG) /* personality routine is always present */,
978 sizeof(DWORD));
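    // For example (assuming the standard 4-byte UNWIND_INFO header and 2-byte UNWIND_CODEs),
    // a record with 3 unwind codes is 4 + 2*3 + 4 = 14 bytes, rounded up to 16.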
979
980 return pUnwindInfo;
981
982#elif defined(_TARGET_X86_)
983 PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));
984
985 *pSize = sizeof(UNWIND_INFO);
986
987 return pUnwindInfo;
988
989#elif defined(_TARGET_ARM_)
990
991 // if this function uses packed unwind data then at least one of the two least significant bits
992 // will be non-zero. if this is the case then there will be no xdata record to enumerate.
993 _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0);
994
995 // compute the size of the unwind info
996 PTR_ULONG xdata = dac_cast<PTR_ULONG>(pRuntimeFunction->UnwindData + moduleBase);
997
998 ULONG epilogScopes = 0;
999 ULONG unwindWords = 0;
1000 ULONG size = 0;
1001
1002 if ((xdata[0] >> 23) != 0)
1003 {
1004 size = 4;
1005 epilogScopes = (xdata[0] >> 23) & 0x1f;
1006 unwindWords = (xdata[0] >> 28) & 0x0f;
1007 }
1008 else
1009 {
1010 size = 8;
1011 epilogScopes = xdata[1] & 0xffff;
1012 unwindWords = (xdata[1] >> 16) & 0xff;
1013 }
1014
1015 if (!(xdata[0] & (1 << 21)))
1016 size += 4 * epilogScopes;
1017
1018 size += 4 * unwindWords;
1019
1020 _ASSERTE(xdata[0] & (1 << 20)); // personality routine should be always present
1021 size += 4;
1022
1023 *pSize = size;
1024 return xdata;
1025
1026#elif defined(_TARGET_ARM64_)
1027 // if this function uses packed unwind data then at least one of the two least significant bits
1028 // will be non-zero. if this is the case then there will be no xdata record to enumerate.
1029 _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0);
1030
1031 // compute the size of the unwind info
1032 PTR_ULONG xdata = dac_cast<PTR_ULONG>(pRuntimeFunction->UnwindData + moduleBase);
1033 ULONG epilogScopes = 0;
1034 ULONG unwindWords = 0;
1035 ULONG size = 0;
1036
    // If both the Epilog Count and Code Words fields are non-zero,
    // the epilog scope and unwind code info is given by a 1-word header;
    // otherwise this info is given by a 2-word header.
1040 if ((xdata[0] >> 27) != 0)
1041 {
1042 size = 4;
1043 epilogScopes = (xdata[0] >> 22) & 0x1f;
1044 unwindWords = (xdata[0] >> 27) & 0x0f;
1045 }
1046 else
1047 {
1048 size = 8;
1049 epilogScopes = xdata[1] & 0xffff;
1050 unwindWords = (xdata[1] >> 16) & 0xff;
1051 }
1052
1053 if (!(xdata[0] & (1 << 21)))
1054 size += 4 * epilogScopes;
1055
1056 size += 4 * unwindWords;
1057
1058 _ASSERTE(xdata[0] & (1 << 20)); // personality routine should be always present
1059 size += 4; // exception handler RVA
1060
1061 *pSize = size;
1062 return xdata;
1063
1064
1065#else
1066 PORTABILITY_ASSERT("GetUnwindDataBlob");
1067 return NULL;
1068#endif
1069}
1070
1071// GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address.
1072TADDR IJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
1073{
1074 PTR_RUNTIME_FUNCTION pFunctionEntry = pCodeInfo->GetFunctionEntry();
1075
1076#ifdef _TARGET_AMD64_
1077 _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
1078#endif
1079
1080 TADDR baseAddress = pCodeInfo->GetModuleBase();
1081 TADDR funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
1082
1083#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
1084 // Is the RUNTIME_FUNCTION a fragment? If so, we need to walk backwards until we find the first
1085 // non-fragment RUNTIME_FUNCTION, and use that one. This happens when we have very large functions
1086 // and multiple RUNTIME_FUNCTION entries per function or funclet. However, all but the first will
1087 // have the "F" bit set in the unwind data, indicating a fragment (with phantom prolog unwind codes).
1088
1089 for (;;)
1090 {
1091 if (!IsFunctionFragment(baseAddress, pFunctionEntry))
1092 {
1093 // This is not a fragment; we're done
1094 break;
1095 }
1096
1097 // We found a fragment. Walk backwards in the RUNTIME_FUNCTION array until we find a non-fragment.
1098 // We're guaranteed to find one, because we require that a fragment live in a function or funclet
1099 // that has a prolog, which will have non-fragment .xdata.
1100 --pFunctionEntry;
1101
1102 funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
1103 }
1104#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
1105
1106 return funcletStartAddress;
1107}
1108
1109BOOL IJitManager::IsFunclet(EECodeInfo * pCodeInfo)
1110{
1111 CONTRACTL {
1112 NOTHROW;
1113 GC_NOTRIGGER;
1114 MODE_ANY;
1115 }
1116 CONTRACTL_END;
1117
1118 TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
1119 TADDR methodStartAddress = pCodeInfo->GetStartAddress();
1120
1121 return (funcletStartAddress != methodStartAddress);
1122}
1123
1124BOOL IJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
1125{
1126 CONTRACTL {
1127 NOTHROW;
1128 GC_NOTRIGGER;
1129 MODE_ANY;
1130 }
1131 CONTRACTL_END;
1132
1133 if (!pCodeInfo->IsFunclet())
1134 return FALSE;
1135
1136 TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
1137
1138 // This assumes no hot/cold splitting for funclets
1139
1140 _ASSERTE(FitsInU4(pCodeInfo->GetCodeAddress() - funcletStartAddress));
1141 DWORD relOffsetWithinFunclet = static_cast<DWORD>(pCodeInfo->GetCodeAddress() - funcletStartAddress);
1142
1143 _ASSERTE(pCodeInfo->GetRelOffset() >= relOffsetWithinFunclet);
1144 DWORD funcletStartOffset = pCodeInfo->GetRelOffset() - relOffsetWithinFunclet;
1145
1146 EH_CLAUSE_ENUMERATOR pEnumState;
1147 unsigned EHCount = InitializeEHEnumeration(pCodeInfo->GetMethodToken(), &pEnumState);
1148 _ASSERTE(EHCount > 0);
1149
1150 EE_ILEXCEPTION_CLAUSE EHClause;
1151 for (ULONG i = 0; i < EHCount; i++)
1152 {
1153 GetNextEHClause(&pEnumState, &EHClause);
1154
1155 // Duplicate clauses are always listed at the end, so when we hit a duplicate clause,
1156 // we have already visited all of the normal clauses.
1157 if (IsDuplicateClause(&EHClause))
1158 {
1159 break;
1160 }
1161
1162 if (IsFilterHandler(&EHClause))
1163 {
1164 if (EHClause.FilterOffset == funcletStartOffset)
1165 {
1166 return true;
1167 }
1168 }
1169 }
1170
1171 return false;
1172}
1173
1174#else // WIN64EXCEPTIONS
1175
1176PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
1177{
1178 *pSize = 0;
1179 return dac_cast<PTR_VOID>(pRuntimeFunction->UnwindData + moduleBase);
1180}
1181
1182#endif // WIN64EXCEPTIONS
1183
1184
1185#ifndef CROSSGEN_COMPILE
1186
1187#ifndef DACCESS_COMPILE
1188
1189//**********************************************************************************
1190// EEJitManager
1191//**********************************************************************************
1192
1193EEJitManager::EEJitManager()
1194 :
1195 // CRST_DEBUGGER_THREAD - We take this lock on debugger thread during EnC add method, among other things
1196 // CRST_TAKEN_DURING_SHUTDOWN - We take this lock during shutdown if ETW is on (to do rundown)
1197 m_CodeHeapCritSec( CrstSingleUseLock,
1198 CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD|CRST_TAKEN_DURING_SHUTDOWN)),
1199 m_CPUCompileFlags(),
1200 m_EHClauseCritSec( CrstSingleUseLock )
1201{
1202 CONTRACTL {
1203 THROWS;
1204 GC_NOTRIGGER;
1205 } CONTRACTL_END;
1206
1207 m_pCodeHeap = NULL;
1208 m_jit = NULL;
1209 m_JITCompiler = NULL;
1210#ifdef _TARGET_AMD64_
1211 m_pEmergencyJumpStubReserveList = NULL;
1212#endif
1213#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1214 m_JITCompilerOther = NULL;
1215#endif
1216
1217#ifdef ALLOW_SXS_JIT
1218 m_alternateJit = NULL;
1219 m_AltJITCompiler = NULL;
1220 m_AltJITRequired = false;
1221#endif
1222
1223 m_cleanupList = NULL;
1224}
1225
1226#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1227
1228bool DoesOSSupportAVX()
1229{
1230 LIMITED_METHOD_CONTRACT;
1231
1232#ifndef FEATURE_PAL
    // On Windows we have an API (GetEnabledXStateFeatures) to check if AVX is supported
1234 typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)();
1235 PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;
1236
1237 HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
1238 if(hMod == NULL)
1239 return FALSE;
1240
1241 pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures");
1242
1243 if (pfnGetEnabledXStateFeatures == NULL)
1244 {
1245 return FALSE;
1246 }
1247
1248 DWORD64 FeatureMask = pfnGetEnabledXStateFeatures();
1249 if ((FeatureMask & XSTATE_MASK_AVX) == 0)
1250 {
1251 return FALSE;
1252 }
1253#endif // !FEATURE_PAL
1254
1255 return TRUE;
1256}
1257
1258#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1259
1260void EEJitManager::SetCpuInfo()
1261{
1262 LIMITED_METHOD_CONTRACT;
1263
1264 //
1265 // NOTE: This function needs to be kept in sync with Zapper::CompileAssembly()
    // NOTE: This function needs to be kept in sync with compSetProcessor() in jit\compiler.cpp
1267 //
1268
1269 CORJIT_FLAGS CPUCompileFlags;
1270
1271#if defined(_TARGET_X86_)
1272 // NOTE: if you're adding any flags here, you probably should also be doing it
1273 // for ngen (zapper.cpp)
1274 CORINFO_CPU cpuInfo;
1275 GetSpecificCpuInfo(&cpuInfo);
1276
1277 switch (CPU_X86_FAMILY(cpuInfo.dwCPUType))
1278 {
1279 case CPU_X86_PENTIUM_4:
1280 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_TARGET_P4);
1281 break;
1282
1283 default:
1284 break;
1285 }
1286
1287 if (CPU_X86_USE_CMOV(cpuInfo.dwFeatures))
1288 {
1289 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_CMOV);
1290 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FCOMI);
1291 }
1292
1293 if (CPU_X86_USE_SSE2(cpuInfo.dwFeatures))
1294 {
1295 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2);
1296 }
1297#endif // _TARGET_X86_
1298
1299#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1300 // NOTE: The below checks are based on the information reported by
    // Intel® 64 and IA-32 Architectures Software Developer's Manual, Volume 2
    // and
    // AMD64 Architecture Programmer's Manual, Volume 3
1304 // For more information, please refer to the CPUID instruction in the respective manuals
1305
1306 // We will set the following flags:
1307 // CORJIT_FLAG_USE_SSE2 is required
1308 // SSE - EDX bit 25 (buffer[15] & 0x02)
1309 // SSE2 - EDX bit 26 (buffer[15] & 0x04)
1310 // CORJIT_FLAG_USE_SSE3 if the following feature bits are set (input EAX of 1)
1311 // CORJIT_FLAG_USE_SSE2
1312 // SSE3 - ECX bit 0 (buffer[8] & 0x01)
1313 // CORJIT_FLAG_USE_SSSE3 if the following feature bits are set (input EAX of 1)
1314 // CORJIT_FLAG_USE_SSE3
1315 // SSSE3 - ECX bit 9 (buffer[9] & 0x02)
1316 // CORJIT_FLAG_USE_SSE41 if the following feature bits are set (input EAX of 1)
1317 // CORJIT_FLAG_USE_SSSE3
1318 // SSE4.1 - ECX bit 19 (buffer[10] & 0x08)
1319 // CORJIT_FLAG_USE_SSE42 if the following feature bits are set (input EAX of 1)
1320 // CORJIT_FLAG_USE_SSE41
1321 // SSE4.2 - ECX bit 20 (buffer[10] & 0x10)
1322 // CORJIT_FLAG_USE_POPCNT if the following feature bits are set (input EAX of 1)
1323 // CORJIT_FLAG_USE_SSE42
1324 // POPCNT - ECX bit 23 (buffer[10] & 0x80)
1325 // CORJIT_FLAG_USE_AVX if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1:
1326 // CORJIT_FLAG_USE_SSE42
1327 // OSXSAVE - ECX bit 27 (buffer[11] & 0x08)
1328 // XGETBV - XCR0[2:1] 11b
1329 // AVX - ECX bit 28 (buffer[11] & 0x10)
1330 // CORJIT_FLAG_USE_FMA if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1:
1331 // CORJIT_FLAG_USE_AVX
1332 // FMA - ECX bit 12 (buffer[9] & 0x10)
1333 // CORJIT_FLAG_USE_AVX2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
1334 // CORJIT_FLAG_USE_AVX
1335 // AVX2 - EBX bit 5 (buffer[4] & 0x20)
    //      CORJIT_FLAG_USE_AVX_512 is not currently set, but defined so that it can be used in future without
    //      synchronously updating VM and JIT.
1337 // CORJIT_FLAG_USE_AES
1338 // CORJIT_FLAG_USE_SSE2
    //         AES - ECX bit 25 (buffer[11] & 0x02)
1340 // CORJIT_FLAG_USE_PCLMULQDQ
1341 // CORJIT_FLAG_USE_SSE2
    //         PCLMULQDQ - ECX bit 1 (buffer[8] & 0x02)
1343 // CORJIT_FLAG_USE_BMI1 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
1344 // BMI1 - EBX bit 3 (buffer[4] & 0x08)
1345 // CORJIT_FLAG_USE_BMI2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
1346 // BMI2 - EBX bit 8 (buffer[5] & 0x01)
1347 // CORJIT_FLAG_USE_LZCNT if the following feature bits are set (input EAX of 80000001H)
1348 // LZCNT - ECX bit 5 (buffer[8] & 0x20)
1350
1351 unsigned char buffer[16];
1352 DWORD maxCpuId = getcpuid(0, buffer);
1353
1354 if (maxCpuId >= 1)
1355 {
1356 // getcpuid executes cpuid with eax set to its first argument, and ecx cleared.
1357 // It returns the resulting eax in buffer[0-3], ebx in buffer[4-7], ecx in buffer[8-11],
1358 // and edx in buffer[12-15].
1359
1360 (void) getcpuid(1, buffer);
1361
1362 // If SSE/SSE2 is not enabled, there is no point in checking the rest.
1363 // SSE is bit 25 of EDX (buffer[15] & 0x02)
1364 // SSE2 is bit 26 of EDX (buffer[15] & 0x04)
1365
1366 if ((buffer[15] & 0x06) == 0x06) // SSE & SSE2
1367 {
1368 if ((buffer[11] & 0x02) != 0) // AESNI
1369 {
1370 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AES);
1371 }
1372
1373 if ((buffer[8] & 0x02) != 0) // PCLMULQDQ
1374 {
1375 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_PCLMULQDQ);
1376 }
1377
1378 if ((buffer[8] & 0x01) != 0) // SSE3
1379 {
1380 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3);
1381
1382 if ((buffer[9] & 0x02) != 0) // SSSE3
1383 {
1384 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSSE3);
1385
1386 if ((buffer[10] & 0x08) != 0) // SSE4.1
1387 {
1388 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE41);
1389
1390 if ((buffer[10] & 0x10) != 0) // SSE4.2
1391 {
1392 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE42);
1393
1394 if ((buffer[10] & 0x80) != 0) // POPCNT
1395 {
1396 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_POPCNT);
1397 }
1398
1399 if ((buffer[11] & 0x18) == 0x18) // AVX & OSXSAVE
1400 {
1401 if(DoesOSSupportAVX() && (xmmYmmStateSupport() == 1))
1402 {
1403 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX);
1404
1405 if ((buffer[9] & 0x10) != 0) // FMA
1406 {
1407 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FMA);
1408 }
1409
1410 if (maxCpuId >= 0x07)
1411 {
1412 (void) getextcpuid(0, 0x07, buffer);
1413
1414 if ((buffer[4] & 0x20) != 0) // AVX2
1415 {
1416 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2);
1417 }
1418 }
1419 }
1420 }
1421 }
1422 }
1423 }
1424 }
1425
1426 static ConfigDWORD fFeatureSIMD;
1427
1428 if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0)
1429 {
1430 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD);
1431 }
1432
1433 if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SIMD16ByteOnly) != 0)
1434 {
1435 CPUCompileFlags.Clear(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2);
1436 }
1437 }
1438
1439 if (maxCpuId >= 0x07)
1440 {
1441 (void)getextcpuid(0, 0x07, buffer);
1442
1443 if ((buffer[4] & 0x08) != 0) // BMI1
1444 {
1445 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_BMI1);
1446 }
1447
1448 if ((buffer[5] & 0x01) != 0) // BMI2
1449 {
1450 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_BMI2);
1451 }
1452 }
1453 }
1454
1455 DWORD maxCpuIdEx = getcpuid(0x80000000, buffer);
1456
1457 if (maxCpuIdEx >= 0x80000001)
1458 {
1459 // getcpuid executes cpuid with eax set to its first argument, and ecx cleared.
1460 // It returns the resulting eax in buffer[0-3], ebx in buffer[4-7], ecx in buffer[8-11],
1461 // and edx in buffer[12-15].
1462
1463 (void) getcpuid(0x80000001, buffer);
1464
1465 if ((buffer[8] & 0x20) != 0) // LZCNT
1466 {
1467 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_LZCNT);
1468 }
1469 }
1470#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1471
1472#if defined(_TARGET_ARM64_)
1473 static ConfigDWORD fFeatureSIMD;
1474 if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0)
1475 {
1476 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD);
1477 }
1478#if defined(FEATURE_PAL)
1479 PAL_GetJitCpuCapabilityFlags(&CPUCompileFlags);
1480#elif defined(_WIN64)
1481 // FP and SIMD support are enabled by default
1482 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SIMD);
1483 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP);
1484 // PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE (30)
1485 if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE))
1486 {
1487 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_AES);
1488 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA1);
1489 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA256);
1490 }
1491 // PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE (31)
1492 if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE))
1493 {
1494 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_CRC32);
1495 }
1496#endif // _WIN64
1497#endif // _TARGET_ARM64_
1498
1499 m_CPUCompileFlags = CPUCompileFlags;
1500}
1501
1502// Define some data that we can use to get a better idea of what happened when we get a Watson dump that indicates the JIT failed to load.
// This will be used and updated by the JIT loading and initialization functions, and the data will get written into a Watson dump.
1504
1505enum JIT_LOAD_JIT_ID
1506{
    JIT_LOAD_MAIN = 500,    // The "main" JIT. Normally, this is named "clrjit.dll". Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish it from garbage in process dumps.
1508 // 501 is JIT_LOAD_LEGACY on some platforms; please do not reuse this value.
1509 JIT_LOAD_ALTJIT = 502 // An "altjit". By default, named "protojit.dll". Used both internally, as well as externally for JIT CTP builds.
1510};
1511
1512enum JIT_LOAD_STATUS
1513{
    JIT_LOAD_STATUS_STARTING = 1001,                   // The JIT load process is starting. Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish it from garbage in process dumps.
1515 JIT_LOAD_STATUS_DONE_LOAD, // LoadLibrary of the JIT dll succeeded.
1516 JIT_LOAD_STATUS_DONE_GET_SXSJITSTARTUP, // GetProcAddress for "sxsJitStartup" succeeded.
1517 JIT_LOAD_STATUS_DONE_CALL_SXSJITSTARTUP, // Calling sxsJitStartup() succeeded.
1518 JIT_LOAD_STATUS_DONE_GET_JITSTARTUP, // GetProcAddress for "jitStartup" succeeded.
1519 JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP, // Calling jitStartup() succeeded.
1520 JIT_LOAD_STATUS_DONE_GET_GETJIT, // GetProcAddress for "getJit" succeeded.
1521 JIT_LOAD_STATUS_DONE_CALL_GETJIT, // Calling getJit() succeeded.
1522 JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER, // Calling ICorJitCompiler::getVersionIdentifier() succeeded.
1523 JIT_LOAD_STATUS_DONE_VERSION_CHECK, // The JIT-EE version identifier check succeeded.
1524 JIT_LOAD_STATUS_DONE, // The JIT load is complete, and successful.
1525};
1526
1527struct JIT_LOAD_DATA
1528{
1529 JIT_LOAD_JIT_ID jld_id; // Which JIT are we currently loading?
1530 JIT_LOAD_STATUS jld_status; // The current load status of a JIT load attempt.
1531 HRESULT jld_hr; // If the JIT load fails, the last jld_status will be JIT_LOAD_STATUS_STARTING.
1532 // In that case, this will contain the HRESULT returned by LoadLibrary.
1533 // Otherwise, this will be S_OK (which is zero).
1534};
1535
1536// Here's the global data for JIT load and initialization state.
1537JIT_LOAD_DATA g_JitLoadData;
1538
1539#if !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1540
1541// Global that holds the path to custom JIT location
1542extern "C" LPCWSTR g_CLRJITPath = nullptr;
1543
1544#endif // !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1545
1546
1547// LoadAndInitializeJIT: load the JIT dll into the process, and initialize it (call the UtilCode initialization function,
1548// check the JIT-EE interface GUID, etc.)
1549//
1550// Parameters:
1551//
1552// pwzJitName - The filename of the JIT .dll file to load. E.g., "altjit.dll".
1553// phJit - On return, *phJit is the Windows module handle of the loaded JIT dll. It will be NULL if the load failed.
1554// ppICorJitCompiler - On return, *ppICorJitCompiler is the ICorJitCompiler* returned by the JIT's getJit() entrypoint.
1555// It is NULL if the JIT returns a NULL interface pointer, or if the JIT-EE interface GUID is mismatched.
1556// Note that if the given JIT is loaded, but the interface is mismatched, then *phJit will be legal and non-NULL
1557// even though *ppICorJitCompiler is NULL. This allows the caller to unload the JIT dll, if necessary
1558// (nobody does this today).
1559// pJitLoadData - Pointer to a structure that we update as we load and initialize the JIT to indicate how far we've gotten. This
1560// is used to help understand problems we see with JIT loading that come in via Watson dumps. Since we don't throw
1561// an exception immediately upon failure, we can lose information about what the failure was if we don't store this
1562// information in a way that persists into a process dump.
1563//
1564
1565static void LoadAndInitializeJIT(LPCWSTR pwzJitName, OUT HINSTANCE* phJit, OUT ICorJitCompiler** ppICorJitCompiler, IN OUT JIT_LOAD_DATA* pJitLoadData)
1566{
1567 STANDARD_VM_CONTRACT;
1568
1569 _ASSERTE(phJit != NULL);
1570 _ASSERTE(ppICorJitCompiler != NULL);
1571 _ASSERTE(pJitLoadData != NULL);
1572
1573 pJitLoadData->jld_status = JIT_LOAD_STATUS_STARTING;
1574 pJitLoadData->jld_hr = S_OK;
1575
1576 *phJit = NULL;
1577 *ppICorJitCompiler = NULL;
1578
1579 HRESULT hr = E_FAIL;
1580
1581 PathString CoreClrFolderHolder;
1582 extern HINSTANCE g_hThisInst;
1583 bool havePath = false;
1584
1585#if !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1586 if (g_CLRJITPath != nullptr)
1587 {
1588 // If we have been asked to load a specific JIT binary, load from that path.
1589 // The main JIT load will use exactly that name because pwzJitName will have
1590 // been computed as the last component of g_CLRJITPath by ExecutionManager::GetJitName().
1591 // Non-primary JIT names (such as compatjit or altjit) will be loaded from the
1592 // same directory.
1593 // (Ideally, g_CLRJITPath would just be the JIT path without the filename component,
1594 // but that's not how the JIT_PATH variable was originally defined.)
1595 CoreClrFolderHolder.Set(g_CLRJITPath);
1596 havePath = true;
1597 }
1598 else
1599#endif // !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1600 if (WszGetModuleFileName(g_hThisInst, CoreClrFolderHolder))
1601 {
1602        // Load the JIT from the directory next to the CoreCLR binary
1603 havePath = true;
1604 }
1605
1606 if (havePath && !CoreClrFolderHolder.IsEmpty())
1607 {
1608 SString::Iterator iter = CoreClrFolderHolder.End();
1609 BOOL findSep = CoreClrFolderHolder.FindBack(iter, DIRECTORY_SEPARATOR_CHAR_W);
1610 if (findSep)
1611 {
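            // Replace the file name component (everything after the last directory separator)
            // with the requested JIT dll name, producing the full path of the JIT to load.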
1612 SString sJitName(pwzJitName);
1613 CoreClrFolderHolder.Replace(iter + 1, CoreClrFolderHolder.End() - (iter + 1), sJitName);
1614
1615 *phJit = CLRLoadLibrary(CoreClrFolderHolder.GetUnicode());
1616 if (*phJit != NULL)
1617 {
1618 hr = S_OK;
1619 }
1620 }
1621 }
1622
1623
1624 if (SUCCEEDED(hr))
1625 {
1626 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_LOAD;
1627
1628 EX_TRY
1629 {
1630 bool fContinueToLoadJIT = false;
1631            // For CoreCLR, we never use "sxsJitStartup" as that is Desktop utilcode initialization
1632            // specific. Thus, assume that step succeeded and continue on to load the JIT.
1633            fContinueToLoadJIT = true;
1634
1635 if (fContinueToLoadJIT)
1636 {
1637 typedef void (__stdcall* pjitStartup)(ICorJitHost*);
1638 pjitStartup jitStartupFn = (pjitStartup) GetProcAddress(*phJit, "jitStartup");
1639
1640 if (jitStartupFn)
1641 {
1642 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_JITSTARTUP;
1643
1644 (*jitStartupFn)(JitHost::getJitHost());
1645
1646 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP;
1647 }
1648
1649 typedef ICorJitCompiler* (__stdcall* pGetJitFn)();
1650 pGetJitFn getJitFn = (pGetJitFn) GetProcAddress(*phJit, "getJit");
1651
1652 if (getJitFn)
1653 {
1654 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_GETJIT;
1655
1656 ICorJitCompiler* pICorJitCompiler = (*getJitFn)();
1657 if (pICorJitCompiler != NULL)
1658 {
1659 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETJIT;
1660
1661 GUID versionId;
1662 memset(&versionId, 0, sizeof(GUID));
1663 pICorJitCompiler->getVersionIdentifier(&versionId);
1664
1665 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER;
1666
1667 if (memcmp(&versionId, &JITEEVersionIdentifier, sizeof(GUID)) == 0)
1668 {
1669 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_VERSION_CHECK;
1670
1671 // The JIT has loaded and passed the version identifier test, so publish the JIT interface to the caller.
1672 *ppICorJitCompiler = pICorJitCompiler;
1673
1674 // The JIT is completely loaded and initialized now.
1675 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE;
1676 }
1677 else
1678 {
1679 // Mismatched version ID. Fail the load.
1680 LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: mismatched JIT version identifier in %S\n", pwzJitName));
1681 }
1682 }
1683 else
1684 {
1685 LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to get ICorJitCompiler in %S\n", pwzJitName));
1686 }
1687 }
1688 else
1689 {
1690 LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to find 'getJit' entrypoint in %S\n", pwzJitName));
1691 }
1692 }
1693 else
1694 {
1695 LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to find 'sxsJitStartup' entrypoint in %S\n", pwzJitName));
1696 }
1697 }
1698 EX_CATCH
1699 {
1700 LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: caught an exception trying to initialize %S\n", pwzJitName));
1701 }
1702 EX_END_CATCH(SwallowAllExceptions)
1703 }
1704 else
1705 {
1706 pJitLoadData->jld_hr = hr;
1707 LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to load %S, hr=0x%08x\n", pwzJitName, hr));
1708 }
1709}
1710
1711#ifdef FEATURE_MERGE_JIT_AND_ENGINE
1712EXTERN_C void __stdcall jitStartup(ICorJitHost* host);
1713EXTERN_C ICorJitCompiler* __stdcall getJit();
1714#endif // FEATURE_MERGE_JIT_AND_ENGINE
1715
1716// Set this to the result of LoadJIT as a courtesy to code:CorCompileGetRuntimeDll
1717extern HMODULE s_ngenCompilerDll;
1718
1719BOOL EEJitManager::LoadJIT()
1720{
1721 STANDARD_VM_CONTRACT;
1722
1723 // If the JIT is already loaded, don't take the lock.
1724 if (IsJitLoaded())
1725 return TRUE;
1726
1727 // Abuse m_EHClauseCritSec to ensure that the JIT is loaded on one thread only
1728 CrstHolder chRead(&m_EHClauseCritSec);
1729
1730 // Did someone load the JIT before we got the lock?
1731 if (IsJitLoaded())
1732 return TRUE;
1733
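    // Determine the host CPU features; they are used to set up the JIT compile flags for this process.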
1734 SetCpuInfo();
1735
1736 ICorJitCompiler* newJitCompiler = NULL;
1737
1738#ifdef FEATURE_MERGE_JIT_AND_ENGINE
1739
1740 EX_TRY
1741 {
1742 jitStartup(JitHost::getJitHost());
1743
1744 newJitCompiler = getJit();
1745
1746 // We don't need to call getVersionIdentifier(), since the JIT is linked together with the VM.
1747 }
1748 EX_CATCH
1749 {
1750 }
1751 EX_END_CATCH(SwallowAllExceptions)
1752
1753#else // !FEATURE_MERGE_JIT_AND_ENGINE
1754
1755 m_JITCompiler = NULL;
1756#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1757 m_JITCompilerOther = NULL;
1758#endif
1759
1760 g_JitLoadData.jld_id = JIT_LOAD_MAIN;
1761 LoadAndInitializeJIT(ExecutionManager::GetJitName(), &m_JITCompiler, &newJitCompiler, &g_JitLoadData);
1762
1763 // Set as a courtesy to code:CorCompileGetRuntimeDll
1764 s_ngenCompilerDll = m_JITCompiler;
1765#endif // !FEATURE_MERGE_JIT_AND_ENGINE
1766
1767#ifdef ALLOW_SXS_JIT
1768
1769 // Do not load altjit.dll unless COMPlus_AltJit is set.
1770 // Even if the main JIT fails to load, if the user asks for an altjit we try to load it.
1771 // This allows us to display load error messages for loading altjit.
1772
1773 ICorJitCompiler* newAltJitCompiler = NULL;
1774
1775 LPWSTR altJitConfig;
1776 IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJit, &altJitConfig));
1777
1778 m_AltJITCompiler = NULL;
1779
1780 if (altJitConfig != NULL)
1781 {
1782 // Load the altjit into the system.
1783        // Note: altJitName must be declared as a const, otherwise assigning the string
1784 // constructed by MAKEDLLNAME_W() to altJitName will cause a build break on Unix.
1785 LPCWSTR altJitName;
1786 IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJitName, (LPWSTR*)&altJitName));
1787
1788 if (altJitName == NULL)
1789 {
1790 altJitName = MAKEDLLNAME_W(W("protojit"));
1791 }
1792
1793 g_JitLoadData.jld_id = JIT_LOAD_ALTJIT;
1794 LoadAndInitializeJIT(altJitName, &m_AltJITCompiler, &newAltJitCompiler, &g_JitLoadData);
1795 }
1796
1797#endif // ALLOW_SXS_JIT
1798
1799 // Publish the compilers.
1800
1801#ifdef ALLOW_SXS_JIT
1802 m_AltJITRequired = (altJitConfig != NULL);
1803 m_alternateJit = newAltJitCompiler;
1804#endif // ALLOW_SXS_JIT
1805
1806 m_jit = newJitCompiler;
1807
1808 // Failing to load the main JIT is a failure.
1809 // If the user requested an altjit and we failed to load an altjit, that is also a failure.
1810    // In either failure case, we'll rip down the VM, so there is no need to clean up (unload) whichever JIT did load successfully.
1811 return IsJitLoaded();
1812}
1813
1814#ifndef CROSSGEN_COMPILE
1815//**************************************************************************
1816
1817CodeFragmentHeap::CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind)
1818 : m_pAllocator(pAllocator), m_pFreeBlocks(NULL), m_kind(kind),
1819    // CRST_DEBUGGER_THREAD - We take this lock on the debugger thread during an EnC add-method operation
1820 m_CritSec(CrstCodeFragmentHeap, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD))
1821{
1822 WRAPPER_NO_CONTRACT;
1823}
1824
1825void CodeFragmentHeap::AddBlock(VOID * pMem, size_t dwSize)
1826{
1827 LIMITED_METHOD_CONTRACT;
1828 FreeBlock * pBlock = (FreeBlock *)pMem;
1829 pBlock->m_pNext = m_pFreeBlocks;
1830 pBlock->m_dwSize = dwSize;
1831 m_pFreeBlocks = pBlock;
1832}
1833
1834void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock)
1835{
1836 LIMITED_METHOD_CONTRACT;
1837 FreeBlock * pBlock = *ppBlock;
1838 *ppBlock = pBlock->m_pNext;
1839 ZeroMemory(pBlock, sizeof(FreeBlock));
1840}
1841
1842TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
1843 ,unsigned dwAlignment
1844#ifdef _DEBUG
1845 ,__in __in_z const char *szFile
1846 ,int lineNum
1847#endif
1848 )
1849{
1850 CrstHolder ch(&m_CritSec);
1851
1852 dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR));
1853
1854 if (dwRequestedSize < sizeof(FreeBlock))
1855 dwRequestedSize = sizeof(FreeBlock);
1856
1857 // We will try to batch up allocation of small blocks into one large allocation
1858#define SMALL_BLOCK_THRESHOLD 0x100
1859 SIZE_T nFreeSmallBlocks = 0;
1860
1861 FreeBlock ** ppBestFit = NULL;
1862 FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
1863 while (*ppFreeBlock != NULL)
1864 {
1865 FreeBlock * pFreeBlock = *ppFreeBlock;
1866 if (((BYTE *)pFreeBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
1867 {
1868 if (ppBestFit == NULL || pFreeBlock->m_dwSize < (*ppBestFit)->m_dwSize)
1869 ppBestFit = ppFreeBlock;
1870 }
1871 else
1872 {
1873 if (pFreeBlock->m_dwSize < SMALL_BLOCK_THRESHOLD)
1874 nFreeSmallBlocks++;
1875 }
1876 ppFreeBlock = &(*ppFreeBlock)->m_pNext;
1877 }
1878
1879 VOID * pMem;
1880 SIZE_T dwSize;
1881 if (ppBestFit != NULL)
1882 {
1883 pMem = *ppBestFit;
1884 dwSize = (*ppBestFit)->m_dwSize;
1885
1886 RemoveBlock(ppBestFit);
1887 }
1888 else
1889 {
1890 dwSize = dwRequestedSize;
1891 if (dwSize < SMALL_BLOCK_THRESHOLD)
1892 dwSize = 4 * SMALL_BLOCK_THRESHOLD;
1893 pMem = ExecutionManager::GetEEJitManager()->allocCodeFragmentBlock(dwSize, dwAlignment, m_pAllocator, m_kind);
1894 }
1895
1896 SIZE_T dwExtra = (BYTE *)ALIGN_UP(pMem, dwAlignment) - (BYTE *)pMem;
1897 _ASSERTE(dwSize >= dwExtra + dwRequestedSize);
1898 SIZE_T dwRemaining = dwSize - (dwExtra + dwRequestedSize);
1899
1900 // Avoid accumulation of too many small blocks. The more small free blocks we have, the more picky we are going to be about adding new ones.
1901 if ((dwRemaining >= max(sizeof(FreeBlock), sizeof(StubPrecode)) + (SMALL_BLOCK_THRESHOLD / 0x10) * nFreeSmallBlocks) || (dwRemaining >= SMALL_BLOCK_THRESHOLD))
1902 {
1903 AddBlock((BYTE *)pMem + dwExtra + dwRequestedSize, dwRemaining);
1904 dwSize -= dwRemaining;
1905 }
1906
1907 TaggedMemAllocPtr tmap;
1908 tmap.m_pMem = pMem;
1909 tmap.m_dwRequestedSize = dwSize;
1910 tmap.m_pHeap = this;
1911 tmap.m_dwExtra = dwExtra;
1912#ifdef _DEBUG
1913 tmap.m_szFile = szFile;
1914 tmap.m_lineNum = lineNum;
1915#endif
1916 return tmap;
1917}
1918
1919void CodeFragmentHeap::RealBackoutMem(void *pMem
1920 , size_t dwSize
1921#ifdef _DEBUG
1922 , __in __in_z const char *szFile
1923 , int lineNum
1924 , __in __in_z const char *szAllocFile
1925 , int allocLineNum
1926#endif
1927 )
1928{
1929 CrstHolder ch(&m_CritSec);
1930
1931 _ASSERTE(dwSize >= sizeof(FreeBlock));
1932
1933 ZeroMemory((BYTE *)pMem, dwSize);
1934
1935 //
1936 // Try to coalesce blocks if possible
1937 //
1938 FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
1939 while (*ppFreeBlock != NULL)
1940 {
1941 FreeBlock * pFreeBlock = *ppFreeBlock;
1942
1943 if ((BYTE *)pFreeBlock == (BYTE *)pMem + dwSize)
1944 {
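            // This free block starts exactly where the freed block ends - absorb it into the freed block.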
1945 // pMem = pMem;
1946 dwSize += pFreeBlock->m_dwSize;
1947 RemoveBlock(ppFreeBlock);
1948 continue;
1949 }
1950 else
1951 if ((BYTE *)pFreeBlock + pFreeBlock->m_dwSize == (BYTE *)pMem)
1952 {
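            // This free block ends exactly where the freed block starts - extend the freed block backwards over it.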
1953 pMem = pFreeBlock;
1954 dwSize += pFreeBlock->m_dwSize;
1955 RemoveBlock(ppFreeBlock);
1956 continue;
1957 }
1958
1959 ppFreeBlock = &(*ppFreeBlock)->m_pNext;
1960 }
1961
1962 AddBlock(pMem, dwSize);
1963}
1964#endif // !CROSSGEN_COMPILE
1965
1966//**************************************************************************
1967
1968LoaderCodeHeap::LoaderCodeHeap(size_t * pPrivatePCLBytes)
1969 : m_LoaderHeap(pPrivatePCLBytes,
1970 0, // RangeList *pRangeList
1971 TRUE), // BOOL fMakeExecutable
1972 m_cbMinNextPad(0)
1973{
1974 WRAPPER_NO_CONTRACT;
1975}
1976
1977void ThrowOutOfMemoryWithinRange()
1978{
1979 CONTRACTL {
1980 THROWS;
1981 GC_NOTRIGGER;
1982 } CONTRACTL_END;
1983
1984 // Allow breaking into debugger or terminating the process when this exception occurs
1985 switch (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnOutOfMemoryWithinRange))
1986 {
1987 case 1:
1988 DebugBreak();
1989 break;
1990 case 2:
1991 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
1992 break;
1993 default:
1994 break;
1995 }
1996
1997 EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_OUT_OF_MEMORY_WITHIN_RANGE));
1998}
1999
2000#ifdef _TARGET_AMD64_
2001BYTE * EEJitManager::AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize)
2002{
2003 CONTRACTL {
2004 NOTHROW;
2005 GC_NOTRIGGER;
2006 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2007 } CONTRACTL_END;
2008
2009 for (EmergencyJumpStubReserve ** ppPrev = &m_pEmergencyJumpStubReserveList; *ppPrev != NULL; ppPrev = &(*ppPrev)->m_pNext)
2010 {
2011 EmergencyJumpStubReserve * pList = *ppPrev;
2012
2013 if (loAddr <= pList->m_ptr &&
2014 pList->m_ptr + pList->m_size < hiAddr)
2015 {
2016 *ppPrev = pList->m_pNext;
2017
2018 BYTE * pBlock = pList->m_ptr;
2019 *pReserveSize = pList->m_size;
2020
2021 delete pList;
2022
2023 return pBlock;
2024 }
2025 }
2026
2027 return NULL;
2028}
2029
2030VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize)
2031{
2032 CONTRACTL {
2033 THROWS;
2034 GC_NOTRIGGER;
2035 } CONTRACTL_END;
2036
2037 CrstHolder ch(&m_CodeHeapCritSec);
2038
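    // Compute the range of addresses reachable with a rel32 displacement from anywhere in the image
    // (roughly +/-2GB around [pImageBase, pImageBase + imageSize]), clamping on overflow below.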
2039 BYTE * loAddr = pImageBase + imageSize + INT32_MIN;
2040 if (loAddr > pImageBase) loAddr = NULL; // overflow
2041
2042 BYTE * hiAddr = pImageBase + INT32_MAX;
2043 if (hiAddr < pImageBase) hiAddr = (BYTE *)UINT64_MAX; // overflow
2044
2045 for (EmergencyJumpStubReserve * pList = m_pEmergencyJumpStubReserveList; pList != NULL; pList = pList->m_pNext)
2046 {
2047 if (loAddr <= pList->m_ptr &&
2048 pList->m_ptr + pList->m_size < hiAddr)
2049 {
2050 SIZE_T used = min(reserveSize, pList->m_free);
2051 pList->m_free -= used;
2052
2053 reserveSize -= used;
2054 if (reserveSize == 0)
2055 return;
2056 }
2057 }
2058
2059 // Try several different strategies - the most efficient one first
2060 int allocMode = 0;
2061
2062 // Try to reserve at least 16MB at a time
2063 SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024);
2064
2065 while (reserveSize > 0)
2066 {
2067 NewHolder<EmergencyJumpStubReserve> pNewReserve(new EmergencyJumpStubReserve());
2068
2069 for (;;)
2070 {
2071 BYTE * loAddrCurrent = loAddr;
2072 BYTE * hiAddrCurrent = hiAddr;
2073
2074 switch (allocMode)
2075 {
2076 case 0:
2077 // First, try to allocate towards the center of the allowed range. It is more likely to
2078 // satisfy subsequent reservations.
2079 loAddrCurrent = loAddr + (hiAddr - loAddr) / 8;
2080 hiAddrCurrent = hiAddr - (hiAddr - loAddr) / 8;
2081 break;
2082 case 1:
2083 // Try the whole allowed range
2084 break;
2085 case 2:
2086 // If the large allocation failed, retry with small chunk size
2087 allocChunk = VIRTUAL_ALLOC_RESERVE_GRANULARITY;
2088 break;
2089 default:
2090 return; // Unable to allocate the reserve - give up
2091 }
2092
2093 pNewReserve->m_ptr = ClrVirtualAllocWithinRange(loAddrCurrent, hiAddrCurrent,
2094 allocChunk, MEM_RESERVE, PAGE_NOACCESS);
2095
2096 if (pNewReserve->m_ptr != NULL)
2097 break;
2098
2099 // Retry with the next allocation strategy
2100 allocMode++;
2101 }
2102
2103 SIZE_T used = min(allocChunk, reserveSize);
2104 reserveSize -= used;
2105
2106 pNewReserve->m_size = allocChunk;
2107 pNewReserve->m_free = allocChunk - used;
2108
2109 // Add it to the list
2110 pNewReserve->m_pNext = m_pEmergencyJumpStubReserveList;
2111 m_pEmergencyJumpStubReserveList = pNewReserve.Extract();
2112 }
2113}
2114#endif // _TARGET_AMD64_
2115
2116static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize)
2117{
2118 LIMITED_METHOD_CONTRACT;
2119
2120#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
2121 //
2122    // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce the
2123    // chance that we won't be able to allocate a jump stub because of a lack of suitable address space.
2124 //
2125 static ConfigDWORD configCodeHeapReserveForJumpStubs;
2126 int percentReserveForJumpStubs = configCodeHeapReserveForJumpStubs.val(CLRConfig::INTERNAL_CodeHeapReserveForJumpStubs);
2127
2128 size_t reserveForJumpStubs = percentReserveForJumpStubs * (codeHeapSize / 100);
2129
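    // At a minimum, keep enough space for one code header plus a fully populated jump stub block,
    // including code alignment and nibble map bucket padding.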
2130 size_t minReserveForJumpStubs = sizeof(CodeHeader) +
2131 sizeof(JumpStubBlockHeader) + (size_t) DEFAULT_JUMPSTUBS_PER_BLOCK * BACK_TO_BACK_JUMP_ALLOCATE_SIZE +
2132 CODE_SIZE_ALIGN + BYTES_PER_BUCKET;
2133
2134 return max(reserveForJumpStubs, minReserveForJumpStubs);
2135#else
2136 return 0;
2137#endif
2138}
2139
2140HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap)
2141{
2142 CONTRACT(HeapList *) {
2143 THROWS;
2144 GC_NOTRIGGER;
2145 POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange());
2146 } CONTRACT_END;
2147
2148 size_t * pPrivatePCLBytes = NULL;
2149 size_t reserveSize = pInfo->getReserveSize();
2150 size_t initialRequestSize = pInfo->getRequestSize();
2151 const BYTE * loAddr = pInfo->m_loAddr;
2152 const BYTE * hiAddr = pInfo->m_hiAddr;
2153
2154    // Make sure that what we are reserving will fit inside a DWORD
2155 if (reserveSize != (DWORD) reserveSize)
2156 {
2157 _ASSERTE(!"reserveSize does not fit in a DWORD");
2158 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
2159 }
2160
2161#ifdef ENABLE_PERF_COUNTERS
2162 pPrivatePCLBytes = &(GetPerfCounters().m_Loading.cbLoaderHeapSize);
2163#endif
2164
2165 LOG((LF_JIT, LL_INFO100,
2166 "Request new LoaderCodeHeap::CreateCodeHeap(%08x, %08x, for loader allocator" FMT_ADDR "in" FMT_ADDR ".." FMT_ADDR ")\n",
2167 (DWORD) reserveSize, (DWORD) initialRequestSize, DBG_ADDR(pInfo->m_pAllocator), DBG_ADDR(loAddr), DBG_ADDR(hiAddr)
2168 ));
2169
2170 NewHolder<LoaderCodeHeap> pCodeHeap(new LoaderCodeHeap(pPrivatePCLBytes));
2171
2172 BYTE * pBaseAddr = NULL;
2173 DWORD dwSizeAcquiredFromInitialBlock = 0;
2174 bool fAllocatedFromEmergencyJumpStubReserve = false;
2175
2176 pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)initialRequestSize, &dwSizeAcquiredFromInitialBlock);
2177 if (pBaseAddr != NULL)
2178 {
2179 pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, dwSizeAcquiredFromInitialBlock, FALSE);
2180 }
2181 else
2182 {
2183 if (loAddr != NULL || hiAddr != NULL)
2184 {
2185#ifdef _DEBUG
2186 // Always exercise the fallback path in the caller when forced relocs are turned on
2187 if (!pInfo->getThrowOnOutOfMemoryWithinRange() && PEDecoder::GetForceRelocs())
2188 RETURN NULL;
2189#endif
2190 pBaseAddr = ClrVirtualAllocWithinRange(loAddr, hiAddr,
2191 reserveSize, MEM_RESERVE, PAGE_NOACCESS);
2192
2193 if (!pBaseAddr)
2194 {
2195                // Conserve the emergency jump stub reserve until it is really needed
2196 if (!pInfo->getThrowOnOutOfMemoryWithinRange())
2197 RETURN NULL;
2198#ifdef _TARGET_AMD64_
2199 pBaseAddr = ExecutionManager::GetEEJitManager()->AllocateFromEmergencyJumpStubReserve(loAddr, hiAddr, &reserveSize);
2200 if (!pBaseAddr)
2201 ThrowOutOfMemoryWithinRange();
2202 fAllocatedFromEmergencyJumpStubReserve = true;
2203#else
2204 ThrowOutOfMemoryWithinRange();
2205#endif // _TARGET_AMD64_
2206 }
2207 }
2208 else
2209 {
2210 pBaseAddr = ClrVirtualAllocExecutable(reserveSize, MEM_RESERVE, PAGE_NOACCESS);
2211 if (!pBaseAddr)
2212 ThrowOutOfMemory();
2213 }
2214 pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, reserveSize, TRUE);
2215 }
2216
2217
2218    // this first allocation is critical as it correctly sets up the loader heap info
2219 HeapList *pHp = (HeapList*)pCodeHeap->m_LoaderHeap.AllocMem(sizeof(HeapList));
2220
2221 pHp->pHeap = pCodeHeap;
2222
2223 size_t heapSize = pCodeHeap->m_LoaderHeap.GetReservedBytesFree();
2224 size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize));
2225
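    // The HeapList header lives at the start of the heap's memory; code allocations begin immediately after it.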
2226 pHp->startAddress = (TADDR)pHp + sizeof(HeapList);
2227
2228 pHp->endAddress = pHp->startAddress;
2229 pHp->maxCodeHeapSize = heapSize;
2230 pHp->reserveForJumpStubs = fAllocatedFromEmergencyJumpStubReserve ? pHp->maxCodeHeapSize : GetDefaultReserveForJumpStubs(pHp->maxCodeHeapSize);
2231
2232 _ASSERTE(heapSize >= initialRequestSize);
2233
2234 // We do not need to memset this memory, since ClrVirtualAlloc() guarantees that the memory is zero.
2235 // Furthermore, if we avoid writing to it, these pages don't come into our working set
2236
2237    pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to the next lower page boundary
2238 pHp->pHdrMap = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize));
2239
2240 LOG((LF_JIT, LL_INFO100,
2241 "Created new CodeHeap(" FMT_ADDR ".." FMT_ADDR ")\n",
2242 DBG_ADDR(pHp->startAddress), DBG_ADDR(pHp->startAddress+pHp->maxCodeHeapSize)
2243 ));
2244
2245#ifdef _TARGET_64BIT_
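    // Emit a small thunk to ProcessCLRException inside the heap itself: unwind info in this heap encodes the
    // personality routine as an offset from the heap base, so the handler must be reachable from the heap's range.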
2246 emitJump((LPBYTE)pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
2247#endif // _TARGET_64BIT_
2248
2249 pCodeHeap.SuppressRelease();
2250 RETURN pHp;
2251}
2252
2253void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs)
2254{
2255 CONTRACTL {
2256 NOTHROW;
2257 GC_NOTRIGGER;
2258 } CONTRACTL_END;
2259
2260 if (m_cbMinNextPad > (SSIZE_T)header) header = m_cbMinNextPad;
2261
2262 void * p = m_LoaderHeap.AllocMemForCode_NoThrow(header, size, alignment, reserveForJumpStubs);
2263 if (p == NULL)
2264 return NULL;
2265
2266 // If the next allocation would have started in the same nibble map entry, allocate extra space to prevent it from happening
2267 // Note that m_cbMinNextPad can be negative
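    // (The nibble map can record only one method start per BYTES_PER_BUCKET-sized bucket; the pad is
    // applied as extra header space on the next request so the next method starts in a fresh bucket.)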
2268 m_cbMinNextPad = ALIGN_UP((SIZE_T)p + 1, BYTES_PER_BUCKET) - ((SIZE_T)p + size);
2269
2270 return p;
2271}
2272
2273void CodeHeapRequestInfo::Init()
2274{
2275 CONTRACTL {
2276 NOTHROW;
2277 GC_NOTRIGGER;
2278 PRECONDITION((m_hiAddr == 0) ||
2279 ((m_loAddr < m_hiAddr) &&
2280 ((m_loAddr + m_requestSize) < m_hiAddr)));
2281 } CONTRACTL_END;
2282
2283 if (m_pAllocator == NULL)
2284 m_pAllocator = m_pMD->GetLoaderAllocatorForCode();
2285 m_isDynamicDomain = (m_pMD != NULL) ? m_pMD->IsLCGMethod() : false;
2286 m_isCollectible = m_pAllocator->IsCollectible() ? true : false;
2287 m_throwOnOutOfMemoryWithinRange = true;
2288}
2289
2290#ifdef WIN64EXCEPTIONS
2291
2292#ifdef _WIN64
2293extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG64 ControlPc,
2294 IN PVOID Context)
2295#else
2296extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG ControlPc,
2297 IN PVOID Context)
2298#endif
2299{
2300 WRAPPER_NO_CONTRACT;
2301
2302 PT_RUNTIME_FUNCTION prf = NULL;
2303
2304    // We must preserve this so that GCStress=4 EH processing doesn't kill the last error.
2305 BEGIN_PRESERVE_LAST_ERROR;
2306
2307#ifdef ENABLE_CONTRACTS
2308 // Some 64-bit OOM tests use the hosting interface to re-enter the CLR via
2309 // RtlVirtualUnwind to track unique stacks at each failure point. RtlVirtualUnwind can
2310 // result in the EEJitManager taking a reader lock. This, in turn, results in a
2311 // CANNOT_TAKE_LOCK contract violation if a CANNOT_TAKE_LOCK function were on the stack
2312 // at the time. While it's theoretically possible for "real" hosts also to re-enter the
2313 // CLR via RtlVirtualUnwind, generally they don't, and we'd actually like to catch a real
2314 // host causing such a contract violation. Therefore, we'd like to suppress such contract
2315 // asserts when these OOM tests are running, but continue to enforce the contracts by
2316 // default. This function returns whether to suppress locking violations.
2317 CONDITIONAL_CONTRACT_VIOLATION(
2318 TakesLockViolation,
2319 g_pConfig->SuppressLockViolationsOnReentryFromOS());
2320#endif // ENABLE_CONTRACTS
2321
2322 EECodeInfo codeInfo((PCODE)ControlPc);
2323 if (codeInfo.IsValid())
2324 prf = codeInfo.GetFunctionEntry();
2325
2326 LOG((LF_EH, LL_INFO1000000, "GetRuntimeFunctionCallback(%p) returned %p\n", ControlPc, prf));
2327
2328 END_PRESERVE_LAST_ERROR;
2329
2330 return prf;
2331}
2332#endif // WIN64EXCEPTIONS
2333
2334HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList)
2335{
2336 CONTRACT(HeapList *) {
2337 THROWS;
2338 GC_NOTRIGGER;
2339 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2340 POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange());
2341 } CONTRACT_END;
2342
2343 size_t initialRequestSize = pInfo->getRequestSize();
2344 size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB)
2345
2346#ifdef _WIN64
2347 if (pInfo->m_hiAddr == 0)
2348 {
2349 if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD)
2350 {
2351 minReserveSize *= 4; // Increase the code heap size to 256 KB for workloads with a lot of code.
2352 }
2353
2354 // For non-DynamicDomains that don't have a loAddr/hiAddr range
2355 // we bump up the reserve size for the 64-bit platforms
2356 if (!pInfo->IsDynamicDomain())
2357 {
2358 minReserveSize *= 8; // CodeHeaps are larger on AMD64 (256 KB to 2048 KB)
2359 }
2360 }
2361#endif
2362
2363 // <BUGNUM> VSW 433293 </BUGNUM>
2364 // SETUP_NEW_BLOCK reserves the first sizeof(LoaderHeapBlock) bytes for LoaderHeapBlock.
2365    // In other words, the first m_pAllocPtr starts at sizeof(LoaderHeapBlock) bytes
2366 // after the allocated memory. Therefore, we need to take it into account.
2367 size_t requestAndHeadersSize = sizeof(LoaderHeapBlock) + sizeof(HeapList) + initialRequestSize;
2368
2369 size_t reserveSize = requestAndHeadersSize;
2370 if (reserveSize < minReserveSize)
2371 reserveSize = minReserveSize;
2372 reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
2373
2374 pInfo->setReserveSize(reserveSize);
2375
2376 HeapList *pHp = NULL;
2377
2378 DWORD flags = RangeSection::RANGE_SECTION_CODEHEAP;
2379
2380 if (pInfo->IsDynamicDomain())
2381 {
2382 flags |= RangeSection::RANGE_SECTION_COLLECTIBLE;
2383 pHp = HostCodeHeap::CreateCodeHeap(pInfo, this);
2384 }
2385 else
2386 {
2387 LoaderHeap *pJitMetaHeap = pADHeapList->m_pAllocator->GetLowFrequencyHeap();
2388
2389 if (pInfo->IsCollectible())
2390 flags |= RangeSection::RANGE_SECTION_COLLECTIBLE;
2391
2392 pHp = LoaderCodeHeap::CreateCodeHeap(pInfo, pJitMetaHeap);
2393 }
2394 if (pHp == NULL)
2395 {
2396 _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange());
2397 RETURN(NULL);
2398 }
2399
2400 _ASSERTE (pHp != NULL);
2401 _ASSERTE (pHp->maxCodeHeapSize >= initialRequestSize);
2402
2403 pHp->SetNext(GetCodeHeapList());
2404
2405 EX_TRY
2406 {
2407 TADDR pStartRange = (TADDR) pHp;
2408 TADDR pEndRange = (TADDR) &((BYTE*)pHp->startAddress)[pHp->maxCodeHeapSize];
2409
2410 ExecutionManager::AddCodeRange(pStartRange,
2411 pEndRange,
2412 this,
2413 (RangeSection::RangeSectionFlags)flags,
2414 pHp);
2415 //
2416 // add a table to cover each range in the range list
2417 //
2418 InstallEEFunctionTable(
2419 (PVOID)pStartRange, // this is just an ID that gets passed to RtlDeleteFunctionTable;
2420 (PVOID)pStartRange,
2421 (ULONG)((ULONG64)pEndRange - (ULONG64)pStartRange),
2422 GetRuntimeFunctionCallback,
2423 this,
2424 DYNFNTABLE_JIT);
2425 }
2426 EX_CATCH
2427 {
2428 // If we failed to alloc memory in ExecutionManager::AddCodeRange()
2429 // then we will delete the LoaderHeap that we allocated
2430
2431        // pHp was allocated inside pHp->pHeap, so we only need to delete the LoaderHeap itself
2432 delete pHp->pHeap;
2433
2434 pHp = NULL;
2435 }
2436 EX_END_CATCH(SwallowAllExceptions)
2437
2438 if (pHp == NULL)
2439 {
2440 ThrowOutOfMemory();
2441 }
2442
2443 m_pCodeHeap = pHp;
2444
2445 HeapList **ppHeapList = pADHeapList->m_CodeHeapList.AppendThrowing();
2446 *ppHeapList = pHp;
2447
2448 RETURN(pHp);
2449}
2450
2451void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo,
2452 size_t header, size_t blockSize, unsigned align,
2453 HeapList ** ppCodeHeap)
2454{
2455 CONTRACT(void *) {
2456 THROWS;
2457 GC_NOTRIGGER;
2458 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2459 POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange());
2460 } CONTRACT_END;
2461
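    // The request must cover the code header, the code block itself, worst-case alignment padding,
    // and any space the caller wants reserved for jump stubs.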
2462 pInfo->setRequestSize(header+blockSize+(align-1)+pInfo->getReserveForJumpStubs());
2463
2464 void * mem = NULL;
2465 HeapList * pCodeHeap = NULL;
2466 DomainCodeHeapList *pList = NULL;
2467
2468 // Avoid going through the full list in the common case - try to use the most recently used codeheap
2469 if (pInfo->IsDynamicDomain())
2470 {
2471 pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap;
2472 pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
2473 }
2474 else
2475 {
2476 pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedCodeHeap;
2477 pInfo->m_pAllocator->m_pLastUsedCodeHeap = NULL;
2478 }
2479
2480 // If we will use a cached code heap, ensure that the code heap meets the constraints
2481 if (pCodeHeap && CanUseCodeHeap(pInfo, pCodeHeap))
2482 {
2483 mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs());
2484 }
2485
2486 if (mem == NULL)
2487 {
2488 pList = GetCodeHeapList(pInfo, pInfo->m_pAllocator);
2489 if (pList != NULL)
2490 {
2491 for (int i = 0; i < pList->m_CodeHeapList.Count(); i++)
2492 {
2493 pCodeHeap = pList->m_CodeHeapList[i];
2494
2495 // Validate that the code heap can be used for the current request
2496 if (CanUseCodeHeap(pInfo, pCodeHeap))
2497 {
2498 mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs());
2499 if (mem != NULL)
2500 break;
2501 }
2502 }
2503 }
2504
2505 if (mem == NULL)
2506 {
2507 // Let us create a new heap.
2508 if (pList == NULL)
2509 {
2510 // not found so need to create the first one
2511 pList = CreateCodeHeapList(pInfo);
2512 _ASSERTE(pList == GetCodeHeapList(pInfo, pInfo->m_pAllocator));
2513 }
2514 _ASSERTE(pList);
2515
2516 pCodeHeap = NewCodeHeap(pInfo, pList);
2517 if (pCodeHeap == NULL)
2518 {
2519 _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange());
2520 RETURN(NULL);
2521 }
2522
2523 mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs());
2524 if (mem == NULL)
2525 ThrowOutOfMemory();
2526 _ASSERTE(mem);
2527 }
2528 }
2529
2530 if (pInfo->IsDynamicDomain())
2531 {
2532 pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = pCodeHeap;
2533 }
2534 else
2535 {
2536 pInfo->m_pAllocator->m_pLastUsedCodeHeap = pCodeHeap;
2537 }
2538
2539 // Record the pCodeHeap value into ppCodeHeap
2540 *ppCodeHeap = pCodeHeap;
2541
2542 _ASSERTE((TADDR)mem >= pCodeHeap->startAddress);
2543
2544 if (((TADDR) mem)+blockSize > (TADDR)pCodeHeap->endAddress)
2545 {
2546 // Update the CodeHeap endAddress
2547 pCodeHeap->endAddress = (TADDR)mem+blockSize;
2548 }
2549
2550 RETURN(mem);
2551}
2552
2553CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t reserveForJumpStubs, CorJitAllocMemFlag flag
2554#ifdef WIN64EXCEPTIONS
2555 , UINT nUnwindInfos
2556 , TADDR * pModuleBase
2557#endif
2558 )
2559{
2560 CONTRACT(CodeHeader *) {
2561 THROWS;
2562 GC_NOTRIGGER;
2563 POSTCONDITION(CheckPointer(RETVAL));
2564 } CONTRACT_END;
2565
2566 //
2567 // Alignment
2568 //
2569
2570 unsigned alignment = CODE_SIZE_ALIGN;
2571
2572 if ((flag & CORJIT_ALLOCMEM_FLG_16BYTE_ALIGN) != 0)
2573 {
2574 alignment = max(alignment, 16);
2575 }
2576
2577#if defined(_TARGET_X86_)
2578 // when not optimizing for code size, 8-byte align the method entry point, so that
2579 // the JIT can in turn 8-byte align the loop entry headers.
2580 //
2581 // when ReJIT is enabled, 8-byte-align the method entry point so that we may use an
2582 // 8-byte interlocked operation to atomically poke the top most bytes (e.g., to
2583 // redirect the rejit jmp-stamp at the top of the method from the prestub to the
2584 // rejitted code, or to reinstate original code on a revert).
2585 else if ((g_pConfig->GenOptimizeType() != OPT_SIZE) ||
2586 pMD->IsVersionableWithJumpStamp())
2587 {
2588 alignment = max(alignment, 8);
2589 }
2590#endif
2591
2592 //
2593 // Compute header layout
2594 //
2595
2596 SIZE_T totalSize = blockSize;
2597
2598 CodeHeader * pCodeHdr = NULL;
2599
2600 CodeHeapRequestInfo requestInfo(pMD);
2601#if defined(FEATURE_JIT_PITCHING)
2602 if (pMD && pMD->IsPitchable() && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMethodSizeThreshold) < blockSize)
2603 {
2604 requestInfo.SetDynamicDomain();
2605 }
2606#endif
2607 requestInfo.setReserveForJumpStubs(reserveForJumpStubs);
2608
2609#if defined(USE_INDIRECT_CODEHEADER)
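    // Size of the out-of-line RealCodeHeader: the fixed portion plus one T_RUNTIME_FUNCTION per unwind info.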
2610 SIZE_T realHeaderSize = offsetof(RealCodeHeader, unwindInfos[0]) + (sizeof(T_RUNTIME_FUNCTION) * nUnwindInfos);
2611
2612    // if this is an LCG method then we will be allocating the RealCodeHeader
2613 // following the code so that the code block can be removed easily by
2614 // the LCG code heap.
2615 if (requestInfo.IsDynamicDomain())
2616 {
2617 totalSize = ALIGN_UP(totalSize, sizeof(void*)) + realHeaderSize;
2618 static_assert_no_msg(CODE_SIZE_ALIGN >= sizeof(void*));
2619 }
2620#endif // USE_INDIRECT_CODEHEADER
2621
2622 // Scope the lock
2623 {
2624 CrstHolder ch(&m_CodeHeapCritSec);
2625
2626 HeapList *pCodeHeap = NULL;
2627
2628 TADDR pCode = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), totalSize, alignment, &pCodeHeap);
2629
2630 _ASSERTE(pCodeHeap);
2631
2632 if (pMD->IsLCGMethod())
2633 {
2634 pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->m_recordCodePointer = (void*) pCode;
2635 }
2636
2637 _ASSERTE(IS_ALIGNED(pCode, alignment));
2638
2639 JIT_PERF_UPDATE_X86_CODE_SIZE(totalSize);
2640
2641 // Initialize the CodeHeader *BEFORE* we publish this code range via the nibble
2642 // map so that we don't have to harden readers against uninitialized data.
2643        // However, because we hold the lock, this initialization should be fast and cheap!
2644
2645 pCodeHdr = ((CodeHeader *)pCode) - 1;
2646
2647#ifdef USE_INDIRECT_CODEHEADER
2648 if (requestInfo.IsDynamicDomain())
2649 {
2650 pCodeHdr->SetRealCodeHeader((BYTE*)pCode + ALIGN_UP(blockSize, sizeof(void*)));
2651 }
2652 else
2653 {
2654 // TODO: think about the CodeHeap carrying around a RealCodeHeader chunking mechanism
2655 //
2656 // allocate the real header in the low frequency heap
2657 BYTE* pRealHeader = (BYTE*)(void*)pMD->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(realHeaderSize));
2658 pCodeHdr->SetRealCodeHeader(pRealHeader);
2659 }
2660#endif
2661
2662 pCodeHdr->SetDebugInfo(NULL);
2663 pCodeHdr->SetEHInfo(NULL);
2664 pCodeHdr->SetGCInfo(NULL);
2665 pCodeHdr->SetMethodDesc(pMD);
2666#ifdef WIN64EXCEPTIONS
2667 pCodeHdr->SetNumberOfUnwindInfos(nUnwindInfos);
2668 *pModuleBase = (TADDR)pCodeHeap;
2669#endif
2670
2671 NibbleMapSet(pCodeHeap, pCode, TRUE);
2672 }
2673
2674 RETURN(pCodeHdr);
2675}
2676
2677EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(CodeHeapRequestInfo *pInfo, LoaderAllocator *pAllocator, BOOL fDynamicOnly)
2678{
2679 CONTRACTL {
2680 NOTHROW;
2681 GC_NOTRIGGER;
2682 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2683 } CONTRACTL_END;
2684
2685 DomainCodeHeapList *pList = NULL;
2686 DomainCodeHeapList **ppList = NULL;
2687 int count = 0;
2688
2689 // get the appropriate list of heaps
2690 // pMD is NULL for NGen modules during Module::LoadTokenTables
2691 if (fDynamicOnly || (pInfo != NULL && pInfo->IsDynamicDomain()))
2692 {
2693 ppList = m_DynamicDomainCodeHeaps.Table();
2694 count = m_DynamicDomainCodeHeaps.Count();
2695 }
2696 else
2697 {
2698 ppList = m_DomainCodeHeaps.Table();
2699 count = m_DomainCodeHeaps.Count();
2700 }
2701
2702 // this is a virtual call - pull it out of the loop
2703 BOOL fCanUnload = pAllocator->CanUnload();
2704
2705 // look for a DomainCodeHeapList
2706 for (int i=0; i < count; i++)
2707 {
2708 if (ppList[i]->m_pAllocator == pAllocator ||
2709 (!fCanUnload && !ppList[i]->m_pAllocator->CanUnload()))
2710 {
2711 pList = ppList[i];
2712 break;
2713 }
2714 }
2715 return pList;
2716}
2717
2718bool EEJitManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap)
2719{
2720 CONTRACTL {
2721 NOTHROW;
2722 GC_NOTRIGGER;
2723 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2724 } CONTRACTL_END;
2725
2726 bool retVal = false;
2727
2728 if ((pInfo->m_loAddr == 0) && (pInfo->m_hiAddr == 0))
2729 {
2730        // We have no constraint, so this non-empty heap will be able to satisfy our request
2731 if (pInfo->IsDynamicDomain())
2732 {
2733 _ASSERTE(pCodeHeap->reserveForJumpStubs == 0);
2734 retVal = true;
2735 }
2736 else
2737 {
2738 BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize;
2739
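            // The next allocation would start at endAddress; include nibble map bucket padding and require that
            // it fits below the space kept back for jump stubs.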
2740 BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress;
2741 BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET;
2742 if (hiRequestAddr <= lastAddr - pCodeHeap->reserveForJumpStubs)
2743 {
2744 retVal = true;
2745 }
2746 }
2747 }
2748 else
2749 {
2750 // We also check to see if an allocation in this heap would satisfy
2751 // the [loAddr..hiAddr] requirement
2752
2753 // Calculate the byte range that can ever be returned by
2754 // an allocation in this HeapList element
2755 //
2756 BYTE * firstAddr = (BYTE *) pCodeHeap->startAddress;
2757 BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize;
2758
2759 _ASSERTE(pCodeHeap->startAddress <= pCodeHeap->endAddress);
2760 _ASSERTE(firstAddr <= lastAddr);
2761
2762 if (pInfo->IsDynamicDomain())
2763 {
2764 _ASSERTE(pCodeHeap->reserveForJumpStubs == 0);
2765
2766 // We check to see if every allocation in this heap
2767 // will satisfy the [loAddr..hiAddr] requirement.
2768 //
2769 // Dynamic domains use a free list allocator,
2770 // thus we can receive any address in the range
2771 // when calling AllocMemory with a DynamicDomain
2772
2773            // [firstAddr .. lastAddr] must be entirely within
2774 // [pInfo->m_loAddr .. pInfo->m_hiAddr]
2775 //
2776 if ((pInfo->m_loAddr <= firstAddr) &&
2777 (lastAddr <= pInfo->m_hiAddr))
2778 {
2779 // This heap will always satisfy our constraint
2780 retVal = true;
2781 }
2782 }
2783 else // non-DynamicDomain
2784 {
2785 // Calculate the byte range that would be allocated for the
2786 // next allocation request into [loRequestAddr..hiRequestAddr]
2787 //
2788 BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress;
2789 BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET;
2790 _ASSERTE(loRequestAddr <= hiRequestAddr);
2791
2792 // loRequestAddr and hiRequestAddr must be entirely within
2793 // [pInfo->m_loAddr .. pInfo->m_hiAddr]
2794 //
2795 if ((pInfo->m_loAddr <= loRequestAddr) &&
2796 (hiRequestAddr <= pInfo->m_hiAddr))
2797 {
2798 // Additionally hiRequestAddr must also be less than or equal to lastAddr.
2799 // If throwOnOutOfMemoryWithinRange is not set, conserve reserveForJumpStubs until when it is really needed.
2800 if (hiRequestAddr <= lastAddr - (pInfo->getThrowOnOutOfMemoryWithinRange() ? 0 : pCodeHeap->reserveForJumpStubs))
2801 {
2802 // This heap will be able to satisfy our constraint
2803 retVal = true;
2804 }
2805 }
2806 }
2807 }
2808
2809 return retVal;
2810}
2811
2812EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequestInfo *pInfo)
2813{
2814 CONTRACTL {
2815 THROWS;
2816 GC_NOTRIGGER;
2817 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2818 } CONTRACTL_END;
2819
2820 NewHolder<DomainCodeHeapList> pNewList(new DomainCodeHeapList());
2821 pNewList->m_pAllocator = pInfo->m_pAllocator;
2822
2823 DomainCodeHeapList **ppList = NULL;
2824 if (pInfo->IsDynamicDomain())
2825 ppList = m_DynamicDomainCodeHeaps.AppendThrowing();
2826 else
2827 ppList = m_DomainCodeHeaps.AppendThrowing();
2828 *ppList = pNewList;
2829
2830 return pNewList.Extract();
2831}
2832
2833LoaderHeap *EEJitManager::GetJitMetaHeap(MethodDesc *pMD)
2834{
2835 CONTRACTL {
2836 NOTHROW;
2837 GC_NOTRIGGER;
2838 } CONTRACTL_END;
2839
2840 LoaderAllocator *pAllocator = pMD->GetLoaderAllocator();
2841 _ASSERTE(pAllocator);
2842
2843 return pAllocator->GetLowFrequencyHeap();
2844}
2845
2846BYTE* EEJitManager::allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
2847{
2848 CONTRACTL {
2849 THROWS;
2850 GC_NOTRIGGER;
2851 } CONTRACTL_END;
2852
2853 MethodDesc* pMD = pCodeHeader->GetMethodDesc();
2854    // Sadly, for lightweight code gen (LCG) we need the check here. We should change GetJitMetaHeap.
2855 if (pMD->IsLCGMethod())
2856 {
2857 CrstHolder ch(&m_CodeHeapCritSec);
2858 pCodeHeader->SetGCInfo((BYTE*)(void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize));
2859 }
2860 else
2861 {
2862 pCodeHeader->SetGCInfo((BYTE*) (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize)));
2863 }
2864 _ASSERTE(pCodeHeader->GetGCInfo()); // AllocMem throws if there's not enough memory
2865 JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
2866
2867 * pAllocationSize = blockSize; // Store the allocation size so we can backout later.
2868
2869 return(pCodeHeader->GetGCInfo());
2870}
2871
2872void* EEJitManager::allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
2873{
2874 CONTRACTL {
2875 THROWS;
2876 GC_NOTRIGGER;
2877 } CONTRACTL_END;
2878
2879 MethodDesc* pMD = pCodeHeader->GetMethodDesc();
2880 void * mem = NULL;
2881
2882    // Sadly, for lightweight code gen (LCG) we need the check here. We should change GetJitMetaHeap.
2883 if (pMD->IsLCGMethod())
2884 {
2885 CrstHolder ch(&m_CodeHeapCritSec);
2886 mem = (void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize);
2887 }
2888 else
2889 {
2890 mem = (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize));
2891 }
2892 _ASSERTE(mem); // AllocMem throws if there's not enough memory
2893
2894 JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
2895
2896 * pAllocationSize = blockSize; // Store the allocation size so we can backout later.
2897
2898 return(mem);
2899}
2900
2901
2902EE_ILEXCEPTION* EEJitManager::allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize)
2903{
2904 CONTRACTL {
2905 THROWS;
2906 GC_NOTRIGGER;
2907 } CONTRACTL_END;
2908
2909 // Note - pCodeHeader->phdrJitEHInfo - sizeof(size_t) contains the number of EH clauses
2910
2911 DWORD temp = EE_ILEXCEPTION::Size(numClauses);
2912 DWORD blockSize = 0;
2913 if (!ClrSafeInt<DWORD>::addition(temp, sizeof(size_t), blockSize))
2914 COMPlusThrowOM();
2915
2916 BYTE *EHInfo = (BYTE*)allocEHInfoRaw(pCodeHeader, blockSize, pAllocationSize);
2917
2918 pCodeHeader->SetEHInfo((EE_ILEXCEPTION*) (EHInfo + sizeof(size_t)));
2919 pCodeHeader->GetEHInfo()->Init(numClauses);
2920 *((size_t *)EHInfo) = numClauses;
2921 return(pCodeHeader->GetEHInfo());
2922}
2923
2924JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps,
2925 BYTE * loAddr, BYTE * hiAddr,
2926 LoaderAllocator *pLoaderAllocator,
2927 bool throwOnOutOfMemoryWithinRange)
2928{
2929 CONTRACT(JumpStubBlockHeader *) {
2930 THROWS;
2931 GC_NOTRIGGER;
2932 PRECONDITION(loAddr < hiAddr);
2933 PRECONDITION(pLoaderAllocator != NULL);
2934 POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
2935 } CONTRACT_END;
2936
2937 _ASSERTE((sizeof(JumpStubBlockHeader) % CODE_SIZE_ALIGN) == 0);
2938
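    // One block header followed by numJumps back-to-back jump stubs.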
2939 size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumps * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;
2940
2941 HeapList *pCodeHeap = NULL;
2942 CodeHeapRequestInfo requestInfo(pMD, pLoaderAllocator, loAddr, hiAddr);
2943 requestInfo.setThrowOnOutOfMemoryWithinRange(throwOnOutOfMemoryWithinRange);
2944
2945 TADDR mem;
2946 JumpStubBlockHeader * pBlock;
2947
2948 // Scope the lock
2949 {
2950 CrstHolder ch(&m_CodeHeapCritSec);
2951
2952 mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(TADDR), blockSize, CODE_SIZE_ALIGN, &pCodeHeap);
2953 if (mem == NULL)
2954 {
2955 _ASSERTE(!throwOnOutOfMemoryWithinRange);
2956 RETURN(NULL);
2957 }
2958
2959 // CodeHeader comes immediately before the block
2960 CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
2961 pCodeHdr->SetStubCodeBlockKind(STUB_CODE_BLOCK_JUMPSTUB);
2962
2963 NibbleMapSet(pCodeHeap, mem, TRUE);
2964
2965 pBlock = (JumpStubBlockHeader *)mem;
2966
2967 _ASSERTE(IS_ALIGNED(pBlock, CODE_SIZE_ALIGN));
2968
2969 JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
2970 }
2971
2972 pBlock->m_next = NULL;
2973 pBlock->m_used = 0;
2974 pBlock->m_allocated = numJumps;
2975 if (pMD && pMD->IsLCGMethod())
2976 pBlock->SetHostCodeHeap(static_cast<HostCodeHeap*>(pCodeHeap->pHeap));
2977 else
2978 pBlock->SetLoaderAllocator(pLoaderAllocator);
2979
2980 LOG((LF_JIT, LL_INFO1000, "Allocated new JumpStubBlockHeader for %d stubs at" FMT_ADDR " in loader allocator " FMT_ADDR "\n",
2981 numJumps, DBG_ADDR(pBlock) , DBG_ADDR(pLoaderAllocator) ));
2982
2983 RETURN(pBlock);
2984}
2985
2986void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind)
2987{
2988 CONTRACT(void *) {
2989 THROWS;
2990 GC_NOTRIGGER;
2991 PRECONDITION(pLoaderAllocator != NULL);
2992 POSTCONDITION(CheckPointer(RETVAL));
2993 } CONTRACT_END;
2994
2995 HeapList *pCodeHeap = NULL;
2996 CodeHeapRequestInfo requestInfo(NULL, pLoaderAllocator, NULL, NULL);
2997
2998#ifdef _TARGET_AMD64_
2999    // CodeFragments are pretty much always Precodes that may need to be patched with jump stubs at some point in the future.
3000    // We will assume the worst case, that every FixupPrecode will need to be patched, and reserve the jump stubs accordingly.
3001 requestInfo.setReserveForJumpStubs((blockSize / 8) * JUMP_ALLOCATE_SIZE);
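    // (blockSize / 8 is a worst-case estimate of the number of precodes in the block, with one jump stub reserved per precode.)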
3002#endif
3003
3004 TADDR mem;
3005
3006 // Scope the lock
3007 {
3008 CrstHolder ch(&m_CodeHeapCritSec);
3009
3010 mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, alignment, &pCodeHeap);
3011
3012 // CodeHeader comes immediately before the block
3013 CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
3014 pCodeHdr->SetStubCodeBlockKind(kind);
3015
3016 NibbleMapSet(pCodeHeap, (TADDR)mem, TRUE);
3017
3018 // Record the jump stub reservation
3019 pCodeHeap->reserveForJumpStubs += requestInfo.getReserveForJumpStubs();
3020 }
3021
3022 RETURN((void *)mem);
3023}
3024
3025#endif // !DACCESS_COMPILE
3026
3027
3028GCInfoToken EEJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
3029{
3030 CONTRACTL {
3031 NOTHROW;
3032 GC_NOTRIGGER;
3033 HOST_NOCALLS;
3034 SUPPORTS_DAC;
3035 } CONTRACTL_END;
3036
3037 // The JIT-ed code always has the current version of GCInfo
3038 return{ GetCodeHeader(MethodToken)->GetGCInfo(), GCINFO_VERSION };
3039}
3040
3041// creates an enumeration and returns the number of EH clauses
3042unsigned EEJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
3043{
3044 LIMITED_METHOD_CONTRACT;
3045 EE_ILEXCEPTION * EHInfo = GetCodeHeader(MethodToken)->GetEHInfo();
3046
3047 pEnumState->iCurrentPos = 0; // since the EH info is not compressed, the clause number is used to do the enumeration
3048 pEnumState->pExceptionClauseArray = NULL;
3049
3050 if (!EHInfo)
3051 return 0;
3052
3053 pEnumState->pExceptionClauseArray = dac_cast<TADDR>(EHInfo->EHClause(0));
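    // The number of clauses is stored in the size_t immediately preceding the EE_ILEXCEPTION block (see allocEHInfo).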
3054 return *(dac_cast<PTR_unsigned>(dac_cast<TADDR>(EHInfo) - sizeof(size_t)));
3055}
3056
3057PTR_EXCEPTION_CLAUSE_TOKEN EEJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
3058 EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
3059{
3060 CONTRACTL {
3061 NOTHROW;
3062 GC_NOTRIGGER;
3063 } CONTRACTL_END;
3064
3065 unsigned iCurrentPos = pEnumState->iCurrentPos;
3066 pEnumState->iCurrentPos++;
3067
3068 EE_ILEXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_EE_ILEXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
3069 *pEHClauseOut = *pClause;
3070 return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
3071}
3072
3073#ifndef DACCESS_COMPILE
3074TypeHandle EEJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
3075 CrawlFrame *pCf)
3076{
3077 // We don't want to use a runtime contract here since this codepath is used during
3078 // the processing of a hard SO. Contracts use a significant amount of stack
3079 // which we can't afford for those cases.
3080 STATIC_CONTRACT_THROWS;
3081 STATIC_CONTRACT_GC_TRIGGERS;
3082
3083 _ASSERTE(NULL != pCf);
3084 _ASSERTE(NULL != pEHClause);
3085 _ASSERTE(IsTypedHandler(pEHClause));
3086
3087
3088 TypeHandle typeHnd = TypeHandle();
3089 mdToken typeTok = mdTokenNil;
3090
3091 {
3092 CrstHolder chRead(&m_EHClauseCritSec);
3093 if (HasCachedTypeHandle(pEHClause))
3094 {
3095 typeHnd = TypeHandle::FromPtr(pEHClause->TypeHandle);
3096 }
3097 else
3098 {
3099 typeTok = pEHClause->ClassToken;
3100 }
3101 }
3102
3103 if (!typeHnd.IsNull())
3104 {
3105 return typeHnd;
3106 }
3107
3108 MethodDesc* pMD = pCf->GetFunction();
3109 Module* pModule = pMD->GetModule();
3110 PREFIX_ASSUME(pModule != NULL);
3111
3112 SigTypeContext typeContext(pMD);
3113 VarKind k = hasNoVars;
3114
3115 // In the vast majority of cases the code under the "if" below
3116 // will not be executed.
3117 //
3118 // First grab the representative instantiations. For code
3119 // shared by multiple generic instantiations these are the
3120 // canonical (representative) instantiation.
3121 if (TypeFromToken(typeTok) == mdtTypeSpec)
3122 {
3123 PCCOR_SIGNATURE pSig;
3124 ULONG cSig;
3125 IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
3126
3127 SigPointer psig(pSig, cSig);
3128 k = psig.IsPolyType(&typeContext);
3129
3130 // Grab the active class and method instantiation. This exact instantiation is only
3131 // needed in the corner case of "generic" exception catching in shared
3132 // generic code. We don't need the exact instantiation if the token
3133 // doesn't contain E_T_VAR or E_T_MVAR.
3134 if ((k & hasSharableVarsMask) != 0)
3135 {
3136 Instantiation classInst;
3137 Instantiation methodInst;
3138 pCf->GetExactGenericInstantiations(&classInst, &methodInst);
3139 SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
3140 }
3141 }
3142
3143 typeHnd = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
3144 ClassLoader::ReturnNullIfNotFound);
3145
3146 // If the type (pModule,typeTok) was not loaded or not
3147 // restored then the exception object won't have this type, because an
3148 // object of this type has not been allocated.
3149 if (typeHnd.IsNull())
3150 return typeHnd;
3151
3152 // We can cache any exception specification except:
3153 // - If the type contains type variables in generic code,
3154 // e.g. catch E<T> where T is a type variable.
3155 // We CANNOT cache E<T> in non-shared instantiations of generic code because
3156 // there is only one EHClause cache for the IL, shared across all instantiations.
3157 //
3158 if((k & hasAnyVarsMask) == 0)
3159 {
3160 CrstHolder chWrite(&m_EHClauseCritSec);
3161
3162 // Note another thread might have beaten us to it ...
3163 if (!HasCachedTypeHandle(pEHClause))
3164 {
3165 // We should never cache a NULL typeHnd.
3166 _ASSERTE(!typeHnd.IsNull());
3167 pEHClause->TypeHandle = typeHnd.AsPtr();
3168 SetHasCachedTypeHandle(pEHClause);
3169 }
3170 else
3171 {
3172 // If we raced in here with another thread and got held up on the lock, then we just need to return the
3173 // type handle that the other thread put into the clause.
3174 // The typeHnd we found and the typeHnd the racing thread found should always be the same
3175 _ASSERTE(typeHnd.AsPtr() == pEHClause->TypeHandle);
3176 typeHnd = TypeHandle::FromPtr(pEHClause->TypeHandle);
3177 }
3178 }
3179 return typeHnd;
3180}
3181
3182void EEJitManager::RemoveJitData (CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len)
3183{
3184 CONTRACTL {
3185 NOTHROW;
3186 GC_TRIGGERS;
3187 } CONTRACTL_END;
3188
3189 MethodDesc* pMD = pCHdr->GetMethodDesc();
3190
3191 if (pMD->IsLCGMethod()) {
3192
3193 void * codeStart = (pCHdr + 1);
3194
3195 {
3196 CrstHolder ch(&m_CodeHeapCritSec);
3197
3198 LCGMethodResolver * pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
3199
3200 // Clear the pointer only if it matches what we are about to free.
3201 // There can be cases where the JIT is reentered and we JITed the method multiple times.
3202 if (pResolver->m_recordCodePointer == codeStart)
3203 pResolver->m_recordCodePointer = NULL;
3204 }
3205
3206#if defined(_TARGET_AMD64_)
3207 // Remove the unwind information (if applicable)
3208 UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)codeStart);
3209#endif // defined(_TARGET_AMD64_)
3210
3211 HostCodeHeap* pHeap = HostCodeHeap::GetCodeHeap((TADDR)codeStart);
3212 FreeCodeMemory(pHeap, codeStart);
3213
3214 // We are leaking GCInfo and EHInfo. They will be freed once the dynamic method is destroyed.
3215
3216 return;
3217 }
3218
3219 {
3220 CrstHolder ch(&m_CodeHeapCritSec);
3221
3222 HeapList *pHp = GetCodeHeapList();
3223
3224 while (pHp && ((pHp->startAddress > (TADDR)pCHdr) ||
3225 (pHp->endAddress < (TADDR)pCHdr + sizeof(CodeHeader))))
3226 {
3227 pHp = pHp->GetNext();
3228 }
3229
3230 _ASSERTE(pHp && pHp->pHdrMap);
3231
3232 // Better to just return than AV?
3233 if (pHp == NULL)
3234 return;
3235
3236 NibbleMapSet(pHp, (TADDR)(pCHdr + 1), FALSE);
3237 }
3238
3239 // Backout the GCInfo
3240 if (GCinfo_len > 0) {
3241 GetJitMetaHeap(pMD)->BackoutMem(pCHdr->GetGCInfo(), GCinfo_len);
3242 }
3243
3244 // Backout the EHInfo
3245 BYTE *EHInfo = (BYTE *)pCHdr->GetEHInfo();
3246 if (EHInfo) {
3247 EHInfo -= sizeof(size_t);
3248
3249 _ASSERTE(EHinfo_len>0);
3250 GetJitMetaHeap(pMD)->BackoutMem(EHInfo, EHinfo_len);
3251 }
3252
3253 // <TODO>
3254    // TODO: Although we have backed out the GCInfo and EHInfo, we haven't actually backed out the
3255 // code buffer itself. As a result, we might leak the CodeHeap if jitting fails after
3256 // the code buffer is allocated.
3257 //
3258    // However, it appears non-trivial to fix this.
3259 // Here are some of the reasons:
3260    // (1) AllocCode calls into AllocCodeRaw to allocate the code buffer in the CodeHeap. The exact size
3261 // of the code buffer is not known until the alignment is calculated deep on the stack.
3262 // (2) AllocCodeRaw is called in 3 different places. We might need to remember the
3263 // information for these places.
3264 // (3) AllocCodeRaw might create a new CodeHeap. We should remember exactly which
3265 // CodeHeap is used to allocate the code buffer.
3266 //
3267 // Fortunately, this is not a severe leak since the CodeHeap will be reclaimed on appdomain unload.
3268 //
3269 // </TODO>
3270 return;
3271}
3272
3273// The appdomain is being unloaded, so delete any data associated with it. We have to do this in two stages.
3274// In the first stage, we remove the elements from the list. In the second stage, which occurs after a GC,
3275// we know that only threads that were in preemptive mode prior to the GC could possibly still be looking
3276// at an element that is about to be deleted. All such threads are guarded with a reader count, so if the
3277// count is 0 we can safely delete; otherwise we must add to the cleanup list to be deleted later. We know
3278// there can only be one unload at a time, so we can use a single var to hold the unlinked, but not deleted,
3279// elements.
3280void EEJitManager::Unload(LoaderAllocator *pAllocator)
3281{
3282 CONTRACTL {
3283 NOTHROW;
3284 GC_NOTRIGGER;
3285 } CONTRACTL_END;
3286
3287 CrstHolder ch(&m_CodeHeapCritSec);
3288
3289 DomainCodeHeapList **ppList = m_DomainCodeHeaps.Table();
3290 int count = m_DomainCodeHeaps.Count();
3291
3292 for (int i=0; i < count; i++) {
3293 if (ppList[i]->m_pAllocator== pAllocator) {
3294 DomainCodeHeapList *pList = ppList[i];
3295 m_DomainCodeHeaps.DeleteByIndex(i);
3296
3297 // pHeapList is allocated in pHeap, so only need to delete the LoaderHeap itself
3298 count = pList->m_CodeHeapList.Count();
3299 for (i=0; i < count; i++) {
3300 HeapList *pHeapList = pList->m_CodeHeapList[i];
3301 DeleteCodeHeap(pHeapList);
3302 }
3303
3304            // It is OK to delete here, as anyone accessing the DomainCodeHeapList structure holds the critical section.
3305 delete pList;
3306
3307 break;
3308 }
3309 }
3310 ppList = m_DynamicDomainCodeHeaps.Table();
3311 count = m_DynamicDomainCodeHeaps.Count();
3312 for (int i=0; i < count; i++) {
3313 if (ppList[i]->m_pAllocator== pAllocator) {
3314 DomainCodeHeapList *pList = ppList[i];
3315 m_DynamicDomainCodeHeaps.DeleteByIndex(i);
3316
3317 // pHeapList is allocated in pHeap, so only need to delete the CodeHeap itself
3318 count = pList->m_CodeHeapList.Count();
3319 for (i=0; i < count; i++) {
3320 HeapList *pHeapList = pList->m_CodeHeapList[i];
3321 // m_DynamicDomainCodeHeaps should only contain HostCodeHeap.
3322 RemoveFromCleanupList(static_cast<HostCodeHeap*>(pHeapList->pHeap));
3323 DeleteCodeHeap(pHeapList);
3324 }
3325
            // It is ok to do the delete here because anyone accessing the DomainCodeHeapList structure holds the critical section.
3327 delete pList;
3328
3329 break;
3330 }
3331 }
3332
3333 ResetCodeAllocHint();
3334}
3335
3336EEJitManager::DomainCodeHeapList::DomainCodeHeapList()
3337{
3338 LIMITED_METHOD_CONTRACT;
3339 m_pAllocator = NULL;
3340}
3341
3342EEJitManager::DomainCodeHeapList::~DomainCodeHeapList()
3343{
3344 LIMITED_METHOD_CONTRACT;
3345}
3346
3347void EEJitManager::RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator)
3348{
3349 CONTRACTL {
3350 NOTHROW;
3351 GC_NOTRIGGER;
3352 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3353 } CONTRACTL_END;
3354
3355 // get the AppDomain heap list for pAllocator in m_DynamicDomainCodeHeaps
3356 DomainCodeHeapList *pList = GetCodeHeapList(NULL, pAllocator, TRUE);
3357
3358 // go through the heaps and find and remove pHeap
3359 int count = pList->m_CodeHeapList.Count();
3360 for (int i = 0; i < count; i++) {
3361 HeapList *pHeapList = pList->m_CodeHeapList[i];
3362 if (pHeapList->pHeap == pHeap) {
3363 // found the heap to remove. If this is the only heap we remove the whole DomainCodeHeapList
3364 // otherwise we just remove this heap
3365 if (count == 1) {
3366 m_DynamicDomainCodeHeaps.Delete(pList);
3367 delete pList;
3368 }
3369 else
3370 pList->m_CodeHeapList.Delete(i);
3371
3372 // if this heaplist is cached in the loader allocator, we must clear it
3373 if (pAllocator->m_pLastUsedDynamicCodeHeap == ((void *) pHeapList))
3374 {
3375 pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
3376 }
3377
3378 break;
3379 }
3380 }
3381}
3382
3383void EEJitManager::FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart)
3384{
3385 CONTRACTL
3386 {
3387 NOTHROW;
3388 GC_NOTRIGGER;
3389 }
3390 CONTRACTL_END;
3391
3392 CrstHolder ch(&m_CodeHeapCritSec);
3393
3394 // FreeCodeMemory is only supported on LCG methods,
3395 // so pCodeHeap can only be a HostCodeHeap.
3396
3397 // clean up the NibbleMap
3398 NibbleMapSet(pCodeHeap->m_pHeapList, (TADDR)codeStart, FALSE);
3399
3400 // The caller of this method doesn't call HostCodeHeap->FreeMemForCode
3401 // directly because the operation should be protected by m_CodeHeapCritSec.
3402 pCodeHeap->FreeMemForCode(codeStart);
3403}
3404
3405void ExecutionManager::CleanupCodeHeaps()
3406{
3407 CONTRACTL
3408 {
3409 NOTHROW;
3410 GC_NOTRIGGER;
3411 }
3412 CONTRACTL_END;
3413
3414 _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));
3415
3416 GetEEJitManager()->CleanupCodeHeaps();
3417}
3418
3419void EEJitManager::CleanupCodeHeaps()
3420{
3421 CONTRACTL
3422 {
3423 NOTHROW;
3424 GC_NOTRIGGER;
3425 }
3426 CONTRACTL_END;
3427
3428 _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));
3429
    // Quick out: don't even take the lock if we have no cleanup to do.
    // This is important because ETW takes the CodeHeapLock when it is doing
    // rundown, and if there are many JIT compiled methods, this can take a while.
    // Because cleanup is called synchronously before a GC, GCs would get
    // blocked while ETW is doing rundown. By not taking the lock we avoid
    // this stall most of the time: cleanup is rare and ETW rundown is rare,
    // so the likelihood of both happening at once is very low.
3437 if (m_cleanupList == NULL)
3438 return;
3439
3440 CrstHolder ch(&m_CodeHeapCritSec);
3441
3442 if (m_cleanupList == NULL)
3443 return;
3444
3445 HostCodeHeap *pHeap = m_cleanupList;
3446 m_cleanupList = NULL;
3447
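    // Walk the heaps that were queued for cleanup. A heap whose allocation count is still
    // zero can be destroyed now; one that was reused in the meantime (count > 0) is left alive.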
3448 while (pHeap)
3449 {
3450 HostCodeHeap *pNextHeap = pHeap->m_pNextHeapToRelease;
3451
3452 DWORD allocCount = pHeap->m_AllocationCount;
3453 if (allocCount == 0)
3454 {
            LOG((LF_BCL, LL_INFO100, "Level2 - Destroying CodeHeap [0x%p, vt(0x%x)] - ref count 0\n", pHeap, *(size_t*)pHeap));
3456 RemoveCodeHeapFromDomainList(pHeap, pHeap->m_pAllocator);
3457 DeleteCodeHeap(pHeap->m_pHeapList);
3458 }
3459 else
3460 {
3461 LOG((LF_BCL, LL_INFO100, "Level2 - Restoring CodeHeap [0x%p, vt(0x%x)] - ref count %d\n", pHeap, *(size_t*)pHeap, allocCount));
3462 }
3463 pHeap = pNextHeap;
3464 }
3465}
3466
3467void EEJitManager::RemoveFromCleanupList(HostCodeHeap *pCodeHeap)
3468{
3469 CONTRACTL {
3470 NOTHROW;
3471 GC_NOTRIGGER;
3472 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3473 } CONTRACTL_END;
3474
3475 HostCodeHeap *pHeap = m_cleanupList;
3476 HostCodeHeap *pPrevHeap = NULL;
3477 while (pHeap)
3478 {
3479 if (pHeap == pCodeHeap)
3480 {
3481 if (pPrevHeap)
3482 {
3483 // remove current heap from list
3484 pPrevHeap->m_pNextHeapToRelease = pHeap->m_pNextHeapToRelease;
3485 }
3486 else
3487 {
3488 m_cleanupList = pHeap->m_pNextHeapToRelease;
3489 }
3490 break;
3491 }
3492 pPrevHeap = pHeap;
3493 pHeap = pHeap->m_pNextHeapToRelease;
3494 }
3495}
3496
3497void EEJitManager::AddToCleanupList(HostCodeHeap *pCodeHeap)
3498{
3499 CONTRACTL {
3500 NOTHROW;
3501 GC_NOTRIGGER;
3502 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3503 } CONTRACTL_END;
3504
    // It may happen that a heap's allocation count goes to 0 and later on, before it is destroyed, the heap
    // gets reused for another dynamic method.
    // It's then possible that the ref count reaches 0 multiple times. If so, we simply don't add it again.
    // Also, on cleanup we check that the ref count is actually 0.
3509 HostCodeHeap *pHeap = m_cleanupList;
3510 while (pHeap)
3511 {
3512 if (pHeap == pCodeHeap)
3513 {
3514 LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - Already in list\n", pCodeHeap, *(size_t*)pCodeHeap));
3515 break;
3516 }
3517 pHeap = pHeap->m_pNextHeapToRelease;
3518 }
3519 if (pHeap == NULL)
3520 {
3521 pCodeHeap->m_pNextHeapToRelease = m_cleanupList;
3522 m_cleanupList = pCodeHeap;
3523 LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - ref count %d - Adding to cleanup list\n", pCodeHeap, *(size_t*)pCodeHeap, pCodeHeap->m_AllocationCount));
3524 }
3525}
3526
3527void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
3528{
3529 CONTRACTL {
3530 NOTHROW;
3531 GC_NOTRIGGER;
3532 PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3533 } CONTRACTL_END;
3534
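    // Unlink pHeapList from the manager's singly linked list of code heaps (m_pCodeHeap),
    // then tear down its unwind function table and range section before deleting the CodeHeap itself.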
3535 HeapList *pHp = GetCodeHeapList();
3536 if (pHp == pHeapList)
3537 m_pCodeHeap = pHp->GetNext();
3538 else
3539 {
3540 HeapList *pHpNext = pHp->GetNext();
3541
3542 while (pHpNext != pHeapList)
3543 {
3544 pHp = pHpNext;
3545 _ASSERTE(pHp != NULL); // should always find the HeapList
3546 pHpNext = pHp->GetNext();
3547 }
3548 pHp->SetNext(pHeapList->GetNext());
3549 }
3550
3551 DeleteEEFunctionTable((PVOID)pHeapList);
3552
3553 ExecutionManager::DeleteRange((TADDR)pHeapList);
3554
3555 LOG((LF_JIT, LL_INFO100, "DeleteCodeHeap start" FMT_ADDR "end" FMT_ADDR "\n",
3556 (const BYTE*)pHeapList->startAddress,
3557 (const BYTE*)pHeapList->endAddress ));
3558
3559 // pHeapList is allocated in pHeap, so only need to delete the CodeHeap itself
3560 // !!! For SoC, compiler inserts code to write a special cookie at pHeapList->pHeap after delete operator, at least for debug code.
3561 // !!! Since pHeapList is deleted at the same time as pHeap, this causes AV.
3562 // delete pHeapList->pHeap;
3563 CodeHeap* pHeap = pHeapList->pHeap;
3564 delete pHeap;
3565}
3566
3567#endif // #ifndef DACCESS_COMPILE
3568
3569static CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & request)
3570{
3571 CONTRACTL {
3572 NOTHROW;
3573 GC_NOTRIGGER;
3574 SUPPORTS_DAC;
3575 } CONTRACTL_END;
3576
3577 TADDR address = (TADDR) request.GetStartAddress();
3578 _ASSERTE(address != NULL);
3579
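    // The low bits of the start address may carry target-specific flags (e.g. the ARM Thumb bit);
    // mask them off. The CodeHeader immediately precedes the method's code, so step back one
    // header to reach it.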
3580 CodeHeader * pHeader = dac_cast<PTR_CodeHeader>(address & ~3) - 1;
3581 _ASSERTE(pHeader != NULL);
3582
3583 return pHeader;
3584}
3585
3586//-----------------------------------------------------------------------------
3587// Get vars from Jit Store
3588//-----------------------------------------------------------------------------
3589BOOL EEJitManager::GetBoundariesAndVars(
3590 const DebugInfoRequest & request,
3591 IN FP_IDS_NEW fpNew, IN void * pNewData,
3592 OUT ULONG32 * pcMap,
3593 OUT ICorDebugInfo::OffsetMapping **ppMap,
3594 OUT ULONG32 * pcVars,
3595 OUT ICorDebugInfo::NativeVarInfo **ppVars)
3596{
3597 CONTRACTL {
3598 THROWS; // on OOM.
3599 GC_NOTRIGGER; // getting vars shouldn't trigger
3600 SUPPORTS_DAC;
3601 } CONTRACTL_END;
3602
3603 CodeHeader * pHdr = GetCodeHeaderFromDebugInfoRequest(request);
3604 _ASSERTE(pHdr != NULL);
3605
3606 PTR_BYTE pDebugInfo = pHdr->GetDebugInfo();
3607
3608 // No header created, which means no jit information is available.
3609 if (pDebugInfo == NULL)
3610 return FALSE;
3611
3612 // Uncompress. This allocates memory and may throw.
3613 CompressDebugInfo::RestoreBoundariesAndVars(
3614 fpNew, pNewData, // allocators
3615 pDebugInfo, // input
3616 pcMap, ppMap,
3617 pcVars, ppVars); // output
3618
3619 return TRUE;
3620}
3621
3622#ifdef DACCESS_COMPILE
3623void CodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan)
3624{
3625 CONTRACTL
3626 {
3627 NOTHROW;
3628 GC_NOTRIGGER;
3629 SUPPORTS_DAC;
3630 }
3631 CONTRACTL_END;
3632
3633 DAC_ENUM_DTHIS();
3634
3635#ifdef USE_INDIRECT_CODEHEADER
3636 this->pRealCodeHeader.EnumMem();
3637#endif // USE_INDIRECT_CODEHEADER
3638
3639 if (this->GetDebugInfo() != NULL)
3640 {
3641 CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo());
3642 }
3643}
3644
3645//-----------------------------------------------------------------------------
3646// Enumerate for minidumps.
3647//-----------------------------------------------------------------------------
3648void EEJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
3649{
3650 CONTRACTL
3651 {
3652 NOTHROW;
3653 GC_NOTRIGGER;
3654 SUPPORTS_DAC;
3655 }
3656 CONTRACTL_END;
3657
3658 DebugInfoRequest request;
3659 PCODE addrCode = pMD->GetNativeCode();
3660 request.InitFromStartingAddr(pMD, addrCode);
3661
3662 CodeHeader * pHeader = GetCodeHeaderFromDebugInfoRequest(request);
3663
3664 pHeader->EnumMemoryRegions(flags, NULL);
3665}
3666#endif // DACCESS_COMPILE
3667
3668PCODE EEJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
3669{
3670 WRAPPER_NO_CONTRACT;
3671
3672 CodeHeader * pHeader = GetCodeHeader(MethodToken);
3673 return pHeader->GetCodeStartAddress() + relOffset;
3674}
3675
3676BOOL EEJitManager::JitCodeToMethodInfo(
3677 RangeSection * pRangeSection,
3678 PCODE currentPC,
3679 MethodDesc ** ppMethodDesc,
3680 EECodeInfo * pCodeInfo)
3681{
3682 CONTRACTL {
3683 NOTHROW;
3684 GC_NOTRIGGER;
3685 SO_TOLERANT;
3686 SUPPORTS_DAC;
3687 } CONTRACTL_END;
3688
3689 _ASSERTE(pRangeSection != NULL);
3690
3691 TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
3692 if (start == NULL)
3693 return FALSE;
3694
3695 CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
3696 if (pCHdr->IsStubCodeBlock())
3697 return FALSE;
3698
3699 _ASSERTE(pCHdr->GetMethodDesc()->SanityCheck());
3700
3701 if (pCodeInfo)
3702 {
3703 pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(pCHdr));
3704
3705 // This can be counted on for Jitted code. For NGEN code in the case
3706 // where we have hot/cold splitting this isn't valid and we need to
3707 // take into account cold code.
3708 pCodeInfo->m_relOffset = (DWORD)(PCODEToPINSTR(currentPC) - pCHdr->GetCodeStartAddress());
3709
3710#ifdef WIN64EXCEPTIONS
3711 // Computed lazily by code:EEJitManager::LazyGetFunctionEntry
3712 pCodeInfo->m_pFunctionEntry = NULL;
3713#endif
3714 }
3715
3716 if (ppMethodDesc)
3717 {
3718 *ppMethodDesc = pCHdr->GetMethodDesc();
3719 }
3720 return TRUE;
3721}
3722
3723StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
3724{
3725 CONTRACTL {
3726 NOTHROW;
3727 GC_NOTRIGGER;
3728 SO_TOLERANT;
3729 SUPPORTS_DAC;
3730 } CONTRACTL_END;
3731
3732 TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
3733 if (start == NULL)
3734 return STUB_CODE_BLOCK_NOCODE;
3735 CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
3736 return pCHdr->IsStubCodeBlock() ? pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED;
3737}
3738
3739TADDR EEJitManager::FindMethodCode(PCODE currentPC)
3740{
3741 CONTRACTL {
3742 NOTHROW;
3743 GC_NOTRIGGER;
3744 SO_TOLERANT;
3745 SUPPORTS_DAC;
3746 } CONTRACTL_END;
3747
3748 RangeSection * pRS = ExecutionManager::FindCodeRange(currentPC, ExecutionManager::GetScanFlags());
3749 if (pRS == NULL || (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0)
3750 return STUB_CODE_BLOCK_NOCODE;
3751 return dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
3752}
3753
3754// Finds the header corresponding to the code at offset "delta".
3755// Returns NULL if there is no header for the given "delta"
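// The nibble map has one nibble per 32-byte bucket of the code heap. A non-zero nibble
// records (as offset + 1) where within its bucket a method's code begins; zero means no
// method starts in that bucket. To find the method containing currentPC we check the
// bucket holding currentPC and, if its nibble does not mark a start at or before
// currentPC, scan backwards to the closest preceding non-zero nibble.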
3756
3757TADDR EEJitManager::FindMethodCode(RangeSection * pRangeSection, PCODE currentPC)
3758{
3759 LIMITED_METHOD_DAC_CONTRACT;
3760
3761 _ASSERTE(pRangeSection != NULL);
3762
3763 HeapList *pHp = dac_cast<PTR_HeapList>(pRangeSection->pHeapListOrZapModule);
3764
3765 if ((currentPC < pHp->startAddress) ||
3766 (currentPC > pHp->endAddress))
3767 {
3768 return NULL;
3769 }
3770
3771 TADDR base = pHp->mapBase;
3772 TADDR delta = currentPC - base;
3773 PTR_DWORD pMap = pHp->pHdrMap;
3774 PTR_DWORD pMapStart = pMap;
3775
3776 DWORD tmp;
3777
3778 size_t startPos = ADDR2POS(delta); // align to 32byte buckets
3779 // ( == index into the array of nibbles)
3780 DWORD offset = ADDR2OFFS(delta); // this is the offset inside the bucket + 1
3781
3782 _ASSERTE(offset == (offset & NIBBLE_MASK));
3783
3784 pMap += (startPos >> LOG2_NIBBLES_PER_DWORD); // points to the proper DWORD of the map
3785
3786 // get DWORD and shift down our nibble
3787
3788 PREFIX_ASSUME(pMap != NULL);
3789 tmp = VolatileLoadWithoutBarrier<DWORD>(pMap) >> POS2SHIFTCOUNT(startPos);
3790
3791 if ((tmp & NIBBLE_MASK) && ((tmp & NIBBLE_MASK) <= offset) )
3792 {
3793 return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
3794 }
3795
3796 // Is there a header in the remainder of the DWORD ?
3797 tmp = tmp >> NIBBLE_SIZE;
3798
3799 if (tmp)
3800 {
3801 startPos--;
3802 while (!(tmp & NIBBLE_MASK))
3803 {
3804 tmp = tmp >> NIBBLE_SIZE;
3805 startPos--;
3806 }
3807 return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
3808 }
3809
3810 // We skipped the remainder of the DWORD,
3811 // so we must set startPos to the highest position of
3812 // previous DWORD, unless we are already on the first DWORD
3813
3814 if (startPos < NIBBLES_PER_DWORD)
3815 return NULL;
3816
3817 startPos = ((startPos >> LOG2_NIBBLES_PER_DWORD) << LOG2_NIBBLES_PER_DWORD) - 1;
3818
3819 // Skip "headerless" DWORDS
3820
3821 while (pMapStart < pMap && 0 == (tmp = VolatileLoadWithoutBarrier<DWORD>(--pMap)))
3822 {
3823 startPos -= NIBBLES_PER_DWORD;
3824 }
3825
3826 // This helps to catch degenerate error cases. This relies on the fact that
3827 // startPos cannot ever be bigger than MAX_UINT
3828 if (((INT_PTR)startPos) < 0)
3829 return NULL;
3830
3831 // Find the nibble with the header in the DWORD
3832
3833 while (startPos && !(tmp & NIBBLE_MASK))
3834 {
3835 tmp = tmp >> NIBBLE_SIZE;
3836 startPos--;
3837 }
3838
3839 if (startPos == 0 && tmp == 0)
3840 return NULL;
3841
3842 return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
3843}
3844
3845#if !defined(DACCESS_COMPILE)
3846void EEJitManager::NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet)
3847{
3848 CONTRACTL {
3849 NOTHROW;
3850 GC_NOTRIGGER;
3851 } CONTRACTL_END;
3852
3853 // Currently all callers to this method ensure EEJitManager::m_CodeHeapCritSec
3854 // is held.
3855 _ASSERTE(m_CodeHeapCritSec.OwnedByCurrentThread());
3856
3857 _ASSERTE(pCode >= pHp->mapBase);
3858
3859 size_t delta = pCode - pHp->mapBase;
3860
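    // Compute the nibble (32-byte bucket) that covers pCode. When setting, the stored value
    // is the offset of pCode within its bucket plus one; when clearing, the value is zero.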
3861 size_t pos = ADDR2POS(delta);
3862 DWORD value = bSet?ADDR2OFFS(delta):0;
3863
3864 DWORD index = (DWORD) (pos >> LOG2_NIBBLES_PER_DWORD);
3865 DWORD mask = ~((DWORD) HIGHEST_NIBBLE_MASK >> ((pos & NIBBLES_PER_DWORD_MASK) << LOG2_NIBBLE_SIZE));
3866
3867 value = value << POS2SHIFTCOUNT(pos);
3868
3869 PTR_DWORD pMap = pHp->pHdrMap;
3870
3871 // assert that we don't overwrite an existing offset
3872 // (it's a reset or it is empty)
3873 _ASSERTE(!value || !((*(pMap+index))& ~mask));
3874
3875 // It is important for this update to be atomic. Synchronization would be required with FindMethodCode otherwise.
3876 *(pMap+index) = ((*(pMap+index))&mask)|value;
3877}
3878#endif // !DACCESS_COMPILE
3879
3880#if defined(WIN64EXCEPTIONS)
3881PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
3882{
3883 CONTRACTL {
3884 NOTHROW;
3885 GC_NOTRIGGER;
3886 SO_TOLERANT;
3887 SUPPORTS_DAC;
3888 } CONTRACTL_END;
3889
3890 if (!pCodeInfo->IsValid())
3891 {
3892 return NULL;
3893 }
3894
3895 CodeHeader * pHeader = GetCodeHeader(pCodeInfo->GetMethodToken());
3896
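    // Compute the RVA of the instruction we are looking up: the method's begin RVA
    // (unwind info 0 describes the main method body) plus the code-relative offset.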
3897 DWORD address = RUNTIME_FUNCTION__BeginAddress(pHeader->GetUnwindInfo(0)) + pCodeInfo->GetRelOffset();
3898
3899 // We need the module base address to calculate the end address of a function from the functionEntry.
3900 // Thus, save it off right now.
3901 TADDR baseAddress = pCodeInfo->GetModuleBase();
3902
3903 // NOTE: We could binary search here, if it would be helpful (e.g., large number of funclets)
3904 for (UINT iUnwindInfo = 0; iUnwindInfo < pHeader->GetNumberOfUnwindInfos(); iUnwindInfo++)
3905 {
3906 PTR_RUNTIME_FUNCTION pFunctionEntry = pHeader->GetUnwindInfo(iUnwindInfo);
3907
3908 if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress))
3909 {
3910 return pFunctionEntry;
3911 }
3912 }
3913
3914 return NULL;
3915}
3916
3917DWORD EEJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
3918{
3919 CONTRACTL
3920 {
3921 NOTHROW;
3922 GC_NOTRIGGER;
3923 }
3924 CONTRACTL_END;
3925
3926 CodeHeader * pCH = GetCodeHeader(MethodToken);
3927 TADDR moduleBase = JitTokenToModuleBase(MethodToken);
3928
3929 _ASSERTE(pCH->GetNumberOfUnwindInfos() >= 1);
3930
3931 DWORD parentBeginRva = RUNTIME_FUNCTION__BeginAddress(pCH->GetUnwindInfo(0));
3932
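    // Unwind info 0 describes the parent method body; any additional entries describe
    // funclets (and, on some targets, fragments of them). Report each funclet start as
    // an offset from the parent method's begin address.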
3933 DWORD nFunclets = 0;
3934 for (COUNT_T iUnwindInfo = 1; iUnwindInfo < pCH->GetNumberOfUnwindInfos(); iUnwindInfo++)
3935 {
3936 PTR_RUNTIME_FUNCTION pFunctionEntry = pCH->GetUnwindInfo(iUnwindInfo);
3937
3938#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
3939 if (IsFunctionFragment(moduleBase, pFunctionEntry))
3940 {
3941 // This is a fragment (not the funclet beginning); skip it
3942 continue;
3943 }
3944#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
3945
3946 DWORD funcletBeginRva = RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
3947 DWORD relParentOffsetToFunclet = funcletBeginRva - parentBeginRva;
3948
3949 if (nFunclets < dwLength)
3950 pStartFuncletOffsets[nFunclets] = relParentOffsetToFunclet;
3951 nFunclets++;
3952 }
3953
3954 return nFunclets;
3955}
3956
3957#if defined(DACCESS_COMPILE)
3958// This function is basically like RtlLookupFunctionEntry(), except that it works with DAC
3959// to read the function entries out of process. Also, it can only look up function entries
3960// inside mscorwks.dll, since DAC doesn't know anything about other unmanaged dll's.
3961void GetUnmanagedStackWalkInfo(IN ULONG64 ControlPc,
3962 OUT UINT_PTR* pModuleBase,
3963 OUT UINT_PTR* pFuncEntry)
3964{
3965 WRAPPER_NO_CONTRACT;
3966
3967 if (pModuleBase)
3968 {
3969 *pModuleBase = NULL;
3970 }
3971
3972 if (pFuncEntry)
3973 {
3974 *pFuncEntry = NULL;
3975 }
3976
3977 PEDecoder peDecoder(DacGlobalBase());
3978
3979 SIZE_T baseAddr = dac_cast<TADDR>(peDecoder.GetBase());
3980 SIZE_T cbSize = (SIZE_T)peDecoder.GetVirtualSize();
3981
3982 // Check if the control PC is inside mscorwks.
3983 if ( (baseAddr <= ControlPc) &&
3984 (ControlPc < (baseAddr + cbSize))
3985 )
3986 {
3987 if (pModuleBase)
3988 {
3989 *pModuleBase = baseAddr;
3990 }
3991
3992 if (pFuncEntry)
3993 {
3994 // Check if there is a static function table.
3995 COUNT_T cbSize = 0;
3996 TADDR pExceptionDir = peDecoder.GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_EXCEPTION, &cbSize);
3997
3998 if (pExceptionDir != NULL)
3999 {
4000 // Do a binary search on the static function table of mscorwks.dll.
4001 HRESULT hr = E_FAIL;
4002 TADDR taFuncEntry;
4003 T_RUNTIME_FUNCTION functionEntry;
4004
4005 DWORD dwLow = 0;
4006 DWORD dwHigh = cbSize / sizeof(T_RUNTIME_FUNCTION);
4007 DWORD dwMid = 0;
4008
4009 while (dwLow <= dwHigh)
4010 {
4011 dwMid = (dwLow + dwHigh) >> 1;
4012 taFuncEntry = pExceptionDir + dwMid * sizeof(T_RUNTIME_FUNCTION);
4013 hr = DacReadAll(taFuncEntry, &functionEntry, sizeof(functionEntry), false);
4014 if (FAILED(hr))
4015 {
4016 return;
4017 }
4018
4019 if (ControlPc < baseAddr + functionEntry.BeginAddress)
4020 {
4021 dwHigh = dwMid - 1;
4022 }
4023 else if (ControlPc >= baseAddr + RUNTIME_FUNCTION__EndAddress(&functionEntry, baseAddr))
4024 {
4025 dwLow = dwMid + 1;
4026 }
4027 else
4028 {
4029 _ASSERTE(pFuncEntry);
4030 *pFuncEntry = (UINT_PTR)(T_RUNTIME_FUNCTION*)PTR_RUNTIME_FUNCTION(taFuncEntry);
4031 break;
4032 }
4033 }
4034
4035 if (dwLow > dwHigh)
4036 {
4037 _ASSERTE(*pFuncEntry == NULL);
4038 }
4039 }
4040 }
4041 }
4042}
4043#endif // DACCESS_COMPILE
4044
4045extern "C" void GetRuntimeStackWalkInfo(IN ULONG64 ControlPc,
4046 OUT UINT_PTR* pModuleBase,
4047 OUT UINT_PTR* pFuncEntry)
4048{
4049
4050 WRAPPER_NO_CONTRACT;
4051
4052 BEGIN_PRESERVE_LAST_ERROR;
4053
4054 BEGIN_ENTRYPOINT_VOIDRET;
4055
4056 if (pModuleBase)
4057 *pModuleBase = NULL;
4058 if (pFuncEntry)
4059 *pFuncEntry = NULL;
4060
4061 EECodeInfo codeInfo((PCODE)ControlPc);
4062 if (!codeInfo.IsValid())
4063 {
4064#if defined(DACCESS_COMPILE)
4065 GetUnmanagedStackWalkInfo(ControlPc, pModuleBase, pFuncEntry);
4066#endif // DACCESS_COMPILE
4067 goto Exit;
4068 }
4069
4070 if (pModuleBase)
4071 {
4072 *pModuleBase = (UINT_PTR)codeInfo.GetModuleBase();
4073 }
4074
4075 if (pFuncEntry)
4076 {
4077 *pFuncEntry = (UINT_PTR)(PT_RUNTIME_FUNCTION)codeInfo.GetFunctionEntry();
4078 }
4079
4080Exit:
4081 END_ENTRYPOINT_VOIDRET;
4082
4083 END_PRESERVE_LAST_ERROR;
4084}
4085#endif // WIN64EXCEPTIONS
4086
4087#ifdef DACCESS_COMPILE
4088
4089void EEJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
4090{
4091 IJitManager::EnumMemoryRegions(flags);
4092
4093 //
4094 // Save all of the code heaps.
4095 //
4096
4097 HeapList* heap;
4098
4099 for (heap = m_pCodeHeap; heap; heap = heap->GetNext())
4100 {
4101 DacEnumHostDPtrMem(heap);
4102
4103 if (heap->pHeap.IsValid())
4104 {
4105 heap->pHeap->EnumMemoryRegions(flags);
4106 }
4107
4108 DacEnumMemoryRegion(heap->startAddress, (ULONG32)
4109 (heap->endAddress - heap->startAddress));
4110
4111 if (heap->pHdrMap.IsValid())
4112 {
4113 ULONG32 nibbleMapSize = (ULONG32)
4114 HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heap->maxCodeHeapSize));
4115 DacEnumMemoryRegion(dac_cast<TADDR>(heap->pHdrMap), nibbleMapSize);
4116 }
4117 }
4118}
4119#endif // #ifdef DACCESS_COMPILE
4120
4121#else // CROSSGEN_COMPILE
4122// stub for compilation
4123BOOL EEJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
4124 PCODE currentPC,
4125 MethodDesc ** ppMethodDesc,
4126 EECodeInfo * pCodeInfo)
4127{
4128 _ASSERTE(FALSE);
4129 return FALSE;
4130}
4131#endif // !CROSSGEN_COMPILE
4132
4133
4134#ifndef DACCESS_COMPILE
4135
4136//*******************************************************
4137// Execution Manager
4138//*******************************************************
4139
4140// Init statics
4141void ExecutionManager::Init()
4142{
4143 CONTRACTL {
4144 THROWS;
4145 GC_NOTRIGGER;
4146 } CONTRACTL_END;
4147
4148 m_JumpStubCrst.Init(CrstJumpStubCache, CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD));
4149
4150 m_RangeCrst.Init(CrstExecuteManRangeLock, CRST_UNSAFE_ANYMODE);
4151
4152 m_pDefaultCodeMan = new EECodeManager();
4153
4154#ifndef CROSSGEN_COMPILE
4155 m_pEEJitManager = new EEJitManager();
4156#endif
4157#ifdef FEATURE_PREJIT
4158 m_pNativeImageJitManager = new NativeImageJitManager();
4159#endif
4160
4161#ifdef FEATURE_READYTORUN
4162 m_pReadyToRunJitManager = new ReadyToRunJitManager();
4163#endif
4164}
4165
4166#endif // #ifndef DACCESS_COMPILE
4167
4168//**************************************************************************
4169RangeSection *
4170ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag)
4171{
4172 CONTRACTL {
4173 NOTHROW;
4174 GC_NOTRIGGER;
4175 SO_TOLERANT;
4176 SUPPORTS_DAC;
4177 } CONTRACTL_END;
4178
4179 if (currentPC == NULL)
4180 return NULL;
4181
4182 if (scanFlag == ScanReaderLock)
4183 return FindCodeRangeWithLock(currentPC);
4184
4185 return GetRangeSection(currentPC);
4186}
4187
4188//**************************************************************************
4189NOINLINE // Make sure that the slow path with lock won't affect the fast path
4190RangeSection *
4191ExecutionManager::FindCodeRangeWithLock(PCODE currentPC)
4192{
4193 CONTRACTL {
4194 NOTHROW;
4195 GC_NOTRIGGER;
4196 SO_TOLERANT;
4197 SUPPORTS_DAC;
4198 } CONTRACTL_END;
4199
4200 ReaderLockHolder rlh;
4201 return GetRangeSection(currentPC);
4202}
4203
4204
4205//**************************************************************************
4206PCODE ExecutionManager::GetCodeStartAddress(PCODE currentPC)
4207{
4208 WRAPPER_NO_CONTRACT;
4209 _ASSERTE(currentPC != NULL);
4210
4211 EECodeInfo codeInfo(currentPC);
4212 if (!codeInfo.IsValid())
4213 return NULL;
4214 return PINSTRToPCODE(codeInfo.GetStartAddress());
4215}
4216
4217//**************************************************************************
4218MethodDesc * ExecutionManager::GetCodeMethodDesc(PCODE currentPC)
4219{
4220 CONTRACTL
4221 {
4222 NOTHROW;
4223 GC_NOTRIGGER;
4224 FORBID_FAULT;
4225 SO_TOLERANT;
4226 }
4227 CONTRACTL_END
4228
4229 EECodeInfo codeInfo(currentPC);
4230 if (!codeInfo.IsValid())
4231 return NULL;
4232 return codeInfo.GetMethodDesc();
4233}
4234
4235//**************************************************************************
4236BOOL ExecutionManager::IsManagedCode(PCODE currentPC)
4237{
4238 CONTRACTL {
4239 NOTHROW;
4240 GC_NOTRIGGER;
4241 SO_TOLERANT;
4242 } CONTRACTL_END;
4243
4244 if (currentPC == NULL)
4245 return FALSE;
4246
4247 if (GetScanFlags() == ScanReaderLock)
4248 return IsManagedCodeWithLock(currentPC);
4249
4250 return IsManagedCodeWorker(currentPC);
4251}
4252
4253//**************************************************************************
4254NOINLINE // Make sure that the slow path with lock won't affect the fast path
4255BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC)
4256{
4257 CONTRACTL {
4258 NOTHROW;
4259 GC_NOTRIGGER;
4260 SO_TOLERANT;
4261 } CONTRACTL_END;
4262
4263 ReaderLockHolder rlh;
4264 return IsManagedCodeWorker(currentPC);
4265}
4266
4267//**************************************************************************
4268BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference /*=AllowHostCalls*/, BOOL *pfFailedReaderLock /*=NULL*/)
4269{
4270 CONTRACTL {
4271 NOTHROW;
4272 GC_NOTRIGGER;
4273 SO_TOLERANT;
4274 } CONTRACTL_END;
4275
4276#ifdef DACCESS_COMPILE
4277 return IsManagedCode(currentPC);
4278#else
4279 if (hostCallPreference == AllowHostCalls)
4280 {
4281 return IsManagedCode(currentPC);
4282 }
4283
4284 ReaderLockHolder rlh(hostCallPreference);
4285 if (!rlh.Acquired())
4286 {
4287 _ASSERTE(pfFailedReaderLock != NULL);
4288 *pfFailedReaderLock = TRUE;
4289 return FALSE;
4290 }
4291
4292 return IsManagedCodeWorker(currentPC);
4293#endif
4294}
4295
4296//**************************************************************************
4297// Assumes that the ExecutionManager reader/writer lock is taken or that
4298// it is safe not to take it.
4299BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC)
4300{
4301 CONTRACTL {
4302 NOTHROW;
4303 GC_NOTRIGGER;
4304 SO_TOLERANT;
4305 } CONTRACTL_END;
4306
4307 // This may get called for arbitrary code addresses. Note that the lock is
4308 // taken over the call to JitCodeToMethodInfo too so that nobody pulls out
4309 // the range section from underneath us.
4310
4311 RangeSection * pRS = GetRangeSection(currentPC);
4312 if (pRS == NULL)
4313 return FALSE;
4314
4315 if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4316 {
4317#ifndef CROSSGEN_COMPILE
        // Typically if we find a Jit Manager we are inside a managed method,
        // but we could also be in a stub, so we check for that
        // as well, and we don't consider stubs to be real managed code.
4321 TADDR start = dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
4322 if (start == NULL)
4323 return FALSE;
4324 CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
4325 if (!pCHdr->IsStubCodeBlock())
4326 return TRUE;
4327#endif
4328 }
4329#ifdef FEATURE_READYTORUN
4330 else
4331 if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
4332 {
4333 if (dac_cast<PTR_ReadyToRunJitManager>(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL))
4334 return TRUE;
4335 }
4336#endif
4337 else
4338 {
4339#ifdef FEATURE_PREJIT
4340 // Check that we are in the range with true managed code. We don't
4341 // consider jump stubs or precodes to be real managed code.
4342
4343 Module * pModule = dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4344
4345 NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
4346
4347 if (pLayoutInfo->m_CodeSections[0].IsInRange(currentPC) ||
4348 pLayoutInfo->m_CodeSections[1].IsInRange(currentPC) ||
4349 pLayoutInfo->m_CodeSections[2].IsInRange(currentPC))
4350 {
4351 return TRUE;
4352 }
4353#endif
4354 }
4355
4356 return FALSE;
4357}
4358
4359#ifndef DACCESS_COMPILE
4360
4361//**************************************************************************
4362// Clear the caches for all JITs loaded.
4363//
4364void ExecutionManager::ClearCaches( void )
4365{
4366 CONTRACTL {
4367 NOTHROW;
4368 GC_NOTRIGGER;
4369 } CONTRACTL_END;
4370
4371 GetEEJitManager()->ClearCache();
4372}
4373
4374//**************************************************************************
4375// Check if caches for any JITs loaded need to be cleaned
4376//
4377BOOL ExecutionManager::IsCacheCleanupRequired( void )
4378{
4379 CONTRACTL {
4380 NOTHROW;
4381 GC_NOTRIGGER;
4382 } CONTRACTL_END;
4383
4384 return GetEEJitManager()->IsCacheCleanupRequired();
4385}
4386
4387#ifndef FEATURE_MERGE_JIT_AND_ENGINE
4388/*********************************************************************/
4389// This static method returns the name of the jit dll
4390//
4391LPCWSTR ExecutionManager::GetJitName()
4392{
4393 STANDARD_VM_CONTRACT;
4394
4395 LPCWSTR pwzJitName = NULL;
4396
4397#if !defined(CROSSGEN_COMPILE)
4398 if (g_CLRJITPath != nullptr)
4399 {
4400 const wchar_t* p = wcsrchr(g_CLRJITPath, DIRECTORY_SEPARATOR_CHAR_W);
4401 if (p != nullptr)
4402 {
4403 pwzJitName = p + 1; // Return just the filename, not the directory name
4404 }
4405 else
4406 {
4407 pwzJitName = g_CLRJITPath;
4408 }
4409 }
4410#endif // !defined(CROSSGEN_COMPILE)
4411
4412 if (NULL == pwzJitName)
4413 {
4414 pwzJitName = MAKEDLLNAME_W(W("clrjit"));
4415 }
4416
4417 return pwzJitName;
4418}
4419#endif // !FEATURE_MERGE_JIT_AND_ENGINE
4420
4421#endif // #ifndef DACCESS_COMPILE
4422
4423RangeSection* ExecutionManager::GetRangeSection(TADDR addr)
4424{
4425 CONTRACTL {
4426 NOTHROW;
4427 HOST_NOCALLS;
4428 GC_NOTRIGGER;
4429 SO_TOLERANT;
4430 SUPPORTS_DAC;
4431 } CONTRACTL_END;
4432
4433 RangeSection * pHead = m_CodeRangeList;
4434
4435 if (pHead == NULL)
4436 {
4437 return NULL;
4438 }
4439
4440 RangeSection *pCurr = pHead;
4441 RangeSection *pLast = NULL;
4442
4443#ifndef DACCESS_COMPILE
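    // The head node caches the most recently matched RangeSection in pLastUsed.
    // Check it first: repeated lookups commonly hit the same range, so this avoids
    // walking the list in the typical case.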
4444 RangeSection *pLastUsedRS = (pCurr != NULL) ? pCurr->pLastUsed : NULL;
4445
4446 if (pLastUsedRS != NULL)
4447 {
4448 // positive case
4449 if ((addr >= pLastUsedRS->LowAddress) &&
4450 (addr < pLastUsedRS->HighAddress) )
4451 {
4452 return pLastUsedRS;
4453 }
4454
4455 RangeSection * pNextAfterLastUsedRS = pLastUsedRS->pnext;
4456
        // negative case: addr falls in the gap below pLastUsedRS and at or above the
        // next (lower) range section, so it is not covered by any range section
4458 if ((addr < pLastUsedRS->LowAddress) &&
4459 (pNextAfterLastUsedRS == NULL || addr >= pNextAfterLastUsedRS->HighAddress))
4460 {
4461 return NULL;
4462 }
4463 }
4464#endif
4465
4466 while (pCurr != NULL)
4467 {
4468 // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
4469 if (pCurr->LowAddress <= addr)
4470 {
            // Since we are sorted, once addr is at or above pCurr->HighAddress
            // all subsequent ranges will also be lower, so we are done.
4473 if (addr >= pCurr->HighAddress)
4474 {
4475 // we'll return NULL and put pLast into pLastUsed
4476 pCurr = NULL;
4477 }
4478 else
4479 {
4480 // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
4481 _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));
4482
4483 // Found the matching RangeSection
4484 // we'll return pCurr and put it into pLastUsed
4485 pLast = pCurr;
4486 }
4487
4488 break;
4489 }
4490 pLast = pCurr;
4491 pCurr = pCurr->pnext;
4492 }
4493
4494#ifndef DACCESS_COMPILE
4495 // Cache pCurr as pLastUsed in the head node
4496 // Unless we are on an MP system with many cpus
4497 // where this sort of caching actually diminishes scaling during server GC
4498 // due to many processors writing to a common location
4499 if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress())
4500 pHead->pLastUsed = pLast;
4501#endif
4502
4503 return pCurr;
4504}
4505
4506RangeSection* ExecutionManager::GetRangeSectionAndPrev(RangeSection *pHead, TADDR addr, RangeSection** ppPrev)
4507{
4508 WRAPPER_NO_CONTRACT;
4509
4510 RangeSection *pCurr;
4511 RangeSection *pPrev;
4512 RangeSection *result = NULL;
4513
4514 for (pPrev = NULL, pCurr = pHead;
4515 pCurr != NULL;
4516 pPrev = pCurr, pCurr = pCurr->pnext)
4517 {
4518 // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
4519 if (pCurr->LowAddress > addr)
4520 continue;
4521
4522 if (addr >= pCurr->HighAddress)
4523 break;
4524
4525 // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
4526 _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));
4527
4528 // Found the matching RangeSection
4529 result = pCurr;
4530
4531 // Write back pPrev to ppPrev if it is non-null
4532 if (ppPrev != NULL)
4533 *ppPrev = pPrev;
4534
4535 break;
4536 }
4537
4538 // If we failed to find a match write NULL to ppPrev if it is non-null
4539 if ((ppPrev != NULL) && (result == NULL))
4540 {
4541 *ppPrev = NULL;
4542 }
4543
4544 return result;
4545}
4546
4547/* static */
4548PTR_Module ExecutionManager::FindZapModule(TADDR currentData)
4549{
4550 CONTRACTL
4551 {
4552 NOTHROW;
4553 GC_NOTRIGGER;
4554 SO_TOLERANT;
4555 MODE_ANY;
4556 STATIC_CONTRACT_HOST_CALLS;
4557 SUPPORTS_DAC;
4558 }
4559 CONTRACTL_END;
4560
4561 ReaderLockHolder rlh;
4562
4563 RangeSection * pRS = GetRangeSection(currentData);
4564 if (pRS == NULL)
4565 return NULL;
4566
4567 if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4568 return NULL;
4569
4570#ifdef FEATURE_READYTORUN
4571 if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
4572 return NULL;
4573#endif
4574
4575 return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4576}
4577
4578/* static */
4579PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData)
4580{
4581 CONTRACTL
4582 {
4583 NOTHROW;
4584 GC_NOTRIGGER;
4585 SO_TOLERANT;
4586 MODE_ANY;
4587 STATIC_CONTRACT_HOST_CALLS;
4588 SUPPORTS_DAC;
4589 }
4590 CONTRACTL_END;
4591
4592#ifdef FEATURE_READYTORUN
4593 ReaderLockHolder rlh;
4594
4595 RangeSection * pRS = GetRangeSection(currentData);
4596 if (pRS == NULL)
4597 return NULL;
4598
4599 if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4600 return NULL;
4601
4602 if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
        return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4604
4605 return NULL;
4606#else
4607 return NULL;
4608#endif
4609}
4610
4611
4612/* static */
4613PTR_Module ExecutionManager::FindModuleForGCRefMap(TADDR currentData)
4614{
4615 CONTRACTL
4616 {
4617 NOTHROW;
4618 GC_NOTRIGGER;
4619 SO_TOLERANT;
4620 SUPPORTS_DAC;
4621 }
4622 CONTRACTL_END;
4623
4624 RangeSection * pRS = FindCodeRange(currentData, ExecutionManager::GetScanFlags());
4625 if (pRS == NULL)
4626 return NULL;
4627
4628 if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4629 return NULL;
4630
4631#ifdef FEATURE_READYTORUN
4632 // RANGE_SECTION_READYTORUN is intentionally not filtered out here
4633#endif
4634
4635 return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4636}
4637
4638#ifndef DACCESS_COMPILE
4639
4640/* NGenMem depends on this entrypoint */
4641NOINLINE
4642void ExecutionManager::AddCodeRange(TADDR pStartRange,
4643 TADDR pEndRange,
4644 IJitManager * pJit,
4645 RangeSection::RangeSectionFlags flags,
4646 void * pHp)
4647{
4648 CONTRACTL {
4649 THROWS;
4650 GC_NOTRIGGER;
4651 PRECONDITION(CheckPointer(pJit));
4652 PRECONDITION(CheckPointer(pHp));
4653 } CONTRACTL_END;
4654
4655 AddRangeHelper(pStartRange,
4656 pEndRange,
4657 pJit,
4658 flags,
4659 dac_cast<TADDR>(pHp));
4660}
4661
4662#ifdef FEATURE_PREJIT
4663
4664void ExecutionManager::AddNativeImageRange(TADDR StartRange,
4665 SIZE_T Size,
4666 Module * pModule)
4667{
4668 CONTRACTL {
4669 THROWS;
4670 GC_NOTRIGGER;
4671 PRECONDITION(CheckPointer(pModule));
4672 } CONTRACTL_END;
4673
4674 AddRangeHelper(StartRange,
4675 StartRange + Size,
4676 GetNativeImageJitManager(),
4677 RangeSection::RANGE_SECTION_NONE,
4678 dac_cast<TADDR>(pModule));
4679}
4680#endif
4681
4682void ExecutionManager::AddRangeHelper(TADDR pStartRange,
4683 TADDR pEndRange,
4684 IJitManager * pJit,
4685 RangeSection::RangeSectionFlags flags,
4686 TADDR pHeapListOrZapModule)
4687{
4688 CONTRACTL {
4689 THROWS;
4690 GC_NOTRIGGER;
4691 HOST_CALLS;
4692 PRECONDITION(pStartRange < pEndRange);
4693 PRECONDITION(pHeapListOrZapModule != NULL);
4694 } CONTRACTL_END;
4695
4696 RangeSection *pnewrange = new RangeSection;
4697
4698 _ASSERTE(pEndRange > pStartRange);
4699
4700 pnewrange->LowAddress = pStartRange;
4701 pnewrange->HighAddress = pEndRange;
4702 pnewrange->pjit = pJit;
4703 pnewrange->pnext = NULL;
4704 pnewrange->flags = flags;
4705 pnewrange->pLastUsed = NULL;
4706 pnewrange->pHeapListOrZapModule = pHeapListOrZapModule;
4707#if defined(_TARGET_AMD64_)
4708 pnewrange->pUnwindInfoTable = NULL;
4709#endif // defined(_TARGET_AMD64_)
4710 {
4711 CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList
4712
4713 RangeSection * current = m_CodeRangeList;
4714 RangeSection * previous = NULL;
4715
4716 if (current != NULL)
4717 {
4718 while (true)
4719 {
4720 // Sort addresses top down so that more recently created ranges
4721 // will populate the top of the list
4722 if (pnewrange->LowAddress > current->LowAddress)
4723 {
4724 // Asserts if ranges are overlapping
4725 _ASSERTE(pnewrange->LowAddress >= current->HighAddress);
4726 pnewrange->pnext = current;
4727
4728 if (previous == NULL) // insert new head
4729 {
4730 m_CodeRangeList = pnewrange;
4731 }
4732 else
4733 { // insert in the middle
4734 previous->pnext = pnewrange;
4735 }
4736 break;
4737 }
4738
4739 RangeSection * next = current->pnext;
4740 if (next == NULL) // insert at end of list
4741 {
4742 current->pnext = pnewrange;
4743 break;
4744 }
4745
4746 // Continue walking the RangeSection list
4747 previous = current;
4748 current = next;
4749 }
4750 }
4751 else
4752 {
4753 m_CodeRangeList = pnewrange;
4754 }
4755 }
4756}
4757
4758// Deletes a single range starting at pStartRange
4759void ExecutionManager::DeleteRange(TADDR pStartRange)
4760{
4761 CONTRACTL {
4762 NOTHROW; // If this becomes throwing, then revisit the queuing of deletes below.
4763 GC_NOTRIGGER;
4764 } CONTRACTL_END;
4765
4766 RangeSection *pCurr = NULL;
4767 {
4768 // Acquire the Crst before unlinking a RangeList.
4769 // NOTE: The Crst must be acquired BEFORE we grab the writer lock, as the
4770 // writer lock forces us into a forbid suspend thread region, and it's illegal
4771 // to enter a Crst after the forbid suspend thread region is entered
4772 CrstHolder ch(&m_RangeCrst);
4773
4774 // Acquire the WriterLock and prevent any readers from walking the RangeList.
4775 // This also forces us to enter a forbid suspend thread region, to prevent
4776 // hijacking profilers from grabbing this thread and walking it (the walk may
4777 // require the reader lock, which would cause a deadlock).
4778 WriterLockHolder wlh;
4779
4780 RangeSection *pPrev = NULL;
4781
4782 pCurr = GetRangeSectionAndPrev(m_CodeRangeList, pStartRange, &pPrev);
4783
4784 // pCurr points at the Range that needs to be unlinked from the RangeList
4785 if (pCurr != NULL)
4786 {
4787
            // If pPrev is NULL then the head of this list is to be deleted
4789 if (pPrev == NULL)
4790 {
4791 m_CodeRangeList = pCurr->pnext;
4792 }
4793 else
4794 {
4795 _ASSERT(pPrev->pnext == pCurr);
4796
4797 pPrev->pnext = pCurr->pnext;
4798 }
4799
4800 // Clear the cache pLastUsed in the head node (if any)
4801 RangeSection * head = m_CodeRangeList;
4802 if (head != NULL)
4803 {
4804 head->pLastUsed = NULL;
4805 }
4806
4807 //
4808 // Cannot delete pCurr here because we own the WriterLock and if this is
4809 // a hosted scenario then the hosting api callback cannot occur in a forbid
4810 // suspend region, which the writer lock is.
4811 //
4812 }
4813 }
4814
4815 //
4816 // Now delete the node
4817 //
4818 if (pCurr != NULL)
4819 {
4820#if defined(_TARGET_AMD64_)
4821 if (pCurr->pUnwindInfoTable != 0)
4822 delete pCurr->pUnwindInfoTable;
4823#endif // defined(_TARGET_AMD64_)
4824 delete pCurr;
4825 }
4826}
4827
4828#endif // #ifndef DACCESS_COMPILE
4829
4830#ifdef DACCESS_COMPILE
4831
4832void ExecutionManager::EnumRangeList(RangeSection* list,
4833 CLRDataEnumMemoryFlags flags)
4834{
4835 while (list != NULL)
4836 {
4837 // If we can't read the target memory, stop immediately so we don't work
4838 // with broken data.
4839 if (!DacEnumMemoryRegion(dac_cast<TADDR>(list), sizeof(*list)))
4840 break;
4841
4842 if (list->pjit.IsValid())
4843 {
4844 list->pjit->EnumMemoryRegions(flags);
4845 }
4846
4847 if (!(list->flags & RangeSection::RANGE_SECTION_CODEHEAP))
4848 {
4849 PTR_Module pModule = dac_cast<PTR_Module>(list->pHeapListOrZapModule);
4850
4851 if (pModule.IsValid())
4852 {
4853 pModule->EnumMemoryRegions(flags, true);
4854 }
4855 }
4856
4857 list = list->pnext;
4858#if defined (_DEBUG)
        // Test hook: when testing on debug builds, we want an easy way to test that the while
        // loop correctly terminates in the face of ridiculous stuff from the target.
4861 if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
4862 {
4863 // Force us to struggle on with something bad.
4864 if (list == NULL)
4865 {
4866 list = (RangeSection *)&flags;
4867 }
4868 }
4869#endif // (_DEBUG)
4870
4871 }
4872}
4873
4874void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
4875{
4876 STATIC_CONTRACT_HOST_CALLS;
4877
4878 ReaderLockHolder rlh;
4879
4880 //
4881 // Report the global data portions.
4882 //
4883
4884 m_CodeRangeList.EnumMem();
4885 m_pDefaultCodeMan.EnumMem();
4886
4887 //
4888 // Walk structures and report.
4889 //
4890
4891 if (m_CodeRangeList.IsValid())
4892 {
4893 EnumRangeList(m_CodeRangeList, flags);
4894 }
4895}
4896#endif // #ifdef DACCESS_COMPILE
4897
4898#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
4899
4900void ExecutionManager::Unload(LoaderAllocator *pLoaderAllocator)
4901{
4902 CONTRACTL {
4903 NOTHROW;
4904 GC_NOTRIGGER;
4905 } CONTRACTL_END;
4906
4907 // a size of 0 is a signal to Nirvana to flush the entire cache
4908 FlushInstructionCache(GetCurrentProcess(),0,0);
4909
4910 /* StackwalkCacheEntry::EIP is an address into code. Since we are
4911 unloading the code, we need to invalidate the cache. Otherwise,
       it's possible that another appdomain might generate code at the very
4913 same address, and we might incorrectly think that the old
4914 StackwalkCacheEntry corresponds to it. So flush the cache.
4915 */
4916 StackwalkCache::Invalidate(pLoaderAllocator);
4917
4918 JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;
4919 if (pJumpStubCache != NULL)
4920 {
4921 delete pJumpStubCache;
4922 pLoaderAllocator->m_pJumpStubCache = NULL;
4923 }
4924
4925 GetEEJitManager()->Unload(pLoaderAllocator);
4926}
4927
4928// This method is used by the JIT and the runtime for PreStubs. It will return
4929// the address of a short jump thunk that will jump to the 'target' address.
// It is only needed when the target architecture has a preferred call instruction
4931// that doesn't actually span the full address space. This is true for x64 where
4932// the preferred call instruction is a 32-bit pc-rel call instruction.
// (This is also true on ARM64, but it is not true for x86.)
4934//
4935// For these architectures, in JITed code and in the prestub, we encode direct calls
// using the preferred call instruction, and we also try to ensure that the Jitted
4937// code is within the 32-bit pc-rel range of clr.dll to allow direct JIT helper calls.
4938//
// When the call target is too far away to encode using the preferred call instruction,
// we create a short code thunk that unconditionally jumps to the target address.
4941// We call this jump thunk a "jumpStub" in the CLR code.
4942// We have the requirement that the "jumpStub" that we create on demand be usable by
// the preferred call instruction; this requires that on x64 the location in memory
4944// where we create the "jumpStub" be within the 32-bit pc-rel range of the call that
4945// needs it.
4946//
4947// The arguments to this method:
// pMD - the MethodDesc for the current managed method in Jitted code
4949// or for the target method for a PreStub
4950// It is required if calling from or to a dynamic method (LCG method)
4951// target - The call target address (this is the address that was too far to encode)
4952// loAddr
4953// hiAddr - The range of the address that we must place the jumpStub in, so that it
4954// can be used to encode the preferred call instruction.
4955// pLoaderAllocator
4956// - The Loader allocator to use for allocations, this can be null.
4957// When it is null, then the pMD must be valid and is used to obtain
4958// the allocator.
4959//
4960// This method will either locate and return an existing jumpStub thunk that can be
4961// reused for this request, because it meets all of the requirements necessary.
4962// Or it will allocate memory in the required region and create a new jumpStub that
4963// meets all of the requirements necessary.
4964//
4965// Note that for dynamic methods (LCG methods) we cannot share the jumpStubs between
4966// different methods. This is because we allow for the unloading (reclaiming) of
4967// individual dynamic methods. And we associate the jumpStub memory allocated with
4968// the dynamic method that requested the jumpStub.
4969//
4970
4971PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target,
4972 BYTE * loAddr, BYTE * hiAddr,
4973 LoaderAllocator *pLoaderAllocator,
4974 bool throwOnOutOfMemoryWithinRange)
4975{
4976 CONTRACT(PCODE) {
4977 THROWS;
4978 GC_NOTRIGGER;
4979 MODE_ANY;
4980 PRECONDITION(pLoaderAllocator != NULL || pMD != NULL);
4981 PRECONDITION(loAddr < hiAddr);
4982 POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
4983 } CONTRACT_END;
4984
4985 PCODE jumpStub = NULL;
4986
4987 if (pLoaderAllocator == NULL)
4988 {
4989 pLoaderAllocator = pMD->GetLoaderAllocatorForCode();
4990 }
4991 _ASSERTE(pLoaderAllocator != NULL);
4992
4993 bool isLCG = pMD && pMD->IsLCGMethod();
4994 LCGMethodResolver * pResolver = nullptr;
4995 JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;
4996
4997 if (isLCG)
4998 {
4999 pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
5000 pJumpStubCache = pResolver->m_pJumpStubCache;
5001 }
5002
5003 CrstHolder ch(&m_JumpStubCrst);
5004 if (pJumpStubCache == NULL)
5005 {
5006 pJumpStubCache = new JumpStubCache();
5007 if (isLCG)
5008 {
5009 pResolver->m_pJumpStubCache = pJumpStubCache;
5010 }
5011 else
5012 {
5013 pLoaderAllocator->m_pJumpStubCache = pJumpStubCache;
5014 }
5015 }
5016
5017 if (isLCG)
5018 {
5019 // Increment counter of LCG jump stub lookup attempts
5020 m_LCG_JumpStubLookup++;
5021 }
5022 else
5023 {
5024 // Increment counter of normal jump stub lookup attempts
5025 m_normal_JumpStubLookup++;
5026 }
5027
5028 // search for a matching jumpstub in the jumpStubCache
5029 //
5030 for (JumpStubTable::KeyIterator i = pJumpStubCache->m_Table.Begin(target),
5031 end = pJumpStubCache->m_Table.End(target); i != end; i++)
5032 {
5033 jumpStub = i->m_jumpStub;
5034
5035 _ASSERTE(jumpStub != NULL);
5036
        // Is the matching entry within the requested range?
5038 if (((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr))
5039 {
5040 RETURN(jumpStub);
5041 }
5042 }
5043
5044 // If we get here we need to create a new jump stub
5045 // add or change the jump stub table to point at the new one
5046 jumpStub = getNextJumpStub(pMD, target, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange); // this statement can throw
5047 if (jumpStub == NULL)
5048 {
5049 _ASSERTE(!throwOnOutOfMemoryWithinRange);
5050 RETURN(NULL);
5051 }
5052
5053 _ASSERTE(((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr));
5054
5055 LOG((LF_JIT, LL_INFO10000, "Add JumpStub to" FMT_ADDR "at" FMT_ADDR "\n",
5056 DBG_ADDR(target), DBG_ADDR(jumpStub) ));
5057
5058 RETURN(jumpStub);
5059}
5060
5061PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
5062 BYTE * loAddr, BYTE * hiAddr,
5063 LoaderAllocator *pLoaderAllocator,
5064 bool throwOnOutOfMemoryWithinRange)
5065{
5066 CONTRACT(PCODE) {
5067 THROWS;
5068 GC_NOTRIGGER;
5069 PRECONDITION(pLoaderAllocator != NULL);
5070 PRECONDITION(m_JumpStubCrst.OwnedByCurrentThread());
5071 POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
5072 } CONTRACT_END;
5073
5074 DWORD numJumpStubs = DEFAULT_JUMPSTUBS_PER_BLOCK; // a block of 32 JumpStubs
5075 BYTE * jumpStub = NULL;
5076 bool isLCG = pMD && pMD->IsLCGMethod();
5077 JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;
5078
5079 if (isLCG)
5080 {
5081 LCGMethodResolver * pResolver;
5082 pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
5083 pJumpStubCache = pResolver->m_pJumpStubCache;
5084 }
5085
5086 JumpStubBlockHeader ** ppHead = &(pJumpStubCache->m_pBlocks);
5087 JumpStubBlockHeader * curBlock = *ppHead;
5088
5089 // allocate a new jumpstub from 'curBlock' if it is not fully allocated
5090 //
5091 while (curBlock)
5092 {
5093 _ASSERTE(pLoaderAllocator == (isLCG ? curBlock->GetHostCodeHeap()->GetAllocator() : curBlock->GetLoaderAllocator()));
5094
5095 if (curBlock->m_used < curBlock->m_allocated)
5096 {
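            // Jump stub slots are laid out back to back immediately after the block
            // header; the next free slot is at index m_used.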
5097 jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
5098
5099 if ((loAddr <= jumpStub) && (jumpStub <= hiAddr))
5100 {
5101 // We will update curBlock->m_used at "DONE"
5102 goto DONE;
5103 }
5104 }
5105 curBlock = curBlock->m_next;
5106 }
5107
5108 // If we get here then we need to allocate a new JumpStubBlock
5109
5110 if (isLCG)
5111 {
        // For LCG we request a small block of 4 jumpstubs, because we cannot share them
5113 // with any other methods and very frequently our method only needs one jump stub.
5114 // Using 4 gives a request size of (32 + 4*12) or 80 bytes.
        // Also note that request sizes are rounded up to a multiple of 16.
5116 // The request size is calculated into 'blockSize' in allocJumpStubBlock.
5117 // For x64 the value of BACK_TO_BACK_JUMP_ALLOCATE_SIZE is 12 bytes
5118 // and the sizeof(JumpStubBlockHeader) is 32.
5119 //
5120
5121 numJumpStubs = 4;
5122
5123#ifdef _TARGET_AMD64_
        // Note that these values are not requirements; we are just
        // confirming the values that are mentioned in the comments above.
5126 _ASSERTE(BACK_TO_BACK_JUMP_ALLOCATE_SIZE == 12);
5127 _ASSERTE(sizeof(JumpStubBlockHeader) == 32);
5128#endif
5129
5130 // Increment counter of LCG jump stub block allocations
5131 m_LCG_JumpStubBlockAllocCount++;
5132 }
5133 else
5134 {
5135 // Increment counter of normal jump stub block allocations
5136 m_normal_JumpStubBlockAllocCount++;
5137 }
5138
5139 // allocJumpStubBlock will allocate from the LoaderCodeHeap for normal methods
5140 // and will allocate from a HostCodeHeap for LCG methods.
5141 //
5142 // note that this can throw an OOM exception
5143
5144 curBlock = ExecutionManager::GetEEJitManager()->allocJumpStubBlock(pMD, numJumpStubs, loAddr, hiAddr, pLoaderAllocator, throwOnOutOfMemoryWithinRange);
5145 if (curBlock == NULL)
5146 {
5147 _ASSERTE(!throwOnOutOfMemoryWithinRange);
5148 RETURN(NULL);
5149 }
5150
5151 jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
5152
5153 _ASSERTE((loAddr <= jumpStub) && (jumpStub <= hiAddr));
5154
5155 curBlock->m_next = *ppHead;
5156 *ppHead = curBlock;
5157
5158DONE:
5159
5160 _ASSERTE((curBlock->m_used < curBlock->m_allocated));
5161
5162#ifdef _TARGET_ARM64_
5163 // 8-byte alignment is required on ARM64
5164 _ASSERTE(((UINT_PTR)jumpStub & 7) == 0);
5165#endif
5166
5167 emitBackToBackJump(jumpStub, (void*) target);
5168
5169#ifdef FEATURE_PERFMAP
5170 PerfMap::LogStubs(__FUNCTION__, "emitBackToBackJump", (PCODE)jumpStub, BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
5171#endif
5172
5173 // We always add the new jumpstub to the jumpStubCache
5174 //
5175 _ASSERTE(pJumpStubCache != NULL);
5176
5177 JumpStubEntry entry;
5178
5179 entry.m_target = target;
5180 entry.m_jumpStub = (PCODE)jumpStub;
5181
5182 pJumpStubCache->m_Table.Add(entry);
5183
5184 curBlock->m_used++; // record that we have used up one more jumpStub in the block
5185
5186 // Every time we create a new jumpStub thunk one of these counters is incremented
5187 if (isLCG)
5188 {
5189 // Increment counter of LCG unique jump stubs
5190 m_LCG_JumpStubUnique++;
5191 }
5192 else
5193 {
5194 // Increment counter of normal unique jump stubs
5195 m_normal_JumpStubUnique++;
5196 }
5197
5198 // Is the 'curBlock' now completely full?
5199 if (curBlock->m_used == curBlock->m_allocated)
5200 {
5201 if (isLCG)
5202 {
5203 // Increment counter of LCG jump stub blocks that are full
5204 m_LCG_JumpStubBlockFullCount++;
5205
5206 // Log this "LCG JumpStubBlock filled" along with the four counter values
5207 STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock filled - (%u, %u, %u, %u)\n",
5208 m_LCG_JumpStubLookup, m_LCG_JumpStubUnique,
5209 m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
5210 }
5211 else
5212 {
5213 // Increment counter of normal jump stub blocks that are full
5214 m_normal_JumpStubBlockFullCount++;
5215
5216 // Log this "normal JumpStubBlock filled" along with the four counter values
5217 STRESS_LOG4(LF_JIT, LL_INFO1000, "Normal JumpStubBlock filled - (%u, %u, %u, %u)\n",
5218 m_normal_JumpStubLookup, m_normal_JumpStubUnique,
5219 m_normal_JumpStubBlockAllocCount, m_normal_JumpStubBlockFullCount);
5220
5221 if ((m_LCG_JumpStubLookup > 0) && ((m_normal_JumpStubBlockFullCount % 5) == 1))
5222 {
                // Every 5th occurrence of the above we also
5224 // Log "LCG JumpStubBlock status" along with the four counter values
5225 STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock status - (%u, %u, %u, %u)\n",
5226 m_LCG_JumpStubLookup, m_LCG_JumpStubUnique,
5227 m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
5228 }
5229 }
5230 }
5231
5232 RETURN((PCODE)jumpStub);
5233}
5234#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
5235
5236#ifdef FEATURE_PREJIT
5237//***************************************************************************************
5238//***************************************************************************************
5239
5240#ifndef DACCESS_COMPILE
5241
5242NativeImageJitManager::NativeImageJitManager()
5243{
5244 WRAPPER_NO_CONTRACT;
5245}
5246
5247#endif // #ifndef DACCESS_COMPILE
5248
5249GCInfoToken NativeImageJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
5250{
5251 CONTRACTL {
5252 NOTHROW;
5253 GC_NOTRIGGER;
5254 HOST_NOCALLS;
5255 SUPPORTS_DAC;
5256 } CONTRACTL_END;
5257
5258 PTR_RUNTIME_FUNCTION pRuntimeFunction = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
5259 TADDR baseAddress = JitTokenToModuleBase(MethodToken);
5260
5261#ifndef DACCESS_COMPILE
5262 if (g_IBCLogger.InstrEnabled())
5263 {
5264 PTR_NGenLayoutInfo pNgenLayout = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
5265 PTR_MethodDesc pMD = NativeUnwindInfoLookupTable::GetMethodDesc(pNgenLayout, pRuntimeFunction, baseAddress);
5266 g_IBCLogger.LogMethodGCInfoAccess(pMD);
5267 }
5268#endif
5269
5270 SIZE_T nUnwindDataSize;
5271 PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
5272
5273    // GCInfo immediately follows the unwind data
5274 // GCInfo from an NGEN-ed image is always the current version
5275 return{ dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize, GCINFO_VERSION };
5276}
5277
5278unsigned NativeImageJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
5279{
5280 CONTRACTL {
5281 NOTHROW;
5282 GC_NOTRIGGER;
5283 } CONTRACTL_END;
5284
5285 NGenLayoutInfo * pNgenLayout = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
5286
5287    // Early out if the method doesn't have the EH info bit set.
5288 if (!NativeUnwindInfoLookupTable::HasExceptionInfo(pNgenLayout, PTR_RUNTIME_FUNCTION(MethodToken.m_pCodeHeader)))
5289 return 0;
5290
5291 PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pNgenLayout->m_ExceptionInfoLookupTable.StartAddress());
5292 _ASSERTE(pExceptionLookupTable != NULL);
5293
5294 SIZE_T size = pNgenLayout->m_ExceptionInfoLookupTable.Size();
5295 COUNT_T numLookupTableEntries = (COUNT_T)(size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
5296    // at least 2 entries (1 valid entry + 1 sentinel entry)
5297 _ASSERTE(numLookupTableEntries >= 2);
5298
5299 DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));
5300
5301 COUNT_T ehInfoSize = 0;
5302 DWORD exceptionInfoRVA = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
5303 numLookupTableEntries,
5304 methodStartRVA,
5305 &ehInfoSize);
5306 if (exceptionInfoRVA == 0)
5307 return 0;
5308
5309 pEnumState->iCurrentPos = 0;
5310 pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;
5311
5312 return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
5313}
5314
5315PTR_EXCEPTION_CLAUSE_TOKEN NativeImageJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
5316 EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
5317{
5318 CONTRACTL {
5319 NOTHROW;
5320 GC_NOTRIGGER;
5321 } CONTRACTL_END;
5322
5323 unsigned iCurrentPos = pEnumState->iCurrentPos;
5324 pEnumState->iCurrentPos++;
5325
5326 CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
5327
5328    // Copy to the output parameter. This is a nice abstraction for the future:
5329    // if we want to compress the clause encoding, we can do so without affecting the call sites.
5330 pEHClauseOut->TryStartPC = pClause->TryStartPC;
5331 pEHClauseOut->TryEndPC = pClause->TryEndPC;
5332 pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC;
5333 pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
5334 pEHClauseOut->Flags = pClause->Flags;
5335 pEHClauseOut->FilterOffset = pClause->FilterOffset;
5336
5337 return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
5338}
5339
5340#ifndef DACCESS_COMPILE
5341
5342TypeHandle NativeImageJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
5343 CrawlFrame* pCf)
5344{
5345 CONTRACTL {
5346 THROWS;
5347 GC_TRIGGERS;
5348 } CONTRACTL_END;
5349
5350 _ASSERTE(NULL != pCf);
5351 _ASSERTE(NULL != pEHClause);
5352 _ASSERTE(IsTypedHandler(pEHClause));
5353
5354 MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction());
5355
5356 _ASSERTE(pMD != NULL);
5357
5358 Module* pModule = pMD->GetModule();
5359 PREFIX_ASSUME(pModule != NULL);
5360
5361 SigTypeContext typeContext(pMD);
5362 VarKind k = hasNoVars;
5363
5364 mdToken typeTok = pEHClause->ClassToken;
5365
5366 // In the vast majority of cases the code under the "if" below
5367 // will not be executed.
5368 //
5369 // First grab the representative instantiations. For code
5370 // shared by multiple generic instantiations these are the
5371 // canonical (representative) instantiation.
5372 if (TypeFromToken(typeTok) == mdtTypeSpec)
5373 {
5374 PCCOR_SIGNATURE pSig;
5375 ULONG cSig;
5376 IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
5377
5378 SigPointer psig(pSig, cSig);
5379 k = psig.IsPolyType(&typeContext);
5380
5381 // Grab the active class and method instantiation. This exact instantiation is only
5382 // needed in the corner case of "generic" exception catching in shared
5383 // generic code. We don't need the exact instantiation if the token
5384 // doesn't contain E_T_VAR or E_T_MVAR.
5385 if ((k & hasSharableVarsMask) != 0)
5386 {
5387 Instantiation classInst;
5388 Instantiation methodInst;
5389 pCf->GetExactGenericInstantiations(&classInst,&methodInst);
5390 SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
5391 }
5392 }
5393
5394 return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
5395 ClassLoader::ReturnNullIfNotFound);
5396}
5397
5398#endif // #ifndef DACCESS_COMPILE
5399
5400//-----------------------------------------------------------------------------
5401// Ngen info manager
5402//-----------------------------------------------------------------------------
5403BOOL NativeImageJitManager::GetBoundariesAndVars(
5404 const DebugInfoRequest & request,
5405 IN FP_IDS_NEW fpNew, IN void * pNewData,
5406 OUT ULONG32 * pcMap,
5407 OUT ICorDebugInfo::OffsetMapping **ppMap,
5408 OUT ULONG32 * pcVars,
5409 OUT ICorDebugInfo::NativeVarInfo **ppVars)
5410{
5411 CONTRACTL {
5412 THROWS; // on OOM.
5413 GC_NOTRIGGER; // getting vars shouldn't trigger
5414 SUPPORTS_DAC;
5415 } CONTRACTL_END;
5416
5417 // We want the module that the code is instantiated in, not necessarily the one
5418 // that it was declared in. This only matters for ngen-generics.
5419 MethodDesc * pMD = request.GetMD();
5420 Module * pModule = pMD->GetZapModule();
5421 PREFIX_ASSUME(pModule != NULL);
5422
5423 PTR_BYTE pDebugInfo = pModule->GetNativeDebugInfo(pMD);
5424
5425 // No header created, which means no jit information is available.
5426 if (pDebugInfo == NULL)
5427 return FALSE;
5428
5429 // Uncompress. This allocates memory and may throw.
5430 CompressDebugInfo::RestoreBoundariesAndVars(
5431 fpNew, pNewData, // allocators
5432 pDebugInfo, // input
5433 pcMap, ppMap,
5434 pcVars, ppVars); // output
5435
5436 return TRUE;
5437}
5438
5439#ifdef DACCESS_COMPILE
5440//
5441// Need to write out debug info
5442//
5443void NativeImageJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
5444{
5445 SUPPORTS_DAC;
5446
5447 Module * pModule = pMD->GetZapModule();
5448 PREFIX_ASSUME(pModule != NULL);
5449 PTR_BYTE pDebugInfo = pModule->GetNativeDebugInfo(pMD);
5450
5451 if (pDebugInfo != NULL)
5452 {
5453 CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo);
5454 }
5455}
5456#endif
5457
5458PCODE NativeImageJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
5459{
5460 WRAPPER_NO_CONTRACT;
5461
5462 MethodRegionInfo methodRegionInfo;
5463 JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo);
5464
5465 if (relOffset < methodRegionInfo.hotSize)
5466 return methodRegionInfo.hotStartAddress + relOffset;
5467
5468 SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize;
5469 _ASSERTE(coldOffset < methodRegionInfo.coldSize);
5470 return methodRegionInfo.coldStartAddress + coldOffset;
5471}
5472
5473BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
5474 PCODE currentPC,
5475 MethodDesc** ppMethodDesc,
5476 EECodeInfo * pCodeInfo)
5477{
5478 CONTRACTL {
5479 SO_TOLERANT;
5480 NOTHROW;
5481 GC_NOTRIGGER;
5482 SUPPORTS_DAC;
5483 } CONTRACTL_END;
5484
5485 TADDR currentInstr = PCODEToPINSTR(currentPC);
5486
5487 Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
5488
5489 NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
5490 DWORD iRange = 0;
5491
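    // The NGen image lays out code in up to three sections: [0] hot code, [1] unprofiled code, and
    // [2] the cold parts of hot/cold-split methods (a mapping inferred from how the indices are
    // used below and in MethodIterator). Figure out which section contains the current instruction.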
5492 if (pLayoutInfo->m_CodeSections[0].IsInRange(currentInstr))
5493 {
5494 iRange = 0;
5495 }
5496 else
5497 if (pLayoutInfo->m_CodeSections[1].IsInRange(currentInstr))
5498 {
5499 iRange = 1;
5500 }
5501 else
5502 if (pLayoutInfo->m_CodeSections[2].IsInRange(currentInstr))
5503 {
5504 iRange = 2;
5505 }
5506 else
5507 {
5508 return FALSE;
5509 }
5510
5511 TADDR ImageBase = pRangeSection->LowAddress;
5512
5513 DWORD RelativePc = (DWORD)(currentInstr - ImageBase);
5514
5515 PTR_RUNTIME_FUNCTION FunctionEntry;
5516
5517 if (iRange == 2)
5518 {
5519 int ColdMethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
5520 pLayoutInfo->m_pRuntimeFunctions[2],
5521 0,
5522 pLayoutInfo->m_nRuntimeFunctions[2] - 1);
5523
5524 if (ColdMethodIndex < 0)
5525 return FALSE;
5526
5527#ifdef WIN64EXCEPTIONS
5528 // Save the raw entry
5529 int RawColdMethodIndex = ColdMethodIndex;
5530
5531 PTR_CORCOMPILE_COLD_METHOD_ENTRY pColdCodeMap = pLayoutInfo->m_ColdCodeMap;
5532
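        // Walk backwards past entries whose mainFunctionEntryRVA is 0 (funclets and method
        // fragments) until we reach the cold code map entry for the containing method body.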
5533 while (pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA == 0)
5534 ColdMethodIndex--;
5535
5536 FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA);
5537#else
5538 DWORD ColdUnwindData = pLayoutInfo->m_pRuntimeFunctions[2][ColdMethodIndex].UnwindData;
5539 _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
5540 FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
5541#endif
5542
5543 if (ppMethodDesc)
5544 {
5545 DWORD methodDescRVA;
5546
5547 COUNT_T iIndex = (COUNT_T)(FunctionEntry - pLayoutInfo->m_pRuntimeFunctions[0]);
5548 if (iIndex >= pLayoutInfo->m_nRuntimeFunctions[0])
5549 {
5550 iIndex = (COUNT_T)(FunctionEntry - pLayoutInfo->m_pRuntimeFunctions[1]);
5551 _ASSERTE(iIndex < pLayoutInfo->m_nRuntimeFunctions[1]);
5552 methodDescRVA = pLayoutInfo->m_MethodDescs[1][iIndex];
5553 }
5554 else
5555 {
5556 methodDescRVA = pLayoutInfo->m_MethodDescs[0][iIndex];
5557 }
5558            _ASSERTE(methodDescRVA != 0);
5559
5560 // Note that the MethodDesc does not have to be restored. (It happens when we are called
5561 // from SetupGcCoverageForNativeMethod.)
5562 *ppMethodDesc = PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + ImageBase);
5563 }
5564
5565 if (pCodeInfo)
5566 {
5567 PTR_RUNTIME_FUNCTION ColdFunctionTable = pLayoutInfo->m_pRuntimeFunctions[2];
5568
5569 PTR_RUNTIME_FUNCTION ColdFunctionEntry = ColdFunctionTable + ColdMethodIndex;
5570 DWORD coldCodeOffset = (DWORD)(RelativePc - RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry));
5571 pCodeInfo->m_relOffset = pLayoutInfo->m_ColdCodeMap[ColdMethodIndex].hotCodeSize + coldCodeOffset;
5572
5573 // We are using RUNTIME_FUNCTION as METHODTOKEN
5574 pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
5575
5576#ifdef WIN64EXCEPTIONS
5577 PTR_RUNTIME_FUNCTION RawColdFunctionEntry = ColdFunctionTable + RawColdMethodIndex;
5578#ifdef _TARGET_AMD64_
5579 if ((RawColdFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0)
5580 {
5581 RawColdFunctionEntry = PTR_RUNTIME_FUNCTION(ImageBase + (RawColdFunctionEntry->UnwindData & ~RUNTIME_FUNCTION_INDIRECT));
5582 }
5583#endif // _TARGET_AMD64_
5584 pCodeInfo->m_pFunctionEntry = RawColdFunctionEntry;
5585#endif
5586 }
5587 }
5588 else
5589 {
5590 PTR_DWORD pRuntimeFunctionLookupTable = dac_cast<PTR_DWORD>(pLayoutInfo->m_UnwindInfoLookupTable[iRange]);
5591
5592 _ASSERTE(pRuntimeFunctionLookupTable != NULL);
5593
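        // The unwind info lookup table maps each RUNTIME_FUNCTION_LOOKUP_STRIDE-sized slice of the
        // code section to a narrowed [Low, High] index range, so the lookup below only needs to
        // examine a handful of RUNTIME_FUNCTION entries.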
5594 DWORD RelativeToCodeStart = (DWORD)(currentInstr - dac_cast<TADDR>(pLayoutInfo->m_CodeSections[iRange].StartAddress()));
5595 COUNT_T iStrideIndex = RelativeToCodeStart / RUNTIME_FUNCTION_LOOKUP_STRIDE;
5596
5597 // The lookup table may not be big enough to cover the entire code range if there was padding inserted during NGen image layout.
5598        // In this case, the last lookup table entry covers the rest of the code range.
5599 if (iStrideIndex >= pLayoutInfo->m_UnwindInfoLookupTableEntryCount[iRange])
5600 iStrideIndex = pLayoutInfo->m_UnwindInfoLookupTableEntryCount[iRange] - 1;
5601
5602 int Low = pRuntimeFunctionLookupTable[iStrideIndex];
5603 int High = pRuntimeFunctionLookupTable[iStrideIndex+1];
5604
5605 PTR_RUNTIME_FUNCTION FunctionTable = pLayoutInfo->m_pRuntimeFunctions[iRange];
5606 PTR_DWORD pMethodDescs = pLayoutInfo->m_MethodDescs[iRange];
5607
5608 int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
5609 FunctionTable,
5610 Low,
5611 High);
5612
5613 if (MethodIndex < 0)
5614 return FALSE;
5615
5616#ifdef WIN64EXCEPTIONS
5617 // Save the raw entry
5618        PTR_RUNTIME_FUNCTION RawFunctionEntry = FunctionTable + MethodIndex;
5619
5620 // Skip funclets to get the method desc
5621 while (pMethodDescs[MethodIndex] == 0)
5622 MethodIndex--;
5623#endif
5624
5625 FunctionEntry = FunctionTable + MethodIndex;
5626
5627 if (ppMethodDesc)
5628 {
5629 DWORD methodDescRVA = pMethodDescs[MethodIndex];
5630            _ASSERTE(methodDescRVA != 0);
5631
5632 // Note that the MethodDesc does not have to be restored. (It happens when we are called
5633 // from SetupGcCoverageForNativeMethod.)
5634 *ppMethodDesc = PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + ImageBase);
5635
5636 // We are likely executing the code already or going to execute it soon. However, there are a few cases like
5637 // code:MethodTable::GetMethodDescForSlot where it is not the case. Log the code access here to avoid these
5638 // cases from touching cold code maps.
5639 g_IBCLogger.LogMethodCodeAccess(*ppMethodDesc);
5640 }
5641
5642 // Get the function entry that corresponds to the real method desc.
5643 _ASSERTE((RelativePc >= RUNTIME_FUNCTION__BeginAddress(FunctionEntry)));
5644
5645 if (pCodeInfo)
5646 {
5647 pCodeInfo->m_relOffset = (DWORD)
5648 (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
5649
5650 // We are using RUNTIME_FUNCTION as METHODTOKEN
5651 pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
5652
5653#ifdef WIN64EXCEPTIONS
5654 AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0));
5655 pCodeInfo->m_pFunctionEntry = RawFunctionEntry;
5656#endif
5657 }
5658 }
5659
5660 return TRUE;
5661}
5662
5663#if defined(WIN64EXCEPTIONS)
5664PTR_RUNTIME_FUNCTION NativeImageJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
5665{
5666 CONTRACTL {
5667 NOTHROW;
5668 GC_NOTRIGGER;
5669 } CONTRACTL_END;
5670
5671 if (!pCodeInfo->IsValid())
5672 {
5673 return NULL;
5674 }
5675
5676 // code:NativeImageJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only
5677 // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry.
5678 _ASSERTE(pCodeInfo->GetRelOffset() == 0);
5679
5680 return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader);
5681}
5682
5683TADDR NativeImageJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
5684{
5685 LIMITED_METHOD_DAC_CONTRACT;
5686
5687#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
5688 NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(pCodeInfo->GetMethodToken())->GetNGenLayoutInfo();
5689
5690 if (pLayoutInfo->m_CodeSections[2].IsInRange(pCodeInfo->GetCodeAddress()))
5691 {
5692 // If the address is in the cold section, then we assume it is cold main function
5693 // code, NOT a funclet. So, don't do the backward walk: just return the start address
5694 // of the main function.
5695 // @ARMTODO: Handle hot/cold splitting with EH funclets
5696 return pCodeInfo->GetStartAddress();
5697 }
5698#endif
5699
5700 return IJitManager::GetFuncletStartAddress(pCodeInfo);
5701}
5702
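// Walks the RUNTIME_FUNCTION entries for one code region and records the start offset of each
// funclet (relative to pCodeStart, adjusted by ofsAdj) into pStartFuncletOffsets, up to dwLength
// entries; function fragments are skipped. *pnFunclets is incremented for every funclet found,
// even beyond dwLength, so callers can detect truncation.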
5703static void GetFuncletStartOffsetsHelper(PCODE pCodeStart, SIZE_T size, SIZE_T ofsAdj,
5704 PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR moduleBase,
5705 DWORD * pnFunclets, DWORD* pStartFuncletOffsets, DWORD dwLength)
5706{
5707 _ASSERTE(FitsInU4((pCodeStart + size) - moduleBase));
5708 DWORD endAddress = (DWORD)((pCodeStart + size) - moduleBase);
5709
5710 // Entries are sorted and terminated by sentinel value (DWORD)-1
5711 for ( ; RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) < endAddress; pFunctionEntry++)
5712 {
5713#ifdef _TARGET_AMD64_
5714 _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
5715#endif
5716
5717#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
5718 if (IsFunctionFragment(moduleBase, pFunctionEntry))
5719 {
5720 // This is a fragment (not the funclet beginning); skip it
5721 continue;
5722 }
5723#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
5724
5725 if (*pnFunclets < dwLength)
5726 {
5727 TADDR funcletStartAddress = (moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry)) + ofsAdj;
5728 _ASSERTE(FitsInU4(funcletStartAddress - pCodeStart));
5729 pStartFuncletOffsets[*pnFunclets] = (DWORD)(funcletStartAddress - pCodeStart);
5730 }
5731 (*pnFunclets)++;
5732 }
5733}
5734
5735DWORD NativeImageJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
5736{
5737 CONTRACTL
5738 {
5739 NOTHROW;
5740 GC_NOTRIGGER;
5741 }
5742 CONTRACTL_END;
5743
5744 PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1;
5745
5746 TADDR moduleBase = JitTokenToModuleBase(MethodToken);
5747 DWORD nFunclets = 0;
5748 MethodRegionInfo regionInfo;
5749 JitTokenToMethodRegionInfo(MethodToken, &regionInfo);
5750
5751 // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper()
5752 // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning
5753 // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function
5754 // fragments until the first funclet, if any, is found.
5755
5756 GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0,
5757 pFirstFuncletFunctionEntry, moduleBase,
5758 &nFunclets, pStartFuncletOffsets, dwLength);
5759
5760    // There are no funclets in the cold section on ARM yet
5761 // @ARMTODO: support hot/cold splitting in functions with EH
5762#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_)
5763    if (regionInfo.coldSize != 0)
5764 {
5765 NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
5766
5767 int iColdMethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(
5768 (DWORD)(regionInfo.coldStartAddress - moduleBase),
5769 pLayoutInfo->m_pRuntimeFunctions[2],
5770 0,
5771 pLayoutInfo->m_nRuntimeFunctions[2] - 1);
5772
5773 PTR_RUNTIME_FUNCTION pFunctionEntry = pLayoutInfo->m_pRuntimeFunctions[2] + iColdMethodIndex;
5774
5775 _ASSERTE(regionInfo.coldStartAddress == moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry));
5776
5777#ifdef _TARGET_AMD64_
5778 // Skip cold part of the method body
5779 if ((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0)
5780 pFunctionEntry++;
5781#endif
5782
5783 GetFuncletStartOffsetsHelper(regionInfo.coldStartAddress, regionInfo.coldSize, regionInfo.hotSize,
5784 pFunctionEntry, moduleBase,
5785 &nFunclets, pStartFuncletOffsets, dwLength);
5786 }
5787#endif // !_TARGET_ARM_ && !_TARGET_ARM64_
5788
5789 return nFunclets;
5790}
5791
5792BOOL NativeImageJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
5793{
5794 CONTRACTL {
5795 NOTHROW;
5796 GC_NOTRIGGER;
5797 MODE_ANY;
5798 }
5799 CONTRACTL_END;
5800
5801 if (!pCodeInfo->IsFunclet())
5802 return FALSE;
5803
5804 //
5805 // The generic IsFilterFunclet implementation is touching exception handling tables.
5806    // It is bad for the working set because it is sometimes called during GC stackwalks.
5807 // The optimized version for native images does not touch exception handling tables.
5808 //
5809
5810 NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(pCodeInfo->GetMethodToken())->GetNGenLayoutInfo();
5811
5812 SIZE_T size;
5813 PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size);
5814 _ASSERTE(pUnwindData != NULL);
5815
5816 // Personality routine is always the last element of the unwind data
5817 DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1);
5818
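    // A funclet is a filter funclet exactly when its personality routine is the dedicated filter
    // personality routine recorded in the NGen layout info.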
5819 BOOL fRet = (pLayoutInfo->m_rvaFilterPersonalityRoutine == rvaPersonalityRoutine);
5820
5821 // Verify that the optimized implementation is in sync with the slow implementation
5822 _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo));
5823
5824 return fRet;
5825}
5826
5827#endif // WIN64EXCEPTIONS
5828
5829StubCodeBlockKind NativeImageJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
5830{
5831 CONTRACTL
5832 {
5833 NOTHROW;
5834 GC_NOTRIGGER;
5835 SO_TOLERANT;
5836 MODE_ANY;
5837 }
5838 CONTRACTL_END;
5839
5840 Module * pZapModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
5841
5842 if (pZapModule->IsZappedPrecode(currentPC))
5843 {
5844 return STUB_CODE_BLOCK_PRECODE;
5845 }
5846
5847 NGenLayoutInfo * pLayoutInfo = pZapModule->GetNGenLayoutInfo();
5848 _ASSERTE(pLayoutInfo != NULL);
5849
5850 if (pLayoutInfo->m_JumpStubs.IsInRange(currentPC))
5851 {
5852 return STUB_CODE_BLOCK_JUMPSTUB;
5853 }
5854
5855 if (pLayoutInfo->m_StubLinkStubs.IsInRange(currentPC))
5856 {
5857 return STUB_CODE_BLOCK_STUBLINK;
5858 }
5859
5860 if (pLayoutInfo->m_VirtualMethodThunks.IsInRange(currentPC))
5861 {
5862 return STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK;
5863 }
5864
5865 if (pLayoutInfo->m_ExternalMethodThunks.IsInRange(currentPC))
5866 {
5867 return STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK;
5868 }
5869
5870 return STUB_CODE_BLOCK_UNKNOWN;
5871}
5872
5873PTR_Module NativeImageJitManager::JitTokenToZapModule(const METHODTOKEN& MethodToken)
5874{
5875 LIMITED_METHOD_DAC_CONTRACT;
5876 return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule);
5877}
5878void NativeImageJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken,
5879 MethodRegionInfo * methodRegionInfo)
5880{
5881 CONTRACTL {
5882 NOTHROW;
5883 GC_NOTRIGGER;
5884 SUPPORTS_DAC;
5885 } CONTRACTL_END;
5886
5887 _ASSERTE(methodRegionInfo != NULL);
5888
5889 //
5890 // Initialize methodRegionInfo assuming that the method is entirely hot. This is the common
5891 // case (either binary is not procedure split or the current method is all hot). We can
5892 // adjust these values later if necessary.
5893 //
5894
5895 methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
5896 methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
5897 methodRegionInfo->coldStartAddress = 0;
5898 methodRegionInfo->coldSize = 0;
5899
5900 RangeSection *rangeSection = MethodToken.m_pRangeSection;
5901 PREFIX_ASSUME(rangeSection != NULL);
5902
5903 Module * pModule = dac_cast<PTR_Module>(rangeSection->pHeapListOrZapModule);
5904
5905 NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
5906
5907 //
5908 // If this module is not procedure split, then we're done.
5909 //
5910 if (pLayoutInfo->m_CodeSections[2].Size() == 0)
5911 return;
5912
5913 //
5914 // Perform a binary search in the cold range section until we find our method
5915 //
5916
5917 TADDR ImageBase = rangeSection->LowAddress;
5918
5919 int Low = 0;
5920 int High = pLayoutInfo->m_nRuntimeFunctions[2] - 1;
5921
5922 PTR_RUNTIME_FUNCTION pRuntimeFunctionTable = pLayoutInfo->m_pRuntimeFunctions[2];
5923 PTR_CORCOMPILE_COLD_METHOD_ENTRY pColdCodeMap = pLayoutInfo->m_ColdCodeMap;
5924
5925 while (Low <= High)
5926 {
5927 int Middle = Low + (High - Low) / 2;
5928
5929 int ColdMethodIndex = Middle;
5930
5931 PTR_RUNTIME_FUNCTION FunctionEntry;
5932
5933#ifdef WIN64EXCEPTIONS
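        // Skip backwards over funclet/fragment entries (mainFunctionEntryRVA == 0) to find the
        // entry that identifies the owning method body, as in JitCodeToMethodInfo above.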
5934 while (pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA == 0)
5935 ColdMethodIndex--;
5936
5937 FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA);
5938#else
5939 DWORD ColdUnwindData = pRuntimeFunctionTable[ColdMethodIndex].UnwindData;
5940 _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
5941 FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
5942#endif
5943
5944 if (FunctionEntry == dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader))
5945 {
5946 PTR_RUNTIME_FUNCTION ColdFunctionEntry = pRuntimeFunctionTable + ColdMethodIndex;
5947
5948 methodRegionInfo->coldStartAddress = ImageBase + RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry);
5949
5950 //
5951 // At this point methodRegionInfo->hotSize is set to the total size of
5952 // the method obtained from the GC info (we set that in the init code above).
5953            // Use that and the cold code map entry's hotCodeSize to compute the hot and cold code sizes.
5954 //
5955
5956 ULONG hotCodeSize = pColdCodeMap[ColdMethodIndex].hotCodeSize;
5957
5958 methodRegionInfo->coldSize = methodRegionInfo->hotSize - hotCodeSize;
5959 methodRegionInfo->hotSize = hotCodeSize;
5960
5961 return;
5962 }
5963 else if (FunctionEntry < dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader))
5964 {
5965 Low = Middle + 1;
5966 }
5967 else
5968 {
5969 // Use ColdMethodIndex to take advantage of entries skipped while looking for method start
5970 High = ColdMethodIndex - 1;
5971 }
5972 }
5973
5974 //
5975 // We didn't find it. Therefore this method doesn't have a cold section.
5976 //
5977
5978 return;
5979}
5980
5981#ifdef DACCESS_COMPILE
5982
5983void NativeImageJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
5984{
5985 IJitManager::EnumMemoryRegions(flags);
5986}
5987
5988#if defined(WIN64EXCEPTIONS)
5989
5990//
5991// To locate an entry in the function entry table (the program exceptions data directory), the debugger
5992// performs a binary search over the table. This function reports the entries that are encountered in the
5993// binary search.
5994//
5995// Parameters:
5996// pRtf: The target function table entry to be located
5997// pNativeLayout: A pointer to the loaded native layout for the module containing pRtf
5998//
5999static void EnumRuntimeFunctionEntriesToFindEntry(PTR_RUNTIME_FUNCTION pRtf, PTR_PEImageLayout pNativeLayout)
6000{
6001 pRtf.EnumMem();
6002
6003 if (pNativeLayout == NULL)
6004 {
6005 return;
6006 }
6007
6008 IMAGE_DATA_DIRECTORY * pProgramExceptionsDirectory = pNativeLayout->GetDirectoryEntry(IMAGE_DIRECTORY_ENTRY_EXCEPTION);
6009 if (!pProgramExceptionsDirectory ||
6010 (pProgramExceptionsDirectory->Size == 0) ||
6011 (pProgramExceptionsDirectory->Size % sizeof(T_RUNTIME_FUNCTION) != 0))
6012 {
6013        // Program exceptions directory is malformed
6014 return;
6015 }
6016
6017 PTR_BYTE moduleBase(pNativeLayout->GetBase());
6018 PTR_RUNTIME_FUNCTION firstFunctionEntry(moduleBase + pProgramExceptionsDirectory->VirtualAddress);
6019
6020 if (pRtf < firstFunctionEntry ||
6021 ((dac_cast<TADDR>(pRtf) - dac_cast<TADDR>(firstFunctionEntry)) % sizeof(T_RUNTIME_FUNCTION) != 0))
6022 {
6023        // Program exceptions directory is malformed
6024 return;
6025 }
6026
6027// Review conversion of size_t to ULONG.
6028#if defined(_MSC_VER)
6029#pragma warning(push)
6030#pragma warning(disable:4267)
6031#endif // defined(_MSC_VER)
6032
6033 ULONG indexToLocate = pRtf - firstFunctionEntry;
6034
6035#if defined(_MSC_VER)
6036#pragma warning(pop)
6037#endif // defined(_MSC_VER)
6038
6039 ULONG low = 0; // index in the function entry table of low end of search range
6040 ULONG high = (pProgramExceptionsDirectory->Size)/sizeof(T_RUNTIME_FUNCTION) - 1; // index of high end of search range
6041 ULONG mid = (low + high) /2; // index of entry to be compared
6042
6043 if (indexToLocate > high)
6044 {
6045 return;
6046 }
6047
6048 while (indexToLocate != mid)
6049 {
6050 PTR_RUNTIME_FUNCTION functionEntry = firstFunctionEntry + mid;
6051 functionEntry.EnumMem();
6052 if (indexToLocate > mid)
6053 {
6054 low = mid + 1;
6055 }
6056 else
6057 {
6058 high = mid - 1;
6059 }
6060 mid = (low + high) /2;
6061 _ASSERTE( low <= mid && mid <= high );
6062 }
6063}
6064
6065//
6066// EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the
6067// specified method.
6068//
6069// Note that in theory, a dump generation library could save the unwind information itself without help
6070// from us, since it's stored in the image in the standard function table layout for Win64. However,
6071// dump-generation libraries assume that the image will be available at debug time, and if the image
6072// isn't available then it is acceptable for stackwalking to break. For ngen images (which are created
6073// on the client), it usually isn't possible to have the image available at debug time, and so for minidumps
6074// we must explicitly ensure the unwind information is saved into the dump.
6075//
6076// Arguments:
6077// flags - EnumMem flags
6078//     pCodeInfo - EECodeInfo for the method in question
6079//
6080void NativeImageJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
6081{
6082 // Get the RUNTIME_FUNCTION entry for this method
6083 PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry();
6084
6085 if (pRtf==NULL)
6086 {
6087 return;
6088 }
6089
6090 // Enumerate the function entry and other entries needed to locate it in the program exceptions directory
6091 Module * pModule = JitTokenToZapModule(pCodeInfo->GetMethodToken());
6092 EnumRuntimeFunctionEntriesToFindEntry(pRtf, pModule->GetFile()->GetLoadedNative());
6093
6094 SIZE_T size;
6095 PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size);
6096 if (pUnwindData != NULL)
6097 DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size);
6098}
6099
6100#endif //WIN64EXCEPTIONS
6101#endif // #ifdef DACCESS_COMPILE
6102
6103// Return start of exception info for a method, or 0 if the method has no EH info
6104DWORD NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable,
6105 COUNT_T numLookupEntries,
6106 DWORD methodStartRVA,
6107 COUNT_T* pSize)
6108{
6109 CONTRACTL {
6110 NOTHROW;
6111 GC_NOTRIGGER;
6112 SUPPORTS_DAC;
6113 } CONTRACTL_END;
6114
6115 _ASSERTE(pExceptionLookupTable != NULL);
6116
6117 COUNT_T start = 0;
6118 COUNT_T end = numLookupEntries - 2;
6119
6120    // The last entry in the lookup table is a sentinel entry.
6121    // The sentinel entry helps to determine the number of EH clauses for the last real table entry.
6122 _ASSERTE(pExceptionLookupTable->ExceptionLookupEntry(numLookupEntries-1)->MethodStartRVA == (DWORD)-1);
6123
6124 // Binary search the lookup table
6125    // Using linear search is faster once we get down to a small number of entries.
6126 while (end - start > 10)
6127 {
6128 COUNT_T middle = start + (end - start) / 2;
6129
6130 _ASSERTE(start < middle && middle < end);
6131
6132 DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(middle)->MethodStartRVA;
6133
6134 if (methodStartRVA < rva)
6135 {
6136 end = middle - 1;
6137 }
6138 else
6139 {
6140 start = middle;
6141 }
6142 }
6143
6144 for (COUNT_T i = start; i <= end; ++i)
6145 {
6146 DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(i)->MethodStartRVA;
6147 if (methodStartRVA == rva)
6148 {
6149 CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY *pEntry = pExceptionLookupTable->ExceptionLookupEntry(i);
6150
6151            // Get the count of EH clause entries
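            // The EH info for this method ends where the next entry's EH info begins; the sentinel
            // entry guarantees that a valid next entry exists even for the last method.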
6152 CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY * pNextEntry = pExceptionLookupTable->ExceptionLookupEntry(i + 1);
6153 *pSize = pNextEntry->ExceptionInfoRVA - pEntry->ExceptionInfoRVA;
6154
6155 return pEntry->ExceptionInfoRVA;
6156 }
6157 }
6158
6159 // Not found
6160 return 0;
6161}
6162
6163int NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(DWORD RelativePc,
6164 PTR_RUNTIME_FUNCTION pRuntimeFunctionTable,
6165 int Low,
6166 int High)
6167{
6168 CONTRACTL {
6169 SO_TOLERANT;
6170 NOTHROW;
6171 GC_NOTRIGGER;
6172 SUPPORTS_DAC;
6173 } CONTRACTL_END;
6174
6175
6176#ifdef _TARGET_ARM_
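    // The BeginAddress values in the table carry the Thumb bit on ARM, so set it on the search key
    // as well (an assumption based on this adjustment).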
6177 RelativePc |= THUMB_CODE;
6178#endif
6179
6180 // Entries are sorted and terminated by sentinel value (DWORD)-1
6181
6182 // Binary search the RUNTIME_FUNCTION table
6183 // Use linear search once we get down to a small number of elements
6184    // to avoid binary search overhead.
6185 while (High - Low > 10)
6186 {
6187 int Middle = Low + (High - Low) / 2;
6188
6189 PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + Middle;
6190 if (RelativePc < pFunctionEntry->BeginAddress)
6191 {
6192 High = Middle - 1;
6193 }
6194 else
6195 {
6196 Low = Middle;
6197 }
6198 }
6199
6200 for (int i = Low; i <= High; ++i)
6201 {
6202        // This is safe because the entries are terminated by the sentinel value (DWORD)-1
6203 PTR_RUNTIME_FUNCTION pNextFunctionEntry = pRuntimeFunctionTable + (i + 1);
6204
6205 if (RelativePc < pNextFunctionEntry->BeginAddress)
6206 {
6207 PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + i;
6208 if (RelativePc >= pFunctionEntry->BeginAddress)
6209 {
6210 return i;
6211 }
6212 break;
6213 }
6214 }
6215
6216 return -1;
6217}
6218
6219BOOL NativeUnwindInfoLookupTable::HasExceptionInfo(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction)
6220{
6221 LIMITED_METHOD_DAC_CONTRACT;
6222 DWORD methodDescRVA = NativeUnwindInfoLookupTable::GetMethodDescRVA(pNgenLayout, pMainRuntimeFunction);
6223 return (methodDescRVA & HAS_EXCEPTION_INFO_MASK);
6224}
6225
6226PTR_MethodDesc NativeUnwindInfoLookupTable::GetMethodDesc(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction, TADDR moduleBase)
6227{
6228 LIMITED_METHOD_DAC_CONTRACT;
6229 DWORD methodDescRVA = NativeUnwindInfoLookupTable::GetMethodDescRVA(pNgenLayout, pMainRuntimeFunction);
6230 return PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + moduleBase);
6231}
6232
6233DWORD NativeUnwindInfoLookupTable::GetMethodDescRVA(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction)
6234{
6235 LIMITED_METHOD_DAC_CONTRACT;
6236
6237 COUNT_T iIndex = (COUNT_T)(pMainRuntimeFunction - pNgenLayout->m_pRuntimeFunctions[0]);
6238 DWORD rva = 0;
6239 if (iIndex >= pNgenLayout->m_nRuntimeFunctions[0])
6240 {
6241 iIndex = (COUNT_T)(pMainRuntimeFunction - pNgenLayout->m_pRuntimeFunctions[1]);
6242 _ASSERTE(iIndex < pNgenLayout->m_nRuntimeFunctions[1]);
6243 rva = pNgenLayout->m_MethodDescs[1][iIndex];
6244 }
6245 else
6246 {
6247 rva = pNgenLayout->m_MethodDescs[0][iIndex];
6248 }
6249 _ASSERTE(rva != 0);
6250
6251 return rva;
6252}
6253
6254#endif // FEATURE_PREJIT
6255
6256#ifndef DACCESS_COMPILE
6257
6258//-----------------------------------------------------------------------------
6259
6260
6261// Nirvana Support
6262
6263MethodDesc* __stdcall Nirvana_FindMethodDesc(PCODE ptr, BYTE*& hotStartAddress, size_t& hotSize, BYTE*& coldStartAddress, size_t & coldSize)
6264{
6265 EECodeInfo codeInfo(ptr);
6266 if (!codeInfo.IsValid())
6267 return NULL;
6268
6269 IJitManager::MethodRegionInfo methodRegionInfo;
6270 codeInfo.GetMethodRegionInfo(&methodRegionInfo);
6271
6272 hotStartAddress = (BYTE*)methodRegionInfo.hotStartAddress;
6273 hotSize = methodRegionInfo.hotSize;
6274 coldStartAddress = (BYTE*)methodRegionInfo.coldStartAddress;
6275 coldSize = methodRegionInfo.coldSize;
6276
6277 return codeInfo.GetMethodDesc();
6278}
6279
6280
6281bool Nirvana_GetMethodInfo(MethodDesc * pMD, BYTE*& hotStartAddress, size_t& hotSize, BYTE*& coldStartAddress, size_t & coldSize)
6282{
6283 EECodeInfo codeInfo(pMD->GetNativeCode());
6284 if (!codeInfo.IsValid())
6285 return false;
6286
6287 IJitManager::MethodRegionInfo methodRegionInfo;
6288 codeInfo.GetMethodRegionInfo(&methodRegionInfo);
6289
6290 hotStartAddress = (BYTE*)methodRegionInfo.hotStartAddress;
6291 hotSize = methodRegionInfo.hotSize;
6292 coldStartAddress = (BYTE*)methodRegionInfo.coldStartAddress;
6293 coldSize = methodRegionInfo.coldSize;
6294
6295 return true;
6296}
6297
6298
6299#include "sigformat.h"
6300
6301__forceinline bool Nirvana_PrintMethodDescWorker(__in_ecount(iBuffer) char * szBuffer, size_t iBuffer, MethodDesc * pMD, const char * pSigString)
6302{
6303 if (iBuffer == 0)
6304 return false;
6305
6306 szBuffer[0] = '\0';
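    // SigFormat is assumed to produce a string of the form "<return type> <name>(<args>)"; skip
    // past the first space so the return type is not included in the printed name.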
6307 pSigString = strchr(pSigString, ' ');
6308
6309 if (pSigString == NULL)
6310 return false;
6311
6312 ++pSigString;
6313
6314 LPCUTF8 pNamespace;
6315 LPCUTF8 pClassName = pMD->GetMethodTable()->GetFullyQualifiedNameInfo(&pNamespace);
6316
6317 if (pClassName == NULL)
6318 return false;
6319
6320 if (*pNamespace != 0)
6321 {
6322 if (_snprintf_s(szBuffer, iBuffer, _TRUNCATE, "%s.%s.%s", pNamespace, pClassName, pSigString) == -1)
6323 return false;
6324 }
6325 else
6326 {
6327 if (_snprintf_s(szBuffer, iBuffer, _TRUNCATE, "%s.%s", pClassName, pSigString) == -1)
6328 return false;
6329 }
6330
6331 _ASSERTE(szBuffer[0] != '\0');
6332
6333 return true;
6334}
6335
6336bool __stdcall Nirvana_PrintMethodDesc(__in_ecount(iBuffer) char * szBuffer, size_t iBuffer, MethodDesc * pMD)
6337{
6338 bool fResult = false;
6339
6340 EX_TRY
6341 {
6342 NewHolder<SigFormat> pSig = new SigFormat(pMD, NULL, false);
6343 fResult = Nirvana_PrintMethodDescWorker(szBuffer, iBuffer, pMD, pSig->GetCString());
6344 }
6345 EX_CATCH
6346 {
6347 fResult = false;
6348 }
6349 EX_END_CATCH(SwallowAllExceptions)
6350
6351 return fResult;
6352}
6353
6354
6355// Nirvana_Dummy() is a dummy function that is exported privately by ordinal only.
6356// The sole purpose of this function is to reference Nirvana_FindMethodDesc(),
6357// Nirvana_GetMethodInfo(), and Nirvana_PrintMethodDesc() so that they are not
6358// inlined or removed by the compiler or the linker.
6359
6360DWORD __stdcall Nirvana_Dummy()
6361{
6362 LIMITED_METHOD_CONTRACT;
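    // Fold each function's address into the return value so the references cannot be optimized away.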
6363 void * funcs[] = {
6364 (void*)Nirvana_FindMethodDesc,
6365 (void*)Nirvana_GetMethodInfo,
6366 (void*)Nirvana_PrintMethodDesc
6367 };
6368
6369 size_t n = sizeof(funcs) / sizeof(funcs[0]);
6370
6371 size_t sum = 0;
6372 for (size_t i = 0; i < n; ++i)
6373 sum += (size_t)funcs[i];
6374
6375 return (DWORD)sum;
6376}
6377
6378
6379#endif // #ifndef DACCESS_COMPILE
6380
6381
6382#ifdef FEATURE_PREJIT
6383
6384MethodIterator::MethodIterator(PTR_Module pModule, MethodIteratorOptions mio)
6385{
6386 CONTRACTL
6387 {
6388 THROWS;
6389 GC_NOTRIGGER;
6390 } CONTRACTL_END;
6391
6392 Init(pModule, pModule->GetNativeImage(), mio);
6393}
6394
6395MethodIterator::MethodIterator(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio)
6396{
6397 CONTRACTL
6398 {
6399 THROWS;
6400 GC_NOTRIGGER;
6401 } CONTRACTL_END;
6402
6403 Init(pModule, pPEDecoder, mio);
6404}
6405
6406void MethodIterator::Init(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio)
6407{
6408 CONTRACTL
6409 {
6410 THROWS;
6411 GC_NOTRIGGER;
6412 } CONTRACTL_END;
6413
6414 m_ModuleBase = dac_cast<TADDR>(pPEDecoder->GetBase());
6415
6416 methodIteratorOptions = mio;
6417
6418 m_pNgenLayout = pModule->GetNGenLayoutInfo();
6419
6420 m_fHotMethodsDone = FALSE;
6421 m_CurrentRuntimeFunctionIndex = -1;
6422 m_CurrentColdRuntimeFunctionIndex = 0;
6423}
6424
6425BOOL MethodIterator::Next()
6426{
6427 CONTRACTL {
6428 NOTHROW;
6429 GC_NOTRIGGER;
6430 } CONTRACTL_END;
6431
6432 m_CurrentRuntimeFunctionIndex ++;
6433
6434 if (!m_fHotMethodsDone)
6435 {
6436        // Iterate the hot methods
6437 if (methodIteratorOptions & Hot)
6438 {
6439#ifdef WIN64EXCEPTIONS
6440            // Skip to the next method,
6441            // skipping over method fragments and funclets.
6442 while (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[0])
6443 {
6444 if (m_pNgenLayout->m_MethodDescs[0][m_CurrentRuntimeFunctionIndex] != 0)
6445 return TRUE;
6446 m_CurrentRuntimeFunctionIndex++;
6447 }
6448#else
6449 if (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[0])
6450 return TRUE;
6451#endif
6452 }
6453 m_CurrentRuntimeFunctionIndex = 0;
6454 m_fHotMethodsDone = TRUE;
6455 }
6456
6457 if (methodIteratorOptions & Unprofiled)
6458 {
6459#ifdef WIN64EXCEPTIONS
6460            // Skip to the next method,
6461            // skipping over method fragments and funclets.
6462 while (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[1])
6463 {
6464 if (m_pNgenLayout->m_MethodDescs[1][m_CurrentRuntimeFunctionIndex] != 0)
6465 return TRUE;
6466 m_CurrentRuntimeFunctionIndex++;
6467 }
6468#else
6469 if (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[1])
6470 return TRUE;
6471#endif
6472 }
6473
6474 return FALSE;
6475}
6476
6477PTR_MethodDesc MethodIterator::GetMethodDesc()
6478{
6479 CONTRACTL
6480 {
6481 NOTHROW;
6482 GC_NOTRIGGER;
6483 }
6484 CONTRACTL_END;
6485
6486 return NativeUnwindInfoLookupTable::GetMethodDesc(m_pNgenLayout, GetRuntimeFunction(), m_ModuleBase);
6487}
6488
6489GCInfoToken MethodIterator::GetGCInfoToken()
6490{
6491 LIMITED_METHOD_CONTRACT;
6492
6493    // Get the GC info from the runtime function
6494 SIZE_T size;
6495 PTR_VOID pUnwindData = GetUnwindDataBlob(m_ModuleBase, GetRuntimeFunction(), &size);
6496 PTR_VOID gcInfo = (PTR_VOID)((PTR_BYTE)pUnwindData + size);
6497    // MethodIterator is used to iterate over methods of an NGen image,
6498    // so the GC info version is always current.
6499 return{ gcInfo, GCINFO_VERSION };
6500}
6501
6502TADDR MethodIterator::GetMethodStartAddress()
6503{
6504 LIMITED_METHOD_CONTRACT;
6505
6506 return m_ModuleBase + RUNTIME_FUNCTION__BeginAddress(GetRuntimeFunction());
6507}
6508
6509TADDR MethodIterator::GetMethodColdStartAddress()
6510{
6511 LIMITED_METHOD_CONTRACT;
6512
6513 PTR_RUNTIME_FUNCTION CurrentFunctionEntry = GetRuntimeFunction();
6514
6515 //
6516 // Catch up with hot code
6517 //
6518 for ( ; m_CurrentColdRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[2]; m_CurrentColdRuntimeFunctionIndex++)
6519 {
6520 PTR_RUNTIME_FUNCTION ColdFunctionEntry = m_pNgenLayout->m_pRuntimeFunctions[2] + m_CurrentColdRuntimeFunctionIndex;
6521
6522 PTR_RUNTIME_FUNCTION FunctionEntry;
6523
6524#ifdef WIN64EXCEPTIONS
6525 DWORD MainFunctionEntryRVA = m_pNgenLayout->m_ColdCodeMap[m_CurrentColdRuntimeFunctionIndex].mainFunctionEntryRVA;
6526
6527 if (MainFunctionEntryRVA == 0)
6528 continue;
6529
6530 FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(m_ModuleBase + MainFunctionEntryRVA);
6531#else
6532 DWORD ColdUnwindData = ColdFunctionEntry->UnwindData;
6533 _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
6534 FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(m_ModuleBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
6535#endif
6536
6537 if (CurrentFunctionEntry == FunctionEntry)
6538 {
6539 // we found a match
6540 return m_ModuleBase + RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry);
6541 }
6542 else
6543 if (CurrentFunctionEntry < FunctionEntry)
6544 {
6545 // method does not have cold code
6546 return NULL;
6547 }
6548 }
6549
6550 return NULL;
6551}
6552
6553PTR_RUNTIME_FUNCTION MethodIterator::GetRuntimeFunction()
6554{
6555 LIMITED_METHOD_DAC_CONTRACT;
6556 _ASSERTE(m_CurrentRuntimeFunctionIndex >= 0);
6557 _ASSERTE(m_CurrentRuntimeFunctionIndex < (m_fHotMethodsDone ? m_pNgenLayout->m_nRuntimeFunctions[1] : m_pNgenLayout->m_nRuntimeFunctions[0]));
6558 return (m_fHotMethodsDone ? m_pNgenLayout->m_pRuntimeFunctions[1] : m_pNgenLayout->m_pRuntimeFunctions[0]) + m_CurrentRuntimeFunctionIndex;
6559}
6560
6561ULONG MethodIterator::GetHotCodeSize()
6562{
6563 LIMITED_METHOD_CONTRACT;
6564 _ASSERTE(GetMethodColdStartAddress() != NULL);
6565 return m_pNgenLayout->m_ColdCodeMap[m_CurrentColdRuntimeFunctionIndex].hotCodeSize;
6566}
6567
6568void MethodIterator::GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo)
6569{
6570 CONTRACTL {
6571 NOTHROW;
6572 GC_NOTRIGGER;
6573 } CONTRACTL_END;
6574
6575 methodRegionInfo->hotStartAddress = GetMethodStartAddress();
6576 methodRegionInfo->coldStartAddress = GetMethodColdStartAddress();
6577 GCInfoToken gcInfoToken = GetGCInfoToken();
6578 methodRegionInfo->hotSize = ExecutionManager::GetNativeImageJitManager()->GetCodeManager()->GetFunctionSize(gcInfoToken);
6579 methodRegionInfo->coldSize = 0;
6580
6581 if (methodRegionInfo->coldStartAddress != NULL)
6582 {
6583 //
6584 // At this point methodRegionInfo->hotSize is set to the total size of
6585 // the method obtained from the GC info (we set that in the init code above).
6586        // Use that and GetHotCodeSize() to compute the hot and cold code sizes.
6587 //
6588
6589 ULONG hotCodeSize = GetHotCodeSize();
6590
6591 methodRegionInfo->coldSize = methodRegionInfo->hotSize - hotCodeSize;
6592 methodRegionInfo->hotSize = hotCodeSize;
6593 }
6594}
6595
6596#endif // FEATURE_PREJIT
6597
6598
6599
6600#ifdef FEATURE_READYTORUN
6601
6602//***************************************************************************************
6603//***************************************************************************************
6604
6605#ifndef DACCESS_COMPILE
6606
6607ReadyToRunJitManager::ReadyToRunJitManager()
6608{
6609 WRAPPER_NO_CONTRACT;
6610}
6611
6612#endif // #ifndef DACCESS_COMPILE
6613
6614ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken)
6615{
6616 CONTRACTL {
6617 NOTHROW;
6618 GC_NOTRIGGER;
6619 HOST_NOCALLS;
6620 SUPPORTS_DAC;
6621 } CONTRACTL_END;
6622
6623 return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
6624}
6625
6626UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken)
6627{
6628 CONTRACTL{
6629 NOTHROW;
6630 GC_NOTRIGGER;
6631 HOST_NOCALLS;
6632 SUPPORTS_DAC;
6633 } CONTRACTL_END;
6634
6635 READYTORUN_HEADER * header = JitTokenToReadyToRunInfo(MethodToken)->GetImage()->GetReadyToRunHeader();
6636
6637 return GCInfoToken::ReadyToRunVersionToGcInfoVersion(header->MajorVersion);
6638}
6639
6640PTR_RUNTIME_FUNCTION ReadyToRunJitManager::JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken)
6641{
6642 CONTRACTL {
6643 NOTHROW;
6644 GC_NOTRIGGER;
6645 HOST_NOCALLS;
6646 SUPPORTS_DAC;
6647 } CONTRACTL_END;
6648
6649 return dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
6650}
6651
6652TADDR ReadyToRunJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
6653{
6654 CONTRACTL {
6655 NOTHROW;
6656 GC_NOTRIGGER;
6657 HOST_NOCALLS;
6658 SUPPORTS_DAC;
6659 } CONTRACTL_END;
6660
6661 return JitTokenToModuleBase(MethodToken) +
6662 RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
6663}
6664
6665GCInfoToken ReadyToRunJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
6666{
6667 CONTRACTL {
6668 NOTHROW;
6669 GC_NOTRIGGER;
6670 HOST_NOCALLS;
6671 SUPPORTS_DAC;
6672 } CONTRACTL_END;
6673
6674 PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(MethodToken);
6675 TADDR baseAddress = JitTokenToModuleBase(MethodToken);
6676
6677#ifndef DACCESS_COMPILE
6678 if (g_IBCLogger.InstrEnabled())
6679 {
6680 ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(MethodToken);
6681 MethodDesc * pMD = pInfo->GetMethodDescForEntryPoint(JitTokenToStartAddress(MethodToken));
6682 g_IBCLogger.LogMethodGCInfoAccess(pMD);
6683 }
6684#endif
6685
6686 SIZE_T nUnwindDataSize;
6687 PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
6688
6689    // GCInfo immediately follows the unwind data
6690 PTR_BYTE gcInfo = dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
6691 UINT32 gcInfoVersion = JitTokenToGCInfoVersion(MethodToken);
6692
6693 return{ gcInfo, gcInfoVersion };
6694}
6695
6696unsigned ReadyToRunJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
6697{
6698 CONTRACTL {
6699 NOTHROW;
6700 GC_NOTRIGGER;
6701 } CONTRACTL_END;
6702
6703 ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(MethodToken);
6704
6705 IMAGE_DATA_DIRECTORY * pExceptionInfoDir = pReadyToRunInfo->FindSection(READYTORUN_SECTION_EXCEPTION_INFO);
6706 if (pExceptionInfoDir == NULL)
6707 return 0;
6708
6709 PEImageLayout * pLayout = pReadyToRunInfo->GetImage();
6710
6711 PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pLayout->GetRvaData(pExceptionInfoDir->VirtualAddress));
6712
6713 COUNT_T numLookupTableEntries = (COUNT_T)(pExceptionInfoDir->Size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
6714    // at least 2 entries (1 valid entry + 1 sentinel entry)
6715 _ASSERTE(numLookupTableEntries >= 2);
6716
6717 DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));
6718
6719 COUNT_T ehInfoSize = 0;
6720 DWORD exceptionInfoRVA = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
6721 numLookupTableEntries,
6722 methodStartRVA,
6723 &ehInfoSize);
6724 if (exceptionInfoRVA == 0)
6725 return 0;
6726
6727 pEnumState->iCurrentPos = 0;
6728 pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;
6729
6730 return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
6731}
6732
6733PTR_EXCEPTION_CLAUSE_TOKEN ReadyToRunJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
6734 EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
6735{
6736 CONTRACTL {
6737 NOTHROW;
6738 GC_NOTRIGGER;
6739 } CONTRACTL_END;
6740
6741 unsigned iCurrentPos = pEnumState->iCurrentPos;
6742 pEnumState->iCurrentPos++;
6743
6744 CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
6745
6746    // Copy to the output parameter. This is a nice abstraction for the future:
6747    // if we want to compress the clause encoding, we can do so without affecting the call sites.
6748 pEHClauseOut->TryStartPC = pClause->TryStartPC;
6749 pEHClauseOut->TryEndPC = pClause->TryEndPC;
6750 pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC;
6751 pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
6752 pEHClauseOut->Flags = pClause->Flags;
6753 pEHClauseOut->FilterOffset = pClause->FilterOffset;
6754
6755 return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
6756}
6757
6758StubCodeBlockKind ReadyToRunJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
6759{
6760 CONTRACTL
6761 {
6762 NOTHROW;
6763 GC_NOTRIGGER;
6764 SO_TOLERANT;
6765 MODE_ANY;
6766 }
6767 CONTRACTL_END;
6768
6769 DWORD rva = (DWORD)(currentPC - pRangeSection->LowAddress);
6770
6771 ReadyToRunInfo * pReadyToRunInfo = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
6772
6773 IMAGE_DATA_DIRECTORY * pDelayLoadMethodCallThunksDir = pReadyToRunInfo->FindSection(READYTORUN_SECTION_DELAYLOAD_METHODCALL_THUNKS);
6774 if (pDelayLoadMethodCallThunksDir != NULL)
6775 {
6776 if (pDelayLoadMethodCallThunksDir->VirtualAddress <= rva
6777 && rva < pDelayLoadMethodCallThunksDir->VirtualAddress + pDelayLoadMethodCallThunksDir->Size)
6778 return STUB_CODE_BLOCK_METHOD_CALL_THUNK;
6779 }
6780
6781 return STUB_CODE_BLOCK_UNKNOWN;
6782}
6783
6784#ifndef DACCESS_COMPILE
6785
6786TypeHandle ReadyToRunJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
6787 CrawlFrame* pCf)
6788{
6789 CONTRACTL {
6790 THROWS;
6791 GC_TRIGGERS;
6792 } CONTRACTL_END;
6793
6794 _ASSERTE(NULL != pCf);
6795 _ASSERTE(NULL != pEHClause);
6796 _ASSERTE(IsTypedHandler(pEHClause));
6797
6798 MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction());
6799
6800 _ASSERTE(pMD != NULL);
6801
6802 Module* pModule = pMD->GetModule();
6803 PREFIX_ASSUME(pModule != NULL);
6804
6805 SigTypeContext typeContext(pMD);
6806 VarKind k = hasNoVars;
6807
6808 mdToken typeTok = pEHClause->ClassToken;
6809
6810    // In the vast majority of cases the code under the "if" below
6811 // will not be executed.
6812 //
6813 // First grab the representative instantiations. For code
6814 // shared by multiple generic instantiations these are the
6815 // canonical (representative) instantiation.
6816 if (TypeFromToken(typeTok) == mdtTypeSpec)
6817 {
6818 PCCOR_SIGNATURE pSig;
6819 ULONG cSig;
6820 IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
6821
6822 SigPointer psig(pSig, cSig);
6823 k = psig.IsPolyType(&typeContext);
6824
6825 // Grab the active class and method instantiation. This exact instantiation is only
6826 // needed in the corner case of "generic" exception catching in shared
6827 // generic code. We don't need the exact instantiation if the token
6828 // doesn't contain E_T_VAR or E_T_MVAR.
6829 if ((k & hasSharableVarsMask) != 0)
6830 {
6831 Instantiation classInst;
6832 Instantiation methodInst;
6833 pCf->GetExactGenericInstantiations(&classInst,&methodInst);
6834 SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
6835 }
6836 }
6837
6838 return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
6839 ClassLoader::ReturnNullIfNotFound);
6840}
6841
6842#endif // #ifndef DACCESS_COMPILE
6843
6844//-----------------------------------------------------------------------------
6845// Ngen info manager
6846//-----------------------------------------------------------------------------
6847BOOL ReadyToRunJitManager::GetBoundariesAndVars(
6848 const DebugInfoRequest & request,
6849 IN FP_IDS_NEW fpNew, IN void * pNewData,
6850 OUT ULONG32 * pcMap,
6851 OUT ICorDebugInfo::OffsetMapping **ppMap,
6852 OUT ULONG32 * pcVars,
6853 OUT ICorDebugInfo::NativeVarInfo **ppVars)
6854{
6855 CONTRACTL {
6856 THROWS; // on OOM.
6857 GC_NOTRIGGER; // getting vars shouldn't trigger
6858 SUPPORTS_DAC;
6859 } CONTRACTL_END;
6860
6861 EECodeInfo codeInfo(request.GetStartAddress());
6862 if (!codeInfo.IsValid())
6863 return FALSE;
6864
6865 ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
6866 PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());
6867
6868 PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
6869 if (pDebugInfo == NULL)
6870 return FALSE;
6871
6872 // Uncompress. This allocates memory and may throw.
6873 CompressDebugInfo::RestoreBoundariesAndVars(
6874 fpNew, pNewData, // allocators
6875 pDebugInfo, // input
6876 pcMap, ppMap,
6877 pcVars, ppVars); // output
6878
6879 return TRUE;
6880}
6881
6882#ifdef DACCESS_COMPILE
6883//
6884// Need to write out debug info
6885//
6886void ReadyToRunJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
6887{
6888 SUPPORTS_DAC;
6889
6890 EECodeInfo codeInfo(pMD->GetNativeCode());
6891 if (!codeInfo.IsValid())
6892 return;
6893
6894 ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
6895 PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());
6896
6897 PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
6898 if (pDebugInfo == NULL)
6899 return;
6900
6901 CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo);
6902}
6903#endif
6904
6905PCODE ReadyToRunJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
6906{
6907 WRAPPER_NO_CONTRACT;
6908
6909 MethodRegionInfo methodRegionInfo;
6910 JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo);
6911
6912 if (relOffset < methodRegionInfo.hotSize)
6913 return methodRegionInfo.hotStartAddress + relOffset;
6914
6915 SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize;
6916 _ASSERTE(coldOffset < methodRegionInfo.coldSize);
6917 return methodRegionInfo.coldStartAddress + coldOffset;
6918}
6919
6920BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
6921 PCODE currentPC,
6922 MethodDesc** ppMethodDesc,
6923 OUT EECodeInfo * pCodeInfo)
6924{
6925 CONTRACTL {
6926 NOTHROW;
6927 GC_NOTRIGGER;
6928 SO_TOLERANT;
6929 SUPPORTS_DAC;
6930 } CONTRACTL_END;
6931
6932    // READYTORUN: FUTURE: Hot-cold splitting
6933
6934 TADDR currentInstr = PCODEToPINSTR(currentPC);
6935
6936 TADDR ImageBase = pRangeSection->LowAddress;
6937
6938 DWORD RelativePc = (DWORD)(currentInstr - ImageBase);
6939
6940 Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
6941 ReadyToRunInfo * pInfo = pModule->GetReadyToRunInfo();
6942
6943 COUNT_T nRuntimeFunctions = pInfo->m_nRuntimeFunctions;
6944 PTR_RUNTIME_FUNCTION pRuntimeFunctions = pInfo->m_pRuntimeFunctions;
6945
6946 int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
6947 pRuntimeFunctions,
6948 0,
6949 nRuntimeFunctions - 1);
6950
6951 if (MethodIndex < 0)
6952 return FALSE;
6953
6954 if (ppMethodDesc == NULL && pCodeInfo == NULL)
6955 {
6956 // Bail early if caller doesn't care about the MethodDesc or EECodeInfo.
6957 // Avoiding the method desc lookups below also prevents deadlocks when this
6958 // is called from IsManagedCode.
6959 return TRUE;
6960 }
6961
6962#ifdef WIN64EXCEPTIONS
6963 // Save the raw entry
6964 PTR_RUNTIME_FUNCTION RawFunctionEntry = pRuntimeFunctions + MethodIndex;
6965
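    // Funclet entries have no MethodDesc of their own; walk backwards until we reach the
    // RUNTIME_FUNCTION entry whose entry point maps to a MethodDesc (the method body).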
    MethodDesc *pMethodDesc;
    while ((pMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(pRuntimeFunctions + MethodIndex))) == NULL)
        MethodIndex--;
#endif

    PTR_RUNTIME_FUNCTION FunctionEntry = pRuntimeFunctions + MethodIndex;

    if (ppMethodDesc)
    {
#ifdef WIN64EXCEPTIONS
        *ppMethodDesc = pMethodDesc;
#else
        *ppMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
#endif
        _ASSERTE(*ppMethodDesc != NULL);
    }

    if (pCodeInfo)
    {
        pCodeInfo->m_relOffset = (DWORD)
            (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry));

        // We are using RUNTIME_FUNCTION as METHODTOKEN
        pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));

#ifdef WIN64EXCEPTIONS
        AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0));
        pCodeInfo->m_pFunctionEntry = RawFunctionEntry;
#endif
    }

    return TRUE;
}

#if defined(WIN64EXCEPTIONS)
PTR_RUNTIME_FUNCTION ReadyToRunJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    if (!pCodeInfo->IsValid())
    {
        return NULL;
    }

    // code:ReadyToRunJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only
    // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry.
    _ASSERTE(pCodeInfo->GetRelOffset() == 0);

    return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader);
}

TADDR ReadyToRunJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
{
    LIMITED_METHOD_DAC_CONTRACT;

    // READYTORUN: FUTURE: Hot-cold splitting

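    // Without hot/cold splitting there are no R2R-specific fragments to account for,
    // so the generic lookup in the base class is sufficient.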
    return IJitManager::GetFuncletStartAddress(pCodeInfo);
}

DWORD ReadyToRunJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
{
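    // The METHODTOKEN holds the RUNTIME_FUNCTION entry for the method's main body; the "+ 1"
    // moves past it to the first entry that can describe a funclet (or a body fragment on ARM).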
    PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1;

    TADDR moduleBase = JitTokenToModuleBase(MethodToken);
    DWORD nFunclets = 0;
    MethodRegionInfo regionInfo;
    JitTokenToMethodRegionInfo(MethodToken, &regionInfo);

    // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper()
    // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning
    // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function
    // fragments until the first funclet, if any, is found.

    GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0,
                                 pFirstFuncletFunctionEntry, moduleBase,
                                 &nFunclets, pStartFuncletOffsets, dwLength);

    // READYTORUN: FUTURE: Hot/cold splitting

    return nFunclets;
}

BOOL ReadyToRunJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (!pCodeInfo->IsFunclet())
        return FALSE;

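    // Fast path: this check relies on filter funclets being assigned a personality routine
    // different from the one used for ordinary method bodies, so comparing personality routine
    // RVAs is enough to tell them apart. The assert at the end cross-checks the result against
    // the generic (slow) implementation.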
    // Get the address of the personality routine for the function being queried.
    SIZE_T size;
    PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size);
    _ASSERTE(pUnwindData != NULL);

    // The personality routine is always the last element of the unwind data.
    DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1);

    // Get the personality routine for the first function in the module, which is guaranteed not to be a funclet.
    ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());
    if (pInfo->m_nRuntimeFunctions == 0)
        return FALSE;

    PTR_VOID pFirstUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pInfo->m_pRuntimeFunctions, &size);
    _ASSERTE(pFirstUnwindData != NULL);
    DWORD rvaFirstPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pFirstUnwindData) + size) - 1);

    // Compare the two personality routines. If they are different, the current function is a filter funclet.
    BOOL fRet = (rvaPersonalityRoutine != rvaFirstPersonalityRoutine);

    // Verify that the optimized implementation is in sync with the slow implementation.
    _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo));

    return fRet;
}

#endif // WIN64EXCEPTIONS

void ReadyToRunJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken,
                                                      MethodRegionInfo * methodRegionInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        HOST_NOCALLS;
        SUPPORTS_DAC;
        PRECONDITION(methodRegionInfo != NULL);
    } CONTRACTL_END;

    // READYTORUN: FUTURE: Hot-cold splitting

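    // ReadyToRun images are not hot/cold split yet, so report the whole method as a single
    // hot region and an empty cold region.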
    methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
    methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
    methodRegionInfo->coldStartAddress = 0;
    methodRegionInfo->coldSize = 0;
}

#ifdef DACCESS_COMPILE

void ReadyToRunJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    IJitManager::EnumMemoryRegions(flags);
}

#if defined(WIN64EXCEPTIONS)

//
// EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the
// specified method.
//
// Note that in theory, a dump-generation library could save the unwind information itself without help
// from us, since it is stored in the image in the standard Win64 function table layout. However,
// dump-generation libraries assume that the image will be available at debug time, and if the image
// isn't available they consider it acceptable for stack walking to break. For ngen images (which are
// created on the client), it usually isn't possible to have the image available at debug time, so for
// minidumps we must explicitly ensure the unwind information is saved into the dump.
//
// Arguments:
//    flags - EnumMem flags
//    pCodeInfo - EECodeInfo for the method in question
//
void ReadyToRunJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
{
    // Get the RUNTIME_FUNCTION entry for this method
    PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry();

    if (pRtf == NULL)
    {
        return;
    }

    // Enumerate the function entry and other entries needed to locate it in the program exceptions directory
    ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());
    EnumRuntimeFunctionEntriesToFindEntry(pRtf, pReadyToRunInfo->GetImage());

    SIZE_T size;
    PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size);
    if (pUnwindData != NULL)
        DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size);
}

#endif // WIN64EXCEPTIONS
#endif // DACCESS_COMPILE

#endif
