| 1 | // Licensed to the .NET Foundation under one or more agreements. |
| 2 | // The .NET Foundation licenses this file to you under the MIT license. |
| 3 | // See the LICENSE file in the project root for more information. |
| 4 | |
| 5 | |
| 6 | |
| 7 | |
| 8 | /****************************************************************************** |
| 9 | |
| 10 | Module Name: |
| 11 | |
| 12 | codeman.h |
| 13 | |
| 14 | Abstract: |
| 15 | |
    Wrapper to facilitate multiple JIT compiler support in the COM+ Runtime
| 17 | |
| 18 | The ExecutionManager is responsible for managing the RangeSections. |
| 19 | Given an IP, it can find the RangeSection which holds that IP. |
| 20 | |
    RangeSections contain the JITed code. Each RangeSection knows the
| 22 | IJitManager which created it. |
| 23 | |
| 24 | An IJitManager knows about which method bodies live in each RangeSection. |
| 25 | It can handle methods of one given CodeType. It can map a method body to |
| 26 | a MethodDesc. It knows where the GCInfo about the method lives. |
| 27 | Today, we have three IJitManagers viz. |
    1. EEJitManager for JIT-compiled code generated by clrjit.dll
| 29 | 2. NativeImageJitManager for ngenned code. |
    3. ReadyToRunJitManager for version-resilient ReadyToRun code
| 31 | |
| 32 | An ICodeManager knows how to crack a specific format of GCInfo. There is |
| 33 | a default format (handled by ExecutionManager::GetDefaultCodeManager()) |
| 34 | which can be shared by different IJitManagers/IJitCompilers. |
| 35 | |
| 36 | An ICorJitCompiler knows how to generate code for a method IL, and produce |
| 37 | GCInfo in a format which the corresponding IJitManager's ICodeManager |
| 38 | can handle. |
| 39 | |
| 40 | ExecutionManager |
| 41 | | |
| 42 | +-----------+---------------+---------------+-----------+--- ... |
| 43 | | | | | |
| 44 | CodeType | CodeType | |
| 45 | | | | | |
| 46 | v v v v |
| 47 | +---------------+ +--------+<---- R +---------------+ +--------+<---- R |
| 48 | |ICorJitCompiler|<---->|IJitMan |<---- R |ICorJitCompiler|<---->|IJitMan |<---- R |
| 49 | +---------------+ +--------+<---- R +---------------+ +--------+<---- R |
| 50 | | x . | x . |
| 51 | | \ . | \ . |
| 52 | v \ . v \ . |
| 53 | +--------+ R +--------+ R |
| 54 | |ICodeMan| |ICodeMan| (RangeSections) |
| 55 | +--------+ +--------+ |
| 56 | |
| 57 | ******************************************************************************/ |
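//
// Illustrative sketch (comment only, not part of this header's API surface):
// the lookup flow described above, going from an instruction pointer to the
// owning manager and method. EECodeInfo (declared later in this file) wraps
// exactly this sequence; the direct calls are shown only to make the
// relationships concrete.
//
//     PCODE ip = ...;   // some instruction pointer inside managed code
//     RangeSection * pRS = ExecutionManager::FindCodeRange(ip, ExecutionManager::GetScanFlags());
//     if (pRS != NULL)
//     {
//         IJitManager * pJM = pRS->pjit;                   // the manager that owns this range
//         MethodDesc  * pMD = NULL;
//         EECodeInfo    codeInfo;
//         if (pJM->JitCodeToMethodInfo(pRS, ip, &pMD, &codeInfo))
//         {
//             ICodeManager * pCM = pJM->GetCodeManager();  // knows how to crack the GCInfo
//         }
//     }
//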
| 58 | |
| 59 | #ifndef __CODEMAN_HPP__ |
| 60 | |
| 61 | #define __CODEMAN_HPP__ |
| 62 | |
| 63 | #include "crst.h" |
| 64 | #include "eetwain.h" |
| 65 | #include "ceeload.h" |
| 66 | #include "jitinterface.h" |
| 67 | #include "debuginfostore.h" |
| 68 | #include "shash.h" |
| 69 | #include "pedecoder.h" |
| 70 | #include "gcinfo.h" |
| 71 | #include "eexcp.h" |
| 72 | |
| 73 | #if defined(WIN64EXCEPTIONS) && !defined(USE_INDIRECT_CODEHEADER) |
| 74 | #error "WIN64EXCEPTIONS requires USE_INDIRECT_CODEHEADER" |
| 75 | #endif // WIN64EXCEPTIONS && !USE_INDIRECT_CODEHEADER |
| 76 | |
| 77 | class MethodDesc; |
| 78 | class ICorJitCompiler; |
| 79 | class IJitManager; |
| 80 | class EEJitManager; |
| 81 | class NativeImageJitManager; |
| 82 | class ReadyToRunJitManager; |
| 83 | class ExecutionManager; |
| 84 | class Thread; |
| 85 | class CrawlFrame; |
| 86 | struct EE_ILEXCEPTION; |
| 87 | struct EE_ILEXCEPTION_CLAUSE; |
| 88 | typedef struct |
| 89 | { |
| 90 | unsigned iCurrentPos; |
| 91 | TADDR pExceptionClauseArray; |
| 92 | } EH_CLAUSE_ENUMERATOR; |
| 93 | class EECodeInfo; |
| 94 | |
| 95 | #define ROUND_DOWN_TO_PAGE(x) ( (size_t) (x) & ~((size_t)GetOsPageSize()-1)) |
| 96 | #define ROUND_UP_TO_PAGE(x) (((size_t) (x) + (GetOsPageSize()-1)) & ~((size_t)GetOsPageSize()-1)) |
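// For example, with a 4 KB OS page size ROUND_DOWN_TO_PAGE(0x1234) yields 0x1000
// and ROUND_UP_TO_PAGE(0x1001) yields 0x2000; values already on a page boundary
// are returned unchanged by both macros.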
| 97 | |
| 98 | enum StubCodeBlockKind : int |
| 99 | { |
| 100 | STUB_CODE_BLOCK_UNKNOWN, |
| 101 | STUB_CODE_BLOCK_JUMPSTUB, |
| 102 | STUB_CODE_BLOCK_PRECODE, |
| 103 | STUB_CODE_BLOCK_DYNAMICHELPER, |
| 104 | // Last valid value. Note that the definition is duplicated in debug\daccess\fntableaccess.cpp |
| 105 | STUB_CODE_BLOCK_LAST = 0xF, |
| 106 | // Placeholders returned by code:GetStubCodeBlockKind |
| 107 | STUB_CODE_BLOCK_NOCODE, |
| 108 | STUB_CODE_BLOCK_MANAGED, |
| 109 | STUB_CODE_BLOCK_STUBLINK, |
    // Placeholders used by NGen images
| 111 | STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK, |
| 112 | STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK, |
    // Placeholders used by ReadyToRun images
| 114 | STUB_CODE_BLOCK_METHOD_CALL_THUNK, |
| 115 | }; |
| 116 | |
| 117 | //----------------------------------------------------------------------------- |
| 118 | // Method header which exists just before the code. |
| 119 | // Every IJitManager could have its own format for the header. |
| 120 | // Today CodeHeader is used by the EEJitManager. |
// The GCInfo version in this header is always the current GCINFO_VERSION.
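// For example, EEJitManager::GetCodeHeaderFromStartAddress (defined near the end
// of this file) recovers the CodeHeader of a jitted method simply by stepping
// backwards over it from the code start address:
//     dac_cast<PTR_CodeHeader>(methodStartAddress - sizeof(CodeHeader))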
| 122 | |
| 123 | #ifdef USE_INDIRECT_CODEHEADER |
typedef DPTR(struct _hpRealCodeHdr) PTR_RealCodeHeader;
typedef DPTR(struct _hpCodeHdr) PTR_CodeHeader;
| 126 | |
| 127 | #else // USE_INDIRECT_CODEHEADER |
| 128 | typedef DPTR(struct _hpCodeHdr) PTR_CodeHeader; |
| 129 | |
| 130 | #endif // USE_INDIRECT_CODEHEADER |
| 131 | |
| 132 | #ifdef USE_INDIRECT_CODEHEADER |
| 133 | typedef struct _hpRealCodeHdr |
| 134 | #else // USE_INDIRECT_CODEHEADER |
| 135 | typedef struct _hpCodeHdr |
| 136 | #endif // USE_INDIRECT_CODEHEADER |
| 137 | { |
| 138 | public: |
| 139 | PTR_BYTE phdrDebugInfo; |
| 140 | |
| 141 | // Note - *(&(pCodeHeader->phdrJitEHInfo) - sizeof(size_t)) |
| 142 | // contains the number of EH clauses, See EEJitManager::allocEHInfo |
| 143 | PTR_EE_ILEXCEPTION phdrJitEHInfo; |
| 144 | PTR_BYTE phdrJitGCInfo; |
| 145 | |
| 146 | #if defined(FEATURE_GDBJIT) |
| 147 | VOID* pCalledMethods; |
| 148 | #endif |
| 149 | |
| 150 | PTR_MethodDesc phdrMDesc; |
| 151 | |
| 152 | #ifdef WIN64EXCEPTIONS |
| 153 | DWORD nUnwindInfos; |
| 154 | T_RUNTIME_FUNCTION unwindInfos[0]; |
| 155 | #endif // WIN64EXCEPTIONS |
| 156 | |
| 157 | public: |
| 158 | #ifndef USE_INDIRECT_CODEHEADER |
| 159 | // |
| 160 | // Note: that the JITted code follows immediately after the MethodDesc* |
| 161 | // |
| 162 | PTR_BYTE GetDebugInfo() |
| 163 | { |
| 164 | SUPPORTS_DAC; |
| 165 | |
| 166 | return phdrDebugInfo; |
| 167 | } |
| 168 | PTR_EE_ILEXCEPTION GetEHInfo() |
| 169 | { |
| 170 | return phdrJitEHInfo; |
| 171 | } |
| 172 | PTR_BYTE GetGCInfo() |
| 173 | { |
| 174 | SUPPORTS_DAC; |
| 175 | return phdrJitGCInfo; |
| 176 | } |
| 177 | PTR_MethodDesc GetMethodDesc() |
| 178 | { |
| 179 | SUPPORTS_DAC; |
| 180 | return phdrMDesc; |
| 181 | } |
| 182 | #if defined(FEATURE_GDBJIT) |
| 183 | VOID* GetCalledMethods() |
| 184 | { |
| 185 | SUPPORTS_DAC; |
| 186 | return pCalledMethods; |
| 187 | } |
| 188 | #endif |
| 189 | TADDR GetCodeStartAddress() |
| 190 | { |
| 191 | SUPPORTS_DAC; |
| 192 | return dac_cast<TADDR>(dac_cast<PTR_CodeHeader>(this) + 1); |
| 193 | } |
| 194 | StubCodeBlockKind GetStubCodeBlockKind() |
| 195 | { |
| 196 | SUPPORTS_DAC; |
| 197 | return (StubCodeBlockKind)dac_cast<TADDR>(phdrMDesc); |
| 198 | } |
| 199 | BOOL IsStubCodeBlock() |
| 200 | { |
| 201 | SUPPORTS_DAC; |
| 202 | // Note that it is important for this comparison to be unsigned |
| 203 | return dac_cast<TADDR>(phdrMDesc) <= (TADDR)STUB_CODE_BLOCK_LAST; |
| 204 | } |
| 205 | |
| 206 | void SetDebugInfo(PTR_BYTE pDI) |
| 207 | { |
| 208 | phdrDebugInfo = pDI; |
| 209 | } |
| 210 | void SetEHInfo(PTR_EE_ILEXCEPTION pEH) |
| 211 | { |
| 212 | phdrJitEHInfo = pEH; |
| 213 | } |
| 214 | void SetGCInfo(PTR_BYTE pGC) |
| 215 | { |
| 216 | phdrJitGCInfo = pGC; |
| 217 | } |
| 218 | void SetMethodDesc(PTR_MethodDesc pMD) |
| 219 | { |
| 220 | phdrMDesc = pMD; |
| 221 | } |
| 222 | #if defined(FEATURE_GDBJIT) |
| 223 | void SetCalledMethods(VOID* pCM) |
| 224 | { |
| 225 | pCalledMethods = pCM; |
| 226 | } |
| 227 | #endif |
| 228 | void SetStubCodeBlockKind(StubCodeBlockKind kind) |
| 229 | { |
| 230 | phdrMDesc = (PTR_MethodDesc)kind; |
| 231 | } |
| 232 | #endif // !USE_INDIRECT_CODEHEADER |
| 233 | |
| 234 | // if we're using the indirect codeheaders then all enumeration is done by the code header |
| 235 | #ifndef USE_INDIRECT_CODEHEADER |
| 236 | #ifdef DACCESS_COMPILE |
| 237 | void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan); |
| 238 | #endif // DACCESS_COMPILE |
| 239 | #endif // USE_INDIRECT_CODEHEADER |
| 240 | #ifdef USE_INDIRECT_CODEHEADER |
} RealCodeHeader;
| 242 | #else // USE_INDIRECT_CODEHEADER |
| 243 | } CodeHeader; |
| 244 | #endif // USE_INDIRECT_CODEHEADER |
| 245 | |
| 246 | #ifdef USE_INDIRECT_CODEHEADER |
| 247 | typedef struct _hpCodeHdr |
| 248 | { |
    PTR_RealCodeHeader   pRealCodeHeader;
| 250 | |
| 251 | public: |
| 252 | PTR_BYTE GetDebugInfo() |
| 253 | { |
| 254 | SUPPORTS_DAC; |
| 255 | return pRealCodeHeader->phdrDebugInfo; |
| 256 | } |
| 257 | PTR_EE_ILEXCEPTION GetEHInfo() |
| 258 | { |
| 259 | return pRealCodeHeader->phdrJitEHInfo; |
| 260 | } |
| 261 | PTR_BYTE GetGCInfo() |
| 262 | { |
| 263 | SUPPORTS_DAC; |
| 264 | return pRealCodeHeader->phdrJitGCInfo; |
| 265 | } |
| 266 | PTR_MethodDesc GetMethodDesc() |
| 267 | { |
| 268 | SUPPORTS_DAC; |
| 269 | return pRealCodeHeader->phdrMDesc; |
| 270 | } |
| 271 | #if defined(FEATURE_GDBJIT) |
| 272 | VOID* GetCalledMethods() |
| 273 | { |
| 274 | SUPPORTS_DAC; |
| 275 | return pRealCodeHeader->pCalledMethods; |
| 276 | } |
| 277 | #endif |
| 278 | TADDR GetCodeStartAddress() |
| 279 | { |
| 280 | SUPPORTS_DAC; |
| 281 | return dac_cast<PCODE>(dac_cast<PTR_CodeHeader>(this) + 1); |
| 282 | } |
| 283 | StubCodeBlockKind GetStubCodeBlockKind() |
| 284 | { |
| 285 | SUPPORTS_DAC; |
| 286 | return (StubCodeBlockKind)dac_cast<TADDR>(pRealCodeHeader); |
| 287 | } |
| 288 | BOOL IsStubCodeBlock() |
| 289 | { |
| 290 | SUPPORTS_DAC; |
| 291 | // Note that it is important for this comparison to be unsigned |
| 292 | return dac_cast<TADDR>(pRealCodeHeader) <= (TADDR)STUB_CODE_BLOCK_LAST; |
| 293 | } |
| 294 | |
    void SetRealCodeHeader(BYTE* pRCH)
| 296 | { |
| 297 | pRealCodeHeader = PTR_RealCodeHeader((RealCodeHeader*)pRCH); |
| 298 | } |
| 299 | |
| 300 | void SetDebugInfo(PTR_BYTE pDI) |
| 301 | { |
| 302 | pRealCodeHeader->phdrDebugInfo = pDI; |
| 303 | } |
| 304 | void SetEHInfo(PTR_EE_ILEXCEPTION pEH) |
| 305 | { |
| 306 | pRealCodeHeader->phdrJitEHInfo = pEH; |
| 307 | } |
| 308 | void SetGCInfo(PTR_BYTE pGC) |
| 309 | { |
| 310 | pRealCodeHeader->phdrJitGCInfo = pGC; |
| 311 | } |
| 312 | void SetMethodDesc(PTR_MethodDesc pMD) |
| 313 | { |
| 314 | pRealCodeHeader->phdrMDesc = pMD; |
| 315 | } |
| 316 | #if defined(FEATURE_GDBJIT) |
| 317 | void SetCalledMethods(VOID* pCM) |
| 318 | { |
| 319 | pRealCodeHeader->pCalledMethods = pCM; |
| 320 | } |
| 321 | #endif |
| 322 | void SetStubCodeBlockKind(StubCodeBlockKind kind) |
| 323 | { |
| 324 | pRealCodeHeader = (PTR_RealCodeHeader)kind; |
| 325 | } |
| 326 | |
| 327 | #if defined(WIN64EXCEPTIONS) |
| 328 | UINT GetNumberOfUnwindInfos() |
| 329 | { |
| 330 | SUPPORTS_DAC; |
| 331 | return pRealCodeHeader->nUnwindInfos; |
| 332 | } |
| 333 | void SetNumberOfUnwindInfos(UINT nUnwindInfos) |
| 334 | { |
| 335 | LIMITED_METHOD_CONTRACT; |
| 336 | pRealCodeHeader->nUnwindInfos = nUnwindInfos; |
| 337 | } |
| 338 | PTR_RUNTIME_FUNCTION GetUnwindInfo(UINT iUnwindInfo) |
| 339 | { |
| 340 | SUPPORTS_DAC; |
| 341 | _ASSERTE(iUnwindInfo < GetNumberOfUnwindInfos()); |
| 342 | return dac_cast<PTR_RUNTIME_FUNCTION>( |
| 343 | PTR_TO_MEMBER_TADDR(RealCodeHeader, pRealCodeHeader, unwindInfos) + iUnwindInfo * sizeof(T_RUNTIME_FUNCTION)); |
| 344 | } |
| 345 | #endif // WIN64EXCEPTIONS |
| 346 | |
| 347 | #ifdef DACCESS_COMPILE |
| 348 | void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan); |
| 349 | #endif // DACCESS_COMPILE |
| 350 | |
} CodeHeader;
| 352 | #endif // USE_INDIRECT_CODEHEADER |
| 353 | |
| 354 | |
| 355 | //----------------------------------------------------------------------------- |
| 356 | // This is a structure used to consolidate the information that we |
// need when creating new code heaps.
// When creating new JumpStubs we have a constraint that the address used
| 359 | // should be in the range [loAddr..hiAddr] |
| 360 | // |
| 361 | struct CodeHeapRequestInfo |
| 362 | { |
| 363 | MethodDesc * m_pMD; |
| 364 | LoaderAllocator* m_pAllocator; |
| 365 | const BYTE * m_loAddr; // lowest address to use to satisfy our request (0 -- don't care) |
    const BYTE *   m_hiAddr;          // highest address to use to satisfy our request (0 -- don't care)
| 367 | size_t m_requestSize; // minimum size that must be made available |
    size_t         m_reserveSize;     // Amount that VirtualAlloc will reserve
| 369 | size_t m_reserveForJumpStubs; // Amount to reserve for jump stubs (won't be allocated) |
| 370 | bool m_isDynamicDomain; |
| 371 | bool m_isCollectible; |
| 372 | bool m_throwOnOutOfMemoryWithinRange; |
| 373 | |
| 374 | bool IsDynamicDomain() { return m_isDynamicDomain; } |
| 375 | void SetDynamicDomain() { m_isDynamicDomain = true; } |
| 376 | |
| 377 | bool IsCollectible() { return m_isCollectible; } |
| 378 | |
| 379 | size_t getRequestSize() { return m_requestSize; } |
| 380 | void setRequestSize(size_t requestSize) { m_requestSize = requestSize; } |
| 381 | |
| 382 | size_t getReserveSize() { return m_reserveSize; } |
| 383 | void setReserveSize(size_t reserveSize) { m_reserveSize = reserveSize; } |
| 384 | |
| 385 | size_t getReserveForJumpStubs() { return m_reserveForJumpStubs; } |
| 386 | void setReserveForJumpStubs(size_t size) { m_reserveForJumpStubs = size; } |
| 387 | |
| 388 | bool getThrowOnOutOfMemoryWithinRange() { return m_throwOnOutOfMemoryWithinRange; } |
| 389 | void setThrowOnOutOfMemoryWithinRange(bool value) { m_throwOnOutOfMemoryWithinRange = value; } |
| 390 | |
| 391 | void Init(); |
| 392 | |
| 393 | CodeHeapRequestInfo(MethodDesc *pMD) |
| 394 | : m_pMD(pMD), m_pAllocator(0), |
| 395 | m_loAddr(0), m_hiAddr(0), |
| 396 | m_requestSize(0), m_reserveSize(0), m_reserveForJumpStubs(0) |
| 397 | { WRAPPER_NO_CONTRACT; Init(); } |
| 398 | |
| 399 | CodeHeapRequestInfo(MethodDesc *pMD, LoaderAllocator* pAllocator, |
| 400 | BYTE * loAddr, BYTE * hiAddr) |
| 401 | : m_pMD(pMD), m_pAllocator(pAllocator), |
| 402 | m_loAddr(loAddr), m_hiAddr(hiAddr), |
| 403 | m_requestSize(0), m_reserveSize(0), m_reserveForJumpStubs(0) |
| 404 | { WRAPPER_NO_CONTRACT; Init(); } |
| 405 | }; |
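// Illustrative sketch (hypothetical values): a request that constrains the
// allocation to a specific address range, as a jump-stub style allocation
// would, could be built like this:
//
//     CodeHeapRequestInfo request(pMD, pLoaderAllocator, loAddr, hiAddr);
//     request.setRequestSize(requestedBytes);
//     request.setThrowOnOutOfMemoryWithinRange(false);  // caller handles a NULL result instead
//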
| 406 | |
| 407 | //----------------------------------------------------------------------------- |
| 408 | // |
| 409 | // A CodeHeap is the abstraction the IJitManager uses to allocate memory |
// needed for the jitting of a method.
| 411 | // The CodeHeap works together with the HeapList to manage a contiguous block of memory. |
// The CodeHeap is a non-growable chunk of memory (it can be reserved and
| 413 | // committed on demand). |
| 414 | // |
| 415 | // A CodeHeap is naturally protected from multiple threads by the code heap |
| 416 | // critical section - m_pCodeHeapCritSec - so if the implementation of the heap |
| 417 | // is only for the code manager, no locking needs to occur. |
| 418 | // It's important however that a delete operation on the CodeHeap (if any) happens |
| 419 | // via EEJitManager::FreeCodeMemory(HostCodeHeap*, void*) |
| 420 | // |
| 421 | // The heap to be created depends on the MethodDesc that is being compiled. |
| 422 | // Standard code uses the LoaderCodeHeap, a heap based on the LoaderHeap. |
| 423 | // DynamicMethods - and only those - use a HostCodeHeap, a heap that does |
| 424 | // normal Alloc/Free so reclamation can be performed. |
| 425 | // |
| 426 | // The convention is that every heap implementation would have a static create |
| 427 | // function that returns a HeapList. The HeapList *must* be properly initialized |
| 428 | // on return except for the next pointer |
| 429 | // |
| 430 | |
| 431 | typedef VPTR(class CodeHeap) PTR_CodeHeap; |
| 432 | |
| 433 | class CodeHeap |
| 434 | { |
| 435 | VPTR_BASE_VTABLE_CLASS(CodeHeap) |
| 436 | |
| 437 | public: |
| 438 | CodeHeap() {} |
| 439 | |
| 440 | // virtual dtor. Clean up heap |
| 441 | virtual ~CodeHeap() {} |
| 442 | |
| 443 | // Alloc the specified numbers of bytes for code. Returns NULL if the request does not fit |
| 444 | // Space for header is reserved immediately before. It is not included in size. |
    virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) = 0;
| 446 | |
| 447 | #ifdef DACCESS_COMPILE |
| 448 | virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0; |
| 449 | #endif |
| 450 | |
| 451 | protected: |
| 452 | friend class EEJitManager; |
| 453 | }; |
| 454 | |
| 455 | //----------------------------------------------------------------------------- |
| 456 | // The HeapList works together with the CodeHeap to manage a contiguous block of memory. |
| 457 | // |
| 458 | // A single HeapList contains code only for a single AppDomain. EEJitManager uses |
| 459 | // EEJitManager::DomainCodeHeapList to keep a list of HeapLists for each AppDomain. |
| 460 | |
| 461 | // The number of code heaps at which we increase the size of new code heaps. |
| 462 | #define CODE_HEAP_SIZE_INCREASE_THRESHOLD 5 |
| 463 | |
| 464 | typedef DPTR(struct _HeapList) PTR_HeapList; |
| 465 | |
| 466 | typedef struct _HeapList |
| 467 | { |
| 468 | PTR_HeapList hpNext; |
| 469 | |
| 470 | PTR_CodeHeap pHeap; |
| 471 | |
| 472 | TADDR startAddress; |
| 473 | TADDR endAddress; // the current end of the used portion of the Heap |
| 474 | |
| 475 | TADDR mapBase; // "startAddress" rounded down to GetOsPageSize(). pHdrMap is relative to this address |
| 476 | PTR_DWORD pHdrMap; // bit array used to find the start of methods |
| 477 | |
| 478 | size_t maxCodeHeapSize;// Size of the entire contiguous block of memory |
| 479 | size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block |
| 480 | |
| 481 | #if defined(_TARGET_AMD64_) |
| 482 | BYTE CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE]; // jump thunk to personality routine |
| 483 | #elif defined(_TARGET_ARM64_) |
| 484 | UINT32 CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE/sizeof(UINT32)]; // jump thunk to personality routine |
| 485 | #endif |
| 486 | |
| 487 | PTR_HeapList GetNext() |
| 488 | { SUPPORTS_DAC; return hpNext; } |
| 489 | |
| 490 | void SetNext(PTR_HeapList next) |
| 491 | { hpNext = next; } |
| 492 | |
| 493 | } HeapList; |
| 494 | |
| 495 | //----------------------------------------------------------------------------- |
| 496 | // Implementation of the standard CodeHeap. |
| 497 | // Use the ExplicitControlLoaderHeap for allocations |
| 498 | // (Check the base class above - CodeHeap - for comments on the functions) |
| 499 | // |
| 500 | typedef VPTR(class LoaderCodeHeap) PTR_LoaderCodeHeap; |
| 501 | |
| 502 | class LoaderCodeHeap : CodeHeap |
| 503 | { |
| 504 | #ifdef DACCESS_COMPILE |
| 505 | friend class ClrDataAccess; |
| 506 | #endif |
| 507 | |
| 508 | VPTR_VTABLE_CLASS(LoaderCodeHeap, CodeHeap) |
| 509 | |
| 510 | private: |
| 511 | ExplicitControlLoaderHeap m_LoaderHeap; |
| 512 | SSIZE_T m_cbMinNextPad; |
| 513 | |
| 514 | LoaderCodeHeap(size_t * pPrivatePCLBytes); |
| 515 | |
| 516 | public: |
| 517 | static HeapList* CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap); |
| 518 | |
| 519 | public: |
| 520 | virtual ~LoaderCodeHeap() |
| 521 | { |
| 522 | WRAPPER_NO_CONTRACT; |
| 523 | } |
| 524 | |
    virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) DAC_EMPTY_RET(NULL);
| 526 | |
| 527 | #ifdef DACCESS_COMPILE |
| 528 | virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) |
| 529 | { |
| 530 | WRAPPER_NO_CONTRACT; |
| 531 | m_LoaderHeap.EnumMemoryRegions(flags); |
| 532 | } |
| 533 | #endif |
| 534 | }; |
| 535 | |
| 536 | #if defined(_WIN64) |
| 537 | // On non X86 platforms, the OS defined UnwindInfo (accessed from RUNTIME_FUNCTION |
// structures) to support the ability to unwind the stack. Unfortunately the pre-Win8
| 539 | // APIs defined a callback API for publishing this data dynamically that ETW does |
| 540 | // not use (and really can't because the walk happens in the kernel). In Win8 |
| 541 | // new APIs were defined that allow incremental publishing via a table. |
| 542 | // |
| 543 | // UnwindInfoTable is a class that wraps the OS APIs that we use to publish |
| 544 | // this table. Its job is to allocate the table, deallocate it when we are |
| 545 | // done and allow us to add new entries one at a time (AddToUnwindInfoTable) |
| 546 | // |
// Each RangeSection has a UnwindInfoTable which holds the
// RUNTIME_FUNCTION array as well as other bookkeeping (the current and maximum
// size of the array, and the handle used to publish it to the OS).
| 550 | // |
// Ideally we would just use this new API when it is available, however to minimize
// risk and to make the change perfectly pay-for-play, we use the original mechanism
| 553 | // ALWAYS, and in addition publish via the Table ONLY WHEN ETW JIT events are turned |
| 554 | // on. |
| 555 | // |
| 556 | // This class implements a 'catchup' routine that allows us to publish existing JITTed |
| 557 | // methods when ETW turns on. Currently this is 'sticky' (once we start publishing |
// both ways, we do so for the life of the process).
| 559 | // |
| 560 | typedef DPTR(class UnwindInfoTable) PTR_UnwindInfoTable; |
| 561 | class UnwindInfoTable { |
| 562 | public: |
| 563 | // All public functions are thread-safe. |
| 564 | |
| 565 | // These are wrapper functions over the UnwindInfoTable functions that are specific to JIT compile code |
| 566 | static void PublishUnwindInfoForMethod(TADDR baseAddress, T_RUNTIME_FUNCTION* unwindInfo, int unwindInfoCount); |
| 567 | static void UnpublishUnwindInfoForMethod(TADDR entryPoint); |
| 568 | |
| 569 | // These are lower level functions that assume you have found the list of UnwindInfoTable entries |
| 570 | // These are used by the stublinker and the high-level method functions above |
| 571 | static void AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, T_RUNTIME_FUNCTION* data, TADDR rangeStart, TADDR rangeEnd); |
| 572 | static void RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint); |
| 573 | |
| 574 | // By default this publishing is off, this routine turns it on (and optionally publishes existing methods) |
| 575 | static void PublishUnwindInfo(bool publishExisting); |
| 576 | ~UnwindInfoTable(); |
| 577 | |
| 578 | private: |
| 579 | void UnRegister(); |
| 580 | void Register(); |
| 581 | UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size); |
| 582 | static void PublishUnwindInfoForExistingMethods(); |
| 583 | |
| 584 | private: |
| 585 | static Volatile<bool> s_publishingActive; // Publishing to ETW is turned on |
| 586 | static class Crst* s_pUnwindInfoTableLock; // lock protects all public UnwindInfoTable functions |
| 587 | |
| 588 | PVOID hHandle; // OS handle for a published RUNTIME_FUNCTION table |
| 589 | ULONG_PTR iRangeStart; // Start of memory described by this table |
| 590 | ULONG_PTR iRangeEnd; // End of memory described by this table |
| 591 | T_RUNTIME_FUNCTION* pTable; // The actual list of method unwind info, sorted by address |
| 592 | ULONG cTableCurCount; |
| 593 | ULONG cTableMaxCount; |
| 594 | int cDeletedEntries; // Number of slots we removed. |
| 595 | }; |
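// Illustrative sketch of the publishing calls described above (the variable
// names are hypothetical; the real callers are the code allocation and
// reclamation paths in the EEJitManager):
//
//     // after jitting a method whose unwind data lives in pUnwindInfos[0..n):
//     UnwindInfoTable::PublishUnwindInfoForMethod(baseAddress, pUnwindInfos, n);
//
//     // when the method's code is freed:
//     UnwindInfoTable::UnpublishUnwindInfoForMethod(entryPoint);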
| 596 | |
| 597 | #endif // defined(_WIN64) |
| 598 | |
| 599 | //----------------------------------------------------------------------------- |
| 600 | // The ExecutionManager uses RangeSection as the abstraction of a contiguous |
| 601 | // address range to track the code heaps. |
| 602 | |
| 603 | typedef DPTR(struct RangeSection) PTR_RangeSection; |
| 604 | |
| 605 | struct RangeSection |
| 606 | { |
| 607 | TADDR LowAddress; |
| 608 | TADDR HighAddress; |
| 609 | |
| 610 | PTR_IJitManager pjit; // The owner of this address range |
| 611 | |
| 612 | #ifndef DACCESS_COMPILE |
    // Volatile because the list can be walked lock-free
| 614 | Volatile<RangeSection *> pnext; // link rangesections in a sorted list |
| 615 | #else |
| 616 | PTR_RangeSection pnext; |
| 617 | #endif |
| 618 | |
    PTR_RangeSection pLastUsed; // for the head node only: a link to the RangeSection that was used most recently
| 620 | |
| 621 | enum RangeSectionFlags |
| 622 | { |
| 623 | RANGE_SECTION_NONE = 0x0, |
| 624 | RANGE_SECTION_COLLECTIBLE = 0x1, |
| 625 | RANGE_SECTION_CODEHEAP = 0x2, |
| 626 | #ifdef FEATURE_READYTORUN |
| 627 | RANGE_SECTION_READYTORUN = 0x4, |
| 628 | #endif |
| 629 | }; |
| 630 | |
| 631 | DWORD flags; |
| 632 | |
| 633 | // union |
| 634 | // { |
| 635 | // PTR_CodeHeap pCodeHeap; // valid if RANGE_SECTION_HEAP is set |
| 636 | // PTR_Module pZapModule; // valid if RANGE_SECTION_HEAP is not set |
| 637 | // }; |
| 638 | TADDR pHeapListOrZapModule; |
| 639 | #if defined(_WIN64) |
| 640 | PTR_UnwindInfoTable pUnwindInfoTable; // Points to unwind information for this memory range. |
| 641 | #endif // defined(_WIN64) |
| 642 | }; |
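// Illustrative sketch (hypothetical reader): once a RangeSection has been
// located, the flags tell which interpretation of pHeapListOrZapModule applies:
//
//     if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
//     {
//         // JIT-allocated code: the field holds a HeapList
//         PTR_HeapList pHeapList = dac_cast<PTR_HeapList>(pRS->pHeapListOrZapModule);
//     }
//     else
//     {
//         // otherwise it holds the Module of a native (NGen/ReadyToRun) image
//         PTR_Module pZapModule = dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
//     }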
| 643 | |
| 644 | /*****************************************************************************/ |
| 645 | |
| 646 | #ifdef CROSSGEN_COMPILE |
| 647 | #define CodeFragmentHeap LoaderHeap |
| 648 | #else |
| 649 | |
| 650 | // |
// A simple linked-list based allocator to expose a code heap as a loader heap for allocation of precodes.
// The loader-heap-like interface is necessary to support backout. It is also conveniently used to reduce space overhead
| 653 | // for small blocks that are common for precodes. |
| 654 | // |
// Allocating precodes on the code heap keeps them close to other code; it reduces the need for jump stubs and thus the chance
// that we run into a bogus OOM because of not being able to allocate memory in a particular memory range.
| 657 | // |
| 658 | class CodeFragmentHeap : public ILoaderHeapBackout |
| 659 | { |
| 660 | PTR_LoaderAllocator m_pAllocator; |
| 661 | |
| 662 | struct FreeBlock |
| 663 | { |
| 664 | DPTR(FreeBlock) m_pNext; // Next block |
| 665 | SIZE_T m_dwSize; // Size of this block (includes size of FreeBlock) |
| 666 | }; |
| 667 | typedef DPTR(FreeBlock) PTR_FreeBlock; |
| 668 | |
| 669 | PTR_FreeBlock m_pFreeBlocks; |
| 670 | StubCodeBlockKind m_kind; |
| 671 | |
| 672 | Crst m_CritSec; |
| 673 | |
| 674 | void AddBlock(VOID * pMem, size_t dwSize); |
| 675 | void RemoveBlock(FreeBlock ** ppBlock); |
| 676 | |
| 677 | public: |
| 678 | CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind); |
| 679 | virtual ~CodeFragmentHeap() {} |
| 680 | |
| 681 | TaggedMemAllocPtr RealAllocAlignedMem(size_t dwRequestedSize |
| 682 | ,unsigned dwAlignment |
| 683 | #ifdef _DEBUG |
| 684 | ,__in __in_z const char *szFile |
| 685 | ,int lineNum |
| 686 | #endif |
| 687 | ); |
| 688 | |
| 689 | virtual void RealBackoutMem(void *pMem |
| 690 | , size_t dwSize |
| 691 | #ifdef _DEBUG |
| 692 | , __in __in_z const char *szFile |
| 693 | , int lineNum |
| 694 | , __in __in_z const char *szAllocFile |
| 695 | , int allocLineNum |
| 696 | #endif |
| 697 | ) DAC_EMPTY(); |
| 698 | |
| 699 | #ifdef DACCESS_COMPILE |
| 700 | void EnumMemoryRegions(enum CLRDataEnumMemoryFlags flags) |
| 701 | { |
| 702 | WRAPPER_NO_CONTRACT; |
| 703 | DAC_ENUM_DTHIS(); |
| 704 | } |
| 705 | #endif |
| 706 | }; |
| 707 | #endif // CROSSGEN_COMPILE |
| 708 | |
| 709 | typedef DPTR(class CodeFragmentHeap) PTR_CodeFragmentHeap; |
| 710 | |
| 711 | //----------------------------------------------------------------------------- |
| 712 | // |
| 713 | // Manages the CodeHeap for some of the RangeSections in the ExecutionManager |
| 714 | // |
| 715 | //----------------------------------------------------------------------------- |
| 716 | |
| 717 | class IJitManager |
| 718 | { |
| 719 | VPTR_BASE_VTABLE_CLASS(IJitManager) |
| 720 | |
| 721 | public: |
| 722 | struct MethodRegionInfo |
| 723 | { |
| 724 | TADDR hotStartAddress; |
| 725 | size_t hotSize; |
| 726 | TADDR coldStartAddress; |
| 727 | size_t coldSize; |
| 728 | }; |
| 729 | |
| 730 | #ifndef DACCESS_COMPILE |
| 731 | IJitManager(); |
| 732 | #endif // !DACCESS_COMPILE |
| 733 | |
| 734 | virtual DWORD GetCodeType() = 0; |
| 735 | |
| 736 | // Used to read debug info. |
| 737 | // 1) Caller passes an allocator which these functions use to allocate memory. |
| 738 | // This is b/c the store may need to decompress the information just to figure out the size. |
| 739 | // 2) Note that these methods use Uncompressed (Normal) jit data. |
| 740 | // Compression is just an implementation detail. |
| 741 | // 3) These throw on OOM (exceptional case), and may return a |
| 742 | // failing HR if no data is available (not exceptional) |
| 743 | |
| 744 | virtual BOOL GetBoundariesAndVars( |
| 745 | const DebugInfoRequest & request, |
| 746 | IN FP_IDS_NEW fpNew, IN void * pNewData, |
| 747 | OUT ULONG32 * pcMap, |
| 748 | OUT ICorDebugInfo::OffsetMapping **ppMap, |
| 749 | OUT ULONG32 * pcVars, |
| 750 | OUT ICorDebugInfo::NativeVarInfo **ppVars) = 0; |
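    // Illustrative sketch of the allocator-callback pattern described above.
    // The caller owns 'request' and the allocator; 'fpNew'/'pNewData' stand in
    // for whatever allocator the debugger side supplies:
    //
    //     ULONG32 cMap = 0, cVars = 0;
    //     ICorDebugInfo::OffsetMapping * pMap  = NULL;
    //     ICorDebugInfo::NativeVarInfo * pVars = NULL;
    //     if (pJitMan->GetBoundariesAndVars(request, fpNew, pNewData,
    //                                       &cMap, &pMap, &cVars, &pVars))
    //     {
    //         // pMap/pVars were allocated via fpNew and describe the requested method
    //     }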
| 751 | |
| 752 | virtual BOOL JitCodeToMethodInfo( |
| 753 | RangeSection * pRangeSection, |
| 754 | PCODE currentPC, |
| 755 | MethodDesc** ppMethodDesc, |
| 756 | OUT EECodeInfo * pCodeInfo) = 0; |
| 757 | |
| 758 | virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset) = 0; |
| 759 | |
| 760 | virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken)=0; |
| 761 | virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo *methodRegionInfo) = 0; |
| 762 | virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)=0; |
| 763 | virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState, |
| 764 | EE_ILEXCEPTION_CLAUSE* pEHclause)=0; |
| 765 | #ifndef DACCESS_COMPILE |
| 766 | virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause, |
| 767 | CrawlFrame *pCf)=0; |
| 768 | #endif // #ifndef DACCESS_COMPILE |
| 769 | |
| 770 | virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken)=0; |
| 771 | PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken) |
| 772 | { |
| 773 | return GetGCInfoToken(MethodToken).Info; |
| 774 | } |
| 775 | |
| 776 | TADDR JitTokenToModuleBase(const METHODTOKEN& MethodToken); |
| 777 | |
| 778 | #if defined(WIN64EXCEPTIONS) |
| 779 | virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo) = 0; |
| 780 | |
| 781 | // GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address. |
| 782 | virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo); |
| 783 | |
| 784 | virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength) = 0; |
| 785 | |
| 786 | BOOL IsFunclet(EECodeInfo * pCodeInfo); |
| 787 | virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo); |
| 788 | #endif // WIN64EXCEPTIONS |
| 789 | |
| 790 | virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC) = 0; |
| 791 | |
| 792 | // DAC-specific virtual functions. |
| 793 | // Note that these MUST occur below any other virtual function definitions to ensure that the vtable in |
| 794 | // DAC builds is compatible with the non-DAC one so that DAC virtual dispatch will work correctly. |
| 795 | #if defined(DACCESS_COMPILE) |
| 796 | virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
| 797 | virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD) = 0; |
| 798 | #if defined(WIN64EXCEPTIONS) |
| 799 | // Enumerate the memory necessary to retrieve the unwind info for a specific method |
| 800 | virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo) = 0; |
| 801 | #endif // WIN64EXCEPTIONS |
| 802 | #endif // DACCESS_COMPILE |
| 803 | |
| 804 | #ifndef DACCESS_COMPILE |
| 805 | void SetCodeManager(ICodeManager *codeMgr) |
| 806 | { |
| 807 | LIMITED_METHOD_CONTRACT; |
| 808 | |
| 809 | m_runtimeSupport = codeMgr; |
| 810 | } |
| 811 | #endif // !DACCESS_COMPILE |
| 812 | |
| 813 | ICodeManager *GetCodeManager() |
| 814 | { |
| 815 | LIMITED_METHOD_DAC_CONTRACT; |
| 816 | return m_runtimeSupport; |
| 817 | } |
| 818 | |
| 819 | protected: |
| 820 | PTR_ICodeManager m_runtimeSupport; |
| 821 | }; |
| 822 | |
| 823 | //----------------------------------------------------------------------------- |
| 824 | |
| 825 | class HostCodeHeap; |
| 826 | typedef VPTR(class HostCodeHeap) PTR_HostCodeHeap; |
| 827 | |
| 828 | typedef VPTR(class EEJitManager) PTR_EEJitManager; |
| 829 | typedef VPTR(class NativeImageJitManager) PTR_NativeImageJitManager; |
| 830 | typedef VPTR(class ReadyToRunJitManager) PTR_ReadyToRunJitManager; |
| 831 | |
struct JumpStubBlockHeader
{
    JumpStubBlockHeader * m_next;
    UINT32                m_used;
    UINT32                m_allocated;

    LoaderAllocator* GetLoaderAllocator()
    {
        _ASSERTE(m_zero == 0);
        return m_Allocator;
    }

    void SetLoaderAllocator(LoaderAllocator * loaderAllocator)
    {
        m_zero = 0;
        m_Allocator = loaderAllocator;
    }

    HostCodeHeap* GetHostCodeHeap()
    {
        WRAPPER_NO_CONTRACT;
        _ASSERTE(m_zero == -1);
        return m_CodeHeap;
    }

    void SetHostCodeHeap(HostCodeHeap * hostCodeHeap)
    {
        m_zero = -1;
        m_CodeHeap = hostCodeHeap;
    }

private:
    union {
        HostCodeHeap       *m_CodeHeap;
        LoaderAllocator    *m_Allocator;
    };

    INT64                m_zero;      // 0 for normal methods and -1 for LCG methods
};
| 871 | |
| 872 | |
| 873 | /*****************************************************************************/ |
| 874 | |
| 875 | class EEJitManager : public IJitManager |
| 876 | { |
| 877 | #ifdef DACCESS_COMPILE |
| 878 | friend class ClrDataAccess; |
| 879 | #endif |
| 880 | friend class CheckDuplicatedStructLayouts; |
| 881 | friend class CodeHeapIterator; |
| 882 | |
| 883 | VPTR_VTABLE_CLASS(EEJitManager, IJitManager) |
| 884 | |
| 885 | public: |
| 886 | |
| 887 | // Failing to load the main JIT is a failure. |
| 888 | // If the user requested an altjit and we failed to load an altjit, that is also a failure. |
| 889 | BOOL IsJitLoaded() |
| 890 | { |
| 891 | LIMITED_METHOD_CONTRACT; |
| 892 | |
| 893 | return (m_jit != NULL) |
| 894 | #ifdef ALLOW_SXS_JIT |
| 895 | && (!m_AltJITRequired || (m_alternateJit != NULL)) |
| 896 | #endif // ALLOW_SXS_JIT |
| 897 | ; |
| 898 | } |
| 899 | |
| 900 | #ifdef ALLOW_SXS_JIT |
| 901 | BOOL IsMainJitLoaded() |
| 902 | { |
| 903 | LIMITED_METHOD_CONTRACT; |
| 904 | |
| 905 | return (m_jit != NULL); |
| 906 | } |
| 907 | |
| 908 | BOOL IsAltJitLoaded() |
| 909 | { |
| 910 | LIMITED_METHOD_CONTRACT; |
| 911 | |
| 912 | return (m_alternateJit != NULL); |
| 913 | } |
| 914 | #endif // ALLOW_SXS_JIT |
| 915 | |
| 916 | VOID ClearCache() |
| 917 | { |
| 918 | CONTRACTL { |
| 919 | NOTHROW; |
| 920 | GC_NOTRIGGER; |
| 921 | } CONTRACTL_END; |
| 922 | |
| 923 | if( m_jit != NULL ) |
| 924 | { |
| 925 | m_jit->clearCache(); |
| 926 | } |
| 927 | #ifdef ALLOW_SXS_JIT |
| 928 | if( m_alternateJit != NULL ) |
| 929 | { |
| 930 | m_alternateJit->clearCache(); |
| 931 | } |
| 932 | #endif // ALLOW_SXS_JIT |
| 933 | } |
| 934 | |
| 935 | BOOL IsCacheCleanupRequired() |
| 936 | { |
| 937 | CONTRACTL { |
| 938 | NOTHROW; |
| 939 | GC_NOTRIGGER; |
| 940 | } CONTRACTL_END; |
| 941 | |
| 942 | BOOL ret = FALSE; |
| 943 | |
| 944 | if( m_jit != NULL ) |
| 945 | { |
| 946 | if (m_jit->isCacheCleanupRequired()) |
| 947 | ret = TRUE; |
| 948 | } |
| 949 | |
| 950 | #ifdef ALLOW_SXS_JIT |
| 951 | if( !ret && m_alternateJit != NULL ) |
| 952 | { |
| 953 | if (m_alternateJit->isCacheCleanupRequired()) |
| 954 | ret = TRUE; |
| 955 | } |
| 956 | #endif // ALLOW_SXS_JIT |
| 957 | |
| 958 | return ret; |
| 959 | } |
| 960 | |
| 961 | #if !defined CROSSGEN_COMPILE && !defined DACCESS_COMPILE |
| 962 | EEJitManager(); |
| 963 | |
| 964 | // No destructor necessary. Only one instance of this class that is destroyed at process shutdown. |
| 965 | // ~EEJitManager(); |
| 966 | #endif // !CROSSGEN_COMPILE && !DACCESS_COMPILE |
| 967 | |
| 968 | |
| 969 | virtual DWORD GetCodeType() |
| 970 | { |
| 971 | LIMITED_METHOD_DAC_CONTRACT; |
| 972 | return (miManaged | miIL); |
| 973 | } |
| 974 | |
| 975 | #ifndef CROSSGEN_COMPILE |
| 976 | // Used to read debug info. |
| 977 | virtual BOOL GetBoundariesAndVars( |
| 978 | const DebugInfoRequest & request, |
| 979 | IN FP_IDS_NEW fpNew, IN void * pNewData, |
| 980 | OUT ULONG32 * pcMap, |
| 981 | OUT ICorDebugInfo::OffsetMapping **ppMap, |
| 982 | OUT ULONG32 * pcVars, |
| 983 | OUT ICorDebugInfo::NativeVarInfo **ppVars); |
| 984 | |
| 985 | virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset); |
| 986 | #endif // !CROSSGEN_COMPILE |
| 987 | |
| 988 | virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection, |
| 989 | PCODE currentPC, |
| 990 | MethodDesc ** ppMethodDesc, |
| 991 | EECodeInfo * pCodeInfo); |
| 992 | |
| 993 | virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken); |
| 994 | virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo *methodRegionInfo); |
| 995 | |
| 996 | #ifndef CROSSGEN_COMPILE |
| 997 | virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState); |
| 998 | virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState, |
| 999 | EE_ILEXCEPTION_CLAUSE* pEHclause); |
| 1000 | #ifndef DACCESS_COMPILE |
| 1001 | virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause, |
| 1002 | CrawlFrame *pCf); |
| 1003 | #endif // !DACCESS_COMPILE |
| 1004 | GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken); |
| 1005 | #endif // !CROSSGEN_COMPILE |
| 1006 | #if !defined DACCESS_COMPILE && !defined CROSSGEN_COMPILE |
| 1007 | void RemoveJitData(CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len); |
| 1008 | void Unload(LoaderAllocator* pAllocator); |
| 1009 | void CleanupCodeHeaps(); |
| 1010 | |
| 1011 | BOOL LoadJIT(); |
| 1012 | |
| 1013 | CodeHeader* allocCode(MethodDesc* pFD, size_t blockSize, size_t reserveForJumpStubs, CorJitAllocMemFlag flag |
| 1014 | #ifdef WIN64EXCEPTIONS |
| 1015 | , UINT nUnwindInfos |
| 1016 | , TADDR * pModuleBase |
| 1017 | #endif |
| 1018 | ); |
    BYTE *              allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize);
    EE_ILEXCEPTION*     allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize);
| 1021 | JumpStubBlockHeader* allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps, |
| 1022 | BYTE * loAddr, BYTE * hiAddr, |
| 1023 | LoaderAllocator *pLoaderAllocator, |
| 1024 | bool throwOnOutOfMemoryWithinRange); |
| 1025 | |
| 1026 | void * allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind); |
| 1027 | #endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE |
| 1028 | |
| 1029 | static CodeHeader * GetCodeHeader(const METHODTOKEN& MethodToken); |
| 1030 | static CodeHeader * GetCodeHeaderFromStartAddress(TADDR methodStartAddress); |
| 1031 | |
| 1032 | #ifndef CROSSGEN_COMPILE |
| 1033 | #if defined(WIN64EXCEPTIONS) |
| 1034 | // Compute function entry lazily. Do not call directly. Use EECodeInfo::GetFunctionEntry instead. |
| 1035 | virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo); |
| 1036 | |
| 1037 | virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength); |
| 1038 | #endif // WIN64EXCEPTIONS |
| 1039 | |
| 1040 | virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC); |
| 1041 | |
| 1042 | #if defined(DACCESS_COMPILE) |
| 1043 | virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
| 1044 | virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD); |
| 1045 | #endif // DACCESS_COMPILE |
| 1046 | #if defined(WIN64EXCEPTIONS) |
| 1047 | // Enumerate the memory necessary to retrieve the unwind info for a specific method |
| 1048 | virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo) |
| 1049 | { |
        // We don't need to explicitly enumerate the memory for unwind information for JITted methods because
| 1051 | // it is stored using the Win64 standard dynamic function table mechanism, and dump generation code knows |
| 1052 | // it needs to call our code:OutOfProcessFunctionTableCallback in order to save the function table including |
| 1053 | // unwind information at dump generation time (since it's dynamic, it will not be otherwise |
| 1054 | // available at debug time). |
| 1055 | } |
| 1056 | #endif // WIN64EXCEPTIONS |
| 1057 | #endif // !CROSSGEN_COMPILE |
| 1058 | |
| 1059 | #ifndef CROSSGEN_COMPILE |
| 1060 | #ifndef DACCESS_COMPILE |
| 1061 | // Heap Management functions |
| 1062 | void NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet); |
| 1063 | #endif // !DACCESS_COMPILE |
| 1064 | |
| 1065 | static TADDR FindMethodCode(RangeSection * pRangeSection, PCODE currentPC); |
| 1066 | static TADDR FindMethodCode(PCODE currentPC); |
| 1067 | #endif // !CROSSGEN_COMPILE |
| 1068 | |
| 1069 | #if !defined DACCESS_COMPILE && !defined CROSSGEN_COMPILE |
| 1070 | void FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart); |
| 1071 | void RemoveFromCleanupList(HostCodeHeap *pCodeHeap); |
| 1072 | void AddToCleanupList(HostCodeHeap *pCodeHeap); |
| 1073 | void DeleteCodeHeap(HeapList *pHeapList); |
| 1074 | void RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator); |
| 1075 | #endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE |
| 1076 | |
| 1077 | private : |
| 1078 | #ifndef CROSSGEN_COMPILE |
| 1079 | struct DomainCodeHeapList { |
| 1080 | LoaderAllocator *m_pAllocator; |
| 1081 | CDynArray<HeapList *> m_CodeHeapList; |
| 1082 | DomainCodeHeapList(); |
| 1083 | ~DomainCodeHeapList(); |
| 1084 | }; |
| 1085 | #endif |
| 1086 | |
| 1087 | #ifndef DACCESS_COMPILE |
| 1088 | #ifndef CROSSGEN_COMPILE |
| 1089 | HeapList* NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList); |
| 1090 | bool CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap); |
| 1091 | void* allocCodeRaw(CodeHeapRequestInfo *pInfo, |
                             size_t header, size_t blockSize, unsigned align,
| 1093 | HeapList ** ppCodeHeap); |
| 1094 | |
| 1095 | DomainCodeHeapList *GetCodeHeapList(CodeHeapRequestInfo *pInfo, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE); |
| 1096 | DomainCodeHeapList *CreateCodeHeapList(CodeHeapRequestInfo *pInfo); |
| 1097 | LoaderHeap* GetJitMetaHeap(MethodDesc *pMD); |
| 1098 | #endif // !CROSSGEN_COMPILE |
| 1099 | |
| 1100 | HeapList * GetCodeHeapList() |
| 1101 | { |
| 1102 | return m_pCodeHeap; |
| 1103 | } |
| 1104 | |
| 1105 | #ifndef CROSSGEN_COMPILE |
| 1106 | protected: |
    void *              allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize);
| 1108 | private: |
| 1109 | #endif |
| 1110 | #endif // !DACCESS_COMPILE |
| 1111 | |
| 1112 | PTR_HeapList m_pCodeHeap; |
| 1113 | |
| 1114 | protected : |
| 1115 | Crst m_CodeHeapCritSec; |
| 1116 | |
| 1117 | #if !defined(DACCESS_COMPILE) |
| 1118 | public: |
| 1119 | class CodeHeapIterator |
| 1120 | { |
| 1121 | CrstHolder m_lockHolder; |
| 1122 | HeapList *m_pHeapList; |
| 1123 | LoaderAllocator *m_pLoaderAllocator; |
| 1124 | MethodSectionIterator m_Iterator; |
| 1125 | MethodDesc *m_pCurrent; |
| 1126 | |
| 1127 | public: |
| 1128 | CodeHeapIterator(LoaderAllocator *pLoaderAllocatorFilter = NULL); |
| 1129 | ~CodeHeapIterator(); |
| 1130 | BOOL Next(); |
| 1131 | |
| 1132 | MethodDesc *GetMethod() |
| 1133 | { |
| 1134 | LIMITED_METHOD_CONTRACT; |
| 1135 | return m_pCurrent; |
| 1136 | } |
| 1137 | |
| 1138 | TADDR GetMethodCode() |
| 1139 | { |
| 1140 | LIMITED_METHOD_CONTRACT; |
| 1141 | return (TADDR)m_Iterator.GetMethodCode(); |
| 1142 | } |
| 1143 | }; |
| 1144 | #endif // !DACCESS_COMPILE |
| 1145 | |
| 1146 | private: |
| 1147 | CORJIT_FLAGS m_CPUCompileFlags; |
| 1148 | |
| 1149 | #if !defined CROSSGEN_COMPILE && !defined DACCESS_COMPILE |
| 1150 | void SetCpuInfo(); |
| 1151 | #endif |
| 1152 | |
| 1153 | public: |
| 1154 | inline CORJIT_FLAGS GetCPUCompileFlags() |
| 1155 | { |
| 1156 | LIMITED_METHOD_CONTRACT; |
| 1157 | return m_CPUCompileFlags; |
| 1158 | } |
| 1159 | |
| 1160 | private : |
| 1161 | PTR_HostCodeHeap m_cleanupList; |
| 1162 | //When EH Clauses are resolved we need to atomically update the TypeHandle |
| 1163 | Crst m_EHClauseCritSec; |
| 1164 | |
| 1165 | #if !defined CROSSGEN_COMPILE |
| 1166 | // must hold critical section to access this structure. |
| 1167 | CUnorderedArray<DomainCodeHeapList *, 5> m_DomainCodeHeaps; |
| 1168 | CUnorderedArray<DomainCodeHeapList *, 5> m_DynamicDomainCodeHeaps; |
| 1169 | #endif |
| 1170 | |
| 1171 | #ifdef _TARGET_AMD64_ |
| 1172 | private: |
| 1173 | // |
| 1174 | // List of reserved memory blocks to be used for jump stub allocation if no suitable memory block is found |
| 1175 | // via the regular mechanism |
| 1176 | // |
| 1177 | struct EmergencyJumpStubReserve |
| 1178 | { |
| 1179 | EmergencyJumpStubReserve * m_pNext; |
| 1180 | BYTE * m_ptr; |
| 1181 | SIZE_T m_size; |
| 1182 | SIZE_T m_free; |
| 1183 | }; |
| 1184 | EmergencyJumpStubReserve * m_pEmergencyJumpStubReserveList; |
| 1185 | |
| 1186 | public: |
| 1187 | BYTE * AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize); |
| 1188 | VOID EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize); |
| 1189 | #endif |
| 1190 | |
| 1191 | public: |
| 1192 | ICorJitCompiler * m_jit; |
| 1193 | HINSTANCE m_JITCompiler; |
| 1194 | #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) |
| 1195 | HINSTANCE m_JITCompilerOther; // Stores the handle of the legacy JIT, if one is loaded. |
| 1196 | #endif |
| 1197 | |
| 1198 | #ifdef ALLOW_SXS_JIT |
| 1199 | //put these at the end so that we don't mess up the offsets in the DAC. |
| 1200 | ICorJitCompiler * m_alternateJit; |
| 1201 | HINSTANCE m_AltJITCompiler; |
| 1202 | bool m_AltJITRequired; |
| 1203 | #endif //ALLOW_SXS_JIT |
| 1204 | }; |
| 1205 | |
| 1206 | //***************************************************************************** |
| 1207 | // |
| 1208 | // This class manages IJitManagers and ICorJitCompilers. It has only static |
// members. It should never be constructed.
| 1210 | // |
| 1211 | //***************************************************************************** |
| 1212 | |
| 1213 | class ExecutionManager |
| 1214 | { |
| 1215 | friend class CorExternalDataAccess; |
| 1216 | friend struct _DacGlobals; |
| 1217 | |
| 1218 | #ifdef DACCESS_COMPILE |
| 1219 | friend class ClrDataAccess; |
| 1220 | #endif |
| 1221 | |
| 1222 | public: |
| 1223 | static void Init(); |
| 1224 | |
| 1225 | enum ScanFlag |
| 1226 | { |
| 1227 | // When this is passed to a function, it must directly acquire a reader lock |
| 1228 | // before it may continue |
| 1229 | ScanReaderLock, |
| 1230 | |
| 1231 | // This means the function need not directly acquire a reader lock; however, it |
| 1232 | // may call other functions that may require other reader locks (e.g., |
| 1233 | // ExecutionManager::FindJitMan may be called with ScanNoReaderLock, but |
| 1234 | // still calls IJitManager::JitCodeToMethodInfo which acquires its own |
| 1235 | // IJitManager reader lock) |
| 1236 | ScanNoReaderLock |
| 1237 | }; |
| 1238 | |
| 1239 | // Returns default scan flag for current thread |
| 1240 | static ScanFlag GetScanFlags(); |
| 1241 | |
| 1242 | // Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64. |
| 1243 | static BOOL IsManagedCode(PCODE currentPC); |
| 1244 | |
| 1245 | // Special version with profiler hook |
| 1246 | static BOOL IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference, BOOL *pfFailedReaderLock); |
| 1247 | |
| 1248 | // Returns method's start address for a given PC |
| 1249 | static PCODE GetCodeStartAddress(PCODE currentPC); |
| 1250 | |
| 1251 | // Returns methodDesc for given PC |
| 1252 | static MethodDesc * GetCodeMethodDesc(PCODE currentPC); |
| 1253 | |
| 1254 | static IJitManager* FindJitMan(PCODE currentPC) |
| 1255 | { |
| 1256 | CONTRACTL { |
| 1257 | NOTHROW; |
| 1258 | GC_NOTRIGGER; |
| 1259 | SO_TOLERANT; |
| 1260 | SUPPORTS_DAC; |
| 1261 | } CONTRACTL_END; |
| 1262 | |
| 1263 | RangeSection * pRange = FindCodeRange(currentPC, GetScanFlags()); |
| 1264 | return (pRange != NULL) ? pRange->pjit : NULL; |
| 1265 | } |
| 1266 | |
| 1267 | static RangeSection * FindCodeRange(PCODE currentPC, ScanFlag scanFlag); |
| 1268 | |
| 1269 | static BOOL IsCollectibleMethod(const METHODTOKEN& MethodToken); |
| 1270 | |
| 1271 | class ReaderLockHolder |
| 1272 | { |
| 1273 | public: |
| 1274 | ReaderLockHolder(HostCallPreference hostCallPreference = AllowHostCalls); |
| 1275 | ~ReaderLockHolder(); |
| 1276 | |
| 1277 | BOOL Acquired(); |
| 1278 | }; |
| 1279 | |
| 1280 | #ifdef _TARGET_64BIT_ |
| 1281 | static ULONG GetCLRPersonalityRoutineValue() |
| 1282 | { |
| 1283 | LIMITED_METHOD_CONTRACT; |
| 1284 | static_assert_no_msg(offsetof(HeapList, CLRPersonalityRoutine) == |
| 1285 | (size_t)((ULONG)offsetof(HeapList, CLRPersonalityRoutine))); |
| 1286 | return offsetof(HeapList, CLRPersonalityRoutine); |
| 1287 | } |
| 1288 | #endif // _TARGET_64BIT_ |
| 1289 | |
| 1290 | static EEJitManager * GetEEJitManager() |
| 1291 | { |
| 1292 | LIMITED_METHOD_DAC_CONTRACT; |
| 1293 | return m_pEEJitManager; |
| 1294 | } |
| 1295 | |
| 1296 | #ifdef FEATURE_PREJIT |
| 1297 | static NativeImageJitManager * GetNativeImageJitManager() |
| 1298 | { |
| 1299 | LIMITED_METHOD_DAC_CONTRACT; |
| 1300 | return m_pNativeImageJitManager; |
| 1301 | } |
| 1302 | #endif |
| 1303 | |
| 1304 | #ifdef FEATURE_READYTORUN |
| 1305 | static ReadyToRunJitManager * GetReadyToRunJitManager() |
| 1306 | { |
| 1307 | LIMITED_METHOD_DAC_CONTRACT; |
| 1308 | return m_pReadyToRunJitManager; |
| 1309 | } |
| 1310 | #endif |
| 1311 | |
| 1312 | static void ClearCaches( void ); |
| 1313 | static BOOL IsCacheCleanupRequired(); |
| 1314 | |
| 1315 | static LPCWSTR GetJitName(); |
| 1316 | |
| 1317 | static void Unload(LoaderAllocator *pLoaderAllocator); |
| 1318 | |
| 1319 | static void AddCodeRange(TADDR StartRange, TADDR EndRange, |
| 1320 | IJitManager* pJit, |
| 1321 | RangeSection::RangeSectionFlags flags, |
| 1322 | void * pHp); |
| 1323 | |
| 1324 | static void AddNativeImageRange(TADDR StartRange, |
| 1325 | SIZE_T Size, |
| 1326 | Module * pModule); |
| 1327 | |
| 1328 | static void DeleteRange(TADDR StartRange); |
| 1329 | |
| 1330 | static void CleanupCodeHeaps(); |
| 1331 | |
| 1332 | static ICodeManager* GetDefaultCodeManager() |
| 1333 | { |
| 1334 | LIMITED_METHOD_CONTRACT; |
| 1335 | return (ICodeManager *)m_pDefaultCodeMan; |
| 1336 | } |
| 1337 | |
| 1338 | static PTR_Module FindZapModule(TADDR currentData); |
| 1339 | static PTR_Module FindReadyToRunModule(TADDR currentData); |
| 1340 | |
| 1341 | // FindZapModule flavor to be used during GC to find GCRefMap |
| 1342 | static PTR_Module FindModuleForGCRefMap(TADDR currentData); |
| 1343 | |
| 1344 | static RangeSection* GetRangeSectionAndPrev(RangeSection *pRS, TADDR addr, RangeSection **ppPrev); |
| 1345 | |
| 1346 | #ifdef DACCESS_COMPILE |
| 1347 | static void EnumRangeList(RangeSection* list, |
| 1348 | CLRDataEnumMemoryFlags flags); |
| 1349 | static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
| 1350 | #endif |
| 1351 | |
| 1352 | #ifndef DACCESS_COMPILE |
| 1353 | static PCODE jumpStub(MethodDesc* pMD, |
| 1354 | PCODE target, |
| 1355 | BYTE * loAddr, |
| 1356 | BYTE * hiAddr, |
| 1357 | LoaderAllocator *pLoaderAllocator = NULL, |
| 1358 | bool throwOnOutOfMemoryWithinRange = true); |
| 1359 | #endif |
| 1360 | |
| 1361 | private: |
| 1362 | static RangeSection * FindCodeRangeWithLock(PCODE currentPC); |
| 1363 | |
| 1364 | static BOOL IsManagedCodeWithLock(PCODE currentPC); |
| 1365 | static BOOL IsManagedCodeWorker(PCODE currentPC); |
| 1366 | |
| 1367 | static RangeSection* GetRangeSection(TADDR addr); |
| 1368 | |
| 1369 | SPTR_DECL(EECodeManager, m_pDefaultCodeMan); |
| 1370 | |
| 1371 | SPTR_DECL(EEJitManager, m_pEEJitManager); |
| 1372 | #ifdef FEATURE_PREJIT |
| 1373 | SPTR_DECL(NativeImageJitManager, m_pNativeImageJitManager); |
| 1374 | #endif |
| 1375 | #ifdef FEATURE_READYTORUN |
| 1376 | SPTR_DECL(ReadyToRunJitManager, m_pReadyToRunJitManager); |
| 1377 | #endif |
| 1378 | |
| 1379 | static CrstStatic m_JumpStubCrst; |
    static CrstStatic   m_RangeCrst;        // Acquire before writing into m_CodeRangeList and m_DataRangeList
| 1381 | |
| 1382 | // infrastructure to manage readers so we can lock them out and delete domain data |
| 1383 | // make ReaderCount volatile because we have order dependency in READER_INCREMENT |
| 1384 | #ifndef DACCESS_COMPILE |
| 1385 | static Volatile<RangeSection *> m_CodeRangeList; |
| 1386 | static Volatile<LONG> m_dwReaderCount; |
| 1387 | static Volatile<LONG> m_dwWriterLock; |
| 1388 | #else |
| 1389 | SPTR_DECL(RangeSection, m_CodeRangeList); |
| 1390 | SVAL_DECL(LONG, m_dwReaderCount); |
| 1391 | SVAL_DECL(LONG, m_dwWriterLock); |
| 1392 | #endif |
| 1393 | |
| 1394 | #ifndef DACCESS_COMPILE |
| 1395 | class WriterLockHolder |
| 1396 | { |
| 1397 | public: |
| 1398 | WriterLockHolder(); |
| 1399 | ~WriterLockHolder(); |
| 1400 | }; |
| 1401 | #endif |
| 1402 | |
| 1403 | #if defined(_DEBUG) |
| 1404 | // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do |
| 1405 | // comparisons between takes & releases (and to provide debugging info to the |
| 1406 | // developer). Since Inc/Dec Reader/Writer are static, there's no object to |
| 1407 | // use. So we just use the pointer to m_dwReaderCount. Note that both |
| 1408 | // readers & writers use this same pointer, which follows the general convention |
| 1409 | // of other ReaderWriter locks in the EE code base: each reader/writer locking object |
| 1410 | // instance protects only 1 piece of data or code. Readers & writers both access the |
| 1411 | // same locking object & shared resource, so conceptually they would share the same |
| 1412 | // lock pointer. |
| 1413 | static void * GetPtrForLockContract() |
| 1414 | { |
| 1415 | return (void *) &m_dwReaderCount; |
| 1416 | } |
| 1417 | #endif // defined(_DEBUG) |
| 1418 | |
| 1419 | static void AddRangeHelper(TADDR StartRange, |
| 1420 | TADDR EndRange, |
| 1421 | IJitManager* pJit, |
| 1422 | RangeSection::RangeSectionFlags flags, |
| 1423 | TADDR pHeapListOrZapModule); |
| 1424 | static void DeleteRangeHelper(RangeSection** ppRangeList, |
| 1425 | TADDR StartRange); |
| 1426 | |
| 1427 | #ifndef DACCESS_COMPILE |
| 1428 | static PCODE getNextJumpStub(MethodDesc* pMD, |
| 1429 | PCODE target, |
| 1430 | BYTE * loAddr, BYTE * hiAddr, |
| 1431 | LoaderAllocator *pLoaderAllocator, |
| 1432 | bool throwOnOutOfMemoryWithinRange); |
| 1433 | #endif |
| 1434 | |
| 1435 | private: |
| 1436 | // *************************************************************************** |
| 1437 | // Hashtable for JumpStubs for jitted code |
| 1438 | |
| 1439 | struct JumpStubEntry { |
| 1440 | PCODE m_target; |
| 1441 | PCODE m_jumpStub; |
| 1442 | }; |
| 1443 | |
| 1444 | class JumpStubTraits : public DefaultSHashTraits<JumpStubEntry> |
| 1445 | { |
| 1446 | public: |
| 1447 | typedef PCODE key_t; |
| 1448 | |
| 1449 | static key_t GetKey(element_t e) |
| 1450 | { |
| 1451 | LIMITED_METHOD_CONTRACT; |
| 1452 | return e.m_target; |
| 1453 | } |
| 1454 | static BOOL Equals(key_t k1, key_t k2) |
| 1455 | { |
| 1456 | LIMITED_METHOD_CONTRACT; |
| 1457 | return k1 == k2; |
| 1458 | } |
| 1459 | static count_t Hash(key_t k) |
| 1460 | { |
| 1461 | LIMITED_METHOD_CONTRACT; |
| 1462 | #ifdef _WIN64 |
| 1463 | return (count_t) ((size_t) k ^ ((size_t) k >> 32)); |
| 1464 | #else |
| 1465 | return (count_t)(size_t)k; |
| 1466 | #endif |
| 1467 | } |
| 1468 | |
| 1469 | static const element_t Null() { LIMITED_METHOD_CONTRACT; JumpStubEntry e; e.m_target = NULL; e.m_jumpStub = NULL; return e; } |
| 1470 | static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.m_target == NULL; } |
| 1471 | static const element_t Deleted() { LIMITED_METHOD_CONTRACT; JumpStubEntry e; e.m_target = (PCODE)-1; e.m_jumpStub = NULL; return e; } |
| 1472 | static bool IsDeleted(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.m_target == (PCODE)-1; } |
| 1473 | }; |
| 1474 | typedef SHash<JumpStubTraits> JumpStubTable; |
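    // A minimal sketch of how the table is used (illustrative only; it assumes
    // the usual SHash Add/Lookup surface from shash.h):
    //
    //     JumpStubTable table;
    //     JumpStubEntry entry = { target, jumpStubAddr };
    //     table.Add(entry);                            // keyed by entry.m_target
    //     JumpStubEntry found = table.Lookup(target);
    //     // JumpStubTraits::IsNull(found) is true when no stub exists for target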
| 1475 | |
| 1476 | static unsigned m_normal_JumpStubLookup; |
| 1477 | static unsigned m_normal_JumpStubUnique; |
| 1478 | static unsigned m_normal_JumpStubBlockAllocCount; |
| 1479 | static unsigned m_normal_JumpStubBlockFullCount; |
| 1480 | |
| 1481 | static unsigned m_LCG_JumpStubLookup; |
| 1482 | static unsigned m_LCG_JumpStubUnique; |
| 1483 | static unsigned m_LCG_JumpStubBlockAllocCount; |
| 1484 | static unsigned m_LCG_JumpStubBlockFullCount; |
| 1485 | |
| 1486 | public: |
| 1487 | struct JumpStubCache |
| 1488 | { |
| 1489 | JumpStubCache() |
| 1490 | : m_pBlocks(NULL) |
| 1491 | { |
| 1492 | LIMITED_METHOD_CONTRACT; |
| 1493 | } |
| 1494 | |
| 1495 | JumpStubBlockHeader * m_pBlocks; |
| 1496 | JumpStubTable m_Table; |
| 1497 | }; |
| 1498 | }; |
| 1499 | |
inline CodeHeader * EEJitManager::GetCodeHeader(const METHODTOKEN& MethodToken)
| 1501 | { |
| 1502 | LIMITED_METHOD_DAC_CONTRACT; |
| 1503 | _ASSERTE(!MethodToken.IsNull()); |
| 1504 | return dac_cast<PTR_CodeHeader>(MethodToken.m_pCodeHeader); |
| 1505 | } |
| 1506 | |
inline CodeHeader * EEJitManager::GetCodeHeaderFromStartAddress(TADDR methodStartAddress)
| 1508 | { |
| 1509 | LIMITED_METHOD_DAC_CONTRACT; |
| 1510 | _ASSERTE(methodStartAddress != NULL); |
| 1511 | ARM_ONLY(_ASSERTE((methodStartAddress & THUMB_CODE) == 0)); |
| 1512 | return dac_cast<PTR_CodeHeader>(methodStartAddress - sizeof(CodeHeader)); |
| 1513 | } |
| 1514 | |
| 1515 | inline TADDR EEJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken) |
| 1516 | { |
| 1517 | CONTRACTL { |
| 1518 | NOTHROW; |
| 1519 | GC_NOTRIGGER; |
| 1520 | HOST_NOCALLS; |
| 1521 | SUPPORTS_DAC; |
| 1522 | } CONTRACTL_END; |
| 1523 | |
| 1524 | CodeHeader * pCH = GetCodeHeader(MethodToken); |
| 1525 | return pCH->GetCodeStartAddress(); |
| 1526 | } |
| 1527 | |
| 1528 | inline void EEJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, |
| 1529 | MethodRegionInfo * methodRegionInfo) |
| 1530 | { |
| 1531 | CONTRACTL { |
| 1532 | NOTHROW; |
| 1533 | GC_NOTRIGGER; |
| 1534 | HOST_NOCALLS; |
| 1535 | SUPPORTS_DAC; |
| 1536 | PRECONDITION(methodRegionInfo != NULL); |
| 1537 | } CONTRACTL_END; |
| 1538 | |
| 1539 | methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken); |
| 1540 | methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken)); |
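    // Jitted code is never hot/cold split, so the entire method body is reported
    // as the hot region and the cold region is left empty.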
| 1541 | methodRegionInfo->coldStartAddress = 0; |
| 1542 | methodRegionInfo->coldSize = 0; |
| 1543 | } |
| 1544 | |
| 1545 | |
| 1546 | //----------------------------------------------------------------------------- |
| 1547 | #ifdef FEATURE_PREJIT |
| 1548 | |
| 1549 | //***************************************************************************** |
// JitManager for managed native (NGen) images.
| 1551 | |
| 1552 | class NativeImageJitManager : public IJitManager |
| 1553 | { |
| 1554 | VPTR_VTABLE_CLASS(NativeImageJitManager, IJitManager) |
| 1555 | |
| 1556 | public: |
| 1557 | #ifndef DACCESS_COMPILE |
| 1558 | NativeImageJitManager(); |
| 1559 | #endif // #ifndef DACCESS_COMPILE |
| 1560 | |
| 1561 | virtual DWORD GetCodeType() |
| 1562 | { |
| 1563 | LIMITED_METHOD_DAC_CONTRACT; |
| 1564 | return (miManaged | miNative); |
| 1565 | } |
| 1566 | |
| 1567 | // Used to read debug info. |
| 1568 | virtual BOOL GetBoundariesAndVars( |
| 1569 | const DebugInfoRequest & request, |
| 1570 | IN FP_IDS_NEW fpNew, IN void * pNewData, |
| 1571 | OUT ULONG32 * pcMap, |
| 1572 | OUT ICorDebugInfo::OffsetMapping **ppMap, |
| 1573 | OUT ULONG32 * pcVars, |
| 1574 | OUT ICorDebugInfo::NativeVarInfo **ppVars); |
| 1575 | |
| 1576 | virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection, |
| 1577 | PCODE currentPC, |
| 1578 | MethodDesc ** ppMethodDesc, |
| 1579 | EECodeInfo * pCodeInfo); |
| 1580 | |
| 1581 | virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset); |
| 1582 | |
| 1583 | static PTR_Module JitTokenToZapModule(const METHODTOKEN& MethodToken); |
| 1584 | virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken); |
| 1585 | virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo * methodRegionInfo); |
| 1586 | |
| 1587 | virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState); |
| 1588 | |
| 1589 | virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState, |
| 1590 | EE_ILEXCEPTION_CLAUSE* pEHclause); |
| 1591 | |
| 1592 | #ifndef DACCESS_COMPILE |
| 1593 | virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause, |
| 1594 | CrawlFrame *pCf); |
| 1595 | #endif // #ifndef DACCESS_COMPILE |
| 1596 | |
| 1597 | virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken); |
| 1598 | |
| 1599 | #if defined(WIN64EXCEPTIONS) |
| 1600 | virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo); |
| 1601 | |
| 1602 | virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo); |
| 1603 | virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength); |
| 1604 | virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo); |
| 1605 | #endif // WIN64EXCEPTIONS |
| 1606 | |
| 1607 | virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC); |
| 1608 | |
| 1609 | #if defined(DACCESS_COMPILE) |
| 1610 | virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
| 1611 | virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD); |
| 1612 | #if defined(WIN64EXCEPTIONS) |
| 1613 | // Enumerate the memory necessary to retrieve the unwind info for a specific method |
| 1614 | virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo); |
| 1615 | #endif //WIN64EXCEPTIONS |
| 1616 | #endif //DACCESS_COMPILE |
| 1617 | }; |
| 1618 | |
| 1619 | class NativeExceptionInfoLookupTable |
| 1620 | { |
| 1621 | public: |
| 1622 | static DWORD LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pTable, |
| 1623 | COUNT_T numLookupEntries, |
| 1624 | DWORD methodStartRVA, |
| 1625 | COUNT_T* pSize); |
| 1626 | }; |
| 1627 | |
| 1628 | class NativeUnwindInfoLookupTable |
| 1629 | { |
| 1630 | public: |
| 1631 | static int LookupUnwindInfoForMethod(DWORD codeOffset, |
| 1632 | PTR_RUNTIME_FUNCTION pRuntimeFunctionTable, |
| 1633 | int StartIndex, |
| 1634 | int EndIndex); |
| 1635 | |
| 1636 | static BOOL HasExceptionInfo(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction); |
| 1637 | static PTR_MethodDesc GetMethodDesc(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction, TADDR moduleBase); |
| 1638 | |
| 1639 | private: |
| 1640 | static DWORD GetMethodDescRVA(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction); |
| 1641 | }; |
| 1642 | |
| 1643 | inline TADDR NativeImageJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken) |
| 1644 | { |
| 1645 | CONTRACTL { |
| 1646 | NOTHROW; |
| 1647 | GC_NOTRIGGER; |
| 1648 | HOST_NOCALLS; |
| 1649 | SUPPORTS_DAC; |
| 1650 | } CONTRACTL_END; |
| 1651 | |
| 1652 | return JitTokenToModuleBase(MethodToken) + |
| 1653 | RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader)); |
| 1654 | } |
| 1655 | |
| 1656 | #endif // FEATURE_PREJIT |
| 1657 | |
| 1658 | #ifdef FEATURE_READYTORUN |
| 1659 | |
| 1660 | class ReadyToRunJitManager : public IJitManager |
| 1661 | { |
| 1662 | VPTR_VTABLE_CLASS(ReadyToRunJitManager, IJitManager) |
| 1663 | |
| 1664 | public: |
| 1665 | #ifndef DACCESS_COMPILE |
| 1666 | ReadyToRunJitManager(); |
| 1667 | #endif // #ifndef DACCESS_COMPILE |
| 1668 | |
| 1669 | virtual DWORD GetCodeType() |
| 1670 | { |
| 1671 | LIMITED_METHOD_DAC_CONTRACT; |
| 1672 | return (miManaged | miNative); |
| 1673 | } |
| 1674 | |
| 1675 | // Used to read debug info. |
| 1676 | virtual BOOL GetBoundariesAndVars( |
| 1677 | const DebugInfoRequest & request, |
| 1678 | IN FP_IDS_NEW fpNew, IN void * pNewData, |
| 1679 | OUT ULONG32 * pcMap, |
| 1680 | OUT ICorDebugInfo::OffsetMapping **ppMap, |
| 1681 | OUT ULONG32 * pcVars, |
| 1682 | OUT ICorDebugInfo::NativeVarInfo **ppVars); |
| 1683 | |
| 1684 | virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection, |
| 1685 | PCODE currentPC, |
| 1686 | MethodDesc** ppMethodDesc, |
| 1687 | OUT EECodeInfo * pCodeInfo); |
| 1688 | |
| 1689 | virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset); |
| 1690 | |
| 1691 | static ReadyToRunInfo * JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken); |
| 1692 | static UINT32 JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken); |
| 1693 | |
| 1694 | static PTR_RUNTIME_FUNCTION JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken); |
| 1695 | |
| 1696 | virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken); |
| 1697 | virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo * methodRegionInfo); |
| 1698 | |
| 1699 | virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState); |
| 1700 | |
| 1701 | virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState, |
| 1702 | EE_ILEXCEPTION_CLAUSE* pEHclause); |
| 1703 | |
| 1704 | #ifndef DACCESS_COMPILE |
| 1705 | virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause, |
| 1706 | CrawlFrame *pCf); |
| 1707 | #endif // #ifndef DACCESS_COMPILE |
| 1708 | |
| 1709 | virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken); |
| 1710 | |
| 1711 | #if defined(WIN64EXCEPTIONS) |
| 1712 | virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo); |
| 1713 | |
| 1714 | virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo); |
| 1715 | virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength); |
| 1716 | virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo); |
| 1717 | #endif // WIN64EXCEPTIONS |
| 1718 | |
| 1719 | virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC); |
| 1720 | |
| 1721 | #if defined(DACCESS_COMPILE) |
| 1722 | virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
| 1723 | virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD); |
| 1724 | #if defined(WIN64EXCEPTIONS) |
| 1725 | // Enumerate the memory necessary to retrieve the unwind info for a specific method |
| 1726 | virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo); |
| 1727 | #endif //WIN64EXCEPTIONS |
| 1728 | #endif //DACCESS_COMPILE |
| 1729 | }; |
| 1730 | |
| 1731 | #endif |
| 1732 | |
| 1733 | //***************************************************************************** |
// EECodeInfo provides information about the code at a particular address:
| 1735 | // - Start of the method and relative offset |
| 1736 | // - GC Info of the method |
| 1737 | // etc. |
| 1738 | // |
// EECodeInfo caches information from the IJitManager and thus avoids
// querying the IJitManager repeatedly for the same data.
| 1741 | // |
| 1742 | class EECodeInfo |
| 1743 | { |
| 1744 | friend BOOL EEJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo); |
| 1745 | #ifdef FEATURE_PREJIT |
| 1746 | friend BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo); |
| 1747 | #endif |
| 1748 | #ifdef FEATURE_READYTORUN |
| 1749 | friend BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo); |
| 1750 | #endif |
| 1751 | |
| 1752 | public: |
| 1753 | EECodeInfo(); |
| 1754 | |
| 1755 | EECodeInfo(PCODE codeAddress) |
| 1756 | { |
| 1757 | Init(codeAddress); |
| 1758 | } |
| 1759 | |
| 1760 | // Explicit initialization |
| 1761 | void Init(PCODE codeAddress); |
| 1762 | void Init(PCODE codeAddress, ExecutionManager::ScanFlag scanFlag); |
| 1763 | |
| 1764 | TADDR GetSavedMethodCode(); |
| 1765 | |
| 1766 | TADDR GetStartAddress(); |
| 1767 | |
| 1768 | BOOL IsValid() |
| 1769 | { |
| 1770 | LIMITED_METHOD_DAC_CONTRACT; |
| 1771 | return m_pJM != NULL; |
| 1772 | } |
| 1773 | |
| 1774 | IJitManager* GetJitManager() |
| 1775 | { |
| 1776 | LIMITED_METHOD_DAC_CONTRACT; |
| 1777 | _ASSERTE(m_pJM != NULL); |
| 1778 | return m_pJM; |
| 1779 | } |
| 1780 | |
| 1781 | ICodeManager* GetCodeManager() |
| 1782 | { |
| 1783 | LIMITED_METHOD_DAC_CONTRACT; |
| 1784 | return GetJitManager()->GetCodeManager(); |
| 1785 | } |
| 1786 | |
| 1787 | const METHODTOKEN& GetMethodToken() |
| 1788 | { |
| 1789 | LIMITED_METHOD_DAC_CONTRACT; |
| 1790 | return m_methodToken; |
| 1791 | } |
| 1792 | |
| 1793 | // This returns a pointer to the start of an instruction; conceptually, a PINSTR. |
| 1794 | TADDR GetCodeAddress() |
| 1795 | { |
| 1796 | LIMITED_METHOD_DAC_CONTRACT; |
| 1797 | return PCODEToPINSTR(m_codeAddress); |
| 1798 | } |
| 1799 | |
| 1800 | MethodDesc * GetMethodDesc() |
| 1801 | { |
| 1802 | LIMITED_METHOD_DAC_CONTRACT; |
| 1803 | return m_pMD; |
| 1804 | } |
| 1805 | |
| 1806 | DWORD GetRelOffset() |
| 1807 | { |
| 1808 | LIMITED_METHOD_DAC_CONTRACT; |
| 1809 | return m_relOffset; |
| 1810 | } |
| 1811 | |
| 1812 | GCInfoToken GetGCInfoToken() |
| 1813 | { |
| 1814 | WRAPPER_NO_CONTRACT; |
| 1815 | return GetJitManager()->GetGCInfoToken(GetMethodToken()); |
| 1816 | } |
| 1817 | |
| 1818 | PTR_VOID GetGCInfo() |
| 1819 | { |
| 1820 | WRAPPER_NO_CONTRACT; |
| 1821 | return GetGCInfoToken().Info; |
| 1822 | } |
| 1823 | |
| 1824 | void GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo) |
| 1825 | { |
| 1826 | WRAPPER_NO_CONTRACT; |
| 1827 | return GetJitManager()->JitTokenToMethodRegionInfo(GetMethodToken(), methodRegionInfo); |
| 1828 | } |
| 1829 | |
| 1830 | TADDR GetModuleBase() |
| 1831 | { |
| 1832 | WRAPPER_NO_CONTRACT; |
| 1833 | return GetJitManager()->JitTokenToModuleBase(GetMethodToken()); |
| 1834 | } |
| 1835 | |
| 1836 | #ifdef WIN64EXCEPTIONS |
| 1837 | PTR_RUNTIME_FUNCTION GetFunctionEntry(); |
| 1838 | BOOL IsFunclet() { WRAPPER_NO_CONTRACT; return GetJitManager()->IsFunclet(this); } |
| 1839 | EECodeInfo GetMainFunctionInfo(); |
| 1840 | ULONG GetFixedStackSize(); |
| 1841 | |
| 1842 | #if defined(_TARGET_AMD64_) |
| 1843 | BOOL HasFrameRegister(); |
| 1844 | #endif // _TARGET_AMD64_ |
| 1845 | |
| 1846 | #else // WIN64EXCEPTIONS |
| 1847 | ULONG GetFixedStackSize() |
| 1848 | { |
| 1849 | WRAPPER_NO_CONTRACT; |
| 1850 | return GetCodeManager()->GetFrameSize(GetGCInfoToken()); |
| 1851 | } |
| 1852 | #endif // WIN64EXCEPTIONS |
| 1853 | |
| 1854 | #if defined(_TARGET_AMD64_) |
| 1855 | void GetOffsetsFromUnwindInfo(ULONG* pRSPOffset, ULONG* pRBPOffset); |
| 1856 | |
| 1857 | #if defined(_DEBUG) && defined(HAVE_GCCOVER) |
| 1858 | // Find first funclet inside (pvFuncletStart, pvFuncletStart + cbCode) |
| 1859 | static LPVOID findNextFunclet (LPVOID pvFuncletStart, SIZE_T cbCode, LPVOID *ppvFuncletEnd); |
| 1860 | #endif // _DEBUG && HAVE_GCCOVER |
| 1861 | #endif // _TARGET_AMD64_ |
| 1862 | |
| 1863 | private: |
| 1864 | PCODE m_codeAddress; |
| 1865 | METHODTOKEN m_methodToken; |
| 1866 | MethodDesc *m_pMD; |
| 1867 | IJitManager *m_pJM; |
| 1868 | DWORD m_relOffset; |
| 1869 | #ifdef WIN64EXCEPTIONS |
| 1870 | PTR_RUNTIME_FUNCTION m_pFunctionEntry; |
| 1871 | #endif // WIN64EXCEPTIONS |
| 1872 | |
| 1873 | #ifdef _TARGET_AMD64_ |
| 1874 | // Simple helper to return a pointer to the UNWIND_INFO given the offset to the unwind info. |
| 1875 | UNWIND_INFO * GetUnwindInfoHelper(ULONG unwindInfoOffset); |
| 1876 | #endif // _TARGET_AMD64_ |
| 1877 | }; |
| 1878 | |
| 1879 | #include "codeman.inl" |
| 1880 | |
| 1881 | |
| 1882 | #ifdef FEATURE_PREJIT |
| 1883 | class MethodSectionIterator; |
| 1884 | |
| 1885 | // |
// The MethodIterator class is used to iterate over all the methods in an NGen image.
// It matches and reports the hot (and cold, if any) sections of a method at the same time.
// The GCInfo version is always current.
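//
// An iteration sketch (illustrative only; pModule is assumed to be a loaded
// NGen module):
//
//     MethodIterator it(pModule);
//     while (it.Next())
//     {
//         PTR_MethodDesc pMD   = it.GetMethodDesc();
//         TADDR          start = it.GetMethodStartAddress();
//         // ... inspect the method ...
//     }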
| 1889 | class MethodIterator |
| 1890 | { |
| 1891 | public: |
| 1892 | enum MethodIteratorOptions |
| 1893 | { |
| 1894 | Hot = 0x1, |
        Unprofiled = 0x2,
| 1896 | All = Hot | Unprofiled |
| 1897 | }; |
| 1898 | private: |
| 1899 | TADDR m_ModuleBase; |
| 1900 | MethodIteratorOptions methodIteratorOptions; |
| 1901 | |
| 1902 | NGenLayoutInfo * m_pNgenLayout; |
| 1903 | BOOL m_fHotMethodsDone; |
| 1904 | COUNT_T m_CurrentRuntimeFunctionIndex; |
| 1905 | COUNT_T m_CurrentColdRuntimeFunctionIndex; |
| 1906 | |
| 1907 | void Init(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio); |
| 1908 | |
| 1909 | public: |
| 1910 | MethodIterator(PTR_Module pModule, MethodIteratorOptions mio = All); |
| 1911 | MethodIterator(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio = All); |
| 1912 | |
| 1913 | BOOL Next(); |
| 1914 | |
| 1915 | PTR_MethodDesc GetMethodDesc(); |
| 1916 | GCInfoToken GetGCInfoToken(); |
| 1917 | TADDR GetMethodStartAddress(); |
| 1918 | TADDR GetMethodColdStartAddress(); |
| 1919 | ULONG GetHotCodeSize(); |
| 1920 | |
| 1921 | PTR_RUNTIME_FUNCTION GetRuntimeFunction(); |
| 1922 | |
| 1923 | void GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo); |
| 1924 | }; |
| 1925 | #endif //FEATURE_PREJIT |
| 1926 | |
| 1927 | void ThrowOutOfMemoryWithinRange(); |
| 1928 | |
| 1929 | #endif // !__CODEMAN_HPP__ |
| 1930 | |