// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ============================================================================
// File: stackwalktypes.h
//

// ============================================================================
// Contains types used by stackwalk.h.


#ifndef __STACKWALKTYPES_H__
#define __STACKWALKTYPES_H__

class CrawlFrame;
struct RangeSection;
struct StackwalkCacheEntry;

//
// This type should be used internally inside the code manager only. EECodeInfo should
// be used in general code instead. Ideally, we would replace all uses of METHODTOKEN
// with EECodeInfo.
//
struct METHODTOKEN
{
    METHODTOKEN(RangeSection * pRangeSection, TADDR pCodeHeader)
        : m_pRangeSection(pRangeSection), m_pCodeHeader(pCodeHeader)
    {
    }

    METHODTOKEN()
    {
    }

    // Cache of RangeSection containing the code to avoid redundant lookups.
    RangeSection * m_pRangeSection;

    // CodeHeader* for EEJitManager
    // PTR_RUNTIME_FUNCTION for managed native code
    TADDR m_pCodeHeader;

    BOOL IsNull() const
    {
        return m_pCodeHeader == NULL;
    }
};
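
// Illustrative sketch (not part of the original header): inside the code manager a
// METHODTOKEN pairs the RangeSection that contains the code with the code-header
// address; IsNull() distinguishes a real token from an empty one. The variable
// names below are hypothetical.
//
//     METHODTOKEN token(pRangeSection, codeHeaderAddr);
//     if (!token.IsNull())
//     {
//         // token identifies a method known to this code manager
//     }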

//************************************************************************
// Stack walking
//************************************************************************
enum StackCrawlMark
{
    LookForMe = 0,
    LookForMyCaller = 1,
    LookForMyCallersCaller = 2,
    LookForThread = 3
};
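
// Illustrative sketch (not part of the original header): a runtime helper that needs
// to identify its caller typically declares a local StackCrawlMark and hands its
// address to the stack walker, which skips frames until it reaches the mark and then
// resolves "me", "my caller", etc. relative to it. The surrounding call is
// hypothetical; only the enum values above are defined here.
//
//     StackCrawlMark stackMark = LookForMyCaller;
//     // ... pass &stackMark into the stack walk machinery so the frame that
//     //     declared the mark (and hence its caller) can be located ...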

enum StackWalkAction
{
    SWA_CONTINUE = 0,   // continue walking
    SWA_ABORT    = 1,   // stop walking, early out in "failure case"
    SWA_FAILED   = 2    // couldn't walk stack
};

#define SWA_DONE SWA_CONTINUE


// Pointer to the StackWalk callback function.
typedef StackWalkAction (*PSTACKWALKFRAMESCALLBACK)(
    CrawlFrame *pCF,    // Current frame being examined by the walk
    VOID*       pData   // Caller's private data
);
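
// Illustrative sketch (not part of the original header), assuming a walker that
// invokes the callback once per frame: the callback inspects the CrawlFrame and
// returns SWA_CONTINUE to keep walking or SWA_ABORT to stop early.
//
//     StackWalkAction CountFramesCallback(CrawlFrame *pCF, VOID *pData)
//     {
//         unsigned *pCount = (unsigned *)pData;  // caller's private data
//         (*pCount)++;
//         return (*pCount < 10) ? SWA_CONTINUE   // keep walking
//                               : SWA_ABORT;     // early out after 10 frames
//     }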

/******************************************************************************
   StackwalkCache: implements the stackwalk perf optimization features.
   StackwalkCacheEntry array: a very simple per-thread hash table keeping the cached data.
   StackwalkCacheUnwindInfo: used by EECodeManager::UnwindStackFrame to return
   stackwalk cache flags.
   Cf. Ilyakoz for any questions.
*/

struct StackwalkCacheUnwindInfo
{
#if defined(_TARGET_AMD64_)
    ULONG RBPOffset;
    ULONG RSPOffsetFromUnwindInfo;
#else  // !_TARGET_AMD64_
    size_t securityObjectOffset;  // offset of SecurityObject. 0 if there is no security object
    BOOL fUseEbp;                 // Is EBP modified by the method - either for a frame-pointer or for a scratch-register?
    BOOL fUseEbpAsFrameReg;       // use EBP as the frame pointer?
#endif // !_TARGET_AMD64_

    inline StackwalkCacheUnwindInfo() { SUPPORTS_DAC; ZeroMemory(this, sizeof(StackwalkCacheUnwindInfo)); }
    StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry);
};
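
// Illustrative sketch (not part of the original header): on x86 the code manager
// fills one of these while unwinding a frame so the flags can later be captured
// into a StackwalkCacheEntry; on AMD64 the RSP/RBP offsets play the same role.
// The field values below are hypothetical.
//
//     StackwalkCacheUnwindInfo unwindInfo;   // zero-initialized by its ctor
//     unwindInfo.fUseEbp = TRUE;             // method touches EBP
//     unwindInfo.fUseEbpAsFrameReg = TRUE;   // and uses it as the frame pointer
//     // ... hand &unwindInfo to StackwalkCacheEntry::Init (see below) ...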

//************************************************************************

#if defined(_WIN64)
    #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x10
#else  // !_WIN64
    #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x8
#endif // !_WIN64

struct
DECLSPEC_ALIGN(STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY)
StackwalkCacheEntry
{
    //
    // Don't rearrange the fields: the invalid value 0x8000000000000000 must never appear
    // as a StackwalkCacheEntry. This is required for atomic MOVQ using FILD/FISTP instructions.
    //
    UINT_PTR IP;
#if !defined(_TARGET_AMD64_)
    WORD ESPOffset:15;           // stack offset (frame size + pending arguments + etc)
    WORD securityObjectOffset:3; // offset of SecurityObject. 0 if there is no security object
    WORD fUseEbp:1;              // For ESP methods, is EBP touched at all?
    WORD fUseEbpAsFrameReg:1;    // use EBP as the frame register?
    WORD argSize:11;             // size of args pushed on stack
#else  // _TARGET_AMD64_
    DWORD RSPOffset;
    DWORD RBPOffset;
#endif // _TARGET_AMD64_

    inline BOOL Init(UINT_PTR IP,
                     UINT_PTR SPOffset,
                     StackwalkCacheUnwindInfo *pUnwindInfo,
                     UINT_PTR argSize)
    {
        LIMITED_METHOD_CONTRACT;

        this->IP = IP;

#if defined(_TARGET_X86_)
        this->ESPOffset = SPOffset;
        this->argSize   = argSize;

        this->securityObjectOffset = (WORD)pUnwindInfo->securityObjectOffset;
        _ASSERTE(this->securityObjectOffset == pUnwindInfo->securityObjectOffset);

        this->fUseEbp           = pUnwindInfo->fUseEbp;
        this->fUseEbpAsFrameReg = pUnwindInfo->fUseEbpAsFrameReg;
        _ASSERTE(!fUseEbpAsFrameReg || fUseEbp);

        // Return success only if SPOffset and argSize fit into the bitfields without truncation.
        return ((this->ESPOffset == SPOffset) &&
                (this->argSize == argSize));
#elif defined(_TARGET_AMD64_)
        // The size of a stack frame is guaranteed to fit in 4 bytes, so we don't need to check RSPOffset and RBPOffset.

        // The actual SP offset may be bigger than the offset we get from the unwind info because of stack allocations.
        _ASSERTE(SPOffset >= pUnwindInfo->RSPOffsetFromUnwindInfo);

        _ASSERTE(FitsIn<DWORD>(SPOffset));
        this->RSPOffset = static_cast<DWORD>(SPOffset);
        _ASSERTE(FitsIn<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo)));
        this->RBPOffset = static_cast<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo));
        return TRUE;
#else  // !_TARGET_X86_ && !_TARGET_AMD64_
        return FALSE;
#endif // !_TARGET_X86_ && !_TARGET_AMD64_
    }

    inline BOOL HasSecurityObject()
    {
        LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_X86_)
        return securityObjectOffset != 0;
#else  // !_TARGET_X86_
        // On AMD64 we don't save anything by grabbing the security object before it is needed. This is because
        // we need to crack the GC info in order to find the security object, and to unwind we only need to
        // crack the unwind info.
        return FALSE;
#endif // !_TARGET_X86_
    }

    inline BOOL IsSafeToUseCache()
    {
        LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_X86_)
        return (!fUseEbp || fUseEbpAsFrameReg);
#elif defined(_TARGET_AMD64_)
        return TRUE;
#else  // !_TARGET_X86_ && !_TARGET_AMD64_
        return FALSE;
#endif // !_TARGET_X86_ && !_TARGET_AMD64_
    }
};
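
// Illustrative sketch (not part of the original header): Init returns TRUE only
// when the supplied offsets fit in the entry's fields, so a caller should treat a
// FALSE return as "do not cache this frame". The variable names below are
// hypothetical.
//
//     StackwalkCacheEntry entry;
//     StackwalkCacheUnwindInfo unwindInfo;  // filled in by the code manager
//     if (entry.Init(controlPC, spOffset, &unwindInfo, argumentSize))
//     {
//         // all values fit without truncation; the entry may be cached
//     }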

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
static_assert_no_msg(sizeof(StackwalkCacheEntry) == 2 * sizeof(UINT_PTR));
#endif // _TARGET_X86_ || _TARGET_AMD64_

//************************************************************************

class StackwalkCache
{
    friend struct _DacGlobals;

public:
    BOOL Lookup(UINT_PTR IP);
    void Insert(StackwalkCacheEntry *pCacheEntry);
    inline void ClearEntry() { LIMITED_METHOD_DAC_CONTRACT; m_CacheEntry.IP = 0; }
    inline BOOL Enabled() { LIMITED_METHOD_DAC_CONTRACT; return s_Enabled; }
    inline BOOL IsEmpty() { LIMITED_METHOD_CONTRACT; return m_CacheEntry.IP == 0; }

#ifndef DACCESS_COMPILE
    StackwalkCache();
#endif
    static void Init();

    StackwalkCacheEntry m_CacheEntry; // local copy of Global Cache entry for current IP

    static void Invalidate(LoaderAllocator * pLoaderAllocator);

private:
    unsigned GetKey(UINT_PTR IP);

#ifdef DACCESS_COMPILE
    // DAC can't rely on the cache here
    const static BOOL s_Enabled;
#else
    static BOOL s_Enabled;
#endif
};
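
// Illustrative sketch (not part of the original header): the expected access
// pattern is a per-IP Lookup, falling back to Insert on a miss. On a hit,
// m_CacheEntry holds the local copy of the cached data for that IP. The variable
// names below are hypothetical.
//
//     StackwalkCache cache;
//     if (cache.Enabled() && cache.Lookup(controlPC))
//     {
//         // use cache.m_CacheEntry to unwind without re-cracking unwind info
//     }
//     else
//     {
//         StackwalkCacheEntry entry;
//         // ... populate entry via StackwalkCacheEntry::Init ...
//         cache.Insert(&entry);
//     }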

//************************************************************************

inline StackwalkCacheUnwindInfo::StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry)
{
    LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_AMD64_)
    RBPOffset = pCacheEntry->RBPOffset;
#else  // !_TARGET_AMD64_
    securityObjectOffset = pCacheEntry->securityObjectOffset;
    fUseEbp = pCacheEntry->fUseEbp;
    fUseEbpAsFrameReg = pCacheEntry->fUseEbpAsFrameReg;
#endif // !_TARGET_AMD64_
}

#endif // __STACKWALKTYPES_H__