| 1 | // Licensed to the .NET Foundation under one or more agreements. | 
|---|
| 2 | // The .NET Foundation licenses this file to you under the MIT license. | 
|---|
| 3 | // See the LICENSE file in the project root for more information. | 
|---|
| 4 | // THREADS.H - | 
|---|
| 5 | // | 
|---|
| 6 |  | 
|---|
| 7 |  | 
|---|
| 8 | // | 
|---|
| 9 | // | 
|---|
| 10 | // Currently represents a logical and physical COM+ thread. Later, these concepts will be separated. | 
|---|
| 11 | // | 
|---|
| 12 |  | 
|---|
| 13 | // | 
|---|
| 14 | // #RuntimeThreadLocals. | 
|---|
| 15 | // | 
|---|
// Windows has a feature called Thread Local Storage (TLS, which is data that the OS allocates every time it
|---|
| 17 | // creates a thread). Programs access this storage by using the Windows TlsAlloc, TlsGetValue, TlsSetValue | 
|---|
| 18 | // APIs (see http://msdn2.microsoft.com/en-us/library/ms686812.aspx). The runtime allocates two such slots | 
|---|
| 19 | // for its use | 
|---|
| 20 | // | 
|---|
| 21 | //     * A slot that holds a pointer to the runtime thread object code:Thread (see code:#ThreadClass). The | 
|---|
| 22 | //         runtime has a special optimized version of this helper code:GetThread (we actually emit assembly | 
|---|
| 23 | //         code on the fly so it is as fast as possible). These code:Thread objects live in the | 
|---|
| 24 | //         code:ThreadStore. | 
|---|
| 25 | // | 
|---|
| 26 | //      * The other slot holds the current code:AppDomain (a managed equivalent of a process). The | 
|---|
| 27 | //          runtime thread object also has a pointer to the thread's AppDomain (see code:Thread.m_pDomain, | 
|---|
| 28 | //          so in theory this TLS is redundant. It is there for speed (one less pointer indirection). The | 
|---|
| 29 | //          optimized helper for this is code:GetAppDomain (we emit assembly code on the fly for this one | 
|---|
| 30 | //          too). | 
|---|
| 31 | // | 
|---|
| 32 | // Initially these TLS slots are empty (when the OS starts up), however before we run managed code, we must | 
|---|
| 33 | // set them properly so that managed code knows what AppDomain it is in and we can suspend threads properly | 
|---|
| 34 | // for a GC (see code:#SuspendingTheRuntime) | 
|---|
| 35 | // | 
|---|
| 36 | // #SuspendingTheRuntime | 
|---|
| 37 | // | 
|---|
| 38 | // One of the primary differences between runtime code (managed code), and traditional (unmanaged code) is | 
|---|
| 39 | // the existence of the GC heap (see file:gc.cpp#Overview). For the GC to do its job, it must be able to | 
|---|
| 40 | // traverse all references to the GC heap, including ones on the stack of every thread, as well as any in | 
|---|
| 41 | // hardware registers. While it is simple to state this requirement, it has long reaching effects, because | 
|---|
| 42 | // properly accounting for all GC heap references ALL the time turns out to be quite hard. When we make a | 
|---|
| 43 | // bookkeeping mistake, a GC reference is not reported at GC time, which means it will not be updated when the | 
|---|
| 44 | // GC happens. Since memory in the GC heap can move, this can cause the pointer to point at 'random' places | 
|---|
| 45 | // in the GC heap, causing data corruption. This is a 'GC Hole', and is very bad. We have special modes (see | 
|---|
| 46 | // code:EEConfig.GetGCStressLevel) called GCStress to help find such issues. | 
|---|
| 47 | // | 
|---|
// In order to find all GC references on the stacks we need to ensure that no thread is manipulating a GC
|---|
| 49 | // reference at the time of the scan. This is the job of code:Thread.SuspendRuntime. Logically it suspends | 
|---|
| 50 | // every thread in the process. Unfortunately it can not literally simply call the OS SuspendThread API on | 
|---|
| 51 | // all threads. The reason is that the other threads MIGHT hold important locks (for example there is a lock | 
|---|
| 52 | // that is taken when unmanaged heap memory is requested, or when a DLL is loaded). In general process | 
|---|
| 53 | // global structures in the OS will be protected by locks, and if you suspend a thread it might hold that | 
|---|
| 54 | // lock. If you happen to need that OS service (eg you might need to allocated unmanaged memory), then | 
|---|
| 55 | // deadlock will occur (as you wait on the suspended thread, that never wakes up). | 
|---|
| 56 | // | 
|---|
| 57 | // Luckily, we don't need to actually suspend the threads, we just need to insure that all GC references on | 
|---|
| 58 | // the stack are stable. This is where the concept of cooperative mode and preemptive mode (a bad name) come | 
|---|
| 59 | // from. | 
|---|
| 60 | // | 
|---|
| 61 | // #CooperativeMode | 
|---|
| 62 | // | 
|---|
| 63 | // The runtime keeps a table of all threads that have ever run managed code in the code:ThreadStore table. | 
|---|
| 64 | // The ThreadStore table holds a list of Thread objects (see code:#ThreadClass). This object holds all | 
|---|
// information about managed threads. Cooperative mode is defined as the mode the thread is in when the field
|---|
| 66 | // code:Thread.m_fPreemptiveGCDisabled is non-zero. When this field is zero the thread is said to be in | 
|---|
| 67 | // Preemptive mode (named because if you preempt the thread in this mode, it is guaranteed to be in a place | 
|---|
| 68 | // where a GC can occur). | 
|---|
| 69 | // | 
|---|
| 70 | // When a thread is in cooperative mode, it is basically saying that it is potentially modifying GC | 
|---|
| 71 | // references, and so the runtime must Cooperate with it to get to a 'GC Safe' location where the GC | 
|---|
| 72 | // references can be enumerated. This is the mode that a thread is in MOST times when it is running managed | 
|---|
| 73 | // code (in fact if the EIP is in JIT compiled code, there is only one place where you are NOT in cooperative | 
|---|
| 74 | // mode (Inlined PINVOKE transition code)). Conversely, any time non-runtime unmanaged code is running, the | 
|---|
| 75 | // thread MUST NOT be in cooperative mode (you risk deadlock otherwise). Only code in mscorwks.dll might be | 
|---|
| 76 | // running in either cooperative or preemptive mode. | 
|---|
| 77 | // | 
|---|
| 78 | // It is easier to describe the invariant associated with being in Preemptive mode. When the thread is in | 
|---|
| 79 | // preemptive mode (when code:Thread.m_fPreemptiveGCDisabled is zero), the thread guarantees two things | 
|---|
| 80 | // | 
|---|
| 81 | //     * That it not currently running code that manipulates GC references. | 
|---|
| 82 | //     * That it has set the code:Thread.m_pFrame pointer in the code:Thread to be a subclass of the class | 
|---|
| 83 | //         code:Frame which marks the location on the stack where the last managed method frame is. This | 
|---|
| 84 | //         allows the GC to start crawling the stack from there (essentially skip over the unmanaged frames). | 
|---|
| 85 | //     * That the thread will not reenter managed code if the global variable code:g_TrapReturningThreads is | 
|---|
//         set (it will call code:Thread.RareDisablePreemptiveGC first which will block if a suspension is
|---|
| 87 | //         in progress) | 
|---|
| 88 | // | 
|---|
| 89 | // The basic idea is that the suspension logic in code:Thread.SuspendRuntime first sets the global variable | 
|---|
| 90 | // code:g_TrapReturningThreads and then checks if each thread in the ThreadStore is in Cooperative mode. If a | 
|---|
| 91 | // thread is NOT in cooperative mode, the logic simply skips the thread, because it knows that the thread | 
|---|
| 92 | // will stop itself before reentering managed code (because code:g_TrapReturningThreads is set). This avoids | 
|---|
| 93 | // the deadlock problem mentioned earlier, because threads that are running unmanaged code are allowed to | 
|---|
| 94 | // run. Enumeration of GC references starts at the first managed frame (pointed at by code:Thread.m_pFrame). | 
|---|
| 95 | // | 
|---|
| 96 | // When a thread is in cooperative mode, it means that GC references might be being manipulated. There are | 
|---|
| 97 | // two important possibilities | 
|---|
| 98 | // | 
|---|
| 99 | //     * The CPU is running JIT compiled code | 
|---|
| 100 | //     * The CPU is running code elsewhere (which should only be in mscorwks.dll, because everywhere else a | 
|---|
| 101 | //         transition to preemptive mode should have happened first) | 
|---|
| 102 | // | 
|---|
| 103 | // * #PartiallyInteruptibleCode | 
|---|
| 104 | // * #FullyInteruptibleCode | 
|---|
| 105 | // | 
|---|
| 106 | // If the Instruction pointer (x86/x64: EIP, ARM: R15/PC) is in JIT compiled code, we can detect this because we have tables that | 
|---|
// map the ranges of every method back to their code:MethodDesc (this is the code:ICodeManager interface). In
|---|
| 108 | // addition to knowing the method, these tables also point at 'GCInfo' that tell for that method which stack | 
|---|
| 109 | // locations and which registers hold GC references at any particular instruction pointer. If the method is | 
|---|
| 110 | // what is called FullyInterruptible, then we have information for any possible instruction pointer in the | 
|---|
| 111 | // method and we can simply stop the thread (however we have to do this carefully TODO explain). | 
|---|
| 112 | // | 
|---|
// However for most methods, we only keep GC information for particular EIP's, in particular we keep track of
|---|
| 114 | // GC reference liveness only at call sites. Thus not every location is 'GC Safe' (that is we can enumerate | 
|---|
| 115 | // all references, but must be 'driven' to a GC safe location). | 
|---|
| 116 | // | 
|---|
| 117 | // We drive threads to GC safe locations by hijacking. This is a term for updating the return address on the | 
|---|
| 118 | // stack so that we gain control when a method returns. If we find that we are in JITTed code but NOT at a GC | 
|---|
// safe location, then we find the return address for the method and modify it to cause the runtime to stop.
|---|
| 120 | // We then let the method run. Hopefully the method quickly returns, and hits our hijack, and we are now at a | 
|---|
| 121 | // GC-safe location (all call sites are GC-safe). If not we repeat the procedure (possibly moving the | 
|---|
| 122 | // hijack). At some point a method returns, and we get control. For methods that have loops that don't make | 
|---|
// calls, we are forced to make the method FullyInterruptible, so we can be sure to stop the method.
|---|
| 124 | // | 
|---|
| 125 | // This leaves only the case where we are in cooperative modes, but not in JIT compiled code (we should be in | 
|---|
| 126 | // clr.dll). In this case we simply let the thread run. The idea is that code in clr.dll makes the | 
|---|
| 127 | // promise that it will not do ANYTHING that will block (which includes taking a lock), while in cooperative | 
|---|
| 128 | // mode, or do anything that might take a long time without polling to see if a GC is needed. Thus this code | 
|---|
// 'cooperates' to ensure that GCs can happen in a timely fashion.
|---|
| 130 | // | 
|---|
| 131 | // If you need to switch the GC mode of the current thread, look for the GCX_COOP() and GCX_PREEMP() macros. | 
|---|
| 132 | // | 
|---|
| 133 |  | 
|---|
| 134 | #ifndef __threads_h__ | 
|---|
| 135 | #define __threads_h__ | 
|---|
| 136 |  | 
|---|
| 137 | #include "vars.hpp" | 
|---|
| 138 | #include "util.hpp" | 
|---|
| 139 | #include "eventstore.hpp" | 
|---|
| 140 | #include "argslot.h" | 
|---|
| 141 | #include "regdisp.h" | 
|---|
| 142 | #include "mscoree.h" | 
|---|
| 143 | #include "gcheaputilities.h" | 
|---|
| 144 | #include "gchandleutilities.h" | 
|---|
| 145 | #include "gcinfotypes.h" | 
|---|
| 146 | #include <clrhost.h> | 
|---|
| 147 |  | 
|---|
| 148 | class     Thread; | 
|---|
| 149 | class     ThreadStore; | 
|---|
| 150 | class     MethodDesc; | 
|---|
| 151 | struct    PendingSync; | 
|---|
| 152 | class     AppDomain; | 
|---|
| 153 | class     NDirect; | 
|---|
| 154 | class     Frame; | 
|---|
| 155 | class     ThreadBaseObject; | 
|---|
| 156 | class     AppDomainStack; | 
|---|
| 157 | class     LoadLevelLimiter; | 
|---|
| 158 | class     DomainFile; | 
|---|
| 159 | class     DeadlockAwareLock; | 
|---|
| 160 | struct    HelperMethodFrameCallerList; | 
|---|
| 161 | class     ThreadLocalIBCInfo; | 
|---|
| 162 | class     EECodeInfo; | 
|---|
| 163 | class     DebuggerPatchSkip; | 
|---|
| 164 | class     FaultingExceptionFrame; | 
|---|
| 165 | class     ContextTransitionFrame; | 
|---|
| 166 | enum      BinderMethodID : int; | 
|---|
| 167 | class     CRWLock; | 
|---|
| 168 | struct    LockEntry; | 
|---|
| 169 | class     PendingTypeLoadHolder; | 
|---|
| 170 |  | 
|---|
| 171 | struct    ThreadLocalBlock; | 
|---|
| 172 | typedef DPTR(struct ThreadLocalBlock) PTR_ThreadLocalBlock; | 
|---|
| 173 | typedef DPTR(PTR_ThreadLocalBlock) PTR_PTR_ThreadLocalBlock; | 
|---|
| 174 |  | 
|---|
| 175 | typedef void(*ADCallBackFcnType)(LPVOID); | 
|---|
| 176 |  | 
|---|
| 177 | #include "stackwalktypes.h" | 
|---|
| 178 | #include "log.h" | 
|---|
| 179 | #include "stackingallocator.h" | 
|---|
| 180 | #include "excep.h" | 
|---|
| 181 | #include "synch.h" | 
|---|
| 182 | #include "exstate.h" | 
|---|
| 183 | #include "threaddebugblockinginfo.h" | 
|---|
| 184 | #include "interoputil.h" | 
|---|
| 185 | #include "eventtrace.h" | 
|---|
| 186 |  | 
|---|
| 187 | #ifdef FEATURE_PERFTRACING | 
|---|
| 188 | class EventPipeBufferList; | 
|---|
| 189 | #endif // FEATURE_PERFTRACING | 
|---|
| 190 |  | 
|---|
| 191 | struct TLMTableEntry; | 
|---|
| 192 |  | 
|---|
| 193 | typedef DPTR(struct TLMTableEntry) PTR_TLMTableEntry; | 
|---|
| 194 | typedef DPTR(struct ThreadLocalModule) PTR_ThreadLocalModule; | 
|---|
| 195 |  | 
|---|
| 196 | class ThreadStaticHandleTable; | 
|---|
| 197 | struct ThreadLocalModule; | 
|---|
| 198 | class Module; | 
|---|
| 199 |  | 
|---|
// Per-thread storage for thread-static variables.
//
// Each managed thread owns one ThreadLocalBlock. It holds a table mapping a
// ModuleIndex to the ThreadLocalModule with that module's thread-static data
// for this thread (m_pTLMTable), plus the GC pinning handles that keep
// thread-static objects alive. NOTE: member layout matters — code generators
// consume GetOffsetOfModuleSlotsPointer(), so do not reorder fields.
struct ThreadLocalBlock
{
    friend class ClrDataAccess;

private:
    PTR_TLMTableEntry   m_pTLMTable;     // Table of ThreadLocalModules
    SIZE_T              m_TLMTableSize;  // Current size of table
    SpinLock            m_TLMTableLock;  // Spinlock used to synchronize growing the table and freeing TLM by other threads

    // Each ThreadLocalBlock has its own ThreadStaticHandleTable. The ThreadStaticHandleTable works
    // by allocating Object arrays on the GC heap and keeping them alive with pinning handles.
    //
    // We use the ThreadStaticHandleTable to allocate space for GC thread statics. A GC thread
    // static is thread static that is either a reference type or a value type whose layout
    // contains a pointer to a reference type.

    ThreadStaticHandleTable * m_pThreadStaticHandleTable;

    // Need to keep a list of the pinning handles we've created
    // so they can be cleaned up when the thread dies
    ObjectHandleList          m_PinningHandleList;

public:

#ifndef DACCESS_COMPILE
    // Records 'oh' in m_PinningHandleList so it can be freed on thread death.
    void AddPinningHandleToList(OBJECTHANDLE oh);
    // Releases every handle recorded in m_PinningHandleList.
    void FreePinningHandles();
    void AllocateThreadStaticHandles(Module * pModule, ThreadLocalModule * pThreadLocalModule);
    OBJECTHANDLE AllocateStaticFieldObjRefPtrs(int nRequested, OBJECTHANDLE* ppLazyAllocate = NULL);
    void InitThreadStaticHandleTable();

    void AllocateThreadStaticBoxes(MethodTable* pMT);
#endif

public: // used by code generators
    // Byte offset of m_pTLMTable within this struct; emitted code reads the
    // module-slots pointer through this offset.
    static SIZE_T GetOffsetOfModuleSlotsPointer() { return offsetof(ThreadLocalBlock, m_pTLMTable); }

public:

#ifndef DACCESS_COMPILE
    ThreadLocalBlock()
        : m_pTLMTable(NULL), m_TLMTableSize(0), m_pThreadStaticHandleTable(NULL)
    {
        // The table lock is initialized explicitly rather than by a constructor.
        m_TLMTableLock.Init(LOCK_TYPE_DEFAULT);
    }

    // Frees the ThreadLocalModule in slot i; isThreadShuttingDown
    // distinguishes thread-shutdown teardown from other paths.
    void    FreeTLM(SIZE_T i, BOOL isThreadShuttingDown);

    // Frees the TLM table itself.
    void    FreeTable();

    // Grows m_pTLMTable, if needed, so that 'index' has a valid slot.
    void    EnsureModuleIndex(ModuleIndex index);

#endif

    void SetModuleSlot(ModuleIndex index, PTR_ThreadLocalModule pLocalModule);

    // Lookup that does not allocate: returns NULL when this thread has no
    // ThreadLocalModule for the given module / MethodTable yet.
    PTR_ThreadLocalModule GetTLMIfExists(ModuleIndex index);
    PTR_ThreadLocalModule GetTLMIfExists(MethodTable* pMT);

#ifdef DACCESS_COMPILE
    void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif
};
|---|
| 263 |  | 
|---|
| 264 | #ifdef CROSSGEN_COMPILE | 
|---|
| 265 |  | 
|---|
| 266 | #include "asmconstants.h" | 
|---|
| 267 |  | 
|---|
// Minimal stand-in for the runtime's Thread class, used only when building
// the CROSSGEN_COMPILE (ahead-of-time compilation) flavor of the runtime.
// Crossgen runs no managed code, so GC-mode transitions, suspension, and
// stack queries are stubbed to fixed, harmless answers. Only the pieces the
// type loader and marshalling allocator actually need are functional.
class Thread
{
    friend class ThreadStatics;

    // Per-thread statics storage (see ThreadLocalBlock above).
    ThreadLocalBlock m_ThreadLocalBlock;

public:
    // Stack-bounds checks are stubbed to always succeed in this build.
    BOOL IsAddressInStack (PTR_VOID addr) const { return TRUE; }
    static BOOL IsAddressInCurrentStack (PTR_VOID addr) { return TRUE; }

    Frame *IsRunningIn(AppDomain* pDomain, int *count) { return NULL; }

    // Checkpoint-based scratch allocator used during marshalling; rolled back
    // via DoReleaseCheckpoint/CheckPointHolder (defined later in this file).
    StackingAllocator    m_MarshalAlloc;

private:
    LoadLevelLimiter *m_pLoadLimiter;

public:
    LoadLevelLimiter *GetLoadLevelLimiter()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pLoadLimiter;
    }

    void SetLoadLevelLimiter(LoadLevelLimiter *limiter)
    {
        LIMITED_METHOD_CONTRACT;
        m_pLoadLimiter = limiter;
    }

    // Frame chain and GC safe-point queries are no-ops here: there is no
    // managed stack to walk during ahead-of-time compilation.
    PTR_Frame GetFrame() { return NULL; }
    void SetFrame(Frame *pFrame) { }
    DWORD CatchAtSafePoint() { return 0; }
    DWORD CatchAtSafePointOpportunistic() { return 0; }

    static void ObjectRefProtected(const OBJECTREF* ref) { }
    static void ObjectRefNew(const OBJECTREF* ref) { }

    // GC mode transitions are meaningless without a GC; see #CooperativeMode
    // at the top of this file for what these do in the real runtime.
    void EnablePreemptiveGC() { }
    void DisablePreemptiveGC() { }

    inline void IncLockCount() { }
    inline void DecLockCount() { }

    static LPVOID GetStaticFieldAddress(FieldDesc *pFD) { return NULL; }

    PTR_AppDomain GetDomain() { return ::GetAppDomain(); }

    DWORD GetThreadId() { return 0; }

    inline DWORD GetOverridesCount() { return 0; }
    inline BOOL CheckThreadWideSpecialFlag(DWORD flags) { return 0; }

    // Always preemptive: no thread in this build ever holds GC references.
    BOOL PreemptiveGCDisabled() { return false; }
    void PulseGCMode() { }

    OBJECTREF GetThrowable() { return NULL; }

    OBJECTREF LastThrownObject() { return NULL; }

    static BOOL Debug_AllowCallout() { return TRUE; }

    static void IncForbidSuspendThread() { }
    static void DecForbidSuspendThread() { }

    // The ForbidSuspendThreadHolder is used during the initialization of the stack marker infrastructure so
    // it can't do any backout stack validation (which is why we pass in VALIDATION_TYPE=HSV_NoValidation).
    typedef StateHolder<Thread::IncForbidSuspendThread, Thread::DecForbidSuspendThread, HSV_NoValidation> ForbidSuspendThreadHolder;

    // Field offsets come from asmconstants.h (included above) because this
    // stub does not declare the real Thread fields; each offset is asserted
    // to fit in a signed byte.
    static BYTE GetOffsetOfCurrentFrame()
    {
        LIMITED_METHOD_CONTRACT;
        size_t ofs = Thread_m_pFrame;
        _ASSERTE(FitsInI1(ofs));
        return (BYTE)ofs;
    }

    static BYTE GetOffsetOfGCFlag()
    {
        LIMITED_METHOD_CONTRACT;
        size_t ofs = Thread_m_fPreemptiveGCDisabled;
        _ASSERTE(FitsInI1(ofs));
        return (BYTE)ofs;
    }

    void SetLoadingFile(DomainFile *pFile)
    {
    }

    typedef Holder<Thread *, DoNothing, DoNothing> LoadingFileHolder;

    // No thread states are defined in this build, so HasThreadState* always
    // test against an empty mask.
    enum ThreadState
    {
    };

    BOOL HasThreadState(ThreadState ts)
    {
        LIMITED_METHOD_CONTRACT;
        return ((DWORD)m_State & ts);
    }

    BOOL HasThreadStateOpportunistic(ThreadState ts)
    {
        LIMITED_METHOD_CONTRACT;
        // Unsynchronized read: callers tolerate a stale answer.
        return m_State.LoadWithoutBarrier() & ts;
    }

    Volatile<ThreadState> m_State;

    // Subset of the "no concurrency" state bits actually used by crossgen
    // (primarily by the type loader). Values match the real Thread class.
    enum ThreadStateNoConcurrency
    {
        TSNC_OwnsSpinLock               = 0x00000400, // The thread owns a spinlock.

        TSNC_DisableOleaut32Check       = 0x00040000, // Disable oleaut32 delay load check.  Oleaut32 has
                                                      // been loaded

        TSNC_LoadsTypeViolation         = 0x40000000, // Use by type loader to break deadlocks caused by type load level ordering violations
    };

    ThreadStateNoConcurrency m_StateNC;

    void SetThreadStateNC(ThreadStateNoConcurrency tsnc)
    {
        LIMITED_METHOD_CONTRACT;
        m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC | tsnc);
    }

    void ResetThreadStateNC(ThreadStateNoConcurrency tsnc)
    {
        LIMITED_METHOD_CONTRACT;
        m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC & ~tsnc);
    }

    BOOL HasThreadStateNC(ThreadStateNoConcurrency tsnc)
    {
        LIMITED_METHOD_DAC_CONTRACT;
        return ((DWORD)m_StateNC & tsnc);
    }

    // Set while the type loader has a pending type load on this thread; used
    // to break type-load deadlocks.
    PendingTypeLoadHolder* m_pPendingTypeLoad;

#ifndef DACCESS_COMPILE
    PendingTypeLoadHolder* GetPendingTypeLoad()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pPendingTypeLoad;
    }

    void SetPendingTypeLoad(PendingTypeLoadHolder* pPendingTypeLoad)
    {
        LIMITED_METHOD_CONTRACT;
        m_pPendingTypeLoad = pPendingTypeLoad;
    }
#endif

#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
    enum ApartmentState { AS_Unknown };
#endif

#if defined(FEATURE_COMINTEROP) && defined(MDA_SUPPORTED)
    // RCW tracking is stubbed out: nothing is registered during crossgen.
    void RegisterRCW(RCW *pRCW)
    {
    }

    BOOL RegisterRCWNoThrow(RCW *pRCW)
    {
        return FALSE;
    }

    RCW *UnregisterRCW(INDEBUG(SyncBlock *pSB))
    {
        return NULL;
    }
#endif

    DWORD       m_dwLastError;
};
|---|
| 445 |  | 
|---|
| 446 | inline void DoReleaseCheckpoint(void *checkPointMarker) | 
|---|
| 447 | { | 
|---|
| 448 | WRAPPER_NO_CONTRACT; | 
|---|
| 449 | GetThread()->m_MarshalAlloc.Collapse(checkPointMarker); | 
|---|
| 450 | } | 
|---|
| 451 |  | 
|---|
| 452 | // CheckPointHolder : Back out to a checkpoint on the thread allocator. | 
|---|
| 453 | typedef Holder<void*, DoNothing,DoReleaseCheckpoint> CheckPointHolder; | 
|---|
| 454 |  | 
|---|
// CROSSGEN_COMPILE stand-in for the holder that (in the real runtime,
// presumably — its full definition lives elsewhere) marks a region where an
// access violation inside runtime code is tolerated. Here construction and
// destruction do nothing beyond the contract annotation.
class AVInRuntimeImplOkayHolder
{
public:
    AVInRuntimeImplOkayHolder()
    {
        LIMITED_METHOD_CONTRACT;
    }
    // Overload taking the affected thread; the argument is ignored in this build.
    AVInRuntimeImplOkayHolder(Thread * pThread)
    {
        LIMITED_METHOD_CONTRACT;
    }
    ~AVInRuntimeImplOkayHolder()
    {
        LIMITED_METHOD_CONTRACT;
    }
};
|---|
| 471 |  | 
|---|
// Debug-only query, stubbed for the CROSSGEN_COMPILE build: no thread is ever
// considered a "special EE thread" here.
inline BOOL dbgOnly_IsSpecialEEThread() { return FALSE; }
|---|
| 473 |  | 
|---|
| 474 | #define INCTHREADLOCKCOUNT() { } | 
|---|
| 475 | #define DECTHREADLOCKCOUNT() { } | 
|---|
| 476 | #define INCTHREADLOCKCOUNTTHREAD(thread) { } | 
|---|
| 477 | #define DECTHREADLOCKCOUNTTHREAD(thread) { } | 
|---|
| 478 |  | 
|---|
| 479 | #define FORBIDGC_LOADER_USE_ENABLED() false | 
|---|
| 480 | #define ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE()    ; | 
|---|
| 481 |  | 
|---|
| 482 | #define BEGIN_FORBID_TYPELOAD() | 
|---|
| 483 | #define END_FORBID_TYPELOAD() | 
|---|
| 484 | #define TRIGGERS_TYPELOAD() | 
|---|
| 485 |  | 
|---|
| 486 | #define TRIGGERSGC() ANNOTATION_GC_TRIGGERS | 
|---|
| 487 |  | 
|---|
// No-op in the CROSSGEN_COMPILE build; the real implementation (elsewhere)
// is the polling point where a thread stops for a pending suspension.
inline void CommonTripThread() { }
|---|
| 489 |  | 
|---|
| 490 | //current ad, always safe | 
|---|
| 491 | #define ADV_CURRENTAD   0 | 
|---|
| 492 | //default ad, never unloaded | 
|---|
| 493 | #define ADV_DEFAULTAD   1 | 
|---|
| 494 | // held by iterator, iterator holds a ref | 
|---|
| 495 | #define ADV_ITERATOR    2 | 
|---|
| 496 | // the appdomain is on the stack | 
|---|
| 497 | #define ADV_RUNNINGIN   4 | 
|---|
| 498 | // we're in process of creating the appdomain, refcount guaranteed to be >0 | 
|---|
| 499 | #define ADV_CREATING    8 | 
|---|
| 500 | // compilation domain - ngen guarantees it won't be unloaded until everyone left | 
|---|
| 501 | #define ADV_COMPILATION  0x10 | 
|---|
| 502 | // finalizer thread - synchronized with ADU | 
|---|
| 503 | #define ADV_FINALIZER     0x40 | 
|---|
| 504 | // held by AppDomainRefTaker | 
|---|
| 505 | #define ADV_REFTAKER    0x100 | 
|---|
| 506 |  | 
|---|
| 507 | #define CheckADValidity(pDomain,ADValidityKind) { } | 
|---|
| 508 |  | 
|---|
| 509 | #define ENTER_DOMAIN_PTR(_pDestDomain,ADValidityKind) { | 
|---|
| 510 | #define END_DOMAIN_TRANSITION } | 
|---|
| 511 |  | 
|---|
// CROSSGEN_COMPILE stand-in for DeadlockAwareLock: every operation is a no-op
// and every query reports success, so no actual locking or deadlock detection
// happens in this build.
class DeadlockAwareLock
{
public:
    // 'description' is accepted for interface compatibility and ignored.
    DeadlockAwareLock(const char *description = NULL) { }
    ~DeadlockAwareLock() { }

    // Always claims entering is safe; no cycle detection is performed here.
    BOOL CanEnterLock() { return TRUE; }

    // Acquisition protocol stubs: "try" always succeeds, begin/end/leave do nothing.
    BOOL TryBeginEnterLock() { return TRUE; }
    void BeginEnterLock() { }

    void EndEnterLock() { }

    void LeaveLock() { }

public:
    typedef StateHolder<DoNothing,DoNothing> BlockingLockHolder;
};
|---|
| 530 |  | 
|---|
| 531 | // Do not include threads.inl | 
|---|
| 532 | #define _THREADS_INL | 
|---|
| 533 |  | 
|---|
| 534 | typedef Thread::ForbidSuspendThreadHolder ForbidSuspendThreadHolder; | 
|---|
| 535 |  | 
|---|
| 536 | #else // CROSSGEN_COMPILE | 
|---|
| 537 |  | 
|---|
| 538 | #ifdef _TARGET_ARM_ | 
|---|
| 539 | #include "armsinglestepper.h" | 
|---|
| 540 | #endif | 
|---|
| 541 |  | 
|---|
| 542 | #if !defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND) | 
|---|
| 543 | // DISABLE_THREADSUSPEND controls whether Thread::SuspendThread will be used at all. | 
|---|
| 544 | //   This API is dangerous on non-Windows platforms, as it can lead to deadlocks, | 
|---|
| 545 | //   due to low level OS resources that the PAL is not aware of, or due to the fact that | 
|---|
| 546 | //   PAL-unaware code in the process may hold onto some OS resources. | 
|---|
| 547 | #define DISABLE_THREADSUSPEND | 
|---|
| 548 | #endif | 
|---|
| 549 |  | 
|---|
| 550 | // NT thread priorities range from -15 to +15. | 
|---|
| 551 | #define INVALID_THREAD_PRIORITY  ((DWORD)0x80000000) | 
|---|
| 552 |  | 
|---|
| 553 | // For a fiber which switched out, we set its OSID to a special number | 
|---|
| 554 | // Note: there's a copy of this macro in strike.cpp | 
|---|
| 555 | #define SWITCHED_OUT_FIBER_OSID 0xbaadf00d; | 
|---|
| 556 |  | 
|---|
| 557 | #ifdef _DEBUG | 
|---|
| 558 | // A thread doesn't recieve its id until fully constructed. | 
|---|
| 559 | #define UNINITIALIZED_THREADID 0xbaadf00d | 
|---|
| 560 | #endif //_DEBUG | 
|---|
| 561 |  | 
|---|
| 562 | // Capture all the synchronization requests, for debugging purposes | 
|---|
| 563 | #if defined(_DEBUG) && defined(TRACK_SYNC) | 
|---|
| 564 |  | 
|---|
| 565 | // Each thread has a stack that tracks all enter and leave requests | 
|---|
// Debug-only (TRACK_SYNC) interface for recording a thread's lock
// enter/leave requests; see the per-thread tracking comment above.
struct Dbg_TrackSync
{
    virtual ~Dbg_TrackSync() = default;

    // Record that the code at return address 'caller' entered 'pAwareLock'.
    virtual void EnterSync    (UINT_PTR caller, void *pAwareLock) = 0;
    // Record that the code at return address 'caller' left 'pAwareLock'.
    virtual void LeaveSync    (UINT_PTR caller, void *pAwareLock) = 0;
};
|---|
| 573 |  | 
|---|
| 574 | EXTERN_C void EnterSyncHelper    (UINT_PTR caller, void *pAwareLock); | 
|---|
| 575 | EXTERN_C void LeaveSyncHelper    (UINT_PTR caller, void *pAwareLock); | 
|---|
| 576 |  | 
|---|
| 577 | #endif  // TRACK_SYNC | 
|---|
| 578 |  | 
|---|
| 579 | //*************************************************************************** | 
|---|
| 580 | #ifdef FEATURE_HIJACK | 
|---|
| 581 |  | 
|---|
| 582 | // Used to capture information about the state of execution of a *SUSPENDED* thread. | 
|---|
| 583 | struct ExecutionState; | 
|---|
| 584 |  | 
|---|
| 585 | #ifndef PLATFORM_UNIX | 
|---|
| 586 | // This is the type of the start function of a redirected thread pulled from | 
|---|
| 587 | // a HandledJITCase during runtime suspension | 
|---|
| 588 | typedef void (__stdcall *PFN_REDIRECTTARGET)(); | 
|---|
| 589 |  | 
|---|
| 590 | // Describes the weird argument sets during hijacking | 
|---|
| 591 | struct HijackArgs; | 
|---|
| 592 | #endif // !PLATFORM_UNIX | 
|---|
| 593 |  | 
|---|
| 594 | #endif // FEATURE_HIJACK | 
|---|
| 595 |  | 
|---|
| 596 | //*************************************************************************** | 
|---|
| 597 | #ifdef ENABLE_CONTRACTS_IMPL | 
|---|
// Contracts build only: fetch the current Thread while explicitly permitting
// a NULL result. The BEGIN/END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION markers
// bracket the call — presumably to relax the contract checking GetThread()
// would otherwise perform (their definitions live outside this file).
// In non-contract builds this is #define'd straight to GetThread() below.
inline Thread* GetThreadNULLOk()
{
    LIMITED_METHOD_CONTRACT;
    Thread * pThread;
    BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
    pThread = GetThread();
    END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
    return pThread;
}
|---|
| 607 | #else | 
|---|
| 608 | #define GetThreadNULLOk() GetThread() | 
|---|
| 609 | #endif | 
|---|
| 610 |  | 
|---|
| 611 | // manifest constant for waiting in the exposed classlibs | 
|---|
| 612 | const INT32 INFINITE_TIMEOUT = -1; | 
|---|
| 613 |  | 
|---|
| 614 | /***************************************************************************/ | 
|---|
| 615 | // Public enum shared between thread and threadpool | 
|---|
// These are the kinds of threadpool thread that the threadpool mgr needs
// to keep track of
|---|
// Kinds of threadpool threads tracked by the threadpool manager.
enum ThreadpoolThreadType
{
    WorkerThread,           // runs queued work items
    CompletionPortThread,   // services the I/O completion port
    WaitThread,             // waits on registered wait handles
    TimerMgrThread          // services the timer manager
};
|---|
| 625 | //*************************************************************************** | 
|---|
| 626 | // Public functions | 
|---|
| 627 | // | 
|---|
| 628 | //      Thread* GetThread()             - returns current Thread | 
|---|
| 629 | //      Thread* SetupThread()           - creates new Thread. | 
|---|
| 630 | //      Thread* SetupUnstartedThread()  - creates new unstarted Thread which | 
|---|
| 631 | //                                        (obviously) isn't in a TLS. | 
|---|
| 632 | //      void    DestroyThread()         - the underlying logical thread is going | 
|---|
| 633 | //                                        away. | 
|---|
| 634 | //      void    DetachThread()          - the underlying logical thread is going | 
|---|
| 635 | //                                        away but we don't want to destroy it yet. | 
|---|
| 636 | // | 
|---|
| 637 | // Public functions for ASM code generators | 
|---|
| 638 | // | 
|---|
| 639 | //      Thread* __stdcall CreateThreadBlockThrow() - creates new Thread on reverse p-invoke | 
|---|
| 640 | // | 
|---|
| 641 | // Public functions for one-time init/cleanup | 
|---|
| 642 | // | 
|---|
| 643 | //      void InitThreadManager()      - onetime init | 
|---|
| 644 | //      void TerminateThreadManager() - onetime cleanup | 
|---|
| 645 | // | 
|---|
| 646 | // Public functions for taking control of a thread at a safe point | 
|---|
| 647 | // | 
|---|
| 648 | //      VOID OnHijackTripThread() - we've hijacked a JIT method | 
|---|
| 649 | //      VOID OnHijackFPTripThread() - we've hijacked a JIT method, | 
|---|
| 650 | //                                    and need to save the x87 FP stack. | 
|---|
| 651 | // | 
|---|
| 652 | //*************************************************************************** | 
|---|
| 653 |  | 
|---|
| 654 |  | 
|---|
| 655 | //*************************************************************************** | 
|---|
| 656 | // Public functions | 
|---|
| 657 | //*************************************************************************** | 
|---|
| 658 |  | 
|---|
//---------------------------------------------------------------------------
// Creates (if necessary) and returns the runtime Thread object for the
// current OS thread.  fInternal distinguishes runtime-internal threads
// (e.g. threadpool) from ordinary ones; see SetupInternalThread below.
//---------------------------------------------------------------------------
Thread* SetupThread(BOOL fInternal);
// Convenience wrapper: set up an ordinary (non-internal) thread.
inline Thread* SetupThread()
{
    WRAPPER_NO_CONTRACT;
    return SetupThread(FALSE);
}
|---|
// A host can deny a thread entering runtime by returning a NULL IHostTask.
// But we do want threads used by threadpool.
// Convenience wrapper: set up a runtime-internal thread (such as a
// threadpool thread), which a host is not allowed to deny.
inline Thread* SetupInternalThread()
{
    WRAPPER_NO_CONTRACT;
    return SetupThread(TRUE);
}
|---|
| 675 | Thread* SetupThreadNoThrow(HRESULT *phresult = NULL); | 
|---|
| 676 | // WARNING : only GC calls this with bRequiresTSL set to FALSE. | 
|---|
| 677 | Thread* SetupUnstartedThread(BOOL bRequiresTSL=TRUE); | 
|---|
| 678 | void    DestroyThread(Thread *th); | 
|---|
| 679 |  | 
|---|
| 680 | DWORD GetRuntimeId(); | 
|---|
| 681 |  | 
|---|
| 682 | EXTERN_C Thread* WINAPI CreateThreadBlockThrow(); | 
|---|
| 683 |  | 
|---|
| 684 | //--------------------------------------------------------------------------- | 
|---|
| 685 | // One-time initialization. Called during Dll initialization. | 
|---|
| 686 | //--------------------------------------------------------------------------- | 
|---|
| 687 | void InitThreadManager(); | 
|---|
| 688 |  | 
|---|
| 689 |  | 
|---|
| 690 | // When we want to take control of a thread at a safe point, the thread will | 
|---|
| 691 | // eventually come back to us in one of the following trip functions: | 
|---|
| 692 |  | 
|---|
| 693 | #ifdef FEATURE_HIJACK | 
|---|
| 694 |  | 
|---|
| 695 | EXTERN_C void WINAPI OnHijackTripThread(); | 
|---|
| 696 | #ifdef _TARGET_X86_ | 
|---|
| 697 | EXTERN_C void WINAPI OnHijackFPTripThread();  // hijacked JIT code is returning an FP value | 
|---|
| 698 | #endif // _TARGET_X86_ | 
|---|
| 699 |  | 
|---|
| 700 | #endif // FEATURE_HIJACK | 
|---|
| 701 |  | 
|---|
| 702 | void CommonTripThread(); | 
|---|
| 703 |  | 
|---|
| 704 | // When we resume a thread at a new location, to get an exception thrown, we have to | 
|---|
| 705 | // pretend the exception originated elsewhere. | 
|---|
| 706 | EXTERN_C void ThrowControlForThread( | 
|---|
| 707 | #ifdef WIN64EXCEPTIONS | 
|---|
| 708 | FaultingExceptionFrame *pfef | 
|---|
| 709 | #endif // WIN64EXCEPTIONS | 
|---|
| 710 | ); | 
|---|
| 711 |  | 
|---|
// RWLock state inside TLS.  Each entry records one reader-writer lock the
// thread participates in; entries form a doubly-linked list.
struct LockEntry
{
    LockEntry *pNext;       // next entry
    LockEntry *pPrev;       // prev entry
    LONG dwULockID;         // owning lock ID, upper part (assumed from the U/L naming — confirm against RWLock code)
    LONG dwLLockID;         // owning lock ID, lower part
    WORD wReaderLevel;      // reader nesting level
};
|---|
| 721 |  | 
|---|
| 722 | #if defined(_DEBUG) | 
|---|
| 723 | BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId ); | 
|---|
| 724 | #endif | 
|---|
| 725 |  | 
|---|
| 726 | #ifdef FEATURE_COMINTEROP | 
|---|
| 727 |  | 
|---|
| 728 | #define RCW_STACK_SIZE 64 | 
|---|
| 729 |  | 
|---|
| 730 | class RCWStack | 
|---|
| 731 | { | 
|---|
| 732 | public: | 
|---|
| 733 | inline RCWStack() | 
|---|
| 734 | { | 
|---|
| 735 | LIMITED_METHOD_CONTRACT; | 
|---|
| 736 | memset(this, 0, sizeof(RCWStack)); | 
|---|
| 737 | } | 
|---|
| 738 |  | 
|---|
| 739 | inline VOID SetEntry(unsigned int index, RCW* pRCW) | 
|---|
| 740 | { | 
|---|
| 741 | CONTRACTL | 
|---|
| 742 | { | 
|---|
| 743 | NOTHROW; | 
|---|
| 744 | GC_NOTRIGGER; | 
|---|
| 745 | MODE_ANY; | 
|---|
| 746 | PRECONDITION(index < RCW_STACK_SIZE); | 
|---|
| 747 | PRECONDITION(CheckPointer(pRCW, NULL_OK)); | 
|---|
| 748 | } | 
|---|
| 749 | CONTRACTL_END; | 
|---|
| 750 |  | 
|---|
| 751 | m_pList[index] = pRCW; | 
|---|
| 752 | } | 
|---|
| 753 |  | 
|---|
| 754 | inline RCW* GetEntry(unsigned int index) | 
|---|
| 755 | { | 
|---|
| 756 | CONTRACT (RCW*) | 
|---|
| 757 | { | 
|---|
| 758 | NOTHROW; | 
|---|
| 759 | GC_NOTRIGGER; | 
|---|
| 760 | MODE_ANY; | 
|---|
| 761 | PRECONDITION(index < RCW_STACK_SIZE); | 
|---|
| 762 | } | 
|---|
| 763 | CONTRACT_END; | 
|---|
| 764 |  | 
|---|
| 765 | RETURN m_pList[index]; | 
|---|
| 766 | } | 
|---|
| 767 |  | 
|---|
| 768 | inline VOID SetNextStack(RCWStack* pStack) | 
|---|
| 769 | { | 
|---|
| 770 | CONTRACTL | 
|---|
| 771 | { | 
|---|
| 772 | NOTHROW; | 
|---|
| 773 | GC_NOTRIGGER; | 
|---|
| 774 | MODE_ANY; | 
|---|
| 775 | PRECONDITION(CheckPointer(pStack)); | 
|---|
| 776 | PRECONDITION(m_pNext == NULL); | 
|---|
| 777 | } | 
|---|
| 778 | CONTRACTL_END; | 
|---|
| 779 |  | 
|---|
| 780 | m_pNext = pStack; | 
|---|
| 781 | } | 
|---|
| 782 |  | 
|---|
| 783 | inline RCWStack* GetNextStack() | 
|---|
| 784 | { | 
|---|
| 785 | CONTRACT (RCWStack*) | 
|---|
| 786 | { | 
|---|
| 787 | NOTHROW; | 
|---|
| 788 | GC_NOTRIGGER; | 
|---|
| 789 | MODE_ANY; | 
|---|
| 790 | POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); | 
|---|
| 791 | } | 
|---|
| 792 | CONTRACT_END; | 
|---|
| 793 |  | 
|---|
| 794 | RETURN m_pNext; | 
|---|
| 795 | } | 
|---|
| 796 |  | 
|---|
| 797 | private: | 
|---|
| 798 | RCWStack*   m_pNext; | 
|---|
| 799 | RCW*        m_pList[RCW_STACK_SIZE]; | 
|---|
| 800 | }; | 
|---|
| 801 |  | 
|---|
| 802 |  | 
|---|
// Growable stack of RCW pointers.  Storage is a chain of fixed-size
// RCWStack chunks; m_iIndex is the logical stack depth across the whole
// chain.  Capacity only ever grows (chunks are not freed until the dtor).
// NOTE(review): no locking here — presumably used only by its owning
// thread; confirm before calling cross-thread.
class RCWStackHeader
{
public:
    RCWStackHeader()
    {
        CONTRACTL
        {
            THROWS;
            GC_NOTRIGGER;
            MODE_ANY;
        }
        CONTRACTL_END;

        m_iIndex = 0;
        m_iSize = RCW_STACK_SIZE;
        // Always keep at least one chunk; allocation failure throws
        // (constructor is THROWS).
        m_pHead = new RCWStack();
    }

    ~RCWStackHeader()
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
        }
        CONTRACTL_END;

        // Free every chunk in the chain.
        RCWStack* pStack = m_pHead;
        RCWStack* pNextStack = NULL;

        while (pStack)
        {
            pNextStack = pStack->GetNextStack();
            delete pStack;
            pStack = pNextStack;
        }
    }

    // Pushes pRCW (which may be NULL) onto the stack, growing the chunk
    // chain first if the stack is full.  Returns false only if growing
    // fails (OOM); the stack is unchanged in that case.
    bool Push(RCW* pRCW)
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(CheckPointer(pRCW, NULL_OK));
        }
        CONTRACTL_END;

        if (!GrowListIfNeeded())
            return false;

        // Fast Path: the target slot lives in the first chunk.
        if (m_iIndex < RCW_STACK_SIZE)
        {
            m_pHead->SetEntry(m_iIndex, pRCW);
            m_iIndex++;
            return true;
        }

        // Slow Path: walk the chain until 'count' is the slot index within
        // the chunk that holds logical position m_iIndex.
        unsigned int count = m_iIndex;
        RCWStack* pStack = m_pHead;
        while (count >= RCW_STACK_SIZE)
        {
            pStack = pStack->GetNextStack();
            _ASSERTE(pStack);

            count -= RCW_STACK_SIZE;
        }

        pStack->SetEntry(count, pRCW);
        m_iIndex++;
        return true;
    }

    // Pops and returns the top entry (which may be NULL).  The vacated slot
    // is cleared so no stale RCW pointer is left behind.  Popping an empty
    // stack is a contract violation (see PRECONDITION).
    RCW* Pop()
    {
        CONTRACT (RCW*)
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(m_iIndex > 0);
            POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        }
        CONTRACT_END;

        RCW* pRCW = NULL;

        // After this decrement, m_iIndex is the logical slot of the former
        // top entry.
        m_iIndex--;

        // Fast Path: the entry lives in the first chunk.
        if (m_iIndex < RCW_STACK_SIZE)
        {
            pRCW = m_pHead->GetEntry(m_iIndex);
            m_pHead->SetEntry(m_iIndex, NULL);
            RETURN pRCW;
        }

        // Slow Path: walk the chain to the chunk containing the entry.
        unsigned int count = m_iIndex;
        RCWStack* pStack = m_pHead;
        while (count >= RCW_STACK_SIZE)
        {
            pStack = pStack->GetNextStack();
            _ASSERTE(pStack);
            count -= RCW_STACK_SIZE;
        }

        pRCW = pStack->GetEntry(count);
        pStack->SetEntry(count, NULL);

        RETURN pRCW;
    }

    // Returns TRUE if pRCW appears anywhere among the live entries
    // (linear scan).
    BOOL IsInStack(RCW* pRCW)
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            PRECONDITION(CheckPointer(pRCW));
        }
        CONTRACTL_END;

        if (m_iIndex == 0)
            return FALSE;

        // Fast Path: all live entries fit in the first chunk.
        if (m_iIndex <= RCW_STACK_SIZE)
        {
            for (int i = 0; i < (int)m_iIndex; i++)
            {
                if (pRCW == m_pHead->GetEntry(i))
                    return TRUE;
            }

            return FALSE;
        }

        // Slow Path: scan chunk by chunk, stopping once m_iIndex live
        // entries have been examined.
        RCWStack* pStack = m_pHead;
        int totalcount = 0;
        while (pStack != NULL)
        {
            for (int i = 0; (i < RCW_STACK_SIZE) && (totalcount < m_iIndex); i++, totalcount++)
            {
                if (pRCW == pStack->GetEntry(i))
                    return TRUE;
            }

            pStack = pStack->GetNextStack();
        }

        return FALSE;
    }

private:
    // Ensures capacity for one more entry: when the stack is full
    // (m_iIndex == m_iSize), appends a new chunk at the tail of the chain.
    // Returns false on allocation failure, leaving the stack unchanged.
    bool GrowListIfNeeded()
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            INJECT_FAULT(COMPlusThrowOM());
            PRECONDITION(CheckPointer(m_pHead));
        }
        CONTRACTL_END;

        if (m_iIndex == m_iSize)
        {
            // Find the current tail chunk.
            RCWStack* pStack = m_pHead;
            RCWStack* pNextStack = NULL;
            while ( (pNextStack = pStack->GetNextStack()) != NULL)
                pStack = pNextStack;

            RCWStack* pNewStack = new (nothrow) RCWStack();
            if (NULL == pNewStack)
                return false;

            pStack->SetNextStack(pNewStack);

            m_iSize += RCW_STACK_SIZE;
        }

        return true;
    }

    // Zero-based index to the first free element in the list.
    int        m_iIndex;

    // Total size of the list, including all stacks.
    int        m_iSize;

    // Pointer to the first stack.
    RCWStack*           m_pHead;
};
|---|
| 1004 |  | 
|---|
| 1005 | #endif // FEATURE_COMINTEROP | 
|---|
| 1006 |  | 
|---|
| 1007 |  | 
|---|
| 1008 | typedef DWORD (*AppropriateWaitFunc) (void *args, DWORD timeout, DWORD option); | 
|---|
| 1009 |  | 
|---|
| 1010 | // The Thread class represents a managed thread.  This thread could be internal | 
|---|
| 1011 | // or external (i.e. it wandered in from outside the runtime).  For internal | 
|---|
| 1012 | // threads, it could correspond to an exposed System.Thread object or it | 
|---|
| 1013 | // could correspond to an internal worker thread of the runtime. | 
|---|
| 1014 | // | 
|---|
| 1015 | // If there's a physical Win32 thread underneath this object (i.e. it isn't an | 
|---|
| 1016 | // unstarted System.Thread), then this instance can be found in the TLS | 
|---|
| 1017 | // of that physical thread. | 
|---|
| 1018 |  | 
|---|
| 1019 | // FEATURE_MULTIREG_RETURN is set for platforms where a struct return value | 
|---|
| 1020 | // [GcInfo v2 only]        can be returned in multiple registers | 
|---|
| 1021 | //                         ex: Windows/Unix ARM/ARM64, Unix-AMD64. | 
|---|
| 1022 | // | 
|---|
| 1023 | // | 
|---|
| 1024 | // UNIX_AMD64_ABI is a specific kind of FEATURE_MULTIREG_RETURN | 
|---|
| 1025 | // [GcInfo v1 and v2]       specified by SystemV ABI for AMD64 | 
|---|
| 1026 | // | 
|---|
| 1027 |  | 
|---|
| 1028 | #ifdef FEATURE_HIJACK                                                    // Hijack function returning | 
|---|
| 1029 | EXTERN_C void STDCALL OnHijackWorker(HijackArgs * pArgs); | 
|---|
| 1030 | #endif // FEATURE_HIJACK | 
|---|
| 1031 |  | 
|---|
| 1032 | // This is the code we pass around for Thread.Interrupt, mainly for assertions | 
|---|
| 1033 | #define APC_Code    0xEECEECEE | 
|---|
| 1034 |  | 
|---|
| 1035 | #ifdef DACCESS_COMPILE | 
|---|
| 1036 | class BaseStackGuard; | 
|---|
| 1037 | #endif | 
|---|
| 1038 |  | 
|---|
| 1039 | // #ThreadClass | 
|---|
| 1040 | // | 
|---|
| 1041 | // A code:Thread contains all the per-thread information needed by the runtime.  You can get at this | 
|---|
// structure through the OS TLS slot; see code:#RuntimeThreadLocals for more information.
|---|
| 1043 | // Implementing IUnknown would prevent the field (e.g. m_Context) layout from being rearranged (which will need to be fixed in | 
|---|
| 1044 | // "asmconstants.h" for the respective architecture). As it is, ICLRTask derives from IUnknown and would have got IUnknown implemented | 
|---|
| 1045 | // here - so doing this explicitly and maintaining layout sanity should be just fine. | 
|---|
| 1046 | class Thread: public IUnknown | 
|---|
| 1047 | { | 
|---|
| 1048 | friend struct ThreadQueue;  // used to enqueue & dequeue threads onto SyncBlocks | 
|---|
| 1049 | friend class  ThreadStore; | 
|---|
| 1050 | friend class  ThreadSuspend; | 
|---|
| 1051 | friend class  SyncBlock; | 
|---|
| 1052 | friend struct PendingSync; | 
|---|
| 1053 | friend class  AppDomain; | 
|---|
| 1054 | friend class  ThreadNative; | 
|---|
| 1055 | friend class  DeadlockAwareLock; | 
|---|
| 1056 | #ifdef _DEBUG | 
|---|
| 1057 | friend class  EEContract; | 
|---|
| 1058 | #endif | 
|---|
| 1059 | #ifdef DACCESS_COMPILE | 
|---|
| 1060 | friend class ClrDataAccess; | 
|---|
| 1061 | friend class ClrDataTask; | 
|---|
| 1062 | #endif | 
|---|
| 1063 |  | 
|---|
| 1064 | friend BOOL NTGetThreadContext(Thread *pThread, T_CONTEXT *pContext); | 
|---|
| 1065 | friend BOOL NTSetThreadContext(Thread *pThread, const T_CONTEXT *pContext); | 
|---|
| 1066 |  | 
|---|
| 1067 | friend void CommonTripThread(); | 
|---|
| 1068 |  | 
|---|
| 1069 | #ifdef FEATURE_HIJACK | 
|---|
| 1070 | // MapWin32FaultToCOMPlusException needs access to Thread::IsAddrOfRedirectFunc() | 
|---|
| 1071 | friend DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord); | 
|---|
| 1072 | friend void STDCALL OnHijackWorker(HijackArgs * pArgs); | 
|---|
| 1073 | #ifdef PLATFORM_UNIX | 
|---|
| 1074 | friend void HandleGCSuspensionForInterruptedThread(CONTEXT *interruptedContext); | 
|---|
| 1075 | #endif // PLATFORM_UNIX | 
|---|
| 1076 |  | 
|---|
| 1077 | #endif // FEATURE_HIJACK | 
|---|
| 1078 |  | 
|---|
| 1079 | friend void         InitThreadManager(); | 
|---|
| 1080 | friend void         ThreadBaseObject::SetDelegate(OBJECTREF delegate); | 
|---|
| 1081 |  | 
|---|
| 1082 | friend void CallFinalizerOnThreadObject(Object *obj); | 
|---|
| 1083 |  | 
|---|
| 1084 | friend class ContextTransitionFrame;  // To set m_dwBeginLockCount | 
|---|
| 1085 |  | 
|---|
| 1086 | // Debug and Profiler caches ThreadHandle. | 
|---|
| 1087 | friend class Debugger;                  // void Debugger::ThreadStarted(Thread* pRuntimeThread, BOOL fAttaching); | 
|---|
| 1088 | #if defined(DACCESS_COMPILE) | 
|---|
| 1089 | friend class DacDbiInterfaceImpl;       // DacDbiInterfaceImpl::GetThreadHandle(HANDLE * phThread); | 
|---|
| 1090 | #endif // DACCESS_COMPILE | 
|---|
| 1091 | friend class ProfToEEInterfaceImpl;     // HRESULT ProfToEEInterfaceImpl::GetHandleFromThread(ThreadID threadId, HANDLE *phThread); | 
|---|
| 1092 | friend class CExecutionEngine; | 
|---|
| 1093 | friend class UnC; | 
|---|
| 1094 | friend class CheckAsmOffsets; | 
|---|
| 1095 |  | 
|---|
| 1096 | friend class ExceptionTracker; | 
|---|
| 1097 | friend class ThreadExceptionState; | 
|---|
| 1098 |  | 
|---|
| 1099 | friend class StackFrameIterator; | 
|---|
| 1100 |  | 
|---|
| 1101 | friend class ThreadStatics; | 
|---|
| 1102 |  | 
|---|
| 1103 | VPTR_BASE_CONCRETE_VTABLE_CLASS(Thread) | 
|---|
| 1104 |  | 
|---|
| 1105 | public: | 
|---|
| 1106 | enum SetThreadStackGuaranteeScope { STSGuarantee_Force, STSGuarantee_OnlyIfEnabled }; | 
|---|
| 1107 | static BOOL IsSetThreadStackGuaranteeInUse(SetThreadStackGuaranteeScope fScope = STSGuarantee_OnlyIfEnabled) | 
|---|
| 1108 | { | 
|---|
| 1109 | WRAPPER_NO_CONTRACT; | 
|---|
| 1110 |  | 
|---|
| 1111 | if(STSGuarantee_Force == fScope) | 
|---|
| 1112 | return TRUE; | 
|---|
| 1113 |  | 
|---|
| 1114 | //The runtime must be hosted to have escalation policy | 
|---|
| 1115 | //If escalation policy is enabled but StackOverflow is not part of the policy | 
|---|
| 1116 | //   then we don't use SetThreadStackGuarantee | 
|---|
| 1117 | if(!CLRHosted() || | 
|---|
| 1118 | GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeExitProcess) | 
|---|
| 1119 | { | 
|---|
| 1120 | //FAIL_StackOverflow is ProcessExit so don't use SetThreadStackGuarantee | 
|---|
| 1121 | return FALSE; | 
|---|
| 1122 | } | 
|---|
| 1123 | return TRUE; | 
|---|
| 1124 | } | 
|---|
| 1125 |  | 
|---|
| 1126 | public: | 
|---|
| 1127 |  | 
|---|
| 1128 | // If we are trying to suspend a thread, we set the appropriate pending bit to | 
|---|
| 1129 | // indicate why we want to suspend it (TS_GCSuspendPending, TS_UserSuspendPending, | 
|---|
| 1130 | // TS_DebugSuspendPending). | 
|---|
| 1131 | // | 
|---|
| 1132 | // If instead the thread has blocked itself, via WaitSuspendEvent, we indicate | 
|---|
| 1133 | // this with TS_SyncSuspended.  However, we need to know whether the synchronous | 
|---|
| 1134 | // suspension is for a user request, or for an internal one (GC & Debug).  That's | 
|---|
| 1135 | // because a user request is not allowed to resume a thread suspended for | 
|---|
| 1136 | // debugging or GC.  -- That's not stricly true.  It is allowed to resume such a | 
|---|
| 1137 | // thread so long as it was ALSO suspended by the user.  In other words, this | 
|---|
| 1138 | // ensures that user resumptions aren't unbalanced from user suspensions. | 
|---|
| 1139 | // | 
|---|
| 1140 | enum ThreadState | 
|---|
| 1141 | { | 
|---|
| 1142 | TS_Unknown                = 0x00000000,    // threads are initialized this way | 
|---|
| 1143 |  | 
|---|
| 1144 | TS_AbortRequested         = 0x00000001,    // Abort the thread | 
|---|
| 1145 | TS_GCSuspendPending       = 0x00000002,    // waiting to get to safe spot for GC | 
|---|
| 1146 | TS_UserSuspendPending     = 0x00000004,    // user suspension at next opportunity | 
|---|
| 1147 | TS_DebugSuspendPending    = 0x00000008,    // Is the debugger suspending threads? | 
|---|
| 1148 | TS_GCOnTransitions        = 0x00000010,    // Force a GC on stub transitions (GCStress only) | 
|---|
| 1149 |  | 
|---|
| 1150 | TS_LegalToJoin            = 0x00000020,    // Is it now legal to attempt a Join() | 
|---|
| 1151 |  | 
|---|
| 1152 | // unused                 = 0x00000040, | 
|---|
| 1153 |  | 
|---|
| 1154 | #ifdef FEATURE_HIJACK | 
|---|
| 1155 | TS_Hijacked               = 0x00000080,    // Return address has been hijacked | 
|---|
| 1156 | #endif // FEATURE_HIJACK | 
|---|
| 1157 |  | 
|---|
| 1158 | TS_BlockGCForSO           = 0x00000100,    // If a thread does not have enough stack, WaitUntilGCComplete may fail. | 
|---|
| 1159 | // Either GC suspension will wait until the thread has cleared this bit, | 
|---|
| 1160 | // Or the current thread is going to spin if GC has suspended all threads. | 
|---|
| 1161 | TS_Background             = 0x00000200,    // Thread is a background thread | 
|---|
| 1162 | TS_Unstarted              = 0x00000400,    // Thread has never been started | 
|---|
| 1163 | TS_Dead                   = 0x00000800,    // Thread is dead | 
|---|
| 1164 |  | 
|---|
| 1165 | TS_WeOwn                  = 0x00001000,    // Exposed object initiated this thread | 
|---|
| 1166 | #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT | 
|---|
| 1167 | TS_CoInitialized          = 0x00002000,    // CoInitialize has been called for this thread | 
|---|
| 1168 |  | 
|---|
| 1169 | TS_InSTA                  = 0x00004000,    // Thread hosts an STA | 
|---|
| 1170 | TS_InMTA                  = 0x00008000,    // Thread is part of the MTA | 
|---|
| 1171 | #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT | 
|---|
| 1172 |  | 
|---|
| 1173 | // Some bits that only have meaning for reporting the state to clients. | 
|---|
| 1174 | TS_ReportDead             = 0x00010000,    // in WaitForOtherThreads() | 
|---|
| 1175 | TS_FullyInitialized       = 0x00020000,    // Thread is fully initialized and we are ready to broadcast its existence to external clients | 
|---|
| 1176 |  | 
|---|
| 1177 | TS_TaskReset              = 0x00040000,    // The task is reset | 
|---|
| 1178 |  | 
|---|
| 1179 | TS_SyncSuspended          = 0x00080000,    // Suspended via WaitSuspendEvent | 
|---|
| 1180 | TS_DebugWillSync          = 0x00100000,    // Debugger will wait for this thread to sync | 
|---|
| 1181 |  | 
|---|
| 1182 | TS_StackCrawlNeeded       = 0x00200000,    // A stackcrawl is needed on this thread, such as for thread abort | 
|---|
| 1183 | // See comment for s_pWaitForStackCrawlEvent for reason. | 
|---|
| 1184 |  | 
|---|
| 1185 | TS_SuspendUnstarted       = 0x00400000,    // latch a user suspension on an unstarted thread | 
|---|
| 1186 |  | 
|---|
| 1187 | TS_Aborted                = 0x00800000,    // is the thread aborted? | 
|---|
| 1188 | TS_TPWorkerThread         = 0x01000000,    // is this a threadpool worker thread? | 
|---|
| 1189 |  | 
|---|
| 1190 | TS_Interruptible          = 0x02000000,    // sitting in a Sleep(), Wait(), Join() | 
|---|
| 1191 | TS_Interrupted            = 0x04000000,    // was awakened by an interrupt APC. !!! This can be moved to TSNC | 
|---|
| 1192 |  | 
|---|
| 1193 | TS_CompletionPortThread   = 0x08000000,    // Completion port thread | 
|---|
| 1194 |  | 
|---|
| 1195 | TS_AbortInitiated         = 0x10000000,    // set when abort is begun | 
|---|
| 1196 |  | 
|---|
| 1197 | TS_Finalized              = 0x20000000,    // The associated managed Thread object has been finalized. | 
|---|
| 1198 | // We can clean up the unmanaged part now. | 
|---|
| 1199 |  | 
|---|
| 1200 | TS_FailStarted            = 0x40000000,    // The thread fails during startup. | 
|---|
| 1201 | TS_Detached               = 0x80000000,    // Thread was detached by DllMain | 
|---|
| 1202 |  | 
|---|
| 1203 | // <TODO> @TODO: We need to reclaim the bits that have no concurrency issues (i.e. they are only | 
|---|
| 1204 | //         manipulated by the owning thread) and move them off to a different DWORD.  Note if this | 
|---|
| 1205 | //         enum is changed, we also need to update SOS to reflect this.</TODO> | 
|---|
| 1206 |  | 
|---|
| 1207 | // We require (and assert) that the following bits are less than 0x100. | 
|---|
| 1208 | TS_CatchAtSafePoint = (TS_UserSuspendPending | TS_AbortRequested | | 
|---|
| 1209 | TS_GCSuspendPending | TS_DebugSuspendPending | TS_GCOnTransitions), | 
|---|
| 1210 | }; | 
|---|
| 1211 |  | 
|---|
| 1212 | // Thread flags that aren't really states in themselves but rather things the thread | 
|---|
| 1213 | // has to do. | 
|---|
| 1214 | enum ThreadTasks | 
|---|
| 1215 | { | 
|---|
| 1216 | TT_CleanupSyncBlock       = 0x00000001, // The synch block needs to be cleaned up. | 
|---|
| 1217 | #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT | 
|---|
| 1218 | TT_CallCoInitialize       = 0x00000002, // CoInitialize needs to be called. | 
|---|
| 1219 | #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT | 
|---|
| 1220 | }; | 
|---|
| 1221 |  | 
|---|
| 1222 | // Thread flags that have no concurrency issues (i.e., they are only manipulated by the owning thread). Use these | 
|---|
| 1223 | // state flags when you have a new thread state that doesn't belong in the ThreadState enum above. | 
|---|
| 1224 | // | 
|---|
| 1225 | // <TODO>@TODO: its possible that the ThreadTasks from above and these flags should be merged.</TODO> | 
|---|
| 1226 | enum ThreadStateNoConcurrency | 
|---|
| 1227 | { | 
|---|
| 1228 | TSNC_Unknown                    = 0x00000000, // threads are initialized this way | 
|---|
| 1229 |  | 
|---|
| 1230 | TSNC_DebuggerUserSuspend        = 0x00000001, // marked "suspended" by the debugger | 
|---|
| 1231 | TSNC_DebuggerReAbort            = 0x00000002, // thread needs to re-abort itself when resumed by the debugger | 
|---|
| 1232 | TSNC_DebuggerIsStepping         = 0x00000004, // debugger is stepping this thread | 
|---|
| 1233 | TSNC_DebuggerIsManagedException = 0x00000008, // EH is re-raising a managed exception. | 
|---|
| 1234 | TSNC_WaitUntilGCFinished        = 0x00000010, // The current thread is waiting for GC.  If host returns | 
|---|
| 1235 | // SO during wait, we will either spin or make GC wait. | 
|---|
| 1236 | TSNC_BlockedForShutdown         = 0x00000020, // Thread is blocked in WaitForEndOfShutdown.  We should not hit WaitForEndOfShutdown again. | 
|---|
| 1237 | TSNC_SOWorkNeeded               = 0x00000040, // The thread needs to wake up AD unload helper thread to finish SO work | 
|---|
| 1238 | TSNC_CLRCreatedThread           = 0x00000080, // The thread was created through Thread::CreateNewThread | 
|---|
| 1239 | TSNC_ExistInThreadStore         = 0x00000100, // For dtor to know if it needs to be removed from ThreadStore | 
|---|
| 1240 | TSNC_UnsafeSkipEnterCooperative = 0x00000200, // This is a "fix" for deadlocks caused when cleaning up COM | 
|---|
| 1241 | TSNC_OwnsSpinLock               = 0x00000400, // The thread owns a spinlock. | 
|---|
| 1242 | TSNC_PreparingAbort             = 0x00000800, // Preparing abort.  This avoids recursive HandleThreadAbort call. | 
|---|
| 1243 | TSNC_OSAlertableWait            = 0x00001000, // Preparing abort.  This avoids recursive HandleThreadAbort call. | 
|---|
| 1244 | TSNC_ADUnloadHelper             = 0x00002000, // This thread is AD Unload helper. | 
|---|
| 1245 | TSNC_CreatingTypeInitException  = 0x00004000, // Thread is trying to create a TypeInitException | 
|---|
| 1246 | // unused                       = 0x00008000, | 
|---|
| 1247 | TSNC_AppDomainContainUnhandled  = 0x00010000, // Used to control how unhandled exception reporting occurs. | 
|---|
| 1248 | // See detailed explanation for this bit in threads.cpp | 
|---|
| 1249 | TSNC_InRestoringSyncBlock       = 0x00020000, // The thread is restoring its SyncBlock for Object.Wait. | 
|---|
| 1250 | // After the thread is interrupted once, we turn off interruption | 
|---|
| 1251 | // at the beginning of wait. | 
|---|
| 1252 | TSNC_DisableOleaut32Check       = 0x00040000, // Disable oleaut32 delay load check.  Oleaut32 has | 
|---|
| 1253 | // been loaded | 
|---|
| 1254 | TSNC_CannotRecycle              = 0x00080000, // A host can not recycle this Thread object.  When a thread | 
|---|
| 1255 | // has orphaned lock, we will apply this. | 
|---|
| 1256 | TSNC_RaiseUnloadEvent           = 0x00100000, // Finalize thread is raising managed unload event which | 
|---|
| 1257 | // may call AppDomain.Unload. | 
|---|
| 1258 | TSNC_UnbalancedLocks            = 0x00200000, // Do not rely on lock accounting for this thread: | 
|---|
| 1259 | // we left an app domain with a lock count different from | 
|---|
| 1260 | // when we entered it | 
|---|
| 1261 | TSNC_DisableSOCheckInHCALL      = 0x00400000, // Some HCALL method may be called directly from VM. | 
|---|
| 1262 | // We can not assert they are called in SOTolerant | 
|---|
| 1263 | // region. | 
|---|
| 1264 | TSNC_IgnoreUnhandledExceptions  = 0x00800000, // Set for a managed thread born inside an appdomain created with the APPDOMAIN_IGNORE_UNHANDLED_EXCEPTIONS flag. | 
|---|
| 1265 | TSNC_ProcessedUnhandledException = 0x01000000,// Set on a thread on which we have done unhandled exception processing so that | 
|---|
| 1266 | // we dont perform it again when OS invokes our UEF. Currently, applicable threads include: | 
|---|
| 1267 | // 1) entry point thread of a managed app | 
|---|
| 1268 | // 2) new managed thread created in default domain | 
|---|
| 1269 | // | 
|---|
| 1270 | // For such threads, we will return to the OS after our UE processing is done | 
|---|
| 1271 | // and the OS will start invoking the UEFs. If our UEF gets invoked, it will try to | 
|---|
| 1272 | // perform the UE processing again. We will use this flag to prevent the duplicated | 
|---|
| 1273 | // effort. | 
|---|
| 1274 | // | 
|---|
| 1275 | // Once we are completely independent of the OS UEF, we could remove this. | 
|---|
| 1276 | TSNC_InsideSyncContextWait      = 0x02000000, // Whether we are inside DoSyncContextWait | 
|---|
| 1277 | TSNC_DebuggerSleepWaitJoin      = 0x04000000, // Indicates to the debugger that this thread is in a sleep wait or join state | 
|---|
| 1278 | // This almost mirrors the TS_Interruptible state however that flag can change | 
|---|
| 1279 | // during GC-preemptive mode whereas this one cannot. | 
|---|
| 1280 | #ifdef FEATURE_COMINTEROP | 
|---|
| 1281 | TSNC_WinRTInitialized           = 0x08000000, // the thread has initialized WinRT | 
|---|
| 1282 | #endif // FEATURE_COMINTEROP | 
|---|
| 1283 |  | 
|---|
| 1284 | // TSNC_Unused                  = 0x10000000, | 
|---|
| 1285 |  | 
|---|
| 1286 | TSNC_CallingManagedCodeDisabled = 0x20000000, // Use by multicore JIT feature to asert on calling managed code/loading module in background thread | 
|---|
| 1287 | // Exception, system module is allowed, security demand is allowed | 
|---|
| 1288 |  | 
|---|
| 1289 | TSNC_LoadsTypeViolation         = 0x40000000, // Use by type loader to break deadlocks caused by type load level ordering violations | 
|---|
| 1290 |  | 
|---|
| 1291 | TSNC_EtwStackWalkInProgress     = 0x80000000, // Set on the thread so that ETW can know that stackwalking is in progress | 
|---|
| 1292 | // and does not proceed with a stackwalk on the same thread | 
|---|
| 1293 | // There are cases during managed debugging when we can run into this situation | 
|---|
| 1294 | }; | 
|---|
| 1295 |  | 
|---|
// Functions called by host.
// Under DACCESS_COMPILE the DAC_EMPTY_RET macro expands each of these into a
// stub body returning the given value; otherwise it terminates a declaration.
STDMETHODIMP    QueryInterface(REFIID riid, void** ppv)
    DAC_EMPTY_RET(E_NOINTERFACE);
STDMETHODIMP_(ULONG) AddRef(void)
    DAC_EMPTY_RET(0);
STDMETHODIMP_(ULONG) Release(void)
    DAC_EMPTY_RET(0);
STDMETHODIMP Abort()
    DAC_EMPTY_RET(E_FAIL);
STDMETHODIMP RudeAbort()
    DAC_EMPTY_RET(E_FAIL);
STDMETHODIMP NeedsPriorityScheduling(BOOL *pbNeedsPriorityScheduling)
    DAC_EMPTY_RET(E_FAIL);

STDMETHODIMP YieldTask()
    DAC_EMPTY_RET(E_FAIL);
STDMETHODIMP LocksHeld(SIZE_T *pLockCount)
    DAC_EMPTY_RET(E_FAIL);

STDMETHODIMP BeginPreventAsyncAbort()
    DAC_EMPTY_RET(E_FAIL);
STDMETHODIMP EndPreventAsyncAbort()
    DAC_EMPTY_RET(E_FAIL);

// Reset the thread's state — presumably so the Thread object can be reused
// (e.g. by the thread pool); see the fReset* parameters.  TODO confirm against
// the definitions in threads.cpp.
void InternalReset (BOOL fNotFinalizerThread=FALSE, BOOL fThreadObjectResetNeeded=TRUE, BOOL fResetAbort=TRUE);
// Reset the managed Thread object to the given priority; the *InCoopMode
// variant is the flavor that runs in cooperative GC mode.
INT32 ResetManagedThreadObject(INT32 nPriority);
INT32 ResetManagedThreadObjectInCoopMode(INT32 nPriority);
BOOL  IsRealThreadPoolResetNeeded();
public:
// Detach this thread from the runtime (fDLLThreadDetach distinguishes the
// DLL_THREAD_DETACH path).
HRESULT DetachThread(BOOL fDLLThreadDetach);
|---|
// Atomically set one or more TS_* bits in m_State.
void SetThreadState(ThreadState ts)
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockOr((DWORD*)&m_State, ts);
}

// Atomically clear one or more TS_* bits in m_State.
void ResetThreadState(ThreadState ts)
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockAnd((DWORD*)&m_State, ~ts);
}

// Nonzero if ANY of the given TS_* bits is set (m_State is Volatile, so this
// read carries the usual volatile-load semantics).
BOOL HasThreadState(ThreadState ts)
{
    LIMITED_METHOD_CONTRACT;
    return ((DWORD)m_State & ts);
}

//
// This is meant to be used for quick opportunistic checks for thread abort and similar conditions. This method
// does not erect memory barrier and so it may return wrong result sometime that the caller has to handle.
//
BOOL HasThreadStateOpportunistic(ThreadState ts)
{
    LIMITED_METHOD_CONTRACT;
    return m_State.LoadWithoutBarrier() & ts;
}
|---|
| 1354 |  | 
|---|
| 1355 | void SetThreadStateNC(ThreadStateNoConcurrency tsnc) | 
|---|
| 1356 | { | 
|---|
| 1357 | LIMITED_METHOD_CONTRACT; | 
|---|
| 1358 | m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC | tsnc); | 
|---|
| 1359 | } | 
|---|
| 1360 |  | 
|---|
| 1361 | void ResetThreadStateNC(ThreadStateNoConcurrency tsnc) | 
|---|
| 1362 | { | 
|---|
| 1363 | LIMITED_METHOD_CONTRACT; | 
|---|
| 1364 | m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC & ~tsnc); | 
|---|
| 1365 | } | 
|---|
| 1366 |  | 
|---|
| 1367 | BOOL HasThreadStateNC(ThreadStateNoConcurrency tsnc) | 
|---|
| 1368 | { | 
|---|
| 1369 | LIMITED_METHOD_DAC_CONTRACT; | 
|---|
| 1370 | return ((DWORD)m_StateNC & tsnc); | 
|---|
| 1371 | } | 
|---|
| 1372 |  | 
|---|
| 1373 | void MarkEtwStackWalkInProgress() | 
|---|
| 1374 | { | 
|---|
| 1375 | WRAPPER_NO_CONTRACT; | 
|---|
| 1376 | SetThreadStateNC(Thread::TSNC_EtwStackWalkInProgress); | 
|---|
| 1377 | } | 
|---|
| 1378 |  | 
|---|
| 1379 | void MarkEtwStackWalkCompleted() | 
|---|
| 1380 | { | 
|---|
| 1381 | WRAPPER_NO_CONTRACT; | 
|---|
| 1382 | ResetThreadStateNC(Thread::TSNC_EtwStackWalkInProgress); | 
|---|
| 1383 | } | 
|---|
| 1384 |  | 
|---|
| 1385 | BOOL IsEtwStackWalkInProgress() | 
|---|
| 1386 | { | 
|---|
| 1387 | WRAPPER_NO_CONTRACT; | 
|---|
| 1388 | return HasThreadStateNC(Thread::TSNC_EtwStackWalkInProgress); | 
|---|
| 1389 | } | 
|---|
| 1390 |  | 
|---|
// Nonzero if this thread still has a SyncBlock to clean up
// (TT_CleanupSyncBlock is set in m_ThreadTasks).
DWORD RequireSyncBlockCleanup()
{
    LIMITED_METHOD_CONTRACT;
    return (m_ThreadTasks & TT_CleanupSyncBlock);
}

// Atomically mark that this thread owes a SyncBlock cleanup.
void SetSyncBlockCleanup()
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockOr((ULONG *)&m_ThreadTasks, TT_CleanupSyncBlock);
}

// Atomically clear the pending-SyncBlock-cleanup task bit.
void ResetSyncBlockCleanup()
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockAnd((ULONG *)&m_ThreadTasks, ~TT_CleanupSyncBlock);
}
|---|
| 1408 |  | 
|---|
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Nonzero if CoInitializeEx has been called on this thread (TS_CoInitialized).
DWORD IsCoInitialized()
{
    LIMITED_METHOD_CONTRACT;
    return (m_State & TS_CoInitialized);
}

// Record that COM has been initialized on this thread; this also clears the
// pending TT_CallCoInitialize task since the call has now happened.
void SetCoInitialized()
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockOr((ULONG *)&m_State, TS_CoInitialized);
    FastInterlockAnd((ULONG*)&m_ThreadTasks, ~TT_CallCoInitialize);
}

// Clear the CoInitialized state (e.g. after CoUninitialize).
void ResetCoInitialized()
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockAnd((ULONG *)&m_State,~TS_CoInitialized);
}

#ifdef FEATURE_COMINTEROP
// Nonzero if this thread has initialized WinRT (TSNC_WinRTInitialized).
BOOL IsWinRTInitialized()
{
    LIMITED_METHOD_CONTRACT;
    return HasThreadStateNC(TSNC_WinRTInitialized);
}

// Clear the WinRT-initialized flag.
void ResetWinRTInitialized()
{
    LIMITED_METHOD_CONTRACT;
    ResetThreadStateNC(TSNC_WinRTInitialized);
}
#endif // FEATURE_COMINTEROP
|---|
| 1442 |  | 
|---|
// Nonzero if this thread still needs CoInitialize called on it
// (TT_CallCoInitialize is set in m_ThreadTasks).
DWORD RequiresCoInitialize()
{
    LIMITED_METHOD_CONTRACT;
    return (m_ThreadTasks & TT_CallCoInitialize);
}

// Atomically mark that CoInitialize must be called on this thread.
void SetRequiresCoInitialize()
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockOr((ULONG *)&m_ThreadTasks, TT_CallCoInitialize);
}

// Atomically clear the pending-CoInitialize task bit.
void ResetRequiresCoInitialize()
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockAnd((ULONG *)&m_ThreadTasks,~TT_CallCoInitialize);
}

// Tear down this thread's COM state; defined out of line.
void CleanupCOMState();

#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
|---|
| 1464 |  | 
|---|
#ifdef FEATURE_COMINTEROP
// True if eager cleanup of COM objects has been disabled for this thread.
bool IsDisableComObjectEagerCleanup()
{
    LIMITED_METHOD_CONTRACT;
    return m_fDisableComObjectEagerCleanup;
}
// Disable eager cleanup of COM objects on this thread.  Note there is no
// corresponding "re-enable": the flag is one-way.
void SetDisableComObjectEagerCleanup()
{
    LIMITED_METHOD_CONTRACT;
    m_fDisableComObjectEagerCleanup = true;
}
#endif //FEATURE_COMINTEROP
|---|
| 1477 |  | 
|---|
#ifndef DACCESS_COMPILE
// True if this (dead) thread has already been counted when deciding whether
// dead threads should trigger a GC.  Only valid once the thread is dead.
bool HasDeadThreadBeenConsideredForGCTrigger()
{
    LIMITED_METHOD_CONTRACT;
    _ASSERTE(IsDead());

    return m_fHasDeadThreadBeenConsideredForGCTrigger;
}

// Record that this dead thread has been considered for the GC trigger, so it
// is not counted twice.
void SetHasDeadThreadBeenConsideredForGCTrigger()
{
    LIMITED_METHOD_CONTRACT;
    _ASSERTE(IsDead());

    m_fHasDeadThreadBeenConsideredForGCTrigger = true;
}
#endif // !DACCESS_COMPILE
|---|
| 1495 |  | 
|---|
| 1496 | // returns if there is some extra work for the finalizer thread. | 
|---|
| 1497 | BOOL (); | 
|---|
| 1498 |  | 
|---|
| 1499 | // do the extra finalizer work. | 
|---|
| 1500 | void (); | 
|---|
| 1501 |  | 
|---|
#ifndef DACCESS_COMPILE
// Nonzero if this thread must stop at the next GC-safe point
// (TS_CatchAtSafePoint is set).
DWORD CatchAtSafePoint()
{
    LIMITED_METHOD_CONTRACT;
    return (m_State & TS_CatchAtSafePoint);
}

// Same check without a memory barrier; may return a stale answer the caller
// must tolerate (see HasThreadStateOpportunistic).
DWORD CatchAtSafePointOpportunistic()
{
    LIMITED_METHOD_CONTRACT;
    return HasThreadStateOpportunistic(TS_CatchAtSafePoint);
}
#endif // !DACCESS_COMPILE
|---|
| 1515 |  | 
|---|
// Nonzero if this is a background thread (does not keep the process alive).
DWORD IsBackground()
{
    LIMITED_METHOD_CONTRACT;
    return (m_State & TS_Background);
}

// Nonzero if the thread has been created but not yet started.
DWORD IsUnstarted()
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;
    return (m_State & TS_Unstarted);
}

// Nonzero if the thread has terminated (TS_Dead).
DWORD IsDead()
{
    LIMITED_METHOD_CONTRACT;
    return (m_State & TS_Dead);
}

// Nonzero if the thread has been aborted (TS_Aborted).
DWORD IsAborted()
{
    LIMITED_METHOD_CONTRACT;
    return (m_State & TS_Aborted);
}
|---|
| 1540 |  | 
|---|
| 1541 | void SetAborted() | 
|---|
| 1542 | { | 
|---|
| 1543 | FastInterlockOr((ULONG *) &m_State, TS_Aborted); | 
|---|
| 1544 | } | 
|---|
| 1545 |  | 
|---|
| 1546 | void ClearAborted() | 
|---|
| 1547 | { | 
|---|
| 1548 | FastInterlockAnd((ULONG *) &m_State, ~TS_Aborted); | 
|---|
| 1549 | } | 
|---|
| 1550 |  | 
|---|
// Nonzero if the runtime owns this thread's lifetime (TS_WeOwn).
DWORD DoWeOwn()
{
    LIMITED_METHOD_CONTRACT;
    return (m_State & TS_WeOwn);
}

// For reporting purposes, grab a consistent snapshot of the thread's state
ThreadState GetSnapshotState();

// For delayed destruction of threads
// Nonzero if the thread has detached but its Thread object is still pending
// destruction (TS_Detached).
DWORD           IsDetached()
{
    LIMITED_METHOD_CONTRACT;
    return (m_State & TS_Detached);
}
|---|
| 1566 |  | 
|---|
#ifdef FEATURE_STACK_PROBE
//---------------------------------------------------------------------------------------
//
// IsSOTolerant - Is the current thread in SO Tolerant region?
//
// Arguments:
//    pLimitFrame: the limit of search for frames
//
// Return Value:
//    TRUE if in SO tolerant region.
//    FALSE if in SO intolerant region.
//
// Note:
//    We walk our frame chain to decide.  If HelperMethodFrame is seen first, we are in tolerant
//    region.  If EnterSOIntolerantCodeFrame is seen first, we are in intolerant region.
//
// Only compiled in when stack-overflow probing is enabled.
BOOL IsSOTolerant(void * pLimitFrame);
#endif // FEATURE_STACK_PROBE
|---|
| 1585 |  | 
|---|
#ifdef _DEBUG
// RAII helper (debug only): sets TSNC_DisableSOCheckInHCALL on the current
// thread for the holder's lifetime, suppressing the assert that HCALL methods
// run in an SO-tolerant region (some are invoked directly from the VM).
class DisableSOCheckInHCALL
{
private:
    // Current thread, captured at construction so the destructor resets the
    // same thread's flag.
    Thread *m_pThread;
public:
    DisableSOCheckInHCALL()
    {
        m_pThread = GetThread();
        m_pThread->SetThreadStateNC(TSNC_DisableSOCheckInHCALL);
    }
    ~DisableSOCheckInHCALL()
    {
        LIMITED_METHOD_CONTRACT;
        m_pThread->ResetThreadStateNC(TSNC_DisableSOCheckInHCALL);
    }
};
#endif
|---|
// Process-wide count of detached Thread objects awaiting destruction.
static LONG     m_DetachCount;
static LONG     m_ActiveDetachCount;  // Count how many non-background detached

// Number of threads currently at "unsafe places" (regions where suspension
// must be deferred).  NOTE(review): semantics inferred from the name — confirm
// against threads.cpp.
static Volatile<LONG>     m_threadsAtUnsafePlaces;

// Offsets for the following variables need to fit in 1 byte, so keep near
// the top of the object.  Also, we want cache line filling to work for us
// so the critical stuff is ordered based on frequency of use.

Volatile<ThreadState> m_State;   // Bits for the state of the thread

// If TRUE, GC is scheduled cooperatively with this thread.
// NOTE: This "byte" is actually a boolean - we don't allow
// recursive disables.
Volatile<ULONG>      m_fPreemptiveGCDisabled;

PTR_Frame            m_pFrame;  // The Current Frame

//-----------------------------------------------------------
// If the thread has wandered in from the outside this is
// its Domain.
//-----------------------------------------------------------
PTR_AppDomain       m_pDomain;

// Track the number of locks (critical section, spin lock, syncblock lock,
// EE Crst, GC lock) held by the current thread.
DWORD                m_dwLockCount;

// Unique thread id used for thin locks - kept as small as possible, as we have limited space
// in the object header to store it.
DWORD                m_ThreadId;


// RWLock state
LockEntry           *m_pHead;        // head of this thread's reader-writer lock list
LockEntry            m_embeddedEntry; // inline first entry, avoids an allocation

#ifndef DACCESS_COMPILE
// Walk the frame chain from pStartFrame up to pvLimitSP, notifying each frame
// that an exception unwind is passing it; returns the first frame beyond the
// limit.  NOTE(review): return semantics inferred — confirm in threads.cpp.
Frame* NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP);
#endif // DACCESS_COMPILE
|---|
#if defined(FEATURE_COMINTEROP) && !defined(DACCESS_COMPILE)
// Push an RCW onto this thread's in-use RCW stack; throws OutOfMemory if the
// stack cannot grow.
void RegisterRCW(RCW *pRCW)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(pRCW));
    }
    CONTRACTL_END;

    if (!m_pRCWStack->Push(pRCW))
    {
        ThrowOutOfMemory();
    }
}
|---|
| 1662 |  | 
|---|
// Returns false on OOM.
// Non-throwing variant of RegisterRCW; pRCW may be NULL here (see the
// precondition), unlike the throwing version.
BOOL RegisterRCWNoThrow(RCW *pRCW)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(pRCW, NULL_OK));
    }
    CONTRACTL_END;

    return m_pRCWStack->Push(pRCW);
}
|---|
| 1677 |  | 
|---|
// Pop the top RCW off this thread's in-use RCW stack and return it.  In debug
// builds, pSB is used to verify the popped RCW matches the SyncBlock's RCW.
RCW *UnregisterRCW(INDEBUG(SyncBlock *pSB))
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(pSB));
    }
    CONTRACTL_END;

    RCW* pPoppedRCW = m_pRCWStack->Pop();

#ifdef _DEBUG
    // The RCW we popped must be the one pointed to by pSB if pSB still points to an RCW.
    RCW* pCurrentRCW = pSB->GetInteropInfoNoCreate()->GetRawRCW();
    _ASSERTE(pCurrentRCW == NULL || pPoppedRCW == NULL || pCurrentRCW == pPoppedRCW);
#endif // _DEBUG

    return pPoppedRCW;
}
|---|
| 1699 |  | 
|---|
// Nonzero if the given RCW is anywhere on this thread's in-use RCW stack.
BOOL RCWIsInUse(RCW* pRCW)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(pRCW));
    }
    CONTRACTL_END;

    return m_pRCWStack->IsInStack(pRCW);
}
#endif // FEATURE_COMINTEROP && !DACCESS_COMPILE
|---|
| 1714 |  | 
|---|
// Lock thread is trying to acquire
VolatilePtr<DeadlockAwareLock> m_pBlockingLock;

public:

// on MP systems, each thread has its own allocation chunk so we can avoid
// lock prefixes and expensive MP cache snooping stuff
gc_alloc_context        m_alloc_context;

// Accessor for the per-thread GC allocation context above.
inline gc_alloc_context *GetAllocContext() { LIMITED_METHOD_CONTRACT; return &m_alloc_context; }

// This is the type handle of the first object in the alloc context at the time
// we fire the AllocationTick event. It's only for tooling purpose.
TypeHandle m_thAllocContextObj;

#ifndef FEATURE_PAL
private:
    // Cached pointer to this thread's OS Thread Information Block.
    _NT_TIB *m_pTEB;
public:
    _NT_TIB *GetTEB() {
        LIMITED_METHOD_CONTRACT;
        return m_pTEB;
    }
    // Address of the SEH exception-registration chain head in the TEB.
    PEXCEPTION_REGISTRATION_RECORD *GetExceptionListPtr() {
        WRAPPER_NO_CONTRACT;
        return &GetTEB()->ExceptionList;
    }
#endif // !FEATURE_PAL

// Setter/getter for the AllocationTick type handle above.
inline void SetTHAllocContextObj(TypeHandle th) {LIMITED_METHOD_CONTRACT; m_thAllocContextObj = th; }

inline TypeHandle GetTHAllocContextObj() {LIMITED_METHOD_CONTRACT; return m_thAllocContextObj; }
|---|
| 1747 |  | 
|---|
#ifdef FEATURE_COMINTEROP
// The header for the per-thread in-use RCW stack.
RCWStackHeader*      m_pRCWStack;
#endif // FEATURE_COMINTEROP

// Allocator used during marshaling for temporary buffers, much faster than
// heap allocation.
//
// Uses of this allocator should be effectively statically scoped, i.e. a "region"
// is started using a CheckPointHolder and GetCheckpoint, and this region can then be used for allocations
// from that point onwards, and then all memory is reclaimed when the static scope for the
// checkpoint is exited by the running thread.
StackingAllocator    m_MarshalAlloc;

// Flags used to indicate tasks the thread has to do.
ThreadTasks          m_ThreadTasks;

// Flags for thread states that have no concurrency issues.
ThreadStateNoConcurrency m_StateNC;

// Increment/decrement m_dwLockCount; defined out of line.
inline void IncLockCount();
inline void DecLockCount();
|---|
| 1770 |  | 
|---|
private:
    DWORD m_dwBeginLockCount;  // lock count when the thread enters current domain

#ifdef _DEBUG
    // Count of threads currently suspended by this thread (debug bookkeeping).
    DWORD dbg_m_cSuspendedThreads;
    // Count of suspended threads that we know are not in native code (and therefore cannot hold OS lock which prevents us calling out to host)
    DWORD dbg_m_cSuspendedThreadsWithoutOSLock;
    // Id of the thread that created this Thread object (note: historical
    // misspelling of "Creator" kept for compatibility).
    EEThreadId m_Creater;
#endif

    // After we suspend a thread, we may need to call EEJitManager::JitCodeToMethodInfo
    // or StressLog which may waits on a spinlock.  It is unsafe to suspend a thread while it
    // is in this state.
    Volatile<LONG> m_dwForbidSuspendThread;
public:
|---|
| 1786 |  | 
|---|
// Enter a region in which this thread must not be suspended (see
// m_dwForbidSuspendThread).  Static so it can be used in a StateHolder; it
// operates on the current thread and is a no-op when no Thread is set up.
static void IncForbidSuspendThread()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;
#ifndef DACCESS_COMPILE
    Thread * pThread = GetThreadNULLOk();
    if (pThread)
    {
        // Guard against counter overflow.
        _ASSERTE (pThread->m_dwForbidSuspendThread != (LONG)MAXLONG);
#ifdef _DEBUG
        {
            //DEBUG_ONLY;
            STRESS_LOG2(LF_SYNC, LL_INFO100000, "Set forbid suspend [%d] for thread %p.\n", pThread->m_dwForbidSuspendThread.Load(), pThread);
        }
#endif
        FastInterlockIncrement(&pThread->m_dwForbidSuspendThread);
    }
#endif //!DACCESS_COMPILE
}
|---|
| 1813 |  | 
|---|
// Leave the forbid-suspend region entered by IncForbidSuspendThread.
// Asserts the counter is not already zero (unbalanced Inc/Dec).
static void DecForbidSuspendThread()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;
#ifndef DACCESS_COMPILE
    Thread * pThread = GetThreadNULLOk();
    if (pThread)
    {
        _ASSERTE (pThread->m_dwForbidSuspendThread != (LONG)0);
        FastInterlockDecrement(&pThread->m_dwForbidSuspendThread);
#ifdef _DEBUG
        {
            //DEBUG_ONLY;
            STRESS_LOG2(LF_SYNC, LL_INFO100000, "Reset forbid suspend [%d] for thread %p.\n", pThread->m_dwForbidSuspendThread.Load(), pThread);
        }
#endif
    }
#endif //!DACCESS_COMPILE
}
|---|
| 1840 |  | 
|---|
// True while this thread is inside a forbid-suspend region (counter nonzero).
bool IsInForbidSuspendRegion()
{
    return m_dwForbidSuspendThread != (LONG)0;
}

// The ForbidSuspendThreadHolder is used during the initialization of the stack marker infrastructure so
// it can't do any backout stack validation (which is why we pass in VALIDATION_TYPE=HSV_NoValidation).
typedef StateHolder<Thread::IncForbidSuspendThread, Thread::DecForbidSuspendThread, HSV_NoValidation> ForbidSuspendThreadHolder;
|---|
| 1849 |  | 
|---|
private:
    // Per thread counter to dispense hash code - kept in the thread so we don't need a lock
    // or interlocked operations to get a new hash code;
    DWORD m_dwHashCodeSeed;

public:
|---|
// Nonzero if this thread has acquired locks since entering the current domain
// (i.e. the running lock count differs from the count at domain entry).
inline BOOL HasLockInCurrentDomain()
{
    LIMITED_METHOD_CONTRACT;

    _ASSERTE(m_dwLockCount >= m_dwBeginLockCount);

    // Equivalent to (m_dwLockCount != m_dwBeginLockCount ||
    //                m_dwCriticalRegionCount != m_dwBeginCriticalRegionCount),
    // but without branching instructions.  (The critical-region counters no
    // longer exist; only the lock counts are compared now.)
    BOOL fHasLock = (m_dwLockCount ^ m_dwBeginLockCount);

    return fHasLock;
}

// Critical regions are no longer tracked; kept for source compatibility and
// always reports FALSE.
inline BOOL HasCriticalRegion()
{
    LIMITED_METHOD_CONTRACT;
    return FALSE;
}
|---|
| 1876 |  | 
|---|
| 1877 | inline DWORD GetNewHashCode() | 
|---|
| 1878 | { | 
|---|
| 1879 | LIMITED_METHOD_CONTRACT; | 
|---|
| 1880 | // Every thread has its own generator for hash codes so that we won't get into a situation | 
|---|
| 1881 | // where two threads consistently give out the same hash codes. | 
|---|
| 1882 | // Choice of multiplier guarantees period of 2**32 - see Knuth Vol 2 p16 (3.2.1.2 Theorem A). | 
|---|
| 1883 | DWORD multiplier = GetThreadId()*4 + 5; | 
|---|
| 1884 | m_dwHashCodeSeed = m_dwHashCodeSeed*multiplier + 1; | 
|---|
| 1885 | return m_dwHashCodeSeed; | 
|---|
| 1886 | } | 
|---|
| 1887 |  | 
|---|
#ifdef _DEBUG
// If the current thread suspends other threads, we need to make sure that the thread
// only allocates memory if the suspended threads do not have OS Heap lock.
static BOOL Debug_AllowCallout()
{
    LIMITED_METHOD_CONTRACT;
    Thread * pThread = GetThreadNULLOk();
    // Safe when no runtime thread exists, or when every suspended thread is
    // known not to hold an OS lock.
    return ((pThread == NULL) || (pThread->dbg_m_cSuspendedThreads == pThread->dbg_m_cSuspendedThreadsWithoutOSLock));
}

// Returns number of threads that are currently suspended by the current thread and that can potentially hold OS lock
// (note the BOOL return type is historical; the value is actually a count).
BOOL Debug_GetUnsafeSuspendeeCount()
{
    LIMITED_METHOD_CONTRACT;
    return (dbg_m_cSuspendedThreads - dbg_m_cSuspendedThreadsWithoutOSLock);
}
#endif
|---|
| 1905 |  | 
|---|
public:

// Thread-affinity tracking was removed; kept for source compatibility and
// always reports FALSE.
BOOL HasThreadAffinity()
{
    LIMITED_METHOD_CONTRACT;
    return FALSE;
}
|---|
| 1913 |  | 
|---|
private:
    // Type-loader load-level limiter active on this thread, if any.
    LoadLevelLimiter *m_pLoadLimiter;

public:
    // Returns the current load-level limiter (may be NULL).
    LoadLevelLimiter *GetLoadLevelLimiter()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pLoadLimiter;
    }

    // Installs (or clears, with NULL) the load-level limiter for this thread.
    void SetLoadLevelLimiter(LoadLevelLimiter *limiter)
    {
        LIMITED_METHOD_CONTRACT;
        m_pLoadLimiter = limiter;
    }
|---|
| 1929 |  | 
|---|
| 1930 |  | 
|---|
| 1931 |  | 
|---|
public:

//--------------------------------------------------------------
// Constructor.
//--------------------------------------------------------------
#ifndef DACCESS_COMPILE
Thread();
#endif

//--------------------------------------------------------------
// Failable initialization occurs here.
//--------------------------------------------------------------
BOOL InitThread(BOOL fInternal);
BOOL AllocHandles();

// Extra setup performed when running under a host.
void SetupThreadForHost();

//--------------------------------------------------------------
// If the thread was setup through SetupUnstartedThread, rather
// than SetupThread, complete the setup here when the thread is
// actually running.
// WARNING : only GC calls this with bRequiresTSL set to FALSE.
//--------------------------------------------------------------
BOOL HasStarted(BOOL bRequiresTSL=TRUE);

// We don't want ::CreateThread() calls scattered throughout the source.
// Create all new threads here.  The thread is created as suspended, so
// you must ::ResumeThread to kick it off.  It is guaranteed to create the
// thread, or throw.
BOOL CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName=NULL);


// Rough stack-size classes for utility threads (see CreateUtilityThread).
enum StackSizeBucket
{
    StackSize_Small,
    StackSize_Medium,
    StackSize_Large
};

//
// Creates a raw OS thread; use this only for CLR-internal threads that never execute user code.
// StackSizeBucket determines how large the stack should be.
//
static HANDLE CreateUtilityThread(StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName, DWORD flags = 0, DWORD* pThreadId = NULL);
|---|
| 1976 |  | 
|---|
//--------------------------------------------------------------
// Destructor
//--------------------------------------------------------------
#ifndef DACCESS_COMPILE
virtual ~Thread();
#else
// DAC builds never destroy Thread objects; empty body keeps the vtable valid.
virtual ~Thread() {}
#endif

#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Tear down COM / WinRT initialization done on this thread; the Base*
// variants call the OS APIs directly.
void            CoUninitialize();
void            BaseCoUninitialize();
void            BaseWinRTUninitialize();
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT

// Run termination processing for this thread; holdingLock indicates whether
// the caller already holds the ThreadStore lock.
void        OnThreadTerminate(BOOL holdingLock);

// Destroy Thread objects whose OS threads have already detached.
static void CleanupDetachedThreads();
|---|
//--------------------------------------------------------------
// Returns innermost active Frame.
//--------------------------------------------------------------
PTR_Frame GetFrame()
{
    SUPPORTS_DAC;

#ifndef DACCESS_COMPILE
#ifdef _DEBUG_IMPL
    WRAPPER_NO_CONTRACT;
    // Only validate when querying the current thread: the frame pointer must
    // lie between the current SP and the cached stack base, or be the
    // FRAME_TOP sentinel (-1).
    if (this == GetThreadNULLOk())
    {
        void* curSP;
        curSP = (void *)GetCurrentSP();
        _ASSERTE((curSP <= m_pFrame && m_pFrame < m_CacheStackBase) || m_pFrame == (Frame*) -1);
    }
#else
    LIMITED_METHOD_CONTRACT;
    // Non-debug-impl, non-DAC builds are not expected to take this path.
    _ASSERTE(! "NYI");
#endif
#endif // #ifndef DACCESS_COMPILE
    return m_pFrame;
}
|---|
| 2018 |  | 
|---|
//--------------------------------------------------------------
// Replaces innermost active Frames.
//--------------------------------------------------------------
#ifndef DACCESS_COMPILE
// In debug builds the definition lives out-of-line (extra validation);
// otherwise it is a simple inline store of the new innermost frame.
void  SetFrame(Frame *pFrame)
#ifdef _DEBUG
;
#else
{
    LIMITED_METHOD_CONTRACT;
    m_pFrame = pFrame;
}
#endif
;
#endif

// Returns a Frame relative to the given stack address.
// NOTE(review): exact boundary semantics (at vs. above StackPointer) are
// defined by the out-of-line implementation -- confirm in threads.cpp.
inline Frame* FindFrame(SIZE_T StackPointer);

bool DetectHandleILStubsForDebugger();
| 2037 |  | 
|---|
| 2038 | void SetWin32FaultAddress(DWORD eip) | 
|---|
| 2039 | { | 
|---|
| 2040 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2041 | m_Win32FaultAddress = eip; | 
|---|
| 2042 | } | 
|---|
| 2043 |  | 
|---|
| 2044 | void SetWin32FaultCode(DWORD code) | 
|---|
| 2045 | { | 
|---|
| 2046 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2047 | m_Win32FaultCode = code; | 
|---|
| 2048 | } | 
|---|
| 2049 |  | 
|---|
| 2050 | DWORD GetWin32FaultAddress() | 
|---|
| 2051 | { | 
|---|
| 2052 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2053 | return m_Win32FaultAddress; | 
|---|
| 2054 | } | 
|---|
| 2055 |  | 
|---|
| 2056 | DWORD GetWin32FaultCode() | 
|---|
| 2057 | { | 
|---|
| 2058 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2059 | return m_Win32FaultCode; | 
|---|
| 2060 | } | 
|---|
| 2061 |  | 
|---|
#ifdef ENABLE_CONTRACTS
// Returns this thread's contract-checking debug state.
ClrDebugState *GetClrDebugState()
{
    LIMITED_METHOD_CONTRACT;
    return m_pClrDebugState;
}
#endif
| 2069 |  | 
|---|
//**************************************************************
// GC interaction
//**************************************************************

//--------------------------------------------------------------
// Enter cooperative GC mode. NOT NESTABLE.
//--------------------------------------------------------------
// Must be called on the current thread only.  Sets the coop-mode flag and
// then takes the rare path if a suspension/trap is pending.
FORCEINLINE_NONDEBUG void DisablePreemptiveGC()
{
#ifndef DACCESS_COMPILE
    WRAPPER_NO_CONTRACT;
    _ASSERTE(this == GetThread());
    _ASSERTE(!m_fPreemptiveGCDisabled);
    // holding a spin lock in preemp mode and transit to coop mode will cause other threads
    // spinning waiting for GC
    _ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);

#ifdef ENABLE_CONTRACTS_IMPL
    TriggersGC(this);
#endif

    // Logically, we just want to check whether a GC is in progress and halt
    // at the boundary if it is -- before we disable preemptive GC.  However
    // this opens up a race condition where the GC starts after we make the
    // check.  SuspendRuntime will ignore such a thread because it saw it as
    // outside the EE.  So the thread would run wild during the GC.
    //
    // Instead, enter cooperative mode and then check if a GC is in progress.
    // If so, go back out and try again.  The reason we go back out before we
    // try again, is that SuspendRuntime might have seen us as being in
    // cooperative mode if it checks us between the next two statements.
    // In that case, it will be trying to move us to a safe spot.  If
    // we don't let it see us leave, it will keep waiting on us indefinitely.

    // ------------------------------------------------------------------------
    //   ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING **  |
    // ------------------------------------------------------------------------
    //
    //   DO NOT CHANGE THIS METHOD WITHOUT VISITING ALL THE STUB GENERATORS
    //   THAT EFFECTIVELY INLINE IT INTO THEIR STUBS
    //
    // ------------------------------------------------------------------------
    //   ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING **  |
    // ------------------------------------------------------------------------

    m_fPreemptiveGCDisabled.StoreWithoutBarrier(1);

    // If any thread is being trapped (e.g. for a pending GC suspension),
    // take the out-of-line slow path.
    if (g_TrapReturningThreads.LoadWithoutBarrier())
    {
        RareDisablePreemptiveGC();
    }
#else
    LIMITED_METHOD_CONTRACT;
#endif
}
| 2125 |  | 
|---|
// Slow path of DisablePreemptiveGC; kept out-of-line deliberately.
NOINLINE void RareDisablePreemptiveGC();

// Processes a pending thread-abort request, if any, on the current thread.
void HandleThreadAbort()
{
    HandleThreadAbort(FALSE);
}
void HandleThreadAbort(BOOL fForce);  // fForce=TRUE only for a thread waiting to start AD unload

void PreWorkForThreadAbort();

private:
void HandleThreadAbortTimeout();

public:
//--------------------------------------------------------------
// Leave cooperative GC mode. NOT NESTABLE.
//--------------------------------------------------------------
// Must be called on the current thread only.  Clears the coop-mode flag and
// then takes the rare path if the thread should stop at a safe point.
FORCEINLINE_NONDEBUG void EnablePreemptiveGC()
{
    LIMITED_METHOD_CONTRACT;

#ifndef DACCESS_COMPILE
    _ASSERTE(this == GetThread());
    _ASSERTE(m_fPreemptiveGCDisabled);
    // holding a spin lock in coop mode and transit to preemp mode will cause deadlock on GC
    _ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);

#ifdef ENABLE_CONTRACTS_IMPL
    _ASSERTE(!GCForbidden());
    TriggersGC(this);
#endif

    // ------------------------------------------------------------------------
    //   ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING **  |
    // ------------------------------------------------------------------------
    //
    //   DO NOT CHANGE THIS METHOD WITHOUT VISITING ALL THE STUB GENERATORS
    //   THAT EFFECTIVELY INLINE IT INTO THEIR STUBS
    //
    // ------------------------------------------------------------------------
    //   ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING **  |
    // ------------------------------------------------------------------------

    m_fPreemptiveGCDisabled.StoreWithoutBarrier(0);
#ifdef ENABLE_CONTRACTS
    m_ulEnablePreemptiveGCCount ++;
#endif  // ENABLE_CONTRACTS

    if (CatchAtSafePoint())
        RareEnablePreemptiveGC();
#endif
}
| 2178 |  | 
|---|
#if defined(STRESS_HEAP) && defined(_DEBUG)
// GC-stress helper (debug builds only).
void PerformPreemptiveGC();
#endif
// Slow path of EnablePreemptiveGC; kept out-of-line deliberately.
void RareEnablePreemptiveGC();
// See threads.cpp for the mode-pulsing semantics.
void PulseGCMode();
| 2184 |  | 
|---|
//--------------------------------------------------------------
// Query mode
//--------------------------------------------------------------
// Returns nonzero if the current thread is in cooperative mode.
// Must be called on the current thread only.
BOOL PreemptiveGCDisabled()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(this == GetThread());
    //
    // m_fPreemptiveGCDisabled is always modified by the thread itself, and so the thread itself
    // can read it without memory barrier.
    //
    return m_fPreemptiveGCDisabled.LoadWithoutBarrier();
}
| 2198 |  | 
|---|
| 2199 | BOOL PreemptiveGCDisabledOther() | 
|---|
| 2200 | { | 
|---|
| 2201 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2202 | return (m_fPreemptiveGCDisabled); | 
|---|
| 2203 | } | 
|---|
| 2204 |  | 
|---|
#ifdef ENABLE_CONTRACTS_IMPL

// Opens a GC_NOTRIGGER contract region.  If the thread is already in
// cooperative mode this also counts as a forbid region (a trigger there
// could move objects under us).
void BeginNoTriggerGC(const char *szFile, int lineNum)
{
    WRAPPER_NO_CONTRACT;
    m_pClrDebugState->IncrementGCNoTriggerCount();
    if (PreemptiveGCDisabled())
    {
        m_pClrDebugState->IncrementGCForbidCount();
    }
}

// Closes the innermost GC_NOTRIGGER region opened by BeginNoTriggerGC.
void EndNoTriggerGC()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(m_pClrDebugState->GetGCNoTriggerCount() != 0 || (m_pClrDebugState->ViolationMask() & BadDebugState));
    m_pClrDebugState->DecrementGCNoTriggerCount();

    // The forbid count is only decremented if one was taken on entry
    // (see BeginNoTriggerGC); the counts may legitimately differ.
    if (m_pClrDebugState->GetGCForbidCount())
    {
        m_pClrDebugState->DecrementGCForbidCount();
    }
}
| 2228 |  | 
|---|
// Opens a GC_FORBID contract region.  Normally requires cooperative mode;
// the profiler and late-shutdown cases are explicitly exempted below.
void BeginForbidGC(const char *szFile, int lineNum)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(this == GetThread());
#ifdef PROFILING_SUPPORTED
    _ASSERTE(PreemptiveGCDisabled()
             || CORProfilerPresent() ||    // This added to allow profiler to use GetILToNativeMapping
                                           // while in preemptive GC mode
             (g_fEEShutDown & (ShutDown_Finalize2 | ShutDown_Profiler)) == ShutDown_Finalize2);
#else // PROFILING_SUPPORTED
    _ASSERTE(PreemptiveGCDisabled());
#endif // PROFILING_SUPPORTED
    BeginNoTriggerGC(szFile, lineNum);
}

// Closes the innermost GC_FORBID region opened by BeginForbidGC.
void EndForbidGC()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(this == GetThread());
#ifdef PROFILING_SUPPORTED
    _ASSERTE(PreemptiveGCDisabled() ||
             CORProfilerPresent() ||    // This added to allow profiler to use GetILToNativeMapping
                                        // while in preemptive GC mode
             (g_fEEShutDown & (ShutDown_Finalize2 | ShutDown_Profiler)) == ShutDown_Finalize2);
#else // PROFILING_SUPPORTED
    _ASSERTE(PreemptiveGCDisabled());
#endif // PROFILING_SUPPORTED
    EndNoTriggerGC();
}
| 2258 |  | 
|---|
// Returns nonzero if a GC_NOTRIGGER region is active, honoring
// contract-violation overrides (a GCViolation or unusable debug state
// reports FALSE, i.e. no restriction).
BOOL GCNoTrigger()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(this == GetThread());
    if ( (GCViolation|BadDebugState) & m_pClrDebugState->ViolationMask() )
    {
        return FALSE;
    }
    return m_pClrDebugState->GetGCNoTriggerCount();
}

// Returns nonzero if a GC_FORBID region is active, honoring the same
// violation overrides as GCNoTrigger.
BOOL GCForbidden()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(this == GetThread());
    if ( (GCViolation|BadDebugState) & m_pClrDebugState->ViolationMask())
    {
        return FALSE;
    }
    return m_pClrDebugState->GetGCForbidCount();
}
| 2280 |  | 
|---|
| 2281 | BOOL RawGCNoTrigger() | 
|---|
| 2282 | { | 
|---|
| 2283 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2284 | if (m_pClrDebugState->ViolationMask() & BadDebugState) | 
|---|
| 2285 | { | 
|---|
| 2286 | return 0; | 
|---|
| 2287 | } | 
|---|
| 2288 | return m_pClrDebugState->GetGCNoTriggerCount(); | 
|---|
| 2289 | } | 
|---|
| 2290 |  | 
|---|
// Raw form of GCForbidden: reports the forbid count without applying the
// GCViolation override; only an unusable debug state suppresses it.
BOOL RawGCForbidden()
{
    LIMITED_METHOD_CONTRACT;
    if (m_pClrDebugState->ViolationMask() & BadDebugState)
    {
        return 0;
    }
    return m_pClrDebugState->GetGCForbidCount();
}
#endif // ENABLE_CONTRACTS_IMPL
| 2301 |  | 
|---|
| 2302 | //--------------------------------------------------------------- | 
|---|
| 2303 | // Expose key offsets and values for stub generation. | 
|---|
| 2304 | //--------------------------------------------------------------- | 
|---|
| 2305 | static BYTE GetOffsetOfCurrentFrame() | 
|---|
| 2306 | { | 
|---|
| 2307 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2308 | size_t ofs = offsetof(class Thread, m_pFrame); | 
|---|
| 2309 | _ASSERTE(FitsInI1(ofs)); | 
|---|
| 2310 | return (BYTE)ofs; | 
|---|
| 2311 | } | 
|---|
| 2312 |  | 
|---|
| 2313 | static BYTE GetOffsetOfState() | 
|---|
| 2314 | { | 
|---|
| 2315 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2316 | size_t ofs = offsetof(class Thread, m_State); | 
|---|
| 2317 | _ASSERTE(FitsInI1(ofs)); | 
|---|
| 2318 | return (BYTE)ofs; | 
|---|
| 2319 | } | 
|---|
| 2320 |  | 
|---|
| 2321 | static BYTE GetOffsetOfGCFlag() | 
|---|
| 2322 | { | 
|---|
| 2323 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2324 | size_t ofs = offsetof(class Thread, m_fPreemptiveGCDisabled); | 
|---|
| 2325 | _ASSERTE(FitsInI1(ofs)); | 
|---|
| 2326 | return (BYTE)ofs; | 
|---|
| 2327 | } | 
|---|
| 2328 |  | 
|---|
| 2329 | static void StaticDisablePreemptiveGC( Thread *pThread) | 
|---|
| 2330 | { | 
|---|
| 2331 | WRAPPER_NO_CONTRACT; | 
|---|
| 2332 | _ASSERTE(pThread != NULL); | 
|---|
| 2333 | pThread->DisablePreemptiveGC(); | 
|---|
| 2334 | } | 
|---|
| 2335 |  | 
|---|
| 2336 | static void StaticEnablePreemptiveGC( Thread *pThread) | 
|---|
| 2337 | { | 
|---|
| 2338 | WRAPPER_NO_CONTRACT; | 
|---|
| 2339 | _ASSERTE(pThread != NULL); | 
|---|
| 2340 | pThread->EnablePreemptiveGC(); | 
|---|
| 2341 | } | 
|---|
| 2342 |  | 
|---|
| 2343 |  | 
|---|
| 2344 | //--------------------------------------------------------------- | 
|---|
| 2345 | // Expose offset of the app domain word for the interop and delegate callback | 
|---|
| 2346 | //--------------------------------------------------------------- | 
|---|
| 2347 | static SIZE_T GetOffsetOfAppDomain() | 
|---|
| 2348 | { | 
|---|
| 2349 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2350 | return (SIZE_T)(offsetof(class Thread, m_pDomain)); | 
|---|
| 2351 | } | 
|---|
| 2352 |  | 
|---|
| 2353 | //--------------------------------------------------------------- | 
|---|
| 2354 | // Expose offset of the place for storing the filter context for the debugger. | 
|---|
| 2355 | //--------------------------------------------------------------- | 
|---|
| 2356 | static SIZE_T GetOffsetOfDebuggerFilterContext() | 
|---|
| 2357 | { | 
|---|
| 2358 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2359 | return (SIZE_T)(offsetof(class Thread, m_debuggerFilterContext)); | 
|---|
| 2360 | } | 
|---|
| 2361 |  | 
|---|
| 2362 | //--------------------------------------------------------------- | 
|---|
| 2363 | // Expose offset of the debugger cant stop count for the debugger | 
|---|
| 2364 | //--------------------------------------------------------------- | 
|---|
| 2365 | static SIZE_T GetOffsetOfCantStop() | 
|---|
| 2366 | { | 
|---|
| 2367 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2368 | return (SIZE_T)(offsetof(class Thread, m_debuggerCantStop)); | 
|---|
| 2369 | } | 
|---|
| 2370 |  | 
|---|
| 2371 | //--------------------------------------------------------------- | 
|---|
| 2372 | // Expose offset of m_StateNC | 
|---|
| 2373 | //--------------------------------------------------------------- | 
|---|
| 2374 | static SIZE_T GetOffsetOfStateNC() | 
|---|
| 2375 | { | 
|---|
| 2376 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2377 | return (SIZE_T)(offsetof(class Thread, m_StateNC)); | 
|---|
| 2378 | } | 
|---|
| 2379 |  | 
|---|
//---------------------------------------------------------------
// Last exception to be thrown
//---------------------------------------------------------------
// Sets the current throwable (the managed exception object).  Debug builds
// accept extra error-checking flags via DEBUG_ARG.
inline void SetThrowable(OBJECTREF pThrowable
                         DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags = ThreadExceptionState::STEC_All));

// Returns the current throwable from the thread's exception state.
OBJECTREF GetThrowable()
{
    WRAPPER_NO_CONTRACT;

    return m_ExceptionState.GetThrowable();
}
| 2392 |  | 
|---|
// An unmanaged thread can use this to check whether a managed exception is
// being processed on this thread (handle exists and references an object).
BOOL HasException()
{
    LIMITED_METHOD_CONTRACT;
    OBJECTHANDLE pThrowable = m_ExceptionState.GetThrowableAsHandle();
    return pThrowable && *PTR_UNCHECKED_OBJECTREF(pThrowable);
}
| 2400 |  | 
|---|
// Returns the handle holding the current throwable (may be a null handle).
OBJECTHANDLE GetThrowableAsHandle()
{
    LIMITED_METHOD_CONTRACT;
    return m_ExceptionState.GetThrowableAsHandle();
}

// special null test (for use when we're in the wrong GC mode)
BOOL IsThrowableNull()
{
    WRAPPER_NO_CONTRACT;
    return IsHandleNullUnchecked(m_ExceptionState.GetThrowableAsHandle());
}

// Returns whether this thread's exception state records an exception
// currently in flight.
BOOL IsExceptionInProgress()
{
    SUPPORTS_DAC;
    LIMITED_METHOD_CONTRACT;
    return m_ExceptionState.IsExceptionInProgress();
}
| 2420 |  | 
|---|
| 2421 |  | 
|---|
// Synchronizes managed exception state; see threads.cpp for semantics.
void SyncManagedExceptionState(bool fIsDebuggerThread);

//---------------------------------------------------------------
// Per-thread information used by handler
//---------------------------------------------------------------
// exception handling info stored in thread
// can't allocate this as needed because can't make exception-handling depend upon memory allocation

// Returns a DAC-aware pointer to the embedded exception-state member.
PTR_ThreadExceptionState GetExceptionState()
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;

    return PTR_ThreadExceptionState(PTR_HOST_MEMBER_TADDR(Thread, this, m_ExceptionState));
}
| 2437 |  | 
|---|
public:

// Raises a cross-context exception; never returns.
void DECLSPEC_NORETURN RaiseCrossContextException(Exception* pEx, ContextTransitionFrame* pFrame);

// ClearContext are to be called only during shutdown
void ClearContext();

private:
// don't ever call these except when creating thread!!!!!
void InitContext();

public:
// Returns the AppDomain this thread is currently running in.
PTR_AppDomain GetDomain(INDEBUG(BOOL fMidContextTransitionOK = FALSE))
{
    LIMITED_METHOD_DAC_CONTRACT;

    return m_pDomain;
}

// AppDomain-transition queries over the Frame chain; 'count' receives the
// number of transitions found (see threads.cpp for exact semantics).
Frame *IsRunningIn(AppDomain* pDomain, int *count);
Frame *GetFirstTransitionInto(AppDomain *pDomain, int *count);
| 2459 |  | 
|---|
//---------------------------------------------------------------
// Track use of the thread block.  See the general comments on
// thread destruction in threads.cpp, for details.
//---------------------------------------------------------------
int         IncExternalCount();
int         DecExternalCount(BOOL holdingLock);
| 2466 |  | 
|---|
| 2467 |  | 
|---|
//---------------------------------------------------------------
// !!!! THESE ARE NOT SAFE FOR GENERAL USE  !!!!
//      IncExternalCountDANGEROUSProfilerOnly()
//      DecExternalCountDANGEROUSProfilerOnly()
// Currently only the profiler API should be using these
// functions, because the profiler is responsible for ensuring
// that the thread exists, undestroyed, before operating on it.
// All other clients should use IncExternalCount/DecExternalCount
// instead
//---------------------------------------------------------------
int         IncExternalCountDANGEROUSProfilerOnly()
{
    LIMITED_METHOD_CONTRACT;

    // The preprocessor interleaving below makes retail builds return the
    // interlocked result directly, while debug builds capture it first so
    // it can be asserted on.
#ifdef _DEBUG
    int cRefs =
#else   // _DEBUG
    return
#endif //_DEBUG
        FastInterlockIncrement((LONG*)&m_ExternalRefCount);

#ifdef _DEBUG
    // This should never be called on a thread being destroyed
    _ASSERTE(cRefs != 1);
    return cRefs;
#endif //_DEBUG
}
| 2495 |  | 
|---|
int         DecExternalCountDANGEROUSProfilerOnly()
{
    LIMITED_METHOD_CONTRACT;
    // Same interleaving trick as IncExternalCountDANGEROUSProfilerOnly:
    // retail returns directly, debug captures the result for the assert.
#ifdef _DEBUG
    int cRefs =
#else   // _DEBUG
    return
#endif //_DEBUG

        FastInterlockDecrement((LONG*)&m_ExternalRefCount);

#ifdef _DEBUG
    // This should never cause the last reference on the thread to be released
    _ASSERTE(cRefs != 0);
    return cRefs;
#endif //_DEBUG
}
| 2513 |  | 
|---|
// Get and Set the exposed System.Thread object which corresponds to
// this thread.  Also the thread handle and Id.
OBJECTREF   GetExposedObject();
OBJECTREF   GetExposedObjectRaw();
void        SetExposedObject(OBJECTREF exposed);
// Returns the raw handle for the exposed object; debugger-only accessor.
OBJECTHANDLE GetExposedObjectHandleForDebugger()
{
    LIMITED_METHOD_CONTRACT;
    return m_ExposedObject;
}
| 2524 |  | 
|---|
// Query whether the exposed object exists
// (requires cooperative mode because it dereferences an object handle).
BOOL IsExposedObjectSet()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;
    return (ObjectFromHandle(m_ExposedObject) != NULL) ;
}
| 2538 |  | 
|---|
| 2539 | void GetSynchronizationContext(OBJECTREF *pSyncContextObj) | 
|---|
| 2540 | { | 
|---|
| 2541 | CONTRACTL | 
|---|
| 2542 | { | 
|---|
| 2543 | MODE_COOPERATIVE; | 
|---|
| 2544 | GC_NOTRIGGER; | 
|---|
| 2545 | NOTHROW; | 
|---|
| 2546 | PRECONDITION(CheckPointer(pSyncContextObj)); | 
|---|
| 2547 | } | 
|---|
| 2548 | CONTRACTL_END; | 
|---|
| 2549 |  | 
|---|
| 2550 | *pSyncContextObj = NULL; | 
|---|
| 2551 |  | 
|---|
| 2552 | THREADBASEREF ExposedThreadObj = (THREADBASEREF)GetExposedObjectRaw(); | 
|---|
| 2553 | if (ExposedThreadObj != NULL) | 
|---|
| 2554 | *pSyncContextObj = ExposedThreadObj->GetSynchronizationContext(); | 
|---|
| 2555 | } | 
|---|
| 2556 |  | 
|---|
| 2557 |  | 
|---|
// When we create a managed thread, the thread is suspended.  We call StartThread to get
// the thread start.
DWORD StartThread();
| 2561 |  | 
|---|
// The result of attempting to OS-suspend an EE thread.
enum SuspendThreadResult
{
    // We successfully suspended the thread.  This is the only
    // case where the caller should subsequently call ResumeThread.
    STR_Success,

    // The underlying call to the operating system's SuspendThread
    // or GetThreadContext failed.  This is usually taken to mean
    // that the OS thread has exited.  (This can possibly also mean
    // that the suspension count exceeded the allowed maximum, but
    // Thread::SuspendThread asserts that does not happen.)
    STR_Failure,

    // The thread handle is invalid.  This means that the thread
    // is dead (or dying), or that the object has been created for
    // an exposed System.Thread that has not been started yet.
    STR_UnstartedOrDead,

    // The fOneTryOnly flag was set, and we managed to OS suspend the
    // thread, but we found that it had its m_dwForbidSuspendThread
    // flag set.  If fOneTryOnly is not set, Thread::Suspend will
    // retry in this case.
    STR_Forbidden,

    // Stress logging is turned on, but no stress log had been created
    // for the thread yet, and we failed to create one.  This can mean
    // that either we are not allowed to call into the host, or we ran
    // out of memory.
    STR_NoStressLog,

    // The EE thread is currently switched out.  This can only happen
    // if we are hosted and the host schedules EE threads on fibers.
    STR_SwitchedOut,
};
| 2598 |  | 
|---|
#if defined(FEATURE_HIJACK) && defined(PLATFORM_UNIX)
// Signal-based GC suspension injection (Unix hijack path).
bool InjectGcSuspension();
#endif // FEATURE_HIJACK && PLATFORM_UNIX

#ifndef DISABLE_THREADSUSPEND
// SuspendThread
//   Attempts to OS-suspend the thread, whichever GC mode it is in.
// Arguments:
//   fOneTryOnly - If TRUE, report failure if the thread has its
//     m_dwForbidSuspendThread flag set.  If FALSE, retry.
//   pdwSuspendCount - If non-NULL, will contain the return code
//     of the underlying OS SuspendThread call on success,
//     undefined on any kind of failure.
// Return value:
//   A SuspendThreadResult value indicating success or failure.
SuspendThreadResult SuspendThread(BOOL fOneTryOnly = FALSE, DWORD *pdwSuspendCount = NULL);

// Resumes a thread previously suspended via SuspendThread (STR_Success).
DWORD ResumeThread();

#endif  // DISABLE_THREADSUSPEND
| 2619 |  | 
|---|
// OS priority and wait wrappers for this thread.
int GetThreadPriority();
BOOL SetThreadPriority(
    int nPriority   // thread priority level
);
BOOL Alert ();
DWORD Join(DWORD timeout, BOOL alertable);
DWORD JoinEx(DWORD timeout, WaitMode mode);
| 2627 |  | 
|---|
// Thin wrapper over the OS GetThreadContext for this thread's handle.
BOOL GetThreadContext(
    LPCONTEXT lpContext   // context structure
)
{
    WRAPPER_NO_CONTRACT;
    return ::GetThreadContext (GetThreadHandle(), lpContext);
}
| 2635 |  | 
|---|
#ifndef DACCESS_COMPILE
// Thin wrapper over the OS SetThreadContext for this thread's handle
// (not available under DAC, which must not mutate the target process).
BOOL SetThreadContext(
    CONST CONTEXT *lpContext   // context structure
)
{
    WRAPPER_NO_CONTRACT;
    return ::SetThreadContext (GetThreadHandle(), lpContext);
}
#endif
| 2645 |  | 
|---|
| 2646 | BOOL HasValidThreadHandle () | 
|---|
| 2647 | { | 
|---|
| 2648 | WRAPPER_NO_CONTRACT; | 
|---|
| 2649 | return GetThreadHandle() != INVALID_HANDLE_VALUE; | 
|---|
| 2650 | } | 
|---|
| 2651 |  | 
|---|
// Returns the runtime-assigned (logical) thread id; asserts that it has
// been initialized.
DWORD       GetThreadId()
{
    STATIC_CONTRACT_SO_TOLERANT;
    LIMITED_METHOD_DAC_CONTRACT;
    _ASSERTE(m_ThreadId != UNINITIALIZED_THREADID);
    return m_ThreadId;
}
| 2659 |  | 
|---|
// Returns the OS thread id; asserts (outside DAC) that it is not the
// 0xbaadf00d debug fill value.
DWORD       GetOSThreadId()
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;
#ifndef DACCESS_COMPILE
    _ASSERTE (m_OSThreadId != 0xbaadf00d);
#endif // !DACCESS_COMPILE
    return m_OSThreadId;
}
| 2669 |  | 
|---|
// This API is to be used for Debugger only.
// We need to be able to return the true value of m_OSThreadId
// (no asserts, no filtering).
DWORD       GetOSThreadIdForDebugger()
{
    SUPPORTS_DAC;
    LIMITED_METHOD_CONTRACT;
    return m_OSThreadId;
}
| 2679 |  | 
|---|
| 2680 | BOOL        IsThreadPoolThread() | 
|---|
| 2681 | { | 
|---|
| 2682 | LIMITED_METHOD_CONTRACT; | 
|---|
| 2683 | return m_State & (Thread::TS_TPWorkerThread | Thread::TS_CompletionPortThread); | 
|---|
| 2684 | } | 
|---|
| 2685 |  | 
|---|
// public suspend functions.  System ones are internal, like for GC.  User ones
// correspond to suspend/resume calls on the exposed System.Thread object.
static bool    SysStartSuspendForDebug(AppDomain *pAppDomain);
static bool    SysSweepThreadsForDebug(bool forceSync);
static void    SysResumeFromDebug(AppDomain *pAppDomain);

// Implements managed Thread.Sleep for this thread.
void           UserSleep(INT32 time);
| 2693 |  | 
|---|
// AD unload uses ThreadAbort support.  We need to distinguish pure ThreadAbort and AD unload
// cases.  (Values are bit flags; note 0x2 is unused in this view --
// presumably a retired requester.)
enum ThreadAbortRequester
{
    TAR_Thread =      0x00000001,   // Request by Thread
    TAR_FuncEval =    0x00000004,   // Request by Func-Eval
    TAR_StackOverflow = 0x00000008,   // Request by StackOverflow.  TAR_Thread should be set at the same time.
    TAR_ALL = 0xFFFFFFFF,
};
| 2703 |  | 
|---|
private:

//
// Bit mask for tracking which aborts came in and why.
// Three requesters (Thread / AD-unload / Func-Eval) crossed with three
// severities (safe / V1 / rude).
//
enum ThreadAbortInfo
{
    TAI_ThreadAbort       = 0x00000001,
    TAI_ThreadV1Abort     = 0x00000002,
    TAI_ThreadRudeAbort   = 0x00000004,
    TAI_ADUnloadAbort     = 0x00000008,
    TAI_ADUnloadV1Abort   = 0x00000010,
    TAI_ADUnloadRudeAbort = 0x00000020,
    TAI_FuncEvalAbort     = 0x00000040,
    TAI_FuncEvalV1Abort   = 0x00000080,
    TAI_FuncEvalRudeAbort = 0x00000100,
};
| 2721 |  | 
|---|
    // The "safe" (non-V1, non-rude) abort bits from every requester.
    static const DWORD TAI_AnySafeAbort = (TAI_ThreadAbort   |
                                           TAI_ADUnloadAbort |
                                           TAI_FuncEvalAbort
                                          );

    // The V1-compatibility abort bits from every requester.
    static const DWORD TAI_AnyV1Abort   = (TAI_ThreadV1Abort   |
                                           TAI_ADUnloadV1Abort |
                                           TAI_FuncEvalV1Abort
                                          );

    // The rude-abort bits from every requester.
    static const DWORD TAI_AnyRudeAbort = (TAI_ThreadRudeAbort   |
                                           TAI_ADUnloadRudeAbort |
                                           TAI_FuncEvalRudeAbort
                                          );

    // Every abort bit attributable to func-eval, regardless of severity.
    static const DWORD TAI_AnyFuncEvalAbort = (TAI_FuncEvalAbort   |
                                           TAI_FuncEvalV1Abort |
                                           TAI_FuncEvalRudeAbort
                                          );
|---|
| 2741 |  | 
|---|
| 2742 |  | 
|---|
    // Specifies type of thread abort.
    DWORD  m_AbortInfo;             // ThreadAbortInfo bit mask: which aborts came in and why
    DWORD  m_AbortType;             // current abort type (see EEPolicy::ThreadAbortTypes)
    ULONGLONG  m_AbortEndTime;      // deadline for the normal abort (see GetAbortEndTime)
    ULONGLONG  m_RudeAbortEndTime;  // deadline for the rude abort
    BOOL   m_fRudeAbortInitiated;   // rude-abort processing has started
    LONG   m_AbortController;       // nesting count managed by Acquire/ReleaseAbortControl

    static ULONGLONG s_NextSelfAbortEndTime;

    void SetRudeAbortEndTimeFromEEPolicy();

    // This is a spin lock to serialize setting/resetting of AbortType and AbortRequest.
    LONG  m_AbortRequestLock;

    static void  LockAbortRequest(Thread *pThread);
    static void  UnlockAbortRequest(Thread *pThread);

    // RAII holder for the abort-request spin lock above.
    typedef Holder<Thread*, Thread::LockAbortRequest, Thread::UnlockAbortRequest> AbortRequestLockHolder;
|---|
| 2762 |  | 
|---|
    // Atomically increments the abort-control nesting count on pThread.
    static void AcquireAbortControl(Thread *pThread)
    {
        LIMITED_METHOD_CONTRACT;
        FastInterlockIncrement (&pThread->m_AbortController);
    }

    // Atomically decrements the abort-control nesting count; must pair with
    // AcquireAbortControl (asserted non-zero in debug builds).
    static void ReleaseAbortControl(Thread *pThread)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE (pThread->m_AbortController > 0);
        FastInterlockDecrement (&pThread->m_AbortController);
    }

    // RAII: acquires abort control on construction, releases on destruction.
    typedef Holder<Thread*, Thread::AcquireAbortControl, Thread::ReleaseAbortControl> AbortControlHolder;
|---|
| 2777 |  | 
|---|
public:
#ifdef _DEBUG
    // Debug-only bookkeeping used when diagnosing thread aborts.
    BOOL           m_fRudeAborted;
    DWORD          m_dwAbortPoint;
#endif
|---|
| 2783 |  | 
|---|
| 2784 |  | 
|---|
| 2785 | public: | 
|---|
    // Identifies who initiated a UserAbort call.
    enum UserAbort_Client
    {
        UAC_Normal,
        UAC_Host,       // Called by host through IClrTask::Abort
        UAC_WatchDog,   // Called by ADUnload helper thread
        UAC_FinalizerTimeout,
    };
|---|
| 2793 |  | 
|---|
    // Requests an abort of this thread on behalf of 'client'.
    // 'timeout' is in milliseconds; see the implementation for expiry handling.
    HRESULT        UserAbort(ThreadAbortRequester requester,
                             EEPolicy::ThreadAbortTypes abortType,
                             DWORD timeout,
                             UserAbort_Client client
                            );

    BOOL    HandleJITCaseForAbort();
|---|
| 2801 |  | 
|---|
    // Resets a pending abort on behalf of user code; does NOT clear a rude
    // abort (passes fResetRudeAbort = FALSE).
    void           UserResetAbort(ThreadAbortRequester requester)
    {
        InternalResetAbort(requester, FALSE);
    }
    // Resets a pending abort on behalf of the EE itself; also clears a rude
    // abort (passes fResetRudeAbort = TRUE).
    void           EEResetAbort(ThreadAbortRequester requester)
    {
        InternalResetAbort(requester, TRUE);
    }
|---|
| 2810 |  | 
|---|
private:
    // Common worker for User/EEResetAbort above.
    void           InternalResetAbort(ThreadAbortRequester requester, BOOL fResetRudeAbort);

    void SetAbortEndTime(ULONGLONG endTime, BOOL fRudeAbort);
|---|
| 2815 |  | 
|---|
| 2816 | public: | 
|---|
| 2817 |  | 
|---|
| 2818 | ULONGLONG      GetAbortEndTime() | 
|---|
| 2819 | { | 
|---|
| 2820 | WRAPPER_NO_CONTRACT; | 
|---|
| 2821 | return IsRudeAbort()?m_RudeAbortEndTime:m_AbortEndTime; | 
|---|
| 2822 | } | 
|---|
| 2823 |  | 
|---|
| 2824 | // We distinguish interrupting a thread between Thread.Interrupt and other usage. | 
|---|
| 2825 | // For Thread.Interrupt usage, we will interrupt an alertable wait using the same | 
|---|
| 2826 | // rule as ReadyForAbort.  Wait in EH clause or CER region is not interrupted. | 
|---|
| 2827 | // For other usage, we will try to Abort the thread. | 
|---|
| 2828 | // If we can not do the operation, we will delay until next wait. | 
|---|
    // Bit flags describing how a waiting thread should be interrupted
    // (see the comment block above for the exact rules).
    enum ThreadInterruptMode
    {
        TI_Interrupt = 0x00000001,     // Requested by Thread.Interrupt
        TI_Abort     = 0x00000002,     // Requested by Thread.Abort or AppDomain.Unload
    };
|---|
| 2834 |  | 
|---|
private:
    // Whether it is currently safe to deliver an asynchronous exception
    // (abort/interrupt) to this thread.
    BOOL           ReadyForAsyncException();

public:
    // Interrupts this thread per the ThreadInterruptMode rules above.
    void           UserInterrupt(ThreadInterruptMode mode);

    void           SetAbortRequest(EEPolicy::ThreadAbortTypes abortType);  // Should only be called by ADUnload
|---|
| 2842 | BOOL           ReadyForAbort() | 
|---|
| 2843 | { | 
|---|
| 2844 | return ReadyForAsyncException(); | 
|---|
| 2845 | } | 
|---|
| 2846 |  | 
|---|
    BOOL           IsRudeAbort();
    BOOL           IsFuncEvalAbort();

#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
    // Checks whether the given context is at a point where injecting a
    // thread abort is safe.
    BOOL           IsSafeToInjectThreadAbort(PTR_CONTEXT pContextToCheck);
#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
|---|
| 2853 |  | 
|---|
    // TRUE if an abort has been requested on this thread (TS_AbortRequested).
    inline BOOL IsAbortRequested()
    {
        LIMITED_METHOD_CONTRACT;
        return (m_State & TS_AbortRequested);
    }

    // TRUE once abort processing has actually begun (TS_AbortInitiated).
    inline BOOL IsAbortInitiated()
    {
        LIMITED_METHOD_CONTRACT;
        return (m_State & TS_AbortInitiated);
    }

    // TRUE only while an abort is still requested AND rude-abort processing
    // has begun on it.
    inline BOOL IsRudeAbortInitiated()
    {
        LIMITED_METHOD_CONTRACT;
        return IsAbortRequested() && m_fRudeAbortInitiated;
    }
|---|
| 2871 |  | 
|---|
    // Marks that abort processing has begun on this thread.  IsRudeAbort()
    // is sampled BEFORE setting the state bit so m_fRudeAbortInitiated
    // reflects the abort type in effect at this moment.
    inline void SetAbortInitiated()
    {
        WRAPPER_NO_CONTRACT;
        if (IsRudeAbort()) {
            m_fRudeAbortInitiated = TRUE;
        }
        FastInterlockOr((ULONG *)&m_State, TS_AbortInitiated);
        // The following should be factored better, but I'm looking for a minimal V1 change.
        ResetUserInterrupted();
    }
|---|
| 2882 |  | 
|---|
    // Clears the abort-initiated state: both the TS_AbortInitiated bit and
    // the rude-abort flag.
    inline void ResetAbortInitiated()
    {
        LIMITED_METHOD_CONTRACT;
        FastInterlockAnd((ULONG *)&m_State, ~TS_AbortInitiated);
        m_fRudeAbortInitiated = FALSE;
    }
|---|
| 2889 |  | 
|---|
    // Marks this thread as being in the middle of preparing an abort
    // (TSNC_PreparingAbort); paired with ResetPreparingAbort below.
    inline void SetPreparingAbort()
    {
        WRAPPER_NO_CONTRACT;
        SetThreadStateNC(TSNC_PreparingAbort);
    }

    inline void ResetPreparingAbort()
    {
        WRAPPER_NO_CONTRACT;
        ResetThreadStateNC(TSNC_PreparingAbort);
    }
|---|
| 2901 |  | 
|---|
private:
    // Static wrappers over the CURRENT thread's Set/ResetPreparingAbort,
    // shaped for use with the StateHolder typedef below.
    inline static void SetPreparingAbortForHolder()
    {
        GetThread()->SetPreparingAbort();
    }
    inline static void ResetPreparingAbortForHolder()
    {
        GetThread()->ResetPreparingAbort();
    }
    // RAII: marks the current thread as preparing an abort for the holder's lifetime.
    typedef StateHolder<Thread::SetPreparingAbortForHolder, Thread::ResetPreparingAbortForHolder> PreparingAbortHolder;
|---|
| 2912 |  | 
|---|
| 2913 | public: | 
|---|
| 2914 |  | 
|---|
    // TSNC_CreatingTypeInitException tracks whether this thread is currently
    // in the middle of creating a type-initialization exception.
    inline void SetIsCreatingTypeInitException()
    {
        WRAPPER_NO_CONTRACT;
        SetThreadStateNC(TSNC_CreatingTypeInitException);
    }

    inline void ResetIsCreatingTypeInitException()
    {
        WRAPPER_NO_CONTRACT;
        ResetThreadStateNC(TSNC_CreatingTypeInitException);
    }

    inline BOOL IsCreatingTypeInitException()
    {
        WRAPPER_NO_CONTRACT;
        return HasThreadStateNC(TSNC_CreatingTypeInitException);
    }
|---|
| 2932 |  | 
|---|
private:
    // Low-level helpers that set/clear the abort-request state.
    void SetAbortRequestBit();

    void RemoveAbortRequestBit();

public:
    // Records (or withdraws) an abort request from the given requester.
    void MarkThreadForAbort(ThreadAbortRequester requester, EEPolicy::ThreadAbortTypes abortType, BOOL fTentative = FALSE);
    void UnmarkThreadForAbort(ThreadAbortRequester requester, BOOL fForce = TRUE);

private:
    // Helpers used by ThreadAbortWatchDog below.
    static void ThreadAbortWatchDogAbort(Thread *pThread);
    static void ThreadAbortWatchDogEscalate(Thread *pThread);

public:
    static void ThreadAbortWatchDog();
|---|
| 2948 |  | 
|---|
    // Accessor for the static s_NextSelfAbortEndTime deadline.
    static ULONGLONG GetNextSelfAbortEndTime()
    {
        LIMITED_METHOD_CONTRACT;
        return s_NextSelfAbortEndTime;
    }
|---|
| 2954 |  | 
|---|
#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX)
    // Tricks for resuming threads from fully interruptible code with a ThreadStop.
    BOOL           ResumeUnderControl(T_CONTEXT *pCtx);
#endif // FEATURE_HIJACK && !PLATFORM_UNIX

    // Why this thread deliberately raised an exception on itself; recorded
    // in m_ThrewControlForThread below.
    enum InducedThrowReason {
        InducedThreadStop = 1,
        InducedThreadRedirect = 2,
        InducedThreadRedirectAtEndOfCatch = 3,
    };

    DWORD          m_ThrewControlForThread;     // flag that is set when the thread deliberately raises an exception for stop/abort

    // Returns the InducedThrowReason recorded above (0 when none is pending).
    inline DWORD ThrewControlForThread()
    {
        LIMITED_METHOD_CONTRACT;
        return m_ThrewControlForThread;
    }
|---|
| 2973 |  | 
|---|
    // Records why this thread is about to raise an induced exception.
    inline void SetThrowControlForThread(InducedThrowReason reason)
    {
        LIMITED_METHOD_CONTRACT;
        m_ThrewControlForThread = reason;
    }

    // Clears the induced-throw reason.
    inline void ResetThrowControlForThread()
    {
        LIMITED_METHOD_CONTRACT;
        m_ThrewControlForThread = 0;
    }
|---|
| 2985 |  | 
|---|
    PTR_CONTEXT m_OSContext;    // ptr to a Context structure used to record the OS specific ThreadContext for a thread
                                // this is used for thread stop/abort and is initialized on demand

    PT_CONTEXT GetAbortContext ();

    // These will only ever be called from the debugger's helper
    // thread.
    //
    // When a thread is being created after a debug suspension has
    // started, we get the event on the debugger helper thread. It
    // will turn around and call this to set the debug suspend pending
    // flag on the newly created thread, since it was missed by
    // SysStartSuspendForGC as it didn't exist when that function was
    // run.
    void           MarkForDebugSuspend();
|---|
| 3001 |  | 
|---|
| 3002 | // When the debugger uses the trace flag to single step a thread, | 
|---|
| 3003 | // it also calls this function to mark this info in the thread's | 
|---|
| 3004 | // state. The out-of-process portion of the debugger will read the | 
|---|
| 3005 | // thread's state for a variety of reasons, including looking for | 
|---|
| 3006 | // this flag. | 
|---|
    // Sets or clears TSNC_DebuggerIsStepping (see the comment above).
    void           MarkDebuggerIsStepping(bool onOff)
    {
        WRAPPER_NO_CONTRACT;
        if (onOff)
            SetThreadStateNC(Thread::TSNC_DebuggerIsStepping);
        else
            ResetThreadStateNC(Thread::TSNC_DebuggerIsStepping);
    }
|---|
| 3015 |  | 
|---|
#ifdef _TARGET_ARM_
    // ARM doesn't currently support any reliable hardware mechanism for single-stepping. Instead we emulate
    // this in software. This support is used only by the debugger.
private:
    ArmSingleStepper m_singleStepper;
public:
#ifndef DACCESS_COMPILE
    // Given the context with which this thread shall be resumed and the first WORD of the instruction that
    // should be executed next (this is not always the WORD under PC since the debugger uses this mechanism to
    // skip breakpoints written into the code), set the thread up to execute one instruction and then throw an
    // EXCEPTION_SINGLE_STEP. (In fact an EXCEPTION_BREAKPOINT will be thrown, but this is fixed up in our
    // first chance exception handler, see IsDebuggerFault in excep.cpp).
    void EnableSingleStep()
    {
        m_singleStepper.Enable();
    }

    // Single-step over the supplied opcode WORDs at 'ip' instead of whatever
    // is currently written into the code stream (used to skip breakpoints).
    void BypassWithSingleStep(DWORD ip, WORD opcode1, WORD opcode2)
    {
        m_singleStepper.Bypass(ip, opcode1, opcode2);
    }

    // Turns off the software single-step emulation.
    void DisableSingleStep()
    {
        m_singleStepper.Disable();
    }

    // Applies any pending single-step state to the given resume context.
    void ApplySingleStep(T_CONTEXT *pCtx)
    {
        m_singleStepper.Apply(pCtx);
    }

    bool IsSingleStepEnabled() const
    {
        return m_singleStepper.IsEnabled();
    }

    // Fixup code called by our vectored exception handler to complete the emulation of single stepping
    // initiated by EnableSingleStep above. Returns true if the exception was indeed encountered during
    // stepping.
    bool HandleSingleStep(T_CONTEXT *pCtx, DWORD dwExceptionCode)
    {
        return m_singleStepper.Fixup(pCtx, dwExceptionCode);
    }
#endif // !DACCESS_COMPILE
#endif // _TARGET_ARM_
|---|
| 3062 |  | 
|---|
private:

    // Pending type-load tracking for this thread (NULL when none is pending).
    PendingTypeLoadHolder* m_pPendingTypeLoad;

public:

#ifndef DACCESS_COMPILE
    PendingTypeLoadHolder* GetPendingTypeLoad()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pPendingTypeLoad;
    }

    void SetPendingTypeLoad(PendingTypeLoadHolder* pPendingTypeLoad)
    {
        LIMITED_METHOD_CONTRACT;
        m_pPendingTypeLoad = pPendingTypeLoad;
    }
#endif
|---|
| 3082 |  | 
|---|
#ifdef FEATURE_PREJIT

private:

    // Per-thread IBC profiling state; the accessors below assert that IBC
    // instrumentation is enabled before touching it.
    ThreadLocalIBCInfo* m_pIBCInfo;

public:

#ifndef DACCESS_COMPILE

    ThreadLocalIBCInfo* GetIBCInfo()
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(g_IBCLogger.InstrEnabled());
        return m_pIBCInfo;
    }

    void SetIBCInfo(ThreadLocalIBCInfo* pInfo)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(g_IBCLogger.InstrEnabled());
        m_pIBCInfo = pInfo;
    }
|---|
| 3106 |  | 
|---|
| 3107 | void FlushIBCInfo() | 
|---|
| 3108 | { | 
|---|
| 3109 | WRAPPER_NO_CONTRACT; | 
|---|
| 3110 | if (m_pIBCInfo != NULL) | 
|---|
| 3111 | m_pIBCInfo->FlushDelayedCallbacks(); | 
|---|
| 3112 | } | 
|---|
| 3113 |  | 
|---|
| 3114 | #endif // #ifndef DACCESS_COMPILE | 
|---|
| 3115 |  | 
|---|
| 3116 | #endif // #ifdef FEATURE_PREJIT | 
|---|
| 3117 |  | 
|---|
    // Indicate whether this thread should run in the background.  Background threads
    // don't interfere with the EE shutting down.  Whereas a running non-background
    // thread prevents us from shutting down (except through System.Exit(), of course).
    // WARNING : only GC calls this with bRequiresTSL set to FALSE.
    void           SetBackground(BOOL isBack, BOOL bRequiresTSL=TRUE);

    // When the thread starts running, make sure it is running in the correct apartment
    // and context.
    BOOL           PrepareApartmentAndContext();
|---|
| 3127 |  | 
|---|
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
    // Retrieve the apartment state of the current thread. There are three possible
    // states: thread hosts an STA, thread is part of the MTA or thread state is
    // undecided. The last state may indicate that the apartment has not been set at
    // all (nobody has called CoInitializeEx) or that the EE does not know the
    // current state (EE has not called CoInitializeEx).
    enum ApartmentState { AS_InSTA, AS_InMTA, AS_Unknown };
    ApartmentState GetApartment();
    ApartmentState GetApartmentRare(Thread::ApartmentState as);
    ApartmentState GetExplicitApartment();

    // Sets the apartment state if it has not already been set and
    // returns the state.
    ApartmentState GetFinalApartment();

    // Attempt to set current thread's apartment state. The actual apartment state
    // achieved is returned and may differ from the input state if someone managed to
    // call CoInitializeEx on this thread first (note that calls to SetApartment made
    // before the thread has started are guaranteed to succeed).
    // The fFireMDAOnMismatch indicates if we should fire the apartment state probe
    // on an apartment state mismatch.
    ApartmentState SetApartment(ApartmentState state, BOOL fFireMDAOnMismatch);

    // When we get an apartment tear-down notification,
    // we want to reset the apartment state we cache on the thread.
    VOID ResetApartment();
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
|---|
| 3155 |  | 
|---|
    // Either perform WaitForSingleObject or MsgWaitForSingleObject as appropriate.
    DWORD          DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll,
                                     DWORD millis, WaitMode mode,
                                     PendingSync *syncInfo = 0);

    DWORD          DoAppropriateWait(AppropriateWaitFunc func, void *args, DWORD millis,
                                     WaitMode mode, PendingSync *syncInfo = 0);
    DWORD          DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable,
                                   PendingSync *syncState = 0);
private:
    // Internal workers backing the public wait entry points above.
    void           DoAppropriateWaitWorkerAlertableHelper(WaitMode mode);
    DWORD          DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll,
                                           DWORD millis, WaitMode mode);
    DWORD          DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args,
                                           DWORD millis, WaitMode mode);
    DWORD          DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable);
    DWORD          DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll, DWORD timeout, WaitMode mode);
    DWORD          DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis);
public:
|---|
| 3175 |  | 
|---|
| 3176 | //************************************************************************ | 
|---|
| 3177 | // Enumerate all frames. | 
|---|
| 3178 | //************************************************************************ | 
|---|
| 3179 |  | 
|---|
| 3180 | /* Flags used for StackWalkFramesEx */ | 
|---|
| 3181 |  | 
|---|
| 3182 | // FUNCTIONSONLY excludes all functionless frames and all funclets | 
|---|
| 3183 | #define FUNCTIONSONLY                   0x0001 | 
|---|
| 3184 |  | 
|---|
| 3185 | // SKIPFUNCLETS includes functionless frames but excludes all funclets and everything between funclets and their parent methods | 
|---|
| 3186 | #define SKIPFUNCLETS                    0x0002 | 
|---|
| 3187 |  | 
|---|
| 3188 | #define POPFRAMES                       0x0004 | 
|---|
| 3189 |  | 
|---|
| 3190 | /* use the following  flag only if you REALLY know what you are doing !!! */ | 
|---|
| 3191 | #define QUICKUNWIND                     0x0008 // do not restore all registers during unwind | 
|---|
| 3192 |  | 
|---|
| 3193 | #define HANDLESKIPPEDFRAMES             0x0010 // temporary to handle skipped frames for appdomain unload | 
|---|
| 3194 | // stack crawl. Eventually need to always do this but it | 
|---|
| 3195 | // breaks the debugger right now. | 
|---|
| 3196 |  | 
|---|
| 3197 | #define LIGHTUNWIND                     0x0020 // allow using cache schema (see StackwalkCache class) | 
|---|
| 3198 |  | 
|---|
| 3199 | #define NOTIFY_ON_U2M_TRANSITIONS       0x0040 // Provide a callback for native transitions. | 
|---|
| 3200 | // This is only useful to a debugger trying to find native code | 
|---|
| 3201 | // in the stack. | 
|---|
| 3202 |  | 
|---|
| 3203 | #define DISABLE_MISSING_FRAME_DETECTION 0x0080 // disable detection of missing TransitionFrames | 
|---|
| 3204 |  | 
|---|
| 3205 | // One thread may be walking the stack of another thread | 
|---|
| 3206 | // If you need to use this, you may also need to put a call to CrawlFrame::CheckGSCookies | 
|---|
| 3207 | // in your callback routine if it does any potentially time-consuming activity. | 
|---|
| 3208 | #define ALLOW_ASYNC_STACK_WALK          0x0100 | 
|---|
| 3209 |  | 
|---|
| 3210 | #define THREAD_IS_SUSPENDED             0x0200 // Be careful not to cause deadlocks, this thread is suspended | 
|---|
| 3211 |  | 
|---|
| 3212 | // Stackwalk tries to verify some objects, but it could be called in relocate phase of GC, | 
|---|
| 3213 | // where objects could be in invalid state, this flag is to tell stackwalk to skip the validation | 
|---|
| 3214 | #define ALLOW_INVALID_OBJECTS           0x0400 | 
|---|
| 3215 |  | 
|---|
| 3216 | // Caller has verified that the thread to be walked is in the middle of executing | 
|---|
| 3217 | // JITd or NGENd code, according to the thread's current context (or seeded | 
|---|
| 3218 | // context if one was provided).  The caller ensures this when the stackwalk | 
|---|
| 3219 | // is initiated by a profiler. | 
|---|
| 3220 | #define THREAD_EXECUTING_MANAGED_CODE   0x0800 | 
|---|
| 3221 |  | 
|---|
| 3222 | // This stackwalk is due to the DoStackSnapshot profiler API | 
|---|
| 3223 | #define PROFILER_DO_STACK_SNAPSHOT   0x1000 | 
|---|
| 3224 |  | 
|---|
| 3225 | // When this flag is set, the stackwalker does not automatically advance to the | 
|---|
| 3226 | // faulting managed stack frame when it encounters an ExInfo.  This should only be | 
|---|
| 3227 | // necessary for native debuggers doing mixed-mode stackwalking. | 
|---|
| 3228 | #define NOTIFY_ON_NO_FRAME_TRANSITIONS  0x2000 | 
|---|
| 3229 |  | 
|---|
| 3230 | // Normally, the stackwalker does not stop at the initial CONTEXT if the IP is in native code. | 
|---|
| 3231 | // This flag changes the stackwalker behaviour.  Currently this is only used in the debugger stackwalking | 
|---|
| 3232 | // API. | 
|---|
| 3233 | #define NOTIFY_ON_INITIAL_NATIVE_CONTEXT 0x4000 | 
|---|
| 3234 |  | 
|---|
| 3235 | // Indicates that we are enumerating GC references and should follow appropriate | 
|---|
| 3236 | // callback rules for parent methods vs funclets. Only supported on non-x86 platforms. | 
|---|
| 3237 | // | 
|---|
| 3238 | // Refer to StackFrameIterator::Filter for detailed comments on this flag. | 
|---|
| 3239 | #define GC_FUNCLET_REFERENCE_REPORTING 0x8000 | 
|---|
| 3240 |  | 
|---|
| 3241 | // Stackwalking normally checks GS cookies on the fly, but there are cases in which the JIT reports | 
|---|
| 3242 | // incorrect epilog information. This causes the debugger to request stack walks in the epilog, checking | 
|---|
// a now-invalid cookie. This flag allows the debugger stack walks to disable GS cookie checking.
|---|
| 3244 |  | 
|---|
| 3245 | // This is a workaround for the debugger stackwalking. In general, the stackwalker and CrawlFrame | 
|---|
| 3246 | // may still execute GS cookie tracking/checking code paths. | 
|---|
| 3247 | #define SKIP_GSCOOKIE_CHECK 0x10000 | 
|---|
| 3248 |  | 
|---|
    // Walks this thread's stack, invoking pCallback on each frame.
    // 'flags' is a combination of the stackwalk flags defined above.
    StackWalkAction StackWalkFramesEx(
                        PREGDISPLAY pRD,        // virtual register set at crawl start
                        PSTACKWALKFRAMESCALLBACK pCallback,
                        VOID *pData,
                        unsigned flags,
                        PTR_Frame pStartFrame = PTR_NULL);

private:
    // private helpers used by StackWalkFramesEx and StackFrameIterator
    StackWalkAction MakeStackwalkerCallback(CrawlFrame* pCF, PSTACKWALKFRAMESCALLBACK pCallback, VOID* pData DEBUG_ARG(UINT32 uLoopIteration));

#ifdef _DEBUG
    void            DebugLogStackWalkInfo(CrawlFrame* pCF, __in_z LPCSTR pszTag, UINT32 uLoopIteration);
#endif // _DEBUG
|---|
| 3263 |  | 
|---|
public:

    StackWalkAction StackWalkFrames(
                        PSTACKWALKFRAMESCALLBACK pCallback,
                        VOID *pData,
                        unsigned flags = 0,
                        PTR_Frame pStartFrame = PTR_NULL);

    // Initialize / populate a REGDISPLAY from a thread context.
    bool InitRegDisplay(const PREGDISPLAY, const PT_CONTEXT, bool validContext);
    void FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx);
|---|
| 3274 |  | 
|---|
#ifdef WIN64EXCEPTIONS
    static PCODE VirtualUnwindCallFrame(T_CONTEXT* pContext, T_KNONVOLATILE_CONTEXT_POINTERS* pContextPointers = NULL,
                                        EECodeInfo * pCodeInfo = NULL);
    static UINT_PTR VirtualUnwindCallFrame(PREGDISPLAY pRD, EECodeInfo * pCodeInfo = NULL);
#ifndef DACCESS_COMPILE
    static PCODE VirtualUnwindLeafCallFrame(T_CONTEXT* pContext);
    // Fix: uImageBase is an integer (UINT_PTR), not a pointer, so its default
    // must be 0 rather than NULL (NULL may expand to nullptr, which does not
    // convert to an integral type).
    static PCODE VirtualUnwindNonLeafCallFrame(T_CONTEXT* pContext, T_KNONVOLATILE_CONTEXT_POINTERS* pContextPointers = NULL,
                                               PT_RUNTIME_FUNCTION pFunctionEntry = NULL, UINT_PTR uImageBase = 0);
    static UINT_PTR VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext);
#endif // DACCESS_COMPILE
#endif // WIN64EXCEPTIONS
|---|
| 3286 |  | 
|---|
| 3287 | // During a <clinit>, this thread must not be asynchronously | 
|---|
| 3288 | // stopped or interrupted.  That would leave the class unavailable | 
|---|
| 3289 | // and is therefore a security hole. | 
|---|
    // Increments the current thread's count of reasons to suppress
    // asynchronous exceptions.  Paired with DecPreventAsync (see
    // ThreadPreventAsyncHolder below).
    static void        IncPreventAsync()
    {
        WRAPPER_NO_CONTRACT;
        Thread *pThread = GetThread();
        FastInterlockIncrement((LONG*)&pThread->m_PreventAsync);
    }
    static void        DecPreventAsync()
    {
        WRAPPER_NO_CONTRACT;
        Thread *pThread = GetThread();
        FastInterlockDecrement((LONG*)&pThread->m_PreventAsync);
    }
|---|
| 3302 |  | 
|---|
| 3303 | bool IsAsyncPrevented() | 
|---|
| 3304 | { | 
|---|
| 3305 | return m_PreventAsync != 0; | 
|---|
| 3306 | } | 
|---|
| 3307 |  | 
|---|
| 3308 | typedef StateHolder<Thread::IncPreventAsync, Thread::DecPreventAsync> ThreadPreventAsyncHolder; | 
|---|
| 3309 |  | 
|---|
| 3310 | // During a <clinit>, this thread must not be asynchronously | 
|---|
| 3311 | // stopped or interrupted.  That would leave the class unavailable | 
|---|
| 3312 | // and is therefore a security hole. | 
|---|
    // Increments the current thread's count of reasons to suppress thread
    // aborts.  Paired with DecPreventAbort (see ThreadPreventAbortHolder below).
    static void        IncPreventAbort()
    {
        WRAPPER_NO_CONTRACT;
        Thread *pThread = GetThread();
        FastInterlockIncrement((LONG*)&pThread->m_PreventAbort);
    }
    static void        DecPreventAbort()
    {
        WRAPPER_NO_CONTRACT;
        Thread *pThread = GetThread();
        FastInterlockDecrement((LONG*)&pThread->m_PreventAbort);
    }
|---|
| 3325 |  | 
|---|
| 3326 | BOOL IsAbortPrevented() | 
|---|
| 3327 | { | 
|---|
| 3328 | return m_PreventAbort != 0; | 
|---|
| 3329 | } | 
|---|
| 3330 |  | 
|---|
| 3331 | typedef StateHolder<Thread::IncPreventAbort, Thread::DecPreventAbort> ThreadPreventAbortHolder; | 
|---|
| 3332 |  | 
|---|
    // The ThreadStore manages a list of all the threads in the system.  I
    // can't figure out how to expand the ThreadList template type without
    // making m_Link public.  (Intrusive list link; see ThreadStore.)
    SLink       m_Link;

    // For N/Direct calls with the "setLastError" bit, this field stores
    // the errorcode from that call.
    DWORD       m_dwLastError;

#ifdef FEATURE_INTERPRETER
    // When we're interpreting IL stubs for N/Direct calls with the "setLastError" bit,
    // the interpretation will trash the last error before we get to the call to "SetLastError".
    // Therefore, we record it here immediately after the calli, and treat "SetLastError" as an
    // intrinsic that transfers the value stored here into the field above.
    DWORD       m_dwLastErrorInterp;
#endif
|---|
| 3349 |  | 
|---|
    // Debugger per-thread flag for enabling notification on "manual"
    // method calls, for stepping logic.
    void IncrementTraceCallCount();
    void DecrementTraceCallCount();

    // Non-zero while the debugger has asked to be notified of method calls.
    FORCEINLINE int IsTraceCall()
    {
        LIMITED_METHOD_CONTRACT;
        return m_TraceCallCount;
    }
|---|
| 3360 |  | 
|---|
| 3361 | // Functions to get/set culture information for current thread. | 
|---|
| 3362 | static OBJECTREF GetCulture(BOOL bUICulture); | 
|---|
| 3363 | static void SetCulture(OBJECTREF *CultureObj, BOOL bUICulture); | 
|---|
| 3364 |  | 
|---|
| 3365 | private: | 
|---|
| 3366 | #if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) | 
|---|
| 3367 | // Used in suspension code to redirect a thread at a HandledJITCase | 
|---|
| 3368 | BOOL RedirectThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt); | 
|---|
| 3369 | BOOL RedirectCurrentThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt, T_CONTEXT *pCurrentThreadCtx); | 
|---|
| 3370 |  | 
|---|
| 3371 | // Will Redirect the thread using RedirectThreadAtHandledJITCase if necessary | 
|---|
| 3372 | BOOL CheckForAndDoRedirect(PFN_REDIRECTTARGET pRedirectTarget); | 
|---|
| 3373 | BOOL CheckForAndDoRedirectForDbg(); | 
|---|
| 3374 | BOOL CheckForAndDoRedirectForGC(); | 
|---|
| 3375 | BOOL CheckForAndDoRedirectForUserSuspend(); | 
|---|
| 3376 |  | 
|---|
| 3377 | // Exception handling must be very aware of redirection, so we provide a helper | 
|---|
| 3378 | // to identifying redirection targets | 
|---|
| 3379 | static BOOL IsAddrOfRedirectFunc(void * pFuncAddr); | 
|---|
| 3380 |  | 
|---|
| 3381 | #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) | 
|---|
| 3382 | public: | 
|---|
| 3383 | BOOL CheckForAndDoRedirectForGCStress (T_CONTEXT *pCurrentThreadCtx); | 
|---|
| 3384 | private: | 
|---|
| 3385 | bool        m_fPreemptiveGCDisabledForGCStress; | 
|---|
| 3386 | #endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS | 
|---|
| 3387 | #endif // FEATURE_HIJACK && !PLATFORM_UNIX | 
|---|
| 3388 |  | 
|---|
| 3389 | public: | 
|---|
| 3390 |  | 
|---|
| 3391 | #ifndef DACCESS_COMPILE | 
|---|
| 3392 | // These re-calculate the proper value on each call for the currently executing thread. Use GetCachedStackLimit | 
|---|
| 3393 | // and GetCachedStackBase for the cached values on this Thread. | 
|---|
| 3394 | static void * GetStackLowerBound(); | 
|---|
| 3395 | static void * GetStackUpperBound(); | 
|---|
| 3396 | #endif | 
|---|
| 3397 |  | 
|---|
| 3398 | enum SetStackLimitScope { fAll, fAllowableOnly }; | 
|---|
| 3399 | BOOL SetStackLimits(SetStackLimitScope scope); | 
|---|
| 3400 |  | 
|---|
| 3401 | // These access the stack base and limit values for this thread. (They are cached during InitThread.) The | 
|---|
| 3402 | // "stack base" is the "upper bound", i.e., where the stack starts growing from. (Main's call frame is at the | 
|---|
| 3403 | // upper bound.) The "stack limit" is the "lower bound", i.e., how far the stack can grow down to. | 
|---|
| 3404 | // The "stack sufficient execution limit" is used by EnsureSufficientExecutionStack() to limit how much stack | 
|---|
| 3405 | // should remain to execute the average Framework method. | 
|---|
    // Cached stack base ("upper bound": where the stack starts growing down from).
    PTR_VOID GetCachedStackBase() {LIMITED_METHOD_DAC_CONTRACT;  return m_CacheStackBase; }
|---|
    // Cached stack limit ("lower bound": how far down the stack can grow).
    PTR_VOID GetCachedStackLimit() {LIMITED_METHOD_DAC_CONTRACT;  return m_CacheStackLimit;}
|---|
    // Cached limit consulted by EnsureSufficientExecutionStack().
    UINT_PTR GetCachedStackSufficientExecutionLimit() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackSufficientExecutionLimit;}
|---|
| 3409 |  | 
|---|
| 3410 | private: | 
|---|
| 3411 | // Access the base and limit of the stack. (I.e. the memory ranges that the thread has reserved for its stack). | 
|---|
| 3412 | // | 
|---|
| 3413 | // Note that the base is at a higher address than the limit, since the stack grows downwards. | 
|---|
| 3414 | // | 
|---|
| 3415 | // Note that we generally access the stack of the thread we are crawling, which is cached in the ScanContext. | 
|---|
| 3416 | PTR_VOID    m_CacheStackBase; | 
|---|
| 3417 | PTR_VOID    m_CacheStackLimit; | 
|---|
| 3418 | UINT_PTR    m_CacheStackSufficientExecutionLimit; | 
|---|
| 3419 |  | 
|---|
| 3420 | #define HARD_GUARD_REGION_SIZE GetOsPageSize() | 
|---|
| 3421 |  | 
|---|
| 3422 | private: | 
|---|
| 3423 | // | 
|---|
| 3424 | static HRESULT CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope = STSGuarantee_OnlyIfEnabled); | 
|---|
| 3425 |  | 
|---|
| 3426 | // try to turn a page into a guard page | 
|---|
| 3427 | static BOOL MarkPageAsGuard(UINT_PTR uGuardPageBase); | 
|---|
| 3428 |  | 
|---|
| 3429 | // scan a region for a guard page | 
|---|
| 3430 | static BOOL DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress); | 
|---|
| 3431 |  | 
|---|
| 3432 | // Every stack has a single reserved page at its limit that we call the 'hard guard page'. This page is never | 
|---|
| 3433 | // committed, and access to it after a stack overflow will terminate the thread. | 
|---|
| 3434 | #define HARD_GUARD_REGION_SIZE GetOsPageSize() | 
|---|
| 3435 | #define SIZEOF_DEFAULT_STACK_GUARANTEE 1 * GetOsPageSize() | 
|---|
| 3436 |  | 
|---|
| 3437 | public: | 
|---|
| 3438 | // This will return the last stack address that one could write to before a stack overflow. | 
|---|
| 3439 | static UINT_PTR GetLastNormalStackAddress(UINT_PTR stackBase); | 
|---|
| 3440 | UINT_PTR GetLastNormalStackAddress(); | 
|---|
| 3441 |  | 
|---|
| 3442 | UINT_PTR GetLastAllowableStackAddress() | 
|---|
| 3443 | { | 
|---|
| 3444 | return m_LastAllowableStackAddress; | 
|---|
| 3445 | } | 
|---|
| 3446 |  | 
|---|
| 3447 | UINT_PTR GetProbeLimit() | 
|---|
| 3448 | { | 
|---|
| 3449 | return m_ProbeLimit; | 
|---|
| 3450 | } | 
|---|
| 3451 |  | 
|---|
    // Re-derives the cached probe-limit / last-allowable-stack-address values
    // (via SetStackLimits(fAllowableOnly)), but only when a stack guarantee
    // has actually been put in use on this thread.
    void ResetStackLimits()
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            SO_TOLERANT;
            MODE_ANY;
        }
        CONTRACTL_END;
        // No guarantee in use means the cached limits are still valid.
        if (!IsSetThreadStackGuaranteeInUse())
        {
            return;
        }
        SetStackLimits(fAllowableOnly);
    }
|---|
| 3468 |  | 
|---|
| 3469 | BOOL IsSPBeyondLimit(); | 
|---|
| 3470 |  | 
|---|
| 3471 | INDEBUG(static void DebugLogStackMBIs()); | 
|---|
| 3472 |  | 
|---|
| 3473 | #if defined(_DEBUG_IMPL) && !defined(DACCESS_COMPILE) | 
|---|
| 3474 | // Verify that the cached stack base is for the current thread. | 
|---|
    BOOL HasRightCacheStackBase()
    {
        WRAPPER_NO_CONTRACT;
        // Recompute the current thread's stack base and compare with the cache;
        // only meaningful when called on the thread this object represents.
        return m_CacheStackBase == GetStackUpperBound();
    }
|---|
| 3480 | #endif | 
|---|
| 3481 |  | 
|---|
| 3482 | public: | 
|---|
| 3483 | static BOOL UniqueStack(void* startLoc = 0); | 
|---|
| 3484 |  | 
|---|
| 3485 | BOOL IsAddressInStack (PTR_VOID addr) const | 
|---|
| 3486 | { | 
|---|
| 3487 | LIMITED_METHOD_DAC_CONTRACT; | 
|---|
| 3488 | _ASSERTE(m_CacheStackBase != NULL); | 
|---|
| 3489 | _ASSERTE(m_CacheStackLimit != NULL); | 
|---|
| 3490 | _ASSERTE(m_CacheStackLimit < m_CacheStackBase); | 
|---|
| 3491 | return m_CacheStackLimit < addr && addr <= m_CacheStackBase; | 
|---|
| 3492 | } | 
|---|
| 3493 |  | 
|---|
    // Tests whether 'addr' is in the *live* region of the current thread's
    // stack: above the current stack pointer (exclusive) and at or below the
    // cached stack base (inclusive). Note the lower bound is the current SP,
    // not the cached stack limit, so addresses in the unused (dead) portion
    // of the stack return FALSE. Returns FALSE if the caller has no Thread.
    static BOOL IsAddressInCurrentStack (PTR_VOID addr)
    {
        LIMITED_METHOD_DAC_CONTRACT;
        Thread* currentThread = GetThread();
        if (currentThread == NULL)
        {
            return FALSE;
        }

        PTR_VOID sp = dac_cast<PTR_VOID>(GetCurrentSP());
        _ASSERTE(currentThread->m_CacheStackBase != NULL);
        _ASSERTE(sp < currentThread->m_CacheStackBase);
        return sp < addr && addr <= currentThread->m_CacheStackBase;
    }
|---|
| 3508 |  | 
|---|
| 3509 | // DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function | 
|---|
| 3510 | // makes a physical check of the stack, rather than relying on whether or not the CLR is currently processing a | 
|---|
| 3511 | // stack overflow exception. | 
|---|
| 3512 | BOOL DetermineIfGuardPagePresent(); | 
|---|
| 3513 |  | 
|---|
| 3514 | #ifdef FEATURE_STACK_PROBE | 
|---|
    // CanResetStackTo will return TRUE if the given stack pointer is far enough away from the guard page to properly
    // restore the guard page with RestoreGuardPage.
|---|
| 3517 | BOOL CanResetStackTo(LPCVOID stackPointer); | 
|---|
| 3518 |  | 
|---|
| 3519 | // IsStackSpaceAvailable will return true if there are the given number of stack pages available on the stack. | 
|---|
| 3520 | BOOL IsStackSpaceAvailable(float numPages); | 
|---|
| 3521 |  | 
|---|
| 3522 | #endif | 
|---|
| 3523 |  | 
|---|
| 3524 | // Returns the amount of stack available after an SO but before the OS rips the process. | 
|---|
| 3525 | static UINT_PTR GetStackGuarantee(); | 
|---|
| 3526 |  | 
|---|
| 3527 | // RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed | 
|---|
| 3528 | // by the OS due to a stack overflow exception. This function requires that you know that you have enough stack | 
|---|
| 3529 | // space to restore the guard page, so make sure you know what you're doing when you decide to call this. | 
|---|
| 3530 | VOID RestoreGuardPage(); | 
|---|
| 3531 |  | 
|---|
| 3532 | #if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) | 
|---|
| 3533 | private: | 
|---|
| 3534 | // Redirecting of threads in managed code at suspension | 
|---|
| 3535 |  | 
|---|
| 3536 | enum RedirectReason { | 
|---|
| 3537 | RedirectReason_GCSuspension, | 
|---|
| 3538 | RedirectReason_DebugSuspension, | 
|---|
| 3539 | RedirectReason_UserSuspension, | 
|---|
| 3540 | #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER | 
|---|
| 3541 | RedirectReason_GCStress, | 
|---|
| 3542 | #endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS | 
|---|
| 3543 | }; | 
|---|
| 3544 | static void __stdcall RedirectedHandledJITCase(RedirectReason reason); | 
|---|
| 3545 | static void __stdcall RedirectedHandledJITCaseForDbgThreadControl(); | 
|---|
| 3546 | static void __stdcall RedirectedHandledJITCaseForGCThreadControl(); | 
|---|
| 3547 | static void __stdcall RedirectedHandledJITCaseForUserSuspend(); | 
|---|
| 3548 | #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER | 
|---|
| 3549 | static void __stdcall RedirectedHandledJITCaseForGCStress(); | 
|---|
| 3550 | #endif // defined(HAVE_GCCOVER) && USE_REDIRECT_FOR_GCSTRESS | 
|---|
| 3551 |  | 
|---|
| 3552 | friend void CPFH_AdjustContextForThreadSuspensionRace(T_CONTEXT *pContext, Thread *pThread); | 
|---|
| 3553 | #endif // FEATURE_HIJACK && !PLATFORM_UNIX | 
|---|
| 3554 |  | 
|---|
| 3555 | private: | 
|---|
| 3556 | //------------------------------------------------------------- | 
|---|
| 3557 | // Waiting & Synchronization | 
|---|
| 3558 | //------------------------------------------------------------- | 
|---|
| 3559 |  | 
|---|
| 3560 | // For suspends.  The thread waits on this event.  A client sets the event to cause | 
|---|
| 3561 | // the thread to resume. | 
|---|
| 3562 | void    WaitSuspendEvents(BOOL fDoWait = TRUE); | 
|---|
| 3563 | BOOL    WaitSuspendEventsHelper(void); | 
|---|
| 3564 |  | 
|---|
| 3565 | // Helpers to ensure that the bits for suspension and the number of active | 
|---|
| 3566 | // traps remain coordinated. | 
|---|
| 3567 | void    MarkForSuspension(ULONG bit); | 
|---|
| 3568 | void    UnmarkForSuspension(ULONG bit); | 
|---|
| 3569 |  | 
|---|
    // Prepares this thread for an impending suspension of the kind named by
    // 'bit' (a TS_* state flag). For a debug suspend this resets the event
    // the thread will later block on in WaitSuspendEvents.
    void    SetupForSuspension(ULONG bit)
    {
        WRAPPER_NO_CONTRACT;

        // CoreCLR does not support user-requested thread suspension
        _ASSERTE(!(bit & TS_UserSuspendPending));


        if (bit & TS_DebugSuspendPending) {
            m_DebugSuspendEvent.Reset();
        }
    }
|---|
| 3582 |  | 
|---|
    // Releases this thread from a suspension of the kind named by 'bit'.
    // Clears the bit (keeping the trap-count coordinated via
    // UnmarkForSuspension), then, if no suspension remains pending, also
    // clears TS_SyncSuspended with an interlocked CAS loop, and finally
    // signals the debug-suspend event so WaitSuspendEvents can return.
    void    ReleaseFromSuspension(ULONG bit)
    {
        WRAPPER_NO_CONTRACT;

        // Note the complement: UnmarkForSuspension takes the mask of bits to keep.
        UnmarkForSuspension(~bit);

        //
        // If the thread is set free, mark it as not-suspended now
        //
        ThreadState oldState = m_State;

        // CoreCLR does not support user-requested thread suspension
        _ASSERTE(!(oldState & TS_UserSuspendPending));

        while ((oldState & (TS_UserSuspendPending | TS_DebugSuspendPending)) == 0)
        {
            // CoreCLR does not support user-requested thread suspension
            _ASSERTE(!(oldState & TS_UserSuspendPending));

            //
            // Construct the destination state we desire - all suspension bits turned off.
            //
            ThreadState newState = (ThreadState)(oldState & ~(TS_UserSuspendPending |
                                                              TS_DebugSuspendPending |
                                                              TS_SyncSuspended));

            // CAS succeeds only if no other thread raced a state change in;
            // the return value is the state observed before the exchange.
            if (FastInterlockCompareExchange((LONG *)&m_State, newState, oldState) == (LONG)oldState)
            {
                break;
            }

            //
            // The state changed underneath us, refresh it and try again.
            //
            oldState = m_State;
        }

        // CoreCLR does not support user-requested thread suspension
        _ASSERTE(!(bit & TS_UserSuspendPending));

        if (bit & TS_DebugSuspendPending) {
            m_DebugSuspendEvent.Set();
        }

    }
|---|
| 3628 |  | 
|---|
| 3629 | public: | 
|---|
    // Undoes a return-address hijack without allocating: restores the
    // original return address that HijackThread saved and clears the
    // TS_Hijacked flag. Safe where allocation is forbidden; no-op when the
    // thread is not hijacked (or in DAC / non-hijack builds).
    FORCEINLINE void UnhijackThreadNoAlloc()
    {
#if defined(FEATURE_HIJACK) && !defined(DACCESS_COMPILE)
        if (m_State & TS_Hijacked)
        {
            // Put the saved return address back where we bashed it.
            *m_ppvHJRetAddrPtr = m_pvHJRetAddr;
            FastInterlockAnd((ULONG *) &m_State, ~TS_Hijacked);
        }
#endif
    }
|---|
| 3640 |  | 
|---|
| 3641 | void    UnhijackThread(); | 
|---|
| 3642 |  | 
|---|
| 3643 | // Flags that may be passed to GetSafelyRedirectableThreadContext, to customize | 
|---|
| 3644 | // which checks it should perform.  This allows a subset of the context verification | 
|---|
| 3645 | // logic used by HandledJITCase to be shared with other callers, such as profiler | 
|---|
| 3646 | // stackwalking | 
|---|
| 3647 | enum GetSafelyRedirectableThreadContextOptions | 
|---|
| 3648 | { | 
|---|
| 3649 | // Perform the default thread context checks | 
|---|
| 3650 | kDefaultChecks              = 0x00000000, | 
|---|
| 3651 |  | 
|---|
| 3652 | // Compares the thread context's IP against m_LastRedirectIP, and potentially | 
|---|
| 3653 | // updates m_LastRedirectIP, when determining the safeness of the thread's | 
|---|
| 3654 | // context.  HandledJITCase will always set this flag. | 
|---|
| 3655 | // This flag is ignored on non-x86 platforms, and also on x86 if the OS supports | 
|---|
| 3656 | // trap frame reporting. | 
|---|
| 3657 | kPerfomLastRedirectIPCheck  = 0x00000001, | 
|---|
| 3658 |  | 
|---|
| 3659 | // Use g_pDebugInterface->IsThreadContextInvalid() to see if breakpoints might | 
|---|
| 3660 | // confuse the stack walker.  HandledJITCase will always set this flag. | 
|---|
| 3661 | kCheckDebuggerBreakpoints   = 0x00000002, | 
|---|
| 3662 | }; | 
|---|
| 3663 |  | 
|---|
| 3664 | // Helper used by HandledJITCase and others who need an absolutely reliable | 
|---|
| 3665 | // register context. | 
|---|
| 3666 | BOOL GetSafelyRedirectableThreadContext(DWORD dwOptions, T_CONTEXT * pCtx, REGDISPLAY * pRD); | 
|---|
| 3667 |  | 
|---|
| 3668 | private: | 
|---|
| 3669 | #ifdef FEATURE_HIJACK | 
|---|
| 3670 | void    HijackThread(VOID *pvHijackAddr, ExecutionState *esb); | 
|---|
| 3671 |  | 
|---|
| 3672 | VOID        *m_pvHJRetAddr;           // original return address (before hijack) | 
|---|
| 3673 | VOID       **m_ppvHJRetAddrPtr;       // place we bashed a new return address | 
|---|
| 3674 | MethodDesc  *m_HijackedFunction;      // remember what we hijacked | 
|---|
| 3675 |  | 
|---|
| 3676 | #ifndef PLATFORM_UNIX | 
|---|
| 3677 | BOOL    HandledJITCase(BOOL ForTaskSwitchIn = FALSE); | 
|---|
| 3678 |  | 
|---|
| 3679 | #ifdef _TARGET_X86_ | 
|---|
| 3680 | PCODE       m_LastRedirectIP; | 
|---|
| 3681 | ULONG       m_SpinCount; | 
|---|
| 3682 | #endif // _TARGET_X86_ | 
|---|
| 3683 |  | 
|---|
| 3684 | #endif // !PLATFORM_UNIX | 
|---|
| 3685 |  | 
|---|
| 3686 | #endif // FEATURE_HIJACK | 
|---|
| 3687 |  | 
|---|
| 3688 | DWORD       m_Win32FaultAddress; | 
|---|
| 3689 | DWORD       m_Win32FaultCode; | 
|---|
| 3690 |  | 
|---|
| 3691 | // Support for Wait/Notify | 
|---|
| 3692 | BOOL        Block(INT32 timeOut, PendingSync *syncInfo); | 
|---|
| 3693 | void        Wake(SyncBlock *psb); | 
|---|
| 3694 | DWORD       Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo); | 
|---|
| 3695 | DWORD       Wait(CLREvent* pEvent, INT32 timeOut, PendingSync *syncInfo); | 
|---|
| 3696 |  | 
|---|
| 3697 | // support for Thread.Interrupt() which breaks out of Waits, Sleeps, Joins | 
|---|
| 3698 | LONG        m_UserInterrupt; | 
|---|
    // Nonzero if a Thread.Interrupt is pending on this thread
    // (see m_UserInterrupt; cleared by ResetUserInterrupted).
    DWORD       IsUserInterrupted()
    {
        LIMITED_METHOD_CONTRACT;
        return m_UserInterrupt;
    }
|---|
    // Clears a pending Thread.Interrupt flag; interlocked so it is safe
    // against a concurrent interrupt request.
    void        ResetUserInterrupted()
    {
        LIMITED_METHOD_CONTRACT;
        FastInterlockExchange(&m_UserInterrupt, 0);
    }
|---|
| 3709 |  | 
|---|
| 3710 | void        HandleThreadInterrupt(BOOL fWaitForADUnload); | 
|---|
| 3711 |  | 
|---|
| 3712 | public: | 
|---|
| 3713 | static void WINAPI UserInterruptAPC(ULONG_PTR ignore); | 
|---|
| 3714 |  | 
|---|
| 3715 | #if defined(_DEBUG) && defined(TRACK_SYNC) | 
|---|
| 3716 |  | 
|---|
| 3717 | // Each thread has a stack that tracks all enter and leave requests | 
|---|
| 3718 | public: | 
|---|
| 3719 | Dbg_TrackSync   *m_pTrackSync; | 
|---|
| 3720 |  | 
|---|
| 3721 | #endif // TRACK_SYNC | 
|---|
| 3722 |  | 
|---|
| 3723 | private: | 
|---|
| 3724 | #ifdef ENABLE_CONTRACTS_DATA | 
|---|
| 3725 | struct ClrDebugState *m_pClrDebugState; // Pointer to ClrDebugState for quick access | 
|---|
| 3726 |  | 
|---|
| 3727 | ULONG  m_ulEnablePreemptiveGCCount; | 
|---|
#endif  // ENABLE_CONTRACTS_DATA
|---|
| 3729 |  | 
|---|
| 3730 | private: | 
|---|
| 3731 | // For suspends: | 
|---|
| 3732 | CLREvent        m_DebugSuspendEvent; | 
|---|
| 3733 |  | 
|---|
| 3734 | // For Object::Wait, Notify and NotifyAll, we use an Event inside the | 
|---|
| 3735 | // thread and we queue the threads onto the SyncBlock of the object they | 
|---|
| 3736 | // are waiting for. | 
|---|
| 3737 | CLREvent        m_EventWait; | 
|---|
| 3738 | WaitEventLink   m_WaitEventLink; | 
|---|
| 3739 | WaitEventLink* WaitEventLinkForSyncBlock (SyncBlock *psb) | 
|---|
| 3740 | { | 
|---|
| 3741 | LIMITED_METHOD_CONTRACT; | 
|---|
| 3742 | WaitEventLink *walk = &m_WaitEventLink; | 
|---|
| 3743 | while (walk->m_Next) { | 
|---|
| 3744 | _ASSERTE (walk->m_Next->m_Thread == this); | 
|---|
| 3745 | if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1)== psb) { | 
|---|
| 3746 | break; | 
|---|
| 3747 | } | 
|---|
| 3748 | walk = walk->m_Next; | 
|---|
| 3749 | } | 
|---|
| 3750 | return walk; | 
|---|
| 3751 | } | 
|---|
| 3752 |  | 
|---|
| 3753 | // Access to thread handle and ThreadId. | 
|---|
    // Returns the OS handle for this thread. In debug (non-DAC) builds,
    // cross-checks that a real handle actually corresponds to m_OSThreadId.
    HANDLE      GetThreadHandle()
    {
        LIMITED_METHOD_CONTRACT;
#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
        {
            // Keep the handle pinned against a racing SetThreadHandle/close
            // while we validate it.
            CounterHolder handleHolder(&m_dwThreadHandleBeingUsed);
            HANDLE handle = m_ThreadHandle;
            // Sentinel handles and not-yet/no-longer-assigned OS ids
            // (0 / 0xbaadf00d) are exempt from the id match.
            _ASSERTE ( handle == INVALID_HANDLE_VALUE
                || handle == SWITCHOUT_HANDLE_VALUE
                || m_OSThreadId == 0
                || m_OSThreadId == 0xbaadf00d
                || ::MatchThreadHandleToOsId(handle, m_OSThreadId) );
        }
#endif

        DACCOP_IGNORE(FieldAccess, "Treated as raw address, no marshaling is necessary");
        return m_ThreadHandle;
    }
|---|
| 3772 |  | 
|---|
    // Installs a new OS handle for this thread. Debug builds verify that a
    // real handle matches m_OSThreadId (sentinels and unset ids are exempt).
    // The write is an interlocked exchange, so concurrent readers see either
    // the old or the new handle, never a torn value.
    void        SetThreadHandle(HANDLE h)
    {
        LIMITED_METHOD_CONTRACT;
#if defined(_DEBUG)
        _ASSERTE ( h == INVALID_HANDLE_VALUE
            || h == SWITCHOUT_HANDLE_VALUE
            || m_OSThreadId == 0
            || m_OSThreadId == 0xbaadf00d
            || ::MatchThreadHandleToOsId(h, m_OSThreadId) );
#endif
        FastInterlockExchangePointer(&m_ThreadHandle, h);
    }
|---|
| 3785 |  | 
|---|
| 3786 | // We maintain a correspondence between this object, the ThreadId and ThreadHandle | 
|---|
| 3787 | // in Win32, and the exposed Thread object. | 
|---|
| 3788 | HANDLE          m_ThreadHandle; | 
|---|
| 3789 |  | 
|---|
| 3790 | // <TODO> It would be nice to remove m_ThreadHandleForClose to simplify Thread.Join, | 
|---|
| 3791 | //   but at the moment that isn't possible without extensive work. | 
|---|
| 3792 | //   This handle is used by SwitchOut to store the old handle which may need to be closed | 
|---|
| 3793 | //   if we are the owner.  The handle can't be closed before checking the external count | 
|---|
| 3794 | //   which we can't do in SwitchOut since that may require locking or switching threads.</TODO> | 
|---|
| 3795 | HANDLE          m_ThreadHandleForClose; | 
|---|
| 3796 | HANDLE          m_ThreadHandleForResume; | 
|---|
| 3797 | BOOL            m_WeOwnThreadHandle; | 
|---|
| 3798 | DWORD           m_OSThreadId; | 
|---|
| 3799 |  | 
|---|
| 3800 | BOOL CreateNewOSThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args); | 
|---|
| 3801 |  | 
|---|
| 3802 | OBJECTHANDLE    m_ExposedObject; | 
|---|
| 3803 | OBJECTHANDLE    m_StrongHndToExposedObject; | 
|---|
| 3804 |  | 
|---|
| 3805 | DWORD           m_Priority;     // initialized to INVALID_THREAD_PRIORITY, set to actual priority when a | 
|---|
| 3806 | // thread does a busy wait for GC, reset to INVALID_THREAD_PRIORITY after wait is over | 
|---|
| 3807 | friend class NDirect; // Quick access to thread stub creation | 
|---|
| 3808 |  | 
|---|
| 3809 | #ifdef HAVE_GCCOVER | 
|---|
| 3810 | friend void DoGcStress (PT_CONTEXT regs, MethodDesc *pMD);  // Needs to call UnhijackThread | 
|---|
| 3811 | #endif // HAVE_GCCOVER | 
|---|
| 3812 |  | 
|---|
| 3813 | ULONG           m_ExternalRefCount; | 
|---|
| 3814 |  | 
|---|
| 3815 | ULONG           m_UnmanagedRefCount; | 
|---|
| 3816 |  | 
|---|
| 3817 | LONG            m_TraceCallCount; | 
|---|
| 3818 |  | 
|---|
| 3819 | //----------------------------------------------------------- | 
|---|
| 3820 | // Bytes promoted on this thread since the last GC? | 
|---|
| 3821 | //----------------------------------------------------------- | 
|---|
| 3822 | DWORD           m_fPromoted; | 
|---|
| 3823 | public: | 
|---|
| 3824 | void SetHasPromotedBytes (); | 
|---|
    // Nonzero if this thread has promoted bytes since the last GC
    // (see m_fPromoted; set via SetHasPromotedBytes).
    DWORD GetHasPromotedBytes ()
    {
        LIMITED_METHOD_CONTRACT;
        return m_fPromoted;
    }
|---|
| 3830 |  | 
|---|
| 3831 | private: | 
|---|
| 3832 | //----------------------------------------------------------- | 
|---|
| 3833 | // Last exception to be thrown. | 
|---|
| 3834 | //----------------------------------------------------------- | 
|---|
| 3835 | friend class EEDbgInterfaceImpl; | 
|---|
| 3836 |  | 
|---|
| 3837 | private: | 
|---|
| 3838 | // Stores the most recently thrown exception. We need to have a handle in case a GC occurs before | 
|---|
| 3839 | // we catch so we don't lose the object. Having a static allows others to catch outside of COM+ w/o leaking | 
|---|
| 3840 | // a handler and allows rethrow outside of COM+ too. | 
|---|
| 3841 | // Differs from m_pThrowable in that it doesn't stack on nested exceptions. | 
|---|
| 3842 | OBJECTHANDLE m_LastThrownObjectHandle;      // Unsafe to use directly.  Use accessors instead. | 
|---|
| 3843 |  | 
|---|
| 3844 | // Indicates that the throwable in m_lastThrownObjectHandle should be treated as | 
|---|
| 3845 | // unhandled. This occurs during fatal error and a few other early error conditions | 
|---|
| 3846 | // before EH is fully set up. | 
|---|
| 3847 | BOOL m_ltoIsUnhandled; | 
|---|
| 3848 |  | 
|---|
| 3849 | friend void DECLSPEC_NORETURN EEPolicy::HandleFatalStackOverflow(EXCEPTION_POINTERS *pExceptionInfo, BOOL fSkipDebugger); | 
|---|
| 3850 |  | 
|---|
| 3851 | public: | 
|---|
| 3852 |  | 
|---|
    // TRUE when no last-thrown object is currently recorded on this thread.
    BOOL IsLastThrownObjectNull() { WRAPPER_NO_CONTRACT; return (m_LastThrownObjectHandle == NULL); }
|---|
| 3854 |  | 
|---|
| 3855 | OBJECTREF LastThrownObject() | 
|---|
| 3856 | { | 
|---|
| 3857 | WRAPPER_NO_CONTRACT; | 
|---|
| 3858 |  | 
|---|
| 3859 | if (m_LastThrownObjectHandle == NULL) | 
|---|
| 3860 | { | 
|---|
| 3861 | return NULL; | 
|---|
| 3862 | } | 
|---|
| 3863 | else | 
|---|
| 3864 | { | 
|---|
| 3865 | // We only have a handle if we have an object to keep in it. | 
|---|
| 3866 | _ASSERTE(ObjectFromHandle(m_LastThrownObjectHandle) != NULL); | 
|---|
| 3867 | return ObjectFromHandle(m_LastThrownObjectHandle); | 
|---|
| 3868 | } | 
|---|
| 3869 | } | 
|---|
| 3870 |  | 
|---|
    // Returns the raw handle holding the last thrown object (may be NULL).
    // Per the field's own note, the handle is unsafe to use directly;
    // prefer LastThrownObject / SetLastThrownObject.
    OBJECTHANDLE LastThrownObjectHandle()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        return m_LastThrownObjectHandle;
    }
|---|
| 3877 |  | 
|---|
| 3878 | void SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled = FALSE); | 
|---|
| 3879 | void SetSOForLastThrownObject(); | 
|---|
| 3880 | OBJECTREF SafeSetLastThrownObject(OBJECTREF throwable); | 
|---|
| 3881 |  | 
|---|
    // Indicates that the last thrown object is now treated as unhandled
|---|
    // Marks the last thrown object as unhandled (see m_ltoIsUnhandled;
    // queried by IsLastThrownObjectUnhandled).
    void MarkLastThrownObjectUnhandled()
    {
        LIMITED_METHOD_CONTRACT;
        m_ltoIsUnhandled = TRUE;
    }
|---|
| 3888 |  | 
|---|
| 3889 | // TRUE if the throwable in LTO should be treated as unhandled | 
|---|
    BOOL IsLastThrownObjectUnhandled()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        // Set by MarkLastThrownObjectUnhandled (fatal-error / early-error paths).
        return m_ltoIsUnhandled;
    }
|---|
| 3895 |  | 
|---|
| 3896 | void SafeUpdateLastThrownObject(void); | 
|---|
| 3897 | OBJECTREF SafeSetThrowables(OBJECTREF pThrowable | 
|---|
| 3898 | DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags = ThreadExceptionState::STEC_All), | 
|---|
| 3899 | BOOL isUnhandled = FALSE); | 
|---|
| 3900 |  | 
|---|
    // TRUE if the last thrown object is the process-wide preallocated
    // stack-overflow exception. Compared by handle identity, not contents.
    bool IsLastThrownObjectStackOverflowException()
    {
        LIMITED_METHOD_CONTRACT;
        CONSISTENCY_CHECK(NULL != g_pPreallocatedStackOverflowException);

        return (m_LastThrownObjectHandle == g_pPreallocatedStackOverflowException);
    }
|---|
| 3908 |  | 
|---|
| 3909 | void SetKickOffDomainId(ADID ad); | 
|---|
| 3910 | ADID GetKickOffDomainId(); | 
|---|
| 3911 |  | 
|---|
| 3912 | // get the current notification (if any) from this thread | 
|---|
| 3913 | OBJECTHANDLE GetThreadCurrNotification(); | 
|---|
| 3914 |  | 
|---|
| 3915 | // set the current notification on this thread | 
|---|
| 3916 | void SetThreadCurrNotification(OBJECTHANDLE handle); | 
|---|
| 3917 |  | 
|---|
| 3918 | // clear the current notification (if any) from this thread | 
|---|
| 3919 | void ClearThreadCurrNotification(); | 
|---|
| 3920 |  | 
|---|
| 3921 | private: | 
|---|
| 3922 | void SetLastThrownObjectHandle(OBJECTHANDLE h); | 
|---|
| 3923 |  | 
|---|
| 3924 | ADID m_pKickOffDomainId; | 
|---|
| 3925 |  | 
|---|
| 3926 | ThreadExceptionState  m_ExceptionState; | 
|---|
| 3927 |  | 
|---|
| 3928 | //----------------------------------------------------------- | 
|---|
| 3929 | // For stack probing.  These are the last allowable addresses that a thread | 
|---|
| 3930 | // can touch.  Going beyond is a stack overflow.  The ProbeLimit will be | 
|---|
| 3931 | // set based on whether SO probing is enabled.  The LastAllowableAddress | 
|---|
| 3932 | // will always represent the true stack limit. | 
|---|
| 3933 | //----------------------------------------------------------- | 
|---|
| 3934 | UINT_PTR             m_ProbeLimit; | 
|---|
| 3935 |  | 
|---|
| 3936 | UINT_PTR             m_LastAllowableStackAddress; | 
|---|
| 3937 |  | 
|---|
| 3938 | private: | 
|---|
| 3939 | //--------------------------------------------------------------- | 
|---|
| 3940 | // m_debuggerFilterContext holds the thread's "filter context" for the | 
|---|
| 3941 | // debugger.  This filter context is used by the debugger to seed | 
|---|
| 3942 | // stack walks on the thread. | 
|---|
| 3943 | //--------------------------------------------------------------- | 
|---|
| 3944 | PTR_CONTEXT m_debuggerFilterContext; | 
|---|
| 3945 |  | 
|---|
| 3946 | //--------------------------------------------------------------- | 
|---|
| 3947 | // m_profilerFilterContext holds an additional context for the | 
|---|
| 3948 | // case when a (sampling) profiler wishes to hijack the thread | 
|---|
| 3949 | // and do a stack walk on the same thread. | 
|---|
//---------------------------------------------------------------
// Register context used by a profiler walking this thread's stack
// (NULL when no profiler stack walk is in progress; see
// Get/SetProfilerFilterContext).
//---------------------------------------------------------------
T_CONTEXT *m_pProfilerFilterContext;

//---------------------------------------------------------------
// m_hijackLock holds a BOOL that is used for mutual exclusion
// between profiler stack walks and thread hijacks (bashing
// return addresses on the stack)
//---------------------------------------------------------------
Volatile<LONG> m_hijackLock;
//---------------------------------------------------------------
// m_debuggerCantStop holds a count of entries into "can't stop"
// areas that the Interop Debugging Services must know about.
//---------------------------------------------------------------
DWORD m_debuggerCantStop;

//---------------------------------------------------------------
// The current custom notification data object (or NULL if none
// pending)
//---------------------------------------------------------------
OBJECTHANDLE m_hCurrNotification;

//---------------------------------------------------------------
// For Interop-Debugging; track if a thread is hijacked.
//---------------------------------------------------------------
BOOL    m_fInteropDebuggingHijacked;

//---------------------------------------------------------------
// Bitmask to remember per-thread state useful for the profiler API.  See
// COR_PRF_CALLBACKSTATE_* flags in clr\src\inc\ProfilePriv.h for bit values.
//---------------------------------------------------------------
DWORD m_profilerCallbackState;

#if defined(FEATURE_PROFAPI_ATTACH_DETACH) || defined(DATA_PROFAPI_ATTACH_DETACH)
//---------------------------------------------------------------
// m_dwProfilerEvacuationCounter keeps track of how many profiler
// callback calls remain on the stack
//---------------------------------------------------------------
// Why volatile?
// See code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization.
Volatile<DWORD> m_dwProfilerEvacuationCounter;
#endif // defined(FEATURE_PROFAPI_ATTACH_DETACH) || defined(DATA_PROFAPI_ATTACH_DETACH)

private:
    // Per-thread count of completed thread-pool work items
    // (incremented by IncrementThreadPoolCompletionCount).
    Volatile<LONG> m_threadPoolCompletionCount;
    // Counts completions for threads that have been destroyed (or that were
    // never tracked by the runtime - see IncrementThreadPoolCompletionCount).
    static Volatile<LONG> s_threadPoolCompletionCountOverflow;
|---|
| 3996 | public: | 
|---|
| 3997 | static void IncrementThreadPoolCompletionCount() | 
|---|
| 3998 | { | 
|---|
| 3999 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4000 | Thread* pThread = GetThread(); | 
|---|
| 4001 | if (pThread) | 
|---|
| 4002 | pThread->m_threadPoolCompletionCount++; | 
|---|
| 4003 | else | 
|---|
| 4004 | FastInterlockIncrement(&s_threadPoolCompletionCountOverflow); | 
|---|
| 4005 | } | 
|---|
| 4006 |  | 
|---|
    // Sum of all live per-thread completion counts plus the overflow counter.
    static LONG GetTotalThreadPoolCompletionCount();

private:

    //-------------------------------------------------------------------------
    // Support creation of assemblies in DllMain (see ceemain.cpp)
    //-------------------------------------------------------------------------
    DomainFile* m_pLoadingFile;


    // The ThreadAbort reason (Get/Set/ClearExceptionStateInfo on the managed thread) is
    // held here as an OBJECTHANDLE and the ADID of the AppDomain in which it is valid.
    // Atomic updates of this state use the Thread's Crst.

    OBJECTHANDLE    m_AbortReason;
    ADID            m_AbortReasonDomainID;

    // Clears m_AbortReason / m_AbortReasonDomainID.
    // NOTE(review): pNoLock presumably skips taking the Thread's Crst when the
    // caller already holds it - confirm against the definition in threads.cpp.
    void            ClearAbortReason(BOOL pNoLock = FALSE);
|---|
| 4025 |  | 
|---|
| 4026 | public: | 
|---|
| 4027 |  | 
|---|
| 4028 | void SetInteropDebuggingHijacked(BOOL f) | 
|---|
| 4029 | { | 
|---|
| 4030 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4031 | m_fInteropDebuggingHijacked = f; | 
|---|
| 4032 | } | 
|---|
| 4033 | BOOL GetInteropDebuggingHijacked() | 
|---|
| 4034 | { | 
|---|
| 4035 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4036 | return m_fInteropDebuggingHijacked; | 
|---|
| 4037 | } | 
|---|
| 4038 |  | 
|---|
    // Get/set the debugger filter context for this thread (defined out of line).
    void SetFilterContext(T_CONTEXT *pContext);
    T_CONTEXT *GetFilterContext(void);
|---|
| 4041 |  | 
|---|
| 4042 | void SetProfilerFilterContext(T_CONTEXT *pContext) | 
|---|
| 4043 | { | 
|---|
| 4044 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4045 |  | 
|---|
| 4046 | m_pProfilerFilterContext = pContext; | 
|---|
| 4047 | } | 
|---|
| 4048 |  | 
|---|
| 4049 | // Used by the profiler API to find which flags have been set on the Thread object, | 
|---|
| 4050 | // in order to authorize a profiler's call into ICorProfilerInfo(2). | 
|---|
| 4051 | DWORD GetProfilerCallbackFullState() | 
|---|
| 4052 | { | 
|---|
| 4053 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4054 | _ASSERTE(GetThread() == this); | 
|---|
| 4055 | return m_profilerCallbackState; | 
|---|
| 4056 | } | 
|---|
| 4057 |  | 
|---|
| 4058 | // Used by profiler API to set at once all callback flag bits stored on the Thread object. | 
|---|
| 4059 | // Used to reinstate the previous state that had been modified by a previous call to | 
|---|
| 4060 | // SetProfilerCallbackStateFlags | 
|---|
| 4061 | void SetProfilerCallbackFullState(DWORD dwFullState) | 
|---|
| 4062 | { | 
|---|
| 4063 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4064 | _ASSERTE(GetThread() == this); | 
|---|
| 4065 | m_profilerCallbackState = dwFullState; | 
|---|
| 4066 | } | 
|---|
| 4067 |  | 
|---|
| 4068 | // Used by profiler API to set individual callback flags on the Thread object. | 
|---|
| 4069 | // Returns the previous state of all flags. | 
|---|
| 4070 | DWORD SetProfilerCallbackStateFlags(DWORD dwFlags) | 
|---|
| 4071 | { | 
|---|
| 4072 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4073 | _ASSERTE(GetThread() == this); | 
|---|
| 4074 |  | 
|---|
| 4075 | DWORD dwRet = m_profilerCallbackState; | 
|---|
| 4076 | m_profilerCallbackState |= dwFlags; | 
|---|
| 4077 | return dwRet; | 
|---|
| 4078 | } | 
|---|
| 4079 |  | 
|---|
| 4080 | T_CONTEXT *GetProfilerFilterContext(void) | 
|---|
| 4081 | { | 
|---|
| 4082 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4083 | return m_pProfilerFilterContext; | 
|---|
| 4084 | } | 
|---|
| 4085 |  | 
|---|
#ifdef FEATURE_PROFAPI_ATTACH_DETACH

    // Number of profiler callback calls currently on this thread's stack
    // (see m_dwProfilerEvacuationCounter).
    FORCEINLINE DWORD GetProfilerEvacuationCounter(void)
    {
        LIMITED_METHOD_CONTRACT;
        return m_dwProfilerEvacuationCounter;
    }

    FORCEINLINE void IncProfilerEvacuationCounter(void)
    {
        LIMITED_METHOD_CONTRACT;
        m_dwProfilerEvacuationCounter++;
        // Guard against wrap-around: the counter must never overflow to 0.
        _ASSERTE(m_dwProfilerEvacuationCounter != 0U);
    }

    FORCEINLINE void DecProfilerEvacuationCounter(void)
    {
        LIMITED_METHOD_CONTRACT;
        // A decrement must be paired with a prior increment (no underflow).
        _ASSERTE(m_dwProfilerEvacuationCounter != 0U);
        m_dwProfilerEvacuationCounter--;
    }

#endif // FEATURE_PROFAPI_ATTACH_DETACH
|---|
| 4109 |  | 
|---|
    //-------------------------------------------------------------------------
    // The hijack lock enforces that a thread on which a profiler is currently
    // performing a stack walk cannot be hijacked.
    //
    // Note that the hijack lock cannot be managed by the host (i.e., this
    // cannot be a Crst), because this could lead to a deadlock:  YieldTask,
    // which is called by the host, may need to hijack, for which it would
    // need to take this lock - but since the host needs not be reentrant,
    // taking the lock cannot cause a call back into the host.
    //-------------------------------------------------------------------------
    // Try-acquire: returns TRUE iff the lock was free and is now held.
    static BOOL EnterHijackLock(Thread *pThread)
    {
        LIMITED_METHOD_CONTRACT;

        return ::InterlockedCompareExchange(&(pThread->m_hijackLock), TRUE, FALSE) == FALSE;
    }

    static void LeaveHijackLock(Thread *pThread)
    {
        LIMITED_METHOD_CONTRACT;

        pThread->m_hijackLock = FALSE;
    }

    // Holder that try-acquires the hijack lock on construction and releases
    // it on destruction if the acquire succeeded.
    typedef ConditionalStateHolder<Thread *, Thread::EnterHijackLock, Thread::LeaveHijackLock> HijackLockHolder;
    //-------------------------------------------------------------------------
|---|
| 4136 |  | 
|---|
    // TRUE iff the process-wide count of threads at "unsafe places" is
    // non-zero (see Inc/DecThreadsAtUnsafePlaces).
    static bool ThreadsAtUnsafePlaces(void)
    {
        LIMITED_METHOD_CONTRACT;

        return (m_threadsAtUnsafePlaces != (LONG)0);
    }

    static void IncThreadsAtUnsafePlaces(void)
    {
        LIMITED_METHOD_CONTRACT;
        InterlockedIncrement(&m_threadsAtUnsafePlaces);
    }

    static void DecThreadsAtUnsafePlaces(void)
    {
        LIMITED_METHOD_CONTRACT;
        InterlockedDecrement(&m_threadsAtUnsafePlaces);
    }
|---|
| 4155 |  | 
|---|
    // Per-thread cleanup performed before the EE resumes from a suspension:
    // undoes any pending hijack (when safe) and clears TS_GCSuspendPending.
    void PrepareForEERestart(BOOL SuspendSucceeded)
    {
        WRAPPER_NO_CONTRACT;

#ifdef FEATURE_HIJACK
        // Only unhijack the thread if the suspend succeeded. If it failed,
        // the target thread may currently be using the original stack
        // location of the return address for something else.
        if (SuspendSucceeded)
            UnhijackThread();
#endif // FEATURE_HIJACK

        ResetThreadState(TS_GCSuspendPending);
    }

    // Enter/leave an interop-debugger "can't stop" region
    // (see m_debuggerCantStop).
    void SetDebugCantStop(bool fCantStop);
    bool GetDebugCantStop(void);

    static LPVOID GetStaticFieldAddress(FieldDesc *pFD);
    TADDR GetStaticFieldAddrNoCreate(FieldDesc *pFD);
|---|
| 4176 |  | 
|---|
| 4177 | void SetLoadingFile(DomainFile *pFile) | 
|---|
| 4178 | { | 
|---|
| 4179 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4180 | CONSISTENCY_CHECK(m_pLoadingFile == NULL); | 
|---|
| 4181 | m_pLoadingFile = pFile; | 
|---|
| 4182 | } | 
|---|
| 4183 |  | 
|---|
| 4184 | void ClearLoadingFile() | 
|---|
| 4185 | { | 
|---|
| 4186 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4187 | m_pLoadingFile = NULL; | 
|---|
| 4188 | } | 
|---|
| 4189 |  | 
|---|
| 4190 | DomainFile *GetLoadingFile() | 
|---|
| 4191 | { | 
|---|
| 4192 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4193 | return m_pLoadingFile; | 
|---|
| 4194 | } | 
|---|
| 4195 |  | 
|---|
private:
    // Holder release callback: clears the thread's in-progress loading file.
    static void LoadingFileRelease(Thread *pThread)
    {
        WRAPPER_NO_CONTRACT;
        pThread->ClearLoadingFile();
    }

public:
    // Clears the loading-file state on scope exit (acquire is a no-op).
    typedef Holder<Thread *, DoNothing, Thread::LoadingFileRelease> LoadingFileHolder;
|---|
| 4205 |  | 
|---|
private:
    // Don't allow a thread to be asynchronously stopped or interrupted (e.g. because
    // it is performing a <clinit>)
    int         m_PreventAsync;
    int         m_PreventAbort;
    // Nesting depth of marshaling-exception handling on this thread.
    int         m_nNestedMarshalingExceptions;
    BOOL IsMarshalingException()
    {
        LIMITED_METHOD_CONTRACT;
        return (m_nNestedMarshalingExceptions != 0);
    }
    // Returns the nesting depth before this call (0 for the outermost).
    int StartedMarshalingException()
    {
        LIMITED_METHOD_CONTRACT;
        return m_nNestedMarshalingExceptions++;
    }
    void FinishedMarshalingException()
    {
        LIMITED_METHOD_CONTRACT;
        // Must be paired with a prior StartedMarshalingException.
        _ASSERTE(m_nNestedMarshalingExceptions > 0);
        m_nNestedMarshalingExceptions--;
    }
|---|
| 4228 |  | 
|---|
    static LONG m_DebugWillSyncCount;

    // IP cache used by QueueCleanupIP.
#define CLEANUP_IPS_PER_CHUNK 4
    // Singly-linked chunks of interface pointers queued for cleanup.
    struct CleanupIPs {
        IUnknown    *m_Slots[CLEANUP_IPS_PER_CHUNK];
        CleanupIPs  *m_Next;
        CleanupIPs() {LIMITED_METHOD_CONTRACT; memset(this, 0, sizeof(*this)); }
    };
    CleanupIPs   m_CleanupIPs;

    // Debug-only guards (compiled away via _ASSERTE_IMPL in release builds):
    // track a per-thread forbid-type-load count and assert that no type load
    // is triggered while it is non-zero.
#define BEGIN_FORBID_TYPELOAD() _ASSERTE_IMPL((GetThreadNULLOk() == 0) || ++GetThreadNULLOk()->m_ulForbidTypeLoad)
#define END_FORBID_TYPELOAD()   _ASSERTE_IMPL((GetThreadNULLOk() == 0) || GetThreadNULLOk()->m_ulForbidTypeLoad--)
#define TRIGGERS_TYPELOAD()     _ASSERTE_IMPL((GetThreadNULLOk() == 0) || !GetThreadNULLOk()->m_ulForbidTypeLoad)
|---|
| 4243 |  | 
|---|
#ifdef _DEBUG
public:
    DWORD m_GCOnTransitionsOK;
    ULONG  m_ulForbidTypeLoad;


    /****************************************************************************/
    /* The code below is an attempt to catch people who don't protect GC pointers that
       they should be protecting.  Basically, OBJECTREF's constructor, adds the slot
       to a table.   When we protect a slot, we remove it from the table.  When GC
       could happen, all entries in the table are marked as bad.  When access to
       an OBJECTREF happens (the -> operator) we assert the slot is not bad.  To make
       this fast, the table is not perfect (there can be collisions), but this should
       not cause false positives, but it may allow errors to go undetected  */

#ifdef _WIN64
#define OBJREF_HASH_SHIFT_AMOUNT 3
#else // _WIN64
#define OBJREF_HASH_SHIFT_AMOUNT 2
#endif // _WIN64

    // For debugging, you may want to make this number very large, (8K)
    // should basically insure that no collisions happen
#define OBJREF_TABSIZE              256
    DWORD_PTR dangerousObjRefs[OBJREF_TABSIZE];      // Really objectRefs with lower bit stolen
    // m_allObjRefEntriesBad is TRUE iff dangerousObjRefs are all marked as GC happened
    // It's purely a perf optimization for debug builds that'll help for the cases where we make 2 successive calls
    // to Thread::TriggersGC. In that case, the entire array doesn't need to be walked and marked, since we just did
    // that.
    BOOL m_allObjRefEntriesBad;

    static DWORD_PTR OBJREF_HASH;
    // Remembers that this object ref pointer is 'alive' and unprotected (Bad if GC happens)
    static void ObjectRefNew(const OBJECTREF* ref) {
        WRAPPER_NO_CONTRACT;
        Thread * curThread = GetThreadNULLOk();
        if (curThread == 0) return;

        // Record the slot address; low bits clear = "no GC seen, unprotected".
        curThread->dangerousObjRefs[((size_t)ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH] = (size_t)ref;
        curThread->m_allObjRefEntriesBad = FALSE;
    }
|---|
| 4285 |  | 
|---|
    // On assignment the ref holds a fresh value, so any GC that happened
    // before no longer makes this slot dangerous.
    static void ObjectRefAssign(const OBJECTREF* ref) {
        WRAPPER_NO_CONTRACT;
        Thread * curThread = GetThreadNULLOk();
        if (curThread == 0) return;

        curThread->m_allObjRefEntriesBad = FALSE;
        DWORD_PTR* slot = &curThread->dangerousObjRefs[((DWORD_PTR) ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH];
        if ((*slot & ~3) == (size_t) ref)
            *slot = *slot & ~1;                  // Don't care about GC's that have happened
    }

    // If an object is protected, it can be removed from the 'dangerous table'
    static void ObjectRefProtected(const OBJECTREF* ref) {
#ifdef USE_CHECKED_OBJECTREFS
        WRAPPER_NO_CONTRACT;
        _ASSERTE(IsObjRefValid(ref));
        Thread * curThread = GetThreadNULLOk();
        if (curThread == 0) return;

        curThread->m_allObjRefEntriesBad = FALSE;
        DWORD_PTR* slot = &curThread->dangerousObjRefs[((DWORD_PTR) ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH];
        if ((*slot & ~3) == (DWORD_PTR) ref)
            *slot = (size_t) ref | 2;                             // mark as being protected
#else
        LIMITED_METHOD_CONTRACT;
#endif
    }
|---|
| 4313 |  | 
|---|
    // Returns false only when this slot records that the ref was live and
    // unprotected when a GC happened AND the pointer is outside the GC heap
    // address range; everything else is considered valid.
    static bool IsObjRefValid(const OBJECTREF* ref) {
        WRAPPER_NO_CONTRACT;
        Thread * curThread = GetThreadNULLOk();
        if (curThread == 0) return(true);

        // If the object ref is NULL, we'll let it pass.
        if (*((DWORD_PTR*) ref) == 0)
            return(true);

        DWORD_PTR val = curThread->dangerousObjRefs[((DWORD_PTR) ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH];
        // if not in the table, or not the case that it was unprotected and GC happened, return true.
        if((val & ~3) != (size_t) ref || (val & 3) != 1)
            return(true);
        // If the pointer lives in the GC heap, then it is protected, and thus valid.
        if (dac_cast<TADDR>(g_lowest_address) <= val && val < dac_cast<TADDR>(g_highest_address))
            return(true);
        return(false);
    }

    // Clears the table.  Useful to do when crossing the managed-code - EE boundary
    // as you usually only care about OBJECTREFS that have been created after that
    static void STDCALL ObjectRefFlush(Thread* thread);


#ifdef ENABLE_CONTRACTS_IMPL
    // Marks all Objrefs in the table as bad (since they are unprotected)
    static void TriggersGC(Thread* thread) {
        WRAPPER_NO_CONTRACT;
        // Skip when GC violations are permitted or the debug state is unreliable.
        if ((GCViolation|BadDebugState) & (UINT_PTR)(GetViolationMask()))
        {
            return;
        }
        if (!thread->m_allObjRefEntriesBad)
        {
            thread->m_allObjRefEntriesBad = TRUE;
            for(unsigned i = 0; i < OBJREF_TABSIZE; i++)
                thread->dangerousObjRefs[i] |= 1;                       // mark all slots as GC happened
        }
    }
#endif // ENABLE_CONTRACTS_IMPL

#endif // _DEBUG
|---|
| 4356 |  | 
|---|
private:
    // CONTEXT used when this thread is redirected (see Get/SetSavedRedirectContext
    // and EnsurePreallocatedContext).
    PTR_CONTEXT m_pSavedRedirectContext;

    BOOL IsContextSafeToRedirect(T_CONTEXT* pContext);

public:
    PT_CONTEXT GetSavedRedirectContext()
    {
        LIMITED_METHOD_CONTRACT;
        return (m_pSavedRedirectContext);
    }

#ifndef DACCESS_COMPILE
    void     SetSavedRedirectContext(PT_CONTEXT pCtx)
    {
        LIMITED_METHOD_CONTRACT;
        m_pSavedRedirectContext = pCtx;
    }
#endif

    // NOTE(review): presumably allocates m_pSavedRedirectContext up front so
    // redirection need not allocate - confirm in threads.cpp.
    void EnsurePreallocatedContext();
|---|
| 4378 |  | 
|---|
    // Per-thread storage for thread-static data.
    ThreadLocalBlock m_ThreadLocalBlock;

    // Called during AssemblyLoadContext teardown to clean up all structures
    // associated with thread statics for the specific Module
    void DeleteThreadStaticData(ModuleIndex index);

private:

    // Called during Thread death to clean up all structures
    // associated with thread statics
    void DeleteThreadStaticData();
|---|
| 4390 |  | 
|---|
#ifdef _DEBUG
private:
    // When we create an object, or create an OBJECTREF, or create an Interior Pointer, or enter EE from managed
    // code, we will set this flag.
    // Inside GCHeapUtilities::StressHeap, we only do GC if this flag is TRUE.  Then we reset it to zero.
    BOOL m_fStressHeapCount;
public:
    void EnableStressHeap()
    {
        LIMITED_METHOD_CONTRACT;
        m_fStressHeapCount = TRUE;
    }
    void DisableStressHeap()
    {
        LIMITED_METHOD_CONTRACT;
        m_fStressHeapCount = FALSE;
    }
    BOOL StressHeapIsEnabled()
    {
        LIMITED_METHOD_CONTRACT;
        return m_fStressHeapCount;
    }

    // NOTE(review): appears to record the base of the most recently cleaned
    // stack region (debug-only) - confirm usage in threads.cpp.
    size_t *m_pCleanedStackBase;
#endif
|---|
| 4416 |  | 
|---|
#ifdef DACCESS_COMPILE
public:
    // DAC enumeration of the memory this Thread references (for minidumps).
    void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
    void EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags);
#endif

public:
    // Is the current thread currently executing within a constrained execution region?
    static BOOL IsExecutingWithinCer();

    // Determine whether the method at the given frame in the thread's execution stack is executing within a CER.
    BOOL IsWithinCer(CrawlFrame *pCf);
|---|
| 4429 |  | 
|---|
private:
    // used to pad stack on thread creation to avoid aliasing penalty in P4 HyperThread scenarios

    static DWORD WINAPI intermediateThreadProc(PVOID arg);
    static int m_offset_counter;
    static const int offset_multiplier = 128;

    // Bundles the user's thread routine and its argument so
    // intermediateThreadProc can unpack and invoke them.
    typedef struct {
        LPTHREAD_START_ROUTINE  lpThreadFunction;
        PVOID lpArg;
    } intermediateThreadParam;
|---|
| 4441 |  | 
|---|
#ifdef _DEBUG
// when the thread is doing a stressing GC, some Crst violation could be ignored, by a non-elegant solution.
private:
    BOOL m_bGCStressing; // the flag to indicate if the thread is doing a stressing GC
    BOOL m_bUniqueStacking; // the flag to indicate if the thread is doing a UniqueStack
|---|
| 4447 | public: | 
|---|
| 4448 | BOOL GetGCStressing () | 
|---|
| 4449 | { | 
|---|
| 4450 | return m_bGCStressing; | 
|---|
| 4451 | } | 
|---|
| 4452 | BOOL GetUniqueStacking () | 
|---|
| 4453 | { | 
|---|
| 4454 | return m_bUniqueStacking; | 
|---|
| 4455 | } | 
|---|
| 4456 | #endif | 
|---|
| 4457 |  | 
|---|
private:
    //-----------------------------------------------------------------------------
    // AVInRuntimeImplOkay : it's okay to have an AV in Runtime implementation while
    // this holder is in effect.
    //
    //  {
    //      AVInRuntimeImplOkayHolder foo();
    //  } // make AV's in the Runtime illegal on out of scope.
    //-----------------------------------------------------------------------------
    DWORD m_dwAVInRuntimeImplOkayCount;

    static void AVInRuntimeImplOkayAcquire(Thread * pThread)
    {
        LIMITED_METHOD_CONTRACT;

        if (pThread)
        {
            // Guard against wrap-around of the nesting count.
            _ASSERTE(pThread->m_dwAVInRuntimeImplOkayCount != (DWORD)-1);
            pThread->m_dwAVInRuntimeImplOkayCount++;
        }
    }

    static void AVInRuntimeImplOkayRelease(Thread * pThread)
    {
        LIMITED_METHOD_CONTRACT;

        if (pThread)
        {
            // A release must be paired with a prior acquire.
            _ASSERTE(pThread->m_dwAVInRuntimeImplOkayCount > 0);
            pThread->m_dwAVInRuntimeImplOkayCount--;
        }
    }
|---|
| 4490 |  | 
|---|
| 4491 | public: | 
|---|
| 4492 | static BOOL AVInRuntimeImplOkay(void) | 
|---|
| 4493 | { | 
|---|
| 4494 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4495 |  | 
|---|
| 4496 | Thread * pThread = GetThreadNULLOk(); | 
|---|
| 4497 |  | 
|---|
| 4498 | if (pThread) | 
|---|
| 4499 | { | 
|---|
| 4500 | return (pThread->m_dwAVInRuntimeImplOkayCount > 0); | 
|---|
| 4501 | } | 
|---|
| 4502 | else | 
|---|
| 4503 | { | 
|---|
| 4504 | return FALSE; | 
|---|
| 4505 | } | 
|---|
| 4506 | } | 
|---|
| 4507 |  | 
|---|
    // RAII holder: AVs in the runtime implementation are tolerated for the
    // given thread while an instance is alive (see m_dwAVInRuntimeImplOkayCount).
    // NOTE(review): the Thread* constructor is intentionally non-explicit?
    class AVInRuntimeImplOkayHolder
    {
        Thread * const m_pThread;
    public:
        AVInRuntimeImplOkayHolder() :
            m_pThread(GetThread())
        {
            LIMITED_METHOD_CONTRACT;
            AVInRuntimeImplOkayAcquire(m_pThread);
        }
        AVInRuntimeImplOkayHolder(Thread * pThread) :
            m_pThread(pThread)
        {
            LIMITED_METHOD_CONTRACT;
            AVInRuntimeImplOkayAcquire(m_pThread);
        }
        ~AVInRuntimeImplOkayHolder()
        {
            LIMITED_METHOD_CONTRACT;
            AVInRuntimeImplOkayRelease(m_pThread);
        }
    };
|---|
| 4530 |  | 
|---|
#ifdef _DEBUG
private:
    // Debug-only count of "unbreakable" locks held by this thread.
    DWORD m_dwUnbreakableLockCount;
public:
    void IncUnbreakableLockCount()
    {
        LIMITED_METHOD_CONTRACT;
        // Guard against wrap-around of the count.
        _ASSERTE (m_dwUnbreakableLockCount != (DWORD)-1);
        m_dwUnbreakableLockCount ++;
    }
    void DecUnbreakableLockCount()
    {
        LIMITED_METHOD_CONTRACT;
        // A decrement must be paired with a prior increment.
        _ASSERTE (m_dwUnbreakableLockCount > 0);
        m_dwUnbreakableLockCount --;
    }
    BOOL HasUnbreakableLock() const
    {
        LIMITED_METHOD_CONTRACT;
        return m_dwUnbreakableLockCount != 0;
    }
    DWORD GetUnbreakableLockCount() const
    {
        LIMITED_METHOD_CONTRACT;
        return m_dwUnbreakableLockCount;
    }
#endif // _DEBUG
|---|
| 4558 |  | 
|---|
#ifdef _DEBUG
private:
    friend class FCallTransitionState;
    friend class PermitHelperMethodFrameState;
    friend class CompletedFCallTransitionState;
    // Debug-only chain of HelperMethodFrame callers for this thread.
    HelperMethodFrameCallerList *m_pHelperMethodFrameCallerList;
#endif // _DEBUG

private:
    // Host's reference count on this task.
    LONG m_dwHostTaskRefCount;

private:
    // If HasStarted fails, we cache the exception here, and rethrow on the thread which
    // calls Thread.Start.
    Exception* m_pExceptionDuringStartup;

public:
    // Reports the cached startup failure (see m_pExceptionDuringStartup).
    void HandleThreadStartupFailure();
|---|
| 4577 |  | 
|---|
#ifdef HAVE_GCCOVER
private:
    // Destination/source of a pending GC-stress instruction update
    // (consumed by CommitGCStressInstructionUpdate, defined out of line).
    BYTE* m_pbDestCode;
    BYTE* m_pbSrcCode;
#if defined(GCCOVER_TOLERATE_SPURIOUS_AV)
    // Address of the most recent AV recorded via SetLastAVAddress.
    LPVOID m_pLastAVAddress;
#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)

public:
    void CommitGCStressInstructionUpdate();
    // Publish a pending update.  Src is stored without a barrier; the
    // subsequent VolatileStore of dest orders both, and readers key off
    // m_pbDestCode being non-NULL (see HasPendingGCStressInstructionUpdate).
    void PostGCStressInstructionUpdate(BYTE* pbDestCode, BYTE* pbSrcCode)
    {
        LIMITED_METHOD_CONTRACT;
        PRECONDITION(!HasPendingGCStressInstructionUpdate());

        VolatileStoreWithoutBarrier<BYTE*>(&m_pbSrcCode, pbSrcCode);
        VolatileStore<BYTE*>(&m_pbDestCode, pbDestCode);
    }
    bool HasPendingGCStressInstructionUpdate()
    {
        LIMITED_METHOD_CONTRACT;
        BYTE* dest = VolatileLoad(&m_pbDestCode);
        return dest != NULL;
    }
    // Atomically claims the pending update (if any) into *ppbDestCode /
    // *ppbSrcCode.  Returns true iff this caller won the claim.
    bool TryClearGCStressInstructionUpdate(BYTE** ppbDestCode, BYTE** ppbSrcCode)
    {
        LIMITED_METHOD_CONTRACT;
        bool result = false;

        if(HasPendingGCStressInstructionUpdate())
        {
            *ppbDestCode = FastInterlockExchangePointer(&m_pbDestCode, NULL);

            if(*ppbDestCode != NULL)
            {
                result = true;
                *ppbSrcCode = FastInterlockExchangePointer(&m_pbSrcCode, NULL);

                CONSISTENCY_CHECK(*ppbSrcCode != NULL);
            }
        }
        return result;
    }
#if defined(GCCOVER_TOLERATE_SPURIOUS_AV)
    void SetLastAVAddress(LPVOID address)
    {
        LIMITED_METHOD_CONTRACT;
        m_pLastAVAddress = address;
    }
    LPVOID GetLastAVAddress()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pLastAVAddress;
    }
#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)
#endif // HAVE_GCCOVER
|---|
| 4634 |  | 
|---|
#if defined(_DEBUG) && defined(FEATURE_STACK_PROBE)
    class ::BaseStackGuard;
private:
    // This field is used for debugging purposes to allow easy access to the stack guard
    // chain and also in SO-tolerance checking to quickly determine if a guard is in place.
    BaseStackGuard *m_pCurrentStackGuard;

public:
    BaseStackGuard *GetCurrentStackGuard()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pCurrentStackGuard;
    }

    void SetCurrentStackGuard(BaseStackGuard *pGuard)
    {
        LIMITED_METHOD_CONTRACT;
        m_pCurrentStackGuard = pGuard;
    }
#endif
|---|
| 4655 |  | 
|---|
private:
    // Set once this thread's completion port has been drained; written with
    // interlocked exchanges, read without a lock.
    BOOL m_fCompletionPortDrained;
public:
    void MarkCompletionPortDrained()
    {
        LIMITED_METHOD_CONTRACT;
        FastInterlockExchange ((LONG*)&m_fCompletionPortDrained, TRUE);
    }
    void UnmarkCompletionPortDrained()
    {
        LIMITED_METHOD_CONTRACT;
        FastInterlockExchange ((LONG*)&m_fCompletionPortDrained, FALSE);
    }
    BOOL IsCompletionPortDrained()
    {
        LIMITED_METHOD_CONTRACT;
        return m_fCompletionPortDrained;
    }
|---|
| 4674 |  | 
|---|
// --------------------------------
//  Store the maxReservedStackSize
//  This is passed in from managed code in the thread constructor
// ---------------------------------
private:
    SIZE_T m_RequestedStackSize;

public:

    // Get the MaxStackSize
    // Returns the stack size managed code requested for this thread.
    // NOTE(review): presumably 0 when no explicit size was requested --
    // confirm against the managed thread constructor path.
    SIZE_T RequestedThreadStackSize()
    {
        LIMITED_METHOD_CONTRACT;
        return (m_RequestedStackSize);
    }

    // Set the MaxStackSize
    // Records the stack size requested by managed code for this thread.
    void RequestedThreadStackSize(SIZE_T requestedStackSize)
    {
        LIMITED_METHOD_CONTRACT;
        m_RequestedStackSize = requestedStackSize;
    }
|---|
| 4697 |  | 
|---|
    // Validates a requested stack size.  The size is passed by pointer, so
    // the implementation may also adjust it.
    // NOTE(review): exact validate/adjust semantics live in the .cpp; only
    // the in/out signature is visible here.
    static BOOL CheckThreadStackSize(SIZE_T *SizeToCommitOrReserve,
                                     BOOL   isSizeToReserve  // When TRUE, the previous argument is the stack size to reserve.
                                                             // Otherwise, it is the size to commit.
                                     );

    // Retrieves the process-default stack reserve and commit sizes.
    static BOOL GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize);
|---|
| 4704 |  | 
|---|
private:

    // Although this is a pointer, it is used as a flag to indicate the current context is unsafe
    // to inspect. When NULL the context is safe to use, otherwise it points to the active patch skipper
    // and the context is unsafe to use. When running a patch skipper we could be in one of two
    // debug-only situations that the context inspecting/modifying code isn't generally prepared
    // to deal with.
    // a) We have set the IP to point somewhere in the patch skip table but have not yet run the
    // instruction
    // b) We executed the instruction in the patch skip table and now the IP could be anywhere
    // The debugger may need to fix up the IP to compensate for the instruction being run
    // from a different address.
    // Updated only via the interlocked exchanges in Begin/EndDebuggerPatchSkip.
    VolatilePtr<DebuggerPatchSkip> m_debuggerActivePatchSkipper;
|---|
| 4718 |  | 
|---|
public:
    // Flags this thread as running a debugger patch skipper; until
    // EndDebuggerPatchSkip is called, the thread's context is unsafe to
    // inspect (see m_debuggerActivePatchSkipper).  Asserts no skipper was
    // already active.
    VOID BeginDebuggerPatchSkip(DebuggerPatchSkip* patchSkipper)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(!m_debuggerActivePatchSkipper.Load());
        FastInterlockExchangePointer(m_debuggerActivePatchSkipper.GetPointer(), patchSkipper);
        _ASSERTE(m_debuggerActivePatchSkipper.Load());
    }

    // Clears the active patch skipper, making the context safe to inspect
    // again.  Asserts a skipper was active.
    VOID EndDebuggerPatchSkip()
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(m_debuggerActivePatchSkipper.Load());
        FastInterlockExchangePointer(m_debuggerActivePatchSkipper.GetPointer(), NULL);
        _ASSERTE(!m_debuggerActivePatchSkipper.Load());
    }
|---|
| 4735 |  | 
|---|
| 4736 | private: | 
|---|
| 4737 |  | 
|---|
| 4738 | static BOOL EnterWorkingOnThreadContext(Thread *pThread) | 
|---|
| 4739 | { | 
|---|
| 4740 | LIMITED_METHOD_CONTRACT; | 
|---|
| 4741 |  | 
|---|
| 4742 | if(pThread->m_debuggerActivePatchSkipper.Load() != NULL) | 
|---|
| 4743 | { | 
|---|
| 4744 | return FALSE; | 
|---|
| 4745 | } | 
|---|
| 4746 | return TRUE; | 
|---|
| 4747 | } | 
|---|
| 4748 |  | 
|---|
    // Release-side of WorkingOnThreadContextHolder; intentionally a no-op
    // (the acquire side takes no state that needs undoing).
    static void LeaveWorkingOnThreadContext(Thread *pThread)
    {
        LIMITED_METHOD_CONTRACT;
    }

    // Conditional holder: its body only runs when the target thread's
    // context is safe to work on (no active patch skipper).
    typedef ConditionalStateHolder<Thread *, Thread::EnterWorkingOnThreadContext, Thread::LeaveWorkingOnThreadContext> WorkingOnThreadContextHolder;
|---|
| 4755 |  | 
|---|
public:
    // Puts the thread into a clean state for stack-overflow processing:
    // removes any pending hijack, resets throw control, and marks the
    // thread as non-recyclable.
    void PrepareThreadForSOWork()
    {
        WRAPPER_NO_CONTRACT;

#ifdef FEATURE_HIJACK
        UnhijackThread();
#endif // FEATURE_HIJACK

        ResetThrowControlForThread();

        // Since this Thread has taken an SO, there may be state left-over after we
        // short-circuited exception or other error handling, and so we don't want
        // to risk recycling it.
        SetThreadStateNC(TSNC_CannotRecycle);
    }
|---|
| 4772 |  | 
|---|
    // Flags that stack-overflow cleanup work is pending on this thread.
    void SetSOWorkNeeded()
    {
        SetThreadStateNC(TSNC_SOWorkNeeded);
    }

    // Returns whether stack-overflow cleanup work is pending.
    BOOL IsSOWorkNeeded()
    {
        return HasThreadStateNC(TSNC_SOWorkNeeded);
    }

    // Performs the pending stack-overflow cleanup work (defined in the .cpp).
    void FinishSOWork();
|---|
| 4784 |  | 
|---|
    // Discards stale exception-tracker state left behind by a stack
    // overflow, scoped to the frame identified by pStackFrameSP.
    void ClearExceptionStateAfterSO(void* pStackFrameSP)
    {
        WRAPPER_NO_CONTRACT;

        // Clear any stale exception state.
        m_ExceptionState.ClearExceptionStateAfterSO(pStackFrameSP);
    }
|---|
| 4792 |  | 
|---|
private:
    // TRUE while the profiler may still receive callbacks referencing this
    // thread; cleared after ThreadDestroyed is delivered (see below).
    BOOL m_fAllowProfilerCallbacks;

public:
    //
    // These two methods are for profiler support.  The profiler clears the allowed
    // value once it has delivered a ThreadDestroyed callback, so that it does not
    // deliver any notifications to the profiler afterwards which reference this
    // thread.  Callbacks on this thread which do not reference this thread are
    // allowable.
    //
    BOOL ProfilerCallbacksAllowed(void)
    {
        return m_fAllowProfilerCallbacks;
    }

    void SetProfilerCallbacksAllowed(BOOL fValue)
    {
        m_fAllowProfilerCallbacks = fValue;
    }
|---|
| 4813 |  | 
|---|
private:
    //
    //This context is used for optimizations on I/O thread pool thread. In case the
    //overlapped structure is from a different appdomain, it is stored in this structure
    //to be processed later correctly by entering the right domain.
    PVOID m_pIOCompletionContext;
    // Allocate/free the context above; definitions are in the .cpp.
    BOOL AllocateIOCompletionContext();
    VOID FreeIOCompletionContext();
public:
    // Returns the deferred I/O completion context (may be NULL).
    inline PVOID GetIOCompletionContext()
    {
        return m_pIOCompletionContext;
    }
|---|
| 4827 |  | 
|---|
private:
    // Inside a host, we don't own a thread handle, and we avoid DuplicateHandle call.
    // If a thread is dying after we obtain the thread handle, our SuspendThread may fail
    // because the handle may be closed and reused for a completely different type of handle.
    // To solve this problem, we have a counter m_dwThreadHandleBeingUsed.  Before we grab
    // the thread handle, we increment the counter.  Before we return a thread back to SQL
    // in Reset and ExitTask, we wait until the counter drops to 0.
    // Volatile: read and updated from multiple threads without a lock.
    Volatile<LONG> m_dwThreadHandleBeingUsed;
|---|
| 4836 |  | 
|---|
| 4837 |  | 
|---|
private:
    // Set by the finalizer thread when a finalized Thread object needs
    // cleanup; polled via CleanupNeededForFinalizedThread().
    static BOOL s_fCleanFinalizedThread;

public:
#ifndef DACCESS_COMPILE
    // Requests cleanup of a finalized thread.  May only be called on the
    // finalizer thread (asserted in debug builds).
    static void SetCleanupNeededForFinalizedThread()
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE (IsFinalizerThread());
        s_fCleanFinalizedThread = TRUE;
    }
#endif //!DACCESS_COMPILE

    // Returns whether a finalized thread is awaiting cleanup.
    static BOOL CleanupNeededForFinalizedThread()
    {
        LIMITED_METHOD_CONTRACT;
        return s_fCleanFinalizedThread;
    }
|---|
| 4856 |  | 
|---|
private:
    // When we create throwable for an exception, we need to run managed code.
    // If the same type of exception is thrown while creating managed object, like InvalidProgramException,
    // we may be in an infinite recursive case.
    // Non-NULL while a throwable is being created for this exception, which
    // lets CLRException::GetThrowable detect the recursion described above.
    Exception *m_pCreatingThrowableForException;
    friend OBJECTREF CLRException::GetThrowable();
|---|
| 4863 |  | 
|---|
#ifdef _DEBUG
private:
    int m_dwDisableAbortCheckCount; // Disable check before calling managed code.
                                    // !!! Use this very carefully.  If managed code runs user code
                                    // !!! or blocks on locks, the thread may not be aborted.
public:
    // Increments the current thread's abort-check-disable count
    // (debug-only; see the member comment above for the hazards).
    static void        DisableAbortCheck()
    {
        WRAPPER_NO_CONTRACT;
        Thread *pThread = GetThread();
        FastInterlockIncrement((LONG*)&pThread->m_dwDisableAbortCheckCount);
    }
    // Decrements the count; asserts it was positive.
    static void        EnableAbortCheck()
    {
        WRAPPER_NO_CONTRACT;
        Thread *pThread = GetThread();
        _ASSERTE (pThread->m_dwDisableAbortCheckCount > 0);
        FastInterlockDecrement((LONG*)&pThread->m_dwDisableAbortCheckCount);
    }

    // Returns whether abort checks are currently disabled on this thread.
    BOOL IsAbortCheckDisabled()
    {
        return m_dwDisableAbortCheckCount > 0;
    }

    // RAII holder that disables abort checks for its scope.
    typedef StateHolder<Thread::DisableAbortCheck, Thread::EnableAbortCheck> DisableAbortCheckHolder;
#endif
|---|
| 4891 |  | 
|---|
private:
    // At the end of a catch, we may raise ThreadAbortException.  If catch clause set IP to resume in the
    // corresponding try block, our exception system will execute the same catch clause again and again.
    // So we save reference to the clause post which TA was reraised, which is used in ExceptionTracker::ProcessManagedCallFrame
    // to make ThreadAbort proceed ahead instead of going in a loop.
    // This problem only happens on Win64 due to JIT64.  The common scenario is VB's "On error resume next"
#ifdef WIN64EXCEPTIONS
    DWORD       m_dwIndexClauseForCatch;
    StackFrame  m_sfEstablisherOfActualHandlerFrame;
#endif // WIN64EXCEPTIONS
|---|
| 4902 |  | 
|---|
public:
    // Holds per-thread information the debugger uses to expose locking information
    // See ThreadDebugBlockingInfo.h for more details
    ThreadDebugBlockingInfo DebugBlockingInfo;
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
    // For the purposes of tracking resource usage we implement a simple cpu resource usage counter on each
    // thread. Every time QueryThreadProcessorUsage() is invoked it returns the amount of cpu time (a
    // combination of user and kernel mode time) used since the last call to QueryThreadProcessorUsage(). The
    // result is in 100 nanosecond units.
    ULONGLONG QueryThreadProcessorUsage();

private:
    // The amount of processor time (both user and kernel) in 100ns units used by this thread at the time of
    // the last call to QueryThreadProcessorUsage().
    ULONGLONG m_ullProcessorUsageBaseline;
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING

    // Disables pumping and thread join in RCW creation
    bool m_fDisableComObjectEagerCleanup;

    // See ThreadStore::TriggerGCForDeadThreadsIfNecessary()
    // TRUE once this dead thread has already been counted toward a
    // dead-thread-triggered GC, so it is not counted twice.
    bool m_fHasDeadThreadBeenConsideredForGCTrigger;
|---|
| 4925 |  | 
|---|
private:
    // Per-thread pseudo-random number generator state.
    CLRRandom m_random;

    // Returns this thread's private RNG; thread-local by construction, so
    // callers need no locking when used from the owning thread.
public:
    CLRRandom* GetRandom() {return &m_random;}
|---|
| 4931 |  | 
|---|
#ifdef FEATURE_COMINTEROP
private:
    // Cookie returned from CoRegisterInitializeSpy
    ULARGE_INTEGER m_uliInitializeSpyCookie;

    // True if m_uliInitializeSpyCookie is valid
    bool m_fInitializeSpyRegistered;

    // The last STA COM context we saw - used to speed up RCW creation
    LPVOID m_pLastSTACtxCookie;

public:
    // NOTE(review): definitions live in the .inl/.cpp; behavior below is
    // inferred from the member comments above -- confirm there.
    // Revokes the registered COM initialize spy (see m_uliInitializeSpyCookie).
    inline void RevokeApartmentSpy();
    // Get/set the cached STA context cookie; fNAContext distinguishes
    // neutral-apartment cookies.
    inline LPVOID GetLastSTACtxCookie(BOOL *pfNAContext);
    inline void SetLastSTACtxCookie(LPVOID pCtxCookie, BOOL fNAContext);
#endif // FEATURE_COMINTEROP
|---|
| 4948 |  | 
|---|
private:
    // This duplicates the ThreadType_GC bit stored in TLS (TlsIdx_ThreadType). It exists
    // so that any thread can query whether any other thread is a "GC Special" thread.
    // (In contrast, ::IsGCSpecialThread() only gives this info about the currently
    // executing thread.) The Profiling API uses this to determine whether it should
    // "hide" the thread from profilers. GC Special threads (in particular the bgc
    // thread) need to be hidden from profilers because the bgc thread creation path
    // occurs while the EE is suspended, and while the thread that's suspending the
    // runtime is waiting for the bgc thread to signal an event. The bgc thread cannot
    // switch to preemptive mode and call into a profiler at this time, or else a
    // deadlock will result when toggling back to cooperative mode (bgc thread toggling
    // to coop will block due to the suspension, and the thread suspending the runtime
    // continues to block waiting for the bgc thread to signal its creation events).
    // Furthermore, profilers have no need to be aware of GC special threads anyway,
    // since managed code never runs on them.
    bool m_fGCSpecial;

public:
    // Profiling API uses this to determine whether it should hide this thread from the
    // profiler.  (Reads m_fGCSpecial; see the comment above.)
    bool IsGCSpecial();

    // GC calls this when creating special threads that also happen to have an EE Thread
    // object associated with them (e.g., the bgc thread).
    void SetGCSpecial(bool fGCSpecial);
|---|
| 4974 |  | 
|---|
private:
    // CPU group and affinity mask assigned to this thread when CPU groups
    // are in use; maintained by the two methods below (defined in the .cpp).
    WORD m_wCPUGroup;
    DWORD_PTR m_pAffinityMask;

public:
    void ChooseThreadCPUGroupAffinity();
    void ClearThreadCPUGroupAffinity();
|---|
| 4982 |  | 
|---|
private:
    // Per thread table used to implement allocation sampling.
    AllLoggedTypes * m_pAllLoggedTypes;

public:
    // Returns the per-thread allocation-sampling table; may be NULL when it
    // has not been installed yet.
    AllLoggedTypes * GetAllocationSamplingTable()
    {
        LIMITED_METHOD_CONTRACT;

        return m_pAllLoggedTypes;
    }

    // Installs (or, when pAllLoggedTypes is NULL, clears) the per-thread
    // allocation-sampling table.
    void SetAllocationSamplingTable(AllLoggedTypes * pAllLoggedTypes)
    {
        LIMITED_METHOD_CONTRACT;

        // Assert if we try to set the m_pAllLoggedTypes to a non NULL value if it is already non-NULL.
        // This implies a memory leak.
        _ASSERTE(pAllLoggedTypes != NULL ? m_pAllLoggedTypes == NULL : TRUE);
        m_pAllLoggedTypes = pAllLoggedTypes;
    }
|---|
| 5004 |  | 
|---|
#ifdef FEATURE_PERFTRACING
private:
    // The object that contains the list write buffers used by this thread.
    Volatile<EventPipeBufferList*> m_pEventPipeBufferList;

    // Whether or not the thread is currently writing an event.
    Volatile<bool> m_eventWriteInProgress;

    // SampleProfiler thread state.  This is set on suspension and cleared before restart.
    // True if the thread was in cooperative mode.  False if it was in preemptive when the suspension started.
    Volatile<ULONG> m_gcModeOnSuspension;

    // The activity ID for the current thread.
    // An activity ID of zero means the thread is not executing in the context of an activity.
    GUID m_activityId;

public:
    // Returns the EventPipe buffer list owned by this thread (may be NULL).
    EventPipeBufferList* GetEventPipeBufferList()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pEventPipeBufferList;
    }

    // Installs the EventPipe buffer list for this thread.
    void SetEventPipeBufferList(EventPipeBufferList *pList)
    {
        LIMITED_METHOD_CONTRACT;
        m_pEventPipeBufferList = pList;
    }

    // TRUE while this thread is in the middle of writing an event.
    bool GetEventWriteInProgress() const
    {
        LIMITED_METHOD_CONTRACT;
        return m_eventWriteInProgress;
    }

    void SetEventWriteInProgress(bool value)
    {
        LIMITED_METHOD_CONTRACT;
        m_eventWriteInProgress = value;
    }

    // Returns TRUE if the thread was in cooperative GC mode when the sample
    // profiler suspended it (see SaveGCModeOnSuspension).
    bool GetGCModeOnSuspension()
    {
        LIMITED_METHOD_CONTRACT;
        return m_gcModeOnSuspension != 0;
    }

    // Snapshots the current GC mode at suspension time.
    void SaveGCModeOnSuspension()
    {
        LIMITED_METHOD_CONTRACT;
        m_gcModeOnSuspension = m_fPreemptiveGCDisabled;
    }
|---|
| 5058 | void ClearGCModeOnSuspension() | 
|---|
| 5059 | { | 
|---|
| 5060 | m_gcModeOnSuspension = 0; | 
|---|
| 5061 | } | 
|---|
| 5062 |  | 
|---|
    // Returns a pointer to this thread's activity ID.  An all-zero GUID
    // means no activity is in progress (see m_activityId).
    LPCGUID GetActivityId() const
    {
        LIMITED_METHOD_CONTRACT;
        return &m_activityId;
    }

    // Copies *pActivityId into this thread's activity ID.  pActivityId must
    // not be NULL (asserted in debug builds).
    void SetActivityId(LPCGUID pActivityId)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(pActivityId != NULL);

        m_activityId = *pActivityId;
    }
#endif // FEATURE_PERFTRACING
|---|
| 5077 |  | 
|---|
#ifdef FEATURE_HIJACK
private:

    // By the time a frame is scanned by the runtime, m_pHijackReturnKind always
    // identifies the gc-ness of the return register(s)
    // If the ReturnKind information is not available from the GcInfo, the runtime
    // computes it using the return types's class handle.

    ReturnKind m_HijackReturnKind;

public:

    // Returns the recorded GC-ness classification of the hijacked frame's
    // return register(s).
    ReturnKind GetHijackReturnKind()
    {
        LIMITED_METHOD_CONTRACT;

        return m_HijackReturnKind;
    }

    // Records the GC-ness classification of the hijacked frame's return
    // register(s).
    void SetHijackReturnKind(ReturnKind returnKind)
    {
        LIMITED_METHOD_CONTRACT;

        m_HijackReturnKind = returnKind;
    }
#endif // FEATURE_HIJACK
|---|
| 5104 | }; | 
|---|
| 5105 |  | 
|---|
// End of class Thread

// Convenience aliases promoting Thread's nested holder types to file scope.
typedef Thread::ForbidSuspendThreadHolder ForbidSuspendThreadHolder;
typedef Thread::ThreadPreventAsyncHolder ThreadPreventAsyncHolder;
typedef Thread::ThreadPreventAbortHolder ThreadPreventAbortHolder;
|---|
| 5111 |  | 
|---|
// Combines ForbidSuspendThreadHolder and CrstHolder into one.
class ForbidSuspendThreadCrstHolder
{
public:
    // Note: member initialization is intentionally ordered.
    // The forbid-suspend region is entered BEFORE the lock is taken and
    // (by reverse destruction order) exited after the lock is released.
    ForbidSuspendThreadCrstHolder(CrstBase * pCrst)
        : m_forbid_suspend_holder()
        , m_lock_holder(pCrst)
    { WRAPPER_NO_CONTRACT; }

private:
    ForbidSuspendThreadHolder   m_forbid_suspend_holder;
    CrstHolder                  m_lock_holder;
};
|---|
| 5126 |  | 
|---|
// Returns the task type associated with the current thread (defined in the .cpp).
ETaskType GetCurrentTaskType();
|---|
| 5128 |  | 
|---|
| 5129 |  | 
|---|
| 5130 |  | 
|---|
// Holder that tolerates AVs occurring inside runtime implementation code
// for its scope.
typedef Thread::AVInRuntimeImplOkayHolder AVInRuntimeImplOkayHolder;

// NOTE(review): definitions are in the .cpp; from the signatures these
// revert any thread impersonation (returning the original token through
// phToken) and later undo that revert -- confirm at the definitions.
BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken);
void UndoRevert(BOOL bReverted, HANDLE hToken);
|---|
| 5135 |  | 
|---|
| 5136 | // --------------------------------------------------------------------------- | 
|---|
| 5137 | // | 
|---|
| 5138 | //      The ThreadStore manages all the threads in the system. | 
|---|
| 5139 | // | 
|---|
| 5140 | // There is one ThreadStore in the system, available through | 
|---|
| 5141 | // ThreadStore::m_pThreadStore. | 
|---|
| 5142 | // --------------------------------------------------------------------------- | 
|---|
| 5143 |  | 
|---|
// Intrusive singly-linked list of Thread objects (the list does not own
// its elements).
typedef SList<Thread, false, PTR_Thread> ThreadList;


// The ThreadStore is a singleton class
// Debug check that a member function is invoked on the one true instance.
#define CHECK_ONE_STORE()       _ASSERTE(this == ThreadStore::s_pThreadStore);

typedef DPTR(class ThreadStore) PTR_ThreadStore;
typedef DPTR(class ExceptionTracker) PTR_ExceptionTracker;
|---|
| 5152 |  | 
|---|
| 5153 | class ThreadStore | 
|---|
| 5154 | { | 
|---|
| 5155 | friend class Thread; | 
|---|
| 5156 | friend class ThreadSuspend; | 
|---|
| 5157 | friend Thread* SetupThread(BOOL); | 
|---|
| 5158 | friend class AppDomain; | 
|---|
| 5159 | #ifdef DACCESS_COMPILE | 
|---|
| 5160 | friend class ClrDataAccess; | 
|---|
| 5161 | friend Thread* __stdcall DacGetThread(ULONG32 osThreadID); | 
|---|
| 5162 | #endif | 
|---|
| 5163 |  | 
|---|
| 5164 | public: | 
|---|
| 5165 |  | 
|---|
| 5166 | ThreadStore(); | 
|---|
| 5167 |  | 
|---|
| 5168 | static void InitThreadStore(); | 
|---|
| 5169 | static void LockThreadStore(); | 
|---|
| 5170 | static void UnlockThreadStore(); | 
|---|
| 5171 |  | 
|---|
| 5172 | // Add a Thread to the ThreadStore | 
|---|
| 5173 | // WARNING : only GC calls this with bRequiresTSL set to FALSE. | 
|---|
| 5174 | static void AddThread(Thread *newThread, BOOL bRequiresTSL=TRUE); | 
|---|
| 5175 |  | 
|---|
| 5176 | // RemoveThread finds the thread in the ThreadStore and discards it. | 
|---|
| 5177 | static BOOL RemoveThread(Thread *target); | 
|---|
| 5178 |  | 
|---|
| 5179 | static BOOL CanAcquireLock(); | 
|---|
| 5180 |  | 
|---|
| 5181 | // Transfer a thread from the unstarted to the started list. | 
|---|
| 5182 | // WARNING : only GC calls this with bRequiresTSL set to FALSE. | 
|---|
| 5183 | static void TransferStartedThread(Thread *target, BOOL bRequiresTSL=TRUE); | 
|---|
| 5184 |  | 
|---|
| 5185 | // Before using the thread list, be sure to take the critical section.  Otherwise | 
|---|
| 5186 | // it can change underneath you, perhaps leading to an exception after Remove. | 
|---|
| 5187 | // Prev==NULL to get the first entry in the list. | 
|---|
| 5188 | static Thread *GetAllThreadList(Thread *Prev, ULONG mask, ULONG bits); | 
|---|
| 5189 | static Thread *GetThreadList(Thread *Prev); | 
|---|
| 5190 |  | 
|---|
| 5191 | // Every EE process can lazily create a GUID that uniquely identifies it (for | 
|---|
| 5192 | // purposes of remoting). | 
|---|
| 5193 | const GUID    &GetUniqueEEId(); | 
|---|
| 5194 |  | 
|---|
| 5195 | // We shut down the EE when the last non-background thread terminates.  This event | 
|---|
| 5196 | // is used to signal the main thread when this condition occurs. | 
|---|
| 5197 | void            WaitForOtherThreads(); | 
|---|
| 5198 | static void     CheckForEEShutdown(); | 
|---|
| 5199 | CLREvent        m_TerminationEvent; | 
|---|
| 5200 |  | 
|---|
| 5201 | // Have all the foreground threads completed?  In other words, can we release | 
|---|
| 5202 | // the main thread? | 
|---|
| 5203 | BOOL        OtherThreadsComplete() | 
|---|
| 5204 | { | 
|---|
| 5205 | LIMITED_METHOD_CONTRACT; | 
|---|
| 5206 | _ASSERTE(m_ThreadCount - m_UnstartedThreadCount - m_DeadThreadCount - Thread::m_ActiveDetachCount + m_PendingThreadCount >= m_BackgroundThreadCount); | 
|---|
| 5207 |  | 
|---|
| 5208 | return (m_ThreadCount - m_UnstartedThreadCount - m_DeadThreadCount | 
|---|
| 5209 | - Thread::m_ActiveDetachCount + m_PendingThreadCount | 
|---|
| 5210 | == m_BackgroundThreadCount); | 
|---|
| 5211 | } | 
|---|
| 5212 |  | 
|---|
| 5213 | // If you want to trap threads re-entering the EE (be this for GC, or debugging, | 
|---|
| 5214 | // or Thread.Suspend() or whatever, you need to TrapReturningThreads(TRUE).  When | 
|---|
| 5215 | // you are finished snagging threads, call TrapReturningThreads(FALSE).  This | 
|---|
| 5216 | // counts internally. | 
|---|
| 5217 | // | 
|---|
| 5218 | // Of course, you must also fix RareDisablePreemptiveGC to do the right thing | 
|---|
| 5219 | // when the trap occurs. | 
|---|
| 5220 | static void     TrapReturningThreads(BOOL yes); | 
|---|
| 5221 |  | 
|---|
| 5222 | private: | 
|---|
| 5223 |  | 
|---|
| 5224 | // Enter and leave the critical section around the thread store.  Clients should | 
|---|
| 5225 | // use LockThreadStore and UnlockThreadStore. | 
|---|
| 5226 | void Enter(); | 
|---|
| 5227 | void Leave(); | 
|---|
| 5228 |  | 
|---|
| 5229 | // Critical section for adding and removing threads to the store | 
|---|
| 5230 | Crst        m_Crst; | 
|---|
| 5231 |  | 
|---|
| 5232 | // List of all the threads known to the ThreadStore (started & unstarted). | 
|---|
| 5233 | ThreadList  m_ThreadList; | 
|---|
| 5234 |  | 
|---|
| 5235 | // m_ThreadCount is the count of all threads in m_ThreadList.  This includes | 
|---|
| 5236 | // background threads / unstarted threads / whatever. | 
|---|
| 5237 | // | 
|---|
| 5238 | // m_UnstartedThreadCount is the subset of m_ThreadCount that have not yet been | 
|---|
| 5239 | // started. | 
|---|
| 5240 | // | 
|---|
| 5241 | // m_BackgroundThreadCount is the subset of m_ThreadCount that have been started | 
|---|
| 5242 | // but which are running in the background.  So this is a misnomer in the sense | 
|---|
| 5243 | // that unstarted background threads are not reflected in this count. | 
|---|
| 5244 | // | 
|---|
| 5245 | // m_PendingThreadCount is used to solve a race condition.  The main thread could | 
|---|
| 5246 | // start another thread running and then exit.  The main thread might then start | 
|---|
| 5247 | // tearing down the EE before the new thread moves itself out of m_UnstartedThread- | 
|---|
| 5248 | // Count in TransferUnstartedThread.  This count is atomically bumped in | 
|---|
| 5249 | // CreateNewThread, and atomically reduced within a locked thread store. | 
|---|
| 5250 | // | 
|---|
| 5251 | // m_DeadThreadCount is the subset of m_ThreadCount which have died.  The Win32 | 
|---|
| 5252 | // thread has disappeared, but something (like the exposed object) has kept the | 
|---|
| 5253 | // refcount non-zero so we can't destruct yet. | 
|---|
| 5254 | // | 
|---|
| 5255 | // m_MaxThreadCount is the maximum value of m_ThreadCount. ie. the largest number | 
|---|
| 5256 | // of simultaneously active threads | 
|---|
| 5257 |  | 
|---|
| 5258 | protected: | 
|---|
| 5259 | LONG        m_ThreadCount; | 
|---|
| 5260 | LONG        m_MaxThreadCount; | 
|---|
| 5261 | public: | 
|---|
| 5262 | LONG        ThreadCountInEE () | 
|---|
| 5263 | { | 
|---|
| 5264 | LIMITED_METHOD_CONTRACT; | 
|---|
| 5265 | return m_ThreadCount; | 
|---|
| 5266 | } | 
|---|
| 5267 | #if defined(_DEBUG) || defined(DACCESS_COMPILE) | 
|---|
| 5268 | LONG        MaxThreadCountInEE () | 
|---|
| 5269 | { | 
|---|
| 5270 | LIMITED_METHOD_CONTRACT; | 
|---|
| 5271 | return m_MaxThreadCount; | 
|---|
| 5272 | } | 
|---|
| 5273 | #endif | 
|---|
| 5274 | private: | 
|---|
| 5275 | LONG        m_UnstartedThreadCount; | 
|---|
| 5276 | LONG        m_BackgroundThreadCount; | 
|---|
| 5277 | LONG        m_PendingThreadCount; | 
|---|
| 5278 |  | 
|---|
| 5279 | LONG        m_DeadThreadCount; | 
|---|
| 5280 | LONG        m_DeadThreadCountForGCTrigger; | 
|---|
| 5281 | bool        m_TriggerGCForDeadThreads; | 
|---|
| 5282 |  | 
|---|
| 5283 | private: | 
|---|
| 5284 | // Space for the lazily-created GUID. | 
|---|
| 5285 | GUID        m_EEGuid; | 
|---|
| 5286 | BOOL        m_GuidCreated; | 
|---|
| 5287 |  | 
|---|
| 5288 | // Even in the release product, we need to know what thread holds the lock on | 
|---|
| 5289 | // the ThreadStore.  This is so we never deadlock when the GC thread halts a | 
|---|
| 5290 | // thread that holds this lock. | 
|---|
| 5291 | Thread     *m_HoldingThread; | 
|---|
| 5292 | EEThreadId  m_holderthreadid;   // current holder (or NULL) | 
|---|
| 5293 |  | 
|---|
| 5294 | private: | 
|---|
| 5295 | static LONG s_DeadThreadCountThresholdForGCTrigger; | 
|---|
| 5296 | static DWORD s_DeadThreadGCTriggerPeriodMilliseconds; | 
|---|
| 5297 | static SIZE_T *s_DeadThreadGenerationCounts; | 
|---|
| 5298 |  | 
|---|
| 5299 | public: | 
|---|
| 5300 |  | 
|---|
| 5301 | static BOOL HoldingThreadStore() | 
|---|
| 5302 | { | 
|---|
| 5303 | WRAPPER_NO_CONTRACT; | 
|---|
| 5304 | // Note that GetThread() may be 0 if it is the debugger thread | 
|---|
| 5305 | // or perhaps a concurrent GC thread. | 
|---|
| 5306 | return HoldingThreadStore(GetThread()); | 
|---|
| 5307 | } | 
|---|
| 5308 |  | 
|---|
| 5309 | static BOOL HoldingThreadStore(Thread *pThread); | 
|---|
| 5310 |  | 
|---|
| 5311 | #ifdef DACCESS_COMPILE | 
|---|
| 5312 | static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); | 
|---|
| 5313 | #endif | 
|---|
| 5314 |  | 
|---|
| 5315 | SPTR_DECL(ThreadStore, s_pThreadStore); | 
|---|
| 5316 |  | 
|---|
| 5317 | #ifdef _DEBUG | 
|---|
| 5318 | public: | 
|---|
| 5319 | BOOL        DbgFindThread(Thread *target); | 
|---|
| 5320 | LONG        DbgBackgroundThreadCount() | 
|---|
| 5321 | { | 
|---|
| 5322 | LIMITED_METHOD_CONTRACT; | 
|---|
| 5323 | return m_BackgroundThreadCount; | 
|---|
| 5324 | } | 
|---|
| 5325 |  | 
|---|
| 5326 | BOOL IsCrstForThreadStore (const CrstBase* const pCrstBase) | 
|---|
| 5327 | { | 
|---|
| 5328 | LIMITED_METHOD_CONTRACT; | 
|---|
| 5329 | return (void *)pCrstBase == (void*)&m_Crst; | 
|---|
| 5330 | } | 
|---|
| 5331 |  | 
|---|
| 5332 | #endif | 
|---|
| 5333 | private: | 
|---|
| 5334 | static CONTEXT *s_pOSContext; | 
|---|
| 5335 | public: | 
|---|
| 5336 | // We can not do any memory allocation after we suspend a thread in order ot | 
|---|
| 5337 | // avoid deadlock situation. | 
|---|
| 5338 | static void AllocateOSContext(); | 
|---|
| 5339 | static CONTEXT *GrabOSContext(); | 
|---|
| 5340 |  | 
|---|
| 5341 | private: | 
|---|
| 5342 | // Thread abort needs to walk stack to decide if thread abort can proceed. | 
|---|
| 5343 | // It is unsafe to crawl a stack of thread if the thread is OS-suspended which we do during | 
|---|
| 5344 | // thread abort.  For example, Thread T1 aborts thread T2.  T2 is suspended by T1. Inside SQL | 
|---|
| 5345 | // this means that no thread sharing the same scheduler with T2 can run.  If T1 needs a lock which | 
|---|
| 5346 | // is owned by one thread on the scheduler, T1 will wait forever. | 
|---|
| 5347 | // Our solution is to move T2 to a safe point, resume it, and then do stack crawl. | 
|---|
| 5348 | static CLREvent *s_pWaitForStackCrawlEvent; | 
|---|
| 5349 | public: | 
|---|
// Blocks the current thread until s_pWaitForStackCrawlEvent is signaled,
// i.e. until the stack crawl performed as part of thread abort (see the
// comment on s_pWaitForStackCrawlEvent above) has completed.
static void WaitForStackCrawlEvent()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;
    // Infinite, non-alertable wait: the event must eventually be Set by the
    // thread performing the crawl (SetStackCrawlEvent below).
    s_pWaitForStackCrawlEvent->Wait(INFINITE,FALSE);
}
|---|
// Signals s_pWaitForStackCrawlEvent, releasing any threads blocked in
// WaitForStackCrawlEvent above.
static void SetStackCrawlEvent()
{
    LIMITED_METHOD_CONTRACT;
    s_pWaitForStackCrawlEvent->Set();
}
|---|
// Returns s_pWaitForStackCrawlEvent to the non-signaled state so that a
// subsequent WaitForStackCrawlEvent will block again.
static void ResetStackCrawlEvent()
{
    LIMITED_METHOD_CONTRACT;
    s_pWaitForStackCrawlEvent->Reset();
}
|---|
| 5372 |  | 
|---|
| 5373 | private: | 
|---|
| 5374 | void IncrementDeadThreadCountForGCTrigger(); | 
|---|
| 5375 | void DecrementDeadThreadCountForGCTrigger(); | 
|---|
| 5376 | public: | 
|---|
| 5377 | void OnMaxGenerationGCStarted(); | 
|---|
| 5378 | bool ShouldTriggerGCForDeadThreads(); | 
|---|
| 5379 | void TriggerGCForDeadThreadsIfNecessary(); | 
|---|
| 5380 | }; | 
|---|
| 5381 |  | 
|---|
| 5382 | struct TSSuspendHelper { | 
|---|
| 5383 | static void SetTrap() { ThreadStore::TrapReturningThreads(TRUE); } | 
|---|
| 5384 | static void UnsetTrap() { ThreadStore::TrapReturningThreads(FALSE); } | 
|---|
| 5385 | }; | 
|---|
| 5386 | typedef StateHolder<TSSuspendHelper::SetTrap, TSSuspendHelper::UnsetTrap> TSSuspendHolder; | 
|---|
| 5387 |  | 
|---|
| 5388 | typedef StateHolder<ThreadStore::LockThreadStore,ThreadStore::UnlockThreadStore> ThreadStoreLockHolder; | 
|---|
| 5389 |  | 
|---|
| 5390 | #endif | 
|---|
| 5391 |  | 
|---|
| 5392 | // This class dispenses small thread ids for the thin lock mechanism. | 
|---|
| 5393 | // Recently we started using this class to dispense domain neutral module IDs as well. | 
|---|
// This class dispenses small thread ids for the thin lock mechanism.
//
// Implementation notes:
//   * Ids are indices into m_idToThread.  Slot 0 is never dispensed.
//   * A live slot holds the owning Thread*.  A recycled (free) slot instead
//     holds the id of the NEXT free slot, stored as a reinterpret_cast'ed
//     pointer value; m_recycleBin is the head of that intrusive free list
//     (0 means the free list is empty).  Telling the two apart relies on
//     real Thread* values being numerically larger than m_idToThreadCapacity.
//   * All mutation is serialized by m_Crst.
class IdDispenser
{
private:
    DWORD       m_highestId;          // highest id given out so far
    SIZE_T      m_recycleBin;         // link list to chain all ids returning to us
    Crst        m_Crst;               // lock to protect our data structures
    DPTR(PTR_Thread)    m_idToThread;         // map thread ids to threads
    DWORD       m_idToThreadCapacity; // capacity of the map

#ifndef DACCESS_COMPILE
    // Doubles the id-to-thread map (or creates it with 16 slots), copying the
    // live entries and NULL-filling the new tail.  Caller must hold m_Crst.
    void GrowIdToThread()
    {
        CONTRACTL
        {
            THROWS;
            GC_NOTRIGGER;
            SO_TOLERANT;
            MODE_ANY;
        }
        CONTRACTL_END;

        DWORD newCapacity = m_idToThreadCapacity == 0 ? 16 : m_idToThreadCapacity*2;
        Thread **newIdToThread = new Thread*[newCapacity];

        // Slot 0 is reserved and never dispensed as an id.
        newIdToThread[0] = NULL;

        for (DWORD i = 1; i < m_idToThreadCapacity; i++)
        {
            newIdToThread[i] = m_idToThread[i];
        }
        for (DWORD j = m_idToThreadCapacity; j < newCapacity; j++)
        {
            newIdToThread[j] = NULL;
        }
        delete[] m_idToThread;
        m_idToThread = newIdToThread;
        m_idToThreadCapacity = newCapacity;
    }
#endif // !DACCESS_COMPILE

public:
    IdDispenser() :
        // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
        // If you remove this flag, we will switch to preemptive mode when entering
        // m_Crst, which means all functions that enter it will become
        // GC_TRIGGERS.  (This includes all uses of CrstHolder.)  So be sure
        // to update the contracts if you remove this flag.
        m_Crst(CrstThreadIdDispenser, CRST_UNSAFE_ANYMODE)
    {
        WRAPPER_NO_CONTRACT;
        m_highestId = 0;
        m_recycleBin = 0;
        m_idToThreadCapacity = 0;
        m_idToThread = NULL;
    }

    ~IdDispenser()
    {
        LIMITED_METHOD_CONTRACT;
        delete[] m_idToThread;
    }

    // An id is valid if it has ever been dispensed (0 is never dispensed).
    // Note: this does not check whether the id is currently live vs recycled.
    bool IsValidId(DWORD id)
    {
        LIMITED_METHOD_CONTRACT;
        return (id > 0) && (id <= m_highestId);
    }

#ifndef DACCESS_COMPILE
    // Dispenses an id for pThread, preferring recycled ids over new ones, and
    // records pThread in the slot.  On id-space exhaustion the highest id is
    // returned repeatedly; the caller is responsible for detecting that case
    // (see the comment below).
    void NewId(Thread *pThread, DWORD & newId)
    {
        WRAPPER_NO_CONTRACT;
        DWORD result;
        CrstHolder ch(&m_Crst);

        if (m_recycleBin != 0)
        {
            // Pop the head of the free list; the slot holds the next free id.
            _ASSERTE(FitsIn<DWORD>(m_recycleBin));
            result = static_cast<DWORD>(m_recycleBin);
            m_recycleBin = reinterpret_cast<SIZE_T>(m_idToThread[m_recycleBin]);
        }
        else
        {
            // we make sure ids don't wrap around - before they do, we always return the highest possible
            // one and rely on our caller to detect this situation
            // (DWORD arithmetic: once m_highestId is the max value, +1 wraps to 0,
            // the condition is false, and m_highestId stays pinned at the max.)
            if (m_highestId + 1 > m_highestId)
                m_highestId = m_highestId + 1;
            result = m_highestId;
            if (result >= m_idToThreadCapacity)
                GrowIdToThread();
        }

        _ASSERTE(result < m_idToThreadCapacity);
        newId = result;
        if (result < m_idToThreadCapacity)
            m_idToThread[result] = pThread;
    }
#endif // !DACCESS_COMPILE

#ifndef DACCESS_COMPILE
    // Returns an id to the dispenser.  The highest id is simply un-dispensed;
    // any other id is pushed onto the recycle-bin free list by storing the old
    // list head into its slot.
    void DisposeId(DWORD id)
    {
        CONTRACTL
        {
            NOTHROW;
            GC_NOTRIGGER;
            MODE_ANY;
            CAN_TAKE_LOCK;
        }
        CONTRACTL_END;
        CrstHolder ch(&m_Crst);

        _ASSERTE(IsValidId(id));
        if (id == m_highestId)
        {
            m_highestId--;
        }
        else
        {
            m_idToThread[id] = reinterpret_cast<PTR_Thread>(m_recycleBin);
            m_recycleBin = id;
#ifdef _DEBUG
            // Walk the free list to verify 'id' does not appear twice
            // (a double-dispose would create a cycle).
            size_t index = (size_t)m_idToThread[id];
            while (index != 0)
            {
                _ASSERTE(index != id);
                index = (size_t)m_idToThread[index];
            }
#endif
        }
    }
#endif // !DACCESS_COMPILE

    // Maps an id back to its Thread*.  No validation of liveness is performed.
    Thread *IdToThread(DWORD id)
    {
        LIMITED_METHOD_CONTRACT;
        CrstHolder ch(&m_Crst);

        Thread *result = NULL;
        if (id <= m_highestId)
            result = m_idToThread[id];
        // m_idToThread may have Thread*, or the next free slot
        // NOTE(review): this assert fires when the slot holds a free-list link
        // or NULL (values <= capacity), i.e. when 'id' is not currently live --
        // callers are presumably required to pass only live ids; confirm.
        _ASSERTE ((size_t)result > m_idToThreadCapacity);

        return result;
    }

    // Like IdToThread, but returns NULL for recycled/unallocated slots instead
    // of asserting: any slot value <= capacity is a free-list link, not a Thread*.
    Thread *IdToThreadWithValidation(DWORD id)
    {
        WRAPPER_NO_CONTRACT;

        CrstHolder ch(&m_Crst);

        Thread *result = NULL;
        if (id <= m_highestId)
            result = m_idToThread[id];
        // m_idToThread may have Thread*, or the next free slot
        if ((size_t)result <= m_idToThreadCapacity)
            result = NULL;
        // Sanity: a live, aligned Thread* should report the id we looked up.
        _ASSERTE(result == NULL || ((size_t)result & 0x3) == 0 || ((Thread*)result)->GetThreadId() == id);
        return result;
    }
};
typedef DPTR(IdDispenser) PTR_IdDispenser;
|---|
| 5558 |  | 
|---|
| 5559 | #ifndef CROSSGEN_COMPILE | 
|---|
| 5560 |  | 
|---|
| 5561 | // Dispenser of small thread ids for thin lock mechanism | 
|---|
| 5562 | GPTR_DECL(IdDispenser,g_pThinLockThreadIdDispenser); | 
|---|
| 5563 |  | 
|---|
| 5564 | // forward declaration | 
|---|
| 5565 | DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL alertable = FALSE); | 
|---|
| 5566 |  | 
|---|
| 5567 | // When a thread is being created after a debug suspension has started, it sends an event up to the | 
|---|
| 5568 | // debugger. Afterwards, with the Debugger Lock still held, it will check to see if we had already asked to suspend the | 
|---|
| 5569 | // Runtime. If we have, then it will turn around and call this to set the debug suspend pending flag on the newly | 
|---|
| 5570 | // created thread, since it was missed by SysStartSuspendForDebug as it didn't exist when that function was run. | 
|---|
| 5571 | // | 
|---|
| 5572 | inline void Thread::MarkForDebugSuspend(void) | 
|---|
| 5573 | { | 
|---|
| 5574 | WRAPPER_NO_CONTRACT; | 
|---|
| 5575 | if (!(m_State & TS_DebugSuspendPending)) | 
|---|
| 5576 | { | 
|---|
| 5577 | FastInterlockOr((ULONG *) &m_State, TS_DebugSuspendPending); | 
|---|
| 5578 | ThreadStore::TrapReturningThreads(TRUE); | 
|---|
| 5579 | } | 
|---|
| 5580 | } | 
|---|
| 5581 |  | 
|---|
| 5582 | // Debugger per-thread flag for enabling notification on "manual" | 
|---|
| 5583 | // method calls, for stepping logic. | 
|---|
| 5584 |  | 
|---|
// Enables the "trace call" debugger notification for this thread by bumping
// the per-thread counter, then turning on the thread trap.
// Note the ordering: the counter is raised BEFORE the trap is enabled; the
// mirror image of DecrementTraceCallCount below.
inline void Thread::IncrementTraceCallCount()
{
    WRAPPER_NO_CONTRACT;
    FastInterlockIncrement(&m_TraceCallCount);
    ThreadStore::TrapReturningThreads(TRUE);
}
|---|
| 5591 |  | 
|---|
// Disables the "trace call" debugger notification for this thread.
// Note the ordering: the trap is released BEFORE the counter is lowered --
// the exact reverse of IncrementTraceCallCount above; do not reorder.
inline void Thread::DecrementTraceCallCount()
{
    WRAPPER_NO_CONTRACT;
    ThreadStore::TrapReturningThreads(FALSE);
    FastInterlockDecrement(&m_TraceCallCount);
}
|---|
| 5598 |  | 
|---|
| 5599 | // When we enter an Object.Wait() we are logically inside the synchronized | 
|---|
| 5600 | // region of that object.  Of course, we've actually completely left the region, | 
|---|
| 5601 | // or else nobody could Notify us.  But if we throw ThreadInterruptedException to | 
|---|
| 5602 | // break out of the Wait, all the catchers are going to expect the synchronized | 
|---|
| 5603 | // state to be correct.  So we carry it around in case we need to restore it. | 
|---|
struct PendingSync
{
    LONG            m_EnterCount;    // monitor recursion count to re-establish on Restore
    WaitEventLink  *m_WaitEventLink; // wait-list entry for the object being waited on
#ifdef _DEBUG
    Thread         *m_OwnerThread;   // thread that created this record (debug validation only)
#endif

    // Captures the wait-list entry.  m_EnterCount is deliberately left
    // uninitialized here; the caller fills it in.
    PendingSync(WaitEventLink *s) : m_WaitEventLink(s)
    {
        WRAPPER_NO_CONTRACT;
#ifdef _DEBUG
        m_OwnerThread = GetThread();
#endif
    }
    // Re-establishes the saved synchronized state.  bRemoveFromSB presumably
    // controls removal of the entry from the sync block's wait list --
    // confirm against the out-of-line definition.
    void Restore(BOOL bRemoveFromSB);
};
|---|
| 5621 |  | 
|---|
| 5622 |  | 
|---|
| 5623 | #define INCTHREADLOCKCOUNT() { } | 
|---|
| 5624 | #define DECTHREADLOCKCOUNT() { } | 
|---|
| 5625 | #define INCTHREADLOCKCOUNTTHREAD(thread) { } | 
|---|
| 5626 | #define DECTHREADLOCKCOUNTTHREAD(thread) { } | 
|---|
| 5627 |  | 
|---|
| 5628 |  | 
|---|
| 5629 | // -------------------------------------------------------------------------------- | 
|---|
| 5630 | // GCHolder is used to implement the normal GCX_ macros. | 
|---|
| 5631 | // | 
|---|
| 5632 | // GCHolder is normally used indirectly through GCX_ convenience macros, but can be used | 
|---|
| 5633 | // directly if needed (e.g. due to multiple holders in one scope, or to use | 
|---|
| 5634 | // in class definitions). | 
|---|
| 5635 | // | 
|---|
| 5636 | // GCHolder (or derived types) should only be instantiated as automatic variables | 
|---|
| 5637 | // -------------------------------------------------------------------------------- | 
|---|
| 5638 |  | 
|---|
| 5639 | #ifdef ENABLE_CONTRACTS_IMPL | 
|---|
| 5640 | #define GCHOLDER_CONTRACT_ARGS_NoDtor   , false, szConstruct, szFunction, szFile, lineNum | 
|---|
| 5641 | #define GCHOLDER_CONTRACT_ARGS_HasDtor  , true,  szConstruct, szFunction, szFile, lineNum | 
|---|
| 5642 | #define GCHOLDER_DECLARE_CONTRACT_ARGS_BARE \ | 
|---|
| 5643 | const char * szConstruct = "Unknown" \ | 
|---|
| 5644 | , const char * szFunction = "Unknown" \ | 
|---|
| 5645 | , const char * szFile = "Unknown" \ | 
|---|
| 5646 | , int lineNum = 0 | 
|---|
| 5647 | #define GCHOLDER_DECLARE_CONTRACT_ARGS , GCHOLDER_DECLARE_CONTRACT_ARGS_BARE | 
|---|
| 5648 | #define GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL , bool fPushStackRecord = true, GCHOLDER_DECLARE_CONTRACT_ARGS_BARE | 
|---|
| 5649 |  | 
|---|
| 5650 | #define GCHOLDER_SETUP_CONTRACT_STACK_RECORD(mode)                                  \ | 
|---|
| 5651 | m_fPushedRecord = false;                                                    \ | 
|---|
| 5652 | \ | 
|---|
| 5653 | if (fPushStackRecord && conditional)                                        \ | 
|---|
| 5654 | {                                                                           \ | 
|---|
| 5655 | m_pClrDebugState = GetClrDebugState();                                  \ | 
|---|
| 5656 | m_oldClrDebugState = *m_pClrDebugState;                                 \ | 
|---|
| 5657 | \ | 
|---|
| 5658 | m_pClrDebugState->ViolationMaskReset( ModeViolation );                  \ | 
|---|
| 5659 | \ | 
|---|
| 5660 | m_ContractStackRecord.m_szFunction = szFunction;                        \ | 
|---|
| 5661 | m_ContractStackRecord.m_szFile     = szFile;                            \ | 
|---|
| 5662 | m_ContractStackRecord.m_lineNum    = lineNum;                           \ | 
|---|
| 5663 | m_ContractStackRecord.m_testmask   =                                    \ | 
|---|
| 5664 | (Contract::ALL_Disabled & ~((UINT)(Contract::MODE_Mask)))         \ | 
|---|
| 5665 | | (mode);                                                           \ | 
|---|
| 5666 | m_ContractStackRecord.m_construct  = szConstruct;                       \ | 
|---|
| 5667 | m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );     \ | 
|---|
| 5668 | m_fPushedRecord = true;                                                 \ | 
|---|
| 5669 | } | 
|---|
| 5670 | #define GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(pThread)                                         \ | 
|---|
| 5671 | if (pThread->GCNoTrigger())                                                         \ | 
|---|
| 5672 | {                                                                                   \ | 
|---|
| 5673 | CONTRACT_ASSERT("Coop->preemp->coop switch attempted in a GC_NOTRIGGER scope",  \ | 
|---|
| 5674 | Contract::GC_NoTrigger,                                         \ | 
|---|
| 5675 | Contract::GC_Mask,                                              \ | 
|---|
| 5676 | szFunction,                                                     \ | 
|---|
| 5677 | szFile,                                                         \ | 
|---|
| 5678 | lineNum                                                         \ | 
|---|
| 5679 | );                                                              \ | 
|---|
| 5680 | } | 
|---|
| 5681 | #else | 
|---|
| 5682 | #define GCHOLDER_CONTRACT_ARGS_NoDtor | 
|---|
| 5683 | #define GCHOLDER_CONTRACT_ARGS_HasDtor | 
|---|
| 5684 | #define GCHOLDER_DECLARE_CONTRACT_ARGS_BARE | 
|---|
| 5685 | #define GCHOLDER_DECLARE_CONTRACT_ARGS | 
|---|
| 5686 | #define GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL | 
|---|
| 5687 | #define GCHOLDER_SETUP_CONTRACT_STACK_RECORD(mode) | 
|---|
| 5688 | #define GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(pThread) | 
|---|
| 5689 | #endif // ENABLE_CONTRACTS_IMPL | 
|---|
| 5690 |  | 
|---|
| 5691 | #ifndef DACCESS_COMPILE | 
|---|
// Common implementation for all GC-mode holders (GCX_ macros).  Records the
// GC mode (cooperative vs preemptive) at entry in m_WasCoop, optionally
// switches mode, and PopInternal restores the original mode on exit.  Under
// ENABLE_CONTRACTS_IMPL it also pushes/pops a contract stack record so that
// contract violations report the holder's construction site.
class GCHolderBase
{
protected:
    // NOTE: This method is FORCEINLINE'ed into its callers, but the callers are just the
    // corresponding methods in the derived types, not all sites that use GC holders.  This
    // is done so that the #pragma optimize will take effect since the optimize settings
    // are taken from the template instantiation site, not the template definition site.
    template <BOOL THREAD_EXISTS>
    FORCEINLINE_NONDEBUG
    void PopInternal()
    {
        SCAN_SCOPE_END;
        WRAPPER_NO_CONTRACT;

#ifdef ENABLE_CONTRACTS_IMPL
        // Undo the contract stack record pushed by the matching Enter.
        if (m_fPushedRecord)
        {
            *m_pClrDebugState = m_oldClrDebugState;
        }
        // Make sure that we're using the version of this template that matches the
        // invariant setup in EnterInternal{Coop|Preemp}{_HackNoThread}
        _ASSERTE(!!THREAD_EXISTS == m_fThreadMustExist);
#endif

        if (m_WasCoop)
        {
            // m_WasCoop is only TRUE if we've already verified there's an EE thread.
            BEGIN_GETTHREAD_ALLOWED;

            _ASSERTE(m_Thread != NULL);  // Cannot switch to cooperative with no thread
            if (!m_Thread->PreemptiveGCDisabled())
                m_Thread->DisablePreemptiveGC();

            END_GETTHREAD_ALLOWED;
        }
        else
        {
            // Either we initialized m_Thread explicitly with GetThread() in the
            // constructor, or our caller (instantiator of GCHolder) called our constructor
            // with GetThread() (which we already asserted in the constructor)
            // (i.e., m_Thread == GetThread()).  Also, note that if THREAD_EXISTS,
            // then m_Thread must be non-null (as it's == GetThread()).  So the
            // "if" below looks a little hokey since we're checking for either condition.
            // But the template param THREAD_EXISTS allows us to statically early-out
            // when it's TRUE, so we check it for perf.
            if (THREAD_EXISTS || m_Thread != NULL)
            {
                BEGIN_GETTHREAD_ALLOWED;
                if (m_Thread->PreemptiveGCDisabled())
                    m_Thread->EnablePreemptiveGC();
                END_GETTHREAD_ALLOWED;
            }
        }

        // If we have a thread then we assert that we ended up in the same state
        // which we started in.
        if (THREAD_EXISTS || m_Thread != NULL)
        {
            _ASSERTE(!!m_WasCoop == !!(m_Thread->PreemptiveGCDisabled()));
        }
    }

    // NOTE: The rest of these methods are all FORCEINLINE so that the uses where 'conditional==true'
    // can have the if-checks removed by the compiler.  The callers are just the corresponding methods
    // in the derived types, not all sites that use GC holders.


    // This is broken - there is a potential race with the GC thread.  It is currently
    // used for a few cases where (a) we potentially haven't started up the EE yet, or
    // (b) we are on a "special thread".  We need a real solution here though.
    FORCEINLINE_NONDEBUG
    void EnterInternalCoop_HackNoThread(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
    {
        GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Coop);

        // Unlike EnterInternalCoop, tolerates a NULL thread (hence "HackNoThread").
        m_Thread = GetThreadNULLOk();

#ifdef ENABLE_CONTRACTS_IMPL
        m_fThreadMustExist = false;
#endif // ENABLE_CONTRACTS_IMPL

        if (m_Thread != NULL)
        {
            BEGIN_GETTHREAD_ALLOWED;
            m_WasCoop = m_Thread->PreemptiveGCDisabled();

            if (conditional && !m_WasCoop)
            {
                m_Thread->DisablePreemptiveGC();
                _ASSERTE(m_Thread->PreemptiveGCDisabled());
            }
            END_GETTHREAD_ALLOWED;
        }
        else
        {
            // No EE thread: nothing to switch; Pop will be a no-op too.
            m_WasCoop = FALSE;
        }
    }

    // Switches to preemptive mode (when 'conditional'), tolerating a NULL thread.
    FORCEINLINE_NONDEBUG
    void EnterInternalPreemp(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
    {
        GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Preempt);

        m_Thread = GetThreadNULLOk();

#ifdef ENABLE_CONTRACTS_IMPL
        m_fThreadMustExist = false;
        if (m_Thread != NULL && conditional)
        {
            BEGIN_GETTHREAD_ALLOWED;
            // Coop->preemptive->coop round trips are illegal inside GC_NOTRIGGER.
            GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(m_Thread);
            END_GETTHREAD_ALLOWED;
        }
#endif  // ENABLE_CONTRACTS_IMPL

        if (m_Thread != NULL)
        {
            BEGIN_GETTHREAD_ALLOWED;
            m_WasCoop = m_Thread->PreemptiveGCDisabled();

            if (conditional && m_WasCoop)
            {
                m_Thread->EnablePreemptiveGC();
                _ASSERTE(!m_Thread->PreemptiveGCDisabled());
            }
            END_GETTHREAD_ALLOWED;
        }
        else
        {
            m_WasCoop = FALSE;
        }
    }

    // Switches to cooperative mode (when 'conditional').  Perf variant:
    // requires a valid, already-set-up thread (no NULL checks).
    FORCEINLINE_NONDEBUG
    void EnterInternalCoop(Thread *pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
    {
        // This is the perf version. So we deliberately restrict the calls
        // to already setup threads to avoid the null checks and GetThread call
        _ASSERTE(pThread && (pThread == GetThread()));
#ifdef ENABLE_CONTRACTS_IMPL
        m_fThreadMustExist = true;
#endif // ENABLE_CONTRACTS_IMPL

        GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Coop);

        m_Thread = pThread;
        m_WasCoop = m_Thread->PreemptiveGCDisabled();
        if (conditional && !m_WasCoop)
        {
            m_Thread->DisablePreemptiveGC();
            _ASSERTE(m_Thread->PreemptiveGCDisabled());
        }
    }

    // Switches to preemptive mode (when 'conditional').  Perf variant taking
    // the thread explicitly; THREAD_EXISTS==TRUE statically promises pThread
    // is non-NULL so the NULL checks compile away.
    template <BOOL THREAD_EXISTS>
    FORCEINLINE_NONDEBUG
    void EnterInternalPreemp(Thread *pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
    {
        // This is the perf version. So we deliberately restrict the calls
        // to already setup threads to avoid the null checks and GetThread call
        _ASSERTE(!THREAD_EXISTS || (pThread && (pThread == GetThread())));
#ifdef ENABLE_CONTRACTS_IMPL
        m_fThreadMustExist = !!THREAD_EXISTS;
#endif // ENABLE_CONTRACTS_IMPL

        GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Preempt);

        m_Thread = pThread;

        if (THREAD_EXISTS || (m_Thread != NULL))
        {
            GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(m_Thread);
            m_WasCoop = m_Thread->PreemptiveGCDisabled();
            if (conditional && m_WasCoop)
            {
                m_Thread->EnablePreemptiveGC();
                _ASSERTE(!m_Thread->PreemptiveGCDisabled());
            }
        }
        else
        {
            m_WasCoop = FALSE;
        }
    }

private:
    Thread * m_Thread;          // thread captured at Enter; may be NULL for the non-perf variants
    BOOL     m_WasCoop;         // This is BOOL and not 'bool' because PreemptiveGCDisabled returns BOOL,
                                // so the codegen is better if we don't have to convert to 'bool'.
#ifdef ENABLE_CONTRACTS_IMPL
    bool                m_fThreadMustExist;     // used to validate that the proper Pop<THREAD_EXISTS> method is used
    bool                m_fPushedRecord;        // whether a contract stack record was pushed (and must be popped)
    ClrDebugState       m_oldClrDebugState;     // saved debug state restored in PopInternal
    ClrDebugState      *m_pClrDebugState;
    ContractStackRecord m_ContractStackRecord;  // links this holder into the contract stack trace
#endif
};
|---|
| 5890 |  | 
|---|
// Cooperative-mode holder without a destructor: the caller must pair every
// Enter with an explicit Leave.  Requires a valid EE thread.
class GCCoopNoDtor : public GCHolderBase
{
public:
    // Switch to cooperative mode when 'conditional' is true; always records
    // the current mode so Leave can restore it.
    DEBUG_NOINLINE
    void Enter(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        WRAPPER_NO_CONTRACT;
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_COOPERATIVE;
        }
        // The thread must be non-null to enter MODE_COOP
        this->EnterInternalCoop(GetThread(), conditional GCHOLDER_CONTRACT_ARGS_NoDtor);
    }

    // Restore the mode captured by Enter.
    DEBUG_NOINLINE
    void Leave()
    {
        WRAPPER_NO_CONTRACT;
        SCAN_SCOPE_BEGIN;
        this->PopInternal<TRUE>();  // Thread must be non-NULL
    }
};
|---|
| 5915 |  | 
|---|
// Preemptive-mode holder without a destructor: the caller must pair every
// Enter with an explicit Leave.  Tolerates a NULL thread.
class GCPreempNoDtor : public GCHolderBase
{
public:
    // Switch to preemptive mode when 'conditional' is true, looking up the
    // current thread (which may be NULL).
    DEBUG_NOINLINE
    void Enter(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_PREEMPTIVE;
        }

        this->EnterInternalPreemp(conditional GCHOLDER_CONTRACT_ARGS_NoDtor);
    }

    // Same as above, but the caller supplies the (possibly NULL) thread,
    // avoiding the GetThread lookup.
    DEBUG_NOINLINE
    void Enter(Thread * pThreadNullOk, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_PREEMPTIVE;
        }

        this->EnterInternalPreemp<FALSE>( // Thread may be NULL
            pThreadNullOk, conditional GCHOLDER_CONTRACT_ARGS_NoDtor);
    }

    // Restore the mode captured by Enter.
    DEBUG_NOINLINE
    void Leave()
    {
        SCAN_SCOPE_END;
        this->PopInternal<FALSE>(); // Thread may be NULL
    }
};
|---|
| 5951 |  | 
|---|
// RAII holder that switches the current thread to cooperative mode for the
// lifetime of the object.  Requires a valid EE thread.
class GCCoop : public GCHolderBase
{
public:
    // Unconditional switch to cooperative mode.
    DEBUG_NOINLINE
    GCCoop(GCHOLDER_DECLARE_CONTRACT_ARGS_BARE)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_MODE_COOPERATIVE;

        // The thread must be non-null to enter MODE_COOP
        this->EnterInternalCoop(GetThread(), true GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Switch to cooperative mode only when 'conditional' is true; the current
    // mode is recorded either way so the destructor restores it.
    DEBUG_NOINLINE
    GCCoop(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_COOPERATIVE;
        }

        // The thread must be non-null to enter MODE_COOP
        this->EnterInternalCoop(GetThread(), conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Restore the mode captured at construction.
    DEBUG_NOINLINE
    ~GCCoop()
    {
        SCAN_SCOPE_END;
        this->PopInternal<TRUE>();  // Thread must be non-NULL
    }
};
|---|
| 5985 |  | 
|---|
| 5986 | // This is broken - there is a potential race with the GC thread.  It is currently | 
|---|
| 5987 | // used for a few cases where (a) we potentially haven't started up the EE yet, or | 
|---|
| 5988 | // (b) we are on a "special thread".  We need a real solution here though. | 
|---|
// RAII cooperative-mode holder that tolerates a NULL thread (see the warning
// above: this is racy with the GC thread and exists only for EE-startup and
// "special thread" scenarios).
class GCCoopHackNoThread : public GCHolderBase
{
public:
    // Unconditional switch to cooperative mode (no-op if there is no thread).
    DEBUG_NOINLINE
    GCCoopHackNoThread(GCHOLDER_DECLARE_CONTRACT_ARGS_BARE)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_MODE_COOPERATIVE;

        this->EnterInternalCoop_HackNoThread(true GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Conditional variant; current mode is recorded either way.
    DEBUG_NOINLINE
    GCCoopHackNoThread(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_COOPERATIVE;
        }

        this->EnterInternalCoop_HackNoThread(conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Restore the mode captured at construction.
    DEBUG_NOINLINE
    ~GCCoopHackNoThread()
    {
        SCAN_SCOPE_END;
        this->PopInternal<FALSE>();  // Thread might be NULL
    }
};
|---|
| 6020 |  | 
|---|
// RAII cooperative-mode holder taking the thread explicitly (perf variant:
// skips the GetThread lookup).  The thread must be non-NULL and current.
class GCCoopThreadExists : public GCHolderBase
{
public:
    // Unconditional switch to cooperative mode on pThread.
    DEBUG_NOINLINE
    GCCoopThreadExists(Thread * pThread GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_MODE_COOPERATIVE;

        this->EnterInternalCoop(pThread, true GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Conditional variant; current mode is recorded either way.
    DEBUG_NOINLINE
    GCCoopThreadExists(Thread * pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_COOPERATIVE;
        }

        this->EnterInternalCoop(pThread, conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Restore the mode captured at construction.
    DEBUG_NOINLINE
    ~GCCoopThreadExists()
    {
        SCAN_SCOPE_END;
        this->PopInternal<TRUE>();  // Thread must be non-NULL
    }
};
|---|
| 6052 |  | 
|---|
// Holder that enters preemptive GC mode for the current thread.  Unlike the
// *ThreadExists variants, the thread is located internally and is allowed to
// be NULL (see PopInternal<FALSE> in the destructor).
class GCPreemp : public GCHolderBase
{
public:
    // Unconditionally enter preemptive mode.
    DEBUG_NOINLINE
    GCPreemp(GCHOLDER_DECLARE_CONTRACT_ARGS_BARE)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_MODE_PREEMPTIVE;

        this->EnterInternalPreemp(true GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Enter preemptive mode only if 'conditional' is true; when false the
    // holder is a no-op and the static mode annotation is skipped.
    DEBUG_NOINLINE
    GCPreemp(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_PREEMPTIVE;
        }

        this->EnterInternalPreemp(conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Restore the previous GC mode on scope exit.
    DEBUG_NOINLINE
    ~GCPreemp()
    {
        SCAN_SCOPE_END;
        this->PopInternal<FALSE>(); // Thread may be NULL
    }
};
|---|
| 6084 |  | 
|---|
// Holder that enters preemptive GC mode for a thread the caller has already
// looked up.  The Thread* must be non-NULL (note the EnterInternalPreemp<TRUE>
// and PopInternal<TRUE> template arguments, versus GCPreemp above which
// tolerates a NULL thread).
class GCPreempThreadExists : public GCHolderBase
{
public:
    // Unconditionally enter preemptive mode for pThread.
    DEBUG_NOINLINE
    GCPreempThreadExists(Thread * pThread GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_MODE_PREEMPTIVE;

        this->EnterInternalPreemp<TRUE>(    // Thread must be non-NULL
            pThread, true GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Enter preemptive mode only if 'conditional' is true.
    DEBUG_NOINLINE
    GCPreempThreadExists(Thread * pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
    {
        SCAN_SCOPE_BEGIN;
        if (conditional)
        {
            STATIC_CONTRACT_MODE_PREEMPTIVE;
        }

        this->EnterInternalPreemp<TRUE>(    // Thread must be non-NULL
            pThread, conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
    }

    // Restore the previous GC mode on scope exit.
    DEBUG_NOINLINE
    ~GCPreempThreadExists()
    {
        SCAN_SCOPE_END;
        this->PopInternal<TRUE>();  // Thread must be non-NULL
    }
};
|---|
| 6118 | #endif // DACCESS_COMPILE | 
|---|
| 6119 |  | 
|---|
| 6120 |  | 
|---|
| 6121 | // -------------------------------------------------------------------------------- | 
|---|
| 6122 | // GCAssert is used to implement the assert GCX_ macros. Usage is similar to GCHolder. | 
|---|
| 6123 | // | 
|---|
| 6124 | // GCAsserting for preemptive mode automatically passes on unmanaged threads. | 
|---|
| 6125 | // | 
|---|
| 6126 | // Note that the assert is "2 sided"; it happens on entering and on leaving scope, to | 
|---|
| 6127 | // help ensure mode integrity. | 
|---|
| 6128 | // | 
|---|
| 6129 | // GCAssert is a noop in a free build | 
|---|
| 6130 | // -------------------------------------------------------------------------------- | 
|---|
| 6131 |  | 
|---|
// Two-sided GC-mode assertion used by the assert GCX_ macros (see comment
// above).  BeginGCAssert checks the mode on scope entry; its body is defined
// out-of-line.  EndGCAssert only closes the static-contract scan scope.
template<BOOL COOPERATIVE>
class GCAssert
{
  public:
    // Assert the thread is in the mode selected by COOPERATIVE (out-of-line).
    DEBUG_NOINLINE void BeginGCAssert();
    DEBUG_NOINLINE void EndGCAssert()
    {
        SCAN_SCOPE_END;
    }
};
|---|
| 6142 |  | 
|---|
// RAII form of GCAssert: intended to verify the GC mode both when the scope
// is entered (constructor, out-of-line) and when it is left (destructor).
// Compiles away entirely outside _DEBUG_IMPL builds.
template<BOOL COOPERATIVE>
class AutoCleanupGCAssert
{
#ifdef _DEBUG_IMPL
public:
    DEBUG_NOINLINE AutoCleanupGCAssert();

    DEBUG_NOINLINE ~AutoCleanupGCAssert()
    {
        SCAN_SCOPE_END;
        WRAPPER_NO_CONTRACT;
        // This is currently disabled; we currently have a lot of code which doesn't
        // back out the GC mode properly (instead relying on the EX_TRY macros.)
        //
        // @todo enable this when we remove raw GC mode switching.
#if 0
        DoCheck();
#endif
    }

private:
    // Verifies the current thread's mode matches the COOPERATIVE template
    // parameter.  For preemptive checks, a NULL (unmanaged) thread passes.
    FORCEINLINE void DoCheck()
    {
        WRAPPER_NO_CONTRACT;
        Thread *pThread = GetThread();
        if (COOPERATIVE)
        {
            _ASSERTE(pThread != NULL);
            _ASSERTE(pThread->PreemptiveGCDisabled());
        }
        else
        {
            _ASSERTE(pThread == NULL || !(pThread->PreemptiveGCDisabled()));
        }
    }
#endif
};
|---|
| 6180 |  | 
|---|
| 6181 |  | 
|---|
| 6182 | // -------------------------------------------------------------------------------- | 
|---|
| 6183 | // GCForbid is used to add ForbidGC semantics to the current GC mode.  Note that | 
|---|
| 6184 | // it requires the thread to be in cooperative mode already. | 
|---|
| 6185 | // | 
|---|
| 6186 | // GCForbid is a noop in a free build | 
|---|
| 6187 | // -------------------------------------------------------------------------------- | 
|---|
| 6188 | #ifndef DACCESS_COMPILE | 
|---|
// Implements GCX_FORBID: asserts cooperative mode (via the AutoCleanupGCAssert
// base) and marks a region in which triggering a GC is illegal.  It saves the
// ClrDebugState, clears any GCViolation, calls Thread::BeginForbidGC, and
// links a contract stack record for diagnostics; the destructor undoes all of
// this.  Compiles to nothing unless ENABLE_CONTRACTS_IMPL is defined.
class GCForbid : AutoCleanupGCAssert<TRUE>
{
#ifdef ENABLE_CONTRACTS_IMPL
public:
    // Conditional form: when fConditional is FALSE the holder does nothing.
    DEBUG_NOINLINE GCForbid(BOOL fConditional, const char *szFunction, const char *szFile, int lineNum)
    {
        SCAN_SCOPE_BEGIN;
        if (fConditional)
        {
            STATIC_CONTRACT_MODE_COOPERATIVE;
            STATIC_CONTRACT_GC_NOTRIGGER;
        }

        m_fConditional = fConditional;
        if (m_fConditional)
        {
            Thread *pThread = GetThread();
            // Fall back to the process-wide debug state when there is no Thread.
            m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
            m_oldClrDebugState = *m_pClrDebugState;

            m_pClrDebugState->ViolationMaskReset( GCViolation );

            // NOTE(review): unlike the m_pClrDebugState lookup above, this
            // dereferences GetThread() unconditionally — presumably a Thread
            // always exists here because cooperative mode is required; confirm.
            GetThread()->BeginForbidGC(szFile, lineNum);

            // Record this construct on the contract stack for failure dumps.
            m_ContractStackRecord.m_szFunction = szFunction;
            m_ContractStackRecord.m_szFile     = (char*)szFile;
            m_ContractStackRecord.m_lineNum    = lineNum;
            m_ContractStackRecord.m_testmask   = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
            m_ContractStackRecord.m_construct  = "GCX_FORBID";
            m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
        }
    }

    // Unconditional form; identical to the conditional form with TRUE.
    DEBUG_NOINLINE GCForbid(const char *szFunction, const char *szFile, int lineNum)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_MODE_COOPERATIVE;
        STATIC_CONTRACT_GC_NOTRIGGER;

        m_fConditional = TRUE;

        Thread *pThread = GetThread();
        // Fall back to the process-wide debug state when there is no Thread.
        m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
        m_oldClrDebugState = *m_pClrDebugState;

        m_pClrDebugState->ViolationMaskReset( GCViolation );

        // NOTE(review): see the conditional constructor — unconditional
        // GetThread() dereference after a NULL-tolerant lookup above.
        GetThread()->BeginForbidGC(szFile, lineNum);

        // Record this construct on the contract stack for failure dumps.
        m_ContractStackRecord.m_szFunction = szFunction;
        m_ContractStackRecord.m_szFile     = (char*)szFile;
        m_ContractStackRecord.m_lineNum    = lineNum;
        m_ContractStackRecord.m_testmask   = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
        m_ContractStackRecord.m_construct  = "GCX_FORBID";
        m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
    }

    // Ends the forbid region and restores the saved debug state (which also
    // unlinks the contract stack record captured by the constructor).
    DEBUG_NOINLINE ~GCForbid()
    {
        SCAN_SCOPE_END;

        if (m_fConditional)
        {
            GetThread()->EndForbidGC();
            *m_pClrDebugState = m_oldClrDebugState;
        }
    }

  private:
    BOOL                m_fConditional;     // was the holder armed?
    ClrDebugState      *m_pClrDebugState;   // state being modified
    ClrDebugState       m_oldClrDebugState; // snapshot to restore
    ContractStackRecord m_ContractStackRecord;
#endif  // ENABLE_CONTRACTS_IMPL
};
|---|
| 6264 | #endif // !DACCESS_COMPILE | 
|---|
| 6265 |  | 
|---|
| 6266 | // -------------------------------------------------------------------------------- | 
|---|
| 6267 | // GCNoTrigger is used to add NoTriggerGC semantics to the current GC mode.  Unlike | 
|---|
| 6268 | // GCForbid, it does not require a thread to be in cooperative mode. | 
|---|
| 6269 | // | 
|---|
| 6270 | // GCNoTrigger is a noop in a free build | 
|---|
| 6271 | // -------------------------------------------------------------------------------- | 
|---|
| 6272 | #ifndef DACCESS_COMPILE | 
|---|
// Implements GCX_NOTRIGGER: marks a region in which triggering a GC is
// illegal.  Unlike GCForbid it does not require cooperative mode and it
// tolerates the absence of a Thread (BeginNoTriggerGC/EndNoTriggerGC are only
// called when a Thread exists).  Compiles to nothing unless
// ENABLE_CONTRACTS_IMPL is defined.
class GCNoTrigger
{
#ifdef ENABLE_CONTRACTS_IMPL
public:
    // Conditional form: when fConditional is FALSE the holder does nothing.
    DEBUG_NOINLINE GCNoTrigger(BOOL fConditional, const char *szFunction, const char *szFile, int lineNum)
    {
        SCAN_SCOPE_BEGIN;
        if (fConditional)
        {
            STATIC_CONTRACT_GC_NOTRIGGER;
        }

        m_fConditional = fConditional;

        if (m_fConditional)
        {
            Thread * pThread = GetThreadNULLOk();
            // Fall back to the process-wide debug state when there is no Thread.
            m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
            m_oldClrDebugState = *m_pClrDebugState;

            m_pClrDebugState->ViolationMaskReset( GCViolation );

            if (pThread != NULL)
            {
                pThread->BeginNoTriggerGC(szFile, lineNum);
            }

            // Record this construct on the contract stack for failure dumps.
            m_ContractStackRecord.m_szFunction = szFunction;
            m_ContractStackRecord.m_szFile     = (char*)szFile;
            m_ContractStackRecord.m_lineNum    = lineNum;
            m_ContractStackRecord.m_testmask   = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
            m_ContractStackRecord.m_construct  = "GCX_NOTRIGGER";
            m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
        }
    }

    // Unconditional form; identical to the conditional form with TRUE.
    DEBUG_NOINLINE GCNoTrigger(const char *szFunction, const char *szFile, int lineNum)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_GC_NOTRIGGER;

        m_fConditional = TRUE;

        Thread * pThread = GetThreadNULLOk();
        // Fall back to the process-wide debug state when there is no Thread.
        m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
        m_oldClrDebugState = *m_pClrDebugState;

        m_pClrDebugState->ViolationMaskReset( GCViolation );

        if (pThread != NULL)
        {
            pThread->BeginNoTriggerGC(szFile, lineNum);
        }

        // Record this construct on the contract stack for failure dumps.
        m_ContractStackRecord.m_szFunction = szFunction;
        m_ContractStackRecord.m_szFile     = (char*)szFile;
        m_ContractStackRecord.m_lineNum    = lineNum;
        m_ContractStackRecord.m_testmask   = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
        m_ContractStackRecord.m_construct  = "GCX_NOTRIGGER";
        m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
    }

    // Ends the no-trigger region (if a Thread exists) and restores the saved
    // debug state, which also unlinks the contract stack record.
    DEBUG_NOINLINE ~GCNoTrigger()
    {
        SCAN_SCOPE_END;

        if (m_fConditional)
        {
            Thread * pThread = GetThreadNULLOk();
            if (pThread)
            {
               pThread->EndNoTriggerGC();
            }
            *m_pClrDebugState = m_oldClrDebugState;
        }
    }

 private:
    BOOL m_fConditional;                    // was the holder armed?
    ClrDebugState      *m_pClrDebugState;   // state being modified
    ClrDebugState       m_oldClrDebugState; // snapshot to restore
    ContractStackRecord m_ContractStackRecord;
#endif  // ENABLE_CONTRACTS_IMPL
};
|---|
| 6357 | #endif //!DACCESS_COMPILE | 
|---|
| 6358 |  | 
|---|
| 6359 | class CoopTransitionHolder | 
|---|
| 6360 | { | 
|---|
| 6361 | Frame * m_pFrame; | 
|---|
| 6362 |  | 
|---|
| 6363 | public: | 
|---|
| 6364 | CoopTransitionHolder(Thread * pThread) | 
|---|
| 6365 | : m_pFrame(pThread->m_pFrame) | 
|---|
| 6366 | { | 
|---|
| 6367 | LIMITED_METHOD_CONTRACT; | 
|---|
| 6368 | } | 
|---|
| 6369 |  | 
|---|
| 6370 | ~CoopTransitionHolder() | 
|---|
| 6371 | { | 
|---|
| 6372 | WRAPPER_NO_CONTRACT; | 
|---|
| 6373 | if (m_pFrame != NULL) | 
|---|
| 6374 | COMPlusCooperativeTransitionHandler(m_pFrame); | 
|---|
| 6375 | } | 
|---|
| 6376 |  | 
|---|
| 6377 | void SuppressRelease() | 
|---|
| 6378 | { | 
|---|
| 6379 | LIMITED_METHOD_CONTRACT; | 
|---|
| 6380 | // FRAME_TOP and NULL must be distinct values. | 
|---|
| 6381 | // static_assert_no_msg(FRAME_TOP_VALUE != NULL); | 
|---|
| 6382 | m_pFrame = NULL; | 
|---|
| 6383 | } | 
|---|
| 6384 | }; | 
|---|
| 6385 |  | 
|---|
| 6386 | // -------------------------------------------------------------------------------- | 
|---|
| 6387 | // GCX macros - see util.hpp | 
|---|
| 6388 | // -------------------------------------------------------------------------------- | 
|---|
| 6389 |  | 
|---|
| 6390 | #ifdef _DEBUG_IMPL | 
|---|
| 6391 |  | 
|---|
| 6392 | // Normally, any thread we operate on has a Thread block in its TLS.  But there are | 
|---|
| 6393 | // a few special threads we don't normally execute managed code on. | 
|---|
| 6394 | BOOL dbgOnly_IsSpecialEEThread(); | 
|---|
| 6395 | void dbgOnly_IdentifySpecialEEThread(); | 
|---|
| 6396 |  | 
|---|
| 6397 | #ifdef USE_CHECKED_OBJECTREFS | 
|---|
| 6398 | #define ASSERT_PROTECTED(objRef)        Thread::ObjectRefProtected(objRef) | 
|---|
| 6399 | #else | 
|---|
| 6400 | #define ASSERT_PROTECTED(objRef) | 
|---|
| 6401 | #endif | 
|---|
| 6402 |  | 
|---|
| 6403 | #else | 
|---|
| 6404 |  | 
|---|
| 6405 | #define ASSERT_PROTECTED(objRef) | 
|---|
| 6406 |  | 
|---|
| 6407 | #endif | 
|---|
| 6408 |  | 
|---|
| 6409 |  | 
|---|
| 6410 | #ifdef ENABLE_CONTRACTS_IMPL | 
|---|
| 6411 |  | 
|---|
| 6412 | #define BEGINFORBIDGC() {if (GetThreadNULLOk() != NULL) GetThreadNULLOk()->BeginForbidGC(__FILE__, __LINE__);} | 
|---|
| 6413 | #define ENDFORBIDGC()   {if (GetThreadNULLOk() != NULL) GetThreadNULLOk()->EndForbidGC();} | 
|---|
| 6414 |  | 
|---|
// Scoped escape hatch used inside FCalls: Enter() calls EndForbidGC and
// Leave() calls BeginForbidGC — i.e. the Begin/End pairing is deliberately
// inverted relative to GCForbid, temporarily lifting an enclosing forbid-GC
// region so the code between Enter and Leave may trigger a GC.
class FCallGCCanTrigger
{
public:
    // Lift the forbid-GC state for the current thread (no-op when there is
    // no Thread).
    static DEBUG_NOINLINE void Enter()
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_GC_TRIGGERS;
        Thread * pThread = GetThreadNULLOk();
        if (pThread != NULL)
        {
            Enter(pThread);
        }
    }

    // Overload for callers that already hold the Thread*.
    static DEBUG_NOINLINE void Enter(Thread* pThread)
    {
        SCAN_SCOPE_BEGIN;
        STATIC_CONTRACT_GC_TRIGGERS;
        pThread->EndForbidGC();
    }

    // Re-establish the forbid-GC state (no-op when there is no Thread).
    static DEBUG_NOINLINE void Leave(const char *szFunction, const char *szFile, int lineNum)
    {
        SCAN_SCOPE_END;
        Thread * pThread = GetThreadNULLOk();
        if (pThread != NULL)
        {
            Leave(pThread, szFunction, szFile, lineNum);
        }
    }

    // Overload for callers that already hold the Thread*.
    static DEBUG_NOINLINE void Leave(Thread* pThread, const char *szFunction, const char *szFile, int lineNum)
    {
        SCAN_SCOPE_END;
        pThread->BeginForbidGC(szFile, lineNum);
    }
};
|---|
| 6452 |  | 
|---|
| 6453 | #define TRIGGERSGC_NOSTOMP()  do {                                           \ | 
|---|
| 6454 | ANNOTATION_GC_TRIGGERS;                         \ | 
|---|
| 6455 | Thread* curThread = GetThread();                \ | 
|---|
| 6456 | if(curThread->GCNoTrigger())                    \ | 
|---|
| 6457 | {                                               \ | 
|---|
| 6458 | CONTRACT_ASSERT("TRIGGERSGC found in a GC_NOTRIGGER region.", Contract::GC_NoTrigger, Contract::GC_Mask, __FUNCTION__, __FILE__, __LINE__); \ | 
|---|
| 6459 | }                                               \ | 
|---|
| 6460 | } while(0) | 
|---|
| 6461 |  | 
|---|
| 6462 |  | 
|---|
| 6463 | #define TRIGGERSGC()    do {                                                \ | 
|---|
| 6464 | TRIGGERSGC_NOSTOMP();                           \ | 
|---|
| 6465 | Thread::TriggersGC(GetThread());                \ | 
|---|
| 6466 | } while(0) | 
|---|
| 6467 |  | 
|---|
| 6468 | #else // ENABLE_CONTRACTS_IMPL | 
|---|
| 6469 |  | 
|---|
| 6470 | #define BEGINFORBIDGC() | 
|---|
| 6471 | #define ENDFORBIDGC() | 
|---|
| 6472 | #define TRIGGERSGC_NOSTOMP() ANNOTATION_GC_TRIGGERS | 
|---|
| 6473 | #define TRIGGERSGC() ANNOTATION_GC_TRIGGERS | 
|---|
| 6474 |  | 
|---|
| 6475 | #endif // ENABLE_CONTRACTS_IMPL | 
|---|
| 6476 |  | 
|---|
| 6477 | inline BOOL GC_ON_TRANSITIONS(BOOL val) { | 
|---|
| 6478 | WRAPPER_NO_CONTRACT; | 
|---|
| 6479 | #ifdef _DEBUG | 
|---|
| 6480 | Thread* thread = GetThread(); | 
|---|
| 6481 | if (thread == 0) | 
|---|
| 6482 | return(FALSE); | 
|---|
| 6483 | BOOL ret = thread->m_GCOnTransitionsOK; | 
|---|
| 6484 | thread->m_GCOnTransitionsOK = val; | 
|---|
| 6485 | return(ret); | 
|---|
| 6486 | #else // _DEBUG | 
|---|
| 6487 | return FALSE; | 
|---|
| 6488 | #endif // !_DEBUG | 
|---|
| 6489 | } | 
|---|
| 6490 |  | 
|---|
| 6491 | #ifdef _DEBUG | 
|---|
| 6492 | inline void ENABLESTRESSHEAP() { | 
|---|
| 6493 | WRAPPER_NO_CONTRACT; | 
|---|
| 6494 | Thread * thread = GetThreadNULLOk(); | 
|---|
| 6495 | if (thread) { | 
|---|
| 6496 | thread->EnableStressHeap(); | 
|---|
| 6497 | } | 
|---|
| 6498 | } | 
|---|
| 6499 |  | 
|---|
| 6500 | void CleanStackForFastGCStress (); | 
|---|
| 6501 | #define CLEANSTACKFORFASTGCSTRESS()                                         \ | 
|---|
| 6502 | if (g_pConfig->GetGCStressLevel() && g_pConfig->FastGCStressLevel() > 1) {   \ | 
|---|
| 6503 | CleanStackForFastGCStress ();                                            \ | 
|---|
| 6504 | } | 
|---|
| 6505 |  | 
|---|
| 6506 | #else   // _DEBUG | 
|---|
| 6507 | #define CLEANSTACKFORFASTGCSTRESS() | 
|---|
| 6508 |  | 
|---|
| 6509 | #endif  // _DEBUG | 
|---|
| 6510 |  | 
|---|
| 6511 |  | 
|---|
| 6512 |  | 
|---|
| 6513 |  | 
|---|
| 6514 | inline void DoReleaseCheckpoint(void *checkPointMarker) | 
|---|
| 6515 | { | 
|---|
| 6516 | WRAPPER_NO_CONTRACT; | 
|---|
| 6517 | GetThread()->m_MarshalAlloc.Collapse(checkPointMarker); | 
|---|
| 6518 | } | 
|---|
| 6519 |  | 
|---|
| 6520 |  | 
|---|
| 6521 | // CheckPointHolder : Back out to a checkpoint on the thread allocator. | 
|---|
| 6522 | typedef Holder<void*, DoNothing, DoReleaseCheckpoint> CheckPointHolder; | 
|---|
| 6523 |  | 
|---|
| 6524 |  | 
|---|
| 6525 | #ifdef _DEBUG_IMPL | 
|---|
| 6526 | // Holder for incrementing the ForbidGCLoaderUse counter. | 
|---|
// Holder for incrementing the ForbidGCLoaderUse counter.
// The counter lives in CLR fiber-local storage (TlsIdx_ForbidGCLoaderUseCount)
// and is read by FORBIDGC_LOADER_USE_ENABLED(); while it is non-zero, the
// loader restrictions described in the comment below this class apply.
class GCForbidLoaderUseHolder
{
 public:
    // Bump the per-thread forbid count.
    GCForbidLoaderUseHolder()
    {
        WRAPPER_NO_CONTRACT;
        ClrFlsIncrementValue(TlsIdx_ForbidGCLoaderUseCount, 1);
    }

    // Undo the increment on scope exit.
    ~GCForbidLoaderUseHolder()
    {
        WRAPPER_NO_CONTRACT;
        ClrFlsIncrementValue(TlsIdx_ForbidGCLoaderUseCount, -1);
    }
};
|---|
| 6542 |  | 
|---|
| 6543 | #endif | 
|---|
| 6544 |  | 
|---|
| 6545 | // Declaring this macro turns off the GC_TRIGGERS/THROWS/INJECT_FAULT contract in LoadTypeHandle. | 
|---|
| 6546 | // If you do this, you must restrict your use of the loader only to retrieve TypeHandles | 
|---|
| 6547 | // for types that have already been loaded and resolved. If you fail to observe this restriction, you will | 
|---|
| 6548 | // reach a GC_TRIGGERS point somewhere in the loader and assert. If you're lucky, that is. | 
|---|
| 6549 | // (If you're not lucky, you will introduce a GC hole.) | 
|---|
| 6550 | // | 
|---|
| 6551 | // The main user of this workaround is the GC stack crawl. It must parse signatures and retrieve | 
|---|
| 6552 | // type handles for valuetypes in method parameters. Some other uses have creeped into the codebase - | 
|---|
| 6553 | // some justified, others not. | 
|---|
| 6554 | // | 
|---|
| 6555 | // ENABLE_FORBID_GC_LOADER is *not* the same as using tokenNotToLoad to suppress loading. | 
|---|
| 6556 | // You should use tokenNotToLoad in preference to ENABLE_FORBID. ENABLE_FORBID is a fragile | 
|---|
| 6557 | // workaround and places enormous responsibilities on the caller. The only reason it exists at all | 
|---|
| 6558 | // is that the GC stack crawl simply cannot tolerate exceptions or new GC's - that's an immovable | 
|---|
| 6559 | // rock we're faced with. | 
|---|
| 6560 | // | 
|---|
| 6561 | // The key differences are: | 
|---|
| 6562 | // | 
|---|
| 6563 | //      ENABLE_FORBID                                   tokenNotToLoad | 
|---|
| 6564 | //      --------------------------------------------    ------------------------------------------------------ | 
|---|
| 6565 | //      caller must guarantee the type is already       caller does not have to guarantee the type | 
|---|
| 6566 | //        loaded - otherwise, we will crash badly.        is already loaded. | 
|---|
| 6567 | // | 
|---|
| 6568 | //      loader will not throw, trigger gc or OOM        loader may throw, trigger GC or OOM. | 
|---|
| 6569 | // | 
|---|
| 6570 | // | 
|---|
| 6571 | // | 
|---|
| 6572 | #ifdef ENABLE_CONTRACTS_IMPL | 
|---|
| 6573 | #define ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE()    GCForbidLoaderUseHolder __gcfluh; \ | 
|---|
| 6574 | CANNOTTHROWCOMPLUSEXCEPTION();  \ | 
|---|
| 6575 | GCX_NOTRIGGER(); \ | 
|---|
| 6576 | FAULT_FORBID(); | 
|---|
#else   // ENABLE_CONTRACTS_IMPL
#define ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE()    ;
#endif  // ENABLE_CONTRACTS_IMPL
|---|
| 6580 | // This macro lets us define a conditional CONTRACT for the GC_TRIGGERS behavior. | 
|---|
| 6581 | // This is for the benefit of a select group of callers that use the loader | 
|---|
| 6582 | // in ForbidGC mode strictly to retrieve existing TypeHandles. The reason | 
|---|
| 6583 | // we use a threadstate rather than an extra parameter is that these annoying | 
|---|
| 6584 | // callers call the loader through intermediaries (MetaSig) and it proved to be too | 
|---|
| 6585 | // cumbersome to pass this state down through all those callers. | 
|---|
| 6586 | // | 
|---|
| 6587 | // Don't make GC_TRIGGERS conditional just because your function ends up calling | 
|---|
| 6588 | // LoadTypeHandle indirectly. We don't want to proliferate conditonal contracts more | 
|---|
| 6589 | // than necessary so declare such functions as GC_TRIGGERS until the need | 
|---|
| 6590 | // for the conditional contract is actually proven through code inspection or | 
|---|
| 6591 | // coverage. | 
|---|
| 6592 | #if defined(DACCESS_COMPILE) | 
|---|
| 6593 |  | 
|---|
| 6594 | // Disable (<non-zero constant> || <expression>) is always a non-zero constant. | 
|---|
| 6595 | // <expression> is never evaluated and might have side effects, because | 
|---|
| 6596 | // FORBIDGC_LOADER_USE_ENABLED is used in that pattern and additionally the rule | 
|---|
| 6597 | // has little value. | 
|---|
| 6598 | #ifdef _PREFAST_ | 
|---|
| 6599 | #pragma warning(disable:6286) | 
|---|
| 6600 | #endif | 
|---|
| 6601 | #define FORBIDGC_LOADER_USE_ENABLED() true | 
|---|
| 6602 |  | 
|---|
| 6603 | #else // DACCESS_COMPILE | 
|---|
| 6604 | #if defined (_DEBUG_IMPL) || defined(_PREFAST_) | 
|---|
| 6605 | #ifndef DACCESS_COMPILE | 
|---|
| 6606 | #define FORBIDGC_LOADER_USE_ENABLED() (ClrFlsGetValue(TlsIdx_ForbidGCLoaderUseCount)) | 
|---|
| 6607 | #else | 
|---|
| 6608 | #define FORBIDGC_LOADER_USE_ENABLED() TRUE | 
|---|
| 6609 | #endif | 
|---|
| 6610 | #else   // _DEBUG_IMPL | 
|---|
| 6611 |  | 
|---|
| 6612 | // If you got an error about FORBIDGC_LOADER_USE_ENABLED being undefined, it's because you tried | 
|---|
| 6613 | // to use this predicate in a free build outside of a CONTRACT or ASSERT. | 
|---|
| 6614 | // | 
|---|
| 6615 | #define FORBIDGC_LOADER_USE_ENABLED() (sizeof(YouCannotUseThisHere) != 0) | 
|---|
| 6616 | #endif  // _DEBUG_IMPL | 
|---|
| 6617 | #endif // DACCESS_COMPILE | 
|---|
| 6618 |  | 
|---|
| 6619 | #ifdef FEATURE_STACK_PROBE | 
|---|
| 6620 | #ifdef _DEBUG_IMPL | 
|---|
// Throws a stack-overflow exception, unless we are inside a forbid-GC-loader
// region, where throwing is illegal — in that case, assert instead.
inline void NO_FORBIDGC_LOADER_USE_ThrowSO()
{
    WRAPPER_NO_CONTRACT;
    if (FORBIDGC_LOADER_USE_ENABLED())
    {
        // If you are hitting this assert, maybe a failure was injected at a
        // place where it won't occur in a real-world scenario (see VSW 397871).
        // Then again, maybe it's a bug at the place where
        // FORBIDGC_LOADER_USE_ENABLED was set.
        _ASSERTE(! "Unexpected SO, please read the comment");
    }
    else
        COMPlusThrowSO();
}
|---|
| 6634 | #else | 
|---|
// Non-debug build: no forbid-GC-loader bookkeeping exists, so simply throw.
inline void NO_FORBIDGC_LOADER_USE_ThrowSO()
{
    COMPlusThrowSO();
}
|---|
| 6639 | #endif | 
|---|
| 6640 | #endif | 
|---|
| 6641 |  | 
|---|
| 6642 | // There is an MDA which can detect illegal reentrancy into the CLR.  For instance, if you call managed | 
|---|
| 6643 | // code from a native vectored exception handler, this might cause a reverse PInvoke to occur.  But if the | 
|---|
| 6644 | // exception was triggered from code that was executing in cooperative GC mode, we now have GC holes and | 
|---|
| 6645 | // general corruption. | 
|---|
| 6646 | BOOL HasIllegalReentrancy(); | 
|---|
| 6647 |  | 
|---|
| 6648 | // | 
|---|
| 6649 | // _pThread:        (Thread*)       current Thread | 
|---|
| 6650 | // _pCurrDomain:    (AppDomain*)    current AppDomain | 
|---|
| 6651 | // _pDestDomain:    (AppDomain*)    AppDomain to transition to | 
|---|
| 6652 | // _predicate_expr: (bool)          Expression to predicate the transition.  If this is true, we transition, | 
|---|
| 6653 | //                                  otherwise we don't.  WARNING : if you change this macro, be sure you | 
|---|
| 6654 | //                                  guarantee that this macro argument is only evaluated once. | 
|---|
| 6655 | // | 
|---|
| 6656 |  | 
|---|
| 6657 | // | 
|---|
| 6658 | // @TODO: can't we take the transition with a holder? | 
|---|
| 6659 | // | 
|---|
| 6660 | #define ENTER_DOMAIN_SETUPVARS(_pThread, _predicate_expr)                                       \ | 
|---|
| 6661 | {                                                                                               \ | 
|---|
| 6662 | DEBUG_ASSURE_NO_RETURN_BEGIN(DOMAIN)                                                        \ | 
|---|
| 6663 | \ | 
|---|
| 6664 | Thread*     _ctx_trans_pThread          = (_pThread);                                       \ | 
|---|
| 6665 | bool        _ctx_trans_fTransitioned    = false;                                            \ | 
|---|
| 6666 | bool        _ctx_trans_fPredicate       = (_predicate_expr);                                \ | 
|---|
| 6667 | bool        _ctx_trans_fRaiseNeeded     = false;                                            \ | 
|---|
| 6668 | Exception* _ctx_trans_pTargetDomainException=NULL;                   \ | 
|---|
| 6669 | ADID _ctx_trans_pDestDomainId=ADID(0);                                               \ | 
|---|
| 6670 | FrameWithCookie<ContextTransitionFrame> _ctx_trans_Frame;                                                   \ | 
|---|
| 6671 | ContextTransitionFrame* _ctx_trans_pFrame = &_ctx_trans_Frame;                              \ | 
|---|
| 6672 |  | 
|---|
| 6673 | #define ENTER_DOMAIN_SWITCH_CTX_BY_ADID(_pCurrDomainPtr,_pDestDomainId,_bUnsafePoint)           \ | 
|---|
| 6674 | AppDomain* _ctx_trans_pCurrDomain=_pCurrDomainPtr;                                          \ | 
|---|
| 6675 | _ctx_trans_pDestDomainId=(ADID)_pDestDomainId;                                               \ | 
|---|
| 6676 | if (_ctx_trans_fPredicate &&                                                                \ | 
|---|
| 6677 | (_ctx_trans_pCurrDomain==NULL ||                                                        \ | 
|---|
| 6678 | (_ctx_trans_pCurrDomain->GetId() != _ctx_trans_pDestDomainId)))                     \ | 
|---|
| 6679 | {                                                                                           \ | 
|---|
| 6680 | _ctx_trans_fTransitioned = true;                                                        \ | 
|---|
| 6681 | } | 
|---|
| 6682 |  | 
|---|
| 6683 | #define ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_pCurrDomain,_pDestDomain)                             \ | 
|---|
| 6684 | AppDomain* _ctx_trans_pCurrDomain=_pCurrDomain;                                             \ | 
|---|
| 6685 | AppDomain* _ctx_trans_pDestDomain=_pDestDomain;                                             \ | 
|---|
| 6686 | _ctx_trans_pDestDomainId=_ctx_trans_pDestDomain->GetId();                  \ | 
|---|
| 6687 | \ | 
|---|
| 6688 | if (_ctx_trans_fPredicate && (_ctx_trans_pCurrDomain != _ctx_trans_pDestDomain))            \ | 
|---|
| 6689 | {                                                                                           \ | 
|---|
| 6690 | TESTHOOKCALL(AppDomainCanBeUnloaded(_ctx_trans_pDestDomain->GetId().m_dwId,FALSE));        \ | 
|---|
| 6691 | GCX_FORBID();                                                                           \ | 
|---|
| 6692 | \ | 
|---|
| 6693 | _ctx_trans_fTransitioned = true;                                                        \ | 
|---|
| 6694 | } | 
|---|
| 6695 |  | 
|---|
| 6696 |  | 
|---|
| 6697 |  | 
|---|
// Opens the EX_TRY block that guards an AppDomain transition.  Must be paired
// with END_DOMAIN_TRANSITION below, which supplies the matching EX_CATCH /
// EX_END_CATCH and closes the scope opened here.  The "if (true)" exists only
// to work around an unreachable-code warning, as the inline comment notes.
#define ENTER_DOMAIN_SETUP_EH                                                                   \
/* work around unreachable code warning */                                                  \
SCAN_BLOCKMARKER_N(DOMAIN);                                                                 \
if (true) EX_TRY                                                                            \
{                                                                                           \
SCAN_BLOCKMARKER_MARK_N(DOMAIN);                                                        \
LOG((LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): %s\n",                              \
__FUNCTION__, __FILE__, __LINE__,                                                   \
_ctx_trans_fTransitioned ? "ENTERED" : "NOP"));
|---|
| 6707 |  | 
|---|
// Note: we go to preemptive mode before the EX_RETHROW. Going preemptive here is safe,
// since there are many other paths in this macro that toggle the GC mode, too.
|---|
// Closes the EX_TRY opened by ENTER_DOMAIN_SETUP_EH.  On an exception:
//  - if no transition actually happened (_ctx_trans_fTransitioned is false),
//    the exception is rethrown in place, after first ensuring the thread is
//    in preemptive GC mode;
//  - otherwise the exception object and its Watson buckets are captured and
//    _ctx_trans_fRaiseNeeded is set so that, once the catch block has been
//    exited, RaiseCrossContextException re-raises the exception for the
//    original domain using the saved context-transition frame.
#define END_DOMAIN_TRANSITION                                                                   \
TESTHOOKCALL(LeavingAppDomain(::GetAppDomain()->GetId().m_dwId)); \
}                                                                                           \
EX_CATCH                                                                                    \
{                                                                                           \
SCAN_BLOCKMARKER_USE_N(DOMAIN);                                                         \
LOG((LF_EH|LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): exception in flight\n",             \
__FUNCTION__, __FILE__, __LINE__));                                                 \
\
if (!_ctx_trans_fTransitioned)                                                          \
{                                                                                       \
if (_ctx_trans_pThread->PreemptiveGCDisabled())                                     \
{                                                                                   \
_ctx_trans_pThread->EnablePreemptiveGC();                                       \
}                                                                                   \
\
EX_RETHROW;                                                                         \
}                                                                                       \
\
\
_ctx_trans_pTargetDomainException=EXTRACT_EXCEPTION();                                  \
\
/* Save Watson buckets before the exception object is changed */                        \
CAPTURE_BUCKETS_AT_TRANSITION(_ctx_trans_pThread, GET_THROWABLE());                     \
\
_ctx_trans_fRaiseNeeded = true;                                                         \
SCAN_BLOCKMARKER_END_USE_N(DOMAIN);                                                     \
}                                                                                           \
/* SwallowAllExceptions is fine because we don't get to this point */                       \
/* unless fRaiseNeeded = true or no exception was thrown */                                 \
EX_END_CATCH(SwallowAllExceptions);                                                         \
\
if (_ctx_trans_fRaiseNeeded)                                                                \
{                                                                                           \
SCAN_BLOCKMARKER_USE_N(DOMAIN);                                                        \
LOG((LF_EH, LL_INFO1000, "RaiseCrossContextException(%s, %s, %d)\n",                    \
__FUNCTION__, __FILE__, __LINE__));                                                 \
_ctx_trans_pThread->RaiseCrossContextException(_ctx_trans_pTargetDomainException, _ctx_trans_pFrame);                       \
}                                                                                           \
\
LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n",                                      \
__FUNCTION__, __FILE__, __LINE__));                                                 \
\
TESTHOOKCALL(LeftAppDomain(_ctx_trans_pDestDomainId.m_dwId));                                           \
DEBUG_ASSURE_NO_RETURN_END(DOMAIN)                                                          \
}
|---|
| 6756 |  | 
|---|
// ADV_* flags: each value names the guarantee a caller has that an AppDomain
// remains valid (cannot be fully unloaded) while being used.  Passed as the
// ADValidityKind argument to CheckADValidity below and to the
// ENTER_DOMAIN_PTR* macros.
//current ad, always safe
#define ADV_CURRENTAD   0
//default ad, never unloaded
#define ADV_DEFAULTAD   1
// held by iterator, iterator holds a ref
#define ADV_ITERATOR    2
// the appdomain is on the stack
#define ADV_RUNNINGIN   4
// we're in process of creating the appdomain, refcount guaranteed to be >0
#define ADV_CREATING    8
// compilation domain - ngen guarantees it won't be unloaded until everyone left
#define ADV_COMPILATION  0x10
// finalizer thread - synchronized with ADU
#define ADV_FINALIZER     0x40
// held by AppDomainRefTaker
#define ADV_REFTAKER    0x100
|---|
| 6773 |  | 
|---|
// Debug-only check that pDomain is valid under the guarantee described by
// ADValidityKind (a combination of the ADV_* flags above).  Compiles away to
// nothing in non-debug builds.
#ifdef _DEBUG
void CheckADValidity(AppDomain* pDomain, DWORD ADValidityKind);
#else
#define CheckADValidity(pDomain,ADValidityKind)
#endif
|---|
| 6779 |  | 
|---|
| 6780 | // Please keep these macros in sync with the NO_EH_AT_TRANSITION macros below. | 
|---|
// Enter the AppDomain identified by ADID _pDestDomain, but only when
// _predicate_expr evaluates true.  Uses the ADID-based context switch and
// establishes EH at the transition boundary; must be closed with
// END_DOMAIN_TRANSITION.
#define ENTER_DOMAIN_ID_PREDICATED(_pDestDomain,_predicate_expr) \
TESTHOOKCALL(EnteringAppDomain(_pDestDomain.m_dwId))    ;    \
ENTER_DOMAIN_SETUPVARS(GetThread(), _predicate_expr) \
ENTER_DOMAIN_SWITCH_CTX_BY_ADID(_ctx_trans_pThread->GetDomain(), _pDestDomain, FALSE) \
ENTER_DOMAIN_SETUP_EH    \
TESTHOOKCALL(EnteredAppDomain(_pDestDomain.m_dwId));
|---|
| 6787 |  | 
|---|
// Pointer-based variant: enter AppDomain *_pDestDomain when _predicate_expr
// evaluates true.  ADValidityKind (ADV_* flags) states why the destination
// cannot be unloaded; verified in debug builds via CheckADValidity.  When the
// predicate is false, validity is checked against the current AppDomain
// instead.  Must be closed with END_DOMAIN_TRANSITION.
#define ENTER_DOMAIN_PTR_PREDICATED(_pDestDomain,ADValidityKind,_predicate_expr) \
TESTHOOKCALL(EnteringAppDomain((_pDestDomain)->GetId().m_dwId)); \
ENTER_DOMAIN_SETUPVARS(GetThread(), _predicate_expr) \
CheckADValidity(_ctx_trans_fPredicate?(_pDestDomain):GetAppDomain(),ADValidityKind);      \
ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_ctx_trans_pThread->GetDomain(), _pDestDomain) \
ENTER_DOMAIN_SETUP_EH    \
TESTHOOKCALL(EnteredAppDomain((_pDestDomain)->GetId().m_dwId));
|---|
| 6795 |  | 
|---|
| 6796 |  | 
|---|
// Unconditional pointer-based AppDomain entry (predicate fixed to true).
// ADValidityKind documents why *_pDestDomain cannot be unloaded.  Must be
// closed with END_DOMAIN_TRANSITION.
#define ENTER_DOMAIN_PTR(_pDestDomain,ADValidityKind) \
TESTHOOKCALL(EnteringAppDomain((_pDestDomain)->GetId().m_dwId)); \
CheckADValidity(_pDestDomain,ADValidityKind);      \
ENTER_DOMAIN_SETUPVARS(GetThread(), true) \
ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_ctx_trans_pThread->GetDomain(), _pDestDomain) \
ENTER_DOMAIN_SETUP_EH   \
TESTHOOKCALL(EnteredAppDomain((_pDestDomain)->GetId().m_dwId));
|---|
| 6804 |  | 
|---|
// Unconditional form of ENTER_DOMAIN_ID_PREDICATED (predicate fixed to true).
#define ENTER_DOMAIN_ID(_pDestDomain) \
ENTER_DOMAIN_ID_PREDICATED(_pDestDomain,true)
|---|
| 6807 |  | 
|---|
| 6808 | // <EnableADTransitionWithoutEH> | 
|---|
| 6809 | // The following macros support the AD transition *without* using EH at transition boundary. | 
|---|
| 6810 | // Please keep them in sync with the macros above. | 
|---|
// Pointer-based entry without an EH block at the boundary; otherwise mirrors
// ENTER_DOMAIN_PTR.  Close with END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION.
#define ENTER_DOMAIN_PTR_NO_EH_AT_TRANSITION(_pDestDomain,ADValidityKind) \
TESTHOOKCALL(EnteringAppDomain((_pDestDomain)->GetId().m_dwId)); \
CheckADValidity(_pDestDomain,ADValidityKind);      \
ENTER_DOMAIN_SETUPVARS(GetThread(), true) \
ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_ctx_trans_pThread->GetDomain(), _pDestDomain) \
TESTHOOKCALL(EnteredAppDomain((_pDestDomain)->GetId().m_dwId));

// ADID-based, predicated entry without an EH block at the boundary; mirrors
// ENTER_DOMAIN_ID_PREDICATED minus ENTER_DOMAIN_SETUP_EH.
#define ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION_PREDICATED(_pDestDomain,_predicate_expr) \
TESTHOOKCALL(EnteringAppDomain(_pDestDomain.m_dwId))    ;    \
ENTER_DOMAIN_SETUPVARS(GetThread(), _predicate_expr) \
ENTER_DOMAIN_SWITCH_CTX_BY_ADID(_ctx_trans_pThread->GetDomain(), _pDestDomain, FALSE) \
TESTHOOKCALL(EnteredAppDomain(_pDestDomain.m_dwId));

// Unconditional form of the predicated macro above.
#define ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION(_pDestDomain) \
ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION_PREDICATED(_pDestDomain,true)
|---|
| 6826 |  | 
|---|
// Closes a transition opened by one of the *_NO_EH_AT_TRANSITION macros
// above.  There is no EX_CATCH here; on the success path the
// return-to-previous-AppDomain holder is disarmed via SuppressRelease().
// NOTE(review): __returnToPreviousAppDomainHolder itself is declared by the
// transition setup macros outside this file section -- confirm there.
#define END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION                                   \
TESTHOOKCALL(LeavingAppDomain(::GetAppDomain()->GetId().m_dwId));           \
LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n",               \
__FUNCTION__, __FILE__, __LINE__));                                 \
\
__returnToPreviousAppDomainHolder.SuppressRelease();                        \
TESTHOOKCALL(LeftAppDomain(_ctx_trans_pDestDomainId.m_dwId));               \
DEBUG_ASSURE_NO_RETURN_END(DOMAIN)                                          \
} // Close scope setup by ENTER_DOMAIN_SETUPVARS
|---|
| 6836 |  | 
|---|
| 6837 | // </EnableADTransitionWithoutEH> | 
|---|
| 6838 |  | 
|---|
// Yields the _ctx_trans_pFrame local declared by the enclosing ENTER_DOMAIN_*
// macros; only usable between an ENTER_DOMAIN_* and its matching END_*.
#define GET_CTX_TRANSITION_FRAME() \
(_ctx_trans_pFrame)
|---|
| 6841 |  | 
|---|
| 6842 | //----------------------------------------------------------------------------- | 
|---|
| 6843 | // System to make Cross-Appdomain calls. | 
|---|
| 6844 | // | 
|---|
| 6845 | // Cross-AppDomain calls are made via a callback + args. This gives us the flexibility | 
|---|
| 6846 | // to check if a transition is needed, and take fast vs. slow paths for the debugger. | 
|---|
| 6847 | // | 
|---|
| 6848 | // Example usage: | 
|---|
| 6849 | //   struct FooArgs : public CtxTransitionBaseArgs { ... } args (...); // load up args | 
|---|
| 6850 | //   MakeCallWithPossibleAppDomainTransition(pNewDomain, MyFooFunc, &args); | 
|---|
| 6851 | // | 
|---|
| 6852 | // MyFooFunc is always executed in pNewDomain. | 
|---|
| 6853 | // If we're already in pNewDomain, then that just becomes MyFooFunc(&args); | 
|---|
| 6854 | // else we'll switch ADs, and do the proper Try/Catch/Rethrow. | 
|---|
| 6855 | //----------------------------------------------------------------------------- | 
|---|
| 6856 |  | 
|---|
| 6857 | // All Arg structs should derive from this. This makes certain standard args | 
|---|
| 6858 | // are available (such as the context-transition frame). | 
|---|
| 6859 | // The ADCallback helpers will fill in these base args. | 
|---|
| 6860 | struct CtxTransitionBaseArgs; | 
|---|
| 6861 |  | 
|---|
| 6862 | // Pointer type for the AppDomain callback function. | 
|---|
// The callback is invoked by the MakeCallWith*AppDomainTransition helpers
// below, after any required AppDomain transition has been performed.
typedef void (*FPAPPDOMAINCALLBACK)(
CtxTransitionBaseArgs*             pData     // Caller's private data
);
|---|
| 6866 |  | 
|---|
| 6867 |  | 
|---|
| 6868 | //----------------------------------------------------------------------------- | 
|---|
// Call with a wrapper.
|---|
| 6870 | // We've already transitioned AppDomains here. This just places a 1st-pass filter to sniff | 
|---|
| 6871 | // for catch-handler found callbacks for the debugger. | 
|---|
| 6872 | //----------------------------------------------------------------------------- | 
|---|
void MakeADCallDebuggerWrapper(
FPAPPDOMAINCALLBACK fpCallback,        // callback to invoke in the (already-entered) domain
CtxTransitionBaseArgs * args,          // caller's private data, handed through to fpCallback
ContextTransitionFrame* pFrame);       // the context-transition frame already established
|---|
| 6877 |  | 
|---|
| 6878 | // Invoke a callback in another appdomain. | 
|---|
| 6879 | // Caller should have checked that we're actually transitioning domains here. | 
|---|
void MakeCallWithAppDomainTransition(
ADID pTargetDomain,                    // id of the domain to transition into
FPAPPDOMAINCALLBACK fpCallback,        // callback to run inside pTargetDomain
CtxTransitionBaseArgs * args);         // caller's private data; base fields filled in by this helper
|---|
| 6884 |  | 
|---|
| 6885 | // Invoke the callback in the AppDomain. | 
|---|
// Ensure that the predicate only gets evaluated once!!
|---|
// Compares the current thread's domain id against pTargetDomain; when they
// differ (and fPredicate holds) the call is routed through
// MakeCallWithAppDomainTransition, otherwise fpCallback is invoked directly.
// fPredicate and pTargetDomain each appear exactly once in the expansion, so
// side-effecting expressions are evaluated once.
#define MakePredicatedCallWithPossibleAppDomainTransition(pTargetDomain, fPredicate, fpCallback, args) \
{ \
Thread*     _ctx_trans_pThread          = GetThread(); \
_ASSERTE(_ctx_trans_pThread != NULL); \
ADID  _ctx_trans_pCurrDomain      = _ctx_trans_pThread->GetDomain()->GetId(); \
ADID  _ctx_trans_pDestDomain      = (pTargetDomain);                                   \
\
if (fPredicate && (_ctx_trans_pCurrDomain != _ctx_trans_pDestDomain)) \
{ \
/* Transition domains and make the call */ \
MakeCallWithAppDomainTransition(pTargetDomain, (FPAPPDOMAINCALLBACK) fpCallback, args); \
} \
else      \
{ \
/* No transition needed. Just call directly.  */ \
(fpCallback)(args); \
}\
}
|---|
| 6905 |  | 
|---|
| 6906 | // Invoke the callback in the AppDomain. | 
|---|
// Unconditional form: always transition (when the domains differ) and call.
#define MakeCallWithPossibleAppDomainTransition(pTargetDomain, fpCallback, args) \
MakePredicatedCallWithPossibleAppDomainTransition(pTargetDomain, true, fpCallback, args)
|---|
| 6909 |  | 
|---|
| 6910 |  | 
|---|
// Base class for argument structs passed to the cross-AppDomain call helpers.
// Carries the context-transition frame so callbacks can inspect whether (and
// through which frame) a transition occurred.
struct CtxTransitionBaseArgs
{
// This function fills out the private base args.
friend void MakeCallWithAppDomainTransition(
ADID pTargetDomain,
FPAPPDOMAINCALLBACK fpCallback,
CtxTransitionBaseArgs * args);

public:
CtxTransitionBaseArgs() { pCtxFrame = NULL; }
// This will be NULL if we didn't actually transition.
ContextTransitionFrame* GetCtxTransitionFrame() { return pCtxFrame; }
private:
// Set by MakeCallWithAppDomainTransition (a friend); NULL when no
// transition took place.
ContextTransitionFrame* pCtxFrame;
};
|---|
| 6926 |  | 
|---|
| 6927 |  | 
|---|
| 6928 | // We have numerous places where we start up a managed thread.  This includes several places in the | 
|---|
| 6929 | // ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer.  Try to factor the code so our | 
|---|
| 6930 | // base exception handling behavior is consistent across those places.  The resulting code is convoluted, | 
|---|
| 6931 | // but it's better than the prior situation of each thread being on a different plan. | 
|---|
| 6932 |  | 
|---|
| 6933 | // If you add a new kind of managed thread (i.e. thread proc) to the system, you must: | 
|---|
| 6934 | // | 
|---|
| 6935 | // 1) Call HasStarted() before calling any ManagedThreadBase_* routine. | 
|---|
| 6936 | // 2) Define a ManagedThreadBase_* routine for your scenario and declare it below. | 
|---|
| 6937 | // 3) Always perform any AD transitions through the ManagedThreadBase_* mechanism. | 
|---|
| 6938 | // 4) Allow the ManagedThreadBase_* mechanism to perform all your exception handling, including | 
|---|
| 6939 | //    dispatching of unhandled exception events, deciding what to swallow, etc. | 
|---|
| 6940 | // 5) If you must separate your base thread proc behavior from your AD transitioning behavior, | 
|---|
| 6941 | //    define a second ManagedThreadADCall_* helper and declare it below. | 
|---|
| 6942 | // 6) Never decide this is too much work and that you will roll your own thread proc code. | 
|---|
| 6943 |  | 
|---|
| 6944 | // intentionally opaque. | 
|---|
| 6945 | struct ManagedThreadCallState; | 
|---|
| 6946 |  | 
|---|
// Entry points that establish the common base exception handling for every
// kind of managed thread (see the numbered rules in the comment above).
struct ManagedThreadBase
{
// The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker
static void KickOff(ADID pAppDomain,            // domain to run the target in
ADCallBackFcnType pTarget,  // thread body
LPVOID args);               // opaque args for pTarget

// The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in
// the ThreadPool
static void ThreadPool(ADID pAppDomain, ADCallBackFcnType pTarget, LPVOID args);

// The Finalizer thread separates the tasks of establishing exception handling at its
// base and transitioning into AppDomains.  The turnaround structure that ties the 2 calls together
// is the ManagedThreadCallState.


// For the case (like Finalization) where the base transition and the AppDomain transition are
// separated, an opaque structure is used to tie together the two calls.

// Establishes the base EH for the finalizer thread; pairs with
// FinalizerAppDomain for the per-domain portion.
static void FinalizerBase(ADCallBackFcnType pTarget);
static void FinalizerAppDomain(AppDomain* pAppDomain,
ADCallBackFcnType pTarget,
LPVOID args,
ManagedThreadCallState *pTurnAround);  // state produced by the FinalizerBase side
};
|---|
| 6972 |  | 
|---|
| 6973 |  | 
|---|
| 6974 | // DeadlockAwareLock is a base for building deadlock-aware locks. | 
|---|
| 6975 | // Note that DeadlockAwareLock only works if ALL locks involved in the deadlock are deadlock aware. | 
|---|
| 6976 |  | 
|---|
class DeadlockAwareLock
{
private:
// Thread currently holding this lock, or NULL when unowned.
VolatilePtr<Thread> m_pHoldingThread;
#ifdef _DEBUG
// Human-readable name for diagnostics (debug builds only).
const char  *m_description;
#endif

public:
DeadlockAwareLock(const char *description = NULL);
~DeadlockAwareLock();

// Test for deadlock
BOOL CanEnterLock();

// Call BeginEnterLock before attempting to acquire the lock
BOOL TryBeginEnterLock(); // returns FALSE if deadlock
void BeginEnterLock(); // Asserts if deadlock

// Call EndEnterLock after acquiring the lock
void EndEnterLock();

// Call LeaveLock after releasing the lock
void LeaveLock();

const char *GetDescription();

private:
// Walks the wait graph starting at pThread looking for a cycle.
CHECK CheckDeadlock(Thread *pThread);

// Clears the current thread's "blocked on" record; used by the holder
// below so the record is reset on every exit path.
static void ReleaseBlockingLock()
{
Thread *pThread = GetThread();
_ASSERTE (pThread);
pThread->m_pBlockingLock = NULL;
}
public:
// Scope holder: runs ReleaseBlockingLock on destruction.
typedef StateHolder<DoNothing,DeadlockAwareLock::ReleaseBlockingLock> BlockingLockHolder;
};
|---|
| 7016 |  | 
|---|
// Records on the current thread the TypeHandle of the object about to be
// allocated, so the GC's ETW allocation events can report the type.
inline void SetTypeHandleOnThreadForAlloc(TypeHandle th)
{
// We are doing this unconditionally even though th is only used by ETW events in GC. When the ETW
// event is not enabled we still need to set it because it may not be enabled here but by the
// time we are checking in GC, the event is enabled - we don't want GC to read a random value
// from before in this case.
GetThread()->SetTHAllocContextObj(th);
}
|---|
| 7025 |  | 
|---|
| 7026 | #endif // CROSSGEN_COMPILE | 
|---|
| 7027 |  | 
|---|
| 7028 | class Compiler; | 
|---|
| 7029 | // users of OFFSETOF__TLS__tls_CurrentThread macro expect the offset of these variables wrt to _tls_start to be stable. | 
|---|
| 7030 | // Defining each of the following thread local variable separately without the struct causes the offsets to change in | 
|---|
| 7031 | // different flavors of build. Eg. in chk build the offset of m_pThread is 0x4 while in ret build it becomes 0x8 as 0x4 is | 
|---|
| 7032 | // occupied by m_pAddDomain. Packing all thread local variables in a struct and making struct instance to be thread local | 
|---|
| 7033 | // ensures that the offsets of the variables are stable in all build flavors. | 
|---|
struct ThreadLocalInfo
{
// NOTE: field order and layout are load-bearing (see the comment above
// about OFFSETOF__TLS__tls_CurrentThread); do not reorder or insert fields.
Thread* m_pThread;
AppDomain* m_pAppDomain; // This field is read only by the SOS plugin to get the AppDomain
void** m_EETlsData; // ClrTlsInfo::data
};
|---|
| 7040 |  | 
|---|
| 7041 | class ThreadStateHolder | 
|---|
| 7042 | { | 
|---|
| 7043 | public: | 
|---|
| 7044 | ThreadStateHolder (BOOL fNeed, DWORD state) | 
|---|
| 7045 | { | 
|---|
| 7046 | LIMITED_METHOD_CONTRACT; | 
|---|
| 7047 | _ASSERTE (GetThread()); | 
|---|
| 7048 | m_fNeed = fNeed; | 
|---|
| 7049 | m_state = state; | 
|---|
| 7050 | } | 
|---|
| 7051 | ~ThreadStateHolder () | 
|---|
| 7052 | { | 
|---|
| 7053 | LIMITED_METHOD_CONTRACT; | 
|---|
| 7054 |  | 
|---|
| 7055 | if (m_fNeed) | 
|---|
| 7056 | { | 
|---|
| 7057 | Thread *pThread = GetThread(); | 
|---|
| 7058 | _ASSERTE (pThread); | 
|---|
| 7059 | FastInterlockAnd((ULONG *) &pThread->m_State, ~m_state); | 
|---|
| 7060 | } | 
|---|
| 7061 | } | 
|---|
| 7062 | private: | 
|---|
| 7063 | BOOL m_fNeed; | 
|---|
| 7064 | DWORD m_state; | 
|---|
| 7065 | }; | 
|---|
| 7066 |  | 
|---|
| 7067 | // Sets an NC threadstate if not already set, and restores the old state | 
|---|
| 7068 | // of that bit upon destruction | 
|---|
| 7069 |  | 
|---|
| 7070 | // fNeed > 0,   make sure state is set, restored in destructor | 
|---|
| 7071 | // fNeed = 0,   no change | 
|---|
| 7072 | // fNeed < 0,   make sure state is reset, restored in destructor | 
|---|
| 7073 |  | 
|---|
// See the tri-state contract in the comment above: fNeed > 0 forces the NC
// state bit set for the scope, fNeed < 0 forces it reset, fNeed == 0 is a
// no-op.  In either active case the destructor restores the bit only if the
// constructor actually changed it (m_fNeed is downgraded to FALSE otherwise).
class ThreadStateNCStackHolder
{
public:
ThreadStateNCStackHolder (BOOL fNeed, Thread::ThreadStateNoConcurrency state)
{
LIMITED_METHOD_CONTRACT;

_ASSERTE (GetThread());
m_fNeed = fNeed;
m_state = state;

if (fNeed)
{
Thread *pThread = GetThread();
_ASSERTE (pThread);

if (fNeed < 0)
{
// if the state is set, reset it
if (pThread->HasThreadStateNC(state))
{
pThread->ResetThreadStateNC(m_state);
}
else
{
// bit was already clear: nothing to restore later
m_fNeed = FALSE;
}
}
else
{
// if the state is already set then no change is
// necessary during the back out
if(pThread->HasThreadStateNC(state))
{
m_fNeed = FALSE;
}
else
{
pThread->SetThreadStateNC(state);
}
}
}
}

~ThreadStateNCStackHolder()
{
LIMITED_METHOD_CONTRACT;

// Only undo the constructor's change; m_fNeed keeps the original sign so
// we know which direction to restore.
if (m_fNeed)
{
Thread *pThread = GetThread();
_ASSERTE (pThread);

if (m_fNeed < 0)
{
pThread->SetThreadStateNC(m_state); // set it
}
else
{
pThread->ResetThreadStateNC(m_state);
}
}
}

private:
// Sign carries the direction (set vs. reset); FALSE means nothing to undo.
BOOL m_fNeed;
Thread::ThreadStateNoConcurrency m_state;
};
|---|
| 7142 |  | 
|---|
// Debug aid: reports whether locking is currently effected via thread
// suspension -- TODO confirm exact semantics against the definition.
BOOL Debug_IsLockedViaThreadSuspension();
|---|
| 7144 |  | 
|---|
| 7145 | #endif //__threads_h__ | 
|---|
| 7146 |  | 
|---|