1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4//
5// THREADS.CPP
6//
7
8//
9//
10
11
12#include "common.h"
13
14#include "frames.h"
15#include "threads.h"
16#include "stackwalk.h"
17#include "excep.h"
18#include "comsynchronizable.h"
19#include "log.h"
20#include "gcheaputilities.h"
21#include "mscoree.h"
22#include "dbginterface.h"
23#include "corprof.h" // profiling
24#include "eeprofinterfaces.h"
25#include "eeconfig.h"
26#include "perfcounters.h"
27#include "corhost.h"
28#include "win32threadpool.h"
29#include "jitinterface.h"
30#include "eventtrace.h"
31#include "comutilnative.h"
32#include "finalizerthread.h"
33#include "threadsuspend.h"
34
35#include "wrappers.h"
36
37#include "nativeoverlapped.h"
38
39#include "mdaassistants.h"
40#include "appdomain.inl"
41#include "vmholder.h"
42#include "exceptmacros.h"
43#include "win32threadpool.h"
44
45#ifdef FEATURE_COMINTEROP
46#include "runtimecallablewrapper.h"
47#include "interoputil.h"
48#include "interoputil.inl"
49#endif // FEATURE_COMINTEROP
50
51#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
52#include "olecontexthelpers.h"
53#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
54
55#ifdef FEATURE_PERFTRACING
56#include "eventpipebuffermanager.h"
57#endif // FEATURE_PERFTRACING
58
59
60
61SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore);
62CONTEXT *ThreadStore::s_pOSContext = NULL;
63CLREvent *ThreadStore::s_pWaitForStackCrawlEvent;
64
65PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(ModuleIndex index)
66{
67 WRAPPER_NO_CONTRACT;
68 SUPPORTS_DAC;
69
70 if (index.m_dwIndex >= m_TLMTableSize)
71 return NULL;
72
73 return m_pTLMTable[index.m_dwIndex].pTLM;
74}
75
76PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(MethodTable* pMT)
77{
78 WRAPPER_NO_CONTRACT;
79 ModuleIndex index = pMT->GetModuleForStatics()->GetModuleIndex();
80 return GetTLMIfExists(index);
81}
82
83#ifndef DACCESS_COMPILE
84
85BOOL Thread::s_fCleanFinalizedThread = FALSE;
86
87Volatile<LONG> Thread::s_threadPoolCompletionCountOverflow = 0;
88
89CrstStatic g_DeadlockAwareCrst;
90
91
92#if defined(_DEBUG)
93BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId )
94{
95#ifndef FEATURE_PAL
96 LIMITED_METHOD_CONTRACT;
97
98 DWORD id = GetThreadId(h);
99
100 // OS call GetThreadId may fail, and return 0. In this case we can not
101 // make a decision if the two match or not. Instead, we ignore this check.
102 return id == 0 || id == osId;
103#else // !FEATURE_PAL
104 return TRUE;
105#endif // !FEATURE_PAL
106}
107#endif // _DEBUG
108
109
110#ifdef _DEBUG_IMPL
111template<> AutoCleanupGCAssert<TRUE>::AutoCleanupGCAssert()
112{
113 SCAN_SCOPE_BEGIN;
114 STATIC_CONTRACT_MODE_COOPERATIVE;
115}
116
117template<> AutoCleanupGCAssert<FALSE>::AutoCleanupGCAssert()
118{
119 SCAN_SCOPE_BEGIN;
120 STATIC_CONTRACT_MODE_PREEMPTIVE;
121}
122
123template<> void GCAssert<TRUE>::BeginGCAssert()
124{
125 SCAN_SCOPE_BEGIN;
126 STATIC_CONTRACT_MODE_COOPERATIVE;
127}
128
129template<> void GCAssert<FALSE>::BeginGCAssert()
130{
131 SCAN_SCOPE_BEGIN;
132 STATIC_CONTRACT_MODE_PREEMPTIVE;
133}
134#endif
135
136// #define NEW_TLS 1
137
138#ifdef _DEBUG
139void Thread::SetFrame(Frame *pFrame)
140{
141 CONTRACTL {
142 NOTHROW;
143 GC_NOTRIGGER;
144 DEBUG_ONLY;
145 MODE_COOPERATIVE;
146 // It only makes sense for a Thread to call SetFrame on itself.
147 PRECONDITION(this == GetThread());
148 PRECONDITION(CheckPointer(pFrame));
149 }
150 CONTRACTL_END;
151
152 if (g_pConfig->fAssertOnFailFast())
153 {
154 Frame *pWalk = m_pFrame;
155 BOOL fExist = FALSE;
156 while (pWalk != (Frame*) -1)
157 {
158 if (pWalk == pFrame)
159 {
160 fExist = TRUE;
161 break;
162 }
163 pWalk = pWalk->m_Next;
164 }
165 pWalk = m_pFrame;
166 while (fExist && pWalk != pFrame && pWalk != (Frame*)-1)
167 {
168 if (pWalk->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
169 {
170 _ASSERTE (((ContextTransitionFrame *)pWalk)->GetReturnDomain() == m_pDomain);
171 }
172 pWalk = pWalk->m_Next;
173 }
174 }
175
176 m_pFrame = pFrame;
177
178 // If stack overrun corruptions are expected, then skip this check
179 // as the Frame chain may have been corrupted.
180 if (g_pConfig->fAssertOnFailFast() == false)
181 return;
182
183 Frame* espVal = (Frame*)GetCurrentSP();
184
185 while (pFrame != (Frame*) -1)
186 {
187 static Frame* stopFrame = 0;
188 if (pFrame == stopFrame)
189 _ASSERTE(!"SetFrame frame == stopFrame");
190
191 _ASSERTE(espVal < pFrame);
192 _ASSERTE(pFrame < m_CacheStackBase);
193 _ASSERTE(pFrame->GetFrameType() < Frame::TYPE_COUNT);
194
195 pFrame = pFrame->m_Next;
196 }
197}
198
199#endif // _DEBUG
200
201//************************************************************************
202// PRIVATE GLOBALS
203//************************************************************************
204
205extern unsigned __int64 getTimeStamp();
206
207extern unsigned __int64 getTickFrequency();
208
209unsigned __int64 tgetFrequency() {
210 static unsigned __int64 cachedFreq = (unsigned __int64) -1;
211
212 if (cachedFreq != (unsigned __int64) -1)
213 return cachedFreq;
214 else {
215 cachedFreq = getTickFrequency();
216 return cachedFreq;
217 }
218}
219
220#endif // #ifndef DACCESS_COMPILE
221
222static StackWalkAction DetectHandleILStubsForDebugger_StackWalkCallback(CrawlFrame *pCF, VOID *pData)
223{
224 WRAPPER_NO_CONTRACT;
225 // It suffices to wait for the first CrawlFrame with non-NULL function
226 MethodDesc *pMD = pCF->GetFunction();
227 if (pMD != NULL)
228 {
229 *(bool *)pData = pMD->IsILStub();
230 return SWA_ABORT;
231 }
232
233 return SWA_CONTINUE;
234}
235
236// This is really just a heuristic to detect if we are executing in an M2U IL stub or
237// one of the marshaling methods it calls. It doesn't deal with U2M IL stubs.
238// We loop through the frame chain looking for an uninitialized TransitionFrame.
239// If there is one, then we are executing in an M2U IL stub or one of the methods it calls.
240// On the other hand, if there is an initialized TransitionFrame, then we are not.
241// Also, if there is an HMF on the stack, then we stop. This could be the case where
242// an IL stub calls an FCALL which ends up in a managed method, and the debugger wants to
243// stop in those cases. Some examples are COMException..ctor and custom marshalers.
244//
245// X86 IL stubs use InlinedCallFrame and are indistinguishable from ordinary methods with
246// inlined P/Invoke when judging just from the frame chain. We use stack walk to decide
247// this case.
248bool Thread::DetectHandleILStubsForDebugger()
249{
250 CONTRACTL {
251 NOTHROW;
252 GC_NOTRIGGER;
253 }
254 CONTRACTL_END;
255
256 Frame* pFrame = GetFrame();
257
258 if (pFrame != NULL)
259 {
260 while (pFrame != FRAME_TOP)
261 {
262 // Check for HMF's. See the comment at the beginning of this function.
263 if (pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr())
264 {
265 break;
266 }
267 // If there is an entry frame (i.e. U2M managed), we should break.
268 else if (pFrame->GetFrameType() == Frame::TYPE_ENTRY)
269 {
270 break;
271 }
272 // Check for M2U transition frames. See the comment at the beginning of this function.
273 else if (pFrame->GetFrameType() == Frame::TYPE_EXIT)
274 {
275 if (pFrame->GetReturnAddress() == NULL)
276 {
277 // If the return address is NULL, then the frame has not been initialized yet.
278 // We may see InlinedCallFrame in ordinary methods as well. Have to do
279 // stack walk to find out if this is really an IL stub.
280 bool fInILStub = false;
281
282 StackWalkFrames(&DetectHandleILStubsForDebugger_StackWalkCallback,
283 &fInILStub,
284 QUICKUNWIND,
285 dac_cast<PTR_Frame>(pFrame));
286
287 if (fInILStub) return true;
288 }
289 else
290 {
291 // The frame is fully initialized.
292 return false;
293 }
294 }
295 pFrame = pFrame->Next();
296 }
297 }
298 return false;
299}
300
301extern "C" {
302#ifndef __llvm__
303__declspec(thread)
304#else // !__llvm__
305__thread
306#endif // !__llvm__
307ThreadLocalInfo gCurrentThreadInfo =
308 {
309 NULL, // m_pThread
310 NULL, // m_pAppDomain
311 NULL, // m_EETlsData
312 };
313} // extern "C"
314
315// index into TLS Array. Definition added by compiler
316EXTERN_C UINT32 _tls_index;
317
318#ifndef DACCESS_COMPILE
319
320BOOL SetThread(Thread* t)
321{
322 LIMITED_METHOD_CONTRACT
323
324 gCurrentThreadInfo.m_pThread = t;
325 return TRUE;
326}
327
328BOOL SetAppDomain(AppDomain* ad)
329{
330 LIMITED_METHOD_CONTRACT
331
332 gCurrentThreadInfo.m_pAppDomain = ad;
333 return TRUE;
334}
335
336BOOL Thread::Alert ()
337{
338 CONTRACTL {
339 NOTHROW;
340 GC_NOTRIGGER;
341 }
342 CONTRACTL_END;
343
344 BOOL fRetVal = FALSE;
345 {
346 HANDLE handle = GetThreadHandle();
347 if (handle != INVALID_HANDLE_VALUE && handle != SWITCHOUT_HANDLE_VALUE)
348 {
349 fRetVal = ::QueueUserAPC(UserInterruptAPC, handle, APC_Code);
350 }
351 }
352
353 return fRetVal;
354}
355
356
357DWORD Thread::Join(DWORD timeout, BOOL alertable)
358{
359 WRAPPER_NO_CONTRACT;
360 return JoinEx(timeout,alertable?WaitMode_Alertable:WaitMode_None);
361}
362DWORD Thread::JoinEx(DWORD timeout, WaitMode mode)
363{
364 CONTRACTL {
365 THROWS;
366 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
367 }
368 CONTRACTL_END;
369
370 BOOL alertable = (mode & WaitMode_Alertable)?TRUE:FALSE;
371
372 Thread *pCurThread = GetThread();
373 _ASSERTE(pCurThread || dbgOnly_IsSpecialEEThread());
374
375 {
376 // We're not hosted, so WaitMode_InDeadlock is irrelevant. Clear it, so that this wait can be
377 // forwarded to a SynchronizationContext if needed.
378 mode = (WaitMode)(mode & ~WaitMode_InDeadlock);
379
380 HANDLE handle = GetThreadHandle();
381 if (handle == INVALID_HANDLE_VALUE || handle == SWITCHOUT_HANDLE_VALUE) {
382 return WAIT_FAILED;
383 }
384 if (pCurThread) {
385 return pCurThread->DoAppropriateWait(1, &handle, FALSE, timeout, mode);
386 }
387 else {
388 return WaitForSingleObjectEx(handle,timeout,alertable);
389 }
390 }
391}
392
393extern INT32 MapFromNTPriority(INT32 NTPriority);
394
395BOOL Thread::SetThreadPriority(
396 int nPriority // thread priority level
397)
398{
399 CONTRACTL
400 {
401 NOTHROW;
402 GC_NOTRIGGER;
403 }
404 CONTRACTL_END;
405
406 BOOL fRet;
407 {
408 if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
409 // When the thread starts running, we will set the thread priority.
410 fRet = TRUE;
411 }
412 else
413 fRet = ::SetThreadPriority(GetThreadHandle(), nPriority);
414 }
415
416 if (fRet)
417 {
418 GCX_COOP();
419 THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
420 if (pObject != NULL)
421 {
422 // TODO: managed ThreadPriority only supports up to 4.
423 pObject->SetPriority (MapFromNTPriority(nPriority));
424 }
425 }
426 return fRet;
427}
428
429int Thread::GetThreadPriority()
430{
431 CONTRACTL {
432 NOTHROW;
433 GC_NOTRIGGER;
434 }
435 CONTRACTL_END;
436
437 int nRetVal = -1;
438 if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
439 nRetVal = FALSE;
440 }
441 else
442 nRetVal = ::GetThreadPriority(GetThreadHandle());
443
444 return nRetVal;
445}
446
447void Thread::ChooseThreadCPUGroupAffinity()
448{
449 CONTRACTL
450 {
451 NOTHROW;
452 GC_TRIGGERS;
453 }
454 CONTRACTL_END;
455
456 if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
457 return;
458
459
460 //Borrow the ThreadStore Lock here: Lock ThreadStore before distributing threads
461 ThreadStoreLockHolder TSLockHolder(TRUE);
462
463 // this thread already has CPU group affinity set
464 if (m_pAffinityMask != 0)
465 return;
466
467 if (GetThreadHandle() == INVALID_HANDLE_VALUE)
468 return;
469
470 GROUP_AFFINITY groupAffinity;
471 CPUGroupInfo::ChooseCPUGroupAffinity(&groupAffinity);
472 CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL);
473 m_wCPUGroup = groupAffinity.Group;
474 m_pAffinityMask = groupAffinity.Mask;
475}
476
477void Thread::ClearThreadCPUGroupAffinity()
478{
479 CONTRACTL
480 {
481 NOTHROW;
482 GC_NOTRIGGER;
483 }
484 CONTRACTL_END;
485
486 if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
487 return;
488
489
490 ThreadStoreLockHolder TSLockHolder(TRUE);
491
492 // this thread does not have CPU group affinity set
493 if (m_pAffinityMask == 0)
494 return;
495
496 GROUP_AFFINITY groupAffinity;
497 groupAffinity.Group = m_wCPUGroup;
498 groupAffinity.Mask = m_pAffinityMask;
499 CPUGroupInfo::ClearCPUGroupAffinity(&groupAffinity);
500
501 m_wCPUGroup = 0;
502 m_pAffinityMask = 0;
503}
504
505DWORD Thread::StartThread()
506{
507 CONTRACTL
508 {
509 NOTHROW;
510 GC_NOTRIGGER;
511 MODE_ANY;
512 }
513 CONTRACTL_END;
514
515 DWORD dwRetVal = (DWORD) -1;
516#ifdef _DEBUG
517 _ASSERTE (m_Creater.IsCurrentThread());
518 m_Creater.Clear();
519#endif
520
521 _ASSERTE (GetThreadHandle() != INVALID_HANDLE_VALUE &&
522 GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
523 dwRetVal = ::ResumeThread(GetThreadHandle());
524
525
526 return dwRetVal;
527}
528
529
530// Class static data:
531LONG Thread::m_DebugWillSyncCount = -1;
532LONG Thread::m_DetachCount = 0;
533LONG Thread::m_ActiveDetachCount = 0;
534int Thread::m_offset_counter = 0;
535Volatile<LONG> Thread::m_threadsAtUnsafePlaces = 0;
536
537//-------------------------------------------------------------------------
538// Public function: SetupThreadNoThrow()
539// Creates Thread for current thread if not previously created.
540// Returns NULL for failure (usually due to out-of-memory.)
541//-------------------------------------------------------------------------
542Thread* SetupThreadNoThrow(HRESULT *pHR)
543{
544 CONTRACTL {
545 NOTHROW;
546 SO_TOLERANT;
547 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
548 }
549 CONTRACTL_END;
550
551 HRESULT hr = S_OK;
552
553 Thread *pThread = GetThread();
554 if (pThread != NULL)
555 {
556 return pThread;
557 }
558
559 EX_TRY
560 {
561 pThread = SetupThread();
562 }
563 EX_CATCH
564 {
565 // We failed SetupThread. GET_EXCEPTION() may depend on Thread object.
566 if (__pException == NULL)
567 {
568 hr = E_OUTOFMEMORY;
569 }
570 else
571 {
572 hr = GET_EXCEPTION()->GetHR();
573 }
574 }
575 EX_END_CATCH(SwallowAllExceptions);
576
577 if (pHR)
578 {
579 *pHR = hr;
580 }
581
582 return pThread;
583}
584
585void DeleteThread(Thread* pThread)
586{
587 CONTRACTL {
588 NOTHROW;
589 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
590 }
591 CONTRACTL_END;
592
593 //_ASSERTE (pThread == GetThread());
594 SetThread(NULL);
595 SetAppDomain(NULL);
596
597 if (pThread->HasThreadStateNC(Thread::TSNC_ExistInThreadStore))
598 {
599 pThread->DetachThread(FALSE);
600 }
601 else
602 {
603#ifdef FEATURE_COMINTEROP
604 pThread->RevokeApartmentSpy();
605#endif // FEATURE_COMINTEROP
606
607 FastInterlockOr((ULONG *)&pThread->m_State, Thread::TS_Dead);
608
609 // ~Thread() calls SafeSetThrowables which has a conditional contract
610 // which says that if you call it with a NULL throwable then it is
611 // MODE_ANY, otherwise MODE_COOPERATIVE. Scan doesn't understand that
612 // and assumes that we're violating the MODE_COOPERATIVE.
613 CONTRACT_VIOLATION(ModeViolation);
614
615 delete pThread;
616 }
617}
618
619void EnsurePreemptive()
620{
621 WRAPPER_NO_CONTRACT;
622 Thread *pThread = GetThread();
623 if (pThread && pThread->PreemptiveGCDisabled())
624 {
625 pThread->EnablePreemptiveGC();
626 }
627}
628
629typedef StateHolder<DoNothing, EnsurePreemptive> EnsurePreemptiveModeIfException;
630
631Thread* SetupThread(BOOL fInternal)
632{
633 CONTRACTL {
634 THROWS;
635 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
636 SO_TOLERANT;
637 }
638 CONTRACTL_END;
639
640 Thread* pThread;
641 if ((pThread = GetThread()) != NULL)
642 return pThread;
643
644#ifdef FEATURE_STACK_PROBE
645 RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), NULL);
646#endif //FEATURE_STACK_PROBE
647
648 CONTRACT_VIOLATION(SOToleranceViolation);
649
650 // For interop debugging, we must mark that we're in a can't-stop region
651 // b.c we may take Crsts here that may block the helper thread.
652 // We're especially fragile here b/c we don't have a Thread object yet
653 CantStopHolder hCantStop;
654
655 EnsurePreemptiveModeIfException ensurePreemptive;
656
657#ifdef _DEBUG
658 CHECK chk;
659 if (g_pConfig->SuppressChecks())
660 {
661 // EnterAssert will suppress any checks
662 chk.EnterAssert();
663 }
664#endif
665
666 // Normally, HasStarted is called from the thread's entrypoint to introduce it to
667 // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
668 // that call into managed code. In that case, a call to SetupThread here must
669 // find the correct Thread object and install it into TLS.
670
671 if (ThreadStore::s_pThreadStore->m_PendingThreadCount != 0)
672 {
673 DWORD ourOSThreadId = ::GetCurrentThreadId();
674 {
675 ThreadStoreLockHolder TSLockHolder;
676 _ASSERTE(pThread == NULL);
677 while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, Thread::TS_Unstarted | Thread::TS_FailStarted, Thread::TS_Unstarted)) != NULL)
678 {
679 if (pThread->GetOSThreadId() == ourOSThreadId)
680 {
681 break;
682 }
683 }
684
685 if (pThread != NULL)
686 {
687 STRESS_LOG2(LF_SYNC, LL_INFO1000, "T::ST - recycling thread 0x%p (state: 0x%x)\n", pThread, pThread->m_State.Load());
688 }
689 }
690
691 // It's perfectly reasonable to not find this guy. It's just an unrelated
692 // thread spinning up.
693 if (pThread)
694 {
695 if (IsThreadPoolWorkerSpecialThread())
696 {
697 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
698 pThread->SetBackground(TRUE);
699 }
700 else if (IsThreadPoolIOCompletionSpecialThread())
701 {
702 FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
703 pThread->SetBackground(TRUE);
704 }
705 else if (IsTimerSpecialThread() || IsWaitSpecialThread())
706 {
707 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
708 pThread->SetBackground(TRUE);
709 }
710
711 BOOL fStatus = pThread->HasStarted();
712 ensurePreemptive.SuppressRelease();
713 return fStatus ? pThread : NULL;
714 }
715 }
716
717 // First time we've seen this thread in the runtime:
718 pThread = new Thread();
719
720// What state are we in here? COOP???
721
722 Holder<Thread*,DoNothing<Thread*>,DeleteThread> threadHolder(pThread);
723
724 CExecutionEngine::SetupTLSForThread(pThread);
725
726 // A host can deny a thread entering runtime by returning a NULL IHostTask.
727 // But we do want threads used by threadpool.
728 if (IsThreadPoolWorkerSpecialThread() ||
729 IsThreadPoolIOCompletionSpecialThread() ||
730 IsTimerSpecialThread() ||
731 IsWaitSpecialThread())
732 {
733 fInternal = TRUE;
734 }
735
736 if (!pThread->InitThread(fInternal) ||
737 !pThread->PrepareApartmentAndContext())
738 ThrowOutOfMemory();
739
740 // reset any unstarted bits on the thread object
741 FastInterlockAnd((ULONG *) &pThread->m_State, ~Thread::TS_Unstarted);
742 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_LegalToJoin);
743
744 ThreadStore::AddThread(pThread);
745
746 BOOL fOK = SetThread(pThread);
747 _ASSERTE (fOK);
748 fOK = SetAppDomain(pThread->GetDomain());
749 _ASSERTE (fOK);
750
751#ifdef FEATURE_INTEROP_DEBUGGING
752 // Ensure that debugger word slot is allocated
753 UnsafeTlsSetValue(g_debuggerWordTLSIndex, 0);
754#endif
755
756 // We now have a Thread object visable to the RS. unmark special status.
757 hCantStop.Release();
758
759 pThread->SetupThreadForHost();
760
761 threadHolder.SuppressRelease();
762
763 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_FullyInitialized);
764
765#ifdef DEBUGGING_SUPPORTED
766 //
767 // If we're debugging, let the debugger know that this
768 // thread is up and running now.
769 //
770 if (CORDebuggerAttached())
771 {
772 g_pDebugInterface->ThreadCreated(pThread);
773 }
774 else
775 {
776 LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", pThread->GetThreadId()));
777 }
778#endif // DEBUGGING_SUPPORTED
779
780#ifdef PROFILING_SUPPORTED
781 // If a profiler is present, then notify the profiler that a
782 // thread has been created.
783 if (!IsGCSpecialThread())
784 {
785 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
786 {
787 GCX_PREEMP();
788 g_profControlBlock.pProfInterface->ThreadCreated(
789 (ThreadID)pThread);
790 }
791
792 DWORD osThreadId = ::GetCurrentThreadId();
793 g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
794 (ThreadID)pThread, osThreadId);
795 END_PIN_PROFILER();
796 }
797#endif // PROFILING_SUPPORTED
798
799 _ASSERTE(!pThread->IsBackground()); // doesn't matter, but worth checking
800 pThread->SetBackground(TRUE);
801
802 ensurePreemptive.SuppressRelease();
803
804 if (IsThreadPoolWorkerSpecialThread())
805 {
806 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
807 }
808 else if (IsThreadPoolIOCompletionSpecialThread())
809 {
810 FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
811 }
812 else if (IsTimerSpecialThread() || IsWaitSpecialThread())
813 {
814 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
815 }
816
817#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
818 if (g_fEnableARM)
819 {
820 pThread->QueryThreadProcessorUsage();
821 }
822#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
823
824#ifdef FEATURE_EVENT_TRACE
825 ETW::ThreadLog::FireThreadCreated(pThread);
826#endif // FEATURE_EVENT_TRACE
827
828 return pThread;
829}
830
831//-------------------------------------------------------------------------
832// Public function: SetupUnstartedThread()
833// This sets up a Thread object for an exposed System.Thread that
834// has not been started yet. This allows us to properly enumerate all threads
835// in the ThreadStore, so we can report on even unstarted threads. Clearly
836// there is no physical thread to match, yet.
837//
838// When there is, complete the setup with code:Thread::HasStarted()
839//-------------------------------------------------------------------------
840Thread* SetupUnstartedThread(BOOL bRequiresTSL)
841{
842 CONTRACTL {
843 THROWS;
844 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
845 }
846 CONTRACTL_END;
847
848 Thread* pThread = new Thread();
849
850 FastInterlockOr((ULONG *) &pThread->m_State,
851 (Thread::TS_Unstarted | Thread::TS_WeOwn));
852
853 ThreadStore::AddThread(pThread, bRequiresTSL);
854
855 return pThread;
856}
857
858//-------------------------------------------------------------------------
859// Public function: DestroyThread()
860// Destroys the specified Thread object, for a thread which is about to die.
861//-------------------------------------------------------------------------
862void DestroyThread(Thread *th)
863{
864 CONTRACTL {
865 NOTHROW;
866 GC_TRIGGERS;
867 }
868 CONTRACTL_END;
869
870 _ASSERTE (th == GetThread());
871
872 _ASSERTE(g_fEEShutDown || th->m_dwLockCount == 0 || th->m_fRudeAborted);
873
874#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
875 if (g_fEnableARM)
876 {
877 AppDomain* pDomain = th->GetDomain();
878 pDomain->UpdateProcessorUsage(th->QueryThreadProcessorUsage());
879 FireEtwThreadTerminated((ULONGLONG)th, (ULONGLONG)pDomain, GetClrInstanceId());
880 }
881#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
882
883 th->FinishSOWork();
884
885 GCX_PREEMP_NO_DTOR();
886
887 if (th->IsAbortRequested()) {
888 // Reset trapping count.
889 th->UnmarkThreadForAbort(Thread::TAR_ALL);
890 }
891
892 // Clear any outstanding stale EH state that maybe still active on the thread.
893#ifdef WIN64EXCEPTIONS
894 ExceptionTracker::PopTrackers((void*)-1);
895#else // !WIN64EXCEPTIONS
896#ifdef _TARGET_X86_
897 PTR_ThreadExceptionState pExState = th->GetExceptionState();
898 if (pExState->IsExceptionInProgress())
899 {
900 GCX_COOP();
901 pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
902 }
903#else // !_TARGET_X86_
904#error Unsupported platform
905#endif // _TARGET_X86_
906#endif // WIN64EXCEPTIONS
907
908#ifdef FEATURE_PERFTRACING
909 // Before the thread dies, mark its buffers as no longer owned
910 // so that they can be cleaned up after the thread dies.
911 EventPipeBufferList *pBufferList = th->GetEventPipeBufferList();
912 if(pBufferList != NULL)
913 {
914 pBufferList->SetOwnedByThread(false);
915 }
916#endif // FEATURE_PERFTRACING
917
918 if (g_fEEShutDown == 0)
919 {
920 th->SetThreadState(Thread::TS_ReportDead);
921 th->OnThreadTerminate(FALSE);
922 }
923}
924
925//-------------------------------------------------------------------------
926// Public function: DetachThread()
927// Marks the thread as needing to be destroyed, but doesn't destroy it yet.
928//-------------------------------------------------------------------------
929HRESULT Thread::DetachThread(BOOL fDLLThreadDetach)
930{
931 // !!! Can not use contract here.
932 // !!! Contract depends on Thread object for GC_TRIGGERS.
933 // !!! At the end of this function, we call InternalSwitchOut,
934 // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
935 STATIC_CONTRACT_NOTHROW;
936 STATIC_CONTRACT_GC_NOTRIGGER;
937
938 // @todo . We need to probe here, but can't introduce destructors etc.
939 BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
940
941 // Clear any outstanding stale EH state that maybe still active on the thread.
942#ifdef WIN64EXCEPTIONS
943 ExceptionTracker::PopTrackers((void*)-1);
944#else // !WIN64EXCEPTIONS
945#ifdef _TARGET_X86_
946 PTR_ThreadExceptionState pExState = GetExceptionState();
947 if (pExState->IsExceptionInProgress())
948 {
949 GCX_COOP();
950 pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
951 }
952#else // !_TARGET_X86_
953#error Unsupported platform
954#endif // _TARGET_X86_
955#endif // WIN64EXCEPTIONS
956
957#ifdef FEATURE_COMINTEROP
958 IErrorInfo *pErrorInfo;
959 // Avoid calling GetErrorInfo() if ole32 has already executed the DLL_THREAD_DETACH,
960 // otherwise we'll cause ole32 to re-allocate and leak its TLS data (SOleTlsData).
961 if (ClrTeb::GetOleReservedPtr() != NULL && GetErrorInfo(0, &pErrorInfo) == S_OK)
962 {
963 // if this is our IErrorInfo, release it now - we don't want ole32 to do it later as
964 // part of its DLL_THREAD_DETACH as we won't be able to handle the call at that point
965 if (!ComInterfaceSlotIs(pErrorInfo, 2, Unknown_ReleaseSpecial_IErrorInfo))
966 {
967 // if it's not our IErrorInfo, put it back
968 SetErrorInfo(0, pErrorInfo);
969 }
970 pErrorInfo->Release();
971 }
972
973 // Revoke our IInitializeSpy registration only if we are not in DLL_THREAD_DETACH
974 // (COM will do it or may have already done it automatically in that case).
975 if (!fDLLThreadDetach)
976 {
977 RevokeApartmentSpy();
978 }
979#endif // FEATURE_COMINTEROP
980
981 _ASSERTE(!PreemptiveGCDisabled());
982 _ASSERTE(g_fEEShutDown || m_dwLockCount == 0 || m_fRudeAborted);
983
984 _ASSERTE ((m_State & Thread::TS_Detached) == 0);
985
986 _ASSERTE (this == GetThread());
987
988#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
989 if (g_fEnableARM && m_pDomain)
990 {
991 m_pDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
992 FireEtwThreadTerminated((ULONGLONG)this, (ULONGLONG)m_pDomain, GetClrInstanceId());
993 }
994#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
995
996 FinishSOWork();
997
998 FastInterlockIncrement(&Thread::m_DetachCount);
999
1000 if (IsAbortRequested()) {
1001 // Reset trapping count.
1002 UnmarkThreadForAbort(Thread::TAR_ALL);
1003 }
1004
1005 if (!IsBackground())
1006 {
1007 FastInterlockIncrement(&Thread::m_ActiveDetachCount);
1008 ThreadStore::CheckForEEShutdown();
1009 }
1010
1011 END_CONTRACT_VIOLATION;
1012
1013 HANDLE hThread = GetThreadHandle();
1014 SetThreadHandle (SWITCHOUT_HANDLE_VALUE);
1015 while (m_dwThreadHandleBeingUsed > 0)
1016 {
1017 // Another thread is using the handle now.
1018#undef Sleep
1019 // We can not call __SwitchToThread since we can not go back to host.
1020 ::Sleep(10);
1021#define Sleep(a) Dont_Use_Sleep(a)
1022 }
1023 if (m_WeOwnThreadHandle && m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
1024 {
1025 m_ThreadHandleForClose = hThread;
1026 }
1027
1028 // We need to make sure that TLS are touched last here.
1029 SetThread(NULL);
1030 SetAppDomain(NULL);
1031
1032#ifdef ENABLE_CONTRACTS_DATA
1033 m_pClrDebugState = NULL;
1034#endif //ENABLE_CONTRACTS_DATA
1035
1036#ifdef FEATURE_PERFTRACING
1037 // Before the thread dies, mark its buffers as no longer owned
1038 // so that they can be cleaned up after the thread dies.
1039 EventPipeBufferList *pBufferList = m_pEventPipeBufferList.Load();
1040 if(pBufferList != NULL)
1041 {
1042 pBufferList->SetOwnedByThread(false);
1043 }
1044#endif // FEATURE_PERFTRACING
1045
1046 FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead));
1047 // Do not touch Thread object any more. It may be destroyed.
1048
1049 // These detached threads will be cleaned up by finalizer thread. But if the process uses
1050 // little managed heap, it will be a while before GC happens, and finalizer thread starts
1051 // working on detached thread. So we wake up finalizer thread to clean up resources.
1052 //
1053 // (It's possible that this is the startup thread, and startup failed, and so the finalization
1054 // machinery isn't fully initialized. Hence this check.)
1055 if (g_fEEStarted)
1056 FinalizerThread::EnableFinalization();
1057
1058 return S_OK;
1059}
1060
1061DWORD GetRuntimeId()
1062{
1063 LIMITED_METHOD_CONTRACT;
1064
1065 return _tls_index;
1066}
1067
1068//---------------------------------------------------------------------------
1069// Creates new Thread for reverse p-invoke calls.
1070//---------------------------------------------------------------------------
1071Thread* WINAPI CreateThreadBlockThrow()
1072{
1073
1074 WRAPPER_NO_CONTRACT;
1075
1076 // This is a workaround to disable our check for throwing exception in SetupThread.
1077 // We want to throw an exception for reverse p-invoke, and our assertion may fire if
1078 // a unmanaged caller does not setup an exception handler.
1079 CONTRACT_VIOLATION(ThrowsViolation); // WON'T FIX - This enables catastrophic failure exception in reverse P/Invoke - the only way we can communicate an error to legacy code.
1080 Thread* pThread = NULL;
1081 BEGIN_ENTRYPOINT_THROWS;
1082
1083 if (!CanRunManagedCode())
1084 {
1085 // CLR is shutting down - someone's DllMain detach event may be calling back into managed code.
1086 // It is misleading to use our COM+ exception code, since this is not a managed exception.
1087 ULONG_PTR arg = E_PROCESS_SHUTDOWN_REENTRY;
1088 RaiseException(EXCEPTION_EXX, 0, 1, &arg);
1089 }
1090
1091 HRESULT hr = S_OK;
1092 pThread = SetupThreadNoThrow(&hr);
1093 if (pThread == NULL)
1094 {
1095 // Creating Thread failed, and we need to throw an exception to report status.
1096 // It is misleading to use our COM+ exception code, since this is not a managed exception.
1097 ULONG_PTR arg = hr;
1098 RaiseException(EXCEPTION_EXX, 0, 1, &arg);
1099 }
1100 END_ENTRYPOINT_THROWS;
1101
1102 return pThread;
1103}
1104
1105#ifdef _DEBUG
1106DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE;
1107#endif
1108
1109extern "C" void STDCALL JIT_PatchedCodeStart();
1110extern "C" void STDCALL JIT_PatchedCodeLast();
1111
1112//---------------------------------------------------------------------------
1113// One-time initialization. Called during Dll initialization. So
1114// be careful what you do in here!
1115//---------------------------------------------------------------------------
1116void InitThreadManager()
1117{
1118 CONTRACTL {
1119 THROWS;
1120 GC_TRIGGERS;
1121 }
1122 CONTRACTL_END;
1123
1124 InitializeYieldProcessorNormalizedCrst();
1125
1126 // All patched helpers should fit into one page.
1127 // If you hit this assert on retail build, there is most likely problem with BBT script.
1128 _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
1129
1130 // I am using virtual protect to cover the entire range that this code falls in.
1131 //
1132
1133 // We could reset it to non-writeable inbetween GCs and such, but then we'd have to keep on re-writing back and forth,
1134 // so instead we'll leave it writable from here forward.
1135
1136 DWORD oldProt;
1137 if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart,
1138 PAGE_EXECUTE_READWRITE, &oldProt))
1139 {
1140 _ASSERTE(!"ClrVirtualProtect of code page failed");
1141 COMPlusThrowWin32();
1142 }
1143
1144#ifndef FEATURE_PAL
1145 _ASSERTE(GetThread() == NULL);
1146
1147 PTEB Teb = NtCurrentTeb();
1148 BYTE** tlsArray = (BYTE**)Teb->ThreadLocalStoragePointer;
1149 BYTE* tlsData = (BYTE*)tlsArray[_tls_index];
1150
1151 size_t offsetOfCurrentThreadInfo = (BYTE*)&gCurrentThreadInfo - tlsData;
1152
1153 _ASSERTE(offsetOfCurrentThreadInfo < 0x8000);
1154 _ASSERTE(_tls_index < 0x10000);
1155
1156 // Save gCurrentThreadInfo location for debugger
1157 g_TlsIndex = (DWORD)(_tls_index + (offsetOfCurrentThreadInfo << 16) + 0x80000000);
1158
1159 _ASSERTE(g_TrapReturningThreads == 0);
1160#endif // !FEATURE_PAL
1161
1162#ifdef FEATURE_INTEROP_DEBUGGING
1163 g_debuggerWordTLSIndex = UnsafeTlsAlloc();
1164 if (g_debuggerWordTLSIndex == TLS_OUT_OF_INDEXES)
1165 COMPlusThrowWin32();
1166#endif
1167
1168 __ClrFlsGetBlock = CExecutionEngine::GetTlsData;
1169
1170 IfFailThrow(Thread::CLRSetThreadStackGuarantee(Thread::STSGuarantee_Force));
1171
1172 ThreadStore::InitThreadStore();
1173
1174 // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
1175 // If you remove this flag, we will switch to preemptive mode when entering
1176 // g_DeadlockAwareCrst, which means all functions that enter it will become
1177 // GC_TRIGGERS. (This includes all uses of CrstHolder.) So be sure
1178 // to update the contracts if you remove this flag.
1179 g_DeadlockAwareCrst.Init(CrstDeadlockDetection, CRST_UNSAFE_ANYMODE);
1180
1181#ifdef _DEBUG
1182 // Randomize OBJREF_HASH to handle hash collision.
1183 Thread::OBJREF_HASH = OBJREF_TABSIZE - (DbgGetEXETimeStamp()%10);
1184#endif // _DEBUG
1185
1186 ThreadSuspend::Initialize();
1187}
1188
1189
1190//************************************************************************
1191// Thread members
1192//************************************************************************
1193
1194
1195#if defined(_DEBUG) && defined(TRACK_SYNC)
1196
1197// One outstanding synchronization held by this thread:
1198struct Dbg_TrackSyncEntry
1199{
1200 UINT_PTR m_caller;
1201 AwareLock *m_pAwareLock;
1202
1203 BOOL Equiv (UINT_PTR caller, void *pAwareLock)
1204 {
1205 LIMITED_METHOD_CONTRACT;
1206
1207 return (m_caller == caller) && (m_pAwareLock == pAwareLock);
1208 }
1209
1210 BOOL Equiv (void *pAwareLock)
1211 {
1212 LIMITED_METHOD_CONTRACT;
1213
1214 return (m_pAwareLock == pAwareLock);
1215 }
1216};
1217
1218// Each thread has a stack that tracks all enter and leave requests
1219struct Dbg_TrackSyncStack : public Dbg_TrackSync
1220{
1221 enum
1222 {
1223 MAX_TRACK_SYNC = 20, // adjust stack depth as necessary
1224 };
1225
1226 void EnterSync (UINT_PTR caller, void *pAwareLock);
1227 void LeaveSync (UINT_PTR caller, void *pAwareLock);
1228
1229 Dbg_TrackSyncEntry m_Stack [MAX_TRACK_SYNC];
1230 UINT_PTR m_StackPointer;
1231 BOOL m_Active;
1232
1233 Dbg_TrackSyncStack() : m_StackPointer(0),
1234 m_Active(TRUE)
1235 {
1236 LIMITED_METHOD_CONTRACT;
1237 }
1238};
1239
1240// ensure that registers are preserved across this call
1241#ifdef _MSC_VER
1242#pragma optimize("", off)
1243#endif
1244// A pain to do all this from ASM, but watch out for trashed registers
1245EXTERN_C void EnterSyncHelper (UINT_PTR caller, void *pAwareLock)
1246{
1247 BEGIN_ENTRYPOINT_THROWS;
1248 WRAPPER_NO_CONTRACT;
1249 GetThread()->m_pTrackSync->EnterSync(caller, pAwareLock);
1250 END_ENTRYPOINT_THROWS;
1251
1252}
1253EXTERN_C void LeaveSyncHelper (UINT_PTR caller, void *pAwareLock)
1254{
1255 BEGIN_ENTRYPOINT_THROWS;
1256 WRAPPER_NO_CONTRACT;
1257 GetThread()->m_pTrackSync->LeaveSync(caller, pAwareLock);
1258 END_ENTRYPOINT_THROWS;
1259
1260}
1261#ifdef _MSC_VER
1262#pragma optimize("", on)
1263#endif
1264
1265void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock)
1266{
1267 LIMITED_METHOD_CONTRACT;
1268
1269 STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
1270 caller,
1271 ((AwareLock*)pAwareLock)->GetRecursionLevel(),
1272 ((AwareLock*)pAwareLock)->GetLockState(),
1273 ((AwareLock*)pAwareLock)->GetHoldingThread());
1274
1275 if (m_Active)
1276 {
1277 if (m_StackPointer >= MAX_TRACK_SYNC)
1278 {
1279 _ASSERTE(!"Overflowed synchronization stack checking. Disabling");
1280 m_Active = FALSE;
1281 return;
1282 }
1283 }
1284 m_Stack[m_StackPointer].m_caller = caller;
1285 m_Stack[m_StackPointer].m_pAwareLock = (AwareLock *) pAwareLock;
1286
1287 m_StackPointer++;
1288
1289}
1290
1291void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock)
1292{
1293 WRAPPER_NO_CONTRACT;
1294
1295 STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
1296 caller,
1297 ((AwareLock*)pAwareLock)->GetRecursionLevel(),
1298 ((AwareLock*)pAwareLock)->GetLockState(),
1299 ((AwareLock*)pAwareLock)->GetHoldingThread());
1300
1301 if (m_Active)
1302 {
1303 if (m_StackPointer == 0)
1304 _ASSERTE(!"Underflow in leaving synchronization");
1305 else
1306 if (m_Stack[m_StackPointer - 1].Equiv(pAwareLock))
1307 {
1308 m_StackPointer--;
1309 }
1310 else
1311 {
1312 for (int i=m_StackPointer - 2; i>=0; i--)
1313 {
1314 if (m_Stack[i].Equiv(pAwareLock))
1315 {
1316 _ASSERTE(!"Locks are released out of order. This might be okay...");
1317 memcpy(&m_Stack[i], &m_Stack[i+1],
1318 sizeof(m_Stack[0]) * (m_StackPointer - i - 1));
1319
1320 return;
1321 }
1322 }
1323 _ASSERTE(!"Trying to release a synchronization lock which isn't held");
1324 }
1325 }
1326}
1327
1328#endif // TRACK_SYNC
1329
1330
1331static DWORD dwHashCodeSeed = 123456789;
1332
1333#ifdef _DEBUG
1334void CheckADValidity(AppDomain* pDomain, DWORD ADValidityKind)
1335{
1336 CONTRACTL
1337 {
1338 NOTHROW;
1339 FORBID_FAULT;
1340 GC_NOTRIGGER;
1341 MODE_ANY;
1342 }
1343 CONTRACTL_END;
1344
1345 //
1346 // Note: this apparently checks if any one of the supplied conditions is satisified, rather
1347 // than checking that *all* of them are satisfied. One would have expected it to assert all of the
1348 // conditions but it does not.
1349 //
1350
1351 CONTRACT_VIOLATION(FaultViolation);
1352 if (::GetAppDomain()==pDomain)
1353 return;
1354 if ((ADValidityKind & ADV_DEFAULTAD) &&
1355 pDomain->IsDefaultDomain())
1356 return;
1357 if ((ADValidityKind & ADV_ITERATOR) &&
1358 pDomain->IsHeldByIterator())
1359 return;
1360 if ((ADValidityKind & ADV_CREATING) &&
1361 pDomain->IsBeingCreated())
1362 return;
1363 if ((ADValidityKind & ADV_COMPILATION) &&
1364 pDomain->IsCompilationDomain())
1365 return;
1366 if ((ADValidityKind & ADV_FINALIZER) &&
1367 IsFinalizerThread())
1368 return;
1369 if ((ADValidityKind & ADV_RUNNINGIN) &&
1370 pDomain->IsRunningIn(GetThread()))
1371 return;
1372 if ((ADValidityKind & ADV_REFTAKER) &&
1373 pDomain->IsHeldByRefTaker())
1374 return;
1375
1376 _ASSERTE(!"Appdomain* can be invalid");
1377}
1378#endif
1379
1380
1381//--------------------------------------------------------------------
1382// Thread construction
1383//--------------------------------------------------------------------
1384Thread::Thread()
1385{
1386 CONTRACTL {
1387 THROWS;
1388 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
1389 }
1390 CONTRACTL_END;
1391
1392 m_pFrame = FRAME_TOP;
1393
1394 m_fPreemptiveGCDisabled = 0;
1395
1396#ifdef _DEBUG
1397 m_ulForbidTypeLoad = 0;
1398 m_GCOnTransitionsOK = TRUE;
1399#endif
1400
1401#ifdef ENABLE_CONTRACTS
1402 m_pClrDebugState = NULL;
1403 m_ulEnablePreemptiveGCCount = 0;
1404#endif
1405
1406 m_dwLockCount = 0;
1407 m_dwBeginLockCount = 0;
1408
1409#ifdef _DEBUG
1410 dbg_m_cSuspendedThreads = 0;
1411 dbg_m_cSuspendedThreadsWithoutOSLock = 0;
1412 m_Creater.Clear();
1413 m_dwUnbreakableLockCount = 0;
1414#endif
1415
1416 m_dwForbidSuspendThread = 0;
1417
1418 // Initialize lock state
1419 m_pHead = &m_embeddedEntry;
1420 m_embeddedEntry.pNext = m_pHead;
1421 m_embeddedEntry.pPrev = m_pHead;
1422 m_embeddedEntry.dwLLockID = 0;
1423 m_embeddedEntry.dwULockID = 0;
1424 m_embeddedEntry.wReaderLevel = 0;
1425
1426 m_pBlockingLock = NULL;
1427
1428 m_alloc_context.init();
1429 m_thAllocContextObj = 0;
1430
1431 m_UserInterrupt = 0;
1432 m_WaitEventLink.m_Next = NULL;
1433 m_WaitEventLink.m_LinkSB.m_pNext = NULL;
1434 m_ThreadHandle = INVALID_HANDLE_VALUE;
1435 m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
1436 m_ThreadHandleForResume = INVALID_HANDLE_VALUE;
1437 m_WeOwnThreadHandle = FALSE;
1438
1439#ifdef _DEBUG
1440 m_ThreadId = UNINITIALIZED_THREADID;
1441#endif //_DEBUG
1442
1443 // Initialize this variable to a very different start value for each thread
1444 // Using linear congruential generator from Knuth Vol. 2, p. 102, line 24
1445 dwHashCodeSeed = dwHashCodeSeed * 1566083941 + 1;
1446 m_dwHashCodeSeed = dwHashCodeSeed;
1447
1448 m_hijackLock = FALSE;
1449
1450 m_OSThreadId = 0;
1451 m_Priority = INVALID_THREAD_PRIORITY;
1452 m_ExternalRefCount = 1;
1453 m_UnmanagedRefCount = 0;
1454 m_State = TS_Unstarted;
1455 m_StateNC = TSNC_Unknown;
1456
1457 // It can't be a LongWeakHandle because we zero stuff out of the exposed
1458 // object as it is finalized. At that point, calls to GetCurrentThread()
1459 // had better get a new one,!
1460 m_ExposedObject = CreateGlobalShortWeakHandle(NULL);
1461
1462 GlobalShortWeakHandleHolder exposedObjectHolder(m_ExposedObject);
1463
1464 m_StrongHndToExposedObject = CreateGlobalStrongHandle(NULL);
1465 GlobalStrongHandleHolder strongHndToExposedObjectHolder(m_StrongHndToExposedObject);
1466
1467 m_LastThrownObjectHandle = NULL;
1468 m_ltoIsUnhandled = FALSE;
1469
1470 m_AbortReason = NULL;
1471
1472 m_debuggerFilterContext = NULL;
1473 m_debuggerCantStop = 0;
1474 m_fInteropDebuggingHijacked = FALSE;
1475 m_profilerCallbackState = 0;
1476#ifdef FEATURE_PROFAPI_ATTACH_DETACH
1477 m_dwProfilerEvacuationCounter = 0;
1478#endif // FEATURE_PROFAPI_ATTACH_DETACH
1479
1480 m_pProfilerFilterContext = NULL;
1481
1482 m_CacheStackBase = 0;
1483 m_CacheStackLimit = 0;
1484 m_CacheStackSufficientExecutionLimit = 0;
1485
1486 m_LastAllowableStackAddress= 0;
1487 m_ProbeLimit = 0;
1488
1489#ifdef _DEBUG
1490 m_pCleanedStackBase = NULL;
1491#endif
1492
1493#ifdef STACK_GUARDS_DEBUG
1494 m_pCurrentStackGuard = NULL;
1495#endif
1496
1497#ifdef FEATURE_HIJACK
1498 m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC;
1499 m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC;
1500
1501#ifndef PLATFORM_UNIX
1502 X86_ONLY(m_LastRedirectIP = 0);
1503 X86_ONLY(m_SpinCount = 0);
1504#endif // PLATFORM_UNIX
1505#endif // FEATURE_HIJACK
1506
1507#if defined(_DEBUG) && defined(TRACK_SYNC)
1508 m_pTrackSync = new Dbg_TrackSyncStack;
1509 NewHolder<Dbg_TrackSyncStack> trackSyncHolder(static_cast<Dbg_TrackSyncStack*>(m_pTrackSync));
1510#endif // TRACK_SYNC
1511
1512 m_RequestedStackSize = 0;
1513 m_PreventAsync = 0;
1514 m_PreventAbort = 0;
1515 m_nNestedMarshalingExceptions = 0;
1516 m_pDomain = NULL;
1517#ifdef FEATURE_COMINTEROP
1518 m_fDisableComObjectEagerCleanup = false;
1519#endif //FEATURE_COMINTEROP
1520 m_fHasDeadThreadBeenConsideredForGCTrigger = false;
1521 m_TraceCallCount = 0;
1522 m_ThrewControlForThread = 0;
1523 m_OSContext = NULL;
1524 m_ThreadTasks = (ThreadTasks)0;
1525 m_pLoadLimiter= NULL;
1526 m_pLoadingFile = NULL;
1527
1528 // The state and the tasks must be 32-bit aligned for atomicity to be guaranteed.
1529 _ASSERTE((((size_t) &m_State) & 3) == 0);
1530 _ASSERTE((((size_t) &m_ThreadTasks) & 3) == 0);
1531
1532 // Track perf counter for the logical thread object.
1533 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical++);
1534
1535 // On all callbacks, call the trap code, which we now have
1536 // wired to cause a GC. Thus we will do a GC on all Transition Frame Transitions (and more).
1537 if (GCStress<cfg_transition>::IsEnabled())
1538 {
1539 m_State = (ThreadState) (m_State | TS_GCOnTransitions);
1540 }
1541
1542 m_AbortType = EEPolicy::TA_None;
1543 m_AbortInfo = 0;
1544 m_AbortEndTime = MAXULONGLONG;
1545 m_RudeAbortEndTime = MAXULONGLONG;
1546 m_AbortController = 0;
1547 m_AbortRequestLock = 0;
1548 m_fRudeAbortInitiated = FALSE;
1549
1550 m_pIOCompletionContext = NULL;
1551
1552#ifdef _DEBUG
1553 m_fRudeAborted = FALSE;
1554 m_dwAbortPoint = 0;
1555#endif
1556
1557 m_OSContext = new CONTEXT();
1558 NewHolder<CONTEXT> contextHolder(m_OSContext);
1559
1560 m_pSavedRedirectContext = NULL;
1561 NewHolder<CONTEXT> savedRedirectContextHolder(m_pSavedRedirectContext);
1562
1563#ifdef FEATURE_COMINTEROP
1564 m_pRCWStack = new RCWStackHeader();
1565#endif
1566
1567#ifdef _DEBUG
1568 m_bGCStressing = FALSE;
1569 m_bUniqueStacking = FALSE;
1570#endif
1571
1572 m_pPendingTypeLoad = NULL;
1573
1574#ifdef FEATURE_PREJIT
1575 m_pIBCInfo = NULL;
1576#endif
1577
1578 m_dwAVInRuntimeImplOkayCount = 0;
1579
1580#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(PLATFORM_UNIX) // GCCOVER
1581 m_fPreemptiveGCDisabledForGCStress = false;
1582#endif
1583
1584#ifdef _DEBUG
1585 m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1;
1586#endif
1587
1588 m_dwHostTaskRefCount = 0;
1589
1590 m_pExceptionDuringStartup = NULL;
1591
1592#ifdef HAVE_GCCOVER
1593 m_pbDestCode = NULL;
1594 m_pbSrcCode = NULL;
1595#if defined(GCCOVER_TOLERATE_SPURIOUS_AV)
1596 m_pLastAVAddress = NULL;
1597#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)
1598#endif // HAVE_GCCOVER
1599
1600 m_fCompletionPortDrained = FALSE;
1601
1602 m_debuggerActivePatchSkipper = NULL;
1603 m_dwThreadHandleBeingUsed = 0;
1604 SetProfilerCallbacksAllowed(TRUE);
1605
1606 m_pCreatingThrowableForException = NULL;
1607#ifdef _DEBUG
1608 m_dwDisableAbortCheckCount = 0;
1609#endif // _DEBUG
1610
1611#ifdef WIN64EXCEPTIONS
1612 m_dwIndexClauseForCatch = 0;
1613 m_sfEstablisherOfActualHandlerFrame.Clear();
1614#endif // WIN64EXCEPTIONS
1615
1616 m_threadPoolCompletionCount = 0;
1617
1618 Thread *pThread = GetThread();
1619 InitContext();
1620 if (pThread)
1621 {
1622 _ASSERTE(pThread->GetDomain());
1623 // Start off the new thread in the default context of
1624 // the creating thread's appDomain. This could be changed by SetDelegate
1625 SetKickOffDomainId(pThread->GetDomain()->GetId());
1626 } else
1627 SetKickOffDomainId((ADID)DefaultADID);
1628
1629 // Do not expose thread until it is fully constructed
1630 g_pThinLockThreadIdDispenser->NewId(this, this->m_ThreadId);
1631
1632 //
1633 // DO NOT ADD ADDITIONAL CONSTRUCTION AFTER THIS POINT.
1634 // NewId() allows this Thread instance to be accessed via a Thread Id. Do not
1635 // add additional construction after this point to prevent the race condition
1636 // of accessing a partially constructed Thread via Thread Id lookup.
1637 //
1638
1639 exposedObjectHolder.SuppressRelease();
1640 strongHndToExposedObjectHolder.SuppressRelease();
1641#if defined(_DEBUG) && defined(TRACK_SYNC)
1642 trackSyncHolder.SuppressRelease();
1643#endif
1644 contextHolder.SuppressRelease();
1645 savedRedirectContextHolder.SuppressRelease();
1646
1647#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1648 m_ullProcessorUsageBaseline = 0;
1649#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
1650
1651#ifdef FEATURE_COMINTEROP
1652 m_uliInitializeSpyCookie.QuadPart = 0ul;
1653 m_fInitializeSpyRegistered = false;
1654 m_pLastSTACtxCookie = NULL;
1655#endif // FEATURE_COMINTEROP
1656
1657 m_fGCSpecial = FALSE;
1658
1659 m_wCPUGroup = 0;
1660 m_pAffinityMask = 0;
1661
1662 m_pAllLoggedTypes = NULL;
1663
1664#ifdef FEATURE_PERFTRACING
1665 m_pEventPipeBufferList = NULL;
1666 m_eventWriteInProgress = false;
1667 memset(&m_activityId, 0, sizeof(m_activityId));
1668#endif // FEATURE_PERFTRACING
1669 m_HijackReturnKind = RT_Illegal;
1670}
1671
1672//--------------------------------------------------------------------
1673// Failable initialization occurs here.
1674//--------------------------------------------------------------------
1675BOOL Thread::InitThread(BOOL fInternal)
1676{
1677 CONTRACTL {
1678 THROWS;
1679 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
1680 }
1681 CONTRACTL_END;
1682
1683
1684 HANDLE hDup = INVALID_HANDLE_VALUE;
1685 BOOL ret = TRUE;
1686
1687 // This message actually serves a purpose (which is why it is always run)
1688 // The Stress log is run during hijacking, when other threads can be suspended
1689 // at arbitrary locations (including when holding a lock that NT uses to serialize
1690 // all memory allocations). By sending a message now, we insure that the stress
1691 // log will not allocate memory at these critical times an avoid deadlock.
1692 STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread managed Thread %p Thread Id = %x\n", this, GetThreadId());
1693
1694 if ((m_State & TS_WeOwn) == 0)
1695 {
1696 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads++);
1697 }
1698 else
1699 {
1700 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical++);
1701 }
1702
1703#ifndef FEATURE_PAL
1704 // workaround: Remove this when we flow impersonation token to host.
1705 BOOL reverted = FALSE;
1706 HANDLE threadToken = INVALID_HANDLE_VALUE;
1707#endif // !FEATURE_PAL
1708
1709 if (m_ThreadHandle == INVALID_HANDLE_VALUE)
1710 {
1711 // For WinCE, all clients have the same handle for a thread. Duplication is
1712 // not possible. We make sure we never close this handle unless we created
1713 // the thread (TS_WeOwn).
1714 //
1715 // For Win32, each client has its own handle. This is achieved by duplicating
1716 // the pseudo-handle from ::GetCurrentThread(). Unlike WinCE, this service
1717 // returns a pseudo-handle which is only useful for duplication. In this case
1718 // each client is responsible for closing its own (duplicated) handle.
1719 //
1720 // We don't bother duplicating if WeOwn, because we created the handle in the
1721 // first place.
1722 // Thread is created when or after the physical thread started running
1723 HANDLE curProcess = ::GetCurrentProcess();
1724
1725#ifndef FEATURE_PAL
1726
1727 // If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only
1728 // THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include
1729 // THREAD_SUSPEND_RESUME nor THREAD_GET_CONTEXT. We need to be able to suspend the thread, and we need to be
1730 // able to get its context. Therefore, if we're impersonating, we revert to self, dup the handle, then
1731 // re-impersonate before we leave this routine.
1732 if (!RevertIfImpersonated(&reverted, &threadToken))
1733 {
1734 COMPlusThrowWin32();
1735 }
1736
1737 class EnsureResetThreadToken
1738 {
1739 private:
1740 BOOL m_NeedReset;
1741 HANDLE m_threadToken;
1742 public:
1743 EnsureResetThreadToken(HANDLE threadToken, BOOL reverted)
1744 {
1745 m_threadToken = threadToken;
1746 m_NeedReset = reverted;
1747 }
1748 ~EnsureResetThreadToken()
1749 {
1750 UndoRevert(m_NeedReset, m_threadToken);
1751 if (m_threadToken != INVALID_HANDLE_VALUE)
1752 {
1753 CloseHandle(m_threadToken);
1754 }
1755 }
1756 };
1757
1758 EnsureResetThreadToken resetToken(threadToken, reverted);
1759
1760#endif // !FEATURE_PAL
1761
1762 if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup,
1763 0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS))
1764 {
1765 _ASSERTE(hDup != INVALID_HANDLE_VALUE);
1766
1767 SetThreadHandle(hDup);
1768 m_WeOwnThreadHandle = TRUE;
1769 }
1770 else
1771 {
1772 COMPlusThrowWin32();
1773 }
1774 }
1775
1776 if ((m_State & TS_WeOwn) == 0)
1777 {
1778 if (!AllocHandles())
1779 {
1780 ThrowOutOfMemory();
1781 }
1782 }
1783
1784 _ASSERTE(HasValidThreadHandle());
1785
1786 m_random.Init();
1787
1788 // Set floating point mode to round to nearest
1789#ifndef FEATURE_PAL
1790 (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR );
1791
1792 m_pTEB = (struct _NT_TIB*)NtCurrentTeb();
1793
1794#endif // !FEATURE_PAL
1795
1796 if (m_CacheStackBase == 0)
1797 {
1798 _ASSERTE(m_CacheStackLimit == 0);
1799 _ASSERTE(m_LastAllowableStackAddress == 0);
1800 _ASSERTE(m_ProbeLimit == 0);
1801 ret = SetStackLimits(fAll);
1802 if (ret == FALSE)
1803 {
1804 ThrowOutOfMemory();
1805 }
1806 }
1807
1808 ret = Thread::AllocateIOCompletionContext();
1809 if (!ret)
1810 {
1811 ThrowOutOfMemory();
1812 }
1813
1814 _ASSERTE(ret); // every failure case for ret should throw.
1815 return ret;
1816}
1817
1818// Allocate all the handles. When we are kicking of a new thread, we can call
1819// here before the thread starts running.
1820BOOL Thread::AllocHandles()
1821{
1822 WRAPPER_NO_CONTRACT;
1823
1824 _ASSERTE(!m_DebugSuspendEvent.IsValid());
1825 _ASSERTE(!m_EventWait.IsValid());
1826
1827 BOOL fOK = TRUE;
1828 EX_TRY {
1829 // create a manual reset event for getting the thread to a safe point
1830 m_DebugSuspendEvent.CreateManualEvent(FALSE);
1831 m_EventWait.CreateManualEvent(TRUE);
1832 }
1833 EX_CATCH {
1834 fOK = FALSE;
1835
1836 if (!m_DebugSuspendEvent.IsValid()) {
1837 m_DebugSuspendEvent.CloseEvent();
1838 }
1839
1840 if (!m_EventWait.IsValid()) {
1841 m_EventWait.CloseEvent();
1842 }
1843 }
1844 EX_END_CATCH(RethrowTerminalExceptions);
1845
1846 return fOK;
1847}
1848
1849
1850//--------------------------------------------------------------------
1851// This is the alternate path to SetupThread/InitThread. If we created
1852// an unstarted thread, we have SetupUnstartedThread/HasStarted.
1853//--------------------------------------------------------------------
1854BOOL Thread::HasStarted(BOOL bRequiresTSL)
1855{
1856 CONTRACTL {
1857 NOTHROW;
1858 DISABLED(GC_NOTRIGGER);
1859 SO_TOLERANT;
1860 }
1861 CONTRACTL_END;
1862
1863 // @todo need a probe that tolerates not having a thread setup at all
1864 CONTRACT_VIOLATION(SOToleranceViolation);
1865
1866 _ASSERTE(!m_fPreemptiveGCDisabled); // can't use PreemptiveGCDisabled() here
1867
1868 // This is cheating a little. There is a pathway here from SetupThread, but only
1869 // via IJW SystemDomain::RunDllMain. Normally SetupThread returns a thread in
1870 // preemptive mode, ready for a transition. But in the IJW case, it can return a
1871 // cooperative mode thread. RunDllMain handles this "surprise" correctly.
1872 m_fPreemptiveGCDisabled = TRUE;
1873
1874 // Normally, HasStarted is called from the thread's entrypoint to introduce it to
1875 // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
1876 // that call into managed code. In that case, the second HasStarted call is
1877 // redundant and should be ignored.
1878 if (GetThread() == this)
1879 return TRUE;
1880
1881
1882 _ASSERTE(GetThread() == 0);
1883 _ASSERTE(HasValidThreadHandle());
1884
1885 BOOL fKeepTLS = FALSE;
1886 BOOL fCanCleanupCOMState = FALSE;
1887 BOOL res = TRUE;
1888
1889 res = SetStackLimits(fAll);
1890 if (res == FALSE)
1891 {
1892 m_pExceptionDuringStartup = Exception::GetOOMException();
1893 goto FAILURE;
1894 }
1895
1896 // If any exception happens during HasStarted, we will cache the exception in Thread::m_pExceptionDuringStartup
1897 // which will be thrown in Thread.Start as an internal exception
1898 EX_TRY
1899 {
1900 //
1901 // Initialization must happen in the following order - hosts like SQL Server depend on this.
1902 //
1903 CExecutionEngine::SetupTLSForThread(this);
1904
1905 fCanCleanupCOMState = TRUE;
1906 res = PrepareApartmentAndContext();
1907 if (!res)
1908 {
1909 ThrowOutOfMemory();
1910 }
1911
1912 InitThread(FALSE);
1913
1914 if (SetThread(this) == FALSE)
1915 {
1916 ThrowOutOfMemory();
1917 }
1918
1919 if (SetAppDomain(m_pDomain) == FALSE)
1920 {
1921 ThrowOutOfMemory();
1922 }
1923
1924 SetupThreadForHost();
1925
1926
1927 ThreadStore::TransferStartedThread(this, bRequiresTSL);
1928
1929#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1930 if (g_fEnableARM)
1931 {
1932 QueryThreadProcessorUsage();
1933 }
1934#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
1935#ifdef FEATURE_EVENT_TRACE
1936 ETW::ThreadLog::FireThreadCreated(this);
1937#endif // FEATURE_EVENT_TRACE
1938 }
1939 EX_CATCH
1940 {
1941 if (__pException != NULL)
1942 {
1943 __pException.SuppressRelease();
1944 m_pExceptionDuringStartup = __pException;
1945 }
1946 res = FALSE;
1947 }
1948 EX_END_CATCH(SwallowAllExceptions);
1949
1950FAILURE:
1951 if (res == FALSE)
1952 {
1953 if (m_fPreemptiveGCDisabled)
1954 {
1955 m_fPreemptiveGCDisabled = FALSE;
1956 }
1957 _ASSERTE (HasThreadState(TS_Unstarted));
1958
1959 SetThreadState(TS_FailStarted);
1960
1961 if (GetThread() != NULL && IsAbortRequested())
1962 UnmarkThreadForAbort(TAR_ALL);
1963
1964 if (!fKeepTLS)
1965 {
1966#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
1967 //
1968 // Undo our call to PrepareApartmentAndContext above, so we don't leak a CoInitialize
1969 // If we're keeping TLS, then the host's call to ExitTask will clean this up instead.
1970 //
1971 if (fCanCleanupCOMState)
1972 {
1973 // The thread pointer in TLS may not be set yet, if we had a failure before we set it.
1974 // So we'll set it up here (we'll unset it a few lines down).
1975 if (SetThread(this) != FALSE)
1976 {
1977 CleanupCOMState();
1978 }
1979 }
1980#endif
1981 FastInterlockDecrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
1982 // One of the components of OtherThreadsComplete() has changed, so check whether
1983 // we should now exit the EE.
1984 ThreadStore::CheckForEEShutdown();
1985 DecExternalCount(/*holdingLock*/ !bRequiresTSL);
1986 SetThread(NULL);
1987 SetAppDomain(NULL);
1988 }
1989 }
1990 else
1991 {
1992 FastInterlockOr((ULONG *) &m_State, TS_FullyInitialized);
1993
1994#ifdef DEBUGGING_SUPPORTED
1995 //
1996 // If we're debugging, let the debugger know that this
1997 // thread is up and running now.
1998 //
1999 if (CORDebuggerAttached())
2000 {
2001 g_pDebugInterface->ThreadCreated(this);
2002 }
2003 else
2004 {
2005 LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", GetThreadId()));
2006 }
2007
2008#endif // DEBUGGING_SUPPORTED
2009
2010#ifdef PROFILING_SUPPORTED
2011 // If a profiler is running, let them know about the new thread.
2012 //
2013 // The call to IsGCSpecial is crucial to avoid a deadlock. See code:Thread::m_fGCSpecial for more
2014 // information
2015 if (!IsGCSpecial())
2016 {
2017 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
2018 BOOL gcOnTransition = GC_ON_TRANSITIONS(FALSE); // disable GCStress 2 to avoid the profiler receiving a RuntimeThreadSuspended notification even before the ThreadCreated notification
2019
2020 {
2021 GCX_PREEMP();
2022 g_profControlBlock.pProfInterface->ThreadCreated((ThreadID) this);
2023 }
2024
2025 GC_ON_TRANSITIONS(gcOnTransition);
2026
2027 DWORD osThreadId = ::GetCurrentThreadId();
2028 g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
2029 (ThreadID) this, osThreadId);
2030 END_PIN_PROFILER();
2031 }
2032#endif // PROFILING_SUPPORTED
2033
2034 // CoreCLR does not support user-requested thread suspension
2035 _ASSERTE(!(m_State & TS_SuspendUnstarted));
2036 }
2037
2038 return res;
2039}
2040
2041BOOL Thread::AllocateIOCompletionContext()
2042{
2043 WRAPPER_NO_CONTRACT;
2044 PIOCompletionContext pIOC = new (nothrow) IOCompletionContext;
2045
2046 if(pIOC != NULL)
2047 {
2048 pIOC->lpOverlapped = NULL;
2049 m_pIOCompletionContext = pIOC;
2050 return TRUE;
2051 }
2052 else
2053 {
2054 return FALSE;
2055 }
2056}
2057
2058VOID Thread::FreeIOCompletionContext()
2059{
2060 WRAPPER_NO_CONTRACT;
2061 if (m_pIOCompletionContext != NULL)
2062 {
2063 PIOCompletionContext pIOC = (PIOCompletionContext) m_pIOCompletionContext;
2064 delete pIOC;
2065 m_pIOCompletionContext = NULL;
2066 }
2067}
2068
2069void Thread::HandleThreadStartupFailure()
2070{
2071 CONTRACTL
2072 {
2073 THROWS;
2074 GC_TRIGGERS;
2075 MODE_COOPERATIVE;
2076 }
2077 CONTRACTL_END;
2078
2079 _ASSERTE(GetThread() != NULL);
2080
2081 struct ProtectArgs
2082 {
2083 OBJECTREF pThrowable;
2084 OBJECTREF pReason;
2085 } args;
2086 memset(&args, 0, sizeof(ProtectArgs));
2087
2088 GCPROTECT_BEGIN(args);
2089
2090 MethodTable *pMT = MscorlibBinder::GetException(kThreadStartException);
2091 args.pThrowable = AllocateObject(pMT);
2092
2093 MethodDescCallSite exceptionCtor(METHOD__THREAD_START_EXCEPTION__EX_CTOR);
2094
2095 if (m_pExceptionDuringStartup)
2096 {
2097 args.pReason = CLRException::GetThrowableFromException(m_pExceptionDuringStartup);
2098 Exception::Delete(m_pExceptionDuringStartup);
2099 m_pExceptionDuringStartup = NULL;
2100 }
2101
2102 ARG_SLOT args1[] = {
2103 ObjToArgSlot(args.pThrowable),
2104 ObjToArgSlot(args.pReason),
2105 };
2106 exceptionCtor.Call(args1);
2107
2108 GCPROTECT_END(); //Prot
2109
2110 RaiseTheExceptionInternalOnly(args.pThrowable, FALSE);
2111}
2112
2113#ifndef FEATURE_PAL
2114BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken)
2115{
2116 WRAPPER_NO_CONTRACT;
2117
2118 BOOL bImpersonated = OpenThreadToken(GetCurrentThread(), // we are assuming that if this call fails,
2119 TOKEN_IMPERSONATE, // we are not impersonating. There is no win32
2120 TRUE, // api to figure this out. The only alternative
2121 phToken); // is to use NtCurrentTeb->IsImpersonating().
2122 if (bImpersonated)
2123 {
2124 *bReverted = RevertToSelf();
2125 return *bReverted;
2126
2127 }
2128 return TRUE;
2129}
2130
2131void UndoRevert(BOOL bReverted, HANDLE hToken)
2132{
2133 if (bReverted)
2134 {
2135 if (!SetThreadToken(NULL, hToken))
2136 {
2137 _ASSERT("Undo Revert -> SetThreadToken failed");
2138 STRESS_LOG1(LF_EH, LL_INFO100, "UndoRevert/SetThreadToken failed for hToken = %d\n",hToken);
2139 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
2140 }
2141 }
2142 return;
2143}
2144#endif // !FEATURE_PAL
2145
2146
2147// We don't want ::CreateThread() calls scattered throughout the source. So gather
2148// them all here.
2149
2150BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName)
2151{
2152 CONTRACTL {
2153 NOTHROW;
2154 GC_TRIGGERS;
2155 }
2156 CONTRACTL_END;
2157 BOOL bRet;
2158
2159 //This assert is here to prevent a bug in the future
2160 // CreateTask currently takes a DWORD and we will downcast
2161 // if that interface changes to take a SIZE_T this Assert needs to be removed.
2162 //
2163 _ASSERTE(stackSize <= 0xFFFFFFFF);
2164
2165#ifndef FEATURE_PAL
2166 HandleHolder token;
2167 BOOL bReverted = FALSE;
2168 bRet = RevertIfImpersonated(&bReverted, &token);
2169 if (bRet != TRUE)
2170 return bRet;
2171#endif // !FEATURE_PAL
2172
2173 m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread);
2174 bRet = CreateNewOSThread(stackSize, start, args);
2175#ifndef FEATURE_PAL
2176 UndoRevert(bReverted, token);
2177 if (pName != NULL)
2178 SetThreadName(m_ThreadHandle, pName);
2179#endif // !FEATURE_PAL
2180
2181 return bRet;
2182}
2183
2184
2185// This is to avoid the 64KB/1MB aliasing problem present on Pentium 4 processors,
2186// which can significantly impact performance with HyperThreading enabled
2187DWORD WINAPI Thread::intermediateThreadProc(PVOID arg)
2188{
2189 WRAPPER_NO_CONTRACT;
2190
2191 m_offset_counter++;
2192 if (m_offset_counter * offset_multiplier > (int) GetOsPageSize())
2193 m_offset_counter = 0;
2194
2195 (void)_alloca(m_offset_counter * offset_multiplier);
2196
2197 intermediateThreadParam* param = (intermediateThreadParam*)arg;
2198
2199 LPTHREAD_START_ROUTINE ThreadFcnPtr = param->lpThreadFunction;
2200 PVOID args = param->lpArg;
2201 delete param;
2202
2203 return ThreadFcnPtr(args);
2204}
2205
2206HANDLE Thread::CreateUtilityThread(Thread::StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName, DWORD flags, DWORD* pThreadId)
2207{
2208 LIMITED_METHOD_CONTRACT;
2209
2210 // TODO: we should always use small stacks for most of these threads. For CLR 4, we're being conservative
2211 // here because this is a last-minute fix.
2212
2213 SIZE_T stackSize;
2214
2215 switch (stackSizeBucket)
2216 {
2217 case StackSize_Small:
2218 stackSize = 256 * 1024;
2219 break;
2220
2221 case StackSize_Medium:
2222 stackSize = 512 * 1024;
2223 break;
2224
2225 default:
2226 _ASSERTE(!"Bad stack size bucket");
2227 case StackSize_Large:
2228 stackSize = 1024 * 1024;
2229 break;
2230 }
2231
2232 flags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
2233
2234 DWORD threadId;
2235 HANDLE hThread = CreateThread(NULL, stackSize, start, args, flags, &threadId);
2236#ifndef FEATURE_PAL
2237 SetThreadName(hThread, pName);
2238#endif // !FEATURE_PAL
2239
2240
2241 if (pThreadId)
2242 *pThreadId = threadId;
2243
2244 return hThread;
2245}
2246
2247
2248BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize)
2249{
2250 CONTRACTL
2251 {
2252 NOTHROW;
2253 GC_NOTRIGGER;
2254 }
2255 CONTRACTL_END;
2256
2257 //
2258 // Let's get the stack sizes from the PE file that started process.
2259 //
2260 static SIZE_T ExeSizeOfStackReserve = 0;
2261 static SIZE_T ExeSizeOfStackCommit = 0;
2262
2263 static BOOL fSizesGot = FALSE;
2264
2265#ifndef FEATURE_PAL
2266 if (!fSizesGot)
2267 {
2268 HINSTANCE hInst = WszGetModuleHandle(NULL);
2269 _ASSERTE(hInst); // WszGetModuleHandle should never fail on the module that started the process.
2270 EX_TRY
2271 {
2272 PEDecoder pe(hInst);
2273 pe.GetEXEStackSizes(&ExeSizeOfStackReserve, &ExeSizeOfStackCommit);
2274 fSizesGot = TRUE;
2275 }
2276 EX_CATCH
2277 {
2278 fSizesGot = FALSE;
2279 }
2280 EX_END_CATCH(SwallowAllExceptions);
2281 }
2282#endif // !FEATURE_PAL
2283
2284 if (!fSizesGot) {
2285 //return some somewhat-reasonable numbers
2286 if (NULL != reserveSize) *reserveSize = 256*1024;
2287 if (NULL != commitSize) *commitSize = 256*1024;
2288 return FALSE;
2289 }
2290
2291 if (NULL != reserveSize) *reserveSize = ExeSizeOfStackReserve;
2292 if (NULL != commitSize) *commitSize = ExeSizeOfStackCommit;
2293 return TRUE;
2294}
2295
2296BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUTINE start, void *args)
2297{
2298 CONTRACTL {
2299 NOTHROW;
2300 GC_TRIGGERS;
2301 }
2302 CONTRACTL_END;
2303
2304 DWORD ourId = 0;
2305 HANDLE h = NULL;
2306 DWORD dwCreationFlags = CREATE_SUSPENDED;
2307
2308 dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
2309
2310#ifndef FEATURE_PAL // the PAL does its own adjustments as necessary
2311 if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize())
2312 {
2313 // On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of
2314 // a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB).
2315 sizeToCommitOrReserve = GetOsPageSize() + 1;
2316 }
2317#endif // !FEATURE_PAL
2318
2319 intermediateThreadParam* lpThreadArgs = new (nothrow) intermediateThreadParam;
2320 if (lpThreadArgs == NULL)
2321 {
2322 return FALSE;
2323 }
2324 NewHolder<intermediateThreadParam> argHolder(lpThreadArgs);
2325
2326 // Make sure we have all our handles, in case someone tries to suspend us
2327 // as we are starting up.
2328 if (!AllocHandles())
2329 {
2330 // OS is out of handles/memory?
2331 return FALSE;
2332 }
2333
2334 lpThreadArgs->lpThreadFunction = start;
2335 lpThreadArgs->lpArg = args;
2336
2337 h = ::CreateThread(NULL /*=SECURITY_ATTRIBUTES*/,
2338 sizeToCommitOrReserve,
2339 intermediateThreadProc,
2340 lpThreadArgs,
2341 dwCreationFlags,
2342 &ourId);
2343
2344 if (h == NULL)
2345 return FALSE;
2346
2347 argHolder.SuppressRelease();
2348
2349 _ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted.
2350
2351 SetThreadHandle(h);
2352 m_WeOwnThreadHandle = TRUE;
2353
2354 // Before we do the resume, we need to take note of the new ThreadId. This
2355 // is necessary because -- before the thread starts executing at KickofThread --
2356 // it may perform some DllMain DLL_THREAD_ATTACH notifications. These could
2357 // call into managed code. During the consequent SetupThread, we need to
2358 // perform the Thread::HasStarted call instead of going through the normal
2359 // 'new thread' pathway.
2360 _ASSERTE(GetOSThreadId() == 0);
2361 _ASSERTE(ourId != 0);
2362
2363 m_OSThreadId = ourId;
2364
2365 FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
2366
2367#ifdef _DEBUG
2368 m_Creater.SetToCurrentThread();
2369#endif
2370
2371 return TRUE;
2372}
2373
2374//
2375// #threadDestruction
2376//
2377// General comments on thread destruction.
2378//
2379// The C++ Thread object can survive beyond the time when the Win32 thread has died.
2380// This is important if an exposed object has been created for this thread. The
2381// exposed object will survive until it is GC'ed.
2382//
2383// A client like an exposed object can place an external reference count on that
2384// object. We also place a reference count on it when we construct it, and we lose
2385// that count when the thread finishes doing useful work (OnThreadTerminate).
2386//
2387// One way OnThreadTerminate() is called is when the thread finishes doing useful
2388// work. This case always happens on the correct thread.
2389//
2390// The other way OnThreadTerminate() is called is during product shutdown. We do
2391// a "best effort" to eliminate all threads except the Main thread before shutdown
2392// happens. But there may be some background threads or external threads still
2393// running.
2394//
2395// When the final reference count disappears, we destruct. Until then, the thread
2396// remains in the ThreadStore, but is marked as "Dead".
2397//<TODO>
2398// @TODO cwb: for a typical shutdown, only background threads are still around.
2399// Should we interrupt them? What about the non-typical shutdown?</TODO>
2400
2401int Thread::IncExternalCount()
2402{
2403 CONTRACTL {
2404 NOTHROW;
2405 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2406 }
2407 CONTRACTL_END;
2408
2409 Thread *pCurThread = GetThread();
2410
2411 _ASSERTE(m_ExternalRefCount > 0);
2412 int retVal = FastInterlockIncrement((LONG*)&m_ExternalRefCount);
2413 // If we have an exposed object and the refcount is greater than one
2414 // we must make sure to keep a strong handle to the exposed object
2415 // so that we keep it alive even if nobody has a reference to it.
2416 if (pCurThread && ((*((void**)m_ExposedObject)) != NULL))
2417 {
2418 // The exposed object exists and needs a strong handle so check
2419 // to see if it has one.
2420 // Only a managed thread can setup StrongHnd.
2421 if ((*((void**)m_StrongHndToExposedObject)) == NULL)
2422 {
2423 GCX_COOP();
2424 // Store the object in the strong handle.
2425 StoreObjectInHandle(m_StrongHndToExposedObject, ObjectFromHandle(m_ExposedObject));
2426 }
2427 }
2428
2429 return retVal;
2430}
2431
2432int Thread::DecExternalCount(BOOL holdingLock)
2433{
2434 CONTRACTL {
2435 NOTHROW;
2436 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2437 }
2438 CONTRACTL_END;
2439
2440 // Note that it's possible to get here with a NULL current thread (during
2441 // shutdown of the thread manager).
2442 Thread *pCurThread = GetThread();
2443 _ASSERTE (pCurThread == NULL || IsAtProcessExit()
2444 || (!holdingLock && !ThreadStore::HoldingThreadStore(pCurThread))
2445 || (holdingLock && ThreadStore::HoldingThreadStore(pCurThread)));
2446
2447 BOOL ToggleGC = FALSE;
2448 BOOL SelfDelete = FALSE;
2449
2450 int retVal;
2451
2452 // Must synchronize count and exposed object handle manipulation. We use the
2453 // thread lock for this, which implies that we must be in pre-emptive mode
2454 // to begin with and avoid any activity that would invoke a GC (this
2455 // acquires the thread store lock).
2456 if (pCurThread)
2457 {
2458 // TODO: we would prefer to use a GC Holder here, however it is hard
2459 // to get the case where we're deleting this thread correct given
2460 // the current macros. We want to supress the release of the holder
2461 // here which puts us in Preemptive mode, and also the switch to
2462 // Cooperative mode below, but since both holders will be named
2463 // the same thing (due to the generic nature of the macro) we can
2464 // not use GCX_*_SUPRESS_RELEASE() for 2 holders in the same scope
2465 // b/c they will both apply simply to the most narrowly scoped
2466 // holder.
2467
2468 ToggleGC = pCurThread->PreemptiveGCDisabled();
2469 if (ToggleGC)
2470 {
2471 pCurThread->EnablePreemptiveGC();
2472 }
2473 }
2474
2475 GCX_ASSERT_PREEMP();
2476
2477 ThreadStoreLockHolder tsLock(!holdingLock);
2478
2479 _ASSERTE(m_ExternalRefCount >= 1);
2480 _ASSERTE(!holdingLock ||
2481 ThreadStore::s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
2482 IsAtProcessExit());
2483
2484 retVal = FastInterlockDecrement((LONG*)&m_ExternalRefCount);
2485
2486 if (retVal == 0)
2487 {
2488 HANDLE h = GetThreadHandle();
2489 if (h == INVALID_HANDLE_VALUE)
2490 {
2491 h = m_ThreadHandleForClose;
2492 m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
2493 }
2494 // Can not assert like this. We have already removed the Unstarted bit.
2495 //_ASSERTE (IsUnstarted() || h != INVALID_HANDLE_VALUE);
2496 if (h != INVALID_HANDLE_VALUE && m_WeOwnThreadHandle)
2497 {
2498 ::CloseHandle(h);
2499 SetThreadHandle(INVALID_HANDLE_VALUE);
2500 }
2501 // Switch back to cooperative mode to manipulate the thread.
2502 if (pCurThread)
2503 {
2504 // TODO: we would prefer to use GCX_COOP here, see comment above.
2505 pCurThread->DisablePreemptiveGC();
2506 }
2507
2508 GCX_ASSERT_COOP();
2509
2510 // during process detach the thread might still be in the thread list
2511 // if it hasn't seen its DLL_THREAD_DETACH yet. Use the following
2512 // tweak to decide if the thread has terminated yet.
2513 if (!HasValidThreadHandle())
2514 {
2515 SelfDelete = this == pCurThread;
2516 m_ExceptionState.FreeAllStackTraces();
2517 if (SelfDelete) {
2518 SetThread(NULL);
2519 }
2520 delete this;
2521 }
2522
2523 tsLock.Release();
2524
2525 // It only makes sense to restore the GC mode if we didn't just destroy
2526 // our own thread object.
2527 if (pCurThread && !SelfDelete && !ToggleGC)
2528 {
2529 pCurThread->EnablePreemptiveGC();
2530 }
2531
2532 // Cannot use this here b/c it creates a holder named the same as GCX_ASSERT_COOP
2533 // in the same scope above...
2534 //
2535 // GCX_ASSERT_PREEMP()
2536
2537 return retVal;
2538 }
2539 else if (pCurThread == NULL)
2540 {
2541 // We're in shutdown, too late to be worrying about having a strong
2542 // handle to the exposed thread object, we've already performed our
2543 // final GC.
2544 tsLock.Release();
2545
2546 return retVal;
2547 }
2548 else
2549 {
2550 // Check to see if the external ref count reaches exactly one. If this
2551 // is the case and we have an exposed object then it is that exposed object
2552 // that is holding a reference to us. To make sure that we are not the
2553 // ones keeping the exposed object alive we need to remove the strong
2554 // reference we have to it.
2555 if ((retVal == 1) && ((*((void**)m_StrongHndToExposedObject)) != NULL))
2556 {
2557 // Switch back to cooperative mode to manipulate the object.
2558
2559 // Don't want to switch back to COOP until we let go of the lock
2560 // however we are allowed to call StoreObjectInHandle here in preemptive
2561 // mode because we are setting the value to NULL.
2562 CONTRACT_VIOLATION(ModeViolation);
2563
2564 // Clear the handle and leave the lock.
2565 // We do not have to to DisablePreemptiveGC here, because
2566 // we just want to put NULL into a handle.
2567 StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
2568
2569 tsLock.Release();
2570
2571 // Switch back to the initial GC mode.
2572 if (ToggleGC)
2573 {
2574 pCurThread->DisablePreemptiveGC();
2575 }
2576
2577 GCX_ASSERT_COOP();
2578
2579 return retVal;
2580 }
2581 }
2582
2583 tsLock.Release();
2584
2585 // Switch back to the initial GC mode.
2586 if (ToggleGC)
2587 {
2588 pCurThread->DisablePreemptiveGC();
2589 }
2590
2591 return retVal;
2592}
2593
2594
2595
2596//--------------------------------------------------------------------
2597// Destruction. This occurs after the associated native thread
2598// has died.
2599//--------------------------------------------------------------------
2600Thread::~Thread()
2601{
2602 CONTRACTL {
2603 NOTHROW;
2604 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2605 }
2606 CONTRACTL_END;
2607
2608 // TODO: enable this
2609 //_ASSERTE(GetThread() != this);
2610 _ASSERTE(m_ThrewControlForThread == 0);
2611
2612 // AbortRequest is coupled with TrapReturningThread.
2613 // We should have unmarked the thread for abort.
2614 // !!! Can not assert here. If a thread has no managed code on stack
2615 // !!! we leave the g_TrapReturningThread set so that the thread will be
2616 // !!! aborted if it enters managed code.
2617 //_ASSERTE(!IsAbortRequested());
2618
2619 // We should not have the Thread marked for abort. But if we have
2620 // we need to unmark it so that g_TrapReturningThreads is decremented.
2621 if (IsAbortRequested())
2622 {
2623 UnmarkThreadForAbort(TAR_ALL);
2624 }
2625
2626#if defined(_DEBUG) && defined(TRACK_SYNC)
2627 _ASSERTE(IsAtProcessExit() || ((Dbg_TrackSyncStack *) m_pTrackSync)->m_StackPointer == 0);
2628 delete m_pTrackSync;
2629#endif // TRACK_SYNC
2630
2631 _ASSERTE(IsDead() || IsUnstarted() || IsAtProcessExit());
2632
2633 if (m_WaitEventLink.m_Next != NULL && !IsAtProcessExit())
2634 {
2635 WaitEventLink *walk = &m_WaitEventLink;
2636 while (walk->m_Next) {
2637 ThreadQueue::RemoveThread(this, (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1));
2638 StoreEventToEventStore (walk->m_Next->m_EventWait);
2639 }
2640 m_WaitEventLink.m_Next = NULL;
2641 }
2642
2643 if (m_StateNC & TSNC_ExistInThreadStore) {
2644 BOOL ret;
2645 ret = ThreadStore::RemoveThread(this);
2646 _ASSERTE(ret);
2647 }
2648
2649#ifdef _DEBUG
2650 m_pFrame = (Frame *)POISONC;
2651#endif
2652
2653 // Update Perfmon counters.
2654 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical--);
2655
2656 // Current recognized threads are non-runtime threads that are alive and ran under the
2657 // runtime. Check whether this Thread was one of them.
2658 if ((m_State & TS_WeOwn) == 0)
2659 {
2660 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads--);
2661 }
2662 else
2663 {
2664 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical--);
2665 }
2666
2667 // Normally we shouldn't get here with a valid thread handle; however if SetupThread
2668 // failed (due to an OOM for example) then we need to CloseHandle the thread
2669 // handle if we own it.
2670 if (m_WeOwnThreadHandle && (GetThreadHandle() != INVALID_HANDLE_VALUE))
2671 {
2672 CloseHandle(GetThreadHandle());
2673 }
2674
2675 if (m_DebugSuspendEvent.IsValid())
2676 {
2677 m_DebugSuspendEvent.CloseEvent();
2678 }
2679 if (m_EventWait.IsValid())
2680 {
2681 m_EventWait.CloseEvent();
2682 }
2683
2684 FreeIOCompletionContext();
2685
2686 if (m_OSContext)
2687 delete m_OSContext;
2688
2689 if (GetSavedRedirectContext())
2690 {
2691 delete GetSavedRedirectContext();
2692 SetSavedRedirectContext(NULL);
2693 }
2694
2695#ifdef FEATURE_COMINTEROP
2696 if (m_pRCWStack)
2697 delete m_pRCWStack;
2698#endif
2699
2700 if (m_pExceptionDuringStartup)
2701 {
2702 Exception::Delete (m_pExceptionDuringStartup);
2703 }
2704
2705 ClearContext();
2706
2707 if (!IsAtProcessExit())
2708 {
2709 // Destroy any handles that we're using to hold onto exception objects
2710 SafeSetThrowables(NULL);
2711
2712 DestroyShortWeakHandle(m_ExposedObject);
2713 DestroyStrongHandle(m_StrongHndToExposedObject);
2714 }
2715
2716 g_pThinLockThreadIdDispenser->DisposeId(GetThreadId());
2717
2718#ifdef FEATURE_PREJIT
2719 if (m_pIBCInfo) {
2720 delete m_pIBCInfo;
2721 }
2722#endif
2723
2724#ifdef FEATURE_EVENT_TRACE
2725 // Destruct the thread local type cache for allocation sampling
2726 if(m_pAllLoggedTypes) {
2727 ETW::TypeSystemLog::DeleteTypeHashNoLock(&m_pAllLoggedTypes);
2728 }
2729#endif // FEATURE_EVENT_TRACE
2730
2731 // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
2732 CrstHolder lock(&g_DeadlockAwareCrst);
2733}
2734
2735#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
2736
2737void Thread::BaseCoUninitialize()
2738{
2739 STATIC_CONTRACT_THROWS;
2740 STATIC_CONTRACT_GC_TRIGGERS;
2741 STATIC_CONTRACT_SO_INTOLERANT;
2742 STATIC_CONTRACT_MODE_PREEMPTIVE;
2743
2744 _ASSERTE(GetThread() == this);
2745
2746 BEGIN_SO_TOLERANT_CODE(this);
2747 // BEGIN_SO_TOLERANT_CODE wraps a __try/__except around this call, so if the OS were to allow
2748 // an exception to leak through to us, we'll catch it.
2749 ::CoUninitialize();
2750 END_SO_TOLERANT_CODE;
2751
2752}// BaseCoUninitialize
2753
2754#ifdef FEATURE_COMINTEROP
2755void Thread::BaseWinRTUninitialize()
2756{
2757 STATIC_CONTRACT_THROWS;
2758 STATIC_CONTRACT_GC_TRIGGERS;
2759 STATIC_CONTRACT_SO_INTOLERANT;
2760 STATIC_CONTRACT_MODE_PREEMPTIVE;
2761
2762 _ASSERTE(WinRTSupported());
2763 _ASSERTE(GetThread() == this);
2764 _ASSERTE(IsWinRTInitialized());
2765
2766 BEGIN_SO_TOLERANT_CODE(this);
2767 RoUninitialize();
2768 END_SO_TOLERANT_CODE;
2769}
2770#endif // FEATURE_COMINTEROP
2771
2772void Thread::CoUninitialize()
2773{
2774 CONTRACTL {
2775 NOTHROW;
2776 GC_TRIGGERS;
2777 }
2778 CONTRACTL_END;
2779
2780 // Running threads might have performed a CoInitialize which must
2781 // now be balanced.
2782 BOOL needsUninitialize = IsCoInitialized()
2783#ifdef FEATURE_COMINTEROP
2784 || IsWinRTInitialized()
2785#endif // FEATURE_COMINTEROP
2786 ;
2787
2788 if (!IsAtProcessExit() && needsUninitialize)
2789 {
2790 GCX_PREEMP();
2791 CONTRACT_VIOLATION(ThrowsViolation);
2792
2793 if (IsCoInitialized())
2794 {
2795 BaseCoUninitialize();
2796 FastInterlockAnd((ULONG *)&m_State, ~TS_CoInitialized);
2797 }
2798
2799#ifdef FEATURE_COMINTEROP
2800 if (IsWinRTInitialized())
2801 {
2802 _ASSERTE(WinRTSupported());
2803 BaseWinRTUninitialize();
2804 ResetWinRTInitialized();
2805 }
2806#endif // FEATURE_COMNITEROP
2807 }
2808}
2809#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
2810
2811void Thread::CleanupDetachedThreads()
2812{
2813 CONTRACTL {
2814 NOTHROW;
2815 GC_TRIGGERS;
2816 }
2817 CONTRACTL_END;
2818
2819 _ASSERTE(!ThreadStore::HoldingThreadStore());
2820
2821 ThreadStoreLockHolder threadStoreLockHolder;
2822
2823 Thread *thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
2824
2825 STRESS_LOG0(LF_SYNC, LL_INFO1000, "T::CDT called\n");
2826
2827 while (thread != NULL)
2828 {
2829 Thread *next = ThreadStore::GetAllThreadList(thread, 0, 0);
2830
2831 if (thread->IsDetached() && thread->m_UnmanagedRefCount == 0)
2832 {
2833 STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - detaching thread 0x%p\n", thread);
2834
2835 // Unmark that the thread is detached while we have the
2836 // thread store lock. This will ensure that no other
2837 // thread will race in here and try to delete it, too.
2838 FastInterlockAnd((ULONG*)&(thread->m_State), ~TS_Detached);
2839 FastInterlockDecrement(&m_DetachCount);
2840 if (!thread->IsBackground())
2841 FastInterlockDecrement(&m_ActiveDetachCount);
2842
2843 // If the debugger is attached, then we need to unlock the
2844 // thread store before calling OnThreadTerminate. That
2845 // way, we won't be holding the thread store lock if we
2846 // need to block sending a detach thread event.
2847 BOOL debuggerAttached =
2848#ifdef DEBUGGING_SUPPORTED
2849 CORDebuggerAttached();
2850#else // !DEBUGGING_SUPPORTED
2851 FALSE;
2852#endif // !DEBUGGING_SUPPORTED
2853
2854 if (debuggerAttached)
2855 ThreadStore::UnlockThreadStore();
2856
2857 thread->OnThreadTerminate(debuggerAttached ? FALSE : TRUE);
2858
2859#ifdef DEBUGGING_SUPPORTED
2860 if (debuggerAttached)
2861 {
2862 ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
2863
2864 // We remember the next Thread in the thread store
2865 // list before deleting the current one. But we can't
2866 // use that Thread pointer now that we release the
2867 // thread store lock in the middle of the loop. We
2868 // have to start from the beginning of the list every
2869 // time. If two threads T1 and T2 race into
2870 // CleanupDetachedThreads, then T1 will grab the first
2871 // Thread on the list marked for deletion and release
2872 // the lock. T2 will grab the second one on the
2873 // list. T2 may complete destruction of its Thread,
2874 // then T1 might re-acquire the thread store lock and
2875 // try to use the next Thread in the thread store. But
2876 // T2 just deleted that next Thread.
2877 thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
2878 }
2879 else
2880#endif // DEBUGGING_SUPPORTED
2881 {
2882 thread = next;
2883 }
2884 }
2885 else if (thread->HasThreadState(TS_Finalized))
2886 {
2887 STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - finalized thread 0x%p\n", thread);
2888
2889 thread->ResetThreadState(TS_Finalized);
2890 // We have finalized the managed Thread object. Now it is time to clean up the unmanaged part
2891 thread->DecExternalCount(TRUE);
2892 thread = next;
2893 }
2894 else
2895 {
2896 thread = next;
2897 }
2898 }
2899
2900 s_fCleanFinalizedThread = FALSE;
2901}
2902
2903#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
2904
2905void Thread::CleanupCOMState()
2906{
2907 CONTRACTL {
2908 NOTHROW;
2909 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2910 }
2911 CONTRACTL_END;
2912
2913#ifdef FEATURE_COMINTEROP
2914 if (GetFinalApartment() == Thread::AS_InSTA)
2915 ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
2916#endif // FEATURE_COMINTEROP
2917
2918 // Running threads might have performed a CoInitialize which must
2919 // now be balanced. However only the thread that called COInitialize can
2920 // call CoUninitialize.
2921
2922 BOOL needsUninitialize = IsCoInitialized()
2923#ifdef FEATURE_COMINTEROP
2924 || IsWinRTInitialized()
2925#endif // FEATURE_COMINTEROP
2926 ;
2927
2928 if (needsUninitialize)
2929 {
2930 GCX_PREEMP();
2931 CONTRACT_VIOLATION(ThrowsViolation);
2932
2933 if (IsCoInitialized())
2934 {
2935 BaseCoUninitialize();
2936 ResetCoInitialized();
2937 }
2938
2939#ifdef FEATURE_COMINTEROP
2940 if (IsWinRTInitialized())
2941 {
2942 _ASSERTE(WinRTSupported());
2943 BaseWinRTUninitialize();
2944 ResetWinRTInitialized();
2945 }
2946#endif // FEATURE_COMINTEROP
2947 }
2948}
2949#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
2950
2951// See general comments on thread destruction (code:#threadDestruction) above.
2952void Thread::OnThreadTerminate(BOOL holdingLock)
2953{
2954 CONTRACTL {
2955 NOTHROW;
2956 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2957 }
2958 CONTRACTL_END;
2959
2960 // #ReportDeadOnThreadTerminate
2961 // Caller should have put the TS_ReportDead bit on by now.
2962 // We don't want any windows after the exit event but before the thread is marked dead.
2963 // If a debugger attached during such a window (or even took a dump at the exit event),
2964 // then it may not realize the thread is dead.
2965 // So ensure we mark the thread as dead before we send the tool notifications.
2966 // The TS_ReportDead bit will cause the debugger to view this as TS_Dead.
2967 _ASSERTE(HasThreadState(TS_ReportDead));
2968
2969 // Should not use OSThreadId:
2970 // OSThreadId may change for the current thread is the thread is blocked and rescheduled
2971 // by host.
2972 Thread *pCurrentThread = GetThread();
2973 DWORD CurrentThreadID = pCurrentThread?pCurrentThread->GetThreadId():0;
2974 DWORD ThisThreadID = GetThreadId();
2975
2976#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
2977 // If the currently running thread is the thread that died and it is an STA thread, then we
2978 // need to release all the RCW's in the current context. However, we cannot do this if we
2979 // are in the middle of process detach.
2980 if (!IsAtProcessExit() && this == GetThread())
2981 {
2982 CleanupCOMState();
2983 }
2984#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
2985
2986 if (g_fEEShutDown != 0)
2987 {
2988 // We have started shutdown. Not safe to touch CLR state.
2989 return;
2990 }
2991
2992 // We took a count during construction, and we rely on the count being
2993 // non-zero as we terminate the thread here.
2994 _ASSERTE(m_ExternalRefCount > 0);
2995
2996 // The thread is no longer running. It's important that we zero any general OBJECTHANDLE's
2997 // on this Thread object. That's because we need the managed Thread object to be subject to
2998 // GC and yet any HANDLE is opaque to the GC when it comes to collecting cycles. If e.g. the
2999 // Thread's AbortReason (which is an arbitrary object) contains transitively a reference back
3000 // to the Thread, then we have an uncollectible cycle. When the thread is executing, nothing
3001 // can be collected anyway. But now that we stop running the cycle concerns us.
3002 //
3003 // It's important that we only use OBJECTHANDLE's that are retrievable while the thread is
3004 // still running. That's what allows us to zero them here with impunity:
3005 {
3006 // No handles to clean up in the m_ExceptionState
3007 _ASSERTE(!m_ExceptionState.IsExceptionInProgress());
3008
3009 GCX_COOP();
3010
3011 // Destroy the LastThrown handle (and anything that violates the above assert).
3012 SafeSetThrowables(NULL);
3013
3014 // Cleaning up the AbortReason is tricky, since the handle is only valid if the ADID is valid
3015 // ...and we can only perform this operation if other threads aren't racing to update these
3016 // values on our thread asynchronously.
3017 ClearAbortReason();
3018
3019 // Free all structures related to thread statics for this thread
3020 DeleteThreadStaticData();
3021
3022 }
3023
3024 if (GCHeapUtilities::IsGCHeapInitialized())
3025 {
3026 // Guaranteed to NOT be a shutdown case, because we tear down the heap before
3027 // we tear down any threads during shutdown.
3028 if (ThisThreadID == CurrentThreadID)
3029 {
3030 GCX_COOP();
3031 GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, NULL, NULL);
3032 m_alloc_context.init();
3033 }
3034 }
3035
3036 // We switch a thread to dead when it has finished doing useful work. But it
3037 // remains in the thread store so long as someone keeps it alive. An exposed
3038 // object will do this (it releases the refcount in its finalizer). If the
3039 // thread is never released, we have another look during product shutdown and
3040 // account for the unreleased refcount of the uncollected exposed object:
3041 if (IsDead())
3042 {
3043 GCX_COOP();
3044
3045 _ASSERTE(IsAtProcessExit());
3046 ClearContext();
3047 if (m_ExposedObject != NULL)
3048 DecExternalCount(holdingLock); // may destruct now
3049 }
3050 else
3051 {
3052#ifdef DEBUGGING_SUPPORTED
3053 //
3054 // If we're debugging, let the debugger know that this thread is
3055 // gone.
3056 //
3057 // There is a race here where the debugger could have attached after
3058 // we checked (and thus didn't release the lock). In this case,
3059 // we can't call out to the debugger or we risk a deadlock.
3060 //
3061 if (!holdingLock && CORDebuggerAttached())
3062 {
3063 g_pDebugInterface->DetachThread(this);
3064 }
3065#endif // DEBUGGING_SUPPORTED
3066
3067#ifdef PROFILING_SUPPORTED
3068 // If a profiler is present, then notify the profiler of thread destroy
3069 {
3070 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
3071 GCX_PREEMP();
3072 g_profControlBlock.pProfInterface->ThreadDestroyed((ThreadID) this);
3073 END_PIN_PROFILER();
3074 }
3075#endif // PROFILING_SUPPORTED
3076
3077 if (!holdingLock)
3078 {
3079 LOG((LF_SYNC, INFO3, "OnThreadTerminate obtain lock\n"));
3080 ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
3081
3082 }
3083
3084 if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
3085 {
3086 // We must be holding the ThreadStore lock in order to clean up alloc context.
3087 // We should never call FixAllocContext during GC.
3088 GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, NULL, NULL);
3089 m_alloc_context.init();
3090 }
3091
3092 FastInterlockOr((ULONG *) &m_State, TS_Dead);
3093 ThreadStore::s_pThreadStore->m_DeadThreadCount++;
3094 ThreadStore::s_pThreadStore->IncrementDeadThreadCountForGCTrigger();
3095
3096 if (IsUnstarted())
3097 ThreadStore::s_pThreadStore->m_UnstartedThreadCount--;
3098 else
3099 {
3100 if (IsBackground())
3101 ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
3102 }
3103
3104 FastInterlockAnd((ULONG *) &m_State, ~(TS_Unstarted | TS_Background));
3105
3106 //
3107 // If this thread was told to trip for debugging between the
3108 // sending of the detach event above and the locking of the
3109 // thread store lock, then remove the flag and decrement the
3110 // global trap returning threads count.
3111 //
3112 if (!IsAtProcessExit())
3113 {
3114 // A thread can't die during a GCPending, because the thread store's
3115 // lock is held by the GC thread.
3116 if (m_State & TS_DebugSuspendPending)
3117 UnmarkForSuspension(~TS_DebugSuspendPending);
3118
3119 // CoreCLR does not support user-requested thread suspension
3120 _ASSERTE(!(m_State & TS_UserSuspendPending));
3121
3122 if (CurrentThreadID == ThisThreadID && IsAbortRequested())
3123 {
3124 UnmarkThreadForAbort(Thread::TAR_ALL);
3125 }
3126 }
3127
3128 if (GetThreadHandle() != INVALID_HANDLE_VALUE)
3129 {
3130 if (m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
3131 {
3132 m_ThreadHandleForClose = GetThreadHandle();
3133 }
3134 SetThreadHandle (INVALID_HANDLE_VALUE);
3135 }
3136
3137 m_OSThreadId = 0;
3138
3139 // If nobody else is holding onto the thread, we may destruct it here:
3140 ULONG oldCount = DecExternalCount(TRUE);
3141 // If we are shutting down the process, we only have one thread active in the
3142 // system. So we can disregard all the reasons that hold this thread alive --
3143 // TLS is about to be reclaimed anyway.
3144 if (IsAtProcessExit())
3145 while (oldCount > 0)
3146 {
3147 oldCount = DecExternalCount(TRUE);
3148 }
3149
3150 // ASSUME THAT THE THREAD IS DELETED, FROM HERE ON
3151
3152 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= 0);
3153 _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
3154 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
3155 ThreadStore::s_pThreadStore->m_BackgroundThreadCount);
3156 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
3157 ThreadStore::s_pThreadStore->m_UnstartedThreadCount);
3158 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
3159 ThreadStore::s_pThreadStore->m_DeadThreadCount);
3160
3161 // One of the components of OtherThreadsComplete() has changed, so check whether
3162 // we should now exit the EE.
3163 ThreadStore::CheckForEEShutdown();
3164
3165 if (ThisThreadID == CurrentThreadID)
3166 {
3167 // NULL out the thread block in the tls. We can't do this if we aren't on the
3168 // right thread. But this will only happen during a shutdown. And we've made
3169 // a "best effort" to reduce to a single thread before we begin the shutdown.
3170 SetThread(NULL);
3171 SetAppDomain(NULL);
3172 }
3173
3174 if (!holdingLock)
3175 {
3176 LOG((LF_SYNC, INFO3, "OnThreadTerminate releasing lock\n"));
3177 ThreadSuspend::UnlockThreadStore(ThisThreadID == CurrentThreadID);
3178 }
3179 }
3180}
3181
3182// Helper functions to check for duplicate handles. we only do this check if
3183// a waitfor multiple fails.
3184int __cdecl compareHandles( const void *arg1, const void *arg2 )
3185{
3186 CONTRACTL {
3187 NOTHROW;
3188 GC_NOTRIGGER;
3189 }
3190 CONTRACTL_END;
3191
3192 HANDLE h1 = *(HANDLE*)arg1;
3193 HANDLE h2 = *(HANDLE*)arg2;
3194 return (h1 == h2) ? 0 : ((h1 < h2) ? -1 : 1);
3195}
3196
3197BOOL CheckForDuplicateHandles(int countHandles, HANDLE *handles)
3198{
3199 CONTRACTL {
3200 NOTHROW;
3201 GC_NOTRIGGER;
3202 }
3203 CONTRACTL_END;
3204
3205 qsort(handles,countHandles,sizeof(HANDLE),compareHandles);
3206 for (int i=1; i < countHandles; i++)
3207 {
3208 if (handles[i-1] == handles[i])
3209 return TRUE;
3210 }
3211 return FALSE;
3212}
3213//--------------------------------------------------------------------
3214// Based on whether this thread has a message pump, do the appropriate
3215// style of Wait.
3216//--------------------------------------------------------------------
3217DWORD Thread::DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll,
3218 DWORD millis, WaitMode mode, PendingSync *syncState)
3219{
3220 STATIC_CONTRACT_THROWS;
3221 STATIC_CONTRACT_GC_TRIGGERS;
3222
3223 INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
3224 _ASSERTE(alertable || syncState == 0);
3225
3226 struct Param
3227 {
3228 Thread *pThis;
3229 int countHandles;
3230 HANDLE *handles;
3231 BOOL waitAll;
3232 DWORD millis;
3233 WaitMode mode;
3234 DWORD dwRet;
3235 } param;
3236 param.pThis = this;
3237 param.countHandles = countHandles;
3238 param.handles = handles;
3239 param.waitAll = waitAll;
3240 param.millis = millis;
3241 param.mode = mode;
3242 param.dwRet = (DWORD) -1;
3243
3244 EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
3245 pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->countHandles, pParam->handles, pParam->waitAll, pParam->millis, pParam->mode);
3246 }
3247 EE_FINALLY {
3248 if (syncState) {
3249 if (!GOT_EXCEPTION() &&
3250 param.dwRet >= WAIT_OBJECT_0 && param.dwRet < (DWORD)(WAIT_OBJECT_0 + countHandles)) {
3251 // This thread has been removed from syncblk waiting list by the signalling thread
3252 syncState->Restore(FALSE);
3253 }
3254 else
3255 syncState->Restore(TRUE);
3256 }
3257
3258 _ASSERTE (param.dwRet != WAIT_IO_COMPLETION);
3259 }
3260 EE_END_FINALLY;
3261
3262 return(param.dwRet);
3263}
3264
3265DWORD Thread::DoAppropriateWait(AppropriateWaitFunc func, void *args,
3266 DWORD millis, WaitMode mode,
3267 PendingSync *syncState)
3268{
3269 STATIC_CONTRACT_THROWS;
3270 STATIC_CONTRACT_GC_TRIGGERS;
3271
3272 INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
3273 _ASSERTE(alertable || syncState == 0);
3274
3275 struct Param
3276 {
3277 Thread *pThis;
3278 AppropriateWaitFunc func;
3279 void *args;
3280 DWORD millis;
3281 WaitMode mode;
3282 DWORD dwRet;
3283 } param;
3284 param.pThis = this;
3285 param.func = func;
3286 param.args = args;
3287 param.millis = millis;
3288 param.mode = mode;
3289 param.dwRet = (DWORD) -1;
3290
3291 EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
3292 pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->func, pParam->args, pParam->millis, pParam->mode);
3293 }
3294 EE_FINALLY {
3295 if (syncState) {
3296 if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
3297 // This thread has been removed from syncblk waiting list by the signalling thread
3298 syncState->Restore(FALSE);
3299 }
3300 else
3301 syncState->Restore(TRUE);
3302 }
3303
3304 _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
3305 }
3306 EE_END_FINALLY;
3307
3308 return(param.dwRet);
3309}
3310
3311#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
3312
3313//--------------------------------------------------------------------
3314// helper to do message wait
3315//--------------------------------------------------------------------
3316DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL bAlertable)
3317{
3318 STATIC_CONTRACT_THROWS;
3319 // The true contract for GC trigger should be the following. But this puts a very strong restriction
3320 // on contract for functions that call EnablePreemptiveGC.
3321 //if (GetThread() && !ThreadStore::HoldingThreadStore(GetThread())) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
3322 STATIC_CONTRACT_SO_INTOLERANT;
3323 STATIC_CONTRACT_GC_TRIGGERS;
3324
3325 DWORD flags = 0;
3326 DWORD dwReturn=WAIT_ABANDONED;
3327
3328 Thread* pThread = GetThread();
3329 // If pThread is NULL, we'd better shut down.
3330 if (pThread == NULL)
3331 _ASSERTE (g_fEEShutDown);
3332
3333 DWORD lastError = 0;
3334 BEGIN_SO_TOLERANT_CODE(pThread);
3335
3336 // If we're going to pump, we cannot use WAIT_ALL. That's because the wait would
3337 // only be satisfied if a message arrives while the handles are signalled. If we
3338 // want true WAIT_ALL, we need to fire up a different thread in the MTA and wait
3339 // on his result. This isn't implemented yet.
3340 //
3341 // A change was added to WaitHandleNative::CorWaitMultipleNative to disable WaitAll
3342 // in an STA with more than one handle.
3343 if (bWaitAll)
3344 {
3345 if (numWaiters == 1)
3346 bWaitAll = FALSE;
3347
3348 // The check that's supposed to prevent this condition from occuring, in WaitHandleNative::CorWaitMultipleNative,
3349 // is unfortunately behind FEATURE_COMINTEROP instead of FEATURE_COMINTEROP_APARTMENT_SUPPORT.
3350 // So on CoreCLR (where FEATURE_COMINTEROP is not currently defined) we can actually reach this point.
3351 // We can't fix this, because it's a breaking change, so we just won't assert here.
3352 // The result is that WaitAll on an STA thread in CoreCLR will behave stragely, as described above.
3353 }
3354
3355 if (bWaitAll)
3356 flags |= COWAIT_WAITALL;
3357
3358 if (bAlertable)
3359 flags |= COWAIT_ALERTABLE;
3360
3361 HRESULT hr = S_OK;
3362 hr = CoWaitForMultipleHandles(flags, millis, numWaiters, phEvent, &dwReturn);
3363
3364 if (hr == RPC_S_CALLPENDING)
3365 {
3366 dwReturn = WAIT_TIMEOUT;
3367 }
3368 else if (FAILED(hr))
3369 {
3370 // The service behaves differently on an STA vs. MTA in how much
3371 // error information it propagates back, and in which form. We currently
3372 // only get here in the STA case, so bias this logic that way.
3373 dwReturn = WAIT_FAILED;
3374 }
3375 else
3376{
3377 dwReturn += WAIT_OBJECT_0; // success -- bias back
3378 }
3379
3380 lastError = ::GetLastError();
3381
3382 END_SO_TOLERANT_CODE;
3383
3384 // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
3385 ::SetLastError(lastError);
3386
3387 return dwReturn;
3388}
3389
3390#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
3391
3392DWORD WaitForMultipleObjectsEx_SO_TOLERANT (DWORD nCount, HANDLE *lpHandles, BOOL bWaitAll,DWORD dwMilliseconds, BOOL bAlertable)
3393{
3394 STATIC_CONTRACT_SO_INTOLERANT;
3395
3396 DWORD dwRet = WAIT_FAILED;
3397 DWORD lastError = 0;
3398
3399 BEGIN_SO_TOLERANT_CODE (GetThread ());
3400 dwRet = ::WaitForMultipleObjectsEx (nCount, lpHandles, bWaitAll, dwMilliseconds, bAlertable);
3401 lastError = ::GetLastError();
3402 END_SO_TOLERANT_CODE;
3403
3404 // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
3405 ::SetLastError(lastError);
3406 return dwRet;
3407}
3408
3409//--------------------------------------------------------------------
3410// Do appropriate wait based on apartment state (STA or MTA)
3411DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll,
3412 DWORD timeout, WaitMode mode)
3413{
3414 CONTRACTL {
3415 THROWS;
3416 GC_TRIGGERS;
3417 SO_INTOLERANT;
3418 }
3419 CONTRACTL_END;
3420
3421 BOOL alertable = (mode & WaitMode_Alertable) != 0;
3422
3423#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
3424 if (alertable && !GetDomain()->MustForceTrivialWaitOperations())
3425 {
3426 ApartmentState as = GetFinalApartment();
3427 if (AS_InMTA != as)
3428 {
3429 return MsgWaitHelper(numWaiters, pHandles, bWaitAll, timeout, alertable);
3430 }
3431 }
3432#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
3433
3434 return WaitForMultipleObjectsEx_SO_TOLERANT(numWaiters, pHandles, bWaitAll, timeout, alertable);
3435}
3436
3437// A helper called by our two flavors of DoAppropriateWaitWorker
3438void Thread::DoAppropriateWaitWorkerAlertableHelper(WaitMode mode)
3439{
3440 CONTRACTL {
3441 THROWS;
3442 GC_TRIGGERS;
3443 }
3444 CONTRACTL_END;
3445
3446 // If thread abort is prevented, we do not want this thread to see thread abort and thread interrupt exception.
3447 if (IsAbortPrevented())
3448 {
3449 return;
3450 }
3451
3452 // A word about ordering for Interrupt. If someone tries to interrupt a thread
3453 // that's in the interruptible state, we queue an APC. But if they try to interrupt
3454 // a thread that's not in the interruptible state, we just record that fact. So
3455 // we have to set TS_Interruptible before we test to see whether someone wants to
3456 // interrupt us or else we have a race condition that causes us to skip the APC.
3457 FastInterlockOr((ULONG *) &m_State, TS_Interruptible);
3458
3459 if (HasThreadStateNC(TSNC_InRestoringSyncBlock))
3460 {
3461 // The thread is restoring SyncBlock for Object.Wait.
3462 ResetThreadStateNC(TSNC_InRestoringSyncBlock);
3463 }
3464 else
3465 {
3466 HandleThreadInterrupt((mode & WaitMode_ADUnload) != 0);
3467
3468 // Safe to clear the interrupted state, no APC could have fired since we
3469 // reset m_UserInterrupt (which inhibits our APC callback from doing
3470 // anything).
3471 FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
3472 }
3473}
3474
3475void MarkOSAlertableWait()
3476{
3477 LIMITED_METHOD_CONTRACT;
3478 GetThread()->SetThreadStateNC (Thread::TSNC_OSAlertableWait);
3479}
3480
3481void UnMarkOSAlertableWait()
3482{
3483 LIMITED_METHOD_CONTRACT;
3484 GetThread()->ResetThreadStateNC (Thread::TSNC_OSAlertableWait);
3485}
3486
3487//--------------------------------------------------------------------
3488// Based on whether this thread has a message pump, do the appropriate
3489// style of Wait.
3490//--------------------------------------------------------------------
3491DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll,
3492 DWORD millis, WaitMode mode)
3493{
3494 CONTRACTL {
3495 THROWS;
3496 GC_TRIGGERS;
3497 }
3498 CONTRACTL_END;
3499
3500 DWORD ret = 0;
3501
3502 BOOL alertable = (mode & WaitMode_Alertable) != 0;
3503 // Waits from SynchronizationContext.WaitHelper are always just WaitMode_IgnoreSyncCtx.
3504 // So if we defer to a sync ctx, we will lose any extra bits. We must therefore not
3505 // defer to a sync ctx if doing any non-default wait.
3506 // If you're doing a default wait, but want to ignore sync ctx, specify WaitMode_IgnoreSyncCtx
3507 // which will make mode != WaitMode_Alertable.
3508 BOOL ignoreSyncCtx = (mode != WaitMode_Alertable);
3509
3510 if (GetDomain()->MustForceTrivialWaitOperations())
3511 ignoreSyncCtx = TRUE;
3512
3513 // Unless the ignoreSyncCtx flag is set, first check to see if there is a synchronization
3514 // context on the current thread and if there is, dispatch to it to do the wait.
3515 // If the wait is non alertable we cannot forward the call to the sync context
3516 // since fundamental parts of the system (such as the GC) rely on non alertable
3517 // waits not running any managed code. Also if we are past the point in shutdown were we
3518 // are allowed to run managed code then we can't forward the call to the sync context.
3519 if (!ignoreSyncCtx && alertable && CanRunManagedCode(LoaderLockCheck::None)
3520 && !HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
3521 {
3522 GCX_COOP();
3523
3524 BOOL fSyncCtxPresent = FALSE;
3525 OBJECTREF SyncCtxObj = NULL;
3526 GCPROTECT_BEGIN(SyncCtxObj)
3527 {
3528 GetSynchronizationContext(&SyncCtxObj);
3529 if (SyncCtxObj != NULL)
3530 {
3531 SYNCHRONIZATIONCONTEXTREF syncRef = (SYNCHRONIZATIONCONTEXTREF)SyncCtxObj;
3532 if (syncRef->IsWaitNotificationRequired())
3533 {
3534 fSyncCtxPresent = TRUE;
3535 ret = DoSyncContextWait(&SyncCtxObj, countHandles, handles, waitAll, millis);
3536 }
3537 }
3538 }
3539 GCPROTECT_END();
3540
3541 if (fSyncCtxPresent)
3542 return ret;
3543 }
3544
3545 // Before going to pre-emptive mode the thread needs to be flagged as waiting for
3546 // the debugger. This used to be accomplished by the TS_Interruptible flag but that
3547 // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
3548 // COOP mode so we set the bit before the transition. For the calls that are already
3549 // in pre-emptive mode those are still buggy. This is only a partial fix.
3550 BOOL isCoop = PreemptiveGCDisabled();
3551 ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
3552
3553 GCX_PREEMP();
3554
3555 if (alertable)
3556 {
3557 DoAppropriateWaitWorkerAlertableHelper(mode);
3558 }
3559
3560 StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
3561
3562 ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
3563
3564 ULONGLONG dwStart = 0, dwEnd;
3565retry:
3566 if (millis != INFINITE)
3567 {
3568 dwStart = CLRGetTickCount64();
3569 }
3570
3571 ret = DoAppropriateAptStateWait(countHandles, handles, waitAll, millis, mode);
3572
3573 if (ret == WAIT_IO_COMPLETION)
3574 {
3575 _ASSERTE (alertable);
3576
3577 if (m_State & TS_Interrupted)
3578 {
3579 HandleThreadInterrupt(mode & WaitMode_ADUnload);
3580 }
3581 // We could be woken by some spurious APC or an EE APC queued to
3582 // interrupt us. In the latter case the TS_Interrupted bit will be set
3583 // in the thread state bits. Otherwise we just go back to sleep again.
3584 if (millis != INFINITE)
3585 {
3586 dwEnd = CLRGetTickCount64();
3587 if (dwEnd >= dwStart + millis)
3588 {
3589 ret = WAIT_TIMEOUT;
3590 goto WaitCompleted;
3591 }
3592 else
3593 {
3594 millis -= (DWORD)(dwEnd - dwStart);
3595 }
3596 }
3597 goto retry;
3598 }
3599 _ASSERTE((ret >= WAIT_OBJECT_0 && ret < (WAIT_OBJECT_0 + (DWORD)countHandles)) ||
3600 (ret >= WAIT_ABANDONED && ret < (WAIT_ABANDONED + (DWORD)countHandles)) ||
3601 (ret == WAIT_TIMEOUT) || (ret == WAIT_FAILED));
3602 // countHandles is used as an unsigned -- it should never be negative.
3603 _ASSERTE(countHandles >= 0);
3604
3605 // We support precisely one WAIT_FAILED case, where we attempt to wait on a
3606 // thread handle and the thread is in the process of dying we might get a
3607 // invalid handle substatus. Turn this into a successful wait.
3608 // There are three cases to consider:
3609 // 1) Only waiting on one handle: return success right away.
3610 // 2) Waiting for all handles to be signalled: retry the wait without the
3611 // affected handle.
3612 // 3) Waiting for one of multiple handles to be signalled: return with the
3613 // first handle that is either signalled or has become invalid.
3614 if (ret == WAIT_FAILED)
3615 {
3616 DWORD errorCode = ::GetLastError();
3617 if (errorCode == ERROR_INVALID_PARAMETER)
3618 {
3619 if (CheckForDuplicateHandles(countHandles, handles))
3620 COMPlusThrow(kDuplicateWaitObjectException);
3621 else
3622 COMPlusThrowHR(HRESULT_FROM_WIN32(errorCode));
3623 }
3624 else if (errorCode == ERROR_ACCESS_DENIED)
3625 {
3626 // A Win32 ACL could prevent us from waiting on the handle.
3627 COMPlusThrow(kUnauthorizedAccessException);
3628 }
3629 else if (errorCode == ERROR_NOT_ENOUGH_MEMORY)
3630 {
3631 ThrowOutOfMemory();
3632 }
3633#ifdef FEATURE_PAL
3634 else if (errorCode == ERROR_NOT_SUPPORTED)
3635 {
3636 // "Wait for any" and "wait for all" operations on multiple wait handles are not supported when a cross-process sync
3637 // object is included in the array
3638 COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_NamedSyncObjectWaitAnyWaitAll"));
3639 }
3640#endif
3641 else if (errorCode != ERROR_INVALID_HANDLE)
3642 {
3643 ThrowWin32(errorCode);
3644 }
3645
3646 if (countHandles == 1)
3647 ret = WAIT_OBJECT_0;
3648 else if (waitAll)
3649 {
3650 // Probe all handles with a timeout of zero. When we find one that's
3651 // invalid, move it out of the list and retry the wait.
3652 for (int i = 0; i < countHandles; i++)
3653 {
3654 // WaitForSingleObject won't pump memssage; we already probe enough space
3655 // before calling this function and we don't want to fail here, so we don't
3656 // do a transition to tolerant code here
3657 DWORD subRet = WaitForSingleObject (handles[i], 0);
3658 if (subRet != WAIT_FAILED)
3659 continue;
3660 _ASSERTE(::GetLastError() == ERROR_INVALID_HANDLE);
3661 if ((countHandles - i - 1) > 0)
3662 memmove(&handles[i], &handles[i+1], (countHandles - i - 1) * sizeof(HANDLE));
3663 countHandles--;
3664 break;
3665 }
3666
3667 // Compute the new timeout value by assume that the timeout
3668 // is not large enough for more than one wrap
3669 dwEnd = CLRGetTickCount64();
3670 if (millis != INFINITE)
3671 {
3672 if (dwEnd >= dwStart + millis)
3673 {
3674 ret = WAIT_TIMEOUT;
3675 goto WaitCompleted;
3676 }
3677 else
3678 {
3679 millis -= (DWORD)(dwEnd - dwStart);
3680 }
3681 }
3682 goto retry;
3683 }
3684 else
3685 {
3686 // Probe all handles with a timeout as zero, succeed with the first
3687 // handle that doesn't timeout.
3688 ret = WAIT_OBJECT_0;
3689 int i;
3690 for (i = 0; i < countHandles; i++)
3691 {
3692 TryAgain:
3693 // WaitForSingleObject won't pump memssage; we already probe enough space
3694 // before calling this function and we don't want to fail here, so we don't
3695 // do a transition to tolerant code here
3696 DWORD subRet = WaitForSingleObject (handles[i], 0);
3697 if ((subRet == WAIT_OBJECT_0) || (subRet == WAIT_FAILED))
3698 break;
3699 if (subRet == WAIT_ABANDONED)
3700 {
3701 ret = (ret - WAIT_OBJECT_0) + WAIT_ABANDONED;
3702 break;
3703 }
3704 // If we get alerted it just masks the real state of the current
3705 // handle, so retry the wait.
3706 if (subRet == WAIT_IO_COMPLETION)
3707 goto TryAgain;
3708 _ASSERTE(subRet == WAIT_TIMEOUT);
3709 ret++;
3710 }
3711 }
3712 }
3713
3714WaitCompleted:
3715
3716 _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
3717
3718 return ret;
3719}
3720
3721
3722DWORD Thread::DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args,
3723 DWORD millis, WaitMode mode)
3724{
3725 CONTRACTL {
3726 THROWS;
3727 GC_TRIGGERS;
3728 }
3729 CONTRACTL_END;
3730
3731 BOOL alertable = (mode & WaitMode_Alertable)!=0;
3732
3733 // Before going to pre-emptive mode the thread needs to be flagged as waiting for
3734 // the debugger. This used to be accomplished by the TS_Interruptible flag but that
3735 // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
3736 // COOP mode so we set the bit before the transition. For the calls that are already
3737 // in pre-emptive mode those are still buggy. This is only a partial fix.
3738 BOOL isCoop = PreemptiveGCDisabled();
3739 ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
3740 GCX_PREEMP();
3741
3742 // <TODO>
3743 // @TODO cwb: we don't know whether a thread has a message pump or
3744 // how to pump its messages, currently.
3745 // @TODO cwb: WinCE isn't going to support Thread.Interrupt() correctly until
3746 // we get alertable waits on that platform.</TODO>
3747 DWORD ret;
3748 if(alertable)
3749 {
3750 DoAppropriateWaitWorkerAlertableHelper(mode);
3751 }
3752
3753 DWORD option;
3754 if (alertable)
3755 {
3756 option = WAIT_ALERTABLE;
3757#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
3758 ApartmentState as = GetFinalApartment();
3759 if ((AS_InMTA != as) && !GetDomain()->MustForceTrivialWaitOperations())
3760 {
3761 option |= WAIT_MSGPUMP;
3762 }
3763#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
3764 }
3765 else
3766 {
3767 option = 0;
3768 }
3769
3770 ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
3771
3772 ULONGLONG dwStart = 0;
3773 ULONGLONG dwEnd;
3774
3775retry:
3776 if (millis != INFINITE)
3777 {
3778 dwStart = CLRGetTickCount64();
3779 }
3780 ret = func(args, millis, option);
3781
3782 if (ret == WAIT_IO_COMPLETION)
3783 {
3784 _ASSERTE (alertable);
3785
3786 if ((m_State & TS_Interrupted))
3787 {
3788 HandleThreadInterrupt(mode & WaitMode_ADUnload);
3789 }
3790 if (millis != INFINITE)
3791 {
3792 dwEnd = CLRGetTickCount64();
3793 if (dwEnd >= dwStart + millis)
3794 {
3795 ret = WAIT_TIMEOUT;
3796 goto WaitCompleted;
3797 }
3798 else
3799 {
3800 millis -= (DWORD)(dwEnd - dwStart);
3801 }
3802 }
3803 goto retry;
3804 }
3805
3806WaitCompleted:
3807 _ASSERTE(ret == WAIT_OBJECT_0 ||
3808 ret == WAIT_ABANDONED ||
3809 ret == WAIT_TIMEOUT ||
3810 ret == WAIT_FAILED);
3811
3812 _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
3813
3814 return ret;
3815}
3816
3817//--------------------------------------------------------------------
3818// Only one style of wait for DoSignalAndWait since we don't support this on STA Threads
3819//--------------------------------------------------------------------
3820DWORD Thread::DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable, PendingSync *syncState)
3821{
3822 STATIC_CONTRACT_THROWS;
3823 STATIC_CONTRACT_GC_TRIGGERS;
3824
3825 _ASSERTE(alertable || syncState == 0);
3826
3827 struct Param
3828 {
3829 Thread *pThis;
3830 HANDLE *handles;
3831 DWORD millis;
3832 BOOL alertable;
3833 DWORD dwRet;
3834 } param;
3835 param.pThis = this;
3836 param.handles = handles;
3837 param.millis = millis;
3838 param.alertable = alertable;
3839 param.dwRet = (DWORD) -1;
3840
3841 EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
3842 pParam->dwRet = pParam->pThis->DoSignalAndWaitWorker(pParam->handles, pParam->millis, pParam->alertable);
3843 }
3844 EE_FINALLY {
3845 if (syncState) {
3846 if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
3847 // This thread has been removed from syncblk waiting list by the signalling thread
3848 syncState->Restore(FALSE);
3849 }
3850 else
3851 syncState->Restore(TRUE);
3852 }
3853
3854 _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
3855 }
3856 EE_END_FINALLY;
3857
3858 return(param.dwRet);
3859}
3860
3861
3862DWORD Thread::DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable)
3863{
3864 CONTRACTL {
3865 THROWS;
3866 GC_TRIGGERS;
3867 }
3868 CONTRACTL_END;
3869
3870 DWORD ret = 0;
3871
3872 GCX_PREEMP();
3873
3874 if(alertable)
3875 {
3876 DoAppropriateWaitWorkerAlertableHelper(WaitMode_None);
3877 }
3878
3879 StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
3880
3881 ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
3882
3883 ULONGLONG dwStart = 0, dwEnd;
3884
3885 if (INFINITE != millis)
3886 {
3887 dwStart = CLRGetTickCount64();
3888 }
3889
3890 ret = SignalObjectAndWait(pHandles[0], pHandles[1], millis, alertable);
3891
3892retry:
3893
3894 if (WAIT_IO_COMPLETION == ret)
3895 {
3896 _ASSERTE (alertable);
3897 // We could be woken by some spurious APC or an EE APC queued to
3898 // interrupt us. In the latter case the TS_Interrupted bit will be set
3899 // in the thread state bits. Otherwise we just go back to sleep again.
3900 if ((m_State & TS_Interrupted))
3901 {
3902 HandleThreadInterrupt(FALSE);
3903 }
3904 if (INFINITE != millis)
3905 {
3906 dwEnd = CLRGetTickCount64();
3907 if (dwStart + millis <= dwEnd)
3908 {
3909 ret = WAIT_TIMEOUT;
3910 goto WaitCompleted;
3911 }
3912 else
3913 {
3914 millis -= (DWORD)(dwEnd - dwStart);
3915 }
3916 dwStart = CLRGetTickCount64();
3917 }
3918 //Retry case we don't want to signal again so only do the wait...
3919 ret = WaitForSingleObjectEx(pHandles[1],millis,TRUE);
3920 goto retry;
3921 }
3922
3923 if (WAIT_FAILED == ret)
3924 {
3925 DWORD errorCode = ::GetLastError();
3926 //If the handle to signal is a mutex and
3927 // the calling thread is not the owner, errorCode is ERROR_NOT_OWNER
3928
3929 switch(errorCode)
3930 {
3931 case ERROR_INVALID_HANDLE:
3932 case ERROR_NOT_OWNER:
3933 case ERROR_ACCESS_DENIED:
3934 COMPlusThrowWin32();
3935 break;
3936
3937 case ERROR_TOO_MANY_POSTS:
3938 ret = ERROR_TOO_MANY_POSTS;
3939 break;
3940
3941 default:
3942 CONSISTENCY_CHECK_MSGF(0, ("This errorCode is not understood '(%d)''\n", errorCode));
3943 COMPlusThrowWin32();
3944 break;
3945 }
3946 }
3947
3948WaitCompleted:
3949
3950 //Check that the return state is valid
3951 _ASSERTE(WAIT_OBJECT_0 == ret ||
3952 WAIT_ABANDONED == ret ||
3953 WAIT_TIMEOUT == ret ||
3954 WAIT_FAILED == ret ||
3955 ERROR_TOO_MANY_POSTS == ret);
3956
3957 //Wrong to time out if the wait was infinite
3958 _ASSERTE((WAIT_TIMEOUT != ret) || (INFINITE != millis));
3959
3960 return ret;
3961}
3962
3963DWORD Thread::DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis)
3964{
3965 CONTRACTL
3966 {
3967 THROWS;
3968 GC_TRIGGERS;
3969 MODE_COOPERATIVE;
3970 PRECONDITION(CheckPointer(handles));
3971 PRECONDITION(IsProtectedByGCFrame (pSyncCtxObj));
3972 }
3973 CONTRACTL_END;
3974 MethodDescCallSite invokeWaitMethodHelper(METHOD__SYNCHRONIZATION_CONTEXT__INVOKE_WAIT_METHOD_HELPER);
3975
3976 BASEARRAYREF handleArrayObj = (BASEARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I, countHandles);
3977 memcpyNoGCRefs(handleArrayObj->GetDataPtr(), handles, countHandles * sizeof(HANDLE));
3978
3979 ARG_SLOT args[6] =
3980 {
3981 ObjToArgSlot(*pSyncCtxObj),
3982 ObjToArgSlot(handleArrayObj),
3983 BoolToArgSlot(waitAll),
3984 (ARG_SLOT)millis,
3985 };
3986
3987 // Needed by TriggerGCForMDAInternal to avoid infinite recursion
3988 ThreadStateNCStackHolder holder(TRUE, TSNC_InsideSyncContextWait);
3989
3990 return invokeWaitMethodHelper.Call_RetI4(args);
3991}
3992
3993// Called out of SyncBlock::Wait() to block this thread until the Notify occurs.
3994BOOL Thread::Block(INT32 timeOut, PendingSync *syncState)
3995{
3996 WRAPPER_NO_CONTRACT;
3997
3998 _ASSERTE(this == GetThread());
3999
4000 // Before calling Block, the SyncBlock queued us onto it's list of waiting threads.
4001 // However, before calling Block the SyncBlock temporarily left the synchronized
4002 // region. This allowed threads to enter the region and call Notify, in which
4003 // case we may have been signalled before we entered the Wait. So we aren't in the
4004 // m_WaitSB list any longer. Not a problem: the following Wait will return
4005 // immediately. But it means we cannot enforce the following assertion:
4006// _ASSERTE(m_WaitSB != NULL);
4007
4008 return (Wait(syncState->m_WaitEventLink->m_Next->m_EventWait, timeOut, syncState) != WAIT_OBJECT_0);
4009}
4010
4011
4012// Return whether or not a timeout occurred. TRUE=>we waited successfully
4013DWORD Thread::Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo)
4014{
4015 WRAPPER_NO_CONTRACT;
4016
4017 DWORD dwResult;
4018 DWORD dwTimeOut32;
4019
4020 _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);
4021
4022 dwTimeOut32 = (timeOut == INFINITE_TIMEOUT
4023 ? INFINITE
4024 : (DWORD) timeOut);
4025
4026 dwResult = DoAppropriateWait(cntObjs, objs, FALSE /*=waitAll*/, dwTimeOut32,
4027 WaitMode_Alertable /*alertable*/,
4028 syncInfo);
4029
4030 // Either we succeeded in the wait, or we timed out
4031 _ASSERTE((dwResult >= WAIT_OBJECT_0 && dwResult < (DWORD)(WAIT_OBJECT_0 + cntObjs)) ||
4032 (dwResult == WAIT_TIMEOUT));
4033
4034 return dwResult;
4035}
4036
4037// Return whether or not a timeout occurred. TRUE=>we waited successfully
4038DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo)
4039{
4040 WRAPPER_NO_CONTRACT;
4041
4042 DWORD dwResult;
4043 DWORD dwTimeOut32;
4044
4045 _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);
4046
4047 dwTimeOut32 = (timeOut == INFINITE_TIMEOUT
4048 ? INFINITE
4049 : (DWORD) timeOut);
4050
4051 dwResult = pEvent->Wait(dwTimeOut32, TRUE /*alertable*/, syncInfo);
4052
4053 // Either we succeeded in the wait, or we timed out
4054 _ASSERTE((dwResult == WAIT_OBJECT_0) ||
4055 (dwResult == WAIT_TIMEOUT));
4056
4057 return dwResult;
4058}
4059
4060void Thread::Wake(SyncBlock *psb)
4061{
4062 WRAPPER_NO_CONTRACT;
4063
4064 CLREvent* hEvent = NULL;
4065 WaitEventLink *walk = &m_WaitEventLink;
4066 while (walk->m_Next) {
4067 if (walk->m_Next->m_WaitSB == psb) {
4068 hEvent = walk->m_Next->m_EventWait;
4069 // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB
4070 // since the thread is helding the syncblock.
4071 walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1);
4072 break;
4073 }
4074#ifdef _DEBUG
4075 else if ((SyncBlock*)((DWORD_PTR)walk->m_Next & ~1) == psb) {
4076 _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once");
4077 }
4078#endif
4079 }
4080 PREFIX_ASSUME (hEvent != NULL);
4081 hEvent->Set();
4082}
4083
4084#define WAIT_INTERRUPT_THREADABORT 0x1
4085#define WAIT_INTERRUPT_INTERRUPT 0x2
4086#define WAIT_INTERRUPT_OTHEREXCEPTION 0x4
4087
4088// When we restore
4089DWORD EnterMonitorForRestore(SyncBlock *pSB)
4090{
4091 CONTRACTL
4092 {
4093 THROWS;
4094 GC_TRIGGERS;
4095 MODE_COOPERATIVE;
4096 }
4097 CONTRACTL_END;
4098
4099 DWORD state = 0;
4100 EX_TRY
4101 {
4102 pSB->EnterMonitor();
4103 }
4104 EX_CATCH
4105 {
4106 // Assume it is a normal exception unless proven.
4107 state = WAIT_INTERRUPT_OTHEREXCEPTION;
4108 Thread *pThread = GetThread();
4109 if (pThread->IsAbortInitiated())
4110 {
4111 state = WAIT_INTERRUPT_THREADABORT;
4112 }
4113 else if (__pException != NULL)
4114 {
4115 if (__pException->GetHR() == COR_E_THREADINTERRUPTED)
4116 {
4117 state = WAIT_INTERRUPT_INTERRUPT;
4118 }
4119 }
4120 }
4121 EX_END_CATCH(SwallowAllExceptions);
4122
4123 return state;
4124}
4125
4126// This is the service that backs us out of a wait that we interrupted. We must
4127// re-enter the monitor to the same extent the SyncBlock would, if we returned
4128// through it (instead of throwing through it). And we need to cancel the wait,
4129// if it didn't get notified away while we are processing the interrupt.
4130void PendingSync::Restore(BOOL bRemoveFromSB)
4131{
4132 CONTRACTL {
4133 THROWS;
4134 GC_TRIGGERS;
4135 }
4136 CONTRACTL_END;
4137
4138 _ASSERTE(m_EnterCount);
4139
4140 Thread *pCurThread = GetThread();
4141
4142 _ASSERTE (pCurThread == m_OwnerThread);
4143
4144 WaitEventLink *pRealWaitEventLink = m_WaitEventLink->m_Next;
4145
4146 pRealWaitEventLink->m_RefCount --;
4147 if (pRealWaitEventLink->m_RefCount == 0)
4148 {
4149 if (bRemoveFromSB) {
4150 ThreadQueue::RemoveThread(pCurThread, pRealWaitEventLink->m_WaitSB);
4151 }
4152 if (pRealWaitEventLink->m_EventWait != &pCurThread->m_EventWait) {
4153 // Put the event back to the pool.
4154 StoreEventToEventStore(pRealWaitEventLink->m_EventWait);
4155 }
4156 // Remove from the link.
4157 m_WaitEventLink->m_Next = m_WaitEventLink->m_Next->m_Next;
4158 }
4159
4160 // Someone up the stack is responsible for keeping the syncblock alive by protecting
4161 // the object that owns it. But this relies on assertions that EnterMonitor is only
4162 // called in cooperative mode. Even though we are safe in preemptive, do the
4163 // switch.
4164 GCX_COOP_THREAD_EXISTS(pCurThread);
4165 // We need to make sure that EnterMonitor succeeds. We may have code like
4166 // lock (a)
4167 // {
4168 // a.Wait
4169 // }
4170 // We need to make sure that the finally from lock is excuted with the lock owned.
4171 DWORD state = 0;
4172 SyncBlock *psb = (SyncBlock*)((DWORD_PTR)pRealWaitEventLink->m_WaitSB & ~1);
4173 for (LONG i=0; i < m_EnterCount;)
4174 {
4175 if ((state & (WAIT_INTERRUPT_THREADABORT | WAIT_INTERRUPT_INTERRUPT)) != 0)
4176 {
4177 // If the thread has been interrupted by Thread.Interrupt or Thread.Abort,
4178 // disable the check at the beginning of DoAppropriateWait
4179 pCurThread->SetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
4180 }
4181 DWORD result = EnterMonitorForRestore(psb);
4182 if (result == 0)
4183 {
4184 i++;
4185 }
4186 else
4187 {
4188 // We block the thread until the thread acquires the lock.
4189 // This is to make sure that when catch/finally is executed, the thread has the lock.
4190 // We do not want thread to run its catch/finally if the lock is not taken.
4191 state |= result;
4192
4193 // If the thread is being rudely aborted, and the thread has
4194 // no Cer on stack, we will not run managed code to release the
4195 // lock, so we can terminate the loop.
4196 if (pCurThread->IsRudeAbortInitiated() &&
4197 !pCurThread->IsExecutingWithinCer())
4198 {
4199 break;
4200 }
4201 }
4202 }
4203
4204 pCurThread->ResetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
4205
4206 if ((state & WAIT_INTERRUPT_THREADABORT) != 0)
4207 {
4208 pCurThread->HandleThreadAbort();
4209 }
4210 else if ((state & WAIT_INTERRUPT_INTERRUPT) != 0)
4211 {
4212 COMPlusThrow(kThreadInterruptedException);
4213 }
4214}
4215
4216
4217
4218// This is the callback from the OS, when we queue an APC to interrupt a waiting thread.
4219// The callback occurs on the thread we wish to interrupt. It is a STATIC method.
4220void WINAPI Thread::UserInterruptAPC(ULONG_PTR data)
4221{
4222 CONTRACTL {
4223 NOTHROW;
4224 GC_NOTRIGGER;
4225 SO_TOLERANT;
4226 }
4227 CONTRACTL_END;
4228
4229 _ASSERTE(data == APC_Code);
4230
4231 Thread *pCurThread = GetThread();
4232 if (pCurThread)
4233 {
4234 // We should only take action if an interrupt is currently being
4235 // requested (our synchronization does not guarantee that we won't fire
4236 // spuriously). It's safe to check the m_UserInterrupt field and then
4237 // set TS_Interrupted in a non-atomic fashion because m_UserInterrupt is
4238 // only cleared in this thread's context (though it may be set from any
4239 // context).
4240 if (pCurThread->IsUserInterrupted())
4241 {
4242 // Set bit to indicate this routine was called (as opposed to other
4243 // generic APCs).
4244 FastInterlockOr((ULONG *) &pCurThread->m_State, TS_Interrupted);
4245 }
4246 }
4247}
4248
4249// This is the workhorse for Thread.Interrupt().
4250void Thread::UserInterrupt(ThreadInterruptMode mode)
4251{
4252 CONTRACTL {
4253 NOTHROW;
4254 GC_NOTRIGGER;
4255 }
4256 CONTRACTL_END;
4257
4258 FastInterlockOr((DWORD*)&m_UserInterrupt, mode);
4259
4260 if (HasValidThreadHandle() &&
4261 HasThreadState (TS_Interruptible))
4262 {
4263 Alert();
4264 }
4265}
4266
4267// Implementation of Thread.Sleep().
4268void Thread::UserSleep(INT32 time)
4269{
4270 CONTRACTL {
4271 THROWS;
4272 GC_TRIGGERS;
4273 }
4274 CONTRACTL_END;
4275
4276 INCONTRACT(_ASSERTE(!GetThread()->GCNoTrigger()));
4277
4278 DWORD res;
4279
4280 // Before going to pre-emptive mode the thread needs to be flagged as waiting for
4281 // the debugger. This used to be accomplished by the TS_Interruptible flag but that
4282 // doesn't work reliably, see DevDiv Bugs 699245.
4283 ThreadStateNCStackHolder tsNC(TRUE, TSNC_DebuggerSleepWaitJoin);
4284 GCX_PREEMP();
4285
4286 // A word about ordering for Interrupt. If someone tries to interrupt a thread
4287 // that's in the interruptible state, we queue an APC. But if they try to interrupt
4288 // a thread that's not in the interruptible state, we just record that fact. So
4289 // we have to set TS_Interruptible before we test to see whether someone wants to
4290 // interrupt us or else we have a race condition that causes us to skip the APC.
4291 FastInterlockOr((ULONG *) &m_State, TS_Interruptible);
4292
4293 // If someone has interrupted us, we should not enter the wait.
4294 if (IsUserInterrupted())
4295 {
4296 HandleThreadInterrupt(FALSE);
4297 }
4298
4299 ThreadStateHolder tsh(TRUE, TS_Interruptible | TS_Interrupted);
4300
4301 FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
4302
4303 DWORD dwTime = (DWORD)time;
4304retry:
4305
4306 ULONGLONG start = CLRGetTickCount64();
4307
4308 res = ClrSleepEx (dwTime, TRUE);
4309
4310 if (res == WAIT_IO_COMPLETION)
4311 {
4312 // We could be woken by some spurious APC or an EE APC queued to
4313 // interrupt us. In the latter case the TS_Interrupted bit will be set
4314 // in the thread state bits. Otherwise we just go back to sleep again.
4315 if ((m_State & TS_Interrupted))
4316 {
4317 HandleThreadInterrupt(FALSE);
4318 }
4319
4320 if (dwTime == INFINITE)
4321 {
4322 goto retry;
4323 }
4324 else
4325 {
4326 ULONGLONG actDuration = CLRGetTickCount64() - start;
4327
4328 if (dwTime > actDuration)
4329 {
4330 dwTime -= (DWORD)actDuration;
4331 goto retry;
4332 }
4333 else
4334 {
4335 res = WAIT_TIMEOUT;
4336 }
4337 }
4338 }
4339 _ASSERTE(res == WAIT_TIMEOUT || res == WAIT_OBJECT_0);
4340}
4341
4342
4343// Correspondence between an EE Thread and an exposed System.Thread:
4344OBJECTREF Thread::GetExposedObject()
4345{
4346 CONTRACTL {
4347 THROWS;
4348 GC_TRIGGERS;
4349 }
4350 CONTRACTL_END;
4351
4352 TRIGGERSGC();
4353
4354 Thread *pCurThread = GetThread();
4355 _ASSERTE (!(pCurThread == NULL || IsAtProcessExit()));
4356
4357 _ASSERTE(pCurThread->PreemptiveGCDisabled());
4358
4359 if (ObjectFromHandle(m_ExposedObject) == NULL)
4360 {
4361 // Allocate the exposed thread object.
4362 THREADBASEREF attempt = (THREADBASEREF) AllocateObject(g_pThreadClass);
4363 GCPROTECT_BEGIN(attempt);
4364
4365 // The exposed object keeps us alive until it is GC'ed. This
4366 // doesn't mean the physical thread continues to run, of course.
4367 // We have to set this outside of the ThreadStore lock, because this might trigger a GC.
4368 attempt->SetInternal(this);
4369
4370 BOOL fNeedThreadStore = (! ThreadStore::HoldingThreadStore(pCurThread));
4371 // Take a lock to make sure that only one thread creates the object.
4372 ThreadStoreLockHolder tsHolder(fNeedThreadStore);
4373
4374 // Check to see if another thread has not already created the exposed object.
4375 if (ObjectFromHandle(m_ExposedObject) == NULL)
4376 {
4377 // Keep a weak reference to the exposed object.
4378 StoreObjectInHandle(m_ExposedObject, (OBJECTREF) attempt);
4379
4380 ObjectInHandleHolder exposedHolder(m_ExposedObject);
4381
4382 // Increase the external ref count. We can't call IncExternalCount because we
4383 // already hold the thread lock and IncExternalCount won't be able to take it.
4384 ULONG retVal = FastInterlockIncrement ((LONG*)&m_ExternalRefCount);
4385
4386 // Check to see if we need to store a strong pointer to the object.
4387 if (retVal > 1)
4388 StoreObjectInHandle(m_StrongHndToExposedObject, (OBJECTREF) attempt);
4389
4390 ObjectInHandleHolder strongHolder(m_StrongHndToExposedObject);
4391
4392
4393 attempt->SetManagedThreadId(GetThreadId());
4394
4395
4396 // Note that we are NOT calling the constructor on the Thread. That's
4397 // because this is an internal create where we don't want a Start
4398 // address. And we don't want to expose such a constructor for our
4399 // customers to accidentally call. The following is in lieu of a true
4400 // constructor:
4401 attempt->InitExisting();
4402
4403 exposedHolder.SuppressRelease();
4404 strongHolder.SuppressRelease();
4405 }
4406 else
4407 {
4408 attempt->ClearInternal();
4409 }
4410
4411 GCPROTECT_END();
4412 }
4413 return ObjectFromHandle(m_ExposedObject);
4414}
4415
4416
4417// We only set non NULL exposed objects for unstarted threads that haven't exited
4418// their constructor yet. So there are no race conditions.
4419void Thread::SetExposedObject(OBJECTREF exposed)
4420{
4421 CONTRACTL {
4422 NOTHROW;
4423 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
4424 }
4425 CONTRACTL_END;
4426
4427 if (exposed != NULL)
4428 {
4429 _ASSERTE (GetThread() != this);
4430 _ASSERTE(IsUnstarted());
4431 _ASSERTE(ObjectFromHandle(m_ExposedObject) == NULL);
4432 // The exposed object keeps us alive until it is GC'ed. This doesn't mean the
4433 // physical thread continues to run, of course.
4434 StoreObjectInHandle(m_ExposedObject, exposed);
4435 // This makes sure the contexts on the backing thread
4436 // and the managed thread start off in sync with each other.
4437 // BEWARE: the IncExternalCount call below may cause GC to happen.
4438
4439 // IncExternalCount will store exposed in m_StrongHndToExposedObject which is in default domain.
4440 // If the creating thread is killed before the target thread is killed in Thread.Start, Thread object
4441 // will be kept alive forever.
4442 // Instead, IncExternalCount should be called after the target thread has been started in Thread.Start.
4443 // IncExternalCount();
4444 }
4445 else
4446 {
4447 // Simply set both of the handles to NULL. The GC of the old exposed thread
4448 // object will take care of decrementing the external ref count.
4449 StoreObjectInHandle(m_ExposedObject, NULL);
4450 StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
4451 }
4452}
4453
4454void Thread::SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled)
4455{
4456 CONTRACTL
4457 {
4458 if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle
4459 GC_NOTRIGGER;
4460 if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
4461 SO_TOLERANT;
4462 }
4463 CONTRACTL_END;
4464
4465 STRESS_LOG_COND1(LF_EH, LL_INFO100, OBJECTREFToObject(throwable) != NULL, "in Thread::SetLastThrownObject: obj = %p\n", OBJECTREFToObject(throwable));
4466
4467 // you can't have a NULL unhandled exception
4468 _ASSERTE(!(throwable == NULL && isUnhandled));
4469
4470 if (m_LastThrownObjectHandle != NULL)
4471 {
4472 // We'll somtimes use a handle for a preallocated exception object. We should never, ever destroy one of
4473 // these handles... they'll be destroyed when the Runtime shuts down.
4474 if (!CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
4475 {
4476 DestroyHandle(m_LastThrownObjectHandle);
4477 }
4478
4479 m_LastThrownObjectHandle = NULL; // Make sure to set this to NULL here just in case we throw trying to make
4480 // a new handle below.
4481 }
4482
4483 if (throwable != NULL)
4484 {
4485 _ASSERTE(this == GetThread());
4486
4487 // Non-compliant exceptions are always wrapped.
4488 // The use of the ExceptionNative:: helper here (rather than the global ::IsException helper)
4489 // is hokey, but we need a GC_NOTRIGGER version and it's only for an ASSERT.
4490 _ASSERTE(IsException(throwable->GetMethodTable()));
4491
4492 // If we're tracking one of the preallocated exception objects, then just use the global handle that
4493 // matches it rather than creating a new one.
4494 if (CLRException::IsPreallocatedExceptionObject(throwable))
4495 {
4496 m_LastThrownObjectHandle = CLRException::GetPreallocatedHandleForObject(throwable);
4497 }
4498 else
4499 {
4500 BEGIN_SO_INTOLERANT_CODE(GetThread());
4501 {
4502 m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable);
4503 }
4504 END_SO_INTOLERANT_CODE;
4505 }
4506
4507 _ASSERTE(m_LastThrownObjectHandle != NULL);
4508 m_ltoIsUnhandled = isUnhandled;
4509 }
4510 else
4511 {
4512 m_ltoIsUnhandled = FALSE;
4513 }
4514}
4515
4516void Thread::SetSOForLastThrownObject()
4517{
4518 CONTRACTL
4519 {
4520 NOTHROW;
4521 GC_NOTRIGGER;
4522 MODE_COOPERATIVE;
4523 SO_TOLERANT;
4524 CANNOT_TAKE_LOCK;
4525 }
4526 CONTRACTL_END;
4527
4528
4529 // If we are saving stack overflow exception, we can just null out the current handle.
4530 // The current domain is going to be unloaded or the process is going to be killed, so
4531 // we will not leak a handle.
4532 m_LastThrownObjectHandle = CLRException::GetPreallocatedStackOverflowExceptionHandle();
4533}
4534
4535//
4536// This is a nice wrapper for SetLastThrownObject which catches any exceptions caused by not being able to create
4537// the handle for the throwable, and setting the last thrown object to the preallocated out of memory exception
4538// instead.
4539//
4540OBJECTREF Thread::SafeSetLastThrownObject(OBJECTREF throwable)
4541{
4542 CONTRACTL
4543 {
4544 NOTHROW;
4545 GC_NOTRIGGER;
4546 if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
4547 SO_TOLERANT;
4548 }
4549 CONTRACTL_END;
4550
4551 // We return the original throwable if nothing goes wrong.
4552 OBJECTREF ret = throwable;
4553
4554 EX_TRY
4555 {
4556 // Try to set the throwable.
4557 SetLastThrownObject(throwable);
4558 }
4559 EX_CATCH
4560 {
4561 // If it didn't work, then set the last thrown object to the preallocated OOM exception, and return that
4562 // object instead of the original throwable.
4563 ret = CLRException::GetPreallocatedOutOfMemoryException();
4564 SetLastThrownObject(ret);
4565 }
4566 EX_END_CATCH(SwallowAllExceptions);
4567
4568 return ret;
4569}
4570
4571//
4572// This is a nice wrapper for SetThrowable and SetLastThrownObject, which catches any exceptions caused by not
4573// being able to create the handle for the throwable, and sets the throwable to the preallocated out of memory
4574// exception instead. It also updates the last thrown object, which is always updated when the throwable is
4575// updated.
4576//
4577OBJECTREF Thread::SafeSetThrowables(OBJECTREF throwable DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags),
4578 BOOL isUnhandled)
4579{
4580 CONTRACTL
4581 {
4582 NOTHROW;
4583 GC_NOTRIGGER;
4584 if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
4585 SO_TOLERANT;
4586 }
4587 CONTRACTL_END;
4588
4589 // We return the original throwable if nothing goes wrong.
4590 OBJECTREF ret = throwable;
4591
4592 EX_TRY
4593 {
4594 // Try to set the throwable.
4595 SetThrowable(throwable DEBUG_ARG(stecFlags));
4596
4597 // Now, if the last thrown object is different, go ahead and update it. This makes sure that we re-throw
4598 // the right object when we rethrow.
4599 if (LastThrownObject() != throwable)
4600 {
4601 SetLastThrownObject(throwable);
4602 }
4603
4604 if (isUnhandled)
4605 {
4606 MarkLastThrownObjectUnhandled();
4607 }
4608 }
4609 EX_CATCH
4610 {
4611 // If either set didn't work, then set both throwables to the preallocated OOM exception, and return that
4612 // object instead of the original throwable.
4613 ret = CLRException::GetPreallocatedOutOfMemoryException();
4614
4615 // Neither of these will throw because we're setting with a preallocated exception.
4616 SetThrowable(ret DEBUG_ARG(stecFlags));
4617 SetLastThrownObject(ret, isUnhandled);
4618 }
4619 EX_END_CATCH(SwallowAllExceptions);
4620
4621
4622 return ret;
4623}
4624
4625// This method will sync the managed exception state to be in sync with the topmost active exception
4626// for a given thread
4627void Thread::SyncManagedExceptionState(bool fIsDebuggerThread)
4628{
4629 CONTRACTL
4630 {
4631 NOTHROW;
4632 GC_NOTRIGGER;
4633 MODE_ANY;
4634 }
4635 CONTRACTL_END;
4636
4637 {
4638 GCX_COOP();
4639
4640 // Syncup the LastThrownObject on the managed thread
4641 SafeUpdateLastThrownObject();
4642 }
4643
4644#ifdef FEATURE_CORRUPTING_EXCEPTIONS
4645 // Since the catch clause has successfully executed and we are exiting it, reset the corruption severity
4646 // in the ThreadExceptionState for the last active exception. This will ensure that when the next exception
4647 // gets thrown/raised, EH tracker wont pick up an invalid value.
4648 if (!fIsDebuggerThread)
4649 {
4650 CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler(this);
4651 }
4652#endif // FEATURE_CORRUPTING_EXCEPTIONS
4653
4654}
4655
4656void Thread::SetLastThrownObjectHandle(OBJECTHANDLE h)
4657{
4658 CONTRACTL
4659 {
4660 NOTHROW;
4661 GC_NOTRIGGER;
4662 MODE_COOPERATIVE;
4663 SO_TOLERANT;
4664 }
4665 CONTRACTL_END;
4666
4667 if (m_LastThrownObjectHandle != NULL &&
4668 !CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
4669 {
4670 DestroyHandle(m_LastThrownObjectHandle);
4671 }
4672
4673 m_LastThrownObjectHandle = h;
4674}
4675
4676//
4677// Create a duplicate handle of the current throwable and set the last thrown object to that. This ensures that the
4678// last thrown object and the current throwable have handles that are in the same app domain.
4679//
4680void Thread::SafeUpdateLastThrownObject(void)
4681{
4682 CONTRACTL
4683 {
4684 NOTHROW;
4685 GC_NOTRIGGER;
4686 MODE_COOPERATIVE;
4687 SO_INTOLERANT;
4688 }
4689 CONTRACTL_END;
4690
4691 OBJECTHANDLE hThrowable = GetThrowableAsHandle();
4692
4693 if (hThrowable != NULL)
4694 {
4695 EX_TRY
4696 {
4697 IGCHandleManager *pHandleTable = GCHandleUtilities::GetGCHandleManager();
4698
4699 // Creating a duplicate handle here ensures that the AD of the last thrown object
4700 // matches the domain of the current throwable.
4701 OBJECTHANDLE duplicateHandle = pHandleTable->CreateDuplicateHandle(hThrowable);
4702 SetLastThrownObjectHandle(duplicateHandle);
4703 }
4704 EX_CATCH
4705 {
4706 // If we can't create a duplicate handle, we set both throwables to the preallocated OOM exception.
4707 SafeSetThrowables(CLRException::GetPreallocatedOutOfMemoryException());
4708 }
4709 EX_END_CATCH(SwallowAllExceptions);
4710 }
4711}
4712
4713// Background threads must be counted, because the EE should shut down when the
4714// last non-background thread terminates. But we only count running ones.
4715void Thread::SetBackground(BOOL isBack, BOOL bRequiresTSL)
4716{
4717 CONTRACTL {
4718 NOTHROW;
4719 GC_TRIGGERS;
4720 }
4721 CONTRACTL_END;
4722
4723 // booleanize IsBackground() which just returns bits
4724 if (isBack == !!IsBackground())
4725 return;
4726
4727 LOG((LF_SYNC, INFO3, "SetBackground obtain lock\n"));
4728 ThreadStoreLockHolder TSLockHolder(FALSE);
4729 if (bRequiresTSL)
4730 {
4731 TSLockHolder.Acquire();
4732 }
4733
4734 if (IsDead())
4735 {
4736 // This can only happen in a race condition, where the correct thing to do
4737 // is ignore it. If it happens without the race condition, we throw an
4738 // exception.
4739 }
4740 else
4741 if (isBack)
4742 {
4743 if (!IsBackground())
4744 {
4745 FastInterlockOr((ULONG *) &m_State, TS_Background);
4746
4747 // unstarted threads don't contribute to the background count
4748 if (!IsUnstarted())
4749 ThreadStore::s_pThreadStore->m_BackgroundThreadCount++;
4750
4751 // If we put the main thread into a wait, until only background threads exist,
4752 // then we make that
4753 // main thread a background thread. This cleanly handles the case where it
4754 // may or may not be one as it enters the wait.
4755
4756 // One of the components of OtherThreadsComplete() has changed, so check whether
4757 // we should now exit the EE.
4758 ThreadStore::CheckForEEShutdown();
4759 }
4760 }
4761 else
4762 {
4763 if (IsBackground())
4764 {
4765 FastInterlockAnd((ULONG *) &m_State, ~TS_Background);
4766
4767 // unstarted threads don't contribute to the background count
4768 if (!IsUnstarted())
4769 ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
4770
4771 _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
4772 _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount <=
4773 ThreadStore::s_pThreadStore->m_ThreadCount);
4774 }
4775 }
4776
4777 if (bRequiresTSL)
4778 {
4779 TSLockHolder.Release();
4780 }
4781}
4782
4783#ifdef FEATURE_COMINTEROP
4784class ApartmentSpyImpl : public IUnknownCommon<IInitializeSpy>
4785{
4786
4787public:
4788 HRESULT STDMETHODCALLTYPE PreInitialize(DWORD dwCoInit, DWORD dwCurThreadAptRefs)
4789 {
4790 LIMITED_METHOD_CONTRACT;
4791 return S_OK;
4792 }
4793
4794 HRESULT STDMETHODCALLTYPE PostInitialize(HRESULT hrCoInit, DWORD dwCoInit, DWORD dwNewThreadAptRefs)
4795 {
4796 LIMITED_METHOD_CONTRACT;
4797 return hrCoInit; // this HRESULT will be returned from CoInitialize(Ex)
4798 }
4799
4800 HRESULT STDMETHODCALLTYPE PreUninitialize(DWORD dwCurThreadAptRefs)
4801 {
4802 // Don't assume that Thread exists and do not create it.
4803 STATIC_CONTRACT_NOTHROW;
4804 STATIC_CONTRACT_GC_TRIGGERS;
4805 STATIC_CONTRACT_MODE_PREEMPTIVE;
4806
4807 HRESULT hr = S_OK;
4808
4809 if (dwCurThreadAptRefs == 1 && !g_fEEShutDown)
4810 {
4811 // This is the last CoUninitialize on this thread and the CLR is still running. If it's an STA
4812 // we take the opportunity to perform COM/WinRT cleanup now, when the apartment is still alive.
4813
4814 Thread *pThread = GetThreadNULLOk();
4815 if (pThread != NULL)
4816 {
4817 BEGIN_EXTERNAL_ENTRYPOINT(&hr)
4818 {
4819 if (pThread->GetFinalApartment() == Thread::AS_InSTA)
4820 {
4821 // This will release RCWs and purge the WinRT factory cache on all AppDomains. It
4822 // will also synchronize with the finalizer thread which ensures that the RCWs
4823 // that were already in the global RCW cleanup list will be cleaned up as well.
4824 //
4825 ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
4826 }
4827 }
4828 END_EXTERNAL_ENTRYPOINT;
4829 }
4830 }
4831 return hr;
4832 }
4833
4834 HRESULT STDMETHODCALLTYPE PostUninitialize(DWORD dwNewThreadAptRefs)
4835 {
4836 LIMITED_METHOD_CONTRACT;
4837 return S_OK;
4838 }
4839};
4840#endif // FEATURE_COMINTEROP
4841
4842// When the thread starts running, make sure it is running in the correct apartment
4843// and context.
4844BOOL Thread::PrepareApartmentAndContext()
4845{
4846 CONTRACTL {
4847 THROWS;
4848 GC_TRIGGERS;
4849 }
4850 CONTRACTL_END;
4851
4852 m_OSThreadId = ::GetCurrentThreadId();
4853
4854#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
4855 // Be very careful in here because we haven't set up e.g. TLS yet.
4856
4857 if (m_State & (TS_InSTA | TS_InMTA))
4858 {
4859 // Make sure TS_InSTA and TS_InMTA aren't both set.
4860 _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));
4861
4862 // Determine the apartment state to set based on the requested state.
4863 ApartmentState aState = m_State & TS_InSTA ? AS_InSTA : AS_InMTA;
4864
4865 // Clear the requested apartment state from the thread. This is requested since
4866 // the thread might actually be a fiber that has already been initialized to
4867 // a different apartment state than the requested one. If we didn't clear
4868 // the requested apartment state, then we could end up with both TS_InSTA and
4869 // TS_InMTA set at the same time.
4870 FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);
4871
4872 // Attempt to set the requested apartment state.
4873 SetApartment(aState, FALSE);
4874 }
4875
4876 // In the case where we own the thread and we have switched it to a different
4877 // starting context, it is the responsibility of the caller (KickOffThread())
4878 // to notice that the context changed, and to adjust the delegate that it will
4879 // dispatch on, as appropriate.
4880#endif //FEATURE_COMINTEROP_APARTMENT_SUPPORT
4881
4882#ifdef FEATURE_COMINTEROP
4883 // Our IInitializeSpy will be registered in AppX always, in classic processes
4884 // only if the internal config switch is on.
4885 if (AppX::IsAppXProcess() || g_pConfig->EnableRCWCleanupOnSTAShutdown())
4886 {
4887 NewHolder<ApartmentSpyImpl> pSpyImpl = new ApartmentSpyImpl();
4888
4889 IfFailThrow(CoRegisterInitializeSpy(pSpyImpl, &m_uliInitializeSpyCookie));
4890 pSpyImpl.SuppressRelease();
4891
4892 m_fInitializeSpyRegistered = true;
4893 }
4894#endif // FEATURE_COMINTEROP
4895
4896 return TRUE;
4897}
4898
4899
4900#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
4901
4902// TS_InSTA (0x00004000) -> AS_InSTA (0)
4903// TS_InMTA (0x00008000) -> AS_InMTA (1)
4904#define TS_TO_AS(ts) \
4905 (Thread::ApartmentState)((((DWORD)ts) >> 14) - 1) \
4906
4907// Retrieve the apartment state of the current thread. There are three possible
4908// states: thread hosts an STA, thread is part of the MTA or thread state is
4909// undecided. The last state may indicate that the apartment has not been set at
4910// all (nobody has called CoInitializeEx) or that the EE does not know the
4911// current state (EE has not called CoInitializeEx).
4912Thread::ApartmentState Thread::GetApartment()
4913{
4914 CONTRACTL
4915 {
4916 NOTHROW;
4917 GC_TRIGGERS;
4918 MODE_ANY;
4919 }
4920 CONTRACTL_END;
4921
4922 ApartmentState as = AS_Unknown;
4923 ThreadState maskedTs = (ThreadState)(((DWORD)m_State) & (TS_InSTA|TS_InMTA));
4924 if (maskedTs)
4925 {
4926 _ASSERTE((maskedTs == TS_InSTA) || (maskedTs == TS_InMTA));
4927 static_assert_no_msg(TS_TO_AS(TS_InSTA) == AS_InSTA);
4928 static_assert_no_msg(TS_TO_AS(TS_InMTA) == AS_InMTA);
4929
4930 as = TS_TO_AS(maskedTs);
4931 }
4932
4933 if (
4934#ifdef MDA_SUPPORTED
4935 (NULL == MDA_GET_ASSISTANT(InvalidApartmentStateChange)) &&
4936#endif
4937 (as != AS_Unknown))
4938 {
4939 return as;
4940 }
4941
4942 return GetApartmentRare(as);
4943}
4944
4945Thread::ApartmentState Thread::GetApartmentRare(Thread::ApartmentState as)
4946{
4947 CONTRACTL
4948 {
4949 NOTHROW;
4950 GC_TRIGGERS;
4951 MODE_ANY;
4952 }
4953 CONTRACTL_END;
4954
4955 if (this == GetThread())
4956 {
4957 THDTYPE type;
4958 HRESULT hr = S_OK;
4959
4960#ifdef MDA_SUPPORTED
4961 MdaInvalidApartmentStateChange* pProbe = MDA_GET_ASSISTANT(InvalidApartmentStateChange);
4962 if (pProbe)
4963 {
4964 // Without notifications from OLE32, we cannot know when the apartment state of a
4965 // thread changes. But we have cached this fact and depend on it for all our
4966 // blocking and COM Interop behavior to work correctly. Using the CDH, log that it
4967 // is not changing underneath us, on those platforms where it is relatively cheap for
4968 // us to do so.
4969 if (as != AS_Unknown)
4970 {
4971 hr = GetCurrentThreadTypeNT5(&type);
4972 if (hr == S_OK)
4973 {
4974 if (type == THDTYPE_PROCESSMESSAGES && as == AS_InMTA)
4975 {
4976 pProbe->ReportViolation(this, as, FALSE);
4977 }
4978 else if (type == THDTYPE_BLOCKMESSAGES && as == AS_InSTA)
4979 {
4980 pProbe->ReportViolation(this, as, FALSE);
4981 }
4982 }
4983 }
4984 }
4985#endif
4986
4987 if (as == AS_Unknown)
4988 {
4989 hr = GetCurrentThreadTypeNT5(&type);
4990 if (hr == S_OK)
4991 {
4992 as = (type == THDTYPE_PROCESSMESSAGES) ? AS_InSTA : AS_InMTA;
4993
4994 // If we get back THDTYPE_PROCESSMESSAGES, we are guaranteed to
4995 // be an STA thread. If not, we are an MTA thread, however
4996 // we can't know if the thread has been explicitly set to MTA
4997 // (via a call to CoInitializeEx) or if it has been implicitly
4998 // made MTA (if it hasn't been CoInitializeEx'd but CoInitialize
4999 // has already been called on some other thread in the process.
5000 if (as == AS_InSTA)
5001 FastInterlockOr((ULONG *) &m_State, AS_InSTA);
5002 }
5003 }
5004 }
5005
5006 return as;
5007}
5008
5009
5010// Retrieve the explicit apartment state of the current thread. There are three possible
5011// states: thread hosts an STA, thread is part of the MTA or thread state is
5012// undecided. The last state may indicate that the apartment has not been set at
5013// all (nobody has called CoInitializeEx), the EE does not know the
5014// current state (EE has not called CoInitializeEx), or the thread is implicitly in
5015// the MTA.
5016Thread::ApartmentState Thread::GetExplicitApartment()
5017{
5018 CONTRACTL
5019 {
5020 NOTHROW;
5021 GC_TRIGGERS;
5022 MODE_ANY;
5023 }
5024 CONTRACTL_END;
5025
5026 _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));
5027
5028 // Initialize m_State by calling GetApartment.
5029 GetApartment();
5030
5031 ApartmentState as = (m_State & TS_InSTA) ? AS_InSTA :
5032 (m_State & TS_InMTA) ? AS_InMTA :
5033 AS_Unknown;
5034
5035 return as;
5036}
5037
5038
5039Thread::ApartmentState Thread::GetFinalApartment()
5040{
5041 CONTRACTL
5042 {
5043 NOTHROW;
5044 GC_TRIGGERS;
5045 MODE_ANY;
5046 SO_TOLERANT;
5047 }
5048 CONTRACTL_END;
5049
5050 _ASSERTE(this == GetThread());
5051
5052 ApartmentState as = AS_Unknown;
5053 if (g_fEEShutDown)
5054 {
5055 // On shutdown, do not use cached value. Someone might have called
5056 // CoUninitialize.
5057 FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);
5058 }
5059
5060 as = GetApartment();
5061 if (as == AS_Unknown)
5062 {
5063 // On Win2k and above, GetApartment will only return AS_Unknown if CoInitialize
5064 // hasn't been called in the process. In that case we can simply assume MTA. However we
5065 // cannot cache this value in the Thread because if a CoInitialize does occur, then the
5066 // thread state might change.
5067 as = AS_InMTA;
5068 }
5069
5070 return as;
5071}
5072
5073// when we get apartment tear-down notification,
5074// we want reset the apartment state we cache on the thread
5075VOID Thread::ResetApartment()
5076{
5077 CONTRACTL {
5078 NOTHROW;
5079 GC_NOTRIGGER;
5080 }
5081 CONTRACTL_END;
5082
5083 // reset the TS_InSTA bit and TS_InMTA bit
5084 ThreadState t_State = (ThreadState)(~(TS_InSTA | TS_InMTA));
5085 FastInterlockAnd((ULONG *) &m_State, t_State);
5086}
5087
5088// Attempt to set current thread's apartment state. The actual apartment state
5089// achieved is returned and may differ from the input state if someone managed
5090// to call CoInitializeEx on this thread first (note that calls to SetApartment
5091// made before the thread has started are guaranteed to succeed).
5092// The fFireMDAOnMismatch indicates if we should fire the apartment state probe
5093// on an apartment state mismatch.
5094Thread::ApartmentState Thread::SetApartment(ApartmentState state, BOOL fFireMDAOnMismatch)
5095{
5096 CONTRACTL {
5097 THROWS;
5098 GC_TRIGGERS;
5099 MODE_ANY;
5100 INJECT_FAULT(COMPlusThrowOM(););
5101 }
5102 CONTRACTL_END;
5103
5104 // Reset any bits that request for CoInitialize
5105 ResetRequiresCoInitialize();
5106
5107 // Setting the state to AS_Unknown indicates we should CoUninitialize
5108 // the thread.
5109 if (state == AS_Unknown)
5110 {
5111 BOOL needUninitialize = (m_State & TS_CoInitialized)
5112#ifdef FEATURE_COMINTEROP
5113 || IsWinRTInitialized()
5114#endif // FEATURE_COMINTEROP
5115 ;
5116
5117 if (needUninitialize)
5118 {
5119 GCX_PREEMP();
5120
5121 // If we haven't CoInitialized the thread, then we don't have anything to do.
5122 if (m_State & TS_CoInitialized)
5123 {
5124 // We should never be attempting to CoUninitialize another thread than
5125 // the currently running thread.
5126 _ASSERTE(m_OSThreadId == ::GetCurrentThreadId());
5127
5128 // CoUninitialize the thread and reset the STA/MTA/CoInitialized state bits.
5129 ::CoUninitialize();
5130
5131 ThreadState uninitialized = static_cast<ThreadState>(TS_InSTA | TS_InMTA | TS_CoInitialized);
5132 FastInterlockAnd((ULONG *) &m_State, ~uninitialized);
5133 }
5134
5135#ifdef FEATURE_COMINTEROP
5136 if (IsWinRTInitialized())
5137 {
5138 _ASSERTE(WinRTSupported());
5139 BaseWinRTUninitialize();
5140 ResetWinRTInitialized();
5141 }
5142#endif // FEATURE_COMINTEROP
5143 }
5144 return GetApartment();
5145 }
5146
5147 // Call GetApartment to initialize the current apartment state.
5148 //
5149 // Important note: For Win2k and above this can return AS_InMTA even if the current
5150 // thread has never been CoInitialized. Because of this we MUST NOT look at the
5151 // return value of GetApartment here. We can however look at the m_State flags
5152 // since these will only be set to TS_InMTA if we know for a fact the the
5153 // current thread has explicitly been made MTA (via a call to CoInitializeEx).
5154 GetApartment();
5155
5156 // If the current thread is STA, then it is impossible to change it to
5157 // MTA.
5158 if (m_State & TS_InSTA)
5159 {
5160#ifdef MDA_SUPPORTED
5161 if (state == AS_InMTA && fFireMDAOnMismatch)
5162 {
5163 MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
5164 }
5165#endif
5166 return AS_InSTA;
5167 }
5168
5169 // If the current thread is EXPLICITLY MTA, then it is impossible to change it to
5170 // STA.
5171 if (m_State & TS_InMTA)
5172 {
5173#ifdef MDA_SUPPORTED
5174 if (state == AS_InSTA && fFireMDAOnMismatch)
5175 {
5176 MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
5177 }
5178#endif
5179 return AS_InMTA;
5180 }
5181
5182 // If the thread isn't even started yet, we mark the state bits without
5183 // calling CoInitializeEx (since we're obviously not in the correct thread
5184 // context yet). We'll retry this call when the thread is started.
5185 // Don't use the TS_Unstarted state bit to check for this, it's cleared far
5186 // too late in the day for us. Instead check whether we're in the correct
5187 // thread context.
5188 if (m_OSThreadId != ::GetCurrentThreadId())
5189 {
5190 FastInterlockOr((ULONG *) &m_State, (state == AS_InSTA) ? TS_InSTA : TS_InMTA);
5191 return state;
5192 }
5193
5194 HRESULT hr;
5195 {
5196 GCX_PREEMP();
5197
5198 // Attempt to set apartment by calling CoInitializeEx. This may fail if
5199 // another caller (outside EE) beat us to it.
5200 //
5201 // Important note: When calling CoInitializeEx(COINIT_MULTITHREADED) on a
5202 // thread that has never been CoInitialized, the return value will always
5203 // be S_OK, even if another thread in the process has already been
5204 // CoInitialized to MTA. However if the current thread has already been
5205 // CoInitialized to MTA, then S_FALSE will be returned.
5206 hr = ::CoInitializeEx(NULL, (state == AS_InSTA) ?
5207 COINIT_APARTMENTTHREADED : COINIT_MULTITHREADED);
5208 }
5209
5210 if (SUCCEEDED(hr))
5211 {
5212 ThreadState t_State = (state == AS_InSTA) ? TS_InSTA : TS_InMTA;
5213
5214 if (hr == S_OK)
5215 {
5216 // The thread has never been CoInitialized.
5217 t_State = (ThreadState)(t_State | TS_CoInitialized);
5218 }
5219 else
5220 {
5221 _ASSERTE(hr == S_FALSE);
5222
5223 // If the thread has already been CoInitialized to the proper mode, then
5224 // we don't want to leave an outstanding CoInit so we CoUninit.
5225 {
5226 GCX_PREEMP();
5227 ::CoUninitialize();
5228 }
5229 }
5230
5231 // We succeeded in setting the apartment state to the requested state.
5232 FastInterlockOr((ULONG *) &m_State, t_State);
5233 }
5234 else if (hr == RPC_E_CHANGED_MODE)
5235 {
5236 // We didn't manage to enforce the requested apartment state, but at least
5237 // we can work out what the state is now. No need to actually do the CoInit --
5238 // obviously someone else already took care of that.
5239 FastInterlockOr((ULONG *) &m_State, ((state == AS_InSTA) ? TS_InMTA : TS_InSTA));
5240
5241#ifdef MDA_SUPPORTED
5242 if (fFireMDAOnMismatch)
5243 {
5244 // Report via the customer debug helper that we failed to set the apartment type
5245 // to the specified type.
5246 MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
5247 }
5248#endif
5249 }
5250 else if (hr == E_OUTOFMEMORY)
5251 {
5252 COMPlusThrowOM();
5253 }
5254 else
5255 {
5256 _ASSERTE(!"Unexpected HRESULT returned from CoInitializeEx!");
5257 }
5258
5259#ifdef FEATURE_COMINTEROP
5260
5261 // If WinRT is supported on this OS, also initialize it at the same time. Since WinRT sits on top of COM
5262 // we need to make sure that it is initialized in the same threading mode as we just started COM itself
5263 // with (or that we detected COM had already been started with).
5264 if (WinRTSupported() && !IsWinRTInitialized())
5265 {
5266 GCX_PREEMP();
5267
5268 BOOL isSTA = m_State & TS_InSTA;
5269 _ASSERTE(isSTA || (m_State & TS_InMTA));
5270
5271 HRESULT hrWinRT = RoInitialize(isSTA ? RO_INIT_SINGLETHREADED : RO_INIT_MULTITHREADED);
5272
5273 if (SUCCEEDED(hrWinRT))
5274 {
5275 if (hrWinRT == S_OK)
5276 {
5277 SetThreadStateNC(TSNC_WinRTInitialized);
5278 }
5279 else
5280 {
5281 _ASSERTE(hrWinRT == S_FALSE);
5282
5283 // If the thread has already been initialized, back it out. We may not
5284 // always be able to call RoUninitialize on shutdown so if there's
5285 // a way to avoid having to, we should take advantage of that.
5286 RoUninitialize();
5287 }
5288 }
5289 else if (hrWinRT == E_OUTOFMEMORY)
5290 {
5291 COMPlusThrowOM();
5292 }
5293 else
5294 {
5295 // We don't check for RPC_E_CHANGEDMODE, since we're using the mode that was read in by
5296 // initializing COM above. COM and WinRT need to always be in the same mode, so we should never
5297 // see that return code at this point.
5298 _ASSERTE(!"Unexpected HRESULT From RoInitialize");
5299 }
5300 }
5301
5302 // Since we've just called CoInitialize, COM has effectively been started up.
5303 // To ensure the CLR is aware of this, we need to call EnsureComStarted.
5304 EnsureComStarted(FALSE);
5305#endif // FEATURE_COMINTEROP
5306
5307 return GetApartment();
5308}
5309#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
5310
5311
5312//----------------------------------------------------------------------------
5313//
5314// ThreadStore Implementation
5315//
5316//----------------------------------------------------------------------------
5317
5318ThreadStore::ThreadStore()
5319 : m_Crst(CrstThreadStore, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
5320 m_ThreadCount(0),
5321 m_MaxThreadCount(0),
5322 m_UnstartedThreadCount(0),
5323 m_BackgroundThreadCount(0),
5324 m_PendingThreadCount(0),
5325 m_DeadThreadCount(0),
5326 m_DeadThreadCountForGCTrigger(0),
5327 m_TriggerGCForDeadThreads(false),
5328 m_GuidCreated(FALSE),
5329 m_HoldingThread(0)
5330{
5331 CONTRACTL {
5332 THROWS;
5333 GC_NOTRIGGER;
5334 }
5335 CONTRACTL_END;
5336
5337 m_TerminationEvent.CreateManualEvent(FALSE);
5338 _ASSERTE(m_TerminationEvent.IsValid());
5339}
5340
5341
5342void ThreadStore::InitThreadStore()
5343{
5344 CONTRACTL {
5345 THROWS;
5346 GC_TRIGGERS;
5347 }
5348 CONTRACTL_END;
5349
5350 s_pThreadStore = new ThreadStore;
5351
5352 g_pThinLockThreadIdDispenser = new IdDispenser();
5353
5354 ThreadSuspend::g_pGCSuspendEvent = new CLREvent();
5355 ThreadSuspend::g_pGCSuspendEvent->CreateManualEvent(FALSE);
5356
5357 s_pWaitForStackCrawlEvent = new CLREvent();
5358 s_pWaitForStackCrawlEvent->CreateManualEvent(FALSE);
5359
5360 s_DeadThreadCountThresholdForGCTrigger =
5361 static_cast<LONG>(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadCountThresholdForGCTrigger));
5362 if (s_DeadThreadCountThresholdForGCTrigger < 0)
5363 {
5364 s_DeadThreadCountThresholdForGCTrigger = 0;
5365 }
5366 s_DeadThreadGCTriggerPeriodMilliseconds =
5367 CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadGCTriggerPeriodMilliseconds);
5368 s_DeadThreadGenerationCounts = nullptr;
5369}
5370
5371// Enter and leave the critical section around the thread store. Clients should
5372// use LockThreadStore and UnlockThreadStore because ThreadStore lock has
5373// additional semantics well beyond a normal lock.
5374DEBUG_NOINLINE void ThreadStore::Enter()
5375{
5376 WRAPPER_NO_CONTRACT;
5377 ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
5378 CHECK_ONE_STORE();
5379 m_Crst.Enter();
5380
5381 // Threadstore needs special shutdown handling.
5382 if (g_fSuspendOnShutdown)
5383 {
5384 m_Crst.ReleaseAndBlockForShutdownIfNotSpecialThread();
5385 }
5386}
5387
5388DEBUG_NOINLINE void ThreadStore::Leave()
5389{
5390 WRAPPER_NO_CONTRACT;
5391 ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
5392 CHECK_ONE_STORE();
5393 m_Crst.Leave();
5394}
5395
5396void ThreadStore::LockThreadStore()
5397{
5398 WRAPPER_NO_CONTRACT;
5399
5400 // The actual implementation is in ThreadSuspend class since it is coupled
5401 // with thread suspension logic
5402 ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
5403}
5404
5405void ThreadStore::UnlockThreadStore()
5406{
5407 WRAPPER_NO_CONTRACT;
5408
5409 // The actual implementation is in ThreadSuspend class since it is coupled
5410 // with thread suspension logic
5411 ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_OTHER);
5412}
5413
5414// AddThread adds 'newThread' to m_ThreadList
5415void ThreadStore::AddThread(Thread *newThread, BOOL bRequiresTSL)
5416{
5417 CONTRACTL {
5418 NOTHROW;
5419 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
5420 }
5421 CONTRACTL_END;
5422
5423 LOG((LF_SYNC, INFO3, "AddThread obtain lock\n"));
5424
5425 ThreadStoreLockHolder TSLockHolder(FALSE);
5426 if (bRequiresTSL)
5427 {
5428 TSLockHolder.Acquire();
5429 }
5430
5431 s_pThreadStore->m_ThreadList.InsertTail(newThread);
5432
5433 s_pThreadStore->m_ThreadCount++;
5434 if (s_pThreadStore->m_MaxThreadCount < s_pThreadStore->m_ThreadCount)
5435 s_pThreadStore->m_MaxThreadCount = s_pThreadStore->m_ThreadCount;
5436
5437 if (newThread->IsUnstarted())
5438 s_pThreadStore->m_UnstartedThreadCount++;
5439
5440 newThread->SetThreadStateNC(Thread::TSNC_ExistInThreadStore);
5441
5442 _ASSERTE(!newThread->IsBackground());
5443 _ASSERTE(!newThread->IsDead());
5444
5445 if (bRequiresTSL)
5446 {
5447 TSLockHolder.Release();
5448 }
5449}
5450
5451// this function is just desgined to avoid deadlocks during abnormal process termination, and should not be used for any other purpose
5452BOOL ThreadStore::CanAcquireLock()
5453{
5454 WRAPPER_NO_CONTRACT;
5455 {
5456 return (s_pThreadStore->m_Crst.m_criticalsection.LockCount == -1 || (size_t)s_pThreadStore->m_Crst.m_criticalsection.OwningThread == (size_t)GetCurrentThreadId());
5457 }
5458}
5459
5460// Whenever one of the components of OtherThreadsComplete() has changed in the
5461// correct direction, see whether we can now shutdown the EE because only background
5462// threads are running.
5463void ThreadStore::CheckForEEShutdown()
5464{
5465 CONTRACTL {
5466 NOTHROW;
5467 GC_NOTRIGGER;
5468 }
5469 CONTRACTL_END;
5470
5471 if (g_fWeControlLifetime &&
5472 s_pThreadStore->OtherThreadsComplete())
5473 {
5474 BOOL bRet;
5475 bRet = s_pThreadStore->m_TerminationEvent.Set();
5476 _ASSERTE(bRet);
5477 }
5478}
5479
5480
5481BOOL ThreadStore::RemoveThread(Thread *target)
5482{
5483 CONTRACTL {
5484 NOTHROW;
5485 GC_NOTRIGGER;
5486 }
5487 CONTRACTL_END;
5488
5489 BOOL found;
5490 Thread *ret;
5491
5492#if 0 // This assert is not valid when failing to create background GC thread.
5493 // Main GC thread holds the TS lock.
5494 _ASSERTE (ThreadStore::HoldingThreadStore());
5495#endif
5496
5497 _ASSERTE(s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
5498 IsAtProcessExit());
5499 _ASSERTE(s_pThreadStore->DbgFindThread(target));
5500 ret = s_pThreadStore->m_ThreadList.FindAndRemove(target);
5501 _ASSERTE(ret && ret == target);
5502 found = (ret != NULL);
5503
5504 if (found)
5505 {
5506 target->ResetThreadStateNC(Thread::TSNC_ExistInThreadStore);
5507
5508 s_pThreadStore->m_ThreadCount--;
5509
5510 if (target->IsDead())
5511 {
5512 s_pThreadStore->m_DeadThreadCount--;
5513 s_pThreadStore->DecrementDeadThreadCountForGCTrigger();
5514 }
5515
5516 // Unstarted threads are not in the Background count:
5517 if (target->IsUnstarted())
5518 s_pThreadStore->m_UnstartedThreadCount--;
5519 else
5520 if (target->IsBackground())
5521 s_pThreadStore->m_BackgroundThreadCount--;
5522
5523 FastInterlockExchangeAdd(
5524 &Thread::s_threadPoolCompletionCountOverflow,
5525 target->m_threadPoolCompletionCount);
5526
5527 _ASSERTE(s_pThreadStore->m_ThreadCount >= 0);
5528 _ASSERTE(s_pThreadStore->m_BackgroundThreadCount >= 0);
5529 _ASSERTE(s_pThreadStore->m_ThreadCount >=
5530 s_pThreadStore->m_BackgroundThreadCount);
5531 _ASSERTE(s_pThreadStore->m_ThreadCount >=
5532 s_pThreadStore->m_UnstartedThreadCount);
5533 _ASSERTE(s_pThreadStore->m_ThreadCount >=
5534 s_pThreadStore->m_DeadThreadCount);
5535
5536 // One of the components of OtherThreadsComplete() has changed, so check whether
5537 // we should now exit the EE.
5538 CheckForEEShutdown();
5539 }
5540 return found;
5541}
5542
5543
5544// When a thread is created as unstarted. Later it may get started, in which case
5545// someone calls Thread::HasStarted() on that physical thread. This completes
5546// the Setup and calls here.
5547void ThreadStore::TransferStartedThread(Thread *thread, BOOL bRequiresTSL)
5548{
5549 CONTRACTL {
5550 THROWS;
5551 GC_TRIGGERS;
5552 }
5553 CONTRACTL_END;
5554
5555 _ASSERTE(GetThread() == thread);
5556
5557 LOG((LF_SYNC, INFO3, "TransferUnstartedThread obtain lock\n"));
5558 ThreadStoreLockHolder TSLockHolder(FALSE);
5559 if (bRequiresTSL)
5560 {
5561 TSLockHolder.Acquire();
5562 }
5563
5564 _ASSERTE(s_pThreadStore->DbgFindThread(thread));
5565 _ASSERTE(thread->HasValidThreadHandle());
5566 _ASSERTE(thread->m_State & Thread::TS_WeOwn);
5567 _ASSERTE(thread->IsUnstarted());
5568 _ASSERTE(!thread->IsDead());
5569
5570 if (thread->m_State & Thread::TS_AbortRequested)
5571 {
5572 PAL_CPP_THROW(EEException *, new EEException(COR_E_THREADABORTED));
5573 }
5574
5575 // Of course, m_ThreadCount is already correct since it includes started and
5576 // unstarted threads.
5577
5578 s_pThreadStore->m_UnstartedThreadCount--;
5579
5580 // We only count background threads that have been started
5581 if (thread->IsBackground())
5582 s_pThreadStore->m_BackgroundThreadCount++;
5583
5584 _ASSERTE(s_pThreadStore->m_PendingThreadCount > 0);
5585 FastInterlockDecrement(&s_pThreadStore->m_PendingThreadCount);
5586
5587 // As soon as we erase this bit, the thread becomes eligible for suspension,
5588 // stopping, interruption, etc.
5589 FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Unstarted);
5590 FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_LegalToJoin);
5591
5592 // release ThreadStore Crst to avoid Crst Violation when calling HandleThreadAbort later
5593 if (bRequiresTSL)
5594 {
5595 TSLockHolder.Release();
5596 }
5597
5598 // One of the components of OtherThreadsComplete() has changed, so check whether
5599 // we should now exit the EE.
5600 CheckForEEShutdown();
5601}
5602
5603LONG ThreadStore::s_DeadThreadCountThresholdForGCTrigger = 0;
5604DWORD ThreadStore::s_DeadThreadGCTriggerPeriodMilliseconds = 0;
5605SIZE_T *ThreadStore::s_DeadThreadGenerationCounts = nullptr;
5606
5607void ThreadStore::IncrementDeadThreadCountForGCTrigger()
5608{
5609 CONTRACTL {
5610 NOTHROW;
5611 GC_NOTRIGGER;
5612 }
5613 CONTRACTL_END;
5614
5615 // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a
5616 // background GC thread resetting this value, hence the interlocked operation. Ignore overflow; overflow would likely never
5617 // occur, the count is treated as unsigned, and nothing bad would happen if it were to overflow.
5618 SIZE_T count = static_cast<SIZE_T>(FastInterlockIncrement(&m_DeadThreadCountForGCTrigger));
5619
5620 SIZE_T countThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger);
5621 if (count < countThreshold || countThreshold == 0)
5622 {
5623 return;
5624 }
5625
5626 IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap();
5627 if (gcHeap == nullptr)
5628 {
5629 return;
5630 }
5631
5632 SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcHeap->GetMaxGeneration());
5633 SIZE_T gcNowMilliseconds = gcHeap->GetNow();
5634 if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds)
5635 {
5636 return;
5637 }
5638
5639 if (!g_fEEStarted) // required for FinalizerThread::EnableFinalization() below
5640 {
5641 return;
5642 }
5643
5644 // The GC is triggered on the finalizer thread since it's not safe to trigger it on DLL_THREAD_DETACH.
5645 // TriggerGCForDeadThreadsIfNecessary() will determine which generation of GC to trigger, and may not actually trigger a GC.
5646 // If a GC is triggered, since there would be a delay before the dead thread count is updated, clear the count and wait for
5647 // it to reach the threshold again. If a GC would not be triggered, the count is still cleared here to prevent waking up the
5648 // finalizer thread to do the work in TriggerGCForDeadThreadsIfNecessary() for every dead thread.
5649 m_DeadThreadCountForGCTrigger = 0;
5650 m_TriggerGCForDeadThreads = true;
5651 FinalizerThread::EnableFinalization();
5652}
5653
5654void ThreadStore::DecrementDeadThreadCountForGCTrigger()
5655{
5656 CONTRACTL {
5657 NOTHROW;
5658 GC_NOTRIGGER;
5659 }
5660 CONTRACTL_END;
5661
5662 // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a
5663 // background GC thread resetting this value, hence the interlocked operation.
5664 if (FastInterlockDecrement(&m_DeadThreadCountForGCTrigger) < 0)
5665 {
5666 m_DeadThreadCountForGCTrigger = 0;
5667 }
5668}
5669
5670void ThreadStore::OnMaxGenerationGCStarted()
5671{
5672 LIMITED_METHOD_CONTRACT;
5673
5674 // A dead thread may contribute to triggering a GC at most once. After a max-generation GC occurs, if some dead thread
5675 // objects are still reachable due to references to the thread objects, they will not contribute to triggering a GC again.
5676 // Synchronize the store with increment/decrement operations occurring on different threads, and make the change visible to
5677 // other threads in order to prevent unnecessary GC triggers.
5678 FastInterlockExchange(&m_DeadThreadCountForGCTrigger, 0);
5679}
5680
5681bool ThreadStore::ShouldTriggerGCForDeadThreads()
5682{
5683 LIMITED_METHOD_CONTRACT;
5684
5685 return m_TriggerGCForDeadThreads;
5686}
5687
5688void ThreadStore::TriggerGCForDeadThreadsIfNecessary()
5689{
5690 CONTRACTL {
5691 THROWS;
5692 GC_TRIGGERS;
5693 }
5694 CONTRACTL_END;
5695
5696 if (!m_TriggerGCForDeadThreads)
5697 {
5698 return;
5699 }
5700 m_TriggerGCForDeadThreads = false;
5701
5702 if (g_fEEShutDown)
5703 {
5704 // Not safe to touch CLR state
5705 return;
5706 }
5707
5708 unsigned gcGenerationToTrigger = 0;
5709 IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap();
5710 _ASSERTE(gcHeap != nullptr);
5711 SIZE_T generationCountThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger) / 2;
5712 unsigned maxGeneration = gcHeap->GetMaxGeneration();
5713 if (!s_DeadThreadGenerationCounts)
5714 {
5715 // initialize this field on first use with an entry for every table.
5716 s_DeadThreadGenerationCounts = new (nothrow) SIZE_T[maxGeneration + 1];
5717 if (!s_DeadThreadGenerationCounts)
5718 {
5719 return;
5720 }
5721 }
5722
5723 memset(s_DeadThreadGenerationCounts, 0, sizeof(SIZE_T) * (maxGeneration + 1));
5724 {
5725 ThreadStoreLockHolder threadStoreLockHolder;
5726 GCX_COOP();
5727
5728 // Determine the generation for which to trigger a GC. Iterate over all dead threads that have not yet been considered
5729 // for triggering a GC and see how many are in which generations.
5730 for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead);
5731 thread != nullptr;
5732 thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead))
5733 {
5734 if (thread->HasDeadThreadBeenConsideredForGCTrigger())
5735 {
5736 continue;
5737 }
5738
5739 Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw());
5740 if (exposedObject == nullptr)
5741 {
5742 continue;
5743 }
5744
5745 unsigned exposedObjectGeneration = gcHeap->WhichGeneration(exposedObject);
5746 SIZE_T newDeadThreadGenerationCount = ++s_DeadThreadGenerationCounts[exposedObjectGeneration];
5747 if (exposedObjectGeneration > gcGenerationToTrigger && newDeadThreadGenerationCount >= generationCountThreshold)
5748 {
5749 gcGenerationToTrigger = exposedObjectGeneration;
5750 if (gcGenerationToTrigger >= maxGeneration)
5751 {
5752 break;
5753 }
5754 }
5755 }
5756
5757 // Make sure that enough time has elapsed since the last GC of the desired generation. We don't want to trigger GCs
5758 // based on this heuristic too often. Give it some time to let the memory pressure trigger GCs automatically, and only
5759 // if it doesn't in the given time, this heuristic may kick in to trigger a GC.
5760 SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcGenerationToTrigger);
5761 SIZE_T gcNowMilliseconds = gcHeap->GetNow();
5762 if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds)
5763 {
5764 return;
5765 }
5766
5767 // For threads whose exposed objects are in the generation of GC that will be triggered or in a lower GC generation,
5768 // mark them as having contributed to a GC trigger to prevent redundant GC triggers
5769 for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead);
5770 thread != nullptr;
5771 thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead))
5772 {
5773 if (thread->HasDeadThreadBeenConsideredForGCTrigger())
5774 {
5775 continue;
5776 }
5777
5778 Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw());
5779 if (exposedObject == nullptr)
5780 {
5781 continue;
5782 }
5783
5784 if (gcGenerationToTrigger < maxGeneration &&
5785 gcHeap->WhichGeneration(exposedObject) > gcGenerationToTrigger)
5786 {
5787 continue;
5788 }
5789
5790 thread->SetHasDeadThreadBeenConsideredForGCTrigger();
5791 }
5792 } // ThreadStoreLockHolder, GCX_COOP()
5793
5794 GCHeapUtilities::GetGCHeap()->GarbageCollect(gcGenerationToTrigger, FALSE, collection_non_blocking);
5795}
5796
5797#endif // #ifndef DACCESS_COMPILE
5798
5799
5800// Access the list of threads. You must be inside a critical section, otherwise
5801// the "cursor" thread might disappear underneath you. Pass in NULL for the
5802// cursor to begin at the start of the list.
5803Thread *ThreadStore::GetAllThreadList(Thread *cursor, ULONG mask, ULONG bits)
5804{
5805 CONTRACTL {
5806 NOTHROW;
5807 GC_NOTRIGGER;
5808 SO_TOLERANT;
5809 }
5810 CONTRACTL_END;
5811 SUPPORTS_DAC;
5812
5813#ifndef DACCESS_COMPILE
5814 _ASSERTE((s_pThreadStore->m_Crst.GetEnterCount() > 0) || IsAtProcessExit());
5815#endif
5816
5817 while (TRUE)
5818 {
5819 cursor = (cursor
5820 ? s_pThreadStore->m_ThreadList.GetNext(cursor)
5821 : s_pThreadStore->m_ThreadList.GetHead());
5822
5823 if (cursor == NULL)
5824 break;
5825
5826 if ((cursor->m_State & mask) == bits)
5827 return cursor;
5828 }
5829 return NULL;
5830}
5831
5832// Iterate over the threads that have been started
5833Thread *ThreadStore::GetThreadList(Thread *cursor)
5834{
5835 CONTRACTL {
5836 NOTHROW;
5837 GC_NOTRIGGER;
5838 SO_TOLERANT;
5839 }
5840 CONTRACTL_END;
5841 SUPPORTS_DAC;
5842
5843 return GetAllThreadList(cursor, (Thread::TS_Unstarted | Thread::TS_Dead), 0);
5844}
5845
5846//---------------------------------------------------------------------------------------
5847//
5848// Grab a consistent snapshot of the thread's state, for reporting purposes only.
5849//
5850// Return Value:
5851// the current state of the thread
5852//
5853
5854Thread::ThreadState Thread::GetSnapshotState()
5855{
5856 CONTRACTL {
5857 NOTHROW;
5858 GC_NOTRIGGER;
5859 SO_TOLERANT;
5860 SUPPORTS_DAC;
5861 }
5862 CONTRACTL_END;
5863
5864 ThreadState res = m_State;
5865
5866 if (res & TS_ReportDead)
5867 {
5868 res = (ThreadState) (res | TS_Dead);
5869 }
5870
5871 return res;
5872}
5873
5874#ifndef DACCESS_COMPILE
5875
5876BOOL CLREventWaitWithTry(CLREventBase *pEvent, DWORD timeout, BOOL fAlertable, DWORD *pStatus)
5877{
5878 CONTRACTL
5879 {
5880 NOTHROW;
5881 WRAPPER(GC_TRIGGERS);
5882 }
5883 CONTRACTL_END;
5884
5885 BOOL fLoop = TRUE;
5886 EX_TRY
5887 {
5888 *pStatus = pEvent->Wait(timeout, fAlertable);
5889 fLoop = FALSE;
5890 }
5891 EX_CATCH
5892 {
5893 }
5894 EX_END_CATCH(SwallowAllExceptions);
5895
5896 return fLoop;
5897}
5898
5899// We shut down the EE only when all the non-background threads have terminated
5900// (unless this is an exceptional termination). So the main thread calls here to
5901// wait before tearing down the EE.
5902void ThreadStore::WaitForOtherThreads()
5903{
5904 CONTRACTL {
5905 THROWS;
5906 GC_TRIGGERS;
5907 }
5908 CONTRACTL_END;
5909
5910 CHECK_ONE_STORE();
5911
5912 Thread *pCurThread = GetThread();
5913
5914 // Regardless of whether the main thread is a background thread or not, force
5915 // it to be one. This simplifies our rules for counting non-background threads.
5916 pCurThread->SetBackground(TRUE);
5917
5918 LOG((LF_SYNC, INFO3, "WaitForOtherThreads obtain lock\n"));
5919 ThreadStoreLockHolder TSLockHolder(TRUE);
5920 if (!OtherThreadsComplete())
5921 {
5922 TSLockHolder.Release();
5923
5924 FastInterlockOr((ULONG *) &pCurThread->m_State, Thread::TS_ReportDead);
5925
5926 DWORD ret = WAIT_OBJECT_0;
5927 while (CLREventWaitWithTry(&m_TerminationEvent, INFINITE, TRUE, &ret))
5928 {
5929 }
5930 _ASSERTE(ret == WAIT_OBJECT_0);
5931 }
5932}
5933
5934
5935// Every EE process can lazily create a GUID that uniquely identifies it (for
5936// purposes of remoting).
5937const GUID &ThreadStore::GetUniqueEEId()
5938{
5939 CONTRACTL {
5940 NOTHROW;
5941 GC_TRIGGERS;
5942 }
5943 CONTRACTL_END;
5944
5945 if (!m_GuidCreated)
5946 {
5947 ThreadStoreLockHolder TSLockHolder(TRUE);
5948 if (!m_GuidCreated)
5949 {
5950 HRESULT hr = ::CoCreateGuid(&m_EEGuid);
5951
5952 _ASSERTE(SUCCEEDED(hr));
5953 if (SUCCEEDED(hr))
5954 m_GuidCreated = TRUE;
5955 }
5956
5957 if (!m_GuidCreated)
5958 return IID_NULL;
5959 }
5960 return m_EEGuid;
5961}
5962
5963
5964#ifdef _DEBUG
5965BOOL ThreadStore::DbgFindThread(Thread *target)
5966{
5967 CONTRACTL {
5968 NOTHROW;
5969 GC_NOTRIGGER;
5970 }
5971 CONTRACTL_END;
5972
5973 CHECK_ONE_STORE();
5974
5975 // Cache the current change stamp for g_TrapReturningThreads
5976 LONG chgStamp = g_trtChgStamp;
5977 STRESS_LOG3(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chgStamp=%d\n", GetThread(), g_TrapReturningThreads.Load(), chgStamp);
5978
5979#if 0 // g_TrapReturningThreads debug code.
5980 int iRetry = 0;
5981Retry:
5982#endif // g_TrapReturningThreads debug code.
5983 BOOL found = FALSE;
5984 Thread *cur = NULL;
5985 LONG cnt = 0;
5986 LONG cntBack = 0;
5987 LONG cntUnstart = 0;
5988 LONG cntDead = 0;
5989 LONG cntReturn = 0;
5990
5991 while ((cur = GetAllThreadList(cur, 0, 0)) != NULL)
5992 {
5993 cnt++;
5994
5995 if (cur->IsDead())
5996 cntDead++;
5997
5998 // Unstarted threads do not contribute to the count of background threads
5999 if (cur->IsUnstarted())
6000 cntUnstart++;
6001 else
6002 if (cur->IsBackground())
6003 cntBack++;
6004
6005 if (cur == target)
6006 found = TRUE;
6007
6008 // Note that (DebugSuspendPending | SuspendPending) implies a count of 2.
6009 // We don't count GCPending because a single trap is held for the entire
6010 // GC, instead of counting each interesting thread.
6011 if (cur->m_State & Thread::TS_DebugSuspendPending)
6012 cntReturn++;
6013
6014 // CoreCLR does not support user-requested thread suspension
6015 _ASSERTE(!(cur->m_State & Thread::TS_UserSuspendPending));
6016
6017 if (cur->m_TraceCallCount > 0)
6018 cntReturn++;
6019
6020 if (cur->IsAbortRequested())
6021 cntReturn++;
6022 }
6023
6024 _ASSERTE(cnt == m_ThreadCount);
6025 _ASSERTE(cntUnstart == m_UnstartedThreadCount);
6026 _ASSERTE(cntBack == m_BackgroundThreadCount);
6027 _ASSERTE(cntDead == m_DeadThreadCount);
6028 _ASSERTE(0 <= m_PendingThreadCount);
6029
6030#if 0 // g_TrapReturningThreads debug code.
6031 if (cntReturn != g_TrapReturningThreads /*&& !g_fEEShutDown*/)
6032 { // If count is off, try again, to account for multiple threads.
6033 if (iRetry < 4)
6034 {
6035 // printf("Retry %d. cntReturn:%d, gReturn:%d\n", iRetry, cntReturn, g_TrapReturningThreads);
6036 ++iRetry;
6037 goto Retry;
6038 }
6039 printf("cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
6040 cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit());
6041 LOG((LF_CORDB, LL_INFO1000,
6042 "SUSPEND: cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
6043 cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()) );
6044
6045 //_ASSERTE(cntReturn + 2 >= g_TrapReturningThreads);
6046 }
6047 if (iRetry > 0 && iRetry < 4)
6048 {
6049 printf("%d retries to re-sync counted TrapReturn with global TrapReturn.\n", iRetry);
6050 }
6051#endif // g_TrapReturningThreads debug code.
6052
6053 STRESS_LOG4(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chg=%d. cnt=%d\n", GetThread(), g_TrapReturningThreads.Load(), g_trtChgStamp.Load(), cntReturn);
6054
6055 // Because of race conditions and the fact that the GC places its
6056 // own count, I can't assert this precisely. But I do want to be
6057 // sure that this count isn't wandering ever higher -- with a
6058 // nasty impact on the performance of GC mode changes and method
6059 // call chaining!
6060 //
6061 // We don't bother asserting this during process exit, because
6062 // during a shutdown we will quietly terminate threads that are
6063 // being waited on. (If we aren't shutting down, we carefully
6064 // decrement our counts and alert anyone waiting for us to
6065 // return).
6066 //
6067 // Note: we don't actually assert this if
6068 // ThreadStore::TrapReturningThreads() updated g_TrapReturningThreads
6069 // between the beginning of this function and the moment of the assert.
6070 // *** The order of evaluation in the if condition is important ***
6071 _ASSERTE(
6072 (g_trtChgInFlight != 0 || (cntReturn + 2 >= g_TrapReturningThreads) || chgStamp != g_trtChgStamp) ||
6073 g_fEEShutDown);
6074
6075 return found;
6076}
6077
6078#endif // _DEBUG
6079
6080void Thread::HandleThreadInterrupt (BOOL fWaitForADUnload)
6081{
6082 STATIC_CONTRACT_THROWS;
6083 STATIC_CONTRACT_GC_TRIGGERS;
6084 STATIC_CONTRACT_SO_TOLERANT;
6085
6086 // If we're waiting for shutdown, we don't want to abort/interrupt this thread
6087 if (HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
6088 return;
6089
6090 BEGIN_SO_INTOLERANT_CODE(this);
6091
6092 if ((m_UserInterrupt & TI_Abort) != 0)
6093 {
6094 // If the thread is waiting for AD unload to finish, and the thread is interrupted,
6095 // we can start aborting.
6096 HandleThreadAbort(fWaitForADUnload);
6097 }
6098 if ((m_UserInterrupt & TI_Interrupt) != 0)
6099 {
6100 ResetThreadState ((ThreadState)(TS_Interrupted | TS_Interruptible));
6101 FastInterlockAnd ((DWORD*)&m_UserInterrupt, ~TI_Interrupt);
6102
6103 COMPlusThrow(kThreadInterruptedException);
6104 }
6105 END_SO_INTOLERANT_CODE;
6106}
6107
6108#ifdef _DEBUG
6109#define MAXSTACKBYTES (2 * GetOsPageSize())
6110void CleanStackForFastGCStress ()
6111{
6112 CONTRACTL {
6113 NOTHROW;
6114 GC_NOTRIGGER;
6115 SO_TOLERANT;
6116 }
6117 CONTRACTL_END;
6118
6119 PVOID StackLimit = ClrTeb::GetStackLimit();
6120 size_t nBytes = (size_t)&nBytes - (size_t)StackLimit;
6121 nBytes &= ~sizeof (size_t);
6122 if (nBytes > MAXSTACKBYTES) {
6123 nBytes = MAXSTACKBYTES;
6124 }
6125 size_t* buffer = (size_t*) _alloca (nBytes);
6126 memset(buffer, 0, nBytes);
6127 GetThread()->m_pCleanedStackBase = &nBytes;
6128}
6129
6130void Thread::ObjectRefFlush(Thread* thread)
6131{
6132
6133 BEGIN_PRESERVE_LAST_ERROR;
6134
6135 // The constructor and destructor of AutoCleanupSONotMainlineHolder (allocated by SO_NOT_MAINLINE_FUNCTION below)
6136 // may trash the last error, so we need to save and restore last error here. Also, we need to add a scope here
6137 // because we can't let the destructor run after we call SetLastError().
6138 {
6139 // this is debug only code, so no need to validate
6140 STATIC_CONTRACT_NOTHROW;
6141 STATIC_CONTRACT_GC_NOTRIGGER;
6142 STATIC_CONTRACT_ENTRY_POINT;
6143
6144 _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
6145 memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs));
6146 thread->m_allObjRefEntriesBad = FALSE;
6147 CLEANSTACKFORFASTGCSTRESS ();
6148 }
6149
6150 END_PRESERVE_LAST_ERROR;
6151}
6152#endif
6153
6154#if defined(STRESS_HEAP)
6155
6156PtrHashMap *g_pUniqueStackMap = NULL;
6157Crst *g_pUniqueStackCrst = NULL;
6158
6159#define UniqueStackDepth 8
6160
6161BOOL StackCompare (UPTR val1, UPTR val2)
6162{
6163 CONTRACTL {
6164 NOTHROW;
6165 GC_NOTRIGGER;
6166 }
6167 CONTRACTL_END;
6168
6169 size_t *p1 = (size_t *)(val1 << 1);
6170 size_t *p2 = (size_t *)val2;
6171 if (p1[0] != p2[0]) {
6172 return FALSE;
6173 }
6174 size_t nElem = p1[0];
6175 if (nElem >= UniqueStackDepth) {
6176 nElem = UniqueStackDepth;
6177 }
6178 p1 ++;
6179 p2 ++;
6180
6181 for (size_t n = 0; n < nElem; n ++) {
6182 if (p1[n] != p2[n]) {
6183 return FALSE;
6184 }
6185 }
6186
6187 return TRUE;
6188}
6189
6190void UniqueStackSetupMap()
6191{
6192 WRAPPER_NO_CONTRACT;
6193
6194 if (g_pUniqueStackCrst == NULL)
6195 {
6196 Crst *Attempt = new Crst (
6197 CrstUniqueStack,
6198 CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_ANYMODE));
6199
6200 if (FastInterlockCompareExchangePointer(&g_pUniqueStackCrst,
6201 Attempt,
6202 NULL) != NULL)
6203 {
6204 // We lost the race
6205 delete Attempt;
6206 }
6207 }
6208
6209 // Now we have a Crst we can use to synchronize the remainder of the init.
6210 if (g_pUniqueStackMap == NULL)
6211 {
6212 CrstHolder ch(g_pUniqueStackCrst);
6213
6214 if (g_pUniqueStackMap == NULL)
6215 {
6216 PtrHashMap *map = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap ();
6217 LockOwner lock = {g_pUniqueStackCrst, IsOwnerOfCrst};
6218 map->Init (256, StackCompare, TRUE, &lock);
6219 g_pUniqueStackMap = map;
6220 }
6221 }
6222}
6223
6224BOOL StartUniqueStackMapHelper()
6225{
6226 CONTRACTL
6227 {
6228 NOTHROW;
6229 GC_NOTRIGGER;
6230 }
6231 CONTRACTL_END;
6232
6233 BOOL fOK = TRUE;
6234 EX_TRY
6235 {
6236 if (g_pUniqueStackMap == NULL)
6237 {
6238 UniqueStackSetupMap();
6239 }
6240 }
6241 EX_CATCH
6242 {
6243 fOK = FALSE;
6244 }
6245 EX_END_CATCH(SwallowAllExceptions);
6246
6247 return fOK;
6248}
6249
6250BOOL StartUniqueStackMap ()
6251{
6252 CONTRACTL
6253 {
6254 NOTHROW;
6255 GC_NOTRIGGER;
6256 }
6257 CONTRACTL_END;
6258
6259 return StartUniqueStackMapHelper();
6260}
6261
6262#ifndef FEATURE_PAL
6263
6264size_t UpdateStackHash(size_t hash, size_t retAddr)
6265{
6266 return ((hash << 3) + hash) ^ retAddr;
6267}
6268
6269/***********************************************************************/
6270size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, size_t stackBase, size_t stackLimit)
6271{
6272 CONTRACTL {
6273 NOTHROW;
6274 GC_NOTRIGGER;
6275 }
6276 CONTRACTL_END;
6277
6278 // return a hash of every return address found between 'stackTop' (the lowest address)
6279 // and 'stackStop' (the highest address)
6280
6281 size_t hash = 0;
6282 int idx = 0;
6283
6284#ifdef _TARGET_X86_
6285
6286 static size_t moduleBase = (size_t) -1;
6287 static size_t moduleTop = (size_t) -1;
6288 if (moduleTop == (size_t) -1)
6289 {
6290 MEMORY_BASIC_INFORMATION mbi;
6291
6292 if (ClrVirtualQuery(getStackHash, &mbi, sizeof(mbi)))
6293 {
6294 moduleBase = (size_t)mbi.AllocationBase;
6295 moduleTop = (size_t)mbi.BaseAddress + mbi.RegionSize;
6296 }
6297 else
6298 {
6299 // way bad error, probably just assert and exit
6300 _ASSERTE (!"ClrVirtualQuery failed");
6301 moduleBase = 0;
6302 moduleTop = 0;
6303 }
6304 }
6305
6306 while (stackTop < stackStop)
6307 {
6308 // Clean out things that point to stack, as those can't be return addresses
6309 if (*stackTop > moduleBase && *stackTop < moduleTop)
6310 {
6311 TADDR dummy;
6312
6313 if (isRetAddr((TADDR)*stackTop, &dummy))
6314 {
6315 hash = UpdateStackHash(hash, *stackTop);
6316
6317 // If there is no jitted code on the stack, then just use the
6318 // top 16 frames as the context.
6319 idx++;
6320 if (idx <= UniqueStackDepth)
6321 {
6322 stackTrace [idx] = *stackTop;
6323 }
6324 }
6325 }
6326 stackTop++;
6327 }
6328
6329#else // _TARGET_X86_
6330
6331 CONTEXT ctx;
6332 ClrCaptureContext(&ctx);
6333
6334 UINT_PTR uControlPc = (UINT_PTR)GetIP(&ctx);
6335 UINT_PTR uImageBase;
6336
6337 UINT_PTR uPrevControlPc = uControlPc;
6338
6339 for (;;)
6340 {
6341 RtlLookupFunctionEntry(uControlPc,
6342 ARM_ONLY((DWORD*))(&uImageBase),
6343 NULL
6344 );
6345
6346 if (((UINT_PTR)g_pMSCorEE) != uImageBase)
6347 {
6348 break;
6349 }
6350
6351 uControlPc = Thread::VirtualUnwindCallFrame(&ctx);
6352
6353 UINT_PTR uRetAddrForHash = uControlPc;
6354
6355 if (uPrevControlPc == uControlPc)
6356 {
6357 // This is a special case when we fail to acquire the loader lock
6358 // in RtlLookupFunctionEntry(), which then returns false. The end
6359 // result is that we cannot go any further on the stack and
6360 // we will loop infinitely (because the owner of the loader lock
6361 // is blocked on us).
6362 hash = 0;
6363 break;
6364 }
6365 else
6366 {
6367 uPrevControlPc = uControlPc;
6368 }
6369
6370 hash = UpdateStackHash(hash, uRetAddrForHash);
6371
6372 // If there is no jitted code on the stack, then just use the
6373 // top 16 frames as the context.
6374 idx++;
6375 if (idx <= UniqueStackDepth)
6376 {
6377 stackTrace [idx] = uRetAddrForHash;
6378 }
6379 }
6380#endif // _TARGET_X86_
6381
6382 stackTrace [0] = idx;
6383
6384 return(hash);
6385}
6386
6387void UniqueStackHelper(size_t stackTraceHash, size_t *stackTrace)
6388{
6389 CONTRACTL {
6390 NOTHROW;
6391 GC_NOTRIGGER;
6392 }
6393 CONTRACTL_END;
6394
6395 EX_TRY {
6396 size_t nElem = stackTrace[0];
6397 if (nElem >= UniqueStackDepth) {
6398 nElem = UniqueStackDepth;
6399 }
6400 AllocMemHolder<size_t> stackTraceInMap = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(size_t *)) * (S_SIZE_T(nElem) + S_SIZE_T(1)));
6401 memcpy (stackTraceInMap, stackTrace, sizeof(size_t *) * (nElem + 1));
6402 g_pUniqueStackMap->InsertValue(stackTraceHash, stackTraceInMap);
6403 stackTraceInMap.SuppressRelease();
6404 }
6405 EX_CATCH
6406 {
6407 }
6408 EX_END_CATCH(SwallowAllExceptions);
6409}
6410
6411/***********************************************************************/
6412/* returns true if this stack has not been seen before, useful for
6413 running tests only once per stack trace. */
6414
6415BOOL Thread::UniqueStack(void* stackStart)
6416{
6417 CONTRACTL
6418 {
6419 NOTHROW;
6420 GC_NOTRIGGER;
6421 SO_NOT_MAINLINE;
6422 }
6423 CONTRACTL_END;
6424
6425 // If we where not told where to start, start at the caller of UniqueStack
6426 if (stackStart == 0)
6427 {
6428 stackStart = &stackStart;
6429 }
6430
6431 if (g_pUniqueStackMap == NULL)
6432 {
6433 if (!StartUniqueStackMap ())
6434 {
6435 // We fail to initialize unique stack map due to OOM.
6436 // Let's say the stack is unique.
6437 return TRUE;
6438 }
6439 }
6440
6441 size_t stackTrace[UniqueStackDepth+1] = {0};
6442
6443 // stackTraceHash represents a hash of entire stack at the time we make the call,
6444 // We insure at least GC per unique stackTrace. What information is contained in
6445 // 'stackTrace' is somewhat arbitrary. We choose it to mean all functions live
6446 // on the stack up to the first jitted function.
6447
6448 size_t stackTraceHash;
6449 Thread* pThread = GetThread();
6450
6451
6452 void* stopPoint = pThread->m_CacheStackBase;
6453
6454#ifdef _TARGET_X86_
6455 // Find the stop point (most jitted function)
6456 Frame* pFrame = pThread->GetFrame();
6457 for(;;)
6458 {
6459 // skip GC frames
6460 if (pFrame == 0 || pFrame == (Frame*) -1)
6461 break;
6462
6463 pFrame->GetFunction(); // This insures that helper frames are inited
6464
6465 if (pFrame->GetReturnAddress() != 0)
6466 {
6467 stopPoint = pFrame;
6468 break;
6469 }
6470 pFrame = pFrame->Next();
6471 }
6472#endif // _TARGET_X86_
6473
6474 // Get hash of all return addresses between here an the top most jitted function
6475 stackTraceHash = getStackHash (stackTrace, (size_t*) stackStart, (size_t*) stopPoint,
6476 size_t(pThread->m_CacheStackBase), size_t(pThread->m_CacheStackLimit));
6477
6478 if (stackTraceHash == 0 ||
6479 g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
6480 {
6481 return FALSE;
6482 }
6483 BOOL fUnique = FALSE;
6484
6485 {
6486 CrstHolder ch(g_pUniqueStackCrst);
6487#ifdef _DEBUG
6488 if (GetThread ())
6489 GetThread ()->m_bUniqueStacking = TRUE;
6490#endif
6491 if (g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
6492 {
6493 fUnique = FALSE;
6494 }
6495 else
6496 {
6497 fUnique = TRUE;
6498 FAULT_NOT_FATAL();
6499 UniqueStackHelper(stackTraceHash, stackTrace);
6500 }
6501#ifdef _DEBUG
6502 if (GetThread ())
6503 GetThread ()->m_bUniqueStacking = FALSE;
6504#endif
6505 }
6506
6507#ifdef _DEBUG
6508 static int fCheckStack = -1;
6509 if (fCheckStack == -1)
6510 {
6511 fCheckStack = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_FastGCCheckStack);
6512 }
6513 if (fCheckStack && pThread->m_pCleanedStackBase > stackTrace
6514 && pThread->m_pCleanedStackBase - stackTrace > (int) MAXSTACKBYTES)
6515 {
6516 _ASSERTE (!"Garbage on stack");
6517 }
6518#endif
6519 return fUnique;
6520}
6521
6522#else // !FEATURE_PAL
6523
6524BOOL Thread::UniqueStack(void* stackStart)
6525{
6526 return FALSE;
6527}
6528
6529#endif // !FEATURE_PAL
6530
6531#endif // STRESS_HEAP
6532
6533
6534/*
6535 * GetStackLowerBound
6536 *
6537 * Returns the lower bound of the stack space. Note -- the practical bound is some number of pages greater than
6538 * this value -- those pages are reserved for a stack overflow exception processing.
6539 *
6540 * Parameters:
6541 * None
6542 *
6543 * Returns:
6544 * address of the lower bound of the threads's stack.
6545 */
6546void * Thread::GetStackLowerBound()
6547{
6548 // Called during fiber switch. Can not have non-static contract.
6549 STATIC_CONTRACT_NOTHROW;
6550 STATIC_CONTRACT_GC_NOTRIGGER;
6551 STATIC_CONTRACT_SO_TOLERANT;
6552
6553 #ifndef FEATURE_PAL
6554 MEMORY_BASIC_INFORMATION lowerBoundMemInfo;
6555 SIZE_T dwRes;
6556
6557 dwRes = ClrVirtualQuery((const void *)&lowerBoundMemInfo, &lowerBoundMemInfo, sizeof(MEMORY_BASIC_INFORMATION));
6558
6559 if (sizeof(MEMORY_BASIC_INFORMATION) == dwRes)
6560 {
6561 return (void *)(lowerBoundMemInfo.AllocationBase);
6562 }
6563 else
6564 {
6565 return NULL;
6566 }
6567#else // !FEATURE_PAL
6568 return PAL_GetStackLimit();
6569#endif // !FEATURE_PAL
6570}
6571
6572/*
6573 * GetStackUpperBound
6574 *
6575 * Return the upper bound of the thread's stack space.
6576 *
6577 * Parameters:
6578 * None
6579 *
6580 * Returns:
6581 * address of the base of the threads's stack.
6582 */
6583void *Thread::GetStackUpperBound()
6584{
6585 // Called during fiber switch. Can not have non-static contract.
6586 STATIC_CONTRACT_NOTHROW;
6587 STATIC_CONTRACT_GC_NOTRIGGER;
6588 STATIC_CONTRACT_SO_TOLERANT;
6589
6590 return ClrTeb::GetStackBase();
6591}
6592
6593BOOL Thread::SetStackLimits(SetStackLimitScope scope)
6594{
6595 CONTRACTL
6596 {
6597 NOTHROW;
6598 GC_NOTRIGGER;
6599 SO_TOLERANT;
6600 }
6601 CONTRACTL_END;
6602
6603 if (scope == fAll)
6604 {
6605 m_CacheStackBase = GetStackUpperBound();
6606 m_CacheStackLimit = GetStackLowerBound();
6607 if (m_CacheStackLimit == NULL)
6608 {
6609 _ASSERTE(!"Failed to set stack limits");
6610 return FALSE;
6611 }
6612
6613 // Compute the limit used by EnsureSufficientExecutionStack and cache it on the thread. This minimum stack size should
6614 // be sufficient to allow a typical non-recursive call chain to execute, including potential exception handling and
6615 // garbage collection. Used for probing for available stack space through RuntimeImports.EnsureSufficientExecutionStack,
6616 // among other things.
6617#ifdef BIT64
6618 const UINT_PTR MinExecutionStackSize = 128 * 1024;
6619#else // !BIT64
6620 const UINT_PTR MinExecutionStackSize = 64 * 1024;
6621#endif // BIT64
6622 _ASSERTE(m_CacheStackBase >= m_CacheStackLimit);
6623 if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) >
6624 MinExecutionStackSize)
6625 {
6626 m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + MinExecutionStackSize;
6627 }
6628 else
6629 {
6630 m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase);
6631 }
6632 }
6633
6634 // Ensure that we've setup the stack guarantee properly before we cache the stack limits
6635 // as they depend upon the stack guarantee.
6636 if (FAILED(CLRSetThreadStackGuarantee()))
6637 return FALSE;
6638
6639 // Cache the last stack addresses that we are allowed to touch. We throw a stack overflow
6640 // if we cross that line. Note that we ignore any subsequent calls to STSG for Whidbey until
6641 // we see an exception and recache the values. We use the LastAllowableAddresses to
6642 // determine if we've taken a hard SO and the ProbeLimits on the probes themselves.
6643
6644 m_LastAllowableStackAddress = GetLastNormalStackAddress();
6645
6646 if (g_pConfig->ProbeForStackOverflow())
6647 {
6648 m_ProbeLimit = m_LastAllowableStackAddress;
6649 }
6650 else
6651 {
6652 // If we have stack probing disabled, set the probeLimit to 0 so that all probes will pass. This
6653 // way we don't have to do an extra check in the probe code.
6654 m_ProbeLimit = 0;
6655 }
6656
6657 return TRUE;
6658}
6659
6660//---------------------------------------------------------------------------------------------
6661// Routines we use to managed a thread's stack, for fiber switching or stack overflow purposes.
6662//---------------------------------------------------------------------------------------------
6663
6664HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope)
6665{
6666 CONTRACTL
6667 {
6668 WRAPPER(NOTHROW);
6669 GC_NOTRIGGER;
6670 SO_TOLERANT;
6671 }
6672 CONTRACTL_END;
6673
6674#ifndef FEATURE_PAL
6675 // TODO: we need to measure what the stack usage needs are at the limits in the hosted scenario for host callbacks
6676
6677 if (Thread::IsSetThreadStackGuaranteeInUse(fScope))
6678 {
6679 // <TODO> Tune this as needed </TODO>
6680 ULONG uGuardSize = SIZEOF_DEFAULT_STACK_GUARANTEE;
6681 int EXTRA_PAGES = 0;
6682#if defined(_WIN64)
6683 // Free Build EH Stack Stats:
6684 // --------------------------------
6685 // currently the maximum stack usage we'll face while handling a SO includes:
6686 // 4.3k for the OS (kernel32!RaiseException, Rtl EH dispatch code, RtlUnwindEx [second pass])
6687 // 1.2k for the CLR EH setup (NakedThrowHelper*)
6688 // 4.5k for other heavy CLR stack creations (2x CONTEXT, 1x REGDISPLAY)
6689 // ~1.0k for other misc CLR stack allocations
6690 // -----
6691 // 11.0k --> ~2.75 pages for CLR SO EH dispatch
6692 //
6693 // -plus we might need some more for debugger EH dispatch, Watson, etc...
6694 // -also need to take into account that we can lose up to 1 page of the guard region
6695 // -additionally, we need to provide some region to hosts to allow for lock acquisition in a hosted scenario
6696 //
6697 EXTRA_PAGES = 3;
6698 INDEBUG(EXTRA_PAGES += 1);
6699
6700 int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages);
6701 if (ThreadGuardPages == 0)
6702 {
6703 uGuardSize += (EXTRA_PAGES * GetOsPageSize());
6704 }
6705 else
6706 {
6707 uGuardSize += (ThreadGuardPages * GetOsPageSize());
6708 }
6709
6710#else // _WIN64
6711#ifdef _DEBUG
6712 uGuardSize += (1 * GetOsPageSize()); // one extra page for debug infrastructure
6713#endif // _DEBUG
6714#endif // _WIN64
6715
6716 LOG((LF_EH, LL_INFO10000, "STACKOVERFLOW: setting thread stack guarantee to 0x%x\n", uGuardSize));
6717
6718 if (!::SetThreadStackGuarantee(&uGuardSize))
6719 {
6720 return HRESULT_FROM_GetLastErrorNA();
6721 }
6722 }
6723
6724#endif // !FEATURE_PAL
6725
6726 return S_OK;
6727}
6728
6729
6730/*
6731 * GetLastNormalStackAddress
6732 *
6733 * GetLastNormalStackAddress returns the last stack address before the guard
6734 * region of a thread. This is the last address that one could write to before
6735 * a stack overflow occurs.
6736 *
6737 * Parameters:
6738 * StackLimit - the base of the stack allocation
6739 *
6740 * Returns:
6741 * Address of the first page of the guard region.
6742 */
6743UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit)
6744{
6745 CONTRACTL
6746 {
6747 NOTHROW;
6748 GC_NOTRIGGER;
6749 SO_TOLERANT;
6750 }
6751 CONTRACTL_END;
6752
6753 UINT_PTR cbStackGuarantee = GetStackGuarantee();
6754
6755 // Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them
6756 // all together. Note that the "fault page" is the reason for the extra GetOsPageSize() below. The OS
6757 // will guarantee us a certain amount of stack remaining after a stack overflow. This is called the
6758 // "stack guarantee". But to do this, it has to fault on the page before that region as the app is
6759 // allowed to fault at the very end of that page. So, as a result, the last normal stack address is
6760 // one page sooner.
6761 return StackLimit + (cbStackGuarantee
6762#ifndef FEATURE_PAL
6763 + GetOsPageSize()
6764#endif // !FEATURE_PAL
6765 + HARD_GUARD_REGION_SIZE);
6766}
6767
6768#ifdef _DEBUG
6769
6770static void DebugLogMBIFlags(UINT uState, UINT uProtect)
6771{
6772 CONTRACTL
6773 {
6774 NOTHROW;
6775 GC_NOTRIGGER;
6776 CANNOT_TAKE_LOCK;
6777 }
6778 CONTRACTL_END;
6779
6780#ifndef FEATURE_PAL
6781
6782#define LOG_FLAG(flags, name) \
6783 if (flags & name) \
6784 { \
6785 LOG((LF_EH, LL_INFO1000, "" #name " ")); \
6786 } \
6787
6788 if (uState)
6789 {
6790 LOG((LF_EH, LL_INFO1000, "State: "));
6791
6792 LOG_FLAG(uState, MEM_COMMIT);
6793 LOG_FLAG(uState, MEM_RESERVE);
6794 LOG_FLAG(uState, MEM_DECOMMIT);
6795 LOG_FLAG(uState, MEM_RELEASE);
6796 LOG_FLAG(uState, MEM_FREE);
6797 LOG_FLAG(uState, MEM_PRIVATE);
6798 LOG_FLAG(uState, MEM_MAPPED);
6799 LOG_FLAG(uState, MEM_RESET);
6800 LOG_FLAG(uState, MEM_TOP_DOWN);
6801 LOG_FLAG(uState, MEM_WRITE_WATCH);
6802 LOG_FLAG(uState, MEM_PHYSICAL);
6803 LOG_FLAG(uState, MEM_LARGE_PAGES);
6804 LOG_FLAG(uState, MEM_4MB_PAGES);
6805 }
6806
6807 if (uProtect)
6808 {
6809 LOG((LF_EH, LL_INFO1000, "Protect: "));
6810
6811 LOG_FLAG(uProtect, PAGE_NOACCESS);
6812 LOG_FLAG(uProtect, PAGE_READONLY);
6813 LOG_FLAG(uProtect, PAGE_READWRITE);
6814 LOG_FLAG(uProtect, PAGE_WRITECOPY);
6815 LOG_FLAG(uProtect, PAGE_EXECUTE);
6816 LOG_FLAG(uProtect, PAGE_EXECUTE_READ);
6817 LOG_FLAG(uProtect, PAGE_EXECUTE_READWRITE);
6818 LOG_FLAG(uProtect, PAGE_EXECUTE_WRITECOPY);
6819 LOG_FLAG(uProtect, PAGE_GUARD);
6820 LOG_FLAG(uProtect, PAGE_NOCACHE);
6821 LOG_FLAG(uProtect, PAGE_WRITECOMBINE);
6822 }
6823
6824#undef LOG_FLAG
6825#endif // !FEATURE_PAL
6826}
6827
6828
6829static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
6830{
6831 CONTRACTL
6832 {
6833 NOTHROW;
6834 GC_NOTRIGGER;
6835 SO_INTOLERANT;
6836 CANNOT_TAKE_LOCK;
6837 }
6838 CONTRACTL_END;
6839
6840 MEMORY_BASIC_INFORMATION meminfo;
6841 UINT_PTR uStartOfThisRegion = uLowAddress;
6842
6843 LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
6844
6845 while (uStartOfThisRegion < uHighAddress)
6846 {
6847 SIZE_T res = ClrVirtualQuery((const void *)uStartOfThisRegion, &meminfo, sizeof(meminfo));
6848
6849 if (sizeof(meminfo) != res)
6850 {
6851 LOG((LF_EH, LL_INFO1000, "VirtualQuery failed on %p\n", uStartOfThisRegion));
6852 break;
6853 }
6854
6855 UINT_PTR uStartOfNextRegion = uStartOfThisRegion + meminfo.RegionSize;
6856
6857 if (uStartOfNextRegion > uHighAddress)
6858 {
6859 uStartOfNextRegion = uHighAddress;
6860 }
6861
6862 UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion;
6863
6864 LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / GetOsPageSize()));
6865 DebugLogMBIFlags(meminfo.State, meminfo.Protect);
6866 LOG((LF_EH, LL_INFO1000, "\n"));
6867
6868 uStartOfThisRegion = uStartOfNextRegion;
6869 }
6870
6871 LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
6872}
6873
6874// static
6875void Thread::DebugLogStackMBIs()
6876{
6877 CONTRACTL
6878 {
6879 NOTHROW;
6880 GC_NOTRIGGER;
6881 SO_INTOLERANT;
6882 CANNOT_TAKE_LOCK;
6883 }
6884 CONTRACTL_END;
6885
6886 Thread* pThread = GetThread(); // N.B. this can be NULL!
6887
6888 UINT_PTR uStackLimit = (UINT_PTR)GetStackLowerBound();
6889 UINT_PTR uStackBase = (UINT_PTR)GetStackUpperBound();
6890 if (pThread)
6891 {
6892 uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit();
6893 uStackBase = (UINT_PTR)pThread->GetCachedStackBase();
6894 }
6895 else
6896 {
6897 uStackLimit = (UINT_PTR)GetStackLowerBound();
6898 uStackBase = (UINT_PTR)GetStackUpperBound();
6899 }
6900 UINT_PTR uStackSize = uStackBase - uStackLimit;
6901
6902 LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
6903 LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / GetOsPageSize()));
6904 if (pThread)
6905 {
6906 LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress()));
6907 }
6908
6909 DebugLogStackRegionMBIs(uStackLimit, uStackBase);
6910}
6911#endif // _DEBUG
6912
6913//
6914// IsSPBeyondLimit
6915//
6916// Determines if the stack pointer is beyond the stack limit, in which case
6917// we can assume we've taken a hard SO.
6918//
6919// Parameters: none
6920//
6921// Returns: bool indicating if SP is beyond the limit or not
6922//
6923BOOL Thread::IsSPBeyondLimit()
6924{
6925 WRAPPER_NO_CONTRACT;
6926
6927 // Reset the stack limits if necessary.
6928 // @todo . Add a vectored handler for X86 so that we reset the stack limits
6929 // there, as anything that supports SetThreadStackGuarantee will support vectored handlers.
6930 // Then we can always assume during EH processing that our stack limits are good and we
6931 // don't have to call ResetStackLimits.
6932 ResetStackLimits();
6933 char *approxSP = (char *)GetCurrentSP();
6934 if (approxSP < (char *)(GetLastAllowableStackAddress()))
6935 {
6936 return TRUE;
6937 }
6938 return FALSE;
6939}
6940
6941__declspec(noinline) void AllocateSomeStack(){
6942 LIMITED_METHOD_CONTRACT;
6943#ifdef _TARGET_X86_
6944 const size_t size = 0x200;
6945#else //_TARGET_X86_
6946 const size_t size = 0x400;
6947#endif //_TARGET_X86_
6948
6949 INT8* mem = (INT8*)_alloca(size);
6950 // Actually touch the memory we just allocated so the compiler can't
6951 // optimize it away completely.
6952 // NOTE: this assumes the stack grows down (towards 0).
6953 VolatileStore<INT8>(mem, 0);
6954}
6955
6956#ifndef FEATURE_PAL
6957
6958// static // private
6959BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
6960{
6961 CONTRACTL
6962 {
6963 NOTHROW;
6964 GC_NOTRIGGER;
6965 SO_TOLERANT;
6966 CANNOT_TAKE_LOCK;
6967 }
6968 CONTRACTL_END;
6969
6970 SIZE_T dwRes;
6971 MEMORY_BASIC_INFORMATION meminfo;
6972 UINT_PTR uStartOfCurrentRegion = uLowAddress;
6973
6974 while (uStartOfCurrentRegion < uHighAddress)
6975 {
6976#undef VirtualQuery
6977 // This code can run below YieldTask, which means that it must not call back into the host.
6978 // The reason is that YieldTask is invoked by the host, and the host needs not be reentrant.
6979 dwRes = VirtualQuery((const void *)uStartOfCurrentRegion, &meminfo, sizeof(meminfo));
6980#define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)
6981
6982 // If the query fails then assume we have no guard page.
6983 if (sizeof(meminfo) != dwRes)
6984 {
6985 return FALSE;
6986 }
6987
6988 if (meminfo.Protect & PAGE_GUARD)
6989 {
6990 return TRUE;
6991 }
6992
6993 uStartOfCurrentRegion += meminfo.RegionSize;
6994 }
6995
6996 return FALSE;
6997}
6998
6999#endif // !FEATURE_PAL
7000
7001/*
7002 * DetermineIfGuardPagePresent
7003 *
7004 * DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function makes
7005 * a physical check of the stack, rather than relying on whether or not the CLR is currently processing a stack
7006 * overflow exception.
7007 *
7008 * It seems reasonable to want to check just the 3rd page for !MEM_COMMIT or PAGE_GUARD, but that's no good in a
7009 * world where a) one can extend the guard region arbitrarily with SetThreadStackGuarantee(), b) a thread's stack
7010 * could be pre-committed, and c) another lib might reset the guard page very high up on the stack, much as we
7011 * do. In that world, we have to do VirtualQuery from the lower bound up until we find a region with PAGE_GUARD on
7012 * it. If we've never SO'd, then that's two calls to VirtualQuery.
7013 *
7014 * Parameters:
7015 * None
7016 *
7017 * Returns:
7018 * TRUE if the thread has a guard page, FALSE otherwise.
7019 */
7020BOOL Thread::DetermineIfGuardPagePresent()
7021{
7022 CONTRACTL
7023 {
7024 NOTHROW;
7025 GC_NOTRIGGER;
7026 SO_TOLERANT;
7027 CANNOT_TAKE_LOCK;
7028 }
7029 CONTRACTL_END;
7030
7031#ifndef FEATURE_PAL
7032 BOOL bStackGuarded = FALSE;
7033 UINT_PTR uStackBase = (UINT_PTR)GetCachedStackBase();
7034 UINT_PTR uStackLimit = (UINT_PTR)GetCachedStackLimit();
7035
7036 // Note: we start our queries after the hard guard page (one page up from the base of the stack.) We know the
7037 // very last region of the stack is never the guard page (its always the uncomitted "hard" guard page) so there's
7038 // no need to waste a query on it.
7039 bStackGuarded = DoesRegionContainGuardPage(uStackLimit + HARD_GUARD_REGION_SIZE,
7040 uStackBase);
7041
7042 LOG((LF_EH, LL_INFO10000, "Thread::DetermineIfGuardPagePresent: stack guard page: %s\n", bStackGuarded ? "PRESENT" : "MISSING"));
7043
7044 return bStackGuarded;
7045#else // !FEATURE_PAL
7046 return TRUE;
7047#endif // !FEATURE_PAL
7048}
7049
7050/*
7051 * GetLastNormalStackAddress
7052 *
7053 * GetLastNormalStackAddress returns the last stack address before the guard
7054 * region of this thread. This is the last address that one could write to
7055 * before a stack overflow occurs.
7056 *
7057 * Parameters:
7058 * None
7059 *
7060 * Returns:
7061 * Address of the first page of the guard region.
7062 */
7063UINT_PTR Thread::GetLastNormalStackAddress()
7064{
7065 WRAPPER_NO_CONTRACT;
7066
7067 return GetLastNormalStackAddress((UINT_PTR)m_CacheStackLimit);
7068}
7069
7070
7071#ifdef FEATURE_STACK_PROBE
7072/*
7073 * CanResetStackTo
7074 *
7075 * Given a target stack pointer, this function will tell us whether or not we could restore the guard page if we
7076 * unwound the stack that far.
7077 *
7078 * Parameters:
7079 * stackPointer -- stack pointer that we want to try to reset the thread's stack up to.
7080 *
7081 * Returns:
7082 * TRUE if there's enough room to reset the stack, false otherwise.
7083 */
7084BOOL Thread::CanResetStackTo(LPCVOID stackPointer)
7085{
7086 CONTRACTL
7087 {
7088 NOTHROW;
7089 GC_NOTRIGGER;
7090 SO_TOLERANT;
7091 }
7092 CONTRACTL_END;
7093
7094 // How much space between the given stack pointer and the first guard page?
7095 //
7096 // This must be signed since the stack pointer might be in the guard region,
7097 // which is at a lower address than GetLastNormalStackAddress will return.
7098 INT_PTR iStackSpaceLeft = (INT_PTR)stackPointer - GetLastNormalStackAddress();
7099
7100 // We need to have enough space to call back into the EE from the handler, so we use the twice the entry point amount.
7101 // We need enough to do work and enough that partway through that work we won't probe and COMPlusThrowSO.
7102
7103 const INT_PTR iStackSizeThreshold = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * GetOsPageSize());
7104
7105 if (iStackSpaceLeft > iStackSizeThreshold)
7106 {
7107 return TRUE;
7108 }
7109 else
7110 {
7111 return FALSE;
7112 }
7113}
7114
7115/*
7116 * IsStackSpaceAvailable
7117 *
7118 * Given a number of stack pages, this function will tell us whether or not we have that much space
7119 * before the top of the stack. If we are in the guard region we must be already handling an SO,
7120 * so we report how much space is left in the guard region
7121 *
7122 * Parameters:
7123 * numPages -- the number of pages that we need. This can be a fractional amount.
7124 *
7125 * Returns:
7126 * TRUE if there's that many pages of stack available
7127 */
7128BOOL Thread::IsStackSpaceAvailable(float numPages)
7129{
7130 CONTRACTL
7131 {
7132 NOTHROW;
7133 GC_NOTRIGGER;
7134 SO_TOLERANT;
7135 }
7136 CONTRACTL_END;
7137
7138 // How much space between the current stack pointer and the first guard page?
7139 //
7140 // This must be signed since the stack pointer might be in the guard region,
7141 // which is at a lower address than GetLastNormalStackAddress will return.
7142 float iStackSpaceLeft = static_cast<float>((INT_PTR)GetCurrentSP() - (INT_PTR)GetLastNormalStackAddress());
7143
7144 // If we have access to the stack guarantee (either in the guard region or we've tripped the guard page), then
7145 // use that.
7146 if ((iStackSpaceLeft/GetOsPageSize()) < numPages && !DetermineIfGuardPagePresent())
7147 {
7148 UINT_PTR stackGuarantee = GetStackGuarantee();
7149 // GetLastNormalStackAddress actually returns the 2nd to last stack page on the stack. We'll add that to our available
7150 // amount of stack, in addition to any sort of stack guarantee we might have.
7151 //
7152 // All these values are OS supplied, and will never overflow. (If they do, that means the stack is on the order
7153 // over GB, which isn't possible.
7154 iStackSpaceLeft += stackGuarantee + GetOsPageSize();
7155 }
7156 if ((iStackSpaceLeft/GetOsPageSize()) < numPages)
7157 {
7158 return FALSE;
7159 }
7160
7161 return TRUE;
7162}
7163
7164#endif // FEATURE_STACK_PROBE
7165
7166/*
7167 * GetStackGuarantee
7168 *
7169 * Returns the amount of stack guaranteed after an SO but before the OS rips the process.
7170 *
7171 * Parameters:
7172 * none
7173 *
7174 * Returns:
7175 * The stack guarantee in OS pages.
7176 */
7177UINT_PTR Thread::GetStackGuarantee()
7178{
7179 WRAPPER_NO_CONTRACT;
7180
7181#ifndef FEATURE_PAL
7182 // There is a new API available on new OS's called SetThreadStackGuarantee. It allows you to change the size of
7183 // the guard region on a per-thread basis. If we're running on an OS that supports the API, then we must query
7184 // it to see if someone has changed the size of the guard region for this thread.
7185 if (!IsSetThreadStackGuaranteeInUse())
7186 {
7187 return SIZEOF_DEFAULT_STACK_GUARANTEE;
7188 }
7189
7190 ULONG cbNewStackGuarantee = 0;
7191 // Passing in a value of 0 means that we're querying, and the value is changed with the new guard region
7192 // size.
7193 if (::SetThreadStackGuarantee(&cbNewStackGuarantee) &&
7194 (cbNewStackGuarantee != 0))
7195 {
7196 return cbNewStackGuarantee;
7197 }
7198#endif // FEATURE_PAL
7199
7200 return SIZEOF_DEFAULT_STACK_GUARANTEE;
7201}
7202
7203#ifndef FEATURE_PAL
7204
7205//
7206// MarkPageAsGuard
7207//
7208// Given a page base address, try to turn it into a guard page and then requery to determine success.
7209//
7210// static // private
7211BOOL Thread::MarkPageAsGuard(UINT_PTR uGuardPageBase)
7212{
7213 CONTRACTL
7214 {
7215 NOTHROW;
7216 GC_NOTRIGGER;
7217 SO_TOLERANT;
7218 CANNOT_TAKE_LOCK;
7219 }
7220 CONTRACTL_END;
7221
7222 DWORD flOldProtect;
7223
7224 ClrVirtualProtect((LPVOID)uGuardPageBase, 1,
7225 (PAGE_READWRITE | PAGE_GUARD), &flOldProtect);
7226
7227 // Intentionally ignore return value -- if it failed, we'll find out below
7228 // and keep moving up the stack until we either succeed or we hit the guard
7229 // region. If we don't succeed before we hit the guard region, we'll end up
7230 // with a fatal error.
7231
7232 // Now, make sure the guard page is really there. If its not, then VirtualProtect most likely failed
7233 // because our stack had grown onto the page we were trying to protect by the time we made it into
7234 // VirtualProtect. So try the next page down.
7235 MEMORY_BASIC_INFORMATION meminfo;
7236 SIZE_T dwRes;
7237
7238 dwRes = ClrVirtualQuery((const void *)uGuardPageBase, &meminfo, sizeof(meminfo));
7239
7240 return ((sizeof(meminfo) == dwRes) && (meminfo.Protect & PAGE_GUARD));
7241}
7242
7243
7244/*
7245 * RestoreGuardPage
7246 *
7247 * RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed by
7248 * the OS due to a stack overflow exception. This function requires that you know that you have enough stack space
7249 * to restore the guard page, so make sure you know what you're doing when you decide to call this.
7250 *
7251 * Parameters:
7252 * None
7253 *
7254 * Returns:
7255 * Nothing
7256 */
7257VOID Thread::RestoreGuardPage()
7258{
7259 CONTRACTL
7260 {
7261 NOTHROW;
7262 GC_NOTRIGGER;
7263 SO_TOLERANT;
7264 CANNOT_TAKE_LOCK;
7265 }
7266 CONTRACTL_END;
7267
7268 // Need a hard SO probe here.
7269 CONTRACT_VIOLATION(SOToleranceViolation);
7270
7271 BOOL bStackGuarded = DetermineIfGuardPagePresent();
7272
7273 // If the guard page is still there, then just return.
7274 if (bStackGuarded)
7275 {
7276 LOG((LF_EH, LL_INFO100, "Thread::RestoreGuardPage: no need to restore... guard page is already there.\n"));
7277 return;
7278 }
7279
7280 UINT_PTR approxStackPointer;
7281 UINT_PTR guardPageBase;
7282 UINT_PTR guardRegionThreshold;
7283 BOOL pageMissing;
7284
7285 if (!bStackGuarded)
7286 {
7287 // The normal guard page is the 3rd page from the base. The first page is the "hard" guard, the second one is
7288 // reserve, and the 3rd one is marked as a guard page. However, since there is now an API (on some platforms)
7289 // to change the size of the guard region, we'll just go ahead and protect the next page down from where we are
7290 // now. The guard page will get pushed forward again, just like normal, until the next stack overflow.
7291 approxStackPointer = (UINT_PTR)GetCurrentSP();
7292 guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, GetOsPageSize()) - GetOsPageSize();
7293
7294 // OS uses soft guard page to update the stack info in TEB. If our guard page is not beyond the current stack, the TEB
7295 // will not be updated, and then OS's check of stack during exception will fail.
7296 if (approxStackPointer >= guardPageBase)
7297 {
7298 guardPageBase -= GetOsPageSize();
7299 }
7300 // If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set
7301 // PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make
7302 // sure it worked. If it didn't, we try the next page down. We'll either find a page to protect, or run into
7303 // the guard region and rip the process down with EEPOLICY_HANDLE_FATAL_ERROR below.
7304 guardRegionThreshold = GetLastNormalStackAddress();
7305 pageMissing = TRUE;
7306
7307 while (pageMissing)
7308 {
7309 LOG((LF_EH, LL_INFO10000,
7310 "Thread::RestoreGuardPage: restoring guard page @ 0x%p, approxStackPointer=0x%p, "
7311 "last normal stack address=0x%p\n",
7312 guardPageBase, approxStackPointer, guardRegionThreshold));
7313
7314 // Make sure we set the guard page above the guard region.
7315 if (guardPageBase < guardRegionThreshold)
7316 {
7317 goto lFatalError;
7318 }
7319
7320 if (MarkPageAsGuard(guardPageBase))
7321 {
7322 // The current GuardPage should be beyond the current SP.
7323 _ASSERTE (guardPageBase < approxStackPointer);
7324 pageMissing = FALSE;
7325 }
7326 else
7327 {
7328 guardPageBase -= GetOsPageSize();
7329 }
7330 }
7331 }
7332
7333 FinishSOWork();
7334
7335 INDEBUG(DebugLogStackMBIs());
7336
7337 return;
7338
7339lFatalError:
7340 STRESS_LOG2(LF_EH, LL_ALWAYS,
7341 "Thread::RestoreGuardPage: too close to the guard region (0x%p) to restore guard page @0x%p\n",
7342 guardRegionThreshold, guardPageBase);
7343 _ASSERTE(!"Too close to the guard page to reset it!");
7344 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
7345}
7346
7347#endif // !FEATURE_PAL
7348
7349#endif // #ifndef DACCESS_COMPILE
7350
7351//
7352// InitRegDisplay: initializes a REGDISPLAY for a thread. If validContext
7353// is false, pRD is filled from the current context of the thread. The
7354// thread's current context is also filled in pctx. If validContext is true,
7355// pctx should point to a valid context and pRD is filled from that.
7356//
7357bool Thread::InitRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, bool validContext)
7358{
7359 CONTRACTL {
7360 NOTHROW;
7361 GC_NOTRIGGER;
7362 }
7363 CONTRACTL_END;
7364
7365 if (!validContext)
7366 {
7367 if (GetFilterContext()!= NULL)
7368 {
7369 pctx = GetFilterContext();
7370 }
7371 else
7372 {
7373#ifdef DACCESS_COMPILE
7374 DacNotImpl();
7375#else
7376 pctx->ContextFlags = CONTEXT_FULL;
7377
7378 _ASSERTE(this != GetThread()); // do not call GetThreadContext on the active thread
7379
7380 BOOL ret = EEGetThreadContext(this, pctx);
7381 if (!ret)
7382 {
7383 SetIP(pctx, 0);
7384#ifdef _TARGET_X86_
7385 pRD->ControlPC = pctx->Eip;
7386 pRD->PCTAddr = (TADDR)&(pctx->Eip);
7387#elif defined(_TARGET_AMD64_)
7388 // nothing more to do here, on Win64 setting the IP to 0 is enough.
7389#elif defined(_TARGET_ARM_)
7390 // nothing more to do here, on Win64 setting the IP to 0 is enough.
7391#else
7392 PORTABILITY_ASSERT("NYI for platform Thread::InitRegDisplay");
7393#endif
7394
7395 return false;
7396 }
7397#endif // DACCESS_COMPILE
7398 }
7399 }
7400
7401 FillRegDisplay( pRD, pctx );
7402
7403 return true;
7404}
7405
7406
7407void Thread::FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx)
7408{
7409 WRAPPER_NO_CONTRACT;
7410 SUPPORTS_DAC;
7411
7412 ::FillRegDisplay(pRD, pctx);
7413
7414#if defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_)
7415 CONSISTENCY_CHECK(!pRD->_pThread || pRD->_pThread == this);
7416 pRD->_pThread = this;
7417
7418 CheckRegDisplaySP(pRD);
7419#endif // defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_)
7420}
7421
7422
7423#ifdef DEBUG_REGDISPLAY
7424
7425void CheckRegDisplaySP (REGDISPLAY *pRD)
7426{
7427 if (pRD->SP && pRD->_pThread)
7428 {
7429#ifndef NO_FIXED_STACK_LIMIT
7430 _ASSERTE(PTR_VOID(pRD->SP) >= pRD->_pThread->GetCachedStackLimit());
7431#endif // NO_FIXED_STACK_LIMIT
7432 _ASSERTE(PTR_VOID(pRD->SP) < pRD->_pThread->GetCachedStackBase());
7433 }
7434}
7435
7436#endif // DEBUG_REGDISPLAY
7437
7438// Trip Functions
7439// ==============
7440// When a thread reaches a safe place, it will rendezvous back with us, via one of
7441// the following trip functions:
7442
7443void CommonTripThread()
7444{
7445#ifndef DACCESS_COMPILE
7446 CONTRACTL {
7447 THROWS;
7448 GC_TRIGGERS;
7449 }
7450 CONTRACTL_END;
7451
7452 Thread *thread = GetThread();
7453
7454 thread->HandleThreadAbort ();
7455
7456 if (thread->CatchAtSafePoint())
7457 {
7458 _ASSERTE(!ThreadStore::HoldingThreadStore(thread));
7459#ifdef FEATURE_HIJACK
7460 thread->UnhijackThread();
7461#endif // FEATURE_HIJACK
7462
7463 // Trap
7464 thread->PulseGCMode();
7465 }
7466#else
7467 DacNotImpl();
7468#endif // #ifndef DACCESS_COMPILE
7469}
7470
7471#ifndef DACCESS_COMPILE
7472
7473void Thread::SetFilterContext(CONTEXT *pContext)
7474{
7475 // SetFilterContext is like pushing a Frame onto the Frame chain.
7476 CONTRACTL {
7477 NOTHROW;
7478 GC_NOTRIGGER;
7479 MODE_COOPERATIVE; // Absolutely must be in coop to coordinate w/ Runtime suspension.
7480 PRECONDITION(GetThread() == this); // must be on current thread.
7481 } CONTRACTL_END;
7482
7483 m_debuggerFilterContext = pContext;
7484}
7485
7486#endif // #ifndef DACCESS_COMPILE
7487
7488T_CONTEXT *Thread::GetFilterContext(void)
7489{
7490 LIMITED_METHOD_DAC_CONTRACT;
7491
7492 return m_debuggerFilterContext;
7493}
7494
7495#ifndef DACCESS_COMPILE
7496
7497// @todo - eventually complete remove the CantStop count on the thread and use
7498// the one in the PreDef block. For now, we increment both our thread counter,
7499// and the FLS counter. Eventually we can remove our thread counter and only use
7500// the FLS counter.
7501void Thread::SetDebugCantStop(bool fCantStop)
7502{
7503 LIMITED_METHOD_CONTRACT;
7504
7505 if (fCantStop)
7506 {
7507 IncCantStopCount();
7508 m_debuggerCantStop++;
7509 }
7510 else
7511 {
7512 DecCantStopCount();
7513 m_debuggerCantStop--;
7514 }
7515}
7516
7517// @todo - remove this, we only read this from oop.
7518bool Thread::GetDebugCantStop(void)
7519{
7520 LIMITED_METHOD_CONTRACT;
7521
7522 return m_debuggerCantStop != 0;
7523}
7524
7525
7526//-----------------------------------------------------------------------------
7527// Call w/a wrapper.
7528// We've already transitioned AppDomains here. This just places a 1st-pass filter to sniff
7529// for catch-handler found callbacks for the debugger.
7530//-----------------------------------------------------------------------------
7531void MakeADCallDebuggerWrapper(
7532 FPAPPDOMAINCALLBACK fpCallback,
7533 CtxTransitionBaseArgs * args,
7534 ContextTransitionFrame* pFrame)
7535{
7536 STATIC_CONTRACT_THROWS;
7537 STATIC_CONTRACT_GC_TRIGGERS;
7538 STATIC_CONTRACT_MODE_ANY;
7539
7540 BYTE * pCatcherStackAddr = (BYTE*) pFrame;
7541
7542 struct Param : NotifyOfCHFFilterWrapperParam
7543 {
7544 FPAPPDOMAINCALLBACK fpCallback;
7545 CtxTransitionBaseArgs *args;
7546 } param;
7547 param.pFrame = pCatcherStackAddr;
7548 param.fpCallback = fpCallback;
7549 param.args = args;
7550
7551 PAL_TRY(Param *, pParam, &param)
7552 {
7553 pParam->fpCallback(pParam->args);
7554 }
7555 PAL_EXCEPT_FILTER(AppDomainTransitionExceptionFilter)
7556 {
7557 // Should never reach here b/c handler should always continue search.
7558 _ASSERTE(false);
7559 }
7560 PAL_ENDTRY
7561}
7562
7563
7564// Invoke a callback in another appdomain.
7565// Caller should have checked that we're actually transitioning domains here.
7566void MakeCallWithAppDomainTransition(
7567 ADID TargetDomain,
7568 FPAPPDOMAINCALLBACK fpCallback,
7569 CtxTransitionBaseArgs * args)
7570{
7571 DEBUG_ASSURE_NO_RETURN_BEGIN(MAKECALL)
7572
7573 Thread* _ctx_trans_pThread = GetThread();
7574 TESTHOOKCALL(EnteringAppDomain((TargetDomain.m_dwId)));
7575 AppDomain* pTargetDomain = SystemDomain::GetAppDomainFromId(TargetDomain, ADV_CURRENTAD);
7576 _ASSERTE(_ctx_trans_pThread != NULL);
7577 _ASSERTE(_ctx_trans_pThread->GetDomain()->GetId()!= TargetDomain);
7578
7579 bool _ctx_trans_fRaiseNeeded = false;
7580 Exception* _ctx_trans_pTargetDomainException=NULL; \
7581
7582 FrameWithCookie<ContextTransitionFrame> _ctx_trans_Frame;
7583 ContextTransitionFrame* _ctx_trans_pFrame = &_ctx_trans_Frame;
7584
7585 args->pCtxFrame = _ctx_trans_pFrame;
7586 TESTHOOKCALL(EnteredAppDomain((TargetDomain.m_dwId)));
7587 /* work around unreachable code warning */
7588 EX_TRY
7589 {
7590 // Invoke the callback
7591 if (CORDebuggerAttached())
7592 {
7593 // If a debugger is attached, do it through a wrapper that will sniff for CHF callbacks.
7594 MakeADCallDebuggerWrapper(fpCallback, args, GET_CTX_TRANSITION_FRAME());
7595 }
7596 else
7597 {
7598 // If no debugger is attached, call directly.
7599 fpCallback(args);
7600 }
7601 }
7602 EX_CATCH
7603 {
7604 LOG((LF_EH|LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): exception in flight\n",
7605 __FUNCTION__, __FILE__, __LINE__));
7606
7607 _ctx_trans_pTargetDomainException=EXTRACT_EXCEPTION();
7608 _ctx_trans_fRaiseNeeded = true;
7609 }
7610 /* SwallowAllExceptions is fine because we don't get to this point */
7611 /* unless fRaiseNeeded = true or no exception was thrown */
7612 EX_END_CATCH(SwallowAllExceptions);
7613 TESTHOOKCALL(LeavingAppDomain((TargetDomain.m_dwId)));
7614 if (_ctx_trans_fRaiseNeeded)
7615 {
7616 LOG((LF_EH, LL_INFO1000, "RaiseCrossContextException(%s, %s, %d)\n",
7617 __FUNCTION__, __FILE__, __LINE__));
7618 _ctx_trans_pThread->RaiseCrossContextException(_ctx_trans_pTargetDomainException,_ctx_trans_pFrame);
7619 }
7620
7621 LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n",
7622 __FUNCTION__, __FILE__, __LINE__));
7623
7624#ifdef FEATURE_TESTHOOKS
7625 TESTHOOKCALL(LeftAppDomain(TargetDomain.m_dwId));
7626#endif
7627
7628 DEBUG_ASSURE_NO_RETURN_END(MAKECALL)
7629}
7630
7631
7632
7633void Thread::InitContext()
7634{
7635 CONTRACTL {
7636 THROWS;
7637 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
7638 }
7639 CONTRACTL_END;
7640
7641 // this should only be called when initializing a thread
7642 _ASSERTE(m_pDomain == NULL);
7643 GCX_COOP_NO_THREAD_BROKEN();
7644 m_pDomain = SystemDomain::System()->DefaultDomain();
7645 _ASSERTE(m_pDomain);
7646 m_pDomain->ThreadEnter(this, NULL);
7647}
7648
7649void Thread::ClearContext()
7650{
7651 CONTRACTL {
7652 NOTHROW;
7653 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
7654 }
7655 CONTRACTL_END;
7656
7657 if (!m_pDomain)
7658 return;
7659
7660 m_pDomain->ThreadExit(this, NULL);
7661
7662 // must set exposed context to null first otherwise object verification
7663 // checks will fail AV when m_Context is null
7664 m_pDomain = NULL;
7665#ifdef FEATURE_COMINTEROP
7666 m_fDisableComObjectEagerCleanup = false;
7667#endif //FEATURE_COMINTEROP
7668}
7669
7670void DECLSPEC_NORETURN Thread::RaiseCrossContextException(Exception* pExOrig, ContextTransitionFrame* pFrame)
7671{
7672 CONTRACTL
7673 {
7674 THROWS;
7675 WRAPPER(GC_TRIGGERS);
7676 }
7677 CONTRACTL_END;
7678
7679 // pEx is NULL means that the exception is CLRLastThrownObjectException
7680 CLRLastThrownObjectException lastThrown;
7681 Exception* pException = pExOrig ? pExOrig : &lastThrown;
7682 COMPlusThrow(CLRException::GetThrowableFromException(pException));
7683}
7684
7685
7686struct FindADCallbackType {
7687 AppDomain *pSearchDomain;
7688 AppDomain *pPrevDomain;
7689 Frame *pFrame;
7690 int count;
7691 enum TargetTransition
7692 {fFirstTransitionInto, fMostRecentTransitionInto}
7693 fTargetTransition;
7694
7695 FindADCallbackType() : pSearchDomain(NULL), pPrevDomain(NULL), pFrame(NULL)
7696 {
7697 LIMITED_METHOD_CONTRACT;
7698 }
7699};
7700
7701StackWalkAction StackWalkCallback_FindAD(CrawlFrame* pCF, void* data)
7702{
7703 CONTRACTL {
7704 NOTHROW;
7705 GC_NOTRIGGER;
7706 }
7707 CONTRACTL_END;
7708
7709 FindADCallbackType *pData = (FindADCallbackType *)data;
7710
7711 Frame *pFrame = pCF->GetFrame();
7712
7713 if (!pFrame)
7714 return SWA_CONTINUE;
7715
7716 AppDomain *pReturnDomain = pFrame->GetReturnDomain();
7717 if (!pReturnDomain || pReturnDomain == pData->pPrevDomain)
7718 return SWA_CONTINUE;
7719
7720 LOG((LF_APPDOMAIN, LL_INFO100, "StackWalkCallback_FindAD transition frame %8.8x into AD [%d]\n",
7721 pFrame, pReturnDomain->GetId().m_dwId));
7722
7723 if (pData->pPrevDomain == pData->pSearchDomain) {
7724 ++pData->count;
7725 // this is a transition into the domain we are unloading, so save it in case it is the first
7726 pData->pFrame = pFrame;
7727 if (pData->fTargetTransition == FindADCallbackType::fMostRecentTransitionInto)
7728 return SWA_ABORT; // only need to find last transition, so bail now
7729 }
7730
7731 pData->pPrevDomain = pReturnDomain;
7732 return SWA_CONTINUE;
7733}
7734
7735// This determines if a thread is running in the given domain at any point on the stack
7736Frame *Thread::IsRunningIn(AppDomain *pDomain, int *count)
7737{
7738 CONTRACTL {
7739 NOTHROW;
7740 GC_NOTRIGGER;
7741 }
7742 CONTRACTL_END;
7743
7744 FindADCallbackType fct;
7745 fct.pSearchDomain = pDomain;
7746 if (!fct.pSearchDomain)
7747 return FALSE;
7748
7749 // set prev to current so if are currently running in the target domain,
7750 // we will detect the transition
7751 fct.pPrevDomain = m_pDomain;
7752 fct.fTargetTransition = FindADCallbackType::fMostRecentTransitionInto;
7753 fct.count = 0;
7754
7755 // when this returns, if there is a transition into the AD, it will be in pFirstFrame
7756 StackWalkAction res;
7757 res = StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);
7758 if (count)
7759 *count = fct.count;
7760 return fct.pFrame;
7761}
7762
7763// This finds the very first frame on the stack where the thread transitioned into the given domain
7764Frame *Thread::GetFirstTransitionInto(AppDomain *pDomain, int *count)
7765{
7766 CONTRACTL {
7767 NOTHROW;
7768 GC_NOTRIGGER;
7769 }
7770 CONTRACTL_END;
7771
7772 FindADCallbackType fct;
7773 fct.pSearchDomain = pDomain;
7774 // set prev to current so if are currently running in the target domain,
7775 // we will detect the transition
7776 fct.pPrevDomain = m_pDomain;
7777 fct.fTargetTransition = FindADCallbackType::fFirstTransitionInto;
7778 fct.count = 0;
7779
7780 // when this returns, if there is a transition into the AD, it will be in pFirstFrame
7781 StackWalkAction res;
7782 res = StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);
7783 if (count)
7784 *count = fct.count;
7785 return fct.pFrame;
7786}
7787
7788BOOL Thread::HaveExtraWorkForFinalizer()
7789{
7790 LIMITED_METHOD_CONTRACT;
7791
7792 return m_ThreadTasks
7793 || ThreadpoolMgr::HaveTimerInfosToFlush()
7794 || ExecutionManager::IsCacheCleanupRequired()
7795 || Thread::CleanupNeededForFinalizedThread()
7796 || (m_DetachCount > 0)
7797 || SystemDomain::System()->RequireAppDomainCleanup()
7798 || ThreadStore::s_pThreadStore->ShouldTriggerGCForDeadThreads();
7799}
7800
7801void Thread::DoExtraWorkForFinalizer()
7802{
7803 CONTRACTL {
7804 THROWS;
7805 GC_TRIGGERS;
7806 }
7807 CONTRACTL_END;
7808
7809 _ASSERTE(GetThread() == this);
7810 _ASSERTE(this == FinalizerThread::GetFinalizerThread());
7811
7812#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
7813 if (RequiresCoInitialize())
7814 {
7815 SetApartment(AS_InMTA, FALSE);
7816 }
7817#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
7818
7819 if (RequireSyncBlockCleanup())
7820 {
7821#ifndef FEATURE_PAL
7822 InteropSyncBlockInfo::FlushStandbyList();
7823#endif // !FEATURE_PAL
7824
7825#ifdef FEATURE_COMINTEROP
7826 RCW::FlushStandbyList();
7827#endif // FEATURE_COMINTEROP
7828
7829 SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocks();
7830 }
7831 if (SystemDomain::System()->RequireAppDomainCleanup())
7832 {
7833 SystemDomain::System()->ProcessDelayedUnloadLoaderAllocators();
7834 }
7835
7836 if(m_DetachCount > 0 || Thread::CleanupNeededForFinalizedThread())
7837 {
7838 Thread::CleanupDetachedThreads();
7839 }
7840
7841 if(ExecutionManager::IsCacheCleanupRequired() && GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()>=1)
7842 {
7843 ExecutionManager::ClearCaches();
7844 }
7845
7846 // If there were any TimerInfos waiting to be released, they'll get flushed now
7847 ThreadpoolMgr::FlushQueueOfTimerInfos();
7848
7849 ThreadStore::s_pThreadStore->TriggerGCForDeadThreadsIfNecessary();
7850}
7851
7852
7853// HELPERS FOR THE BASE OF A MANAGED THREAD, INCLUDING AD TRANSITION SUPPORT
7854
7855// We have numerous places where we start up a managed thread. This includes several places in the
7856// ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer. Try to factor the code so our
7857// base exception handling behavior is consistent across those places. The resulting code is convoluted,
7858// but it's better than the prior situation of each thread being on a different plan.
7859
7860// We need Middle & Outer methods for the usual problem of combining C++ & SEH.
7861
7862/* The effect of all this is that we get:
7863
7864 Base of thread -- OS unhandled exception filter that we hook
7865
7866 SEH handler from DispatchOuter
7867 C++ handler from DispatchMiddle
7868
7869 And if there is an AppDomain transition before we call back to user code, we additionally get:
7870
7871 AppDomain transition -- contains its own handlers to terminate the first pass
7872 and marshal the exception.
7873
7874 SEH handler from DispatchOuter
7875 C++ handler from DispatchMiddle
7876
7877 Regardless of whether or not there is an AppDomain transition, we then have:
7878
7879 User code that obviously can throw.
7880
7881 So if we don't have an AD transition, or we take a fault before we successfully transition the
7882 AppDomain, then the base-most DispatchOuter/Middle will deal with the exception. This may
7883 involve swallowing exceptions or it may involve Watson & debugger attach. It will always
7884 involve notifications to any AppDomain.UnhandledException event listeners.
7885
7886 But if we did transition the AppDomain, then any Watson, debugger attach and UnhandledException
7887 events will occur in that AppDomain in the initial first pass. So we get a good debugging
7888 experience and we get notifications to the host that show which AppDomain is allowing exceptions
7889 to go unhandled (so perhaps it can be unloaded or otherwise dealt with).
7890
7891 The trick is that if the exception goes unhandled at the process level, we would normally try
7892 to fire AppDomain events and display the faulting exception on the console from two more
7893 places. These are the base-most DispatchOuter/Middle pair and the hook of the OS unhandled
7894 exception handler at the base of the thread.
7895
7896 This is redundant and messy. (There's no concern with getting a 2nd Watson because we only
7897 do one of these per process anyway). The solution for the base-most DispatchOuter/Middle is
7898 to use the ManagedThreadCallState.flags to control whether the exception has already been
7899 dealt with or not. These flags cause the ThreadBaseRedirectingFilter to either do normal
7900 "base of the thread" exception handling, or to ignore the exception because it has already
7901 been reported in the AppDomain we transitioned to.
7902
7903 But turning off the reporting in the OS unhandled exception filter is harder. We don't want
7904 to flip a bit on the Thread to disable this, unless we can be sure we are only disabling
7905 something we already reported, and that this thread will never recover from that situation and
7906 start executing code again. Here's the normal nightmare scenario with SEH:
7907
7908 1) exception of type A is thrown
7909 2) All the filters in the 1st pass say they don't want an A
7910 3) The exception gets all the way out and is considered unhandled. We report this "fact".
7911 4) Imagine we then set a bit that says this thread shouldn't report unhandled exceptions.
7912 5) The 2nd pass starts.
7913 6) Inside a finally, someone throws an exception of type B.
7914 7) A new 1st pass starts from the point of the throw, with a type B.
7915 8) Now a filter says "Yes, I will swallow exception B."
7916 9) We no longer have an unhandled exception, and execution continues merrily.
7917
7918 This is an unavoidable consequence of the 2-pass model. If you report unhandled exceptions
7919 in the 1st pass (for good debugging), you might find that this was premature and you don't
7920 have an unhandled exception when you get to the 2nd pass.
7921
7922 But it would not be optimal if in step 4 we set a bit that says we should suppress normal
7923 notifications and reporting on this thread, believing that the process will terminate.
7924
7925 The solution is to recognize that the base OS unhandled exception filter runs in two modes.
7926 In the first mode, it operates as today and serves as our backstop. In the second mode
7927 it is fully redundant with the handlers pushed after the AppDomain transition, which are
7928 completely containing the exception to the AD that it occurred in (for purposes of reporting).
7929 So we just need a flag on the thread that says whether or not that set of handlers are pushed
7930 and functioning. That flag enables / disables the base exception reporting and is called
7931 TSNC_AppDomainContainUnhandled
7932
7933*/
7934
7935
7936enum ManagedThreadCallStateFlags
7937{
7938 MTCSF_NormalBase,
7939 MTCSF_ContainToAppDomain,
7940 MTCSF_SuppressDuplicate,
7941};
7942
7943struct ManagedThreadCallState
7944{
7945 ADID pAppDomainId;
7946 AppDomain* pUnsafeAppDomain;
7947 BOOL bDomainIsAsID;
7948
7949 ADCallBackFcnType pTarget;
7950 LPVOID args;
7951 UnhandledExceptionLocation filterType;
7952 ManagedThreadCallStateFlags flags;
7953 BOOL IsAppDomainEqual(AppDomain* pApp)
7954 {
7955 LIMITED_METHOD_CONTRACT;
7956 return bDomainIsAsID?(pApp->GetId()==pAppDomainId):(pUnsafeAppDomain==pApp);
7957 }
7958 ManagedThreadCallState(ADID AppDomainId,ADCallBackFcnType Target,LPVOID Args,
7959 UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
7960 pAppDomainId(AppDomainId),
7961 pUnsafeAppDomain(NULL),
7962 bDomainIsAsID(TRUE),
7963 pTarget(Target),
7964 args(Args),
7965 filterType(FilterType),
7966 flags(Flags)
7967 {
7968 LIMITED_METHOD_CONTRACT;
7969 };
7970protected:
7971 ManagedThreadCallState(AppDomain* AppDomain,ADCallBackFcnType Target,LPVOID Args,
7972 UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
7973 pAppDomainId(ADID(0)),
7974 pUnsafeAppDomain(AppDomain),
7975 bDomainIsAsID(FALSE),
7976 pTarget(Target),
7977 args(Args),
7978 filterType(FilterType),
7979 flags(Flags)
7980 {
7981 LIMITED_METHOD_CONTRACT;
7982 };
7983 void InitForFinalizer(AppDomain* AppDomain,ADCallBackFcnType Target,LPVOID Args)
7984 {
7985 LIMITED_METHOD_CONTRACT;
7986 filterType=FinalizerThread;
7987 pUnsafeAppDomain=AppDomain;
7988 pTarget=Target;
7989 args=Args;
7990 };
7991
7992 friend void ManagedThreadBase_NoADTransition(ADCallBackFcnType pTarget,
7993 UnhandledExceptionLocation filterType);
7994 friend void ManagedThreadBase::FinalizerAppDomain(AppDomain* pAppDomain,
7995 ADCallBackFcnType pTarget,
7996 LPVOID args,
7997 ManagedThreadCallState *pTurnAround);
7998};
7999
8000// The following static helpers are outside of the ManagedThreadBase struct because I
8001// don't want to change threads.h whenever I change the mechanism for how unhandled
8002// exceptions works. The ManagedThreadBase struct is for the public exposure of the
8003// API only.
8004
8005static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState);
8006
8007static void ManagedThreadBase_DispatchInner(ManagedThreadCallState *pCallState)
8008{
8009 CONTRACTL
8010 {
8011 GC_TRIGGERS;
8012 THROWS;
8013 MODE_COOPERATIVE;
8014 }
8015 CONTRACTL_END;
8016
8017 // Go ahead and dispatch the call.
8018 (*pCallState->pTarget) (pCallState->args);
8019}
8020
8021static void ManagedThreadBase_DispatchMiddle(ManagedThreadCallState *pCallState)
8022{
8023 STATIC_CONTRACT_GC_TRIGGERS;
8024 STATIC_CONTRACT_THROWS;
8025 STATIC_CONTRACT_MODE_COOPERATIVE;
8026 STATIC_CONTRACT_SO_TOLERANT;
8027
8028 // We have the probe outside the EX_TRY below since corresponding EX_CATCH
8029 // also invokes SO_INTOLERANT code.
8030 BEGIN_SO_INTOLERANT_CODE(GetThread());
8031
8032 EX_TRY_CPP_ONLY
8033 {
8034 // During an unwind, we have some cleanup:
8035 //
8036 // 1) We should no longer suppress any unhandled exception reporting at the base
8037 // of the thread, because any handler that contained the exception to the AppDomain
8038 // where it occurred is now being removed from the stack.
8039 //
8040 // 2) We need to unwind the Frame chain. We cannot do it when we get to the __except clause
8041 // because at this point we are in the 2nd phase and the stack has been popped. Any
8042 // stack crawling from another thread will see a frame chain in a popped region of stack.
8043 // Nor can we pop it in a filter, since this would destroy all the stack-walking information
8044 // we need to perform the 2nd pass. So doing it in a C++ destructor will ensure it happens
8045 // during the 2nd pass but before the stack is actually popped.
8046 class Cleanup
8047 {
8048 Frame *m_pEntryFrame;
8049 Thread *m_pThread;
8050
8051 public:
8052 Cleanup(Thread* pThread)
8053 {
8054 m_pThread = pThread;
8055 m_pEntryFrame = pThread->m_pFrame;
8056 }
8057
8058 ~Cleanup()
8059 {
8060 GCX_COOP();
8061 m_pThread->SetFrame(m_pEntryFrame);
8062 m_pThread->ResetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
8063 }
8064 };
8065
8066 Cleanup cleanup(GetThread());
8067
8068 ManagedThreadBase_DispatchInner(pCallState);
8069 }
8070 EX_CATCH_CPP_ONLY
8071 {
8072 GCX_COOP();
8073 Exception *pException = GET_EXCEPTION();
8074
8075 // RudeThreadAbort is a pre-allocated instance of ThreadAbort. So the following is sufficient.
8076 // For Whidbey, by default only swallow certain exceptions. If reverting back to Everett's
8077 // behavior (swallowing all unhandled exception), then swallow all unhandled exception.
8078 //
8079 if (SwallowUnhandledExceptions() ||
8080 IsExceptionOfType(kThreadAbortException, pException))
8081 {
8082 // Do nothing to swallow the exception
8083 }
8084 else
8085 {
8086 // Setting up the unwind_and_continue_handler ensures that C++ exceptions do not leak out.
8087 // An example is when Thread1 in Default AppDomain creates AppDomain2, enters it, creates
8088 // another thread T2 and T2 throws OOM exception (that goes unhandled). At the transition
8089 // boundary, END_DOMAIN_TRANSITION will catch it and invoke RaiseCrossContextException
8090 // that will rethrow the OOM as a C++ exception.
8091 //
8092 // Without unwind_and_continue_handler below, the exception will fly up the stack to
8093 // this point, where it will be rethrown and thus leak out.
8094 INSTALL_UNWIND_AND_CONTINUE_HANDLER;
8095
8096 EX_RETHROW;
8097
8098 UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
8099 }
8100 }
8101 EX_END_CATCH(SwallowAllExceptions);
8102
8103 END_SO_INTOLERANT_CODE;
8104}
8105
8106/*
8107typedef struct Param
8108{
8109 ManagedThreadCallState * m_pCallState;
8110 Frame * m_pFrame;
8111 Param(ManagedThreadCallState * pCallState, Frame * pFrame): m_pCallState(pCallState), m_pFrame(pFrame) {}
8112} TryParam;
8113*/
8114typedef struct Param: public NotifyOfCHFFilterWrapperParam
8115{
8116 ManagedThreadCallState * m_pCallState;
8117 Param(ManagedThreadCallState * pCallState): m_pCallState(pCallState) {}
8118} TryParam;
8119
8120// Dispatch to the appropriate filter, based on the active CallState.
8121static LONG ThreadBaseRedirectingFilter(PEXCEPTION_POINTERS pExceptionInfo, LPVOID pParam)
8122{
8123 STATIC_CONTRACT_THROWS;
8124 STATIC_CONTRACT_GC_TRIGGERS;
8125 STATIC_CONTRACT_MODE_ANY;
8126
8127 LONG (*ptrFilter) (PEXCEPTION_POINTERS, PVOID);
8128
8129 TryParam * pRealParam = reinterpret_cast<TryParam *>(pParam);
8130 ManagedThreadCallState * _pCallState = pRealParam->m_pCallState;
8131 ManagedThreadCallStateFlags flags = _pCallState->flags;
8132
8133 if (flags == MTCSF_SuppressDuplicate)
8134 {
8135 LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
8136 GetThread()->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
8137 return EXCEPTION_CONTINUE_SEARCH;
8138 }
8139
8140 LONG ret = -1;
8141 BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return EXCEPTION_CONTINUE_SEARCH;);
8142
8143 // This will invoke the swallowing filter. If that returns EXCEPTION_CONTINUE_SEARCH,
8144 // it will trigger unhandled exception processing.
8145 ptrFilter = ThreadBaseExceptionAppDomainFilter;
8146
8147 // WARNING - ptrFilter may not return
8148 // This occurs when the debugger decides to intercept an exception and catch it in a frame closer
8149 // to the leaf than the one executing this filter
8150 ret = (*ptrFilter) (pExceptionInfo, _pCallState);
8151
8152 // Although EXCEPTION_EXECUTE_HANDLER can also be returned in cases corresponding to
8153 // unhandled exceptions, all of those cases have already notified the debugger of an unhandled
8154 // exception which prevents a second notification indicating the exception was caught
8155 if (ret == EXCEPTION_EXECUTE_HANDLER)
8156 {
8157
8158 // WARNING - NotifyOfCHFFilterWrapper may not return
8159 // This occurs when the debugger decides to intercept an exception and catch it in a frame closer
8160 // to the leaf than the one executing this filter
8161 NotifyOfCHFFilterWrapper(pExceptionInfo, pRealParam);
8162 }
8163
8164 // If we are containing unhandled exceptions to the AppDomain we transitioned into, and the
8165 // exception is coming out, then this exception is going unhandled. We have already done
8166 // Watson and managed events, so suppress all filters below us. Otherwise we are swallowing
8167 // it and returning out of the AppDomain.
8168 if (flags == MTCSF_ContainToAppDomain)
8169 {
8170 if(ret == EXCEPTION_CONTINUE_SEARCH)
8171 {
8172 _pCallState->flags = MTCSF_SuppressDuplicate;
8173 }
8174 else if(ret == EXCEPTION_EXECUTE_HANDLER)
8175 {
8176 _pCallState->flags = MTCSF_NormalBase;
8177 }
8178 // else if( EXCEPTION_CONTINUE_EXECUTION ) do nothing
8179 }
8180
8181 // Get the reference to the current thread..
8182 Thread *pCurThread = GetThread();
8183 _ASSERTE(pCurThread);
8184
8185 if (flags == MTCSF_ContainToAppDomain)
8186 {
8187
8188 if (((ManagedThreadCallState *) _pCallState)->flags == MTCSF_SuppressDuplicate)
8189 {
8190 // Set the flag that we have done unhandled exception processing
8191 // for this managed thread that started in a non-default domain
8192 LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
8193 pCurThread->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
8194 }
8195 }
8196 else
8197 {
8198 _ASSERTE(flags == MTCSF_NormalBase);
8199
8200 LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_ProcessedUnhandledException\n"));
8201
8202 //
8203 // In the default domain, when an exception goes unhandled on a managed thread whose threadbase is in the VM (e.g. explicitly spawned threads,
8204 // ThreadPool threads, finalizer thread, etc), CLR can end up in the unhandled exception processing path twice.
8205 //
8206 // The first attempt to perform UE processing happens at the managed thread base (via this function). When it completes,
8207 // we will set TSNC_ProcessedUnhandledException state against the thread to indicate that we have perform the unhandled exception processing.
8208 //
8209 // On the desktop CLR, after the first attempt, we will return back to the OS with EXCEPTION_CONTINUE_SEARCH as unhandled exceptions cannot be swallowed. When the exception reaches
8210 // the native threadbase in the OS kernel, the OS will invoke the UEF registered for the process. This can result in CLR's UEF (COMUnhandledExceptionFilter)
8211 // getting invoked that will attempt to perform UE processing yet again for the same thread. To avoid this duplicate processing, we check the presence of
8212 // TSNC_ProcessedUnhandledException state on the thread and if present, we simply return back to the OS.
8213 //
8214 // On desktop CoreCLR, we will only do UE processing once (at the managed threadbase) since no thread is created in default domain - all are created and executed in non-default domain.
8215 // As a result, we go via completely different codepath that prevents duplication of UE processing from happening, especially since desktop CoreCLR is targetted for SL and SL
8216 // always passes us a flag to swallow unhandled exceptions.
8217 //
8218 // On CoreSys CoreCLR, the host can ask CoreCLR to run all code in the default domain. As a result, when we return from the first attempt to perform UE
8219 // processing, the call could return back with EXCEPTION_EXECUTE_HANDLER since, like desktop CoreCLR is instructed by SL host to swallow all unhandled exceptions,
8220 // CoreSys CoreCLR can also be instructed by its Phone host to swallow all unhandled exceptions. As a result, the exception dispatch will never continue to go upstack
8221 // to the native threadbase in the OS kernel and thus, there will never be a second attempt to perform UE processing. Hence, we dont, and shouldnt, need to set
8222 // TSNC_ProcessedUnhandledException state against the thread if we are in SingleAppDomain mode and have been asked to swallow the exception.
8223 //
8224 // If we continue to set TSNC_ProcessedUnhandledException and a ThreadPool Thread A has an exception go unhandled, we will swallow it correctly for the first time.
8225 // The next time Thread A has an exception go unhandled, our UEF will see TSNC_ProcessedUnhandledException set and assume (incorrectly) UE processing has happened and
8226 // will fail to honor the host policy (e.g. swallow unhandled exception). Thus, the 2nd unhandled exception may end up crashing the app when it should not.
8227 //
8228 if (ret != EXCEPTION_EXECUTE_HANDLER)
8229 {
8230 // Since we have already done unhandled exception processing for it, we dont want it
8231 // to happen again if our UEF gets invoked upon returning back to the OS.
8232 //
8233 // Set the flag to indicate so.
8234 pCurThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
8235 }
8236 }
8237
8238
8239 END_SO_INTOLERANT_CODE;
8240 return ret;
8241}
8242
8243static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState)
8244{
8245 STATIC_CONTRACT_GC_TRIGGERS;
8246 STATIC_CONTRACT_THROWS;
8247 STATIC_CONTRACT_MODE_COOPERATIVE;
8248
8249 // HasStarted() must have already been performed by our caller
8250 _ASSERTE(GetThread() != NULL);
8251
8252 Thread *pThread = GetThread();
8253#ifdef WIN64EXCEPTIONS
8254 Frame *pFrame = pThread->m_pFrame;
8255#endif // WIN64EXCEPTIONS
8256
8257 // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
8258 // which may swallow managed exceptions. The debugger needs this in order to send a
8259 // CatchHandlerFound (CHF) notification.
8260 FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;
8261
8262 TryParam param(pCallState);
8263 param.pFrame = &catchFrame;
8264
8265 struct TryArgs
8266 {
8267 TryParam *pTryParam;
8268 Thread *pThread;
8269
8270 BOOL *pfHadException;
8271
8272#ifdef WIN64EXCEPTIONS
8273 Frame *pFrame;
8274#endif // WIN64EXCEPTIONS
8275 }args;
8276
8277 args.pTryParam = &param;
8278 args.pThread = pThread;
8279
8280 BOOL fHadException = TRUE;
8281 args.pfHadException = &fHadException;
8282
8283#ifdef WIN64EXCEPTIONS
8284 args.pFrame = pFrame;
8285#endif // WIN64EXCEPTIONS
8286
8287 PAL_TRY(TryArgs *, pArgs, &args)
8288 {
8289 PAL_TRY(TryParam *, pParam, pArgs->pTryParam)
8290 {
8291 ManagedThreadBase_DispatchMiddle(pParam->m_pCallState);
8292 }
8293 PAL_EXCEPT_FILTER(ThreadBaseRedirectingFilter)
8294 {
8295 // Note: one of our C++ exceptions will never reach this filter because they're always caught by
8296 // the EX_CATCH in ManagedThreadBase_DispatchMiddle().
8297 //
8298 // If eCLRDeterminedPolicy, we only swallow for TA, RTA, and ADU exception.
8299 // For eHostDeterminedPolicy, we will swallow all the managed exception.
8300 #ifdef WIN64EXCEPTIONS
8301 // this must be done after the second pass has run, it does not
8302 // reference anything on the stack, so it is safe to run in an
8303 // SEH __except clause as well as a C++ catch clause.
8304 ExceptionTracker::PopTrackers(pArgs->pFrame);
8305 #endif // WIN64EXCEPTIONS
8306
8307 // Fortunately, ThreadAbortExceptions are always
8308 if (pArgs->pThread->IsAbortRequested())
8309 pArgs->pThread->EEResetAbort(Thread::TAR_Thread);
8310 }
8311 PAL_ENDTRY;
8312
8313 *(pArgs->pfHadException) = FALSE;
8314 }
8315 PAL_FINALLY
8316 {
8317 catchFrame.Pop();
8318 }
8319 PAL_ENDTRY;
8320}
8321
8322
8323// For the implementation, there are three variants of work possible:
8324
8325// 1. Establish the base of a managed thread, and switch to the correct AppDomain.
8326static void ManagedThreadBase_FullTransitionWithAD(ADID pAppDomain,
8327 ADCallBackFcnType pTarget,
8328 LPVOID args,
8329 UnhandledExceptionLocation filterType)
8330{
8331 CONTRACTL
8332 {
8333 GC_TRIGGERS;
8334 THROWS;
8335 MODE_COOPERATIVE;
8336 }
8337 CONTRACTL_END;
8338
8339 ManagedThreadCallState CallState(pAppDomain, pTarget, args, filterType, MTCSF_NormalBase);
8340 ManagedThreadBase_DispatchOuter(&CallState);
8341}
8342
8343// 2. Establish the base of a managed thread, but the AppDomain transition must be
8344// deferred until later.
8345void ManagedThreadBase_NoADTransition(ADCallBackFcnType pTarget,
8346 UnhandledExceptionLocation filterType)
8347{
8348 CONTRACTL
8349 {
8350 GC_TRIGGERS;
8351 THROWS;
8352 MODE_COOPERATIVE;
8353 }
8354 CONTRACTL_END;
8355
8356 AppDomain *pAppDomain = GetAppDomain();
8357
8358 ManagedThreadCallState CallState(pAppDomain, pTarget, NULL, filterType, MTCSF_NormalBase);
8359
8360 // self-describing, to create a pTurnAround data for eventual delivery to a subsequent AppDomain
8361 // transition.
8362 CallState.args = &CallState;
8363
8364 ManagedThreadBase_DispatchOuter(&CallState);
8365}
8366
8367
8368
8369// And here are the various exposed entrypoints for base thread behavior
8370
8371// The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker
8372void ManagedThreadBase::KickOff(ADID pAppDomain, ADCallBackFcnType pTarget, LPVOID args)
8373{
8374 WRAPPER_NO_CONTRACT;
8375 ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ManagedThread);
8376}
8377
8378// The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in the ThreadPool
8379void ManagedThreadBase::ThreadPool(ADID pAppDomain, ADCallBackFcnType pTarget, LPVOID args)
8380{
8381 WRAPPER_NO_CONTRACT;
8382 ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ThreadPoolThread);
8383}
8384
8385// The Finalizer thread establishes exception handling at its base, but defers all the AppDomain
8386// transitions.
8387void ManagedThreadBase::FinalizerBase(ADCallBackFcnType pTarget)
8388{
8389 WRAPPER_NO_CONTRACT;
8390 ManagedThreadBase_NoADTransition(pTarget, FinalizerThread);
8391}
8392
8393void ManagedThreadBase::FinalizerAppDomain(AppDomain *pAppDomain,
8394 ADCallBackFcnType pTarget,
8395 LPVOID args,
8396 ManagedThreadCallState *pTurnAround)
8397{
8398 WRAPPER_NO_CONTRACT;
8399 pTurnAround->InitForFinalizer(pAppDomain,pTarget,args);
8400 _ASSERTE(pTurnAround->flags == MTCSF_NormalBase);
8401 ManagedThreadBase_DispatchInner(pTurnAround);
8402}
8403
8404//+----------------------------------------------------------------------------
8405//
8406// Method: Thread::GetStaticFieldAddress private
8407//
8408// Synopsis: Get the address of the field relative to the current thread.
8409// If an address has not been assigned yet then create one.
8410//
8411//+----------------------------------------------------------------------------
8412
8413LPVOID Thread::GetStaticFieldAddress(FieldDesc *pFD)
8414{
8415 CONTRACTL {
8416 THROWS;
8417 GC_TRIGGERS;
8418 }
8419 CONTRACTL_END;
8420
8421 _ASSERTE(pFD != NULL);
8422 _ASSERTE(pFD->IsThreadStatic());
8423 _ASSERTE(!pFD->IsRVA());
8424
8425 // for static field the MethodTable is exact even for generic classes
8426 MethodTable *pMT = pFD->GetEnclosingMethodTable();
8427
8428 // We need to make sure that the class has been allocated, however
8429 // we should not call the class constructor
8430 ThreadStatics::GetTLM(pMT)->EnsureClassAllocated(pMT);
8431
8432 PTR_BYTE base = NULL;
8433
8434 if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
8435 pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
8436 {
8437 base = pMT->GetGCThreadStaticsBasePointer();
8438 }
8439 else
8440 {
8441 base = pMT->GetNonGCThreadStaticsBasePointer();
8442 }
8443
8444 _ASSERTE(base != NULL);
8445
8446 DWORD offset = pFD->GetOffset();
8447 _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET);
8448
8449 LPVOID result = (LPVOID)((PTR_BYTE)base + (DWORD)offset);
8450
8451 // For value classes, the handle points at an OBJECTREF
8452 // which holds the boxed value class, so derefernce and unbox.
8453 if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
8454 {
8455 OBJECTREF obj = ObjectToOBJECTREF(*(Object**) result);
8456 result = obj->GetData();
8457 }
8458
8459 return result;
8460}
8461
8462#endif // #ifndef DACCESS_COMPILE
8463
8464 //+----------------------------------------------------------------------------
8465//
8466// Method: Thread::GetStaticFieldAddrNoCreate private
8467//
8468// Synopsis: Get the address of the field relative to the thread.
8469// If an address has not been assigned, return NULL.
8470// No creating is allowed.
8471//
8472//+----------------------------------------------------------------------------
8473
8474TADDR Thread::GetStaticFieldAddrNoCreate(FieldDesc *pFD)
8475{
8476 CONTRACTL {
8477 NOTHROW;
8478 GC_NOTRIGGER;
8479 SUPPORTS_DAC;
8480 }
8481 CONTRACTL_END;
8482
8483 _ASSERTE(pFD != NULL);
8484 _ASSERTE(pFD->IsThreadStatic());
8485
8486 // for static field the MethodTable is exact even for generic classes
8487 PTR_MethodTable pMT = pFD->GetEnclosingMethodTable();
8488
8489 PTR_BYTE base = NULL;
8490
8491 if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
8492 pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
8493 {
8494 base = pMT->GetGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this));
8495 }
8496 else
8497 {
8498 base = pMT->GetNonGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this));
8499 }
8500
8501 if (base == NULL)
8502 return NULL;
8503
8504 DWORD offset = pFD->GetOffset();
8505 _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET);
8506
8507 TADDR result = dac_cast<TADDR>(base) + (DWORD)offset;
8508
8509 // For value classes, the handle points at an OBJECTREF
8510 // which holds the boxed value class, so derefernce and unbox.
8511 if (pFD->IsByValue())
8512 {
8513 _ASSERTE(result != NULL);
8514 PTR_Object obj = *PTR_UNCHECKED_OBJECTREF(result);
8515 if (obj == NULL)
8516 return NULL;
8517 result = dac_cast<TADDR>(obj->GetData());
8518 }
8519
8520 return result;
8521}
8522
8523#ifndef DACCESS_COMPILE
8524
8525//
8526// NotifyFrameChainOfExceptionUnwind
8527// -----------------------------------------------------------
8528// This method will walk the Frame chain from pStartFrame to
8529// the last frame that is below pvLimitSP and will call each
8530// frame's ExceptionUnwind method. It will return the first
8531// Frame that is above pvLimitSP.
8532//
8533Frame * Thread::NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP)
8534{
8535 CONTRACTL
8536 {
8537 NOTHROW;
8538 DISABLED(GC_TRIGGERS); // due to UnwindFrameChain from NOTRIGGER areas
8539 MODE_COOPERATIVE;
8540 PRECONDITION(CheckPointer(pStartFrame));
8541 PRECONDITION(CheckPointer(pvLimitSP));
8542 }
8543 CONTRACTL_END;
8544
8545 Frame * pFrame;
8546
8547#ifdef _DEBUG
8548 //
8549 // assert that the specified Thread's Frame chain actually
8550 // contains the start Frame.
8551 //
8552 pFrame = m_pFrame;
8553 while ((pFrame != pStartFrame) &&
8554 (pFrame != FRAME_TOP))
8555 {
8556 pFrame = pFrame->Next();
8557 }
8558 CONSISTENCY_CHECK_MSG(pFrame == pStartFrame, "pStartFrame is not on pThread's Frame chain!");
8559#endif // _DEBUG
8560
8561 pFrame = pStartFrame;
8562 while (pFrame < pvLimitSP)
8563 {
8564 CONSISTENCY_CHECK(pFrame != PTR_NULL);
8565 CONSISTENCY_CHECK((pFrame) > static_cast<Frame *>((LPVOID)GetCurrentSP()));
8566 pFrame->ExceptionUnwind();
8567 pFrame = pFrame->Next();
8568 }
8569
8570 // return the frame after the last one notified of the unwind
8571 return pFrame;
8572}
8573
8574//+----------------------------------------------------------------------------
8575//
8576// Method: Thread::DeleteThreadStaticData private
8577//
8578// Synopsis: Delete the static data for each appdomain that this thread
8579// visited.
8580//
8581//
8582//+----------------------------------------------------------------------------
8583
8584void Thread::DeleteThreadStaticData()
8585{
8586 CONTRACTL {
8587 NOTHROW;
8588 GC_NOTRIGGER;
8589 }
8590 CONTRACTL_END;
8591
8592 m_ThreadLocalBlock.FreeTable();
8593}
8594
8595//+----------------------------------------------------------------------------
8596//
8597// Method: Thread::DeleteThreadStaticData public
8598//
8599// Synopsis: Delete the static data for the given module. This is called
8600// when the AssemblyLoadContext unloads.
8601//
8602//
8603//+----------------------------------------------------------------------------
8604
8605void Thread::DeleteThreadStaticData(ModuleIndex index)
8606{
8607 m_ThreadLocalBlock.FreeTLM(index.m_dwIndex, FALSE /* isThreadShuttingDown */);
8608}
8609
8610OBJECTREF Thread::GetCulture(BOOL bUICulture)
8611{
8612 CONTRACTL {
8613 THROWS;
8614 GC_TRIGGERS;
8615 MODE_COOPERATIVE;
8616 }
8617 CONTRACTL_END;
8618
8619 FieldDesc * pFD;
8620
8621 // This is the case when we're building mscorlib and haven't yet created
8622 // the system assembly.
8623 if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
8624 return NULL;
8625 }
8626
8627 OBJECTREF pCurrentCulture;
8628 if (bUICulture) {
8629 // Call the Getter for the CurrentUICulture. This will cause it to populate the field.
8630 MethodDescCallSite propGet(METHOD__CULTURE_INFO__GET_CURRENT_UI_CULTURE);
8631 ARG_SLOT retVal = propGet.Call_RetArgSlot(NULL);
8632 pCurrentCulture = ArgSlotToObj(retVal);
8633 } else {
8634 //This is faster than calling the property, because this is what the call does anyway.
8635 pFD = MscorlibBinder::GetField(FIELD__CULTURE_INFO__CURRENT_CULTURE);
8636 _ASSERTE(pFD);
8637
8638 pFD->CheckRunClassInitThrowing();
8639
8640 pCurrentCulture = pFD->GetStaticOBJECTREF();
8641 _ASSERTE(pCurrentCulture!=NULL);
8642 }
8643
8644 return pCurrentCulture;
8645}
8646
8647void Thread::SetCulture(OBJECTREF *CultureObj, BOOL bUICulture)
8648{
8649 CONTRACTL {
8650 THROWS;
8651 GC_TRIGGERS;
8652 MODE_COOPERATIVE;
8653 }
8654 CONTRACTL_END;
8655
8656 MethodDescCallSite propSet(bUICulture
8657 ? METHOD__CULTURE_INFO__SET_CURRENT_UI_CULTURE
8658 : METHOD__CULTURE_INFO__SET_CURRENT_CULTURE);
8659
8660 // Set up the Stack.
8661 ARG_SLOT pNewArgs[] = {
8662 ObjToArgSlot(*CultureObj)
8663 };
8664
8665 // Make the actual call.
8666 propSet.Call_RetArgSlot(pNewArgs);
8667}
8668
8669void Thread::SetHasPromotedBytes ()
8670{
8671 CONTRACTL {
8672 NOTHROW;
8673 GC_NOTRIGGER;
8674 }
8675 CONTRACTL_END;
8676
8677 m_fPromoted = TRUE;
8678
8679 _ASSERTE(GCHeapUtilities::IsGCInProgress() && IsGCThread ());
8680
8681 if (!m_fPreemptiveGCDisabled)
8682 {
8683 if (FRAME_TOP == GetFrame())
8684 m_fPromoted = FALSE;
8685 }
8686}
8687
8688BOOL ThreadStore::HoldingThreadStore(Thread *pThread)
8689{
8690 CONTRACTL {
8691 NOTHROW;
8692 GC_NOTRIGGER;
8693 SO_TOLERANT;
8694 }
8695 CONTRACTL_END;
8696
8697 if (pThread)
8698 {
8699 return (pThread == s_pThreadStore->m_HoldingThread);
8700 }
8701 else
8702 {
8703 return (s_pThreadStore->m_holderthreadid.IsCurrentThread());
8704 }
8705}
8706
8707LONG Thread::GetTotalThreadPoolCompletionCount()
8708{
8709 CONTRACTL
8710 {
8711 NOTHROW;
8712 MODE_ANY;
8713 }
8714 CONTRACTL_END;
8715
8716 LONG total;
8717 if (g_fEEStarted) //make sure we actually have a thread store
8718 {
8719 // make sure up-to-date thread-local counts are visible to us
8720 ::FlushProcessWriteBuffers();
8721
8722 // enumerate all threads, summing their local counts.
8723 ThreadStoreLockHolder tsl;
8724
8725 total = s_threadPoolCompletionCountOverflow.Load();
8726
8727 Thread *pThread = NULL;
8728 while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL)
8729 {
8730 total += pThread->m_threadPoolCompletionCount;
8731 }
8732 }
8733 else
8734 {
8735 total = s_threadPoolCompletionCountOverflow.Load();
8736 }
8737
8738 return total;
8739}
8740
8741
8742INT32 Thread::ResetManagedThreadObject(INT32 nPriority)
8743{
8744 CONTRACTL {
8745 NOTHROW;
8746 GC_TRIGGERS;
8747 }
8748 CONTRACTL_END;
8749
8750 GCX_COOP();
8751 return ResetManagedThreadObjectInCoopMode(nPriority);
8752}
8753
8754INT32 Thread::ResetManagedThreadObjectInCoopMode(INT32 nPriority)
8755{
8756 CONTRACTL {
8757 NOTHROW;
8758 GC_NOTRIGGER;
8759 MODE_COOPERATIVE;
8760 SO_TOLERANT;
8761 }
8762 CONTRACTL_END;
8763
8764 THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
8765 if (pObject != NULL)
8766 {
8767 pObject->ResetName();
8768 nPriority = pObject->GetPriority();
8769 }
8770
8771 return nPriority;
8772}
8773
8774BOOL Thread::IsRealThreadPoolResetNeeded()
8775{
8776 CONTRACTL
8777 {
8778 NOTHROW;
8779 GC_NOTRIGGER;
8780 MODE_COOPERATIVE;
8781 SO_TOLERANT;
8782 }
8783 CONTRACTL_END;
8784
8785 if(!IsBackground())
8786 return TRUE;
8787
8788 THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
8789
8790 if(pObject != NULL)
8791 {
8792 INT32 nPriority = pObject->GetPriority();
8793
8794 if(nPriority != ThreadNative::PRIORITY_NORMAL)
8795 return TRUE;
8796 }
8797
8798 return FALSE;
8799}
8800
8801void Thread::InternalReset(BOOL fNotFinalizerThread, BOOL fThreadObjectResetNeeded, BOOL fResetAbort)
8802{
8803 CONTRACTL {
8804 NOTHROW;
8805 if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;SO_INTOLERANT;} else {GC_NOTRIGGER;SO_TOLERANT;}
8806 }
8807 CONTRACTL_END;
8808
8809 _ASSERTE (this == GetThread());
8810
8811 FinishSOWork();
8812
8813 INT32 nPriority = ThreadNative::PRIORITY_NORMAL;
8814
8815 if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
8816 {
8817 nPriority = ThreadNative::PRIORITY_HIGHEST;
8818 }
8819
8820 if(fThreadObjectResetNeeded)
8821 {
8822 nPriority = ResetManagedThreadObject(nPriority);
8823 }
8824
8825 //m_MarshalAlloc.Collapse(NULL);
8826
8827 if (fResetAbort && IsAbortRequested()) {
8828 UnmarkThreadForAbort(TAR_ALL);
8829 }
8830
8831 if (fResetAbort && IsAborted())
8832 ClearAborted();
8833
8834 if (IsThreadPoolThread() && fThreadObjectResetNeeded)
8835 {
8836 SetBackground(TRUE);
8837 if (nPriority != ThreadNative::PRIORITY_NORMAL)
8838 {
8839 SetThreadPriority(THREAD_PRIORITY_NORMAL);
8840 }
8841 }
8842 else if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
8843 {
8844 SetBackground(TRUE);
8845 if (nPriority != ThreadNative::PRIORITY_HIGHEST)
8846 {
8847 SetThreadPriority(THREAD_PRIORITY_HIGHEST);
8848 }
8849 }
8850}
8851
8852HRESULT Thread::Abort ()
8853{
8854 CONTRACTL
8855 {
8856 NOTHROW;
8857 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
8858 SO_TOLERANT;
8859 }
8860 CONTRACTL_END;
8861
8862 BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW;);
8863 EX_TRY
8864 {
8865 UserAbort(TAR_Thread, EEPolicy::TA_Safe, INFINITE, Thread::UAC_Host);
8866 }
8867 EX_CATCH
8868 {
8869 }
8870 EX_END_CATCH(SwallowAllExceptions);
8871 END_SO_INTOLERANT_CODE;
8872
8873 return S_OK;
8874}
8875
8876HRESULT Thread::RudeAbort()
8877{
8878 CONTRACTL
8879 {
8880 NOTHROW;
8881 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
8882 SO_TOLERANT;
8883 }
8884 CONTRACTL_END;
8885
8886 BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
8887
8888 EX_TRY
8889 {
8890 UserAbort(TAR_Thread, EEPolicy::TA_Rude, INFINITE, Thread::UAC_Host);
8891 }
8892 EX_CATCH
8893 {
8894 }
8895 EX_END_CATCH(SwallowAllExceptions);
8896
8897 END_SO_INTOLERANT_CODE;
8898
8899 return S_OK;
8900}
8901
8902HRESULT Thread::NeedsPriorityScheduling(BOOL *pbNeedsPriorityScheduling)
8903{
8904 CONTRACTL {
8905 NOTHROW;
8906 GC_NOTRIGGER;
8907 SO_TOLERANT;
8908 }
8909 CONTRACTL_END;
8910
8911 *pbNeedsPriorityScheduling = (m_fPreemptiveGCDisabled ||
8912 (g_fEEStarted && this == FinalizerThread::GetFinalizerThread()));
8913 return S_OK;
8914}
8915
8916
8917HRESULT Thread::LocksHeld(SIZE_T *pLockCount)
8918{
8919 LIMITED_METHOD_CONTRACT;
8920
8921 *pLockCount = m_dwLockCount;
8922 return S_OK;
8923}
8924
8925HRESULT Thread::BeginPreventAsyncAbort()
8926{
8927 WRAPPER_NO_CONTRACT;
8928
8929#ifdef _DEBUG
8930 int count =
8931#endif
8932 FastInterlockIncrement((LONG*)&m_PreventAbort);
8933
8934#ifdef _DEBUG
8935 ASSERT(count > 0);
8936
8937 FastInterlockIncrement((LONG*)&m_dwDisableAbortCheckCount);
8938#endif
8939
8940 return S_OK;
8941}
8942
8943HRESULT Thread::EndPreventAsyncAbort()
8944{
8945 WRAPPER_NO_CONTRACT;
8946
8947#ifdef _DEBUG
8948 int count =
8949#endif
8950 FastInterlockDecrement((LONG*)&m_PreventAbort);
8951
8952#ifdef _DEBUG
8953 ASSERT(count >= 0);
8954
8955 FastInterlockDecrement((LONG*)&m_dwDisableAbortCheckCount);
8956#endif
8957
8958 return S_OK;
8959}
8960
8961
8962ULONG Thread::AddRef()
8963{
8964 WRAPPER_NO_CONTRACT;
8965
8966 _ASSERTE(m_ExternalRefCount > 0);
8967
8968 _ASSERTE (m_UnmanagedRefCount != (DWORD) -1);
8969 ULONG ref = FastInterlockIncrement((LONG*)&m_UnmanagedRefCount);
8970
8971 return ref;
8972}
8973
8974ULONG Thread::Release()
8975{
8976 WRAPPER_NO_CONTRACT;
8977 SUPPORTS_DAC_HOST_ONLY;
8978
8979 _ASSERTE (m_ExternalRefCount > 0);
8980 _ASSERTE (m_UnmanagedRefCount > 0);
8981 ULONG ref = FastInterlockDecrement((LONG*)&m_UnmanagedRefCount);
8982 return ref;
8983}
8984
8985HRESULT Thread::QueryInterface(REFIID riid, void **ppUnk)
8986{
8987 LIMITED_METHOD_CONTRACT;
8988
8989 return E_NOINTERFACE;
8990
8991}
8992
8993void Thread::SetupThreadForHost()
8994{
8995 CONTRACTL
8996 {
8997 THROWS;
8998 GC_TRIGGERS;
8999 SO_TOLERANT;
9000 }
9001 CONTRACTL_END;
9002
9003 _ASSERTE (GetThread() == this);
9004 CONTRACT_VIOLATION(SOToleranceViolation);
9005
9006}
9007
9008
9009ETaskType GetCurrentTaskType()
9010{
9011 STATIC_CONTRACT_NOTHROW;
9012 STATIC_CONTRACT_GC_NOTRIGGER;
9013 STATIC_CONTRACT_SO_TOLERANT;
9014
9015 ETaskType TaskType = TT_UNKNOWN;
9016 size_t type = (size_t)ClrFlsGetValue (TlsIdx_ThreadType);
9017 if (type & ThreadType_DbgHelper)
9018 {
9019 TaskType = TT_DEBUGGERHELPER;
9020 }
9021 else if (type & ThreadType_GC)
9022 {
9023 TaskType = TT_GC;
9024 }
9025 else if (type & ThreadType_Finalizer)
9026 {
9027 TaskType = TT_FINALIZER;
9028 }
9029 else if (type & ThreadType_Timer)
9030 {
9031 TaskType = TT_THREADPOOL_TIMER;
9032 }
9033 else if (type & ThreadType_Gate)
9034 {
9035 TaskType = TT_THREADPOOL_GATE;
9036 }
9037 else if (type & ThreadType_Wait)
9038 {
9039 TaskType = TT_THREADPOOL_WAIT;
9040 }
9041 else if (type & ThreadType_Threadpool_IOCompletion)
9042 {
9043 TaskType = TT_THREADPOOL_IOCOMPLETION;
9044 }
9045 else if (type & ThreadType_Threadpool_Worker)
9046 {
9047 TaskType = TT_THREADPOOL_WORKER;
9048 }
9049 else
9050 {
9051 Thread *pThread = GetThread();
9052 if (pThread)
9053 {
9054 TaskType = TT_USER;
9055 }
9056 }
9057
9058 return TaskType;
9059}
9060
9061DeadlockAwareLock::DeadlockAwareLock(const char *description)
9062 : m_pHoldingThread(NULL)
9063#ifdef _DEBUG
9064 , m_description(description)
9065#endif
9066{
9067 LIMITED_METHOD_CONTRACT;
9068}
9069
9070DeadlockAwareLock::~DeadlockAwareLock()
9071{
9072 CONTRACTL
9073 {
9074 NOTHROW;
9075 GC_NOTRIGGER;
9076 MODE_ANY;
9077 CAN_TAKE_LOCK;
9078 }
9079 CONTRACTL_END;
9080
9081 // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
9082 CrstHolder lock(&g_DeadlockAwareCrst);
9083}
9084
9085CHECK DeadlockAwareLock::CheckDeadlock(Thread *pThread)
9086{
9087 CONTRACTL
9088 {
9089 PRECONDITION(g_DeadlockAwareCrst.OwnedByCurrentThread());
9090 NOTHROW;
9091 GC_NOTRIGGER;
9092 }
9093 CONTRACTL_END;
9094
9095 // Note that this check is recursive in order to produce descriptive check failure messages.
9096 Thread *pHoldingThread = m_pHoldingThread.Load();
9097 if (pThread == pHoldingThread)
9098 {
9099 CHECK_FAILF(("Lock %p (%s) is held by thread %d", this, m_description, pThread));
9100 }
9101
9102 if (pHoldingThread != NULL)
9103 {
9104 DeadlockAwareLock *pBlockingLock = pHoldingThread->m_pBlockingLock.Load();
9105 if (pBlockingLock != NULL)
9106 {
9107 CHECK_MSGF(pBlockingLock->CheckDeadlock(pThread),
9108 ("Deadlock: Lock %p (%s) is held by thread %d", this, m_description, pHoldingThread));
9109 }
9110 }
9111
9112 CHECK_OK;
9113}
9114
9115BOOL DeadlockAwareLock::CanEnterLock()
9116{
9117 Thread * pThread = GetThread();
9118
9119 CONSISTENCY_CHECK_MSG(pThread != NULL,
9120 "Cannot do deadlock detection on non-EE thread");
9121 CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
9122 "Cannot block on two locks at once");
9123
9124 {
9125 CrstHolder lock(&g_DeadlockAwareCrst);
9126
9127 // Look for deadlocks
9128 DeadlockAwareLock *pLock = this;
9129
9130 while (TRUE)
9131 {
9132 Thread * holdingThread = pLock->m_pHoldingThread;
9133
9134 if (holdingThread == pThread)
9135 {
9136 // Deadlock!
9137 return FALSE;
9138 }
9139 if (holdingThread == NULL)
9140 {
9141 // Lock is unheld
9142 break;
9143 }
9144
9145 pLock = holdingThread->m_pBlockingLock;
9146
9147 if (pLock == NULL)
9148 {
9149 // Thread is running free
9150 break;
9151 }
9152 }
9153
9154 return TRUE;
9155 }
9156}
9157
9158BOOL DeadlockAwareLock::TryBeginEnterLock()
9159{
9160 CONTRACTL
9161 {
9162 NOTHROW;
9163 GC_NOTRIGGER;
9164 }
9165 CONTRACTL_END;
9166
9167 Thread * pThread = GetThread();
9168
9169 CONSISTENCY_CHECK_MSG(pThread != NULL,
9170 "Cannot do deadlock detection on non-EE thread");
9171 CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
9172 "Cannot block on two locks at once");
9173
9174 {
9175 CrstHolder lock(&g_DeadlockAwareCrst);
9176
9177 // Look for deadlocks
9178 DeadlockAwareLock *pLock = this;
9179
9180 while (TRUE)
9181 {
9182 Thread * holdingThread = pLock->m_pHoldingThread;
9183
9184 if (holdingThread == pThread)
9185 {
9186 // Deadlock!
9187 return FALSE;
9188 }
9189 if (holdingThread == NULL)
9190 {
9191 // Lock is unheld
9192 break;
9193 }
9194
9195 pLock = holdingThread->m_pBlockingLock;
9196
9197 if (pLock == NULL)
9198 {
9199 // Thread is running free
9200 break;
9201 }
9202 }
9203
9204 pThread->m_pBlockingLock = this;
9205 }
9206
9207 return TRUE;
9208};
9209
9210void DeadlockAwareLock::BeginEnterLock()
9211{
9212 CONTRACTL
9213 {
9214 NOTHROW;
9215 GC_NOTRIGGER;
9216 }
9217 CONTRACTL_END;
9218
9219 Thread * pThread = GetThread();
9220
9221 CONSISTENCY_CHECK_MSG(pThread != NULL,
9222 "Cannot do deadlock detection on non-EE thread");
9223 CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
9224 "Cannot block on two locks at once");
9225
9226 {
9227 CrstHolder lock(&g_DeadlockAwareCrst);
9228
9229 // Look for deadlock loop
9230 CONSISTENCY_CHECK_MSG(CheckDeadlock(pThread), "Deadlock detected!");
9231
9232 pThread->m_pBlockingLock = this;
9233 }
9234};
9235
9236void DeadlockAwareLock::EndEnterLock()
9237{
9238 CONTRACTL
9239 {
9240 NOTHROW;
9241 GC_NOTRIGGER;
9242 }
9243 CONTRACTL_END;
9244
9245 Thread * pThread = GetThread();
9246
9247 CONSISTENCY_CHECK(m_pHoldingThread.Load() == NULL || m_pHoldingThread.Load() == pThread);
9248 CONSISTENCY_CHECK(pThread->m_pBlockingLock.Load() == this);
9249
9250 // No need to take a lock when going from blocking to holding. This
9251 // transition implies the lack of a deadlock that other threads can see.
9252 // (If they would see a deadlock after the transition, they would see
9253 // one before as well.)
9254
9255 m_pHoldingThread = pThread;
9256}
9257
9258void DeadlockAwareLock::LeaveLock()
9259{
9260 CONTRACTL
9261 {
9262 NOTHROW;
9263 GC_NOTRIGGER;
9264 }
9265 CONTRACTL_END;
9266
9267 CONSISTENCY_CHECK(m_pHoldingThread == GetThread());
9268 CONSISTENCY_CHECK(GetThread()->m_pBlockingLock.Load() == NULL);
9269
9270 m_pHoldingThread = NULL;
9271}
9272
9273
9274#ifdef _DEBUG
9275
9276// Normally, any thread we operate on has a Thread block in its TLS. But there are
9277// a few special threads we don't normally execute managed code on.
9278//
9279// There is a scenario where we run managed code on such a thread, which is when the
9280// DLL_THREAD_ATTACH notification of an (IJW?) module calls into managed code. This
9281// is incredibly dangerous. If a GC is provoked, the system may have trouble performing
9282// the GC because its threads aren't available yet.
9283static DWORD SpecialEEThreads[10];
9284static LONG cnt_SpecialEEThreads = 0;
9285
9286void dbgOnly_IdentifySpecialEEThread()
9287{
9288 WRAPPER_NO_CONTRACT;
9289
9290 LONG ourCount = FastInterlockIncrement(&cnt_SpecialEEThreads);
9291
9292 _ASSERTE(ourCount < (LONG) NumItems(SpecialEEThreads));
9293 SpecialEEThreads[ourCount-1] = ::GetCurrentThreadId();
9294}
9295
9296BOOL dbgOnly_IsSpecialEEThread()
9297{
9298 WRAPPER_NO_CONTRACT;
9299
9300 DWORD ourId = ::GetCurrentThreadId();
9301
9302 for (LONG i=0; i<cnt_SpecialEEThreads; i++)
9303 if (ourId == SpecialEEThreads[i])
9304 return TRUE;
9305
9306 // If we have an EE thread doing helper thread duty, then it is temporarily
9307 // 'special' too.
9308 #ifdef DEBUGGING_SUPPORTED
9309 if (g_pDebugInterface)
9310 {
9311 //<TODO>We probably should use Thread::GetThreadId</TODO>
9312 DWORD helperID = g_pDebugInterface->GetHelperThreadID();
9313 if (helperID == ourId)
9314 return TRUE;
9315 }
9316 #endif
9317
9318 //<TODO>Clean this up</TODO>
9319 if (GetThread() == NULL)
9320 return TRUE;
9321
9322
9323 return FALSE;
9324}
9325
9326#endif // _DEBUG
9327
9328
9329// There is an MDA which can detect illegal reentrancy into the CLR. For instance, if you call managed
9330// code from a native vectored exception handler, this might cause a reverse PInvoke to occur. But if the
9331// exception was triggered from code that was executing in cooperative GC mode, we now have GC holes and
9332// general corruption.
9333#ifdef MDA_SUPPORTED
9334NOINLINE BOOL HasIllegalReentrancyRare()
9335{
9336 CONTRACTL
9337 {
9338 NOTHROW;
9339 GC_TRIGGERS;
9340 ENTRY_POINT;
9341 MODE_ANY;
9342 }
9343 CONTRACTL_END;
9344
9345 Thread *pThread = GetThread();
9346 if (pThread == NULL || !pThread->PreemptiveGCDisabled())
9347 return FALSE;
9348
9349 BEGIN_ENTRYPOINT_VOIDRET;
9350 MDA_TRIGGER_ASSISTANT(Reentrancy, ReportViolation());
9351 END_ENTRYPOINT_VOIDRET;
9352 return TRUE;
9353}
9354#endif
9355
9356// Actually fire the Reentrancy probe, if warranted.
9357BOOL HasIllegalReentrancy()
9358{
9359 CONTRACTL
9360 {
9361 NOTHROW;
9362 GC_TRIGGERS;
9363 ENTRY_POINT;
9364 MODE_ANY;
9365 }
9366 CONTRACTL_END;
9367
9368#ifdef MDA_SUPPORTED
9369 if (NULL == MDA_GET_ASSISTANT(Reentrancy))
9370 return FALSE;
9371 return HasIllegalReentrancyRare();
9372#else
9373 return FALSE;
9374#endif // MDA_SUPPORTED
9375}
9376
9377
9378#endif // #ifndef DACCESS_COMPILE
9379
9380#ifdef DACCESS_COMPILE
9381
9382void
9383STATIC_DATA::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
9384{
9385 WRAPPER_NO_CONTRACT;
9386
9387 DAC_ENUM_STHIS(STATIC_DATA);
9388}
9389
9390void
9391Thread::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
9392{
9393 WRAPPER_NO_CONTRACT;
9394
9395 DAC_ENUM_VTHIS();
9396 if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
9397 {
9398 if (m_pDomain.IsValid())
9399 {
9400 m_pDomain->EnumMemoryRegions(flags, true);
9401 }
9402 }
9403
9404 if (m_debuggerFilterContext.IsValid())
9405 {
9406 m_debuggerFilterContext.EnumMem();
9407 }
9408
9409 OBJECTHANDLE_EnumMemoryRegions(m_LastThrownObjectHandle);
9410
9411 m_ExceptionState.EnumChainMemoryRegions(flags);
9412
9413 m_ThreadLocalBlock.EnumMemoryRegions(flags);
9414
9415 if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
9416 {
9417
9418 //
9419 // Allow all of the frames on the stack to enumerate
9420 // their memory.
9421 //
9422
9423 PTR_Frame frame = m_pFrame;
9424 while (frame.IsValid() &&
9425 frame.GetAddr() != dac_cast<TADDR>(FRAME_TOP))
9426 {
9427 frame->EnumMemoryRegions(flags);
9428 frame = frame->m_Next;
9429 }
9430 }
9431
9432 //
9433 // Try and do a stack trace and save information
9434 // for each part of the stack. This is very vulnerable
9435 // to memory problems so ignore all exceptions here.
9436 //
9437
9438 CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
9439 (
9440 EnumMemoryRegionsWorker(flags);
9441 );
9442}
9443
9444void
9445Thread::EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags)
9446{
9447 WRAPPER_NO_CONTRACT;
9448
9449 if (IsUnstarted())
9450 {
9451 return;
9452 }
9453
9454 T_CONTEXT context;
9455 BOOL DacGetThreadContext(Thread* thread, T_CONTEXT* context);
9456 REGDISPLAY regDisp;
9457 StackFrameIterator frameIter;
9458
9459 TADDR previousSP = 0; //start at zero; this allows first check to always succeed.
9460 TADDR currentSP;
9461
9462 // Init value. The Limit itself is not legal, so move one target pointer size to the smallest-magnitude
9463 // legal address.
9464 currentSP = dac_cast<TADDR>(m_CacheStackLimit) + sizeof(TADDR);
9465
9466 if (GetFilterContext())
9467 {
9468 context = *GetFilterContext();
9469 }
9470 else
9471 {
9472 DacGetThreadContext(this, &context);
9473 }
9474
9475 FillRegDisplay(&regDisp, &context);
9476 frameIter.Init(this, NULL, &regDisp, 0);
9477 while (frameIter.IsValid())
9478 {
9479 //
9480 // There are identical stack pointer checking semantics in code:ClrDataAccess::EnumMemWalkStackHelper
9481 // You ***MUST*** maintain identical semantics for both checks!
9482 //
9483
9484 // Before we continue, we should check to be sure we have a valid
9485 // stack pointer. This is to prevent stacks that are not walked
9486 // properly due to
9487 // a) stack corruption bugs
9488 // b) bad stack walks
9489 // from continuing on indefinitely.
9490 //
9491 // We will force SP to strictly increase.
9492 // this check can only happen for real stack frames (i.e. not for explicit frames that don't update the RegDisplay)
9493 // for ia64, SP may be equal, but in this case BSP must strictly decrease.
9494 // We will force SP to be properly aligned.
9495 // We will force SP to be in the correct range.
9496 //
9497 if (frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAMELESS_METHOD)
9498 {
9499 // This check cannot be applied to explicit frames; they may not move the SP at all.
9500 // Also, a single function can push several on the stack at a time with no guarantees about
9501 // ordering so we can't check that the addresses of the explicit frames are monotonically increasing.
9502 // There is the potential that the walk will not terminate if a set of explicit frames reference
9503 // each other circularly. While we could choose a limit for the number of explicit frames allowed
9504 // in a row like the total stack size/pointer size, we have no known problems with this scenario.
9505 // Thus for now we ignore it.
9506 currentSP = (TADDR)GetRegdisplaySP(&regDisp);
9507
9508 if (currentSP <= previousSP)
9509 {
9510 _ASSERTE(!"Target stack has been corrupted, SP for current frame must be larger than previous frame.");
9511 break;
9512 }
9513 }
9514
9515 // On windows desktop, the stack pointer should be a multiple
9516 // of pointer-size-aligned in the target address space
9517 if (currentSP % sizeof(TADDR) != 0)
9518 {
9519 _ASSERTE(!"Target stack has been corrupted, SP must be aligned.");
9520 break;
9521 }
9522
9523 if (!IsAddressInStack(currentSP))
9524 {
9525 _ASSERTE(!"Target stack has been corrupted, SP must in in the stack range.");
9526 break;
9527 }
9528
9529 // Enumerate the code around the call site to help debugger stack walking heuristics
9530 PCODE callEnd = GetControlPC(&regDisp);
9531 DacEnumCodeForStackwalk(callEnd);
9532
9533 if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
9534 {
9535 if (frameIter.m_crawl.GetAppDomain())
9536 {
9537 frameIter.m_crawl.GetAppDomain()->EnumMemoryRegions(flags, true);
9538 }
9539 }
9540
9541 // To stackwalk through funceval frames, we need to be sure to preserve the
9542 // DebuggerModule's m_pRuntimeDomainFile. This is the only case that doesn't use the current
9543 // vmDomainFile in code:DacDbiInterfaceImpl::EnumerateInternalFrames. The following
9544 // code mimics that function.
9545 // Allow failure, since we want to continue attempting to walk the stack regardless of the outcome.
9546 EX_TRY
9547 {
9548 if ((frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAME_FUNCTION) ||
9549 (frameIter.GetFrameState() == StackFrameIterator::SFITER_SKIPPED_FRAME_FUNCTION))
9550 {
9551 Frame * pFrame = frameIter.m_crawl.GetFrame();
9552 g_pDebugInterface->EnumMemoryRegionsIfFuncEvalFrame(flags, pFrame);
9553 }
9554 }
9555 EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
9556
9557 MethodDesc* pMD = frameIter.m_crawl.GetFunction();
9558 if (pMD != NULL)
9559 {
9560 pMD->EnumMemoryRegions(flags);
9561#if defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
9562 // Enumerate unwind info
9563 // Note that we don't do this based on the MethodDesc because in theory there isn't a 1:1 correspondence
9564 // between MethodDesc and code (and so unwind info, and even debug info). Eg., EnC creates new versions
9565 // of the code, but the MethodDesc always points at the latest version (which isn't necessarily
9566 // the one on the stack). In practice this is unlikely to be a problem since wanting a minidump
9567 // and making EnC edits are usually mutually exclusive.
9568 if (frameIter.m_crawl.IsFrameless())
9569 {
9570 frameIter.m_crawl.GetJitManager()->EnumMemoryRegionsForMethodUnwindInfo(flags, frameIter.m_crawl.GetCodeInfo());
9571 }
9572#endif // defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
9573 }
9574
9575 previousSP = currentSP;
9576
9577 if (frameIter.Next() != SWA_CONTINUE)
9578 {
9579 break;
9580 }
9581 }
9582}
9583
9584void
9585ThreadStore::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
9586{
9587 SUPPORTS_DAC;
9588 WRAPPER_NO_CONTRACT;
9589
9590 // This will write out the context of the s_pThreadStore. ie
9591 // just the pointer
9592 //
9593 s_pThreadStore.EnumMem();
9594 if (s_pThreadStore.IsValid())
9595 {
9596 // write out the whole ThreadStore structure
9597 DacEnumHostDPtrMem(s_pThreadStore);
9598
9599 // The thread list may be corrupt, so just
9600 // ignore exceptions during enumeration.
9601 EX_TRY
9602 {
9603 Thread* thread = s_pThreadStore->m_ThreadList.GetHead();
9604 LONG dwNumThreads = s_pThreadStore->m_ThreadCount;
9605
9606 for (LONG i = 0; (i < dwNumThreads) && (thread != NULL); i++)
9607 {
9608 // Even if this thread is totally broken and we can't enum it, struggle on.
9609 // If we do not, we will leave this loop and not enum stack memory for any further threads.
9610 CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED(
9611 thread->EnumMemoryRegions(flags);
9612 );
9613 thread = s_pThreadStore->m_ThreadList.GetNext(thread);
9614 }
9615 }
9616 EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
9617 }
9618}
9619
9620#endif // #ifdef DACCESS_COMPILE
9621
9622
9623#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
9624// For the purposes of tracking resource usage we implement a simple cpu resource usage counter on each
9625// thread. Every time QueryThreadProcessorUsage() is invoked it returns the amount of cpu time (a combination
9626// of user and kernel mode time) used since the last call to QueryThreadProcessorUsage(). The result is in 100
9627// nanosecond units.
9628ULONGLONG Thread::QueryThreadProcessorUsage()
9629{
9630 LIMITED_METHOD_CONTRACT;
9631
9632 // Get current values for the amount of kernel and user time used by this thread over its entire lifetime.
9633 FILETIME sCreationTime, sExitTime, sKernelTime, sUserTime;
9634 HANDLE hThread = GetThreadHandle();
9635 BOOL fResult = GetThreadTimes(hThread,
9636 &sCreationTime,
9637 &sExitTime,
9638 &sKernelTime,
9639 &sUserTime);
9640 if (!fResult)
9641 {
9642#ifdef _DEBUG
9643 ULONG error = GetLastError();
9644 printf("GetThreadTimes failed: %d; handle is %p\n", error, hThread);
9645 _ASSERTE(FALSE);
9646#endif
9647 return 0;
9648 }
9649
9650 // Combine the user and kernel times into a single value (FILETIME is just a structure representing an
9651 // unsigned int64 in two 32-bit pieces).
9652 _ASSERTE(sizeof(FILETIME) == sizeof(UINT64));
9653 ULONGLONG ullCurrentUsage = *(ULONGLONG*)&sKernelTime + *(ULONGLONG*)&sUserTime;
9654
9655 // Store the current processor usage as the new baseline, and retrieve the previous usage.
9656 ULONGLONG ullPreviousUsage = VolatileLoad(&m_ullProcessorUsageBaseline);
9657 if (ullPreviousUsage >= ullCurrentUsage ||
9658 ullPreviousUsage != (ULONGLONG)InterlockedCompareExchange64(
9659 (LONGLONG*)&m_ullProcessorUsageBaseline,
9660 (LONGLONG)ullCurrentUsage,
9661 (LONGLONG)ullPreviousUsage))
9662 {
9663 // another thread beat us to it, and already reported this usage.
9664 return 0;
9665 }
9666
9667 // The result is the difference between this value and the previous usage value.
9668 return ullCurrentUsage - ullPreviousUsage;
9669}
9670#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
9671