// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// SYNCBLK.CPP
//

//
// Definition of a SyncBlock and the SyncBlockCache which manages it
//


#include "common.h"

#include "vars.hpp"
#include "util.hpp"
#include "class.h"
#include "object.h"
#include "threads.h"
#include "excep.h"
#include "syncblk.h"
#include "interoputil.h"
#include "encee.h"
#include "perfcounters.h"
#include "eventtrace.h"
#include "dllimportcallback.h"
#include "comcallablewrapper.h"
#include "eeconfig.h"
#include "corhost.h"
#include "comdelegate.h"
#include "finalizerthread.h"

#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#endif // FEATURE_COMINTEROP
37
// Allocate 4K worth of SyncBlocks per chunk - as many as fit in a page once the
// SyncBlockArray's m_Next link is accounted for. Typically enough.
#define MAXSYNCBLOCK (0x1000-sizeof(void*))/sizeof(SyncBlock)
#define SYNC_TABLE_INITIAL_SIZE 250

//#define DUMP_SB
43
class SyncBlockArray
{
  public:
    SyncBlockArray *m_Next;
    BYTE            m_Blocks[MAXSYNCBLOCK * sizeof (SyncBlock)];
};

// For in-place constructor
BYTE g_SyncBlockCacheInstance[sizeof(SyncBlockCache)];

SPTR_IMPL (SyncBlockCache, SyncBlockCache, s_pSyncBlockCache);
55
56#ifndef DACCESS_COMPILE
57
58
59
60void SyncBlock::OnADUnload()
61{
62 WRAPPER_NO_CONTRACT;
63#ifdef EnC_SUPPORTED
64 if (m_pEnCInfo)
65 {
66 m_pEnCInfo->Cleanup();
67 m_pEnCInfo = NULL;
68 }
69#endif
70}
71
72#ifndef FEATURE_PAL
73// static
74SLIST_HEADER InteropSyncBlockInfo::s_InteropInfoStandbyList;
75#endif // !FEATURE_PAL
76
77InteropSyncBlockInfo::~InteropSyncBlockInfo()
78{
79 CONTRACTL
80 {
81 NOTHROW;
82 DESTRUCTOR_CHECK;
83 GC_TRIGGERS;
84 MODE_ANY;
85 }
86 CONTRACTL_END;
87
88 FreeUMEntryThunkOrInterceptStub();
89}
90
91#ifndef FEATURE_PAL
92// Deletes all items in code:s_InteropInfoStandbyList.
93void InteropSyncBlockInfo::FlushStandbyList()
94{
95 CONTRACTL
96 {
97 NOTHROW;
98 GC_TRIGGERS;
99 MODE_ANY;
100 }
101 CONTRACTL_END;
102
103 PSLIST_ENTRY pEntry = InterlockedFlushSList(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
104 while (pEntry)
105 {
106 PSLIST_ENTRY pNextEntry = pEntry->Next;
107
108 // make sure to use the global delete since the destructor has already run
109 ::delete (void *)pEntry;
110 pEntry = pNextEntry;
111 }
112}
113#endif // !FEATURE_PAL
114
115void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub()
116{
117 CONTRACTL
118 {
119 NOTHROW;
120 DESTRUCTOR_CHECK;
121 GC_TRIGGERS;
122 MODE_ANY;
123 }
124 CONTRACTL_END
125
126 if (!g_fEEShutDown)
127 {
128 void *pUMEntryThunk = GetUMEntryThunk();
129 if (pUMEntryThunk != NULL)
130 {
131 COMDelegate::RemoveEntryFromFPtrHash((UPTR)pUMEntryThunk);
132 UMEntryThunk::FreeUMEntryThunk((UMEntryThunk *)pUMEntryThunk);
133 }
134 else
135 {
136#if defined(_TARGET_X86_)
137 Stub *pInterceptStub = GetInterceptStub();
138 if (pInterceptStub != NULL)
139 {
140 // There may be multiple chained stubs
141 pInterceptStub->DecRef();
142 }
143#else // _TARGET_X86_
144 // Intercept stubs are currently not used on other platforms.
145 _ASSERTE(GetInterceptStub() == NULL);
146#endif // _TARGET_X86_
147 }
148 }
149 m_pUMEntryThunkOrInterceptStub = NULL;
150}
151
152#ifdef FEATURE_COMINTEROP
// Returns either NULL or an RCW on which IncrementUseCount has been called.
154RCW* InteropSyncBlockInfo::GetRCWAndIncrementUseCount()
155{
156 LIMITED_METHOD_CONTRACT;
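
    // m_pRCW encoding: NULL means no RCW has ever been set, 0x1 means the RCW has been
    // released, and a pointer with its low bit set means another thread currently holds
    // the field "lock" (see SetRawRCW below).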
157
158 DWORD dwSwitchCount = 0;
159 while (true)
160 {
161 RCW *pRCW = VolatileLoad(&m_pRCW);
162 if ((size_t)pRCW <= 0x1)
163 {
164 // the RCW never existed or has been released
165 return NULL;
166 }
167
168 if (((size_t)pRCW & 0x1) == 0x0)
169 {
170 // it looks like we have a chance, try to acquire the lock
171 RCW *pLockedRCW = (RCW *)((size_t)pRCW | 0x1);
172 if (InterlockedCompareExchangeT(&m_pRCW, pLockedRCW, pRCW) == pRCW)
173 {
174 // we have the lock on the m_pRCW field, now we can safely "use" the RCW
175 pRCW->IncrementUseCount();
176
177 // release the m_pRCW lock
178 VolatileStore(&m_pRCW, pRCW);
179
180 // and return the RCW
181 return pRCW;
182 }
183 }
184
185 // somebody else holds the lock, retry
186 __SwitchToThread(0, ++dwSwitchCount);
187 }
188}
189
190// Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL.
191void InteropSyncBlockInfo::SetRawRCW(RCW* pRCW)
192{
193 LIMITED_METHOD_CONTRACT;
194
195 if (pRCW != NULL)
196 {
197 // we never set two different RCWs on a single object
198 _ASSERTE(m_pRCW == NULL);
199 m_pRCW = pRCW;
200 }
201 else
202 {
203 DWORD dwSwitchCount = 0;
204 while (true)
205 {
206 RCW *pOldRCW = VolatileLoad(&m_pRCW);
207
208 if ((size_t)pOldRCW <= 0x1)
209 {
210 // the RCW never existed or has been released
211 VolatileStore(&m_pRCW, (RCW *)0x1);
212 return;
213 }
214
215 if (((size_t)pOldRCW & 0x1) == 0x0)
216 {
217 // it looks like we have a chance, set the RCW to 0x1
218 if (InterlockedCompareExchangeT(&m_pRCW, (RCW *)0x1, pOldRCW) == pOldRCW)
219 {
220 // we made it
221 return;
222 }
223 }
224
225 // somebody else holds the lock, retry
226 __SwitchToThread(0, ++dwSwitchCount);
227 }
228 }
229}
230#endif // FEATURE_COMINTEROP
231
232void UMEntryThunk::OnADUnload()
233{
234 LIMITED_METHOD_CONTRACT;
235 m_pObjectHandle = NULL;
236}
237
238#endif // !DACCESS_COMPILE
239
240PTR_SyncTableEntry SyncTableEntry::GetSyncTableEntry()
241{
242 LIMITED_METHOD_CONTRACT;
243 SUPPORTS_DAC;
244
245 return (PTR_SyncTableEntry)g_pSyncTable;
246}
247
248#ifndef DACCESS_COMPILE
249
250SyncTableEntry*& SyncTableEntry::GetSyncTableEntryByRef()
251{
252 LIMITED_METHOD_CONTRACT;
253 return g_pSyncTable;
254}
255
256/* static */
257SyncBlockCache*& SyncBlockCache::GetSyncBlockCache()
258{
259 LIMITED_METHOD_CONTRACT;
260
261 return s_pSyncBlockCache;
262}
263
264
265//----------------------------------------------------------------------------
266//
267// ThreadQueue Implementation
268//
269//----------------------------------------------------------------------------
270#endif //!DACCESS_COMPILE
271
272// Given a link in the chain, get the Thread that it represents
273/* static */
274inline PTR_WaitEventLink ThreadQueue::WaitEventLinkForLink(PTR_SLink pLink)
275{
276 LIMITED_METHOD_CONTRACT;
277 SUPPORTS_DAC;
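    // m_LinkSB is embedded inside WaitEventLink, so backing up by its offset
    // recovers the containing WaitEventLink from a raw queue link.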
278 return (PTR_WaitEventLink) (((PTR_BYTE) pLink) - offsetof(WaitEventLink, m_LinkSB));
279}
280
281#ifndef DACCESS_COMPILE
282
283// Unlink the head of the Q. We are always in the SyncBlock's critical
284// section.
285/* static */
286inline WaitEventLink *ThreadQueue::DequeueThread(SyncBlock *psb)
287{
288 CONTRACTL
289 {
290 NOTHROW;
291 GC_NOTRIGGER;
292 MODE_ANY;
293 CAN_TAKE_LOCK;
294 }
295 CONTRACTL_END;
296
297 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
298 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
299 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
300
301 WaitEventLink *ret = NULL;
302 SLink *pLink = psb->m_Link.m_pNext;
303
304 if (pLink)
305 {
306 psb->m_Link.m_pNext = pLink->m_pNext;
307#ifdef _DEBUG
308 pLink->m_pNext = (SLink *)POISONC;
309#endif
310 ret = WaitEventLinkForLink(pLink);
311 _ASSERTE(ret->m_WaitSB == psb);
312 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
313 }
314 return ret;
315}
316
317// Enqueue is the slow one. We have to find the end of the Q since we don't
318// want to burn storage for this in the SyncBlock.
319/* static */
320inline void ThreadQueue::EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb)
321{
322 CONTRACTL
323 {
324 NOTHROW;
325 GC_NOTRIGGER;
326 MODE_ANY;
327 CAN_TAKE_LOCK;
328 }
329 CONTRACTL_END;
330
331 _ASSERTE (pWaitEventLink->m_LinkSB.m_pNext == NULL);
332
333 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
334 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
335 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
336
337 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength++);
338
339 SLink *pPrior = &psb->m_Link;
340
341 while (pPrior->m_pNext)
342 {
343 // We shouldn't already be in the waiting list!
344 _ASSERTE(pPrior->m_pNext != &pWaitEventLink->m_LinkSB);
345
346 pPrior = pPrior->m_pNext;
347 }
348 pPrior->m_pNext = &pWaitEventLink->m_LinkSB;
349}
350
351
352// Wade through the SyncBlock's list of waiting threads and remove the
353// specified thread.
354/* static */
355BOOL ThreadQueue::RemoveThread (Thread *pThread, SyncBlock *psb)
356{
357 CONTRACTL
358 {
359 NOTHROW;
360 GC_NOTRIGGER;
361 MODE_ANY;
362 }
363 CONTRACTL_END;
364
365 BOOL res = FALSE;
366
367 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
368 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
369 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
370
371 SLink *pPrior = &psb->m_Link;
372 SLink *pLink;
373 WaitEventLink *pWaitEventLink;
374
375 while ((pLink = pPrior->m_pNext) != NULL)
376 {
377 pWaitEventLink = WaitEventLinkForLink(pLink);
378 if (pWaitEventLink->m_Thread == pThread)
379 {
380 pPrior->m_pNext = pLink->m_pNext;
381#ifdef _DEBUG
382 pLink->m_pNext = (SLink *)POISONC;
383#endif
384 _ASSERTE(pWaitEventLink->m_WaitSB == psb);
385 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
386 res = TRUE;
387 break;
388 }
389 pPrior = pLink;
390 }
391 return res;
392}
393
394#endif //!DACCESS_COMPILE
395
396#ifdef DACCESS_COMPILE
397// Enumerates the threads in the queue from front to back by calling
398// pCallbackFunction on each one
399/* static */
400void ThreadQueue::EnumerateThreads(SyncBlock *psb, FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction, void* pUserData)
401{
402 CONTRACTL
403 {
404 NOTHROW;
405 GC_NOTRIGGER;
406 MODE_ANY;
407 }
408 CONTRACTL_END;
409 SUPPORTS_DAC;
410
411 PTR_SLink pLink = psb->m_Link.m_pNext;
412 PTR_WaitEventLink pWaitEventLink;
413
414 while (pLink != NULL)
415 {
416 pWaitEventLink = WaitEventLinkForLink(pLink);
417
418 pCallbackFunction(pWaitEventLink->m_Thread, pUserData);
419 pLink = pLink->m_pNext;
420 }
421}
422#endif //DACCESS_COMPILE
423
424#ifndef DACCESS_COMPILE
425
426// ***************************************************************************
427//
428// Ephemeral Bitmap Helper
429//
430// ***************************************************************************
431
432#define card_size 32
433
434#define card_word_width 32
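
// Each card covers card_size (32) sync table entries, and each DWORD of the ephemeral
// bitmap holds card_word_width (32) card bits, so one bitmap DWORD covers 1024 entries
// (see BitMapSize below).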
435
436size_t CardIndex (size_t card)
437{
438 LIMITED_METHOD_CONTRACT;
439 return card_size * card;
440}
441
442size_t CardOf (size_t idx)
443{
444 LIMITED_METHOD_CONTRACT;
445 return idx / card_size;
446}
447
448size_t CardWord (size_t card)
449{
450 LIMITED_METHOD_CONTRACT;
451 return card / card_word_width;
452}
453inline
454unsigned CardBit (size_t card)
455{
456 LIMITED_METHOD_CONTRACT;
457 return (unsigned)(card % card_word_width);
458}
459
460inline
461void SyncBlockCache::SetCard (size_t card)
462{
463 WRAPPER_NO_CONTRACT;
464 m_EphemeralBitmap [CardWord (card)] =
465 (m_EphemeralBitmap [CardWord (card)] | (1 << CardBit (card)));
466}
467
468inline
469void SyncBlockCache::ClearCard (size_t card)
470{
471 WRAPPER_NO_CONTRACT;
472 m_EphemeralBitmap [CardWord (card)] =
473 (m_EphemeralBitmap [CardWord (card)] & ~(1 << CardBit (card)));
474}
475
476inline
477BOOL SyncBlockCache::CardSetP (size_t card)
478{
479 WRAPPER_NO_CONTRACT;
480 return ( m_EphemeralBitmap [ CardWord (card) ] & (1 << CardBit (card)));
481}
482
483inline
484void SyncBlockCache::CardTableSetBit (size_t idx)
485{
486 WRAPPER_NO_CONTRACT;
487 SetCard (CardOf (idx));
488}
489
490
491size_t BitMapSize (size_t cacheSize)
492{
493 LIMITED_METHOD_CONTRACT;
494
495 return (cacheSize + card_size * card_word_width - 1)/ (card_size * card_word_width);
496}
497
498// ***************************************************************************
499//
500// SyncBlockCache class implementation
501//
502// ***************************************************************************
503
504SyncBlockCache::SyncBlockCache()
505 : m_pCleanupBlockList(NULL),
506 m_FreeBlockList(NULL),
507
      // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
      // If you remove this flag, we will switch to preemptive mode when entering
      // g_criticalSection, which means all functions that enter it will become
      // GC_TRIGGERS. (This includes all uses of LockHolder around
      // SyncBlockCache::GetSyncBlockCache().) So be sure to update the contracts if
      // you remove this flag.
513 m_CacheLock(CrstSyncBlockCache, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
514
515 m_FreeCount(0),
516 m_ActiveCount(0),
517 m_SyncBlocks(0),
518 m_FreeSyncBlock(0),
519 m_FreeSyncTableIndex(1),
520 m_FreeSyncTableList(0),
521 m_SyncTableSize(SYNC_TABLE_INITIAL_SIZE),
522 m_OldSyncTables(0),
523 m_bSyncBlockCleanupInProgress(FALSE),
524 m_EphemeralBitmap(0)
525{
526 CONTRACTL
527 {
528 CONSTRUCTOR_CHECK;
529 THROWS;
530 GC_NOTRIGGER;
531 MODE_ANY;
532 INJECT_FAULT(COMPlusThrowOM());
533 }
534 CONTRACTL_END;
535}
536
537
// This method is no longer called.
539SyncBlockCache::~SyncBlockCache()
540{
541 CONTRACTL
542 {
543 DESTRUCTOR_CHECK;
544 NOTHROW;
545 GC_NOTRIGGER;
546 MODE_ANY;
547 }
548 CONTRACTL_END;
549
550 // Clear the list the fast way.
551 m_FreeBlockList = NULL;
552 //<TODO>@todo we can clear this fast too I guess</TODO>
553 m_pCleanupBlockList = NULL;
554
555 // destruct all arrays
556 while (m_SyncBlocks)
557 {
558 SyncBlockArray *next = m_SyncBlocks->m_Next;
559 delete m_SyncBlocks;
560 m_SyncBlocks = next;
561 }
562
563 // Also, now is a good time to clean up all the old tables which we discarded
564 // when we overflowed them.
565 SyncTableEntry* arr;
566 while ((arr = m_OldSyncTables) != 0)
567 {
568 m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
569 delete arr;
570 }
571}
572
573
// When the GC determines that an object is dead, the low bit of the
// m_Object field of its SyncTableEntry is set; however, the entry is not
// cleaned up right away because we can't do the COM interop cleanup at GC time.
// Instead it is put on a cleanup list, and at a later time (typically during
// finalization) that list is cleaned up.
//
580void SyncBlockCache::CleanupSyncBlocks()
581{
582 STATIC_CONTRACT_THROWS;
583 STATIC_CONTRACT_MODE_COOPERATIVE;
584
585 _ASSERTE(GetThread() == FinalizerThread::GetFinalizerThread());
586
587 // Set the flag indicating sync block cleanup is in progress.
588 // IMPORTANT: This must be set before the sync block cleanup bit is reset on the thread.
589 m_bSyncBlockCleanupInProgress = TRUE;
590
591 struct Param
592 {
593 SyncBlockCache *pThis;
594 SyncBlock* psb;
595#ifdef FEATURE_COMINTEROP
596 RCW* pRCW;
597#endif
598 } param;
599 param.pThis = this;
600 param.psb = NULL;
601#ifdef FEATURE_COMINTEROP
602 param.pRCW = NULL;
603#endif
604
605 EE_TRY_FOR_FINALLY(Param *, pParam, &param)
606 {
607 // reset the flag
608 FinalizerThread::GetFinalizerThread()->ResetSyncBlockCleanup();
609
610 // walk the cleanup list and cleanup 'em up
611 while ((pParam->psb = pParam->pThis->GetNextCleanupSyncBlock()) != NULL)
612 {
613#ifdef FEATURE_COMINTEROP
614 InteropSyncBlockInfo* pInteropInfo = pParam->psb->GetInteropInfoNoCreate();
615 if (pInteropInfo)
616 {
617 pParam->pRCW = pInteropInfo->GetRawRCW();
618 if (pParam->pRCW)
619 {
620 // We should have initialized the cleanup list with the
621 // first RCW cache we created
622 _ASSERTE(g_pRCWCleanupList != NULL);
623
624 g_pRCWCleanupList->AddWrapper(pParam->pRCW);
625
626 pParam->pRCW = NULL;
627 pInteropInfo->SetRawRCW(NULL);
628 }
629 }
630#endif // FEATURE_COMINTEROP
631
632 // Delete the sync block.
633 pParam->pThis->DeleteSyncBlock(pParam->psb);
634 pParam->psb = NULL;
635
636 // pulse GC mode to allow GC to perform its work
637 if (FinalizerThread::GetFinalizerThread()->CatchAtSafePointOpportunistic())
638 {
639 FinalizerThread::GetFinalizerThread()->PulseGCMode();
640 }
641 }
642
643#ifdef FEATURE_COMINTEROP
644 // Now clean up the rcw's sorted by context
645 if (g_pRCWCleanupList != NULL)
646 g_pRCWCleanupList->CleanupAllWrappers();
647#endif // FEATURE_COMINTEROP
648 }
649 EE_FINALLY
650 {
651 // We are finished cleaning up the sync blocks.
652 m_bSyncBlockCleanupInProgress = FALSE;
653
654#ifdef FEATURE_COMINTEROP
655 if (param.pRCW)
656 param.pRCW->Cleanup();
657#endif
658
659 if (param.psb)
660 DeleteSyncBlock(param.psb);
661 } EE_END_FINALLY;
662}
663
// When an appdomain is unloading, we need to ensure that any pointers into
// it from sync blocks (e.g. from COM Callable Wrappers) are properly
// updated so that they fail gracefully if another call is made through
// them. This is what this routine does.
//
669VOID SyncBlockCache::CleanupSyncBlocksInAppDomain(AppDomain *pDomain)
670{
671 CONTRACTL
672 {
673 GC_TRIGGERS;
674 THROWS;
675 MODE_COOPERATIVE;
676 }
677 CONTRACTL_END;
678
679#ifndef DACCESS_COMPILE
680 _ASSERTE(IsFinalizerThread());
681
682 ADIndex index = pDomain->GetIndex();
683
684 ADID id = pDomain->GetId();
685
    // Make sure we don't race with anybody updating the table
    DWORD maxIndex;

    {
        // Taking this lock here avoids races where m_FreeSyncTableIndex is being updated
        // (a volatile read would have been enough, however).
        SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
        maxIndex = m_FreeSyncTableIndex;
    }
695 BOOL bModifiedCleanupList=FALSE;
696 STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "To cleanup - %d sync blocks", maxIndex);
697 DWORD nb;
698 for (nb = 1; nb < maxIndex; nb++)
699 {
700 // This is a check for syncblocks that were already cleaned up.
701 if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
702 {
703 continue;
704 }
705
706 // If the syncblock pointer is invalid, nothing more we can do.
707 SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
708 if (!pSyncBlock)
709 {
710 continue;
711 }
712
713 // If we happen to have a CCW living in the AppDomain being cleaned, then we need to neuter it.
714 // We do this check early because we have to neuter CCWs for agile objects as well.
715 // Neutering the object simply means we disconnect the object from the CCW so it can no longer
716 // be used. When its ref-count falls to zero, it gets cleaned up.
717 STRESS_LOG1(LF_APPDOMAIN, LL_INFO1000000, "SyncBlock %p.", pSyncBlock);
718 InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
719 if (pInteropInfo)
720 {
721#ifdef FEATURE_COMINTEROP
722 ComCallWrapper* pWrap = pInteropInfo->GetCCW();
723 if (pWrap)
724 {
725 SimpleComCallWrapper* pSimpleWrapper = pWrap->GetSimpleWrapper();
726 _ASSERTE(pSimpleWrapper);
727
728 if (pSimpleWrapper->GetDomainID() == id)
729 {
730 pSimpleWrapper->Neuter();
731 }
732 }
733#endif // FEATURE_COMINTEROP
734
735 UMEntryThunk* umThunk=(UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
736
737 if (umThunk && umThunk->GetDomainId()==id)
738 {
739 umThunk->OnADUnload();
740 STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "Thunk %x unloaded", umThunk);
741 }
742
743#ifdef FEATURE_COMINTEROP
744 {
745 // we need to take RCWCache lock to avoid the race with another thread which is
746 // removing the RCW from cache, decoupling it from the object, and deleting the RCW.
747 RCWCache* pCache = pDomain->GetRCWCache();
748 _ASSERTE(pCache);
749 RCWCache::LockHolder lh(pCache);
750 RCW* pRCW = pInteropInfo->GetRawRCW();
751 if (pRCW && pRCW->GetDomain()==pDomain)
752 {
753 // We should have initialized the cleanup list with the
754 // first RCW cache we created
755 _ASSERTE(g_pRCWCleanupList != NULL);
756
757 g_pRCWCleanupList->AddWrapper(pRCW);
758
759 pCache->RemoveWrapper(pRCW);
760 pInteropInfo->SetRawRCW(NULL);
761 bModifiedCleanupList=TRUE;
762 }
763 }
764#endif // FEATURE_COMINTEROP
765 }
766
767 // NOTE: this will only notify the sync block if it is non-agile and living in the unloading domain.
768 // Agile objects that are still alive will not get notification!
769 if (pSyncBlock->GetAppDomainIndex() == index)
770 {
771 pSyncBlock->OnADUnload();
772 }
773 }
774 STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "AD cleanup - %d sync blocks done", nb);
    // Make sure nobody decreased m_FreeSyncTableIndex behind our back (otherwise we
    // would read past the end of the table).
777 _ASSERTE(maxIndex <= m_FreeSyncTableIndex);
778
779 if (bModifiedCleanupList)
780 GetThread()->SetSyncBlockCleanup();
781
782 while (GetThread()->RequireSyncBlockCleanup()) //we also might have something in the cleanup list
783 CleanupSyncBlocks();
784
785#ifdef _DEBUG
786 {
787 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
788 DWORD maxIndex = m_FreeSyncTableIndex;
789 for (DWORD nb = 1; nb < maxIndex; nb++)
790 {
791 if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
792 {
793 continue;
794 }
795
796 // If the syncblock pointer is invalid, nothing more we can do.
797 SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
798 if (!pSyncBlock)
799 {
800 continue;
801 }
802 InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
803 if (pInteropInfo)
804 {
805 UMEntryThunk* umThunk=(UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
806
807 if (umThunk && umThunk->GetDomainId()==id)
808 {
809 _ASSERTE(!umThunk->GetObjectHandle());
810 }
811 }
812
813 }
814 }
815#endif
816
817#endif
818}
819
820
821// create the sync block cache
822/* static */
823void SyncBlockCache::Attach()
824{
825 LIMITED_METHOD_CONTRACT;
826}
827
828// destroy the sync block cache
// This method is no longer called.
830#if 0
831void SyncBlockCache::DoDetach()
832{
833 CONTRACTL
834 {
835 INSTANCE_CHECK;
836 NOTHROW;
837 GC_NOTRIGGER;
838 MODE_ANY;
839 }
840 CONTRACTL_END;
841
842 Object *pObj;
843 ObjHeader *pHeader;
844
845
846 // Ensure that all the critical sections are released. This is particularly
847 // important in DEBUG, because all critical sections are threaded onto a global
848 // list which would otherwise be corrupted.
849 for (DWORD i=0; i<m_FreeSyncTableIndex; i++)
850 if (((size_t)SyncTableEntry::GetSyncTableEntry()[i].m_Object & 1) == 0)
851 if (SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock)
852 {
853 // <TODO>@TODO -- If threads are executing during this detach, they will
854 // fail in various ways:
855 //
856 // 1) They will race between us tearing these data structures down
857 // as they navigate through them.
858 //
859 // 2) They will unexpectedly see the syncblock destroyed, even though
860 // they hold the synchronization lock, or have been exposed out
861 // to COM, etc.
862 //
863 // 3) The instance's hash code may change during the shutdown.
864 //
865 // The correct solution involves suspending the threads earlier, but
866 // changing our suspension code so that it allows pumping if we are
867 // in a shutdown case.
868 //
869 // </TODO>
870
871 // Make sure this gets updated because the finalizer thread & others
872 // will continue to run for a short while more during our shutdown.
873 pObj = SyncTableEntry::GetSyncTableEntry()[i].m_Object;
874 pHeader = pObj->GetHeader();
875
876 {
877 ENTER_SPIN_LOCK(pHeader);
878 ADIndex appDomainIndex = pHeader->GetAppDomainIndex();
879 if (! appDomainIndex.m_dwIndex)
880 {
881 SyncBlock* syncBlock = pObj->PassiveGetSyncBlock();
882 if (syncBlock)
883 appDomainIndex = syncBlock->GetAppDomainIndex();
884 }
885
886 pHeader->ResetIndex();
887
888 if (appDomainIndex.m_dwIndex)
889 {
890 pHeader->SetIndex(appDomainIndex.m_dwIndex<<SBLK_APPDOMAIN_SHIFT);
891 }
892 LEAVE_SPIN_LOCK(pHeader);
893 }
894
895 SyncTableEntry::GetSyncTableEntry()[i].m_Object = (Object *)(m_FreeSyncTableList | 1);
896 m_FreeSyncTableList = i << 1;
897
898 DeleteSyncBlock(SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock);
899 }
900}
901#endif
902
903// destroy the sync block cache
904/* static */
// This method is no longer called.
906#if 0
907void SyncBlockCache::Detach()
908{
909 SyncBlockCache::GetSyncBlockCache()->DoDetach();
910}
911#endif
912
913
914// create the sync block cache
915/* static */
916void SyncBlockCache::Start()
917{
918 CONTRACTL
919 {
920 THROWS;
921 GC_NOTRIGGER;
922 MODE_ANY;
923 INJECT_FAULT(COMPlusThrowOM(););
924 }
925 CONTRACTL_END;
926
927 DWORD* bm = new DWORD [BitMapSize(SYNC_TABLE_INITIAL_SIZE+1)];
928
929 memset (bm, 0, BitMapSize (SYNC_TABLE_INITIAL_SIZE+1)*sizeof(DWORD));
930
931 SyncTableEntry::GetSyncTableEntryByRef() = new SyncTableEntry[SYNC_TABLE_INITIAL_SIZE+1];
932#ifdef _DEBUG
933 for (int i=0; i<SYNC_TABLE_INITIAL_SIZE+1; i++) {
934 SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock = NULL;
935 }
936#endif
937
938 SyncTableEntry::GetSyncTableEntry()[0].m_SyncBlock = 0;
939 SyncBlockCache::GetSyncBlockCache() = new (&g_SyncBlockCacheInstance) SyncBlockCache;
940
941 SyncBlockCache::GetSyncBlockCache()->m_EphemeralBitmap = bm;
942
943#ifndef FEATURE_PAL
944 InitializeSListHead(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
945#endif // !FEATURE_PAL
946}
947
948
949// destroy the sync block cache
950/* static */
951void SyncBlockCache::Stop()
952{
953 CONTRACTL
954 {
955 NOTHROW;
956 GC_NOTRIGGER;
957 MODE_ANY;
958 }
959 CONTRACTL_END;
960
961 // cache must be destroyed first, since it can traverse the table to find all the
962 // sync blocks which are live and thus must have their critical sections destroyed.
963 if (SyncBlockCache::GetSyncBlockCache())
964 {
965 delete SyncBlockCache::GetSyncBlockCache();
966 SyncBlockCache::GetSyncBlockCache() = 0;
967 }
968
969 if (SyncTableEntry::GetSyncTableEntry())
970 {
971 delete SyncTableEntry::GetSyncTableEntry();
972 SyncTableEntry::GetSyncTableEntryByRef() = 0;
973 }
974}
975
976
977void SyncBlockCache::InsertCleanupSyncBlock(SyncBlock* psb)
978{
979 CONTRACTL
980 {
981 INSTANCE_CHECK;
982 NOTHROW;
983 GC_NOTRIGGER;
984 MODE_ANY;
985 }
986 CONTRACTL_END;
987
988 // free up the threads that are waiting before we use the link
989 // for other purposes
990 if (psb->m_Link.m_pNext != NULL)
991 {
992 while (ThreadQueue::DequeueThread(psb) != NULL)
993 continue;
994 }
995
996#ifdef FEATURE_COMINTEROP
997 if (psb->m_pInteropInfo)
998 {
        // called during GC,
        // so only do minor cleanup
1001 MinorCleanupSyncBlockComData(psb->m_pInteropInfo);
1002 }
1003#endif // FEATURE_COMINTEROP
1004
1005 // This method will be called only by the GC thread
1006 //<TODO>@todo add an assert for the above statement</TODO>
1007 // we don't need to lock here
1008 //EnterCacheLock();
1009
1010 psb->m_Link.m_pNext = m_pCleanupBlockList;
1011 m_pCleanupBlockList = &psb->m_Link;
1012
1013 // we don't need a lock here
1014 //LeaveCacheLock();
1015}
1016
1017SyncBlock* SyncBlockCache::GetNextCleanupSyncBlock()
1018{
1019 LIMITED_METHOD_CONTRACT;
1020
1021 // we don't need a lock here,
1022 // as this is called only on the finalizer thread currently
1023
1024 SyncBlock *psb = NULL;
1025 if (m_pCleanupBlockList)
1026 {
1027 // get the actual sync block pointer
1028 psb = (SyncBlock *) (((BYTE *) m_pCleanupBlockList) - offsetof(SyncBlock, m_Link));
1029 m_pCleanupBlockList = m_pCleanupBlockList->m_pNext;
1030 }
1031 return psb;
1032}
1033
1034
1035// returns and removes the next free syncblock from the list
1036// the cache lock must be entered to call this
1037SyncBlock *SyncBlockCache::GetNextFreeSyncBlock()
1038{
1039 CONTRACTL
1040 {
1041 INJECT_FAULT(COMPlusThrowOM());
1042 THROWS;
1043 GC_NOTRIGGER;
1044 MODE_ANY;
1045 }
1046 CONTRACTL_END;
1047
1048#ifdef _DEBUG // Instrumentation for OOM fault injection testing
1049 delete new char;
1050#endif
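
    // SyncBlocks are recycled from m_FreeBlockList when possible; otherwise they are
    // carved out of the current SyncBlockArray chunk, and a fresh chunk is allocated
    // once the current one is exhausted.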
1051
1052 SyncBlock *psb;
1053 SLink *plst = m_FreeBlockList;
1054
1055 m_ActiveCount++;
1056
1057 if (plst)
1058 {
1059 m_FreeBlockList = m_FreeBlockList->m_pNext;
1060
1061 // shouldn't be 0
1062 m_FreeCount--;
1063
1064 // get the actual sync block pointer
1065 psb = (SyncBlock *) (((BYTE *) plst) - offsetof(SyncBlock, m_Link));
1066
1067 return psb;
1068 }
1069 else
1070 {
1071 if ((m_SyncBlocks == NULL) || (m_FreeSyncBlock >= MAXSYNCBLOCK))
1072 {
1073#ifdef DUMP_SB
1074// LogSpewAlways("Allocating new syncblock array\n");
1075// DumpSyncBlockCache();
1076#endif
1077 SyncBlockArray* newsyncblocks = new(SyncBlockArray);
1078 if (!newsyncblocks)
1079 COMPlusThrowOM ();
1080
1081 newsyncblocks->m_Next = m_SyncBlocks;
1082 m_SyncBlocks = newsyncblocks;
1083 m_FreeSyncBlock = 0;
1084 }
1085 return &(((SyncBlock*)m_SyncBlocks->m_Blocks)[m_FreeSyncBlock++]);
1086 }
1087
1088}
1089
1090void SyncBlockCache::Grow()
1091{
1092 CONTRACTL
1093 {
1094 INSTANCE_CHECK;
1095 THROWS;
1096 GC_NOTRIGGER;
1097 MODE_COOPERATIVE;
1098 INJECT_FAULT(COMPlusThrowOM(););
1099 }
1100 CONTRACTL_END;
1101
1102 STRESS_LOG0(LF_SYNC, LL_INFO10000, "SyncBlockCache::NewSyncBlockSlot growing SyncBlockCache \n");
1103
1104 NewArrayHolder<SyncTableEntry> newSyncTable (NULL);
1105 NewArrayHolder<DWORD> newBitMap (NULL);
1106 DWORD * oldBitMap;
1107
1108 // Compute the size of the new synctable. Normally, we double it - unless
1109 // doing so would create slots with indices too high to fit within the
1110 // mask. If so, we create a synctable up to the mask limit. If we're
1111 // already at the mask limit, then caller is out of luck.
1112 DWORD newSyncTableSize;
1113 if (m_SyncTableSize <= (MASK_SYNCBLOCKINDEX >> 1))
1114 {
1115 newSyncTableSize = m_SyncTableSize * 2;
1116 }
1117 else
1118 {
1119 newSyncTableSize = MASK_SYNCBLOCKINDEX;
1120 }
1121
1122 if (!(newSyncTableSize > m_SyncTableSize)) // Make sure we actually found room to grow!
1123 {
1124 COMPlusThrowOM();
1125 }
1126
1127 newSyncTable = new SyncTableEntry[newSyncTableSize];
1128 newBitMap = new DWORD[BitMapSize (newSyncTableSize)];
1129
1130
1131 {
1132 //! From here on, we assume that we will succeed and start doing global side-effects.
1133 //! Any operation that could fail must occur before this point.
1134 CANNOTTHROWCOMPLUSEXCEPTION();
1135 FAULT_FORBID();
1136
1137 newSyncTable.SuppressRelease();
1138 newBitMap.SuppressRelease();
1139
1140
        // We chain the old tables because we can't delete
        // them before all the threads are stopped
        // (i.e. before the next GC)
1144 SyncTableEntry::GetSyncTableEntry() [0].m_Object = (Object *)m_OldSyncTables;
1145 m_OldSyncTables = SyncTableEntry::GetSyncTableEntry();
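        // The old table is linked through slot 0's m_Object and is freed later, once we
        // have exclusive access again (see the destructor and GCWeakPtrScan).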
1146
1147 memset (newSyncTable, 0, newSyncTableSize*sizeof (SyncTableEntry));
1148 memset (newBitMap, 0, BitMapSize (newSyncTableSize)*sizeof (DWORD));
1149 CopyMemory (newSyncTable, SyncTableEntry::GetSyncTableEntry(),
1150 m_SyncTableSize*sizeof (SyncTableEntry));
1151
1152 CopyMemory (newBitMap, m_EphemeralBitmap,
1153 BitMapSize (m_SyncTableSize)*sizeof (DWORD));
1154
1155 oldBitMap = m_EphemeralBitmap;
1156 m_EphemeralBitmap = newBitMap;
1157 delete[] oldBitMap;
1158
1159 _ASSERTE((m_SyncTableSize & MASK_SYNCBLOCKINDEX) == m_SyncTableSize);
        // note: we do not care if another thread does not see the new size;
        // however, we really do not want it to see the new size without seeing the new array
        //@TODO do we still leak here if two threads come here at the same time ?
1163 FastInterlockExchangePointer(&SyncTableEntry::GetSyncTableEntryByRef(), newSyncTable.GetValue());
1164
1165 m_FreeSyncTableIndex++;
1166
1167 m_SyncTableSize = newSyncTableSize;
1168
1169#ifdef _DEBUG
1170 static int dumpSBOnResize = -1;
1171
1172 if (dumpSBOnResize == -1)
1173 dumpSBOnResize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnResize);
1174
1175 if (dumpSBOnResize)
1176 {
1177 LogSpewAlways("SyncBlockCache resized\n");
1178 DumpSyncBlockCache();
1179 }
1180#endif
1181 }
1182}
1183
1184DWORD SyncBlockCache::NewSyncBlockSlot(Object *obj)
1185{
1186 CONTRACTL
1187 {
1188 INSTANCE_CHECK;
1189 THROWS;
1190 GC_NOTRIGGER;
1191 MODE_COOPERATIVE;
1192 INJECT_FAULT(COMPlusThrowOM(););
1193 }
1194 CONTRACTL_END;
1195 _ASSERTE(m_CacheLock.OwnedByCurrentThread()); // GetSyncBlock takes the lock, make sure no one else does.
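
    // Free sync table slots are chained through their m_Object fields: a free slot's
    // m_Object holds the previous list head (an index shifted left by one) with the low
    // bit set as a "free" tag, and m_FreeSyncTableList holds the head index shifted left
    // by one, which is why it is shifted right by one below.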
1196
1197 DWORD indexNewEntry;
1198 if (m_FreeSyncTableList)
1199 {
1200 indexNewEntry = (DWORD)(m_FreeSyncTableList >> 1);
1201 _ASSERTE ((size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & 1);
1202 m_FreeSyncTableList = (size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & ~1;
1203 }
1204 else if ((indexNewEntry = (DWORD)(m_FreeSyncTableIndex)) >= m_SyncTableSize)
1205 {
1206 // This is kept out of line to keep stuff like the C++ EH prolog (needed for holders) off
1207 // of the common path.
1208 Grow();
1209 }
1210 else
1211 {
1212#ifdef _DEBUG
1213 static int dumpSBOnNewIndex = -1;
1214
1215 if (dumpSBOnNewIndex == -1)
1216 dumpSBOnNewIndex = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnNewIndex);
1217
1218 if (dumpSBOnNewIndex)
1219 {
1220 LogSpewAlways("SyncBlockCache index incremented\n");
1221 DumpSyncBlockCache();
1222 }
1223#endif
1224 m_FreeSyncTableIndex ++;
1225 }
1226
1227
1228 CardTableSetBit (indexNewEntry);
1229
1230 // In debug builds the m_SyncBlock at indexNewEntry should already be null, since we should
1231 // start out with a null table and always null it out on delete.
1232 _ASSERTE(SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock == NULL);
1233 SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock = NULL;
1234 SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_Object = obj;
1235
1236 _ASSERTE(indexNewEntry != 0);
1237
1238 return indexNewEntry;
1239}
1240
1241
1242// free a used sync block, only called from CleanupSyncBlocks.
1243void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb)
1244{
1245 CONTRACTL
1246 {
1247 INSTANCE_CHECK;
1248 THROWS;
1249 GC_TRIGGERS;
1250 MODE_ANY;
1251 INJECT_FAULT(COMPlusThrowOM());
1252 }
1253 CONTRACTL_END;
1254
1255 // clean up comdata
1256 if (psb->m_pInteropInfo)
1257 {
1258#ifdef FEATURE_COMINTEROP
1259 CleanupSyncBlockComData(psb->m_pInteropInfo);
1260#endif // FEATURE_COMINTEROP
1261
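        // Outside of shutdown, the InteropSyncBlockInfo is destructed in place and its
        // memory is pushed onto the lock-free standby list (freed later by
        // FlushStandbyList) instead of being deleted immediately.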
1262#ifndef FEATURE_PAL
1263 if (g_fEEShutDown)
1264 {
1265 delete psb->m_pInteropInfo;
1266 }
1267 else
1268 {
1269 psb->m_pInteropInfo->~InteropSyncBlockInfo();
1270 InterlockedPushEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList, (PSLIST_ENTRY)psb->m_pInteropInfo);
1271 }
1272#else // !FEATURE_PAL
1273 delete psb->m_pInteropInfo;
1274#endif // !FEATURE_PAL
1275 }
1276
1277#ifdef EnC_SUPPORTED
1278 // clean up EnC info
1279 if (psb->m_pEnCInfo)
1280 psb->m_pEnCInfo->Cleanup();
1281#endif // EnC_SUPPORTED
1282
1283 // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
1284 // operator delete).
1285 delete psb;
1286
    // synchronize with the consumers
    // <TODO>@todo we don't really need a lock here, we can come up
    // with some simple algo to avoid taking a lock </TODO>
1290 {
1291 SyncBlockCache::LockHolder lh(this);
1292
1293 DeleteSyncBlockMemory(psb);
1294 }
1295}
1296
1297
1298// returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already)
1299void SyncBlockCache::DeleteSyncBlockMemory(SyncBlock *psb)
1300{
1301 CONTRACTL
1302 {
1303 INSTANCE_CHECK;
1304 NOTHROW;
1305 GC_NOTRIGGER;
1306 FORBID_FAULT;
1307 }
1308 CONTRACTL_END
1309
1310 m_ActiveCount--;
1311 m_FreeCount++;
1312
1313 psb->m_Link.m_pNext = m_FreeBlockList;
1314 m_FreeBlockList = &psb->m_Link;
1315
1316}
1317
1318// free a used sync block
1319void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb)
1320{
1321 CONTRACTL
1322 {
1323 INSTANCE_CHECK;
1324 NOTHROW;
1325 GC_NOTRIGGER;
1326 MODE_ANY;
1327 }
1328 CONTRACTL_END;
1329
1330 // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
1331 // operator delete).
1332 delete psb;
1333
1334 m_ActiveCount--;
1335 m_FreeCount++;
1336
1337 psb->m_Link.m_pNext = m_FreeBlockList;
1338 m_FreeBlockList = &psb->m_Link;
1339}
1340
1341void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
1342{
1343 CONTRACTL
1344 {
1345 INSTANCE_CHECK;
1346 NOTHROW;
1347 GC_NOTRIGGER;
1348 MODE_ANY;
1349 }
1350 CONTRACTL_END;
1351
1352
1353 // First delete the obsolete arrays since we have exclusive access
1354 BOOL fSetSyncBlockCleanup = FALSE;
1355
1356 SyncTableEntry* arr;
1357 while ((arr = m_OldSyncTables) != NULL)
1358 {
1359 m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
1360 delete arr;
1361 }
1362
1363#ifdef DUMP_SB
1364 LogSpewAlways("GCWeakPtrScan starting\n");
1365#endif
1366
1367#ifdef VERIFY_HEAP
1368 if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
1369 STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
1370#endif
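
    // For an ephemeral GC, only entries whose cards are set (i.e. entries that may refer
    // to ephemeral objects) are scanned; for a GC of the oldest generation, every entry
    // is scanned.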
1371
1372 if (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() < GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
1373 {
1374#ifdef VERIFY_HEAP
        // for VSW 294550: we saw stale object references in SyncBlkCache, so we want to make sure the card
        // table logic above works correctly so that every ephemeral entry is promoted.
        // For verification, we make a copy of the sync table in the relocation phase, promote it using the
        // slow approach, and compare the result with the original one
1379 DWORD freeSyncTalbeIndexCopy = m_FreeSyncTableIndex;
1380 SyncTableEntry * syncTableShadow = NULL;
1381 if ((g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK) && !((ScanContext*)lp1)->promotion)
1382 {
1383 syncTableShadow = new(nothrow) SyncTableEntry [m_FreeSyncTableIndex];
1384 if (syncTableShadow)
1385 {
1386 memcpy (syncTableShadow, SyncTableEntry::GetSyncTableEntry(), m_FreeSyncTableIndex * sizeof (SyncTableEntry));
1387 }
1388 }
1389#endif //VERIFY_HEAP
1390
1391 //scan the bitmap
1392 size_t dw = 0;
1393 while (1)
1394 {
1395 while (dw < BitMapSize (m_SyncTableSize) && (m_EphemeralBitmap[dw]==0))
1396 {
1397 dw++;
1398 }
1399 if (dw < BitMapSize (m_SyncTableSize))
1400 {
1401 //found one
1402 for (int i = 0; i < card_word_width; i++)
1403 {
1404 size_t card = i+dw*card_word_width;
1405 if (CardSetP (card))
1406 {
1407 BOOL clear_card = TRUE;
1408 for (int idx = 0; idx < card_size; idx++)
1409 {
1410 size_t nb = CardIndex (card) + idx;
1411 if (( nb < m_FreeSyncTableIndex) && (nb > 0))
1412 {
1413 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1414 if (o && !((size_t)o & 1))
1415 {
1416 if (GCHeapUtilities::GetGCHeap()->IsEphemeral (o))
1417 {
1418 clear_card = FALSE;
1419
1420 GCWeakPtrScanElement ((int)nb, scanProc,
1421 lp1, lp2, fSetSyncBlockCleanup);
1422 }
1423 }
1424 }
1425 }
1426 if (clear_card)
1427 ClearCard (card);
1428 }
1429 }
1430 dw++;
1431 }
1432 else
1433 break;
1434 }
1435
1436#ifdef VERIFY_HEAP
        // for VSW 294550: we saw stale object references in SyncBlkCache, so we want to make sure the card
        // table logic above works correctly so that every ephemeral entry is promoted. To verify, we make a
        // copy of the sync table, promote it using the slow approach, and compare the result with the real one
1440 if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
1441 {
1442 if (syncTableShadow)
1443 {
1444 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1445 {
1446 Object **keyv = (Object **) &syncTableShadow[nb].m_Object;
1447
1448 if (((size_t) *keyv & 1) == 0)
1449 {
1450 (*scanProc) (keyv, NULL, lp1, lp2);
1451 SyncBlock *pSB = syncTableShadow[nb].m_SyncBlock;
1452 if (*keyv != 0 && (!pSB || !pSB->IsIDisposable()))
1453 {
1454 if (syncTableShadow[nb].m_Object != SyncTableEntry::GetSyncTableEntry()[nb].m_Object)
1455 DebugBreak ();
1456 }
1457 }
1458 }
1459 delete []syncTableShadow;
1460 syncTableShadow = NULL;
1461 }
1462 if (freeSyncTalbeIndexCopy != m_FreeSyncTableIndex)
1463 DebugBreak ();
1464 }
1465#endif //VERIFY_HEAP
1466
1467 }
1468 else
1469 {
1470 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1471 {
1472 GCWeakPtrScanElement (nb, scanProc, lp1, lp2, fSetSyncBlockCleanup);
1473 }
1474
1475
1476 }
1477
1478 if (fSetSyncBlockCleanup)
1479 {
        // mark the finalizer thread to indicate that sync block cleanup is required
1481 FinalizerThread::GetFinalizerThread()->SetSyncBlockCleanup();
1482 FinalizerThread::EnableFinalization();
1483 }
1484
1485#if defined(VERIFY_HEAP)
1486 if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
1487 {
1488 if (((ScanContext*)lp1)->promotion)
1489 {
1490
1491 for (int nb = 1; nb < (int)m_FreeSyncTableIndex; nb++)
1492 {
1493 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1494 if (((size_t)o & 1) == 0)
1495 {
1496 o->Validate();
1497 }
1498 }
1499 }
1500 }
1501#endif // VERIFY_HEAP
1502}
1503
1504/* Scan the weak pointers in the SyncBlockEntry and report them to the GC. If the
1505 reference is dead, then return TRUE */
1506
1507BOOL SyncBlockCache::GCWeakPtrScanElement (int nb, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2,
1508 BOOL& cleanup)
1509{
1510 CONTRACTL
1511 {
1512 INSTANCE_CHECK;
1513 NOTHROW;
1514 GC_NOTRIGGER;
1515 MODE_ANY;
1516 }
1517 CONTRACTL_END;
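
    // After the weak reference is reported to scanProc: if the object died and the entry
    // has a SyncBlock, the block is queued for cleanup on the finalizer thread; if the
    // object survived but its SyncBlock reports IsIDisposable, the block is freed right
    // away and the object's header index is reset. In both cases the sync table slot is
    // returned to the free list and TRUE is returned; otherwise the entry is kept.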
1518
1519 Object **keyv = (Object **) &SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1520
1521#ifdef DUMP_SB
1522 struct Param
1523 {
1524 Object **keyv;
1525 char *name;
1526 } param;
1527 param.keyv = keyv;
1528
1529 PAL_TRY(Param *, pParam, &param) {
1530 if (! *pParam->keyv)
1531 pParam->name = "null";
1532 else if ((size_t) *pParam->keyv & 1)
1533 pParam->name = "free";
1534 else {
1535 pParam->name = (*pParam->keyv)->GetClass()->GetDebugClassName();
1536 if (strlen(pParam->name) == 0)
1537 pParam->name = "<INVALID>";
1538 }
1539 } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1540 param.name = "<INVALID>";
1541 }
1542 PAL_ENDTRY
1543 LogSpewAlways("[%4.4d]: %8.8x, %s\n", nb, *keyv, param.name);
1544#endif
1545
1546 if (((size_t) *keyv & 1) == 0)
1547 {
1548#ifdef VERIFY_HEAP
1549 if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
1550 {
1551 STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "scanning syncblk[%d, %p, %p]\n", nb, (size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock, (size_t)*keyv);
1552 }
1553#endif
1554
1555 (*scanProc) (keyv, NULL, lp1, lp2);
1556 SyncBlock *pSB = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
1557 if ((*keyv == 0 ) || (pSB && pSB->IsIDisposable()))
1558 {
1559#ifdef VERIFY_HEAP
1560 if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
1561 {
1562 STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "freeing syncblk[%d, %p, %p]\n", nb, (size_t)pSB, (size_t)*keyv);
1563 }
1564#endif
1565
1566 if (*keyv)
1567 {
1568 _ASSERTE (pSB);
1569 GCDeleteSyncBlock(pSB);
1570 //clean the object syncblock header
1571 ((Object*)(*keyv))->GetHeader()->GCResetIndex();
1572 }
1573 else if (pSB)
1574 {
1575
1576 cleanup = TRUE;
1577 // insert block into cleanup list
1578 InsertCleanupSyncBlock (SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock);
1579#ifdef DUMP_SB
1580 LogSpewAlways(" Cleaning up block at %4.4d\n", nb);
1581#endif
1582 }
1583
1584 // delete the entry
1585#ifdef DUMP_SB
1586 LogSpewAlways(" Deleting block at %4.4d\n", nb);
1587#endif
1588 SyncTableEntry::GetSyncTableEntry()[nb].m_Object = (Object *)(m_FreeSyncTableList | 1);
1589 m_FreeSyncTableList = nb << 1;
1590 SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock = NULL;
1591 return TRUE;
1592 }
1593 else
1594 {
1595#ifdef DUMP_SB
1596 LogSpewAlways(" Keeping block at %4.4d with oref %8.8x\n", nb, *keyv);
1597#endif
1598 }
1599 }
1600 return FALSE;
1601}
1602
1603void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
1604{
1605 CONTRACTL
1606 {
1607 INSTANCE_CHECK;
1608 NOTHROW;
1609 GC_NOTRIGGER;
1610 MODE_ANY;
1611 }
1612 CONTRACTL_END;
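
    // If the GC of the oldest generation demoted objects, re-set the cards for entries
    // whose objects now live in a younger generation so that the next ephemeral scan
    // will visit them.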
1613
1614 if (demoting &&
1615 (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() ==
1616 GCHeapUtilities::GetGCHeap()->GetMaxGeneration()))
1617 {
1618 //scan the bitmap
1619 size_t dw = 0;
1620 while (1)
1621 {
1622 while (dw < BitMapSize (m_SyncTableSize) &&
1623 (m_EphemeralBitmap[dw]==(DWORD)~0))
1624 {
1625 dw++;
1626 }
1627 if (dw < BitMapSize (m_SyncTableSize))
1628 {
1629 //found one
1630 for (int i = 0; i < card_word_width; i++)
1631 {
1632 size_t card = i+dw*card_word_width;
1633 if (!CardSetP (card))
1634 {
1635 for (int idx = 0; idx < card_size; idx++)
1636 {
1637 size_t nb = CardIndex (card) + idx;
1638 if (( nb < m_FreeSyncTableIndex) && (nb > 0))
1639 {
1640 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1641 if (o && !((size_t)o & 1))
1642 {
1643 if (GCHeapUtilities::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
1644 {
1645 SetCard (card);
1646 break;
1647
1648 }
1649 }
1650 }
1651 }
1652 }
1653 }
1654 dw++;
1655 }
1656 else
1657 break;
1658 }
1659 }
1660}
1661
1662
1663#if defined (VERIFY_HEAP)
1664
1665#ifndef _DEBUG
1666#ifdef _ASSERTE
1667#undef _ASSERTE
1668#endif
1669#define _ASSERTE(c) if (!(c)) DebugBreak()
1670#endif
1671
1672void SyncBlockCache::VerifySyncTableEntry()
1673{
1674 CONTRACTL
1675 {
1676 INSTANCE_CHECK;
1677 NOTHROW;
1678 GC_NOTRIGGER;
1679 MODE_ANY;
1680 }
1681 CONTRACTL_END;
1682
1683 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1684 {
1685 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1686 // if the slot was just allocated, the object may still be null
1687 if (o && (((size_t)o & 1) == 0))
1688 {
1689 //there is no need to verify next object's header because this is called
1690 //from verify_heap, which will verify every object anyway
1691 o->Validate(TRUE, FALSE);
1692
1693 //
1694 // This loop is just a heuristic to try to catch errors, but it is not 100%.
1695 // To prevent false positives, we weaken our assert below to exclude the case
1696 // where the index is still NULL, but we've reached the end of our loop.
1697 //
1698 static const DWORD max_iterations = 100;
1699 DWORD loop = 0;
1700
1701 for (; loop < max_iterations; loop++)
1702 {
                // The syncblock index may still be being updated by another thread.
1704 if (o->GetHeader()->GetHeaderSyncBlockIndex() != 0)
1705 {
1706 break;
1707 }
1708 __SwitchToThread(0, CALLER_LIMITS_SPINNING);
1709 }
1710
1711 DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
1712 _ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
1713 _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
1714 }
1715 }
1716}
1717
1718#ifndef _DEBUG
1719#undef _ASSERTE
1720#define _ASSERTE(expr) ((void)0)
1721#endif // _DEBUG
1722
1723#endif // VERIFY_HEAP
1724
1725#ifdef _DEBUG
1726
1727void DumpSyncBlockCache()
1728{
1729 STATIC_CONTRACT_NOTHROW;
1730
1731 SyncBlockCache *pCache = SyncBlockCache::GetSyncBlockCache();
1732
1733 LogSpewAlways("Dumping SyncBlockCache size %d\n", pCache->m_FreeSyncTableIndex);
1734
1735 static int dumpSBStyle = -1;
1736 if (dumpSBStyle == -1)
1737 dumpSBStyle = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpStyle);
1738 if (dumpSBStyle == 0)
1739 return;
1740
1741 BOOL isString = FALSE;
1742 DWORD objectCount = 0;
1743 DWORD slotCount = 0;
1744
1745 for (DWORD nb = 1; nb < pCache->m_FreeSyncTableIndex; nb++)
1746 {
1747 isString = FALSE;
1748 char buffer[1024], buffer2[1024];
1749 LPCUTF8 descrip = "null";
1750 SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
1751 Object *oref = (Object *) pEntry->m_Object;
1752 if (((size_t) oref & 1) != 0)
1753 {
1754 descrip = "free";
1755 oref = 0;
1756 }
1757 else
1758 {
1759 ++slotCount;
1760 if (oref)
1761 {
1762 ++objectCount;
1763
1764 struct Param
1765 {
1766 LPCUTF8 descrip;
1767 Object *oref;
1768 char *buffer2;
1769 UINT cch2;
1770 BOOL isString;
1771 } param;
1772 param.descrip = descrip;
1773 param.oref = oref;
1774 param.buffer2 = buffer2;
1775 param.cch2 = COUNTOF(buffer2);
1776 param.isString = isString;
1777
1778 PAL_TRY(Param *, pParam, &param)
1779 {
1780 pParam->descrip = pParam->oref->GetMethodTable()->GetDebugClassName();
1781 if (strlen(pParam->descrip) == 0)
1782 pParam->descrip = "<INVALID>";
1783 else if (pParam->oref->GetMethodTable() == g_pStringClass)
1784 {
1785 sprintf_s(pParam->buffer2, pParam->cch2, "%s (%S)", pParam->descrip, ObjectToSTRINGREF((StringObject*)pParam->oref)->GetBuffer());
1786 pParam->descrip = pParam->buffer2;
1787 pParam->isString = TRUE;
1788 }
1789 }
1790 PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1791 param.descrip = "<INVALID>";
1792 }
1793 PAL_ENDTRY
1794
1795 descrip = param.descrip;
1796 isString = param.isString;
1797 }
1798 ADIndex idx;
1799 if (oref)
1800 idx = pEntry->m_Object->GetHeader()->GetRawAppDomainIndex();
1801 if (! idx.m_dwIndex && pEntry->m_SyncBlock)
1802 idx = pEntry->m_SyncBlock->GetAppDomainIndex();
1803 if (idx.m_dwIndex && ! SystemDomain::System()->TestGetAppDomainAtIndex(idx))
1804 {
1805 sprintf_s(buffer, COUNTOF(buffer), "** unloaded (%3.3x) %s", idx.m_dwIndex, descrip);
1806 descrip = buffer;
1807 }
1808 else
1809 {
1810 sprintf_s(buffer, COUNTOF(buffer), "(AD %3.3x) %s", idx.m_dwIndex, descrip);
1811 descrip = buffer;
1812 }
1813 }
1814 if (dumpSBStyle < 2)
1815 LogSpewAlways("[%4.4d]: %8.8x %s\n", nb, oref, descrip);
1816 else if (dumpSBStyle == 2 && ! isString)
1817 LogSpewAlways("[%4.4d]: %s\n", nb, descrip);
1818 }
1819 LogSpewAlways("Done dumping SyncBlockCache used slots: %d, objects: %d\n", slotCount, objectCount);
1820}
1821#endif
1822
1823// ***************************************************************************
1824//
1825// ObjHeader class implementation
1826//
1827// ***************************************************************************
1828
1829#if defined(ENABLE_CONTRACTS_IMPL)
1830// The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
1831// comparisons between takes & releases (and to provide debugging info to the
1832// developer). Ask the syncblock for its lock contract pointer, if the
1833// syncblock exists. Otherwise, use the MethodTable* from the Object. That's not great,
1834// as it's not unique, so we might miss unbalanced lock takes/releases from
1835// different objects of the same type. However, our hands are tied, and we can't
1836// do much better.
1837void * ObjHeader::GetPtrForLockContract()
1838{
1839 if (GetHeaderSyncBlockIndex() == 0)
1840 {
1841 return (void *) GetBaseObject()->GetMethodTable();
1842 }
1843
1844 return PassiveGetSyncBlock()->GetPtrForLockContract();
1845}
1846#endif // defined(ENABLE_CONTRACTS_IMPL)
1847
1848// this enters the monitor of an object
1849void ObjHeader::EnterObjMonitor()
1850{
1851 WRAPPER_NO_CONTRACT;
1852 GetSyncBlock()->EnterMonitor();
1853}
1854
1855// Non-blocking version of above
1856BOOL ObjHeader::TryEnterObjMonitor(INT32 timeOut)
1857{
1858 WRAPPER_NO_CONTRACT;
1859 return GetSyncBlock()->TryEnterMonitor(timeOut);
1860}
1861
1862AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelperSpin(Thread* pCurThread)
1863{
1864 CONTRACTL{
1865 SO_TOLERANT;
1866 NOTHROW;
1867 GC_NOTRIGGER;
1868 MODE_COOPERATIVE;
1869 } CONTRACTL_END;
1870
1871 // Note: EnterObjMonitorHelper must be called before this function (see below)
1872
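    // On a single-processor machine spinning cannot help (the lock owner cannot make
    // progress while we occupy the processor), so report contention immediately instead
    // of spinning.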
1873 if (g_SystemInfo.dwNumberOfProcessors == 1)
1874 {
1875 return AwareLock::EnterHelperResult_Contention;
1876 }
1877
1878 YieldProcessorNormalizationInfo normalizationInfo;
1879 const DWORD spinCount = g_SpinConstants.dwMonitorSpinCount;
1880 for (DWORD spinIteration = 0; spinIteration < spinCount; ++spinIteration)
1881 {
1882 AwareLock::SpinWait(normalizationInfo, spinIteration);
1883
1884 LONG oldValue = m_SyncBlockValue.LoadWithoutBarrier();
1885
1886 // Since spinning has begun, chances are good that the monitor has already switched to AwareLock mode, so check for that
1887 // case first
1888 if (oldValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
1889 {
1890 // If we have a hash code already, we need to create a sync block
1891 if (oldValue & BIT_SBLK_IS_HASHCODE)
1892 {
1893 return AwareLock::EnterHelperResult_UseSlowPath;
1894 }
1895
1896 SyncBlock *syncBlock = g_pSyncTable[oldValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
1897 _ASSERTE(syncBlock != NULL);
1898 AwareLock *awareLock = &syncBlock->m_Monitor;
1899
1900 AwareLock::EnterHelperResult result = awareLock->TryEnterBeforeSpinLoopHelper(pCurThread);
1901 if (result != AwareLock::EnterHelperResult_Contention)
1902 {
1903 return result;
1904 }
1905
1906 ++spinIteration;
1907 if (spinIteration < spinCount)
1908 {
1909 while (true)
1910 {
1911 AwareLock::SpinWait(normalizationInfo, spinIteration);
1912
1913 ++spinIteration;
1914 if (spinIteration >= spinCount)
1915 {
1916 // The last lock attempt for this spin will be done after the loop
1917 break;
1918 }
1919
1920 result = awareLock->TryEnterInsideSpinLoopHelper(pCurThread);
1921 if (result == AwareLock::EnterHelperResult_Entered)
1922 {
1923 return AwareLock::EnterHelperResult_Entered;
1924 }
1925 if (result == AwareLock::EnterHelperResult_UseSlowPath)
1926 {
1927 break;
1928 }
1929 }
1930 }
1931
1932 if (awareLock->TryEnterAfterSpinLoopHelper(pCurThread))
1933 {
1934 return AwareLock::EnterHelperResult_Entered;
1935 }
1936 break;
1937 }
1938
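        // The header still has the thin-lock layout. If the spin-lock bit, thread id and
        // recursion bits are all clear, the lock is free: try to CAS our thread id into
        // the header. Thread ids too large for SBLK_MASK_LOCK_THREADID must use the slow
        // path instead.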
1939 DWORD tid = pCurThread->GetThreadId();
1940 if ((oldValue & (BIT_SBLK_SPIN_LOCK +
1941 SBLK_MASK_LOCK_THREADID +
1942 SBLK_MASK_LOCK_RECLEVEL)) == 0)
1943 {
1944 if (tid > SBLK_MASK_LOCK_THREADID)
1945 {
1946 return AwareLock::EnterHelperResult_UseSlowPath;
1947 }
1948
1949 LONG newValue = oldValue | tid;
1950 if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
1951 {
1952 pCurThread->IncLockCount();
1953 return AwareLock::EnterHelperResult_Entered;
1954 }
1955
1956 continue;
1957 }
1958
1959 // EnterObjMonitorHelper handles the thin lock recursion case. If it's not that case, it won't become that case. If
1960 // EnterObjMonitorHelper failed to increment the recursion level, it will go down the slow path and won't come here. So,
1961 // no need to check the recursion case here.
1962 _ASSERTE(
1963 // The header is transitioning - treat this as if the lock was taken
1964 oldValue & BIT_SBLK_SPIN_LOCK ||
1965 // Here we know we have the "thin lock" layout, but the lock is not free.
1966 // It can't be the recursion case though, because the call to EnterObjMonitorHelper prior to this would have taken
1967 // the slow path in the recursive case.
1968 tid != (DWORD)(oldValue & SBLK_MASK_LOCK_THREADID));
1969 }
1970
1971 return AwareLock::EnterHelperResult_Contention;
1972}
1973
1974BOOL ObjHeader::LeaveObjMonitor()
1975{
1976 CONTRACTL
1977 {
1978 NOTHROW;
1979 GC_TRIGGERS;
1980 MODE_COOPERATIVE;
1981 }
1982 CONTRACTL_END;
1983
    // this function switches to preemptive mode, so we need to protect the object on some paths
1985 OBJECTREF thisObj = ObjectToOBJECTREF (GetBaseObject ());
1986
1987 DWORD dwSwitchCount = 0;
1988
1989 for (;;)
1990 {
1991 AwareLock::LeaveHelperAction action = thisObj->GetHeader ()->LeaveObjMonitorHelper(GetThread());
1992
1993 switch(action)
1994 {
1995 case AwareLock::LeaveHelperAction_None:
1996 // We are done
1997 return TRUE;
1998 case AwareLock::LeaveHelperAction_Signal:
1999 {
2000 // Signal the event
2001 SyncBlock *psb = thisObj->GetHeader ()->PassiveGetSyncBlock();
2002 if (psb != NULL)
2003 psb->QuickGetMonitor()->Signal();
2004 }
2005 return TRUE;
2006 case AwareLock::LeaveHelperAction_Yield:
2007 YieldProcessor();
2008 continue;
2009 case AwareLock::LeaveHelperAction_Contention:
2010 // Some thread is updating the syncblock value.
2011 {
2012 //protect the object before switching mode
2013 GCPROTECT_BEGIN (thisObj);
2014 GCX_PREEMP();
2015 __SwitchToThread(0, ++dwSwitchCount);
2016 GCPROTECT_END ();
2017 }
2018 continue;
2019 default:
2020 // Must be an error otherwise - ignore it
2021 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
2022 return FALSE;
2023 }
2024 }
2025}
2026
// The only difference between LeaveObjMonitor and LeaveObjMonitorAtException is the
// switch to preemptive mode around __SwitchToThread
2029BOOL ObjHeader::LeaveObjMonitorAtException()
2030{
2031 CONTRACTL
2032 {
2033 NOTHROW;
2034 GC_NOTRIGGER;
2035 MODE_COOPERATIVE;
2036 }
2037 CONTRACTL_END;
2038
2039 DWORD dwSwitchCount = 0;
2040
2041 for (;;)
2042 {
2043 AwareLock::LeaveHelperAction action = LeaveObjMonitorHelper(GetThread());
2044
2045 switch(action)
2046 {
2047 case AwareLock::LeaveHelperAction_None:
2048 // We are done
2049 return TRUE;
2050 case AwareLock::LeaveHelperAction_Signal:
2051 {
2052 // Signal the event
2053 SyncBlock *psb = PassiveGetSyncBlock();
2054 if (psb != NULL)
2055 psb->QuickGetMonitor()->Signal();
2056 }
2057 return TRUE;
2058 case AwareLock::LeaveHelperAction_Yield:
2059 YieldProcessor();
2060 continue;
2061 case AwareLock::LeaveHelperAction_Contention:
2062 // Some thread is updating the syncblock value.
2063 //
2064 // We never toggle GC mode while holding the spinlock (BeginNoTriggerGC/EndNoTriggerGC
2065 // in EnterSpinLock/ReleaseSpinLock ensures it). Thus we do not need to switch to preemptive
2066 // while waiting on the spinlock.
2067 //
2068 {
2069 __SwitchToThread(0, ++dwSwitchCount);
2070 }
2071 continue;
2072 default:
2073 // Must be an error otherwise - ignore it
2074 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
2075 return FALSE;
2076 }
2077 }
2078}
2079
2080#endif //!DACCESS_COMPILE
2081
2082// Returns TRUE if the lock is owned and FALSE otherwise
2083// threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
2084// acquisitionCount is set to the number of times the lock needs to be released before
2085// it is unowned
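// Illustrative use (a sketch only; pObj stands for a hypothetical Object*):
//
//   DWORD owningThreadId, acquisitionCount;
//   if (pObj->GetHeader()->GetThreadOwningMonitorLock(&owningThreadId, &acquisitionCount))
//   {
//       // owningThreadId is the Thread::GetThreadId() of the owner; the monitor must be
//       // released acquisitionCount times before it becomes free.
//   }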
2086BOOL ObjHeader::GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount)
2087{
2088 CONTRACTL
2089 {
2090 NOTHROW;
2091 GC_NOTRIGGER;
2092 SO_TOLERANT;
2093#ifndef DACCESS_COMPILE
2094 if (!IsGCSpecialThread ()) {MODE_COOPERATIVE;} else {MODE_ANY;}
2095#endif
2096 }
2097 CONTRACTL_END;
2098 SUPPORTS_DAC;
2099
2100
2101 DWORD bits = GetBits();
2102
2103 if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2104 {
2105 if (bits & BIT_SBLK_IS_HASHCODE)
2106 {
2107 //
2108 // This thread does not own the lock.
2109 //
2110 *pThreadId = 0;
2111 *pAcquisitionCount = 0;
2112 return FALSE;
2113 }
2114 else
2115 {
2116 //
2117 // We have a syncblk
2118 //
2119 DWORD index = bits & MASK_SYNCBLOCKINDEX;
2120 SyncBlock* psb = g_pSyncTable[(int)index].m_SyncBlock;
2121
2122 _ASSERTE(psb->GetMonitor() != NULL);
2123 Thread* pThread = psb->GetMonitor()->GetHoldingThread();
2124 if(pThread == NULL)
2125 {
2126 *pThreadId = 0;
2127 *pAcquisitionCount = 0;
2128 return FALSE;
2129 }
2130 else
2131 {
2132 *pThreadId = pThread->GetThreadId();
2133 *pAcquisitionCount = psb->GetMonitor()->GetRecursionLevel();
2134 return TRUE;
2135 }
2136 }
2137 }
2138 else
2139 {
2140 //
2141 // We have a thinlock
2142 //
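        // In the thin-lock form (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX clear) the header word packs, roughly:
        // the owning thread's small id in the SBLK_MASK_LOCK_THREADID bits, the recursion level in the
        // SBLK_MASK_LOCK_RECLEVEL bits (shifted down by SBLK_RECLEVEL_SHIFT), and the appdomain index bits
        // above SBLK_APPDOMAIN_SHIFT. The code below simply masks and shifts those fields out.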
2143
2144 DWORD lockThreadId, recursionLevel;
2145 lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2146 recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
        // If the thread ID is 0, the recursion level must be zero,
        // but the thread ID doesn't have to be valid because the lock could be orphaned.
2149 _ASSERTE (lockThreadId != 0 || recursionLevel == 0 );
2150
2151 *pThreadId = lockThreadId;
2152 if(lockThreadId != 0)
2153 {
2154 // in the header, the recursionLevel of 0 means the lock is owned once
2155 // (this differs from m_Recursion in the AwareLock)
2156 *pAcquisitionCount = recursionLevel + 1;
2157 return TRUE;
2158 }
2159 else
2160 {
2161 *pAcquisitionCount = 0;
2162 return FALSE;
2163 }
2164 }
2165}
2166
2167#ifndef DACCESS_COMPILE
2168
2169#ifdef MP_LOCKS
2170DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
2171{
2172 // NOTE: This function cannot have a dynamic contract. If it does, the contract's
2173 // destructor will reset the CLR debug state to what it was before entering the
2174 // function, which will undo the BeginNoTriggerGC() call below.
2175 SCAN_SCOPE_BEGIN;
2176 STATIC_CONTRACT_GC_NOTRIGGER;
2177
2178#ifdef _DEBUG
2179 int i = 0;
2180#endif
2181
2182 DWORD dwSwitchCount = 0;
2183
2184 while (TRUE)
2185 {
2186#ifdef _DEBUG
2187#ifdef _WIN64
2188 // Give 64bit more time because there isn't a remoting fast path now, and we've hit this assert
2189 // needlessly in CLRSTRESS.
2190 if (i++ > 30000)
2191#else
2192 if (i++ > 10000)
2193#endif // _WIN64
2194 _ASSERTE(!"ObjHeader::EnterLock timed out");
2195#endif
2196 // get the value so that it doesn't get changed under us.
2197 LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
2198
2199 // check if lock taken
2200 if (! (curValue & BIT_SBLK_SPIN_LOCK))
2201 {
2202 // try to take the lock
2203 LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
2204 LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
2205 if (result == curValue)
2206 break;
2207 }
2208 if (g_SystemInfo.dwNumberOfProcessors > 1)
2209 {
2210 for (int spinCount = 0; spinCount < BIT_SBLK_SPIN_COUNT; spinCount++)
2211 {
2212 if (! (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK))
2213 break;
                YieldProcessor(); // indicate to the processor that we are spinning
2215 }
2216 if (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK)
2217 __SwitchToThread(0, ++dwSwitchCount);
2218 }
2219 else
2220 __SwitchToThread(0, ++dwSwitchCount);
2221 }
2222
2223 INCONTRACT(Thread* pThread = GetThread());
2224 INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
2225}
2226#else
2227DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
2228{
2229 SCAN_SCOPE_BEGIN;
2230 STATIC_CONTRACT_GC_NOTRIGGER;
2231
2232#ifdef _DEBUG
2233 int i = 0;
2234#endif
2235
2236 DWORD dwSwitchCount = 0;
2237
2238 while (TRUE)
2239 {
2240#ifdef _DEBUG
2241 if (i++ > 10000)
2242 _ASSERTE(!"ObjHeader::EnterLock timed out");
2243#endif
2244 // get the value so that it doesn't get changed under us.
2245 LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
2246
2247 // check if lock taken
2248 if (! (curValue & BIT_SBLK_SPIN_LOCK))
2249 {
2250 // try to take the lock
2251 LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
2252 LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
2253 if (result == curValue)
2254 break;
2255 }
2256 __SwitchToThread(0, ++dwSwitchCount);
2257 }
2258
2259 INCONTRACT(Thread* pThread = GetThread());
2260 INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
2261}
2262#endif //MP_LOCKS
2263
2264DEBUG_NOINLINE void ObjHeader::ReleaseSpinLock()
2265{
2266 SCAN_SCOPE_END;
2267 LIMITED_METHOD_CONTRACT;
2268
2269 INCONTRACT(Thread* pThread = GetThread());
2270 INCONTRACT(if (pThread != NULL) pThread->EndNoTriggerGC());
2271
2272 FastInterlockAnd(&m_SyncBlockValue, ~BIT_SBLK_SPIN_LOCK);
2273}
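// EnterSpinLock/ReleaseSpinLock are expected to be used in matched pairs, typically through the
// ENTER_SPIN_LOCK/LEAVE_SPIN_LOCK macros seen elsewhere in this file, e.g. (sketch):
//
//   ENTER_SPIN_LOCK(this);
//   // ... read or update the header while it cannot change underneath us ...
//   LEAVE_SPIN_LOCK(this);
//
// The BeginNoTriggerGC/EndNoTriggerGC bracketing above lets checked builds verify that no GC is
// triggered while the spin lock is held.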
2274
2275#endif //!DACCESS_COMPILE
2276
2277ADIndex ObjHeader::GetRawAppDomainIndex()
2278{
2279 LIMITED_METHOD_CONTRACT;
2280 SUPPORTS_DAC;
2281
    // Pull the value out before checking it to avoid a race condition.
2283 DWORD value = m_SyncBlockValue.LoadWithoutBarrier();
2284 if ((value & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2285 return ADIndex((value >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX);
2286 return ADIndex(0);
2287}
2288
2289ADIndex ObjHeader::GetAppDomainIndex()
2290{
2291 STATIC_CONTRACT_NOTHROW;
2292 STATIC_CONTRACT_GC_NOTRIGGER;
2293 STATIC_CONTRACT_SO_TOLERANT;
2294 STATIC_CONTRACT_SUPPORTS_DAC;
2295
2296 ADIndex indx = GetRawAppDomainIndex();
2297 if (indx.m_dwIndex)
2298 return indx;
2299 SyncBlock* syncBlock = PassiveGetSyncBlock();
2300 if (! syncBlock)
2301 return ADIndex(0);
2302
2303 return syncBlock->GetAppDomainIndex();
2304}
2305
2306#ifndef DACCESS_COMPILE
2307
2308void ObjHeader::SetAppDomainIndex(ADIndex indx)
2309{
2310 CONTRACTL
2311 {
2312 INSTANCE_CHECK;
2313 THROWS;
2314 GC_NOTRIGGER;
2315 MODE_ANY;
2316 INJECT_FAULT(COMPlusThrowOM(););
2317 }
2318 CONTRACTL_END;
2319
2320 //
2321 // This should only be called during the header initialization,
2322 // so don't worry about races.
2323 //
2324
2325 BOOL done = FALSE;
2326
2327#ifdef _DEBUG
2328 static int forceSB = -1;
2329
2330 if (forceSB == -1)
2331 forceSB = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADForceSB);
2332
2333 if (forceSB)
        // Force a syncblock so we get one for every object.
2335 GetSyncBlock();
2336#endif
2337
2338 if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
2339 {
2340 ENTER_SPIN_LOCK(this);
2341 //Try one more time
2342 if (GetHeaderSyncBlockIndex() == 0)
2343 {
2344 _ASSERTE(GetRawAppDomainIndex().m_dwIndex == 0);
2345 // can store it in the object header
2346 FastInterlockOr(&m_SyncBlockValue, indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2347 done = TRUE;
2348 }
2349 LEAVE_SPIN_LOCK(this);
2350 }
2351
2352 if (!done)
2353 {
2354 // must create a syncblock entry and store the appdomain indx there
2355 SyncBlock *psb = GetSyncBlock();
2356 _ASSERTE(psb);
2357 psb->SetAppDomainIndex(indx);
2358 }
2359}
2360
2361void ObjHeader::ResetAppDomainIndex(ADIndex indx)
2362{
2363 CONTRACTL
2364 {
2365 INSTANCE_CHECK;
2366 THROWS;
2367 GC_NOTRIGGER;
2368 MODE_ANY;
2369 }
2370 CONTRACTL_END;
2371
2372 //
2373 // This should only be called during the header initialization,
2374 // so don't worry about races.
2375 //
2376
2377 BOOL done = FALSE;
2378
2379 if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
2380 {
2381 ENTER_SPIN_LOCK(this);
2382 //Try one more time
2383 if (GetHeaderSyncBlockIndex() == 0)
2384 {
2385 // can store it in the object header
2386 while (TRUE)
2387 {
2388 DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
2389 DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
2390 (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2391 if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
2392 newValue,
2393 oldValue) == (LONG)oldValue)
2394 {
2395 break;
2396 }
2397 }
2398 done = TRUE;
2399 }
2400 LEAVE_SPIN_LOCK(this);
2401 }
2402
2403 if (!done)
2404 {
2405 // must create a syncblock entry and store the appdomain indx there
2406 SyncBlock *psb = GetSyncBlock();
2407 _ASSERTE(psb);
2408 psb->SetAppDomainIndex(indx);
2409 }
2410}
2411
2412void ObjHeader::ResetAppDomainIndexNoFailure(ADIndex indx)
2413{
2414 CONTRACTL
2415 {
2416 INSTANCE_CHECK;
2417 NOTHROW;
2418 GC_NOTRIGGER;
2419 MODE_ANY;
2420 PRECONDITION(indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX);
2421 }
2422 CONTRACTL_END;
2423
2424 ENTER_SPIN_LOCK(this);
2425 if (GetHeaderSyncBlockIndex() == 0)
2426 {
2427 // can store it in the object header
2428 while (TRUE)
2429 {
2430 DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
2431 DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
2432 (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2433 if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
2434 newValue,
2435 oldValue) == (LONG)oldValue)
2436 {
2437 break;
2438 }
2439 }
2440 }
2441 else
2442 {
2443 SyncBlock *psb = PassiveGetSyncBlock();
2444 _ASSERTE(psb);
2445 psb->SetAppDomainIndex(indx);
2446 }
2447 LEAVE_SPIN_LOCK(this);
2448}
2449
2450DWORD ObjHeader::GetSyncBlockIndex()
2451{
2452 CONTRACTL
2453 {
2454 INSTANCE_CHECK;
2455 THROWS;
2456 GC_NOTRIGGER;
2457 MODE_ANY;
2458 INJECT_FAULT(COMPlusThrowOM(););
2459 }
2460 CONTRACTL_END;
2461
2462 DWORD indx;
2463
2464 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2465 {
2466 BOOL fMustCreateSyncBlock = FALSE;
2467
2468 if (GetAppDomainIndex().m_dwIndex)
2469 {
            // If an appdomain index is set, we must create a sync block to store it.
2471 fMustCreateSyncBlock = TRUE;
2472 }
2473 else
2474 {
2475 //Need to get it from the cache
2476 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2477
2478 //Try one more time
2479 if (GetHeaderSyncBlockIndex() == 0)
2480 {
2481 ENTER_SPIN_LOCK(this);
2482 // Now the header will be stable - check whether hashcode, appdomain index or lock information is stored in it.
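                // Two cases require a real syncblock (a restatement of the check below):
                //  1) the header currently stores a hash code (both BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX
                //     and BIT_SBLK_IS_HASHCODE are set), or
                //  2) the header is in thin-lock form and any of the appdomain index, lock recursion
                //     level or lock thread id bits are non-zero.
                // Otherwise a new slot is reserved and its index stored directly in the header without
                // materializing a SyncBlock.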
2483 DWORD bits = GetBits();
2484 if (((bits & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) ||
2485 ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0 &&
2486 (bits & ((SBLK_MASK_APPDOMAININDEX<<SBLK_APPDOMAIN_SHIFT)|SBLK_MASK_LOCK_RECLEVEL|SBLK_MASK_LOCK_THREADID)) != 0))
2487 {
2488 // Need a sync block to store this info
2489 fMustCreateSyncBlock = TRUE;
2490 }
2491 else
2492 {
2493 SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject()));
2494 }
2495 LEAVE_SPIN_LOCK(this);
2496 }
2497 // SyncBlockCache::LockHolder goes out of scope here
2498 }
2499
2500 if (fMustCreateSyncBlock)
2501 GetSyncBlock();
2502
2503 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2504 COMPlusThrowOM();
2505 }
2506
2507 return indx;
2508}
2509
2510#if defined (VERIFY_HEAP)
2511
2512BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
2513{
2514 STATIC_CONTRACT_THROWS;
2515 STATIC_CONTRACT_GC_NOTRIGGER;
2516 STATIC_CONTRACT_SO_TOLERANT;
2517 STATIC_CONTRACT_MODE_COOPERATIVE;
2518
2519 DWORD bits = GetBits ();
2520 Object * obj = GetBaseObject ();
2521 BOOL bVerifyMore = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK;
    // The highest 2 bits have overloaded meanings:
2523 //for string objects:
2524 // BIT_SBLK_STRING_HAS_NO_HIGH_CHARS 0x80000000
2525 // BIT_SBLK_STRING_HIGH_CHARS_KNOWN 0x40000000
2526 // BIT_SBLK_STRING_HAS_SPECIAL_SORT 0xC0000000
2527 //for other objects:
2528 // BIT_SBLK_AGILE_IN_PROGRESS 0x80000000
2529 // BIT_SBLK_FINALIZER_RUN 0x40000000
2530 if (bits & BIT_SBLK_STRING_HIGH_CHAR_MASK)
2531 {
2532 if (obj->GetGCSafeMethodTable () == g_pStringClass)
2533 {
2534 if (bVerifyMore)
2535 {
2536 ASSERT_AND_CHECK (((StringObject *)obj)->ValidateHighChars());
2537 }
2538 }
2539 else
2540 {
2541 //BIT_SBLK_AGILE_IN_PROGRESS is set only in debug build
2542 ASSERT_AND_CHECK (!(bits & BIT_SBLK_AGILE_IN_PROGRESS));
2543 if (bits & BIT_SBLK_FINALIZER_RUN)
2544 {
2545 ASSERT_AND_CHECK (obj->GetGCSafeMethodTable ()->HasFinalizer ());
2546 }
2547 }
2548 }
2549
    // BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen objects, we don't clear the bit.
2551 if (bits & BIT_SBLK_GC_RESERVE)
2552 {
2553 if (!GCHeapUtilities::IsGCInProgress () && !GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress ())
2554 {
2555#ifdef FEATURE_BASICFREEZE
2556 ASSERT_AND_CHECK (GCHeapUtilities::GetGCHeap()->IsInFrozenSegment(obj));
2557#else //FEATURE_BASICFREEZE
2558 _ASSERTE(!"Reserve bit not cleared");
2559 return FALSE;
2560#endif //FEATURE_BASICFREEZE
2561 }
2562 }
2563
2564 //Don't know how to verify BIT_SBLK_SPIN_LOCK (0x10000000)
2565
2566 //BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX (0x08000000)
2567 if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2568 {
2569 //if BIT_SBLK_IS_HASHCODE (0x04000000) is not set,
2570 //rest of the DWORD is SyncBlk Index
2571 if (!(bits & BIT_SBLK_IS_HASHCODE))
2572 {
2573 if (bVerifySyncBlkIndex && GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid ())
2574 {
2575 DWORD sbIndex = bits & MASK_SYNCBLOCKINDEX;
2576 ASSERT_AND_CHECK(SyncTableEntry::GetSyncTableEntry()[sbIndex].m_Object == obj);
2577 }
2578 }
2579 else
2580 {
2581 // rest of the DWORD is a hash code and we don't have much to validate it
2582 }
2583 }
2584 else
2585 {
2586 //if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, rest of DWORD is thin lock thread ID,
2587 //thin lock recursion level and appdomain index
2588 DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2589 DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
        // If the thread ID is 0, the recursion level must be zero,
        // but the thread ID doesn't have to be valid because the lock could be orphaned.
2592 ASSERT_AND_CHECK (lockThreadId != 0 || recursionLevel == 0 );
2593
2594#ifndef _DEBUG
2595 DWORD adIndex = (bits >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX;
        // In non-debug builds, objects do not have an appdomain index in the header.
2597 ASSERT_AND_CHECK (adIndex == 0);
2598#endif //!_DEBUG
2599 }
2600
2601 return TRUE;
2602}
2603
2604#endif //VERIFY_HEAP
2605
2606// This holder takes care of the SyncBlock memory cleanup if an OOM occurs inside a call to NewSyncBlockSlot.
2607//
2608// Warning: Assumes you already own the cache lock.
2609// Assumes nothing allocated inside the SyncBlock (only releases the memory, does not destruct.)
2610//
2611// This holder really just meets GetSyncBlock()'s special needs. It's not a general purpose holder.
2612
2613
// Do not inline this call. (fyuan)
// SyncBlockMemoryHolder is normally just a null-pointer check and return; inlining VoidDeleteSyncBlockMemory would add expensive exception handling to that fast path.
2616void VoidDeleteSyncBlockMemory(SyncBlock* psb)
2617{
2618 LIMITED_METHOD_CONTRACT;
2619 SyncBlockCache::GetSyncBlockCache()->DeleteSyncBlockMemory(psb);
2620}
2621
2622typedef Wrapper<SyncBlock*, DoNothing<SyncBlock*>, VoidDeleteSyncBlockMemory, NULL> SyncBlockMemoryHolder;
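// Typical use (a sketch mirroring GetSyncBlock below):
//
//   SyncBlockMemoryHolder memHolder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
//   SyncBlock *psb = memHolder;
//   // ... last failable operation (e.g. NewSyncBlockSlot) ...
//   memHolder.SuppressRelease();   // from here on the memory belongs to the constructed SyncBlock
//   new (psb) SyncBlock(indx);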
2623
2624
2625// get the sync block for an existing object
2626SyncBlock *ObjHeader::GetSyncBlock()
2627{
2628 CONTRACT(SyncBlock *)
2629 {
2630 INSTANCE_CHECK;
2631 THROWS;
2632 GC_NOTRIGGER;
2633 MODE_ANY;
2634 INJECT_FAULT(COMPlusThrowOM(););
2635 POSTCONDITION(CheckPointer(RETVAL));
2636 }
2637 CONTRACT_END;
2638
2639 PTR_SyncBlock syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2640 DWORD indx = 0;
2641 BOOL indexHeld = FALSE;
2642
2643 if (syncBlock)
2644 {
2645#ifdef _DEBUG
2646 // Has our backpointer been correctly updated through every GC?
2647 PTR_SyncTableEntry pEntries(SyncTableEntry::GetSyncTableEntry());
2648 _ASSERTE(pEntries[GetHeaderSyncBlockIndex()].m_Object == GetBaseObject());
2649#endif // _DEBUG
2650 RETURN syncBlock;
2651 }
2652
2653 //Need to get it from the cache
2654 {
2655 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2656
2657 //Try one more time
2658 syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2659 if (syncBlock)
2660 RETURN syncBlock;
2661
2662
2663 SyncBlockMemoryHolder syncBlockMemoryHolder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
2664 syncBlock = syncBlockMemoryHolder;
2665
2666 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2667 {
2668 indx = SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject());
2669 }
2670 else
2671 {
            // We already have an index, so we need to hold the syncblock.
2673 indexHeld = TRUE;
2674 }
2675
2676 {
2677 //! NewSyncBlockSlot has side-effects that we don't have backout for - thus, that must be the last
2678 //! failable operation called.
2679 CANNOTTHROWCOMPLUSEXCEPTION();
2680 FAULT_FORBID();
2681
2682
2683 syncBlockMemoryHolder.SuppressRelease();
2684
2685 new (syncBlock) SyncBlock(indx);
2686
2687 {
2688 // after this point, nobody can update the index in the header to give an AD index
2689 ENTER_SPIN_LOCK(this);
2690
2691 {
2692 // If there's an appdomain index stored in the header, transfer it to the syncblock
2693
2694 ADIndex dwAppDomainIndex = GetAppDomainIndex();
2695 if (dwAppDomainIndex.m_dwIndex)
2696 syncBlock->SetAppDomainIndex(dwAppDomainIndex);
2697
2698 // If the thin lock in the header is in use, transfer the information to the syncblock
2699 DWORD bits = GetBits();
2700 if ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2701 {
2702 DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2703 DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2704 if (lockThreadId != 0 || recursionLevel != 0)
2705 {
2706 // recursionLevel can't be non-zero if thread id is 0
2707 _ASSERTE(lockThreadId != 0);
2708
2709 Thread *pThread = g_pThinLockThreadIdDispenser->IdToThreadWithValidation(lockThreadId);
2710
2711 if (pThread == NULL)
2712 {
2713 // The lock is orphaned.
2714 pThread = (Thread*) -1;
2715 }
2716 syncBlock->InitState(recursionLevel + 1, pThread);
2717 }
2718 }
2719 else if ((bits & BIT_SBLK_IS_HASHCODE) != 0)
2720 {
2721 DWORD hashCode = bits & MASK_HASHCODE;
2722
2723 syncBlock->SetHashCode(hashCode);
2724 }
2725 }
2726
2727 SyncTableEntry::GetSyncTableEntry() [indx].m_SyncBlock = syncBlock;
2728
            // To avoid a race where some thread tries to get the AD index after we've already cleared it from the
            // header, make sure the syncblock etc. is fully set up with the AD index prior to replacing the index
            // in the header.
2732 if (GetHeaderSyncBlockIndex() == 0)
2733 {
2734 // We have transferred the AppDomain into the syncblock above.
2735 SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | indx);
2736 }
2737
            // If we already had an index, hold the syncblock
            // for the lifetime of the object.
2740 if (indexHeld)
2741 syncBlock->SetPrecious();
2742
2743 LEAVE_SPIN_LOCK(this);
2744 }
2745 // SyncBlockCache::LockHolder goes out of scope here
2746 }
2747 }
2748
2749 RETURN syncBlock;
2750}
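// ObjHeader::Wait/Pulse/PulseAll below back the managed Monitor.Wait/Pulse/PulseAll operations:
// each resolves the object's SyncBlock (creating it on demand via GetSyncBlock, which throws on
// failure), checks ownership, and then delegates to the SyncBlock.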
2751
2752BOOL ObjHeader::Wait(INT32 timeOut, BOOL exitContext)
2753{
2754 CONTRACTL
2755 {
2756 INSTANCE_CHECK;
2757 THROWS;
2758 GC_TRIGGERS;
2759 MODE_ANY;
2760 INJECT_FAULT(COMPlusThrowOM(););
2761 }
2762 CONTRACTL_END;
2763
2764 // The following code may cause GC, so we must fetch the sync block from
2765 // the object now in case it moves.
2766 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2767
2768 // GetSyncBlock throws on failure
2769 _ASSERTE(pSB != NULL);
2770
2771 // make sure we own the crst
2772 if (!pSB->DoesCurrentThreadOwnMonitor())
2773 COMPlusThrow(kSynchronizationLockException);
2774
2775#ifdef _DEBUG
2776 Thread *pThread = GetThread();
2777 DWORD curLockCount = pThread->m_dwLockCount;
2778#endif
2779
2780 BOOL result = pSB->Wait(timeOut,exitContext);
2781
2782 _ASSERTE (curLockCount == pThread->m_dwLockCount);
2783
2784 return result;
2785}
2786
2787void ObjHeader::Pulse()
2788{
2789 CONTRACTL
2790 {
2791 INSTANCE_CHECK;
2792 THROWS;
2793 GC_TRIGGERS;
2794 MODE_ANY;
2795 INJECT_FAULT(COMPlusThrowOM(););
2796 }
2797 CONTRACTL_END;
2798
2799 // The following code may cause GC, so we must fetch the sync block from
2800 // the object now in case it moves.
2801 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2802
2803 // GetSyncBlock throws on failure
2804 _ASSERTE(pSB != NULL);
2805
2806 // make sure we own the crst
2807 if (!pSB->DoesCurrentThreadOwnMonitor())
2808 COMPlusThrow(kSynchronizationLockException);
2809
2810 pSB->Pulse();
2811}
2812
2813void ObjHeader::PulseAll()
2814{
2815 CONTRACTL
2816 {
2817 INSTANCE_CHECK;
2818 THROWS;
2819 GC_TRIGGERS;
2820 MODE_ANY;
2821 INJECT_FAULT(COMPlusThrowOM(););
2822 }
2823 CONTRACTL_END;
2824
2825 // The following code may cause GC, so we must fetch the sync block from
2826 // the object now in case it moves.
2827 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2828
2829 // GetSyncBlock throws on failure
2830 _ASSERTE(pSB != NULL);
2831
2832 // make sure we own the crst
2833 if (!pSB->DoesCurrentThreadOwnMonitor())
2834 COMPlusThrow(kSynchronizationLockException);
2835
2836 pSB->PulseAll();
2837}
2838
2839
2840// ***************************************************************************
2841//
2842// AwareLock class implementation (GC-aware locking)
2843//
2844// ***************************************************************************
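// Rough shape of the pieces used below: m_lockState holds the lock bit and the waiter bookkeeping
// and is only updated with interlocked operations; m_HoldingThread and m_Recursion record the
// current owner and its recursive entry count; m_SemEvent is the event that contending threads
// block on, allocated lazily by AllocLockSemEvent.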
2845
2846void AwareLock::AllocLockSemEvent()
2847{
2848 CONTRACTL
2849 {
2850 INSTANCE_CHECK;
2851 THROWS;
2852 GC_TRIGGERS;
2853 MODE_ANY;
2854 INJECT_FAULT(COMPlusThrowOM(););
2855 }
2856 CONTRACTL_END;
2857
2858 // Before we switch from cooperative, ensure that this syncblock won't disappear
2859 // under us. For something as expensive as an event, do it permanently rather
2860 // than transiently.
2861 SetPrecious();
2862
2863 GCX_PREEMP();
2864
2865 // No need to take a lock - CLREvent::CreateMonitorEvent is thread safe
2866 m_SemEvent.CreateMonitorEvent((SIZE_T)this);
2867}
2868
2869void AwareLock::Enter()
2870{
2871 CONTRACTL
2872 {
2873 INSTANCE_CHECK;
2874 THROWS;
2875 GC_TRIGGERS;
2876 MODE_ANY;
2877 INJECT_FAULT(COMPlusThrowOM(););
2878 }
2879 CONTRACTL_END;
2880
2881 Thread *pCurThread = GetThread();
2882 LockState state = m_lockState.VolatileLoadWithoutBarrier();
2883 if (!state.IsLocked() || m_HoldingThread != pCurThread)
2884 {
2885 if (m_lockState.InterlockedTryLock_Or_RegisterWaiter(this, state))
2886 {
2887 // We get here if we successfully acquired the mutex.
2888 m_HoldingThread = pCurThread;
2889 m_Recursion = 1;
2890 pCurThread->IncLockCount();
2891
2892#if defined(_DEBUG) && defined(TRACK_SYNC)
2893 // The best place to grab this is from the ECall frame
2894 Frame *pFrame = pCurThread->GetFrame();
2895 int caller = (pFrame && pFrame != FRAME_TOP
2896 ? (int)pFrame->GetReturnAddress()
2897 : -1);
2898 pCurThread->m_pTrackSync->EnterSync(caller, this);
2899#endif
2900 return;
2901 }
2902
2903 // Lock was not acquired and the waiter was registered
2904
2905 // Didn't manage to get the mutex, must wait.
2906 // The precondition for EnterEpilog is that the count of waiters be bumped
2907 // to account for this thread, which was done above.
2908 EnterEpilog(pCurThread);
2909 return;
2910 }
2911
2912 // Got the mutex via recursive locking on the same thread.
2913 _ASSERTE(m_Recursion >= 1);
2914 m_Recursion++;
2915
2916#if defined(_DEBUG) && defined(TRACK_SYNC)
2917 // The best place to grab this is from the ECall frame
2918 Frame *pFrame = pCurThread->GetFrame();
2919 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2920 pCurThread->m_pTrackSync->EnterSync(caller, this);
2921#endif
2922}
2923
2924BOOL AwareLock::TryEnter(INT32 timeOut)
2925{
2926 CONTRACTL
2927 {
2928 INSTANCE_CHECK;
2929 THROWS;
2930 GC_TRIGGERS;
2931 if (timeOut == 0) {MODE_ANY;} else {MODE_COOPERATIVE;}
2932 INJECT_FAULT(COMPlusThrowOM(););
2933 }
2934 CONTRACTL_END;
2935
2936 Thread *pCurThread = GetThread();
2937 TESTHOOKCALL(AppDomainCanBeUnloaded(pCurThread->GetDomain()->GetId().m_dwId, FALSE));
2938
2939 if (pCurThread->IsAbortRequested())
2940 {
2941 pCurThread->HandleThreadAbort();
2942 }
2943
2944 LockState state = m_lockState.VolatileLoadWithoutBarrier();
2945 if (!state.IsLocked() || m_HoldingThread != pCurThread)
2946 {
2947 if (timeOut == 0
2948 ? m_lockState.InterlockedTryLock(state)
2949 : m_lockState.InterlockedTryLock_Or_RegisterWaiter(this, state))
2950 {
2951 // We get here if we successfully acquired the mutex.
2952 m_HoldingThread = pCurThread;
2953 m_Recursion = 1;
2954 pCurThread->IncLockCount();
2955
2956#if defined(_DEBUG) && defined(TRACK_SYNC)
2957 // The best place to grab this is from the ECall frame
2958 Frame *pFrame = pCurThread->GetFrame();
2959 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2960 pCurThread->m_pTrackSync->EnterSync(caller, this);
2961#endif
2962 return true;
2963 }
2964
2965 // Lock was not acquired and the waiter was registered if the timeout is nonzero
2966
2967 // Didn't manage to get the mutex, return failure if no timeout, else wait
2968 // for at most timeout milliseconds for the mutex.
2969 if (timeOut == 0)
2970 {
2971 return false;
2972 }
2973
2974 // The precondition for EnterEpilog is that the count of waiters be bumped
2975 // to account for this thread, which was done above
2976 return EnterEpilog(pCurThread, timeOut);
2977 }
2978
2979 // Got the mutex via recursive locking on the same thread.
2980 _ASSERTE(m_Recursion >= 1);
2981 m_Recursion++;
2982#if defined(_DEBUG) && defined(TRACK_SYNC)
2983 // The best place to grab this is from the ECall frame
2984 Frame *pFrame = pCurThread->GetFrame();
2985 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2986 pCurThread->m_pTrackSync->EnterSync(caller, this);
2987#endif
2988 return true;
2989}
2990
2991BOOL AwareLock::EnterEpilog(Thread* pCurThread, INT32 timeOut)
2992{
2993 STATIC_CONTRACT_THROWS;
2994 STATIC_CONTRACT_MODE_COOPERATIVE;
2995 STATIC_CONTRACT_GC_TRIGGERS;
2996
2997 // While we are in this frame the thread is considered blocked on the
2998 // critical section of the monitor lock according to the debugger
2999 DebugBlockingItem blockingMonitorInfo;
3000 blockingMonitorInfo.dwTimeout = timeOut;
3001 blockingMonitorInfo.pMonitor = this;
3002 blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
3003 blockingMonitorInfo.type = DebugBlock_MonitorCriticalSection;
3004 DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
3005
3006 // We need a separate helper because it uses SEH and the holder has a
3007 // destructor
3008 return EnterEpilogHelper(pCurThread, timeOut);
3009}
3010
3011#ifdef _DEBUG
3012#define _LOGCONTENTION
3013#endif // _DEBUG
3014
3015#ifdef _LOGCONTENTION
3016inline void LogContention()
3017{
3018 WRAPPER_NO_CONTRACT;
3019#ifdef LOGGING
3020 if (LoggingOn(LF_SYNC, LL_INFO100))
3021 {
3022 LogSpewAlways("Contention: Stack Trace Begin\n");
3023 void LogStackTrace();
3024 LogStackTrace();
3025 LogSpewAlways("Contention: Stack Trace End\n");
3026 }
3027#endif
3028}
3029#else
3030#define LogContention()
3031#endif
3032
3033BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut)
3034{
3035 STATIC_CONTRACT_THROWS;
3036 STATIC_CONTRACT_MODE_COOPERATIVE;
3037 STATIC_CONTRACT_GC_TRIGGERS;
3038
3039 // IMPORTANT!!!
3040 // The caller has already registered a waiter. This function needs to unregister the waiter on all paths (exception paths
    // included). On runtimes where thread-abort is supported, a thread-abort also needs to unregister the waiter. The
    // preemptive GC toggles below may end up processing a thread-abort; take that into consideration when porting this
    // code back to .NET Framework.
3044
3045 // Require all callers to be in cooperative mode. If they have switched to preemptive
3046 // mode temporarily before calling here, then they are responsible for protecting
3047 // the object associated with this lock.
3048 _ASSERTE(pCurThread->PreemptiveGCDisabled());
3049
3050 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cContention++);
3051
3052 // Fire a contention start event for a managed contention
3053 FireEtwContentionStart_V1(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId());
3054
3055 LogContention();
3056
3057 OBJECTREF obj = GetOwningObject();
3058
3059 // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
3060 IncrementTransientPrecious();
3061
3062 DWORD ret;
3063 GCPROTECT_BEGIN(obj);
3064 {
3065 if (!m_SemEvent.IsMonitorEventAllocated())
3066 {
3067 AllocLockSemEvent();
3068 }
3069 _ASSERTE(m_SemEvent.IsMonitorEventAllocated());
3070
3071 pCurThread->EnablePreemptiveGC();
3072
3073 for (;;)
3074 {
3075 // We might be interrupted during the wait (Thread.Interrupt), so we need an
            // exception handler around the call.
3077 struct Param
3078 {
3079 AwareLock *pThis;
3080 INT32 timeOut;
3081 DWORD ret;
3082 } param;
3083 param.pThis = this;
3084 param.timeOut = timeOut;
3085
3086 // Measure the time we wait so that, in the case where we wake up
3087 // and fail to acquire the mutex, we can adjust remaining timeout
3088 // accordingly.
3089 ULONGLONG start = CLRGetTickCount64();
3090
3091 EE_TRY_FOR_FINALLY(Param *, pParam, &param)
3092 {
3093 pParam->ret = pParam->pThis->m_SemEvent.Wait(pParam->timeOut, TRUE);
3094 _ASSERTE((pParam->ret == WAIT_OBJECT_0) || (pParam->ret == WAIT_TIMEOUT));
3095 }
3096 EE_FINALLY
3097 {
3098 if (GOT_EXCEPTION())
3099 {
                    // It is likely that an APC threw an exception, for instance Thread.Interrupt(). The wait subsystem
3101 // guarantees that if a signal to the event being waited upon is observed by the woken thread, that thread's
3102 // wait will return WAIT_OBJECT_0. So in any race between m_SemEvent being signaled and the wait throwing an
3103 // exception, a thread that is woken by an exception would not observe the signal, and the signal would wake
3104 // another thread as necessary.
3105
3106 // We must decrement the waiter count.
3107 m_lockState.InterlockedUnregisterWaiter();
3108 }
3109 } EE_END_FINALLY;
3110
3111 ret = param.ret;
3112 if (ret != WAIT_OBJECT_0)
3113 {
3114 // We timed out, decrement waiter count.
3115 m_lockState.InterlockedUnregisterWaiter();
3116 break;
3117 }
3118
3119 // Spin a bit while trying to acquire the lock. This has a few benefits:
3120 // - Spinning helps to reduce waiter starvation. Since other non-waiter threads can take the lock while there are
3121 // waiters (see LockState::InterlockedTryLock()), once a waiter wakes it will be able to better compete
3122 // with other spinners for the lock.
3123 // - If there is another thread that is repeatedly acquiring and releasing the lock, spinning before waiting again
3124 // helps to prevent a waiter from repeatedly context-switching in and out
3125 // - Further in the same situation above, waking up and waiting shortly thereafter deprioritizes this waiter because
3126 // events release waiters in FIFO order. Spinning a bit helps a waiter to retain its priority at least for one
3127 // spin duration before it gets deprioritized behind all other waiters.
3128 if (g_SystemInfo.dwNumberOfProcessors > 1)
3129 {
3130 bool acquiredLock = false;
3131 YieldProcessorNormalizationInfo normalizationInfo;
3132 const DWORD spinCount = g_SpinConstants.dwMonitorSpinCount;
3133 for (DWORD spinIteration = 0; spinIteration < spinCount; ++spinIteration)
3134 {
3135 if (m_lockState.InterlockedTry_LockAndUnregisterWaiterAndObserveWakeSignal(this))
3136 {
3137 acquiredLock = true;
3138 break;
3139 }
3140
3141 SpinWait(normalizationInfo, spinIteration);
3142 }
3143 if (acquiredLock)
3144 {
3145 break;
3146 }
3147 }
3148
3149 if (m_lockState.InterlockedObserveWakeSignal_Try_LockAndUnregisterWaiter(this))
3150 {
3151 break;
3152 }
3153
3154 // When calculating duration we consider a couple of special cases.
3155 // If the end tick is the same as the start tick we make the
3156 // duration a millisecond, to ensure we make forward progress if
3157 // there's a lot of contention on the mutex. Secondly, we have to
            // cope with the case where the tick counter wrapped while we were
            // waiting (we can cope with at most one wrap, so don't expect
            // three-month timeouts to be very accurate). Luckily for us, the latter
3161 // case is taken care of by 32-bit modulo arithmetic automatically.
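            // Worked example (sketch): with timeOut == 500 and a wait that returned after roughly
            // 200ms, duration becomes 200 and the next iteration waits with timeOut == 300. If the
            // wait returned within the same tick, duration is forced to 1 so the remaining timeout
            // still shrinks and the loop keeps making progress.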
3162 if (timeOut != (INT32)INFINITE)
3163 {
3164 ULONGLONG end = CLRGetTickCount64();
3165 ULONGLONG duration;
3166 if (end == start)
3167 {
3168 duration = 1;
3169 }
3170 else
3171 {
3172 duration = end - start;
3173 }
3174 duration = min(duration, (DWORD)timeOut);
3175 timeOut -= (INT32)duration;
3176 }
3177 }
3178
3179 pCurThread->DisablePreemptiveGC();
3180 }
3181 GCPROTECT_END();
3182 DecrementTransientPrecious();
3183
3184 // Fire a contention end event for a managed contention
3185 FireEtwContentionStop(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId());
3186
3187 if (ret == WAIT_TIMEOUT)
3188 {
3189 return false;
3190 }
3191
3192 m_HoldingThread = pCurThread;
3193 m_Recursion = 1;
3194 pCurThread->IncLockCount();
3195
3196#if defined(_DEBUG) && defined(TRACK_SYNC)
3197 // The best place to grab this is from the ECall frame
3198 Frame *pFrame = pCurThread->GetFrame();
3199 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
3200 pCurThread->m_pTrackSync->EnterSync(caller, this);
3201#endif
3202 return true;
3203}
3204
3205
3206BOOL AwareLock::Leave()
3207{
3208 CONTRACTL
3209 {
3210 INSTANCE_CHECK;
3211 NOTHROW;
3212 GC_NOTRIGGER;
3213 MODE_ANY;
3214 }
3215 CONTRACTL_END;
3216
3217 Thread* pThread = GetThread();
3218
3219 AwareLock::LeaveHelperAction action = LeaveHelper(pThread);
3220
3221 switch(action)
3222 {
3223 case AwareLock::LeaveHelperAction_None:
3224 // We are done
3225 return TRUE;
3226 case AwareLock::LeaveHelperAction_Signal:
3227 // Signal the event
3228 Signal();
3229 return TRUE;
3230 default:
3231 // Must be an error otherwise
3232 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
3233 return FALSE;
3234 }
3235}
3236
3237LONG AwareLock::LeaveCompletely()
3238{
3239 WRAPPER_NO_CONTRACT;
3240
3241 LONG count = 0;
3242 while (Leave()) {
3243 count++;
3244 }
3245 _ASSERTE(count > 0); // otherwise we were never in the lock
3246
3247 return count;
3248}
3249
3250
3251BOOL AwareLock::OwnedByCurrentThread()
3252{
3253 WRAPPER_NO_CONTRACT;
3254 return (GetThread() == m_HoldingThread);
3255}
3256
3257
3258// ***************************************************************************
3259//
3260// SyncBlock class implementation
3261//
3262// ***************************************************************************
3263
3264// We maintain two queues for SyncBlock::Wait.
3265// 1. Inside SyncBlock we queue all threads that are waiting on the SyncBlock.
3266// When we pulse, we pick the thread from this queue using FIFO.
3267// 2. We queue all SyncBlocks that a thread is waiting for in Thread::m_WaitEventLink.
3268// When we pulse a thread, we find the event from this queue to set, and we also
// OR in a 1 bit in the syncblock value saved in the queue, so that we can return
3270// immediately from SyncBlock::Wait if the syncblock has been pulsed.
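// A pulsed thread is marked by setting the low bit of the SyncBlock pointer saved in its
// WaitEventLink, which is why the checks below mask with ~1 before comparing m_WaitSB against
// this SyncBlock.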
3271BOOL SyncBlock::Wait(INT32 timeOut, BOOL exitContext)
3272{
3273 CONTRACTL
3274 {
3275 INSTANCE_CHECK;
3276 THROWS;
3277 GC_TRIGGERS;
3278 MODE_ANY;
3279 INJECT_FAULT(COMPlusThrowOM());
3280 }
3281 CONTRACTL_END;
3282
3283 Thread *pCurThread = GetThread();
3284 BOOL isTimedOut = FALSE;
3285 BOOL isEnqueued = FALSE;
3286 WaitEventLink waitEventLink;
3287 WaitEventLink *pWaitEventLink;
3288
3289 // As soon as we flip the switch, we are in a race with the GC, which could clean
3290 // up the SyncBlock underneath us -- unless we report the object.
3291 _ASSERTE(pCurThread->PreemptiveGCDisabled());
3292
3293 // Does this thread already wait for this SyncBlock?
3294 WaitEventLink *walk = pCurThread->WaitEventLinkForSyncBlock(this);
3295 if (walk->m_Next) {
3296 if (walk->m_Next->m_WaitSB == this) {
3297 // Wait on the same lock again.
3298 walk->m_Next->m_RefCount ++;
3299 pWaitEventLink = walk->m_Next;
3300 }
3301 else if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1)== this) {
3302 // This thread has been pulsed. No need to wait.
3303 return TRUE;
3304 }
3305 }
3306 else {
3307 // First time this thread is going to wait for this SyncBlock.
3308 CLREvent* hEvent;
3309 if (pCurThread->m_WaitEventLink.m_Next == NULL) {
3310 hEvent = &(pCurThread->m_EventWait);
3311 }
3312 else {
3313 hEvent = GetEventFromEventStore();
3314 }
3315 waitEventLink.m_WaitSB = this;
3316 waitEventLink.m_EventWait = hEvent;
3317 waitEventLink.m_Thread = pCurThread;
3318 waitEventLink.m_Next = NULL;
3319 waitEventLink.m_LinkSB.m_pNext = NULL;
3320 waitEventLink.m_RefCount = 1;
3321 pWaitEventLink = &waitEventLink;
3322 walk->m_Next = pWaitEventLink;
3323
3324 // Before we enqueue it (and, thus, before it can be dequeued), reset the event
3325 // that will awaken us.
3326 hEvent->Reset();
3327
3328 // This thread is now waiting on this sync block
3329 ThreadQueue::EnqueueThread(pWaitEventLink, this);
3330
3331 isEnqueued = TRUE;
3332 }
3333
3334 _ASSERTE ((SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1)== this);
3335
3336 PendingSync syncState(walk);
3337
3338 OBJECTREF obj = m_Monitor.GetOwningObject();
3339
3340 m_Monitor.IncrementTransientPrecious();
3341
3342 // While we are in this frame the thread is considered blocked on the
3343 // event of the monitor lock according to the debugger
3344 DebugBlockingItem blockingMonitorInfo;
3345 blockingMonitorInfo.dwTimeout = timeOut;
3346 blockingMonitorInfo.pMonitor = &m_Monitor;
3347 blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
3348 blockingMonitorInfo.type = DebugBlock_MonitorEvent;
3349 DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
3350
3351 GCPROTECT_BEGIN(obj);
3352 {
3353 GCX_PREEMP();
3354
3355 // remember how many times we synchronized
3356 syncState.m_EnterCount = LeaveMonitorCompletely();
3357 _ASSERTE(syncState.m_EnterCount > 0);
3358
3359 isTimedOut = pCurThread->Block(timeOut, &syncState);
3360 }
3361 GCPROTECT_END();
3362 m_Monitor.DecrementTransientPrecious();
3363
3364 return !isTimedOut;
3365}
3366
3367void SyncBlock::Pulse()
3368{
3369 CONTRACTL
3370 {
3371 INSTANCE_CHECK;
3372 NOTHROW;
3373 GC_NOTRIGGER;
3374 MODE_ANY;
3375 }
3376 CONTRACTL_END;
3377
3378 WaitEventLink *pWaitEventLink;
3379
3380 if ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
3381 pWaitEventLink->m_EventWait->Set();
3382}
3383
3384void SyncBlock::PulseAll()
3385{
3386 CONTRACTL
3387 {
3388 INSTANCE_CHECK;
3389 NOTHROW;
3390 GC_NOTRIGGER;
3391 MODE_ANY;
3392 }
3393 CONTRACTL_END;
3394
3395 WaitEventLink *pWaitEventLink;
3396
3397 while ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
3398 pWaitEventLink->m_EventWait->Set();
3399}
3400
3401bool SyncBlock::SetInteropInfo(InteropSyncBlockInfo* pInteropInfo)
3402{
3403 WRAPPER_NO_CONTRACT;
3404 SetPrecious();
3405
3406 // We could be agile, but not have noticed yet. We can't assert here
3407 // that we live in any given domain, nor is this an appropriate place
3408 // to re-parent the syncblock.
3409/* _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
3410 m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
3411 m_dwAppDomainIndex == GetAppDomain()->GetIndex());
3412 m_dwAppDomainIndex = GetAppDomain()->GetIndex();
3413*/
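    // Publish pInteropInfo only if no other thread has installed one yet. The interlocked
    // compare-exchange returns the previous value, so this returns true exactly for the thread
    // that won the race; a caller that loses the race still holds its own InteropSyncBlockInfo
    // pointer (this function does not take ownership in that case).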
3414 return (FastInterlockCompareExchangePointer(&m_pInteropInfo,
3415 pInteropInfo,
3416 NULL) == NULL);
3417}
3418
3419#ifdef EnC_SUPPORTED
3420// Store information about fields added to this object by EnC
3421// This must be called from a thread in the AppDomain of this object instance
3422void SyncBlock::SetEnCInfo(EnCSyncBlockInfo *pEnCInfo)
3423{
3424 WRAPPER_NO_CONTRACT;
3425
3426 // We can't recreate the field contents, so this SyncBlock can never go away
3427 SetPrecious();
3428
3429 // Store the field info (should only ever happen once)
3430 _ASSERTE( m_pEnCInfo == NULL );
3431 m_pEnCInfo = pEnCInfo;
3432
3433 // Also store the AppDomain that this object lives in.
3434 // Also verify that the AD was either not yet set, or set correctly before overwriting it.
3435 // I'm not sure why it should ever be set to the default domain and then changed to a different domain,
3436 // perhaps that can be removed.
3437 _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
3438 m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
3439 m_dwAppDomainIndex == GetAppDomain()->GetIndex());
3440 m_dwAppDomainIndex = GetAppDomain()->GetIndex();
3441}
3442#endif // EnC_SUPPORTED
3443#endif // !DACCESS_COMPILE
3444
3445#if defined(_WIN64) && defined(_DEBUG)
3446void ObjHeader::IllegalAlignPad()
3447{
3448 WRAPPER_NO_CONTRACT;
3449#ifdef LOGGING
3450 void** object = ((void**) this) + 1;
3451 LogSpewAlways("\n\n******** Illegal ObjHeader m_alignpad not 0, object" FMT_ADDR "\n\n",
3452 DBG_ADDR(object));
3453#endif
3454 _ASSERTE(m_alignpad == 0);
3455}
3456#endif // _WIN64 && _DEBUG
3457
3458
3459