1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | // |
5 | // SYNCBLK.H |
6 | // |
7 | |
8 | // |
9 | // Definition of a SyncBlock and the SyncBlockCache which manages it |
10 | |
11 | // See file:#SyncBlockOverview Sync block overview |
12 | |
13 | #ifndef _SYNCBLK_H_ |
14 | #define _SYNCBLK_H_ |
15 | |
16 | #include "util.hpp" |
17 | #include "slist.h" |
18 | #include "crst.h" |
19 | #include "vars.hpp" |
20 | #include "yieldprocessornormalized.h" |
21 | |
22 | // #SyncBlockOverview |
23 | // |
24 | // Every Object is preceded by an ObjHeader (at a negative offset). The code:ObjHeader has an index to a |
25 | // code:SyncBlock. This index is 0 for the bulk of all instances, which indicates that the object shares a |
26 | // dummy SyncBlock with most other objects. |
27 | // |
28 | // The SyncBlock is primarily responsible for object synchronization. However, it is also a "kitchen sink" of |
29 | // sparsely allocated instance data. For instance, the default implementation of Hash() is based on the |
30 | // existence of a code:SyncTableEntry. And objects exposed to or from COM, or through context boundaries, can |
31 | // store sparse data here. |
32 | // |
33 | // SyncTableEntries and SyncBlocks are allocated in non-GC memory. A weak pointer from the SyncTableEntry to |
34 | // the instance is used to ensure that the SyncBlock and SyncTableEntry are reclaimed (recycled) when the |
35 | // instance dies. |
36 | // |
37 | // The organization of the SyncBlocks isn't intuitive (at least to me). Here's the explanation: |
38 | // |
39 | // Before each Object is an code:ObjHeader. If the object has a code:SyncBlock, the code:ObjHeader contains a |
40 | // non-0 index to it. |
41 | // |
42 | // The index is looked up in the code:g_pSyncTable of SyncTableEntries. This means the table is consecutive |
43 | // for all outstanding indices. Whenever it needs to grow, it doubles in size and copies all the original |
44 | // entries. The old table is kept until GC time, when it can be safely discarded. |
45 | // |
46 | // Each code:SyncTableEntry has a backpointer to the object and a forward pointer to the actual SyncBlock. |
47 | // The SyncBlock is allocated out of a SyncBlockArray which is essentially just a block of SyncBlocks. |
48 | // |
49 | // The code:SyncBlockArray s are managed by a code:SyncBlockCache that handles the actual allocations and |
50 | // frees of the blocks. |
51 | // |
52 | // So... |
53 | // |
54 | // Each allocation and release has to handle free lists in the table of entries and the table of blocks. |
55 | // |
56 | // We burn an extra 4 bytes for the pointer from the SyncTableEntry to the SyncBlock. |
57 | // |
58 | // The reason for this is that many objects have a SyncTableEntry but no SyncBlock. That's because someone |
59 | // (e.g. HashTable) called Hash() on them. |
60 | // |
61 | // Incidentally, there's a better write-up of all this stuff in the archives. |
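//
// As an illustrative sketch (not actual runtime code), resolving an object's
// SyncBlock conceptually looks like this:
//
//      DWORD index = pObj->GetHeader()->GetHeaderSyncBlockIndex();
//      if (index != 0)
//      {
//          SyncBlock *psb = g_pSyncTable[index].m_SyncBlock;
//          // psb may still be NULL if only a hash code was ever needed
//      }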
62 | |
63 | #ifdef _TARGET_X86_ |
64 | #include <pshpack4.h> |
65 | #endif // _TARGET_X86_ |
66 | |
67 | // forwards: |
68 | class SyncBlock; |
69 | class SyncBlockCache; |
70 | class SyncTableEntry; |
71 | class SyncBlockArray; |
72 | class AwareLock; |
73 | class Thread; |
74 | class AppDomain; |
75 | |
76 | #ifdef EnC_SUPPORTED |
77 | class EnCSyncBlockInfo; |
78 | typedef DPTR(EnCSyncBlockInfo) PTR_EnCSyncBlockInfo; |
79 | |
80 | #endif // EnC_SUPPORTED |
81 | |
82 | #include "eventstore.hpp" |
83 | |
84 | #include "eventstore.hpp" |
85 | |
86 | #include "synch.h" |
87 | |
88 | // At a negative offset from each Object is an ObjHeader. The 'size' of the |
89 | // object includes these bytes. However, we rely on the previous object allocation |
90 | // to zero out the ObjHeader for the current allocation. And the limits of the |
91 | // GC space are initialized to respect this "off by one" error. |
92 | |
93 | // m_SyncBlockValue is carved up into an index and a set of bits. Steal bits by |
94 | // reducing the mask. We use the very high bit, in _DEBUG, to be sure we never forget |
95 | // to mask the Value to obtain the Index |
96 | |
// These first three are only used on strings (if the first one is set, we know whether
// the string has high byte characters, and the second bit tells which way it is).
// Note that we are reusing the FINALIZER_RUN bit since strings don't have finalizers,
// so the value of this bit does not matter for strings.
101 | #define BIT_SBLK_STRING_HAS_NO_HIGH_CHARS 0x80000000 |
102 | |
103 | // Used as workaround for infinite loop case. Will set this bit in the sblk if we have already |
104 | // seen this sblk in our agile checking logic. Problem is seen when object 1 has a ref to object 2 |
105 | // and object 2 has a ref to object 1. The agile checker will infinitely loop on these references. |
106 | #define BIT_SBLK_AGILE_IN_PROGRESS 0x80000000 |
107 | #define BIT_SBLK_STRING_HIGH_CHARS_KNOWN 0x40000000 |
108 | #define BIT_SBLK_STRING_HAS_SPECIAL_SORT 0xC0000000 |
109 | #define BIT_SBLK_STRING_HIGH_CHAR_MASK 0xC0000000 |
110 | |
111 | #define BIT_SBLK_FINALIZER_RUN 0x40000000 |
112 | #define BIT_SBLK_GC_RESERVE 0x20000000 |
113 | |
114 | // This lock is only taken when we need to modify the index value in m_SyncBlockValue. |
115 | // It should not be taken if the object already has a real syncblock index. |
116 | #define BIT_SBLK_SPIN_LOCK 0x10000000 |
117 | |
118 | #define BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX 0x08000000 |
119 | |
// if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, the rest of the header dword is laid out as follows:
// - lower ten bits (bits 0 thru 9) are the thread id used for the thin locks
//   value is zero if no thread is holding the lock
// - following six bits (bits 10 thru 15) are the recursion level used for the thin locks
//   value is zero if lock is not taken or only taken once by the same thread
// - following 11 bits (bits 16 thru 26) are the app domain index
//   value is zero if no app domain index is set for the object
127 | #define SBLK_MASK_LOCK_THREADID 0x000003FF // special value of 0 + 1023 thread ids |
128 | #define SBLK_MASK_LOCK_RECLEVEL 0x0000FC00 // 64 recursion levels |
129 | #define SBLK_LOCK_RECLEVEL_INC 0x00000400 // each level is this much higher than the previous one |
130 | #define SBLK_APPDOMAIN_SHIFT 16 // shift right this much to get appdomain index |
131 | #define SBLK_RECLEVEL_SHIFT 10 // shift right this much to get recursion level |
132 | #define SBLK_MASK_APPDOMAININDEX 0x000007FF // 2048 appdomain indices |
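
// As an illustrative example, a thin-lock header dword 'dw' decodes as:
//
//      DWORD tid      = dw & SBLK_MASK_LOCK_THREADID;
//      DWORD recLevel = (dw & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
//      DWORD adIndex  = (dw >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX;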
133 | |
134 | // add more bits here... (adjusting the following mask to make room) |
135 | |
136 | // if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is set, |
137 | // then if BIT_SBLK_IS_HASHCODE is also set, the rest of the dword is the hash code (bits 0 thru 25), |
138 | // otherwise the rest of the dword is the sync block index (bits 0 thru 25) |
139 | #define BIT_SBLK_IS_HASHCODE 0x04000000 |
140 | |
141 | #define HASHCODE_BITS 26 |
142 | |
143 | #define MASK_HASHCODE ((1<<HASHCODE_BITS)-1) |
144 | #define SYNCBLOCKINDEX_BITS 26 |
145 | #define MASK_SYNCBLOCKINDEX ((1<<SYNCBLOCKINDEX_BITS)-1) |
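
// Illustrative decode of the two cases when BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX
// is set in a header dword 'dw':
//
//      if (dw & BIT_SBLK_IS_HASHCODE)
//          hashCode  = dw & MASK_HASHCODE;         // 26-bit hash code
//      else
//          syncIndex = dw & MASK_SYNCBLOCKINDEX;   // 26-bit sync block index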
146 | |
147 | // Spin for about 1000 cycles before waiting longer. |
148 | #define BIT_SBLK_SPIN_COUNT 1000 |
149 | |
// The GC is highly dependent on SIZEOF_OBJHEADER being exactly the sizeof(ObjHeader).
// We define this macro so that the preprocessor can calculate padding structures.
#ifdef _WIN64
#define SIZEOF_OBJHEADER 8
#else // !_WIN64
#define SIZEOF_OBJHEADER 4
#endif // !_WIN64
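
// A sketch of the invariant this macro captures (ObjHeader is defined later
// in this file, so it is expressed here as a comment rather than a real assert):
//
//      static_assert_no_msg(sizeof(ObjHeader) == SIZEOF_OBJHEADER);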
157 | |
158 | |
159 | inline void InitializeSpinConstants() |
160 | { |
161 | WRAPPER_NO_CONTRACT; |
162 | |
163 | #if !defined(DACCESS_COMPILE) |
164 | g_SpinConstants.dwInitialDuration = g_pConfig->SpinInitialDuration(); |
165 | g_SpinConstants.dwMaximumDuration = min(g_pConfig->SpinLimitProcCap(), g_SystemInfo.dwNumberOfProcessors) * g_pConfig->SpinLimitProcFactor() + g_pConfig->SpinLimitConstant(); |
166 | g_SpinConstants.dwBackoffFactor = g_pConfig->SpinBackoffFactor(); |
167 | g_SpinConstants.dwRepetitions = g_pConfig->SpinRetryCount(); |
168 | g_SpinConstants.dwMonitorSpinCount = g_SpinConstants.dwMaximumDuration == 0 ? 0 : g_pConfig->MonitorSpinCount(); |
169 | #endif |
170 | } |
171 | |
172 | // this is a 'GC-aware' Lock. It is careful to enable preemptive GC before it |
173 | // attempts any operation that can block. Once the operation is finished, it |
174 | // restores the original state of GC. |
175 | |
176 | // AwareLocks can only be created inside SyncBlocks, since they depend on the |
177 | // enclosing SyncBlock for coordination. This is enforced by the private ctor. |
178 | typedef DPTR(class AwareLock) PTR_AwareLock; |
179 | |
180 | class AwareLock |
181 | { |
182 | friend class CheckAsmOffsets; |
183 | |
184 | friend class SyncBlockCache; |
185 | friend class SyncBlock; |
186 | |
187 | public: |
188 | enum EnterHelperResult { |
189 | EnterHelperResult_Entered, |
190 | EnterHelperResult_Contention, |
191 | EnterHelperResult_UseSlowPath |
192 | }; |
193 | |
194 | enum LeaveHelperAction { |
195 | LeaveHelperAction_None, |
196 | LeaveHelperAction_Signal, |
197 | LeaveHelperAction_Yield, |
198 | LeaveHelperAction_Contention, |
199 | LeaveHelperAction_Error, |
200 | }; |
201 | |
202 | private: |
203 | class LockState |
204 | { |
205 | private: |
206 | // Layout constants for m_state |
207 | static const UINT32 IsLockedMask = (UINT32)1 << 0; // bit 0 |
208 | static const UINT32 ShouldNotPreemptWaitersMask = (UINT32)1 << 1; // bit 1 |
209 | static const UINT32 SpinnerCountIncrement = (UINT32)1 << 2; |
210 | static const UINT32 SpinnerCountMask = (UINT32)0x7 << 2; // bits 2-4 |
211 | static const UINT32 IsWaiterSignaledToWakeMask = (UINT32)1 << 5; // bit 5 |
212 | static const UINT8 WaiterCountShift = 6; |
213 | static const UINT32 WaiterCountIncrement = (UINT32)1 << WaiterCountShift; |
214 | static const UINT32 WaiterCountMask = (UINT32)-1 >> WaiterCountShift << WaiterCountShift; // bits 6-31 |
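
// Resulting layout of m_state (derived from the constants above):
//   bit 0      IsLocked
//   bit 1      ShouldNotPreemptWaiters
//   bits 2-4   SpinnerCount
//   bit 5      IsWaiterSignaledToWake
//   bits 6-31  WaiterCount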
215 | |
216 | private: |
217 | UINT32 m_state; |
218 | |
219 | public: |
220 | LockState(UINT32 state = 0) : m_state(state) |
221 | { |
222 | LIMITED_METHOD_CONTRACT; |
223 | } |
224 | |
225 | public: |
226 | UINT32 GetState() const |
227 | { |
228 | LIMITED_METHOD_CONTRACT; |
229 | return m_state; |
230 | } |
231 | |
232 | UINT32 GetMonitorHeldState() const |
233 | { |
234 | LIMITED_METHOD_CONTRACT; |
235 | static_assert_no_msg(IsLockedMask == 1); |
236 | static_assert_no_msg(WaiterCountShift >= 1); |
237 | |
238 | // Return only the locked state and waiter count in the previous (m_MonitorHeld) layout for the debugger: |
239 | // bit 0: 1 if locked, 0 otherwise |
240 | // bits 1-31: waiter count |
241 | UINT32 state = m_state; |
242 | return (state & IsLockedMask) + (state >> WaiterCountShift << 1); |
243 | } |
244 | |
245 | public: |
246 | bool IsUnlockedWithNoWaiters() const |
247 | { |
248 | LIMITED_METHOD_CONTRACT; |
249 | return !(m_state & (IsLockedMask + WaiterCountMask)); |
250 | } |
251 | |
252 | void InitializeToLockedWithNoWaiters() |
253 | { |
254 | LIMITED_METHOD_CONTRACT; |
255 | _ASSERTE(!m_state); |
256 | |
257 | m_state = IsLockedMask; |
258 | } |
259 | |
260 | public: |
261 | bool IsLocked() const |
262 | { |
263 | LIMITED_METHOD_CONTRACT; |
264 | return !!(m_state & IsLockedMask); |
265 | } |
266 | |
267 | private: |
268 | void InvertIsLocked() |
269 | { |
270 | LIMITED_METHOD_CONTRACT; |
271 | m_state ^= IsLockedMask; |
272 | } |
273 | |
274 | public: |
275 | bool ShouldNotPreemptWaiters() const |
276 | { |
277 | LIMITED_METHOD_CONTRACT; |
278 | return !!(m_state & ShouldNotPreemptWaitersMask); |
279 | } |
280 | |
281 | private: |
282 | void InvertShouldNotPreemptWaiters() |
283 | { |
284 | WRAPPER_NO_CONTRACT; |
285 | |
286 | m_state ^= ShouldNotPreemptWaitersMask; |
287 | _ASSERTE(!ShouldNotPreemptWaiters() || HasAnyWaiters()); |
288 | } |
289 | |
290 | bool ShouldNonWaiterAttemptToAcquireLock() const |
291 | { |
292 | WRAPPER_NO_CONTRACT; |
293 | _ASSERTE(!ShouldNotPreemptWaiters() || HasAnyWaiters()); |
294 | |
295 | return !(m_state & (IsLockedMask + ShouldNotPreemptWaitersMask)); |
296 | } |
297 | |
298 | public: |
299 | bool HasAnySpinners() const |
300 | { |
301 | LIMITED_METHOD_CONTRACT; |
302 | return !!(m_state & SpinnerCountMask); |
303 | } |
304 | |
305 | private: |
306 | bool TryIncrementSpinnerCount() |
307 | { |
308 | WRAPPER_NO_CONTRACT; |
309 | |
310 | LockState newState = m_state + SpinnerCountIncrement; |
311 | if (newState.HasAnySpinners()) // overflow check |
312 | { |
313 | m_state = newState; |
314 | return true; |
315 | } |
316 | return false; |
317 | } |
318 | |
319 | void DecrementSpinnerCount() |
320 | { |
321 | WRAPPER_NO_CONTRACT; |
322 | _ASSERTE(HasAnySpinners()); |
323 | |
324 | m_state -= SpinnerCountIncrement; |
325 | } |
326 | |
327 | public: |
328 | bool IsWaiterSignaledToWake() const |
329 | { |
330 | LIMITED_METHOD_CONTRACT; |
331 | return !!(m_state & IsWaiterSignaledToWakeMask); |
332 | } |
333 | |
334 | private: |
335 | void InvertIsWaiterSignaledToWake() |
336 | { |
337 | LIMITED_METHOD_CONTRACT; |
338 | m_state ^= IsWaiterSignaledToWakeMask; |
339 | } |
340 | |
341 | public: |
342 | bool HasAnyWaiters() const |
343 | { |
344 | LIMITED_METHOD_CONTRACT; |
345 | return m_state >= WaiterCountIncrement; |
346 | } |
347 | |
348 | private: |
349 | void IncrementWaiterCount() |
350 | { |
351 | LIMITED_METHOD_CONTRACT; |
352 | _ASSERTE(m_state + WaiterCountIncrement >= WaiterCountIncrement); |
353 | |
354 | m_state += WaiterCountIncrement; |
355 | } |
356 | |
357 | void DecrementWaiterCount() |
358 | { |
359 | WRAPPER_NO_CONTRACT; |
360 | _ASSERTE(HasAnyWaiters()); |
361 | |
362 | m_state -= WaiterCountIncrement; |
363 | } |
364 | |
365 | private: |
366 | bool NeedToSignalWaiter() const |
367 | { |
368 | WRAPPER_NO_CONTRACT; |
369 | return HasAnyWaiters() && !(m_state & (SpinnerCountMask + IsWaiterSignaledToWakeMask)); |
370 | } |
371 | |
372 | private: |
373 | operator UINT32() const |
374 | { |
375 | LIMITED_METHOD_CONTRACT; |
376 | return m_state; |
377 | } |
378 | |
379 | LockState &operator =(UINT32 state) |
380 | { |
381 | LIMITED_METHOD_CONTRACT; |
382 | |
383 | m_state = state; |
384 | return *this; |
385 | } |
386 | |
387 | public: |
388 | LockState VolatileLoadWithoutBarrier() const |
389 | { |
390 | WRAPPER_NO_CONTRACT; |
391 | return ::VolatileLoadWithoutBarrier(&m_state); |
392 | } |
393 | |
394 | LockState VolatileLoad() const |
395 | { |
396 | WRAPPER_NO_CONTRACT; |
397 | return ::VolatileLoad(&m_state); |
398 | } |
399 | |
400 | private: |
401 | LockState CompareExchange(LockState toState, LockState fromState) |
402 | { |
403 | LIMITED_METHOD_CONTRACT; |
404 | return (UINT32)InterlockedCompareExchange((LONG *)&m_state, (LONG)toState, (LONG)fromState); |
405 | } |
406 | |
407 | LockState CompareExchangeAcquire(LockState toState, LockState fromState) |
408 | { |
409 | LIMITED_METHOD_CONTRACT; |
410 | return (UINT32)InterlockedCompareExchangeAcquire((LONG *)&m_state, (LONG)toState, (LONG)fromState); |
411 | } |
412 | |
413 | public: |
414 | bool InterlockedTryLock(); |
415 | bool InterlockedTryLock(LockState state); |
416 | bool InterlockedUnlock(); |
417 | bool InterlockedTrySetShouldNotPreemptWaitersIfNecessary(AwareLock *awareLock); |
418 | bool InterlockedTrySetShouldNotPreemptWaitersIfNecessary(AwareLock *awareLock, LockState state); |
419 | EnterHelperResult InterlockedTry_LockOrRegisterSpinner(LockState state); |
420 | EnterHelperResult InterlockedTry_LockAndUnregisterSpinner(); |
421 | bool InterlockedUnregisterSpinner_TryLock(); |
422 | bool InterlockedTryLock_Or_RegisterWaiter(AwareLock *awareLock, LockState state); |
423 | void InterlockedUnregisterWaiter(); |
424 | bool InterlockedTry_LockAndUnregisterWaiterAndObserveWakeSignal(AwareLock *awareLock); |
425 | bool InterlockedObserveWakeSignal_Try_LockAndUnregisterWaiter(AwareLock *awareLock); |
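
// A minimal sketch of how InterlockedTryLock() is expected to compose the
// pieces above (the real definitions live in the implementation files; this
// is illustrative only):
//
//      LockState state = VolatileLoadWithoutBarrier();
//      if (!state.ShouldNonWaiterAttemptToAcquireLock())
//          return false;
//      LockState newState = state;
//      newState.InvertIsLocked();
//      return CompareExchangeAcquire(newState, state) == state;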
426 | }; |
427 | |
428 | friend class LockState; |
429 | |
430 | private: |
431 | // Take care to use 'm_lockState.VolatileLoadWithoutBarrier()` when loading this value into a local variable that will be |
432 | // reused. That prevents an optimization in the compiler that avoids stack-spilling a value loaded from memory and instead |
433 | // reloads the value from the original memory location under the assumption that it would not be changed by another thread, |
// which can result in the local variable's value changing between reads if the memory location is modified by another
435 | // thread. This is important for patterns such as: |
436 | // |
437 | // T x = m_x; // no barrier |
438 | // if (meetsCondition(x)) |
439 | // { |
440 | // assert(meetsCondition(x)); // This may fail! |
441 | // } |
442 | // |
443 | // The code should be written like this instead: |
444 | // |
445 | // T x = VolatileLoadWithoutBarrier(&m_x); // compile-time barrier, no run-time barrier |
446 | // if (meetsCondition(x)) |
447 | // { |
448 | // assert(meetsCondition(x)); // This will not fail |
449 | // } |
450 | LockState m_lockState; |
451 | |
452 | ULONG m_Recursion; |
453 | PTR_Thread m_HoldingThread; |
454 | |
455 | LONG m_TransientPrecious; |
456 | |
457 | |
458 | // This is a backpointer from the syncblock to the synctable entry. This allows |
459 | // us to recover the object that holds the syncblock. |
460 | DWORD m_dwSyncIndex; |
461 | |
462 | CLREvent m_SemEvent; |
463 | |
464 | DWORD m_waiterStarvationStartTimeMs; |
465 | |
466 | static const DWORD WaiterStarvationDurationMsBeforeStoppingPreemptingWaiters = 100; |
467 | |
468 | // Only SyncBlocks can create AwareLocks. Hence this private constructor. |
469 | AwareLock(DWORD indx) |
470 | : m_Recursion(0), |
471 | #ifndef DACCESS_COMPILE |
// PreFAST has trouble with initializing a NULL PTR_Thread.
473 | m_HoldingThread(NULL), |
474 | #endif // DACCESS_COMPILE |
475 | m_TransientPrecious(0), |
476 | m_dwSyncIndex(indx), |
477 | m_waiterStarvationStartTimeMs(0) |
478 | { |
479 | LIMITED_METHOD_CONTRACT; |
480 | } |
481 | |
482 | ~AwareLock() |
483 | { |
484 | LIMITED_METHOD_CONTRACT; |
485 | // We deliberately allow this to remain incremented if an exception blows |
486 | // through a lock attempt. This simply prevents the GC from aggressively |
487 | // reclaiming a particular syncblock until the associated object is garbage. |
488 | // From a perf perspective, it's not worth using SEH to prevent this from |
489 | // happening. |
490 | // |
491 | // _ASSERTE(m_TransientPrecious == 0); |
492 | } |
493 | |
494 | #if defined(ENABLE_CONTRACTS_IMPL) |
495 | // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do |
496 | // comparisons between takes & releases (and to provide debugging info to the |
497 | // developer). Since AwareLocks are always allocated embedded inside SyncBlocks, |
498 | // and since SyncBlocks don't move (unlike the GC objects that use |
499 | // the syncblocks), it's safe for us to just use the AwareLock pointer directly |
500 | void * GetPtrForLockContract() |
501 | { |
502 | return (void *) this; |
503 | } |
504 | #endif // defined(ENABLE_CONTRACTS_IMPL) |
505 | |
506 | public: |
507 | UINT32 GetLockState() const |
508 | { |
509 | WRAPPER_NO_CONTRACT; |
510 | return m_lockState.VolatileLoadWithoutBarrier().GetState(); |
511 | } |
512 | |
513 | bool IsUnlockedWithNoWaiters() const |
514 | { |
515 | WRAPPER_NO_CONTRACT; |
516 | return m_lockState.VolatileLoadWithoutBarrier().IsUnlockedWithNoWaiters(); |
517 | } |
518 | |
519 | UINT32 GetMonitorHeldStateVolatile() const |
520 | { |
521 | WRAPPER_NO_CONTRACT; |
522 | return m_lockState.VolatileLoad().GetMonitorHeldState(); |
523 | } |
524 | |
525 | ULONG GetRecursionLevel() const |
526 | { |
527 | LIMITED_METHOD_CONTRACT; |
528 | return m_Recursion; |
529 | } |
530 | |
531 | PTR_Thread GetHoldingThread() const |
532 | { |
533 | LIMITED_METHOD_CONTRACT; |
534 | return m_HoldingThread; |
535 | } |
536 | |
537 | private: |
538 | void ResetWaiterStarvationStartTime(); |
539 | void RecordWaiterStarvationStartTime(); |
540 | bool ShouldStopPreemptingWaiters() const; |
541 | |
542 | private: // friend access is required for this unsafe function |
543 | void InitializeToLockedWithNoWaiters(ULONG recursionLevel, PTR_Thread holdingThread) |
544 | { |
545 | WRAPPER_NO_CONTRACT; |
546 | |
547 | m_lockState.InitializeToLockedWithNoWaiters(); |
548 | m_Recursion = recursionLevel; |
549 | m_HoldingThread = holdingThread; |
550 | } |
551 | |
552 | public: |
553 | static void SpinWait(const YieldProcessorNormalizationInfo &normalizationInfo, DWORD spinIteration); |
554 | |
555 | // Helper encapsulating the fast path entering monitor. Returns what kind of result was achieved. |
556 | bool TryEnterHelper(Thread* pCurThread); |
557 | |
558 | EnterHelperResult TryEnterBeforeSpinLoopHelper(Thread *pCurThread); |
559 | EnterHelperResult TryEnterInsideSpinLoopHelper(Thread *pCurThread); |
560 | bool TryEnterAfterSpinLoopHelper(Thread *pCurThread); |
561 | |
562 | // Helper encapsulating the core logic for leaving monitor. Returns what kind of |
563 | // follow up action is necessary |
564 | AwareLock::LeaveHelperAction LeaveHelper(Thread* pCurThread); |
565 | |
566 | void Enter(); |
567 | BOOL TryEnter(INT32 timeOut = 0); |
568 | BOOL EnterEpilog(Thread *pCurThread, INT32 timeOut = INFINITE); |
569 | BOOL EnterEpilogHelper(Thread *pCurThread, INT32 timeOut); |
570 | BOOL Leave(); |
571 | |
572 | void Signal() |
573 | { |
574 | WRAPPER_NO_CONTRACT; |
575 | |
// CLREvent::SetMonitorEvent works even if the event has not been initialized yet
577 | m_SemEvent.SetMonitorEvent(); |
578 | |
579 | m_lockState.InterlockedTrySetShouldNotPreemptWaitersIfNecessary(this); |
580 | } |
581 | |
582 | void AllocLockSemEvent(); |
583 | LONG LeaveCompletely(); |
584 | BOOL OwnedByCurrentThread(); |
585 | |
586 | void IncrementTransientPrecious() |
587 | { |
588 | LIMITED_METHOD_CONTRACT; |
589 | FastInterlockIncrement(&m_TransientPrecious); |
590 | _ASSERTE(m_TransientPrecious > 0); |
591 | } |
592 | |
593 | void DecrementTransientPrecious() |
594 | { |
595 | LIMITED_METHOD_CONTRACT; |
596 | _ASSERTE(m_TransientPrecious > 0); |
597 | FastInterlockDecrement(&m_TransientPrecious); |
598 | } |
599 | |
600 | DWORD GetSyncBlockIndex(); |
601 | |
602 | void SetPrecious(); |
603 | |
604 | // Provide access to the object associated with this awarelock, so client can |
605 | // protect it. |
606 | inline OBJECTREF GetOwningObject(); |
607 | |
608 | // Provide access to the Thread object that owns this awarelock. This is used |
609 | // to provide a host to find out owner of a lock. |
610 | inline PTR_Thread GetOwningThread() |
611 | { |
612 | LIMITED_METHOD_CONTRACT; |
613 | return m_HoldingThread; |
614 | } |
615 | }; |
616 | |
617 | #ifdef FEATURE_COMINTEROP |
618 | class ComCallWrapper; |
619 | class ComClassFactory; |
620 | struct RCW; |
621 | class RCWHolder; |
622 | typedef DPTR(class ComCallWrapper) PTR_ComCallWrapper; |
623 | #endif // FEATURE_COMINTEROP |
624 | |
625 | class InteropSyncBlockInfo |
626 | { |
627 | friend class RCWHolder; |
628 | |
629 | public: |
630 | #ifndef FEATURE_PAL |
631 | // List of InteropSyncBlockInfo instances that have been freed since the last syncblock cleanup. |
632 | static SLIST_HEADER s_InteropInfoStandbyList; |
633 | #endif // !FEATURE_PAL |
634 | |
635 | InteropSyncBlockInfo() |
636 | { |
637 | LIMITED_METHOD_CONTRACT; |
638 | ZeroMemory(this, sizeof(InteropSyncBlockInfo)); |
639 | } |
640 | #ifndef DACCESS_COMPILE |
641 | ~InteropSyncBlockInfo(); |
642 | #endif |
643 | |
644 | #ifndef FEATURE_PAL |
645 | // Deletes all items in code:s_InteropInfoStandbyList. |
646 | static void FlushStandbyList(); |
647 | #endif // !FEATURE_PAL |
648 | |
649 | #ifdef FEATURE_COMINTEROP |
650 | |
651 | // |
652 | // We'll be using the sentinel value of 0x1 to indicate that a particular |
653 | // field was set at one time, but is now NULL. |
654 | |
655 | #ifndef DACCESS_COMPILE |
656 | RCW* GetRawRCW() |
657 | { |
658 | LIMITED_METHOD_CONTRACT; |
659 | return (RCW *)((size_t)m_pRCW & ~1); |
660 | } |
661 | |
662 | // Returns either NULL or an RCW on which AcquireLock has been called. |
663 | RCW* GetRCWAndIncrementUseCount(); |
664 | |
665 | // Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL. |
666 | void SetRawRCW(RCW* pRCW); |
667 | |
668 | bool RCWWasUsed() |
669 | { |
670 | LIMITED_METHOD_CONTRACT; |
671 | |
672 | return (m_pRCW != NULL); |
673 | } |
674 | #else // !DACCESS_COMPILE |
675 | TADDR DacGetRawRCW() |
676 | { |
677 | return (TADDR)((size_t)m_pRCW & ~1); |
678 | } |
679 | #endif // DACCESS_COMPILE |
680 | |
681 | #ifndef DACCESS_COMPILE |
682 | void SetCCW(ComCallWrapper* pCCW) |
683 | { |
684 | LIMITED_METHOD_CONTRACT; |
685 | |
686 | if (pCCW == NULL) |
687 | pCCW = (ComCallWrapper*) 0x1; |
688 | |
689 | m_pCCW = pCCW; |
690 | } |
691 | #endif // !DACCESS_COMPILE |
692 | |
693 | PTR_ComCallWrapper GetCCW() |
694 | { |
695 | LIMITED_METHOD_DAC_CONTRACT; |
696 | |
697 | if (m_pCCW == (PTR_ComCallWrapper)0x1) |
698 | return NULL; |
699 | |
700 | return m_pCCW; |
701 | } |
702 | |
703 | bool CCWWasUsed() |
704 | { |
705 | LIMITED_METHOD_CONTRACT; |
706 | |
707 | if (m_pCCW == NULL) |
708 | return false; |
709 | |
710 | return true; |
711 | } |
712 | |
713 | #ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION |
714 | void SetComClassFactory(ComClassFactory* pCCF) |
715 | { |
716 | LIMITED_METHOD_CONTRACT; |
717 | |
718 | if (pCCF == NULL) |
719 | pCCF = (ComClassFactory*)0x1; |
720 | |
721 | m_pCCF = pCCF; |
722 | } |
723 | |
724 | ComClassFactory* GetComClassFactory() |
725 | { |
726 | LIMITED_METHOD_CONTRACT; |
727 | |
728 | if (m_pCCF == (ComClassFactory*)0x1) |
729 | return NULL; |
730 | |
731 | return m_pCCF; |
732 | } |
733 | |
734 | bool CCFWasUsed() |
735 | { |
736 | LIMITED_METHOD_CONTRACT; |
737 | |
738 | if (m_pCCF == NULL) |
739 | return false; |
740 | |
741 | return true; |
742 | } |
743 | #endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION |
744 | #endif // FEATURE_COMINTEROP |
745 | |
746 | #if !defined(DACCESS_COMPILE) |
747 | // set m_pUMEntryThunkOrInterceptStub if not already set - return true if not already set |
748 | bool SetUMEntryThunk(void* pUMEntryThunk) |
749 | { |
750 | WRAPPER_NO_CONTRACT; |
751 | return (FastInterlockCompareExchangePointer(&m_pUMEntryThunkOrInterceptStub, |
752 | pUMEntryThunk, |
753 | NULL) == NULL); |
754 | } |
755 | |
756 | // set m_pUMEntryThunkOrInterceptStub if not already set - return true if not already set |
757 | bool SetInterceptStub(Stub* pInterceptStub) |
758 | { |
759 | WRAPPER_NO_CONTRACT; |
760 | void *pPtr = (void *)((UINT_PTR)pInterceptStub | 1); |
761 | return (FastInterlockCompareExchangePointer(&m_pUMEntryThunkOrInterceptStub, |
762 | pPtr, |
763 | NULL) == NULL); |
764 | } |
765 | |
766 | void FreeUMEntryThunkOrInterceptStub(); |
767 | |
768 | void OnADUnload(); |
769 | |
770 | #endif // DACCESS_COMPILE |
771 | |
772 | void* GetUMEntryThunk() |
773 | { |
774 | LIMITED_METHOD_CONTRACT; |
775 | return (((UINT_PTR)m_pUMEntryThunkOrInterceptStub & 1) ? NULL : m_pUMEntryThunkOrInterceptStub); |
776 | } |
777 | |
778 | Stub* GetInterceptStub() |
779 | { |
780 | LIMITED_METHOD_CONTRACT; |
781 | return (((UINT_PTR)m_pUMEntryThunkOrInterceptStub & 1) ? (Stub *)((UINT_PTR)m_pUMEntryThunkOrInterceptStub & ~1) : NULL); |
782 | } |
783 | |
784 | private: |
785 | // If this is a delegate marshalled out to unmanaged code, this points |
786 | // to the thunk generated for unmanaged code to call back on. |
787 | // If this is a delegate representing an unmanaged function pointer, |
788 | // this may point to a stub that intercepts calls to the unmng target. |
789 | // An example of an intercept call is pInvokeStackImbalance MDA. |
790 | // We differentiate between a thunk or intercept stub by setting the lowest |
791 | // bit if it is an intercept stub. |
792 | void* m_pUMEntryThunkOrInterceptStub; |
793 | |
794 | #ifdef FEATURE_COMINTEROP |
795 | // If this object is being exposed to COM, it will have an associated CCW object |
796 | PTR_ComCallWrapper m_pCCW; |
797 | |
798 | #ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION |
799 | // If this object represents a type object, it will have an associated class factory |
800 | ComClassFactory* m_pCCF; |
801 | #endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION |
802 | |
803 | public: |
804 | #ifndef DACCESS_COMPILE |
805 | // If this is a __ComObject, it will have an associated RCW object |
806 | RCW* m_pRCW; |
807 | #else |
808 | // We can't define this as PTR_RCW, as this would create a typedef cycle. Use TADDR |
809 | // instead. |
810 | TADDR m_pRCW; |
811 | #endif |
812 | #endif // FEATURE_COMINTEROP |
813 | |
814 | }; |
815 | |
816 | typedef DPTR(InteropSyncBlockInfo) PTR_InteropSyncBlockInfo; |
817 | |
818 | // this is a lazily created additional block for an object which contains |
// synchronization information and other "kitchen sink" data
820 | typedef DPTR(SyncBlock) PTR_SyncBlock; |
821 | // See code:#SyncBlockOverview for more |
822 | class SyncBlock |
823 | { |
824 | // ObjHeader creates our Mutex and Event |
825 | friend class ObjHeader; |
826 | friend class SyncBlockCache; |
827 | friend struct ThreadQueue; |
828 | #ifdef DACCESS_COMPILE |
829 | friend class ClrDataAccess; |
830 | #endif |
831 | friend class CheckAsmOffsets; |
832 | |
833 | protected: |
834 | AwareLock m_Monitor; // the actual monitor |
835 | |
836 | public: |
837 | // If this object is exposed to unmanaged code, we keep some extra info here. |
838 | PTR_InteropSyncBlockInfo m_pInteropInfo; |
839 | |
840 | protected: |
841 | #ifdef EnC_SUPPORTED |
842 | // And if the object has new fields added via EnC, this is a list of them |
843 | PTR_EnCSyncBlockInfo m_pEnCInfo; |
844 | #endif // EnC_SUPPORTED |
845 | |
846 | // We thread two different lists through this link. When the SyncBlock is |
847 | // active, we create a list of waiting threads here. When the SyncBlock is |
848 | // released (we recycle them), the SyncBlockCache maintains a free list of |
849 | // SyncBlocks here. |
850 | // |
851 | // We can't afford to use an SList<> here because we only want to burn |
852 | // space for the minimum, which is the pointer within an SLink. |
853 | SLink m_Link; |
854 | |
855 | // This is the index for the appdomain to which the object belongs. If we |
856 | // can't set it in the object header, then we set it here. Note that an |
857 | // object doesn't always have this filled in. Only for COM interop, |
858 | // finalizers and objects in handles |
859 | ADIndex m_dwAppDomainIndex; |
860 | |
// This is the hash code for the object. It can either have been transferred
862 | // from the header dword, in which case it will be limited to 26 bits, or |
863 | // have been generated right into this member variable here, when it will |
864 | // be a full 32 bits. |
865 | |
866 | // A 0 in this variable means no hash code has been set yet - this saves having |
867 | // another flag to express this state, and it enables us to use a 32-bit interlocked |
868 | // operation to set the hash code, on the other hand it means that hash codes |
869 | // can never be 0. ObjectNative::GetHashCode in COMObject.cpp makes sure to enforce this. |
870 | DWORD m_dwHashCode; |
871 | |
// In some early versions of VB, when there were no arrays, developers used
// BSTRs as arrays by adding a trail byte at the end of the BSTR.
// To support this scenario, we need to use the sync block for this special case and
// save the trail character in here.
// This stores the trail character when a BSTR is used as an array.
877 | WCHAR m_BSTRTrailByte; |
878 | |
879 | public: |
880 | SyncBlock(DWORD indx) |
881 | : m_Monitor(indx) |
882 | #ifdef EnC_SUPPORTED |
883 | , m_pEnCInfo(PTR_NULL) |
884 | #endif // EnC_SUPPORTED |
885 | , m_dwHashCode(0) |
886 | , m_BSTRTrailByte(0) |
887 | { |
888 | LIMITED_METHOD_CONTRACT; |
889 | |
890 | m_pInteropInfo = NULL; |
891 | |
892 | // The monitor must be 32-bit aligned for atomicity to be guaranteed. |
893 | _ASSERTE((((size_t) &m_Monitor) & 3) == 0); |
894 | } |
895 | |
896 | DWORD GetSyncBlockIndex() |
897 | { |
898 | LIMITED_METHOD_CONTRACT; |
899 | return m_Monitor.GetSyncBlockIndex(); |
900 | } |
901 | |
902 | // As soon as a syncblock acquires some state that cannot be recreated, we latch |
903 | // a bit. |
904 | void SetPrecious() |
905 | { |
906 | WRAPPER_NO_CONTRACT; |
907 | m_Monitor.SetPrecious(); |
908 | } |
909 | |
910 | BOOL IsPrecious() |
911 | { |
912 | LIMITED_METHOD_CONTRACT; |
913 | return (m_Monitor.m_dwSyncIndex & SyncBlockPrecious) != 0; |
914 | } |
915 | |
916 | void OnADUnload(); |
917 | |
// True if the syncblock and its index are disposable.
919 | // If new members are added to the syncblock, this |
920 | // method needs to be modified accordingly |
921 | BOOL IsIDisposable() |
922 | { |
923 | WRAPPER_NO_CONTRACT; |
924 | return (!IsPrecious() && |
925 | m_Monitor.IsUnlockedWithNoWaiters() && |
926 | m_Monitor.m_TransientPrecious == 0); |
927 | } |
928 | |
929 | // Gets the InteropInfo block, creates a new one if none is present. |
930 | InteropSyncBlockInfo* GetInteropInfo() |
931 | { |
932 | CONTRACT (InteropSyncBlockInfo*) |
933 | { |
934 | THROWS; |
935 | GC_TRIGGERS; |
936 | MODE_ANY; |
937 | POSTCONDITION(CheckPointer(RETVAL)); |
938 | } |
939 | CONTRACT_END; |
940 | |
941 | if (!m_pInteropInfo) |
942 | { |
943 | NewHolder<InteropSyncBlockInfo> pInteropInfo; |
944 | #ifndef FEATURE_PAL |
945 | pInteropInfo = (InteropSyncBlockInfo *)InterlockedPopEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList); |
946 | |
947 | if (pInteropInfo != NULL) |
948 | { |
949 | // cache hit - reinitialize the data structure |
950 | new (pInteropInfo) InteropSyncBlockInfo(); |
951 | } |
952 | else |
953 | #endif // !FEATURE_PAL |
954 | { |
955 | pInteropInfo = new InteropSyncBlockInfo(); |
956 | } |
957 | |
958 | if (SetInteropInfo(pInteropInfo)) |
959 | pInteropInfo.SuppressRelease(); |
960 | } |
961 | |
962 | RETURN m_pInteropInfo; |
963 | } |
964 | |
965 | PTR_InteropSyncBlockInfo GetInteropInfoNoCreate() |
966 | { |
967 | CONTRACT (PTR_InteropSyncBlockInfo) |
968 | { |
969 | NOTHROW; |
970 | GC_NOTRIGGER; |
971 | MODE_ANY; |
972 | SO_TOLERANT; |
973 | SUPPORTS_DAC; |
974 | POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); |
975 | } |
976 | CONTRACT_END; |
977 | |
978 | RETURN m_pInteropInfo; |
979 | } |
980 | |
981 | // Returns false if the InteropInfo block was already set - does not overwrite the previous value. |
982 | // True if the InteropInfo block was successfully set with the passed in value. |
983 | bool SetInteropInfo(InteropSyncBlockInfo* pInteropInfo); |
984 | |
985 | #ifdef EnC_SUPPORTED |
986 | // Get information about fields added to this object by the Debugger's Edit and Continue support |
987 | PTR_EnCSyncBlockInfo GetEnCInfo() |
988 | { |
989 | LIMITED_METHOD_DAC_CONTRACT; |
990 | return m_pEnCInfo; |
991 | } |
992 | |
993 | // Store information about fields added to this object by the Debugger's Edit and Continue support |
994 | void SetEnCInfo(EnCSyncBlockInfo *pEnCInfo); |
995 | #endif // EnC_SUPPORTED |
996 | |
997 | ADIndex GetAppDomainIndex() |
998 | { |
999 | LIMITED_METHOD_DAC_CONTRACT; |
1000 | return m_dwAppDomainIndex; |
1001 | } |
1002 | |
1003 | void SetAppDomainIndex(ADIndex dwAppDomainIndex) |
1004 | { |
1005 | WRAPPER_NO_CONTRACT; |
1006 | SetPrecious(); |
1007 | m_dwAppDomainIndex = dwAppDomainIndex; |
1008 | } |
1009 | |
1010 | DWORD GetHashCode() |
1011 | { |
1012 | LIMITED_METHOD_CONTRACT; |
1013 | return m_dwHashCode; |
1014 | } |
1015 | |
1016 | DWORD SetHashCode(DWORD hashCode) |
1017 | { |
1018 | WRAPPER_NO_CONTRACT; |
1019 | DWORD result = FastInterlockCompareExchange((LONG*)&m_dwHashCode, hashCode, 0); |
1020 | if (result == 0) |
1021 | { |
1022 | // the sync block now holds a hash code, which we can't afford to lose. |
1023 | SetPrecious(); |
1024 | return hashCode; |
1025 | } |
1026 | else |
1027 | return result; |
1028 | } |
1029 | |
1030 | void *operator new (size_t sz, void* p) |
1031 | { |
1032 | LIMITED_METHOD_CONTRACT; |
return p;
1034 | } |
1035 | void operator delete(void *p) |
1036 | { |
1037 | LIMITED_METHOD_CONTRACT; |
1038 | // We've already destructed. But retain the memory. |
1039 | } |
1040 | |
1041 | void EnterMonitor() |
1042 | { |
1043 | WRAPPER_NO_CONTRACT; |
1044 | m_Monitor.Enter(); |
1045 | } |
1046 | |
1047 | BOOL TryEnterMonitor(INT32 timeOut = 0) |
1048 | { |
1049 | WRAPPER_NO_CONTRACT; |
1050 | return m_Monitor.TryEnter(timeOut); |
1051 | } |
1052 | |
1053 | // leave the monitor |
1054 | BOOL LeaveMonitor() |
1055 | { |
1056 | WRAPPER_NO_CONTRACT; |
1057 | return m_Monitor.Leave(); |
1058 | } |
1059 | |
1060 | AwareLock* GetMonitor() |
1061 | { |
1062 | WRAPPER_NO_CONTRACT; |
1063 | SUPPORTS_DAC; |
1064 | //hold the syncblock |
1065 | #ifndef DACCESS_COMPILE |
1066 | SetPrecious(); |
1067 | #endif |
1068 | |
1069 | //Note that for DAC we did not return a PTR_ type. This pointer is interior and |
1070 | //the SyncBlock has already been marshaled so that GetMonitor could be called. |
1071 | return &m_Monitor; |
1072 | } |
1073 | |
1074 | AwareLock* QuickGetMonitor() |
1075 | { |
1076 | LIMITED_METHOD_CONTRACT; |
1077 | // Note that the syncblock isn't marked precious, so use caution when |
1078 | // calling this method. |
1079 | return &m_Monitor; |
1080 | } |
1081 | |
1082 | BOOL DoesCurrentThreadOwnMonitor() |
1083 | { |
1084 | WRAPPER_NO_CONTRACT; |
1085 | return m_Monitor.OwnedByCurrentThread(); |
1086 | } |
1087 | |
1088 | LONG LeaveMonitorCompletely() |
1089 | { |
1090 | WRAPPER_NO_CONTRACT; |
1091 | return m_Monitor.LeaveCompletely(); |
1092 | } |
1093 | |
1094 | BOOL Wait(INT32 timeOut, BOOL exitContext); |
1095 | void Pulse(); |
1096 | void PulseAll(); |
1097 | |
1098 | enum |
1099 | { |
1100 | // This bit indicates that the syncblock is valuable and can neither be discarded |
1101 | // nor re-created. |
1102 | SyncBlockPrecious = 0x80000000, |
1103 | }; |
1104 | |
1105 | BOOL HasCOMBstrTrailByte() |
1106 | { |
1107 | LIMITED_METHOD_CONTRACT; |
1108 | return (m_BSTRTrailByte!=0); |
1109 | } |
1110 | WCHAR GetCOMBstrTrailByte() |
1111 | { |
1112 | return m_BSTRTrailByte; |
1113 | } |
1114 | void SetCOMBstrTrailByte(WCHAR trailByte) |
1115 | { |
1116 | WRAPPER_NO_CONTRACT; |
1117 | m_BSTRTrailByte = trailByte; |
1118 | SetPrecious(); |
1119 | } |
1120 | |
1121 | protected: |
1122 | // <NOTE> |
1123 | // This should ONLY be called when initializing a SyncBlock (i.e. ONLY from |
1124 | // ObjHeader::GetSyncBlock()), otherwise we'll have a race condition. |
1125 | // </NOTE> |
1126 | void InitState(ULONG recursionLevel, PTR_Thread holdingThread) |
1127 | { |
1128 | WRAPPER_NO_CONTRACT; |
1129 | m_Monitor.InitializeToLockedWithNoWaiters(recursionLevel, holdingThread); |
1130 | } |
1131 | |
1132 | #if defined(ENABLE_CONTRACTS_IMPL) |
1133 | // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do |
1134 | // comparisons between takes & releases (and to provide debugging info to the |
1135 | // developer). Use the AwareLock (m_Monitor) |
1136 | void * GetPtrForLockContract() |
1137 | { |
1138 | return m_Monitor.GetPtrForLockContract(); |
1139 | } |
1140 | #endif // defined(ENABLE_CONTRACTS_IMPL) |
1141 | }; |
1142 | |
1143 | class SyncTableEntry |
1144 | { |
1145 | public: |
1146 | PTR_SyncBlock m_SyncBlock; |
1147 | VolatilePtr<Object, PTR_Object> m_Object; |
1148 | static PTR_SyncTableEntry GetSyncTableEntry(); |
1149 | #ifndef DACCESS_COMPILE |
1150 | static SyncTableEntry*& GetSyncTableEntryByRef(); |
1151 | #endif |
1152 | }; |
1153 | |
1154 | #ifdef _DEBUG |
1155 | extern void DumpSyncBlockCache(); |
1156 | #endif |
1157 | |
1158 | // this class stores free sync blocks after they're allocated and |
1159 | // unused |
1160 | |
1161 | typedef DPTR(SyncBlockCache) PTR_SyncBlockCache; |
1162 | |
1163 | // The SyncBlockCache is the data structure that manages SyncBlocks |
// as well as SyncTableEntries (See explanation at top of this file).
1165 | // |
1166 | // There is only one process global SyncBlockCache (SyncBlockCache::s_pSyncBlockCache) |
1167 | // and SyncTableEntry table (g_pSyncTable). |
1168 | // |
1169 | // see code:#SyncBlockOverview for more |
1170 | class SyncBlockCache |
1171 | { |
1172 | #ifdef DACCESS_COMPILE |
1173 | friend class ClrDataAccess; |
1174 | #endif |
1175 | |
1176 | friend class SyncBlock; |
1177 | |
1178 | |
1179 | private: |
1180 | PTR_SLink m_pCleanupBlockList; // list of sync blocks that need cleanup |
1181 | SLink* m_FreeBlockList; // list of free sync blocks |
1182 | Crst m_CacheLock; // cache lock |
DWORD m_FreeCount; // count of free sync blocks
1184 | DWORD m_ActiveCount; // number active |
1185 | SyncBlockArray *m_SyncBlocks; // Array of new SyncBlocks. |
1186 | DWORD m_FreeSyncBlock; // Next Free Syncblock in the array |
1187 | |
// The next variables deal with SyncTableEntries. Instead of having the object-header
// point directly at SyncBlocks, the object points at a SyncTableEntry, which points at
// the SyncBlock. This is done because in a common case (need a hash code for an object)
// you just need a SyncTableEntry.
1192 | |
DWORD m_FreeSyncTableIndex; // We allocate a large array of SyncTableEntry structures.
// This index points at the boundary between used and never-been-
// used SyncTableEntries.
size_t m_FreeSyncTableList; // index of the first free SyncTableEntry in our free list.
// The entry at this index has its m_Object field set to the index
// of the next element (shifted by 1, low bit marks not in use)
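// Example of the encoding (illustrative): a free entry whose next free
// entry is at index 5 stores (5 << 1) | 1 == 11 in m_Object; the low
// bit distinguishes it from a real (aligned) object pointer.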
1199 | DWORD m_SyncTableSize; |
1200 | SyncTableEntry *m_OldSyncTables; // Next old SyncTable |
1201 | |
1202 | BOOL m_bSyncBlockCleanupInProgress; // A flag indicating if sync block cleanup is in progress. |
1203 | DWORD* m_EphemeralBitmap; // card table for ephemeral scanning |
1204 | |
1205 | BOOL GCWeakPtrScanElement(int elindex, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2, BOOL& cleanup); |
1206 | |
1207 | void SetCard (size_t card); |
1208 | void ClearCard (size_t card); |
1209 | BOOL CardSetP (size_t card); |
1210 | void CardTableSetBit (size_t idx); |
1211 | void Grow(); |
1212 | |
1213 | |
1214 | public: |
1215 | SPTR_DECL(SyncBlockCache, s_pSyncBlockCache); |
1216 | static SyncBlockCache*& GetSyncBlockCache(); |
1217 | |
1218 | void *operator new(size_t size, void *pInPlace) |
1219 | { |
1220 | LIMITED_METHOD_CONTRACT; |
1221 | return pInPlace; |
1222 | } |
1223 | |
1224 | void operator delete(void *p) |
1225 | { |
1226 | LIMITED_METHOD_CONTRACT; |
1227 | } |
1228 | |
1229 | SyncBlockCache(); |
1230 | ~SyncBlockCache(); |
1231 | |
1232 | static void Attach(); |
1233 | static void Detach(); |
1234 | void DoDetach(); |
1235 | |
1236 | static void Start(); |
1237 | static void Stop(); |
1238 | |
1239 | // returns and removes next from free list |
1240 | SyncBlock* GetNextFreeSyncBlock(); |
1241 | // returns and removes the next from cleanup list |
1242 | SyncBlock* GetNextCleanupSyncBlock(); |
1243 | // inserts a syncblock into the cleanup list |
1244 | void InsertCleanupSyncBlock(SyncBlock* psb); |
1245 | |
1246 | // Obtain a new syncblock slot in the SyncBlock table. Used as a hash code |
1247 | DWORD NewSyncBlockSlot(Object *obj); |
1248 | |
1249 | // return sync block to cache or delete |
1250 | void DeleteSyncBlock(SyncBlock *sb); |
1251 | |
1252 | // returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already) |
1253 | void DeleteSyncBlockMemory(SyncBlock *sb); |
1254 | |
1255 | // return sync block to cache or delete, called from GC |
1256 | void GCDeleteSyncBlock(SyncBlock *sb); |
1257 | |
1258 | void GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2); |
1259 | |
1260 | void GCDone(BOOL demoting, int max_gen); |
1261 | |
1262 | void CleanupSyncBlocks(); |
1263 | |
1264 | void CleanupSyncBlocksInAppDomain(AppDomain *pDomain); |
1265 | |
1266 | int GetTableEntryCount() |
1267 | { |
1268 | LIMITED_METHOD_CONTRACT; |
1269 | return m_FreeSyncTableIndex - 1; |
1270 | } |
1271 | |
1272 | // Determines if a sync block cleanup is in progress. |
1273 | BOOL IsSyncBlockCleanupInProgress() |
1274 | { |
1275 | LIMITED_METHOD_CONTRACT; |
1276 | return m_bSyncBlockCleanupInProgress; |
1277 | } |
1278 | |
1279 | DWORD GetActiveCount() |
1280 | { |
1281 | return m_ActiveCount; |
1282 | } |
1283 | |
1284 | // Encapsulate a CrstHolder, so that clients of our lock don't have to know |
1285 | // the details of our implementation. |
1286 | class LockHolder : public CrstHolder |
1287 | { |
1288 | public: |
1289 | LockHolder(SyncBlockCache *pCache) |
1290 | : CrstHolder(&pCache->m_CacheLock) |
1291 | { |
1292 | CONTRACTL |
1293 | { |
1294 | NOTHROW; |
1295 | GC_NOTRIGGER; |
1296 | MODE_ANY; |
1297 | CAN_TAKE_LOCK; |
1298 | } |
1299 | CONTRACTL_END; |
1300 | } |
1301 | }; |
1302 | friend class LockHolder; |
1303 | |
1304 | #ifdef _DEBUG |
1305 | friend void DumpSyncBlockCache(); |
1306 | #endif |
1307 | |
1308 | #ifdef VERIFY_HEAP |
1309 | void VerifySyncTableEntry(); |
1310 | #endif |
1311 | }; |
1312 | |
// See code:#SyncBlockOverview for more
class ObjHeader
1315 | { |
1316 | friend class CheckAsmOffsets; |
1317 | |
1318 | private: |
1319 | // !!! Notice: m_SyncBlockValue *MUST* be the last field in ObjHeader. |
1320 | #ifdef _WIN64 |
DWORD m_alignpad;
1322 | #endif // _WIN64 |
1323 | |
Volatile<DWORD> m_SyncBlockValue; // the Index and the Bits
1325 | |
1326 | #if defined(_WIN64) && defined(_DEBUG) |
void IllegalAlignPad();
1328 | #endif // _WIN64 && _DEBUG |
1329 | |
1330 | INCONTRACT(void * GetPtrForLockContract()); |
1331 | |
1332 | public: |
1333 | |
1334 | // Access to the Sync Block Index, by masking the Value. |
FORCEINLINE DWORD GetHeaderSyncBlockIndex()
1336 | { |
1337 | LIMITED_METHOD_DAC_CONTRACT; |
1338 | #if defined(_WIN64) && defined(_DEBUG) && !defined(DACCESS_COMPILE) |
1339 | // On WIN64 this field is never modified, but was initialized to 0 |
1340 | if (m_alignpad != 0) |
1341 | IllegalAlignPad(); |
1342 | #endif // _WIN64 && _DEBUG && !DACCESS_COMPILE |
1343 | |
1344 | // pull the value out before checking it to avoid race condition |
1345 | DWORD value = m_SyncBlockValue.LoadWithoutBarrier(); |
1346 | if ((value & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) != BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) |
1347 | return 0; |
1348 | return value & MASK_SYNCBLOCKINDEX; |
1349 | } |
1350 | // Ditto for setting the index, which is careful not to disturb the underlying |
1351 | // bit field -- even in the presence of threaded access. |
1352 | // |
1353 | // This service can only be used to transition from a 0 index to a non-0 index. |
void SetIndex(DWORD indx)
1355 | { |
1356 | CONTRACTL |
1357 | { |
1358 | INSTANCE_CHECK; |
1359 | NOTHROW; |
1360 | GC_NOTRIGGER; |
1361 | FORBID_FAULT; |
1362 | MODE_ANY; |
1363 | PRECONDITION(GetHeaderSyncBlockIndex() == 0); |
1364 | PRECONDITION(m_SyncBlockValue & BIT_SBLK_SPIN_LOCK); |
1365 | } |
1366 | CONTRACTL_END |
1367 | |
1368 | |
1369 | #ifdef _DEBUG |
1370 | // if we have an index here, make sure we already transferred it to the syncblock |
1371 | // before we clear it out |
1372 | ADIndex adIndex = GetRawAppDomainIndex(); |
1373 | if (adIndex.m_dwIndex) |
1374 | { |
1375 | SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry() [indx & ~BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX].m_SyncBlock; |
1376 | _ASSERTE(pSyncBlock && pSyncBlock->GetAppDomainIndex() == adIndex); |
1377 | } |
1378 | #endif |
1379 | |
1380 | LONG newValue; |
1381 | LONG oldValue; |
1382 | while (TRUE) { |
1383 | oldValue = m_SyncBlockValue.LoadWithoutBarrier(); |
1384 | _ASSERTE(GetHeaderSyncBlockIndex() == 0); |
1385 | // or in the old value except any index that is there - |
1386 | // note that indx could be carrying the BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX bit that we need to preserve |
1387 | newValue = (indx | |
1388 | (oldValue & ~(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | MASK_SYNCBLOCKINDEX))); |
1389 | if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, |
1390 | newValue, |
1391 | oldValue) |
1392 | == oldValue) |
1393 | { |
1394 | return; |
1395 | } |
1396 | } |
1397 | } |
1398 | |
1399 | // Used only during shutdown |
void ResetIndex()
1401 | { |
1402 | LIMITED_METHOD_CONTRACT; |
1403 | |
1404 | _ASSERTE(m_SyncBlockValue & BIT_SBLK_SPIN_LOCK); |
1405 | FastInterlockAnd(&m_SyncBlockValue, ~(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | MASK_SYNCBLOCKINDEX)); |
1406 | } |
1407 | |
// Used only by the GC
void GCResetIndex()
1410 | { |
1411 | LIMITED_METHOD_CONTRACT; |
1412 | |
1413 | m_SyncBlockValue.RawValue() &=~(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | MASK_SYNCBLOCKINDEX); |
1414 | } |
1415 | |
1416 | void SetAppDomainIndex(ADIndex); |
1417 | void ResetAppDomainIndex(ADIndex); |
1418 | void ResetAppDomainIndexNoFailure(ADIndex); |
1419 | ADIndex GetRawAppDomainIndex(); |
1420 | ADIndex GetAppDomainIndex(); |
1421 | |
1422 | // For now, use interlocked operations to twiddle bits in the bitfield portion. |
1423 | // If we ever have high-performance requirements where we can guarantee that no |
1424 | // other threads are accessing the ObjHeader, this can be reconsidered for those |
1425 | // particular bits. |
void SetBit(DWORD bit)
1427 | { |
1428 | LIMITED_METHOD_CONTRACT; |
1429 | |
1430 | _ASSERTE((bit & MASK_SYNCBLOCKINDEX) == 0); |
1431 | FastInterlockOr(&m_SyncBlockValue, bit); |
1432 | } |
void ClrBit(DWORD bit)
1434 | { |
1435 | LIMITED_METHOD_CONTRACT; |
1436 | |
1437 | _ASSERTE((bit & MASK_SYNCBLOCKINDEX) == 0); |
1438 | FastInterlockAnd(&m_SyncBlockValue, ~bit); |
1439 | } |
1440 | //GC accesses this bit when all threads are stopped. |
void SetGCBit()
1442 | { |
1443 | LIMITED_METHOD_CONTRACT; |
1444 | |
1445 | m_SyncBlockValue.RawValue() |= BIT_SBLK_GC_RESERVE; |
1446 | } |
void ClrGCBit()
1448 | { |
1449 | LIMITED_METHOD_CONTRACT; |
1450 | |
1451 | m_SyncBlockValue.RawValue() &= ~BIT_SBLK_GC_RESERVE; |
1452 | } |
1453 | |
1454 | // Don't bother masking out the index since anyone who wants bits will presumably |
1455 | // restrict the bits they consider. |
DWORD GetBits()
1457 | { |
1458 | LIMITED_METHOD_CONTRACT; |
1459 | SUPPORTS_DAC; |
1460 | |
1461 | #if defined(_WIN64) && defined(_DEBUG) && !defined(DACCESS_COMPILE) |
1462 | // On WIN64 this field is never modified, but was initialized to 0 |
1463 | if (m_alignpad != 0) |
1464 | IllegalAlignPad(); |
1465 | #endif // _WIN64 && _DEBUG && !DACCESS_COMPILE |
1466 | |
1467 | return m_SyncBlockValue.LoadWithoutBarrier(); |
1468 | } |
1469 | |
1470 | |
DWORD SetBits(DWORD newBits, DWORD oldBits)
1472 | { |
1473 | LIMITED_METHOD_CONTRACT; |
1474 | |
1475 | _ASSERTE((oldBits & BIT_SBLK_SPIN_LOCK) == 0); |
1476 | DWORD result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newBits, oldBits); |
1477 | return result; |
1478 | } |
1479 | |
1480 | #ifdef _DEBUG |
BOOL HasEmptySyncBlockInfo()
1482 | { |
1483 | WRAPPER_NO_CONTRACT; |
1484 | return m_SyncBlockValue.LoadWithoutBarrier() == 0; |
1485 | } |
1486 | #endif |
1487 | |
1488 | // TRUE if the header has a real SyncBlockIndex (i.e. it has an entry in the |
1489 | // SyncTable, though it doesn't necessarily have an entry in the SyncBlockCache) |
BOOL HasSyncBlockIndex()
1491 | { |
1492 | LIMITED_METHOD_DAC_CONTRACT; |
1493 | return (GetHeaderSyncBlockIndex() != 0); |
1494 | } |
1495 | |
1496 | // retrieve or allocate a sync block for this object |
SyncBlock *GetSyncBlock();
1498 | |
1499 | // retrieve sync block but don't allocate |
PTR_SyncBlock PassiveGetSyncBlock()
1501 | { |
1502 | LIMITED_METHOD_DAC_CONTRACT; |
1503 | return g_pSyncTable [(int)GetHeaderSyncBlockIndex()].m_SyncBlock; |
1504 | } |
1505 | |
DWORD GetSyncBlockIndex();
1507 | |
1508 | // this enters the monitor of an object |
void EnterObjMonitor();
1510 | |
1511 | // non-blocking version of above |
BOOL TryEnterObjMonitor(INT32 timeOut = 0);
1513 | |
1514 | // Inlineable fast path of EnterObjMonitor/TryEnterObjMonitor. Must be called before EnterObjMonitorHelperSpin. |
AwareLock::EnterHelperResult EnterObjMonitorHelper(Thread* pCurThread);
1516 | |
1517 | // Typically non-inlined spin loop for some fast paths of EnterObjMonitor/TryEnterObjMonitor. EnterObjMonitorHelper must be |
1518 | // called before this function. |
AwareLock::EnterHelperResult EnterObjMonitorHelperSpin(Thread* pCurThread);
1520 | |
1521 | // leaves the monitor of an object |
BOOL LeaveObjMonitor();
1523 | |
1524 | // should be called only from unwind code |
BOOL LeaveObjMonitorAtException();
1526 | |
1527 | // Helper encapsulating the core logic for releasing monitor. Returns what kind of |
1528 | // follow up action is necessary |
AwareLock::LeaveHelperAction LeaveObjMonitorHelper(Thread* pCurThread);
1530 | |
1531 | // Returns TRUE if the lock is owned and FALSE otherwise |
1532 | // threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock |
1533 | // acquisitionCount is set to the number of times the lock needs to be released before |
1534 | // it is unowned |
BOOL GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount);
1536 | |
PTR_Object GetBaseObject()
1538 | { |
1539 | LIMITED_METHOD_DAC_CONTRACT; |
1540 | return dac_cast<PTR_Object>(dac_cast<TADDR>(this + 1)); |
1541 | } |
1542 | |
BOOL Wait(INT32 timeOut, BOOL exitContext);
void Pulse();
void PulseAll();
1546 | |
void EnterSpinLock();
void ReleaseSpinLock();
1549 | |
BOOL Validate(BOOL bVerifySyncBlkIndex = TRUE);
1551 | }; |
1552 | |
1553 | |
typedef DPTR(class ObjHeader) PTR_ObjHeader;
1555 | |
1556 | |
1557 | #define ENTER_SPIN_LOCK(pOh) \ |
1558 | pOh->EnterSpinLock(); |
1559 | |
1560 | #define LEAVE_SPIN_LOCK(pOh) \ |
1561 | pOh->ReleaseSpinLock(); |
1562 | |
1563 | |
1564 | #ifdef DACCESS_COMPILE |
1565 | // A visitor function used to enumerate threads in the ThreadQueue below |
1566 | typedef void (*FP_TQ_THREAD_ENUMERATION_CALLBACK)(PTR_Thread pThread, VOID* pUserData); |
1567 | #endif |
1568 | |
1569 | // A SyncBlock contains an m_Link field that is used for two purposes. One |
1570 | // is to manage a FIFO queue of threads that are waiting on this synchronization |
1571 | // object. The other is to thread free SyncBlocks into a list for recycling. |
1572 | // We don't want to burn anything else on the SyncBlock instance, so we can't |
1573 | // use an SList or similar data structure. So here's the encapsulation for the |
1574 | // queue of waiting threads. |
1575 | // |
1576 | // Note that Enqueue is slower than it needs to be, because we don't want to |
1577 | // burn extra space in the SyncBlock to remember the head and the tail of the Q. |
1578 | // An alternate approach would be to treat the list as a LIFO stack, which is not |
// a fair policy because it permits starvation.
1580 | // |
1581 | // Important!!! While there is a lock that is used in process to keep multiple threads |
1582 | // from altering the queue simultaneously, the queue must still be consistent at all |
1583 | // times, even when the lock is held. The debugger inspects the queue from out of process |
1584 | // and just looks at the memory...it must be valid even if the lock is held. Be careful if you |
1585 | // change the way the queue is updated. |
1586 | struct ThreadQueue |
1587 | { |
1588 | // Given a link in the chain, get the Thread that it represents |
1589 | static PTR_WaitEventLink WaitEventLinkForLink(PTR_SLink pLink); |
1590 | |
1591 | // Unlink the head of the Q. We are always in the SyncBlock's critical |
1592 | // section. |
1593 | static WaitEventLink *DequeueThread(SyncBlock *psb); |
1594 | |
1595 | // Enqueue is the slow one. We have to find the end of the Q since we don't |
1596 | // want to burn storage for this in the SyncBlock. |
1597 | static void EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb); |
1598 | |
1599 | // Wade through the SyncBlock's list of waiting threads and remove the |
1600 | // specified thread. |
1601 | static BOOL RemoveThread (Thread *pThread, SyncBlock *psb); |
1602 | |
1603 | #ifdef DACCESS_COMPILE |
1604 | // Enumerates the threads in the queue from front to back by calling |
1605 | // pCallbackFunction on each one |
1606 | static void EnumerateThreads(SyncBlock *psb, |
1607 | FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction, |
1608 | void* pUserData); |
1609 | #endif |
1610 | }; |
1611 | |
1612 | inline void AwareLock::SetPrecious() |
1613 | { |
1614 | LIMITED_METHOD_CONTRACT; |
1615 | |
1616 | m_dwSyncIndex |= SyncBlock::SyncBlockPrecious; |
1617 | } |
1618 | |
1619 | inline DWORD AwareLock::GetSyncBlockIndex() |
1620 | { |
1621 | LIMITED_METHOD_CONTRACT; |
1622 | return (m_dwSyncIndex & ~SyncBlock::SyncBlockPrecious); |
1623 | } |
1624 | |
1625 | #ifdef _TARGET_X86_ |
1626 | #include <poppack.h> |
1627 | #endif // _TARGET_X86_ |
1628 | |
1629 | #endif // _SYNCBLK_H_ |
1630 | |
1631 | |
1632 | |