1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | //---------------------------------------------------------------------------- |
// spinlock.h - defines the spin lock class and a profiler class
6 | // |
7 | |
8 | // |
9 | //---------------------------------------------------------------------------- |
10 | |
11 | |
12 | //#ifndef _H_UTIL |
13 | //#error I am a part of util.hpp Please don't include me alone ! |
14 | //#endif |
15 | |
16 | |
17 | |
18 | #ifndef _H_SPINLOCK_ |
19 | #define _H_SPINLOCK_ |
20 | |
21 | #include <stddef.h> |
22 | |
23 | |
24 | // #SwitchToThreadSpinning |
25 | // |
// If you call __SwitchToThread in a loop waiting for a condition to be met,
// it is critical that you insert periodic sleeps.  This is because the thread
// you are waiting on (the one that will set the condition) may need your CPU,
// and simply calling __SwitchToThread(0) will NOT guarantee that it gets a
// chance to run.  If there are other runnable threads of higher priority, or
// even if there aren't and it is in another processor's queue, you will be
// spinning a very long time.
33 | // |
34 | // To force all callers to consider this issue and to avoid each having to |
35 | // duplicate the same backoff code, __SwitchToThread takes a required second |
36 | // parameter. If you want it to handle backoff for you, this parameter should |
37 | // be the number of successive calls you have made to __SwitchToThread (a loop |
38 | // count). If you want to take care of backing off yourself, you can pass |
39 | // CALLER_LIMITS_SPINNING. There are three valid cases for doing this: |
40 | // |
41 | // - You count iterations and induce a sleep periodically |
42 | // - The number of consecutive __SwitchToThreads is limited |
43 | // - Your call to __SwitchToThread includes a non-zero sleep duration |
44 | // |
45 | // Lastly, to simplify this requirement for the following common coding pattern: |
46 | // |
47 | // while (!condition) |
48 | // SwitchToThread |
49 | // |
50 | // you can use the YIELD_WHILE macro. |
51 | |
// Pass this as the dwSwitchCount argument of __SwitchToThread when the
// caller takes responsibility for backing off itself (see the three valid
// cases listed above).
#define CALLER_LIMITS_SPINNING 0

// Spin-wait until `condition` becomes false, delegating backoff policy to
// __SwitchToThread by passing it a running iteration count
// (see #SwitchToThreadSpinning above).
//
// The body is wrapped in do { ... } while (0) so the macro expands to a
// single statement and composes safely with if/else and a trailing
// semicolon (a bare { ... } block followed by ';' would detach an `else`).
#define YIELD_WHILE(condition)                      \
    do                                              \
    {                                               \
        DWORD __dwSwitchCount = 0;                  \
        while (condition)                           \
        {                                           \
            __SwitchToThread(0, ++__dwSwitchCount); \
        }                                           \
    } while (0)
62 | |
// Yield the CPU, optionally sleeping for dwSleepMSec milliseconds.
// dwSwitchCount is the caller's count of consecutive calls, or
// CALLER_LIMITS_SPINNING if the caller handles backoff itself
// (see #SwitchToThreadSpinning above).
// Returns a non-zero value if this function causes the OS to switch to
// another thread.
BOOL __SwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount);
BOOL __DangerousSwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount, BOOL goThroughOS);
66 | |
67 | |
68 | //---------------------------------------------------------------------------- |
69 | // class: DangerousNonHostedSpinLock |
70 | // |
71 | // PURPOSE: |
72 | // A simple wrapper around the spinloop without host interactions. To be |
73 | // used for short-time locking in the VM, in particular when the runtime |
74 | // has not been started yet. |
75 | // |
76 | //---------------------------------------------------------------------------- |
77 | class DangerousNonHostedSpinLock |
78 | { |
79 | public: |
80 | FORCEINLINE DangerousNonHostedSpinLock() { LIMITED_METHOD_CONTRACT; m_value = 0; } |
81 | |
82 | private: |
83 | // Intentionally unimplemented - prevents the compiler from generating default copy ctor. |
84 | DangerousNonHostedSpinLock(DangerousNonHostedSpinLock const & other); |
85 | |
86 | FORCEINLINE void Acquire() |
87 | { |
88 | WRAPPER_NO_CONTRACT; |
89 | YIELD_WHILE(FastInterlockExchange(&m_value, 1) == 1); |
90 | } |
91 | |
92 | FORCEINLINE BOOL TryAcquire() |
93 | { |
94 | WRAPPER_NO_CONTRACT; |
95 | return (FastInterlockExchange(&m_value, 1) == 0); |
96 | } |
97 | |
98 | FORCEINLINE void Release() |
99 | { |
100 | LIMITED_METHOD_CONTRACT; |
101 | m_value = 0; |
102 | } |
103 | |
104 | inline static void AcquireLock(DangerousNonHostedSpinLock *pLock) { WRAPPER_NO_CONTRACT; pLock->Acquire(); } |
105 | inline static BOOL TryAcquireLock(DangerousNonHostedSpinLock *pLock) { WRAPPER_NO_CONTRACT; return pLock->TryAcquire(); } |
106 | inline static void ReleaseLock(DangerousNonHostedSpinLock *pLock) { WRAPPER_NO_CONTRACT; pLock->Release(); } |
107 | |
108 | Volatile<LONG> m_value; |
109 | |
110 | public: |
111 | BOOL IsHeld() |
112 | { |
113 | LIMITED_METHOD_CONTRACT; |
114 | return (BOOL)m_value; |
115 | } |
116 | |
117 | typedef Holder<DangerousNonHostedSpinLock *, DangerousNonHostedSpinLock::AcquireLock, DangerousNonHostedSpinLock::ReleaseLock> Holder; |
118 | typedef ConditionalStateHolder<DangerousNonHostedSpinLock *, DangerousNonHostedSpinLock::TryAcquireLock, DangerousNonHostedSpinLock::ReleaseLock> TryHolder; |
119 | }; |
120 | |
121 | typedef DangerousNonHostedSpinLock::Holder DangerousNonHostedSpinLockHolder; |
122 | typedef DangerousNonHostedSpinLock::TryHolder DangerousNonHostedSpinLockTryHolder; |
123 | |
124 | |
class SpinLock;

// Lock types, used in profiling: these values index the SpinLockProfiler
// statistics arrays (sized LOCK_TYPE_DEFAULT + 1).  Note the value 6 is
// currently unused.
//
enum LOCK_TYPE
{
    LOCK_PLUSWRAPPER_CACHE = 1,     // change
    LOCK_FCALL = 2,                 // leave, but rank to tip
    LOCK_COMCTXENTRYCACHE = 3,      // creates events, allocs memory, SEH, etc.
#ifdef FEATURE_COMINTEROP
    LOCK_COMCALL = 4,
#endif
    LOCK_REFLECTCACHE = 5,
    LOCK_CORMAP = 7,
    LOCK_TYPE_DEFAULT = 8
};
141 | |
142 | //---------------------------------------------------------------------------- |
143 | // class: Spinlock |
144 | // |
145 | // PURPOSE: |
146 | // spinlock class that contains constructor and out of line spinloop. |
147 | // |
148 | //---------------------------------------------------------------------------- |
149 | class SpinLock |
150 | { |
151 | |
152 | private: |
153 | union { |
154 | // m_lock has to be the fist data member in the class |
155 | LONG m_lock; // LONG used in interlocked exchange |
156 | }; |
157 | |
158 | enum SpinLockState |
159 | { |
160 | UnInitialized, |
161 | BeingInitialized, |
162 | Initialized |
163 | }; |
164 | |
165 | Volatile<SpinLockState> m_Initialized; // To verify initialized |
166 | // And initialize once |
167 | |
168 | #ifdef _DEBUG |
169 | LOCK_TYPE m_LockType; // lock type to track statistics |
170 | |
171 | // Check for dead lock situation. |
172 | bool m_requireCoopGCMode; |
173 | EEThreadId m_holdingThreadId; |
174 | #endif |
175 | |
176 | public: |
177 | SpinLock (); |
178 | ~SpinLock (); |
179 | |
180 | //Init method, initialize lock and _DEBUG flags |
181 | void Init(LOCK_TYPE type, bool RequireCoopGC = FALSE); |
182 | |
183 | //----------------------------------------------------------------- |
184 | // Is the current thread the owner? |
185 | //----------------------------------------------------------------- |
186 | #ifdef _DEBUG |
187 | BOOL OwnedByCurrentThread(); |
188 | #endif |
189 | |
190 | private: |
191 | void SpinToAcquire (); // out of line call spins |
192 | |
193 | #ifdef _DEBUG |
194 | void dbg_PreEnterLock(); |
195 | void dbg_EnterLock(); |
196 | void dbg_LeaveLock(); |
197 | #endif |
198 | |
199 | // The following 5 APIs must remain private. We want all entry/exit code to |
200 | // occur via holders, so that exceptions will be sure to release the lock. |
201 | private: |
202 | void GetLock(Thread * pThread); // Acquire lock, blocks if unsuccessful |
203 | BOOL GetLockNoWait(); // Acquire lock, fail-fast |
204 | void FreeLock(Thread * pThread); // Release lock |
205 | |
206 | public: |
207 | static void AcquireLock(SpinLock *s, Thread * pThread); |
208 | static void ReleaseLock(SpinLock *s, Thread * pThread); |
209 | |
210 | #define SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS NULL |
211 | |
212 | class Holder |
213 | { |
214 | SpinLock * m_pSpinLock; |
215 | public: |
216 | Holder(SpinLock * s) : |
217 | m_pSpinLock(s) |
218 | { |
219 | SCAN_SCOPE_BEGIN; |
220 | STATIC_CONTRACT_GC_NOTRIGGER; |
221 | |
222 | m_pSpinLock->GetLock(SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS); |
223 | } |
224 | |
225 | ~Holder() |
226 | { |
227 | SCAN_SCOPE_END; |
228 | |
229 | m_pSpinLock->FreeLock(SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS); |
230 | } |
231 | }; |
232 | }; |
233 | |
234 | |
typedef SpinLock::Holder SpinLockHolder;

// Acquires the lock via a scoped SpinLockHolder and establishes a
// GC-NOTRIGGER scope for the remainder of the enclosing block.
#define TAKE_SPINLOCK_AND_DONOT_TRIGGER_GC(lock) \
                        SpinLockHolder __spinLockHolder(lock);\
                        GCX_NOTRIGGER ();

// NOTE: the two macros below must be used as a matched pair within one
// function: ACQUIRE_SPINLOCK_NO_HOLDER opens a brace/scope that the
// corresponding RELEASE_SPINLOCK_NO_HOLDER closes.
#define ACQUIRE_SPINLOCK_NO_HOLDER(lock, thread)\
{                                               \
    SpinLock::AcquireLock(lock, thread);        \
    GCX_NOTRIGGER();                            \
    CANNOTTHROWCOMPLUSEXCEPTION();              \
    STATIC_CONTRACT_NOTHROW;                    \


#define RELEASE_SPINLOCK_NO_HOLDER(lock, thread)\
    SpinLock::ReleaseLock(lock, thread);        \
}                                               \
251 | |
// Debug-only query: does the current thread own the given spin lock?
// The parameter is typed LPVOID so callers holding only an opaque pointer
// can use it; it is cast back to SpinLock* here.
__inline BOOL IsOwnerOfSpinLock (LPVOID lock)
{
    WRAPPER_NO_CONTRACT;
#ifdef _DEBUG
    return ((SpinLock*)lock)->OwnedByCurrentThread();
#else
    // This function should not be called on free build.
    DebugBreak();
    return TRUE;
#endif
}
263 | |
264 | #ifdef _DEBUG |
265 | //---------------------------------------------------------------------------- |
266 | // class SpinLockProfiler |
267 | // to track contention, useful for profiling |
268 | // |
269 | //---------------------------------------------------------------------------- |
class SpinLockProfiler
{
    // Global (static) contention counters, indexed by LOCK_TYPE.
    // NOTE: the "Collisons" spelling is kept as-is; the out-of-line
    // definitions of these statics use the same name.
    static ULONG    s_ulBackOffs;
    static ULONG    s_ulCollisons [LOCK_TYPE_DEFAULT + 1];
    static ULONG    s_ulSpins [LOCK_TYPE_DEFAULT + 1];

public:

    static void InitStatics ();

    // Add `value` spins observed while acquiring a lock of the given type.
    static void IncrementSpins (LOCK_TYPE type, ULONG value);

    // Record a contention event (collision) for the given lock type.
    static void IncrementCollisions (LOCK_TYPE type);

    // Record `value` back-off iterations performed while spinning.
    static void IncrementBackoffs (ULONG value);

    static void DumpStatics ();

};
291 | |
292 | #endif // ifdef _DEBUG |
293 | #endif // ifndef _H_SPINLOCK_ |
294 | |