// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.


#ifndef _SYNCBLK_INL_
#define _SYNCBLK_INL_

#ifndef DACCESS_COMPILE

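// Rough sketch of how the enter-path helpers below compose (the slow path that drives them lives outside this file and may
// differ in detail):
//
//   TryEnterHelper()               - single interlocked attempt, plus the recursive case
//   TryEnterBeforeSpinLoopHelper() - acquire the lock, or register as a spinner before spinning
//   TryEnterInsideSpinLoopHelper() - per-iteration attempt; unregisters the spinner only on success
//   TryEnterAfterSpinLoopHelper()  - final attempt; always unregisters the spinner
//
// The waiter-side helpers (InterlockedTryLock_Or_RegisterWaiter() and the wake-signal variants) follow the same
// register/try/unregister pattern for waiters.
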
FORCEINLINE bool AwareLock::LockState::InterlockedTryLock()
{
    WRAPPER_NO_CONTRACT;
    return InterlockedTryLock(VolatileLoadWithoutBarrier());
}

FORCEINLINE bool AwareLock::LockState::InterlockedTryLock(LockState state)
{
    WRAPPER_NO_CONTRACT;

    // The monitor releases waiters fairly, in FIFO order, but allows non-waiters to acquire the lock if it's available, to
    // avoid lock convoys.
    //
    // Lock convoys can be detrimental to performance in scenarios where work is being done on multiple threads and the work
    // involves periodically taking a particular lock for a short time to access shared resources. With a lock convoy, once
    // there is a waiter for the lock (which is not uncommon in such scenarios), a worker thread would be forced to
    // context-switch on the subsequent attempt to acquire the lock, often long before the worker thread exhausts its time
    // slice. This process repeats as long as the lock has a waiter, forcing every worker to context-switch on each attempt
    // to acquire the lock, killing performance and creating a negative feedback loop that makes it more likely for the lock
    // to have waiters. To avoid the lock convoy, as long as the lock is not contended, each worker must be allowed to
    // acquire the lock multiple times in sequence despite there being a waiter for the lock, so that the worker can continue
    // working efficiently during its time slice.
    //
    // This scheme can starve waiters. Waiter starvation is mitigated by other means, see
    // InterlockedTrySetShouldNotPreemptWaitersIfNecessary().
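    //
    // For example (illustrative only): worker threads A and B each take the lock briefly many times per time slice, and
    // thread W is waiting. When A releases the lock, W is signaled to wake. With strict FIFO handoff, the very next
    // acquisition attempt by A or B would have to block behind W and context-switch, even though W has not yet been
    // scheduled. By letting A and B acquire the available lock ahead of W, they keep working for their full time slices,
    // at the cost of delaying W.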
    if (state.ShouldNonWaiterAttemptToAcquireLock())
    {
        LockState newState = state;
        newState.InvertIsLocked();

        return CompareExchangeAcquire(newState, state) == state;
    }
    return false;
}

FORCEINLINE bool AwareLock::LockState::InterlockedUnlock()
{
    WRAPPER_NO_CONTRACT;
    static_assert_no_msg(IsLockedMask == 1);
    _ASSERTE(IsLocked());

    // Since IsLockedMask == 1 and the lock is held, an interlocked decrement atomically clears the lock bit and leaves the
    // rest of the state unchanged
    LockState state = InterlockedDecrementRelease((LONG *)&m_state);
    while (true)
    {
        // Keep track of whether a thread has been signaled to wake but has not yet woken from the wait.
        // IsWaiterSignaledToWakeMask is cleared when a signaled thread wakes up by observing a signal. Since threads can
        // preempt waiting threads and acquire the lock (see InterlockedTryLock()), this allows, for example, one thread to
        // acquire and release the lock multiple times while there are multiple waiting threads. In such a case, we don't
        // want that thread to signal a waiter every time it releases the lock, as that would cause unnecessary context
        // switches, with more and more signaled threads waking up, finding that the lock is still locked, and going right
        // back into a wait state. So, signal only one waiting thread at a time.
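        //
        // For example (illustrative only): if a thread preempts the waiters and acquires and releases the lock three times
        // while five threads are waiting, only the first release signals a waiter; the subsequent releases see
        // IsWaiterSignaledToWake still set and skip signaling, so at most one waiter is awake and retrying at a time.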
        if (!state.NeedToSignalWaiter())
        {
            return false;
        }

        LockState newState = state;
        newState.InvertIsWaiterSignaledToWake();

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            return true;
        }

        state = stateBeforeUpdate;
    }
}

FORCEINLINE bool AwareLock::LockState::InterlockedTrySetShouldNotPreemptWaitersIfNecessary(AwareLock *awareLock)
{
    WRAPPER_NO_CONTRACT;
    return InterlockedTrySetShouldNotPreemptWaitersIfNecessary(awareLock, VolatileLoadWithoutBarrier());
}

FORCEINLINE bool AwareLock::LockState::InterlockedTrySetShouldNotPreemptWaitersIfNecessary(
    AwareLock *awareLock,
    LockState state)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(awareLock != nullptr);
    _ASSERTE(&awareLock->m_lockState == this);

    // Normally, threads are allowed to preempt waiters to acquire the lock in order to avoid creating lock convoys, see
    // InterlockedTryLock(). There are cases where waiters can be easily starved as a result. For example, a thread may hold
    // the lock for a significant amount of time (much longer than the time it takes to do a context switch), then release
    // and reacquire the lock in quick succession, and repeat. Though a waiter would be woken upon lock release, it usually
    // will not have enough time to context-switch in and take the lock, and can be starved for an unreasonably long
    // duration.
    //
    // In order to prevent such starvation and force a bit of fair forward progress, it is sometimes necessary to change the
    // normal policy and disallow threads from preempting waiters. ShouldNotPreemptWaiters() indicates the current state of
    // the policy, and this function determines whether the policy should be changed to disallow non-waiters from preempting
    // waiters.
    // - When the first waiter begins waiting, it records the current time as the "waiter starvation start time". That is a
    //   point in time after which no forward progress has occurred for waiters. When a waiter acquires the lock, the time is
    //   updated to the current time.
    // - This function checks whether the starvation duration has crossed a threshold and, if so, sets
    //   ShouldNotPreemptWaiters().
    //
    // While unreasonable starvation is occurring, the lock will still be released occasionally, and if spinners are causing
    // the starvation, new threads will be starting to spin.
    // - Before a thread starts to spin, this function is called. If ShouldNotPreemptWaiters() is set, the would-be spinner
    //   skips spinning and waits instead. Spinners that are already registered at the time ShouldNotPreemptWaiters() is set
    //   will stop spinning as necessary. Eventually, all spinners will drain and no new ones will be registered.
    // - Upon releasing the lock, if there are no spinners, a waiter will be signaled to wake. On that path, this function
    //   is called.
    // - Eventually, after the spinners have drained, only a waiter will be able to acquire the lock. When a waiter acquires
    //   the lock, or when the last waiter unregisters itself, ShouldNotPreemptWaiters() is cleared to restore the normal
    //   policy.
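    //
    // Illustrative timeline (not exact): thread A holds the lock for a long stretch, releases it, and reacquires it almost
    // immediately; waiter W began waiting at time T, which was recorded as the waiter starvation start time. A release
    // signals W to wake, but A reacquires the lock before W is scheduled, so W finds the lock taken and waits again. Once
    // GetTickCount() - T crosses the threshold checked by ShouldStopPreemptingWaiters(), this function sets
    // ShouldNotPreemptWaiters(); spinners then drain, and a subsequent release can only be followed by a waiter acquiring
    // the lock.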

    while (true)
    {
        if (!state.HasAnyWaiters())
        {
            _ASSERTE(!state.ShouldNotPreemptWaiters());
            return false;
        }
        if (state.ShouldNotPreemptWaiters())
        {
            return true;
        }
        if (!awareLock->ShouldStopPreemptingWaiters())
        {
            return false;
        }

        LockState newState = state;
        newState.InvertShouldNotPreemptWaiters();

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            return true;
        }

        state = stateBeforeUpdate;
    }
}

FORCEINLINE AwareLock::EnterHelperResult AwareLock::LockState::InterlockedTry_LockOrRegisterSpinner(LockState state)
{
    WRAPPER_NO_CONTRACT;

    while (true)
    {
        LockState newState = state;
        if (state.ShouldNonWaiterAttemptToAcquireLock())
        {
            newState.InvertIsLocked();
        }
        else if (state.ShouldNotPreemptWaiters() || !newState.TryIncrementSpinnerCount())
        {
            return EnterHelperResult_UseSlowPath;
        }

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            return state.ShouldNonWaiterAttemptToAcquireLock() ? EnterHelperResult_Entered : EnterHelperResult_Contention;
        }

        state = stateBeforeUpdate;
    }
}

FORCEINLINE AwareLock::EnterHelperResult AwareLock::LockState::InterlockedTry_LockAndUnregisterSpinner()
{
    WRAPPER_NO_CONTRACT;

    // This function is called from inside a spin loop; it must unregister the spinner if and only if the lock is acquired
    LockState state = VolatileLoadWithoutBarrier();
    while (true)
    {
        _ASSERTE(state.HasAnySpinners());
        if (!state.ShouldNonWaiterAttemptToAcquireLock())
        {
            return state.ShouldNotPreemptWaiters() ? EnterHelperResult_UseSlowPath : EnterHelperResult_Contention;
        }

        LockState newState = state;
        newState.InvertIsLocked();
        newState.DecrementSpinnerCount();

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            return EnterHelperResult_Entered;
        }

        state = stateBeforeUpdate;
    }
}

FORCEINLINE bool AwareLock::LockState::InterlockedUnregisterSpinner_TryLock()
{
    WRAPPER_NO_CONTRACT;

    // This function is called at the end of a spin loop; it must always unregister the spinner, and must acquire the lock
    // if it's available. If the lock is available, a spinner must acquire the lock along with unregistering itself, because
    // a lock releaser does not wake a waiter when there is a spinner registered.
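    //
    // Otherwise the lock could be orphaned: a releaser that sees a registered spinner skips signaling a waiter, expecting
    // the spinner to take the lock. If the last spinner then unregistered itself without also attempting to acquire the
    // available lock, the lock would remain free while all waiters stay asleep with no signal pending.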

    LockState stateBeforeUpdate = InterlockedExchangeAdd((LONG *)&m_state, -(LONG)SpinnerCountIncrement);
    _ASSERTE(stateBeforeUpdate.HasAnySpinners());
    if (stateBeforeUpdate.IsLocked())
    {
        return false;
    }

    LockState state = stateBeforeUpdate;
    state.DecrementSpinnerCount();
    _ASSERTE(!state.IsLocked());
    do
    {
        LockState newState = state;
        newState.InvertIsLocked();

        LockState stateBeforeUpdate = CompareExchangeAcquire(newState, state);
        if (stateBeforeUpdate == state)
        {
            return true;
        }

        state = stateBeforeUpdate;
    } while (!state.IsLocked());
    return false;
}

FORCEINLINE bool AwareLock::LockState::InterlockedTryLock_Or_RegisterWaiter(AwareLock *awareLock, LockState state)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(awareLock != nullptr);
    _ASSERTE(&awareLock->m_lockState == this);

    bool waiterStarvationStartTimeWasReset = false;
    while (true)
    {
        LockState newState = state;
        if (state.ShouldNonWaiterAttemptToAcquireLock())
        {
            newState.InvertIsLocked();
        }
        else
        {
            newState.IncrementWaiterCount();

            if (!state.HasAnyWaiters() && !waiterStarvationStartTimeWasReset)
            {
                // This would be the first waiter. Once the waiter is registered, another thread may check the waiter
                // starvation start time, and the previously recorded value may be stale, causing ShouldNotPreemptWaiters()
                // to be set unnecessarily. Reset the start time before registering the waiter.
                waiterStarvationStartTimeWasReset = true;
                awareLock->ResetWaiterStarvationStartTime();
            }
        }

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            if (state.ShouldNonWaiterAttemptToAcquireLock())
            {
                return true;
            }

            if (!state.HasAnyWaiters())
            {
                // This was the first waiter; record the waiter starvation start time
                _ASSERTE(waiterStarvationStartTimeWasReset);
                awareLock->RecordWaiterStarvationStartTime();
            }
            return false;
        }

        state = stateBeforeUpdate;
    }
}

FORCEINLINE void AwareLock::LockState::InterlockedUnregisterWaiter()
{
    WRAPPER_NO_CONTRACT;

    LockState state = VolatileLoadWithoutBarrier();
    while (true)
    {
        _ASSERTE(state.HasAnyWaiters());

        LockState newState = state;
        newState.DecrementWaiterCount();
        if (newState.ShouldNotPreemptWaiters() && !newState.HasAnyWaiters())
        {
            newState.InvertShouldNotPreemptWaiters();
        }

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            return;
        }

        state = stateBeforeUpdate;
    }
}

FORCEINLINE bool AwareLock::LockState::InterlockedTry_LockAndUnregisterWaiterAndObserveWakeSignal(AwareLock *awareLock)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(awareLock != nullptr);
    _ASSERTE(&awareLock->m_lockState == this);

    // This function is called from the waiter's spin loop and should observe the wake signal only if the lock is taken, to
    // prevent a lock releaser from waking another waiter while one is already spinning to acquire the lock
    bool waiterStarvationStartTimeWasRecorded = false;
    LockState state = VolatileLoadWithoutBarrier();
    while (true)
    {
        _ASSERTE(state.HasAnyWaiters());
        _ASSERTE(state.IsWaiterSignaledToWake());
        if (state.IsLocked())
        {
            return false;
        }

        LockState newState = state;
        newState.InvertIsLocked();
        newState.InvertIsWaiterSignaledToWake();
        newState.DecrementWaiterCount();
        if (newState.ShouldNotPreemptWaiters())
        {
            newState.InvertShouldNotPreemptWaiters();

            if (newState.HasAnyWaiters() && !waiterStarvationStartTimeWasRecorded)
            {
                // Update the waiter starvation start time. The time must be recorded before ShouldNotPreemptWaiters() is
                // cleared, as once that is cleared, another thread may check the waiter starvation start time and the
                // previously recorded value may be stale, causing ShouldNotPreemptWaiters() to be set again unnecessarily.
                waiterStarvationStartTimeWasRecorded = true;
                awareLock->RecordWaiterStarvationStartTime();
            }
        }

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            if (newState.HasAnyWaiters())
            {
                _ASSERTE(!state.ShouldNotPreemptWaiters() || waiterStarvationStartTimeWasRecorded);
                if (!waiterStarvationStartTimeWasRecorded)
                {
                    // Since the lock was acquired successfully by a waiter, update the waiter starvation start time
                    awareLock->RecordWaiterStarvationStartTime();
                }
            }
            return true;
        }

        state = stateBeforeUpdate;
    }
}

FORCEINLINE bool AwareLock::LockState::InterlockedObserveWakeSignal_Try_LockAndUnregisterWaiter(AwareLock *awareLock)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(awareLock != nullptr);
    _ASSERTE(&awareLock->m_lockState == this);

    // This function is called at the end of the waiter's spin loop. It must always observe the wake signal, and if the lock
    // is available, it must acquire the lock and unregister the waiter. If the lock is available, a waiter must acquire the
    // lock along with observing the wake signal, because a lock releaser does not wake a waiter when a waiter was signaled
    // but the wake signal has not been observed.
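    //
    // Otherwise a wakeup could be lost: a releaser that sees IsWaiterSignaledToWake set skips signaling another waiter. If
    // this waiter cleared the signal without also attempting to acquire the available lock, the lock would remain free
    // while the remaining waiters stay asleep with no signal pending.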

    LockState stateBeforeUpdate = InterlockedExchangeAdd((LONG *)&m_state, -(LONG)IsWaiterSignaledToWakeMask);
    _ASSERTE(stateBeforeUpdate.IsWaiterSignaledToWake());
    if (stateBeforeUpdate.IsLocked())
    {
        return false;
    }

    bool waiterStarvationStartTimeWasRecorded = false;
    LockState state = stateBeforeUpdate;
    state.InvertIsWaiterSignaledToWake();
    _ASSERTE(!state.IsLocked());
    do
    {
        _ASSERTE(state.HasAnyWaiters());
        LockState newState = state;
        newState.InvertIsLocked();
        newState.DecrementWaiterCount();
        if (newState.ShouldNotPreemptWaiters())
        {
            newState.InvertShouldNotPreemptWaiters();

            if (newState.HasAnyWaiters() && !waiterStarvationStartTimeWasRecorded)
            {
                // Update the waiter starvation start time. The time must be recorded before ShouldNotPreemptWaiters() is
                // cleared, as once that is cleared, another thread may check the waiter starvation start time and the
                // previously recorded value may be stale, causing ShouldNotPreemptWaiters() to be set again unnecessarily.
                waiterStarvationStartTimeWasRecorded = true;
                awareLock->RecordWaiterStarvationStartTime();
            }
        }

        LockState stateBeforeUpdate = CompareExchange(newState, state);
        if (stateBeforeUpdate == state)
        {
            if (newState.HasAnyWaiters())
            {
                _ASSERTE(!state.ShouldNotPreemptWaiters() || waiterStarvationStartTimeWasRecorded);
                if (!waiterStarvationStartTimeWasRecorded)
                {
                    // Since the lock was acquired successfully by a waiter, update the waiter starvation start time
                    awareLock->RecordWaiterStarvationStartTime();
                }
            }
            return true;
        }

        state = stateBeforeUpdate;
    } while (!state.IsLocked());
    return false;
}

FORCEINLINE void AwareLock::ResetWaiterStarvationStartTime()
{
    LIMITED_METHOD_CONTRACT;
    m_waiterStarvationStartTimeMs = 0;
}

FORCEINLINE void AwareLock::RecordWaiterStarvationStartTime()
{
    WRAPPER_NO_CONTRACT;

    DWORD currentTimeMs = GetTickCount();
    if (currentTimeMs == 0)
    {
        // Don't record zero; that value is reserved for indicating that a time is not recorded
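        // (GetTickCount() wraps around roughly every 49.7 days, so a zero reading is possible, if rare). Decrementing maps
        // 0 to the maximum DWORD value; the resulting one-millisecond skew is harmless.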
        --currentTimeMs;
    }
    m_waiterStarvationStartTimeMs = currentTimeMs;
}

FORCEINLINE bool AwareLock::ShouldStopPreemptingWaiters() const
{
    WRAPPER_NO_CONTRACT;

    // If the recorded time is zero, a time has not been recorded yet
    DWORD waiterStarvationStartTimeMs = m_waiterStarvationStartTimeMs;
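    // Note that the unsigned subtraction below remains correct across GetTickCount() wraparound, as long as the actual
    // elapsed time is less than the full DWORD range (~49.7 days)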
    return
        waiterStarvationStartTimeMs != 0 &&
        GetTickCount() - waiterStarvationStartTimeMs >= WaiterStarvationDurationMsBeforeStoppingPreemptingWaiters;
}

FORCEINLINE void AwareLock::SpinWait(const YieldProcessorNormalizationInfo &normalizationInfo, DWORD spinIteration)
{
    WRAPPER_NO_CONTRACT;

    _ASSERTE(g_SystemInfo.dwNumberOfProcessors != 1);
    _ASSERTE(spinIteration < g_SpinConstants.dwMonitorSpinCount);

    YieldProcessorWithBackOffNormalized(normalizationInfo, spinIteration);
}

FORCEINLINE bool AwareLock::TryEnterHelper(Thread* pCurThread)
{
    CONTRACTL{
        SO_TOLERANT;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    } CONTRACTL_END;

    if (m_lockState.InterlockedTryLock())
    {
        m_HoldingThread = pCurThread;
        m_Recursion = 1;
        pCurThread->IncLockCount();
        return true;
    }

    if (GetOwningThread() == pCurThread) /* monitor is held, but it could be a recursive case */
    {
        m_Recursion++;
        return true;
    }
    return false;
}

FORCEINLINE AwareLock::EnterHelperResult AwareLock::TryEnterBeforeSpinLoopHelper(Thread *pCurThread)
{
    CONTRACTL{
        SO_TOLERANT;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    } CONTRACTL_END;

    LockState state = m_lockState.VolatileLoadWithoutBarrier();

    // Check the recursive case once before the spin loop. If it's not the recursive case in the beginning, it will not
    // be in the future, so the spin loop can avoid checking the recursive case.
    if (!state.IsLocked() || GetOwningThread() != pCurThread)
    {
        if (m_lockState.InterlockedTrySetShouldNotPreemptWaitersIfNecessary(this, state))
        {
            // This thread currently should not preempt waiters, just wait
            return EnterHelperResult_UseSlowPath;
        }

        // Not a recursive enter, try to acquire the lock or register the spinner
        EnterHelperResult result = m_lockState.InterlockedTry_LockOrRegisterSpinner(state);
        if (result != EnterHelperResult_Entered)
        {
            // EnterHelperResult_Contention:
            //   Lock was not acquired and the spinner was registered
            // EnterHelperResult_UseSlowPath:
            //   This thread currently should not preempt waiters, or we reached the maximum number of spinners, just wait
            return result;
        }

        // Lock was acquired and the spinner was not registered
        m_HoldingThread = pCurThread;
        m_Recursion = 1;
        pCurThread->IncLockCount();
        return EnterHelperResult_Entered;
    }

    // Recursive enter
    m_Recursion++;
    return EnterHelperResult_Entered;
}

FORCEINLINE AwareLock::EnterHelperResult AwareLock::TryEnterInsideSpinLoopHelper(Thread *pCurThread)
{
    CONTRACTL{
        SO_TOLERANT;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    } CONTRACTL_END;

    // Try to acquire the lock and unregister the spinner. The recursive case is not checked here because
    // TryEnterBeforeSpinLoopHelper() would have taken care of that case before the spin loop.
    EnterHelperResult result = m_lockState.InterlockedTry_LockAndUnregisterSpinner();
    if (result != EnterHelperResult_Entered)
    {
        // EnterHelperResult_Contention:
        //   Lock was not acquired and the spinner was not unregistered
        // EnterHelperResult_UseSlowPath:
        //   This thread currently should not preempt waiters, stop spinning and just wait
        return result;
    }

    // Lock was acquired and the spinner was unregistered
    m_HoldingThread = pCurThread;
    m_Recursion = 1;
    pCurThread->IncLockCount();
    return EnterHelperResult_Entered;
}

FORCEINLINE bool AwareLock::TryEnterAfterSpinLoopHelper(Thread *pCurThread)
{
    CONTRACTL{
        SO_TOLERANT;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    } CONTRACTL_END;

    // Unregister the spinner and try to acquire the lock. A spinner must not unregister itself without trying to acquire
    // the lock, because a lock releaser does not wake a waiter when a spinner can acquire the lock (see
    // InterlockedUnregisterSpinner_TryLock()).
    if (!m_lockState.InterlockedUnregisterSpinner_TryLock())
    {
        // Spinner was unregistered and the lock was not acquired
        return false;
    }

    // Spinner was unregistered and the lock was acquired
    m_HoldingThread = pCurThread;
    m_Recursion = 1;
    pCurThread->IncLockCount();
    return true;
}

FORCEINLINE AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelper(Thread* pCurThread)
{
    CONTRACTL{
        SO_TOLERANT;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    } CONTRACTL_END;

    LONG oldValue = m_SyncBlockValue.LoadWithoutBarrier();

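    // Fast path: the header holds neither a hash code nor a sync block index, the header spin lock is not held, and the
    // thin lock is completely free (no owner thread id, no recursion level). A single interlocked compare-exchange stamps
    // this thread's id into the thin lock.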
    if ((oldValue & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX +
                     BIT_SBLK_SPIN_LOCK +
                     SBLK_MASK_LOCK_THREADID +
                     SBLK_MASK_LOCK_RECLEVEL)) == 0)
    {
        DWORD tid = pCurThread->GetThreadId();
        if (tid > SBLK_MASK_LOCK_THREADID)
        {
            return AwareLock::EnterHelperResult_UseSlowPath;
        }

        LONG newValue = oldValue | tid;
        if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
        {
            pCurThread->IncLockCount();
            return AwareLock::EnterHelperResult_Entered;
        }

        return AwareLock::EnterHelperResult_Contention;
    }

    if (oldValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
    {
        // If we have a hash code already, we need to create a sync block
        if (oldValue & BIT_SBLK_IS_HASHCODE)
        {
            return AwareLock::EnterHelperResult_UseSlowPath;
        }

        SyncBlock *syncBlock = g_pSyncTable[oldValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
        _ASSERTE(syncBlock != NULL);
        if (syncBlock->m_Monitor.TryEnterHelper(pCurThread))
        {
            return AwareLock::EnterHelperResult_Entered;
        }

        return AwareLock::EnterHelperResult_Contention;
    }

    // The header is transitioning - treat this as if the lock was taken
    if (oldValue & BIT_SBLK_SPIN_LOCK)
    {
        return AwareLock::EnterHelperResult_Contention;
    }

    // Here we know we have the "thin lock" layout, but the lock is not free.
    // It could still be the recursion case - compare the thread id to check
    if (pCurThread->GetThreadId() != (DWORD)(oldValue & SBLK_MASK_LOCK_THREADID))
    {
        return AwareLock::EnterHelperResult_Contention;
    }

    // Ok, the thread id matches; it's the recursion case.
    // Bump up the recursion level and check for overflow
    LONG newValue = oldValue + SBLK_LOCK_RECLEVEL_INC;

    if ((newValue & SBLK_MASK_LOCK_RECLEVEL) == 0)
    {
        return AwareLock::EnterHelperResult_UseSlowPath;
    }

    if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
    {
        return AwareLock::EnterHelperResult_Entered;
    }

    // Use the slow path instead of spinning. The compare-exchange above would not fail often, and it's not worth forcing the
    // spin loop that typically follows the call to this function to check the recursive case, so just bail to the slow path.
    return AwareLock::EnterHelperResult_UseSlowPath;
}

// Helper encapsulating the core logic for releasing the monitor. Returns the kind of follow-up action that is necessary.
// This is FORCEINLINE to provide a very efficient implementation.
FORCEINLINE AwareLock::LeaveHelperAction AwareLock::LeaveHelper(Thread* pCurThread)
{
    CONTRACTL {
        SO_TOLERANT;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    } CONTRACTL_END;

    if (m_HoldingThread != pCurThread)
        return AwareLock::LeaveHelperAction_Error;

    _ASSERTE(m_lockState.VolatileLoadWithoutBarrier().IsLocked());
    _ASSERTE(m_Recursion >= 1);

#if defined(_DEBUG) && defined(TRACK_SYNC) && !defined(CROSSGEN_COMPILE)
    // The best place to grab this is from the ECall frame
    Frame *pFrame = pCurThread->GetFrame();
    int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
    pCurThread->m_pTrackSync->LeaveSync(caller, this);
#endif

    if (--m_Recursion == 0)
    {
        m_HoldingThread->DecLockCount();
        m_HoldingThread = NULL;

        // Clear the lock bit and determine whether we must signal a waiter to wake
        if (!m_lockState.InterlockedUnlock())
        {
            return AwareLock::LeaveHelperAction_None;
        }

        // There is a waiter and we must signal a waiter to wake
        return AwareLock::LeaveHelperAction_Signal;
    }
    return AwareLock::LeaveHelperAction_None;
}

// Helper encapsulating the core logic for releasing an object's monitor, including the thin-lock cases. Returns the kind of
// follow-up action that is necessary. This is FORCEINLINE to provide a very efficient implementation.
FORCEINLINE AwareLock::LeaveHelperAction ObjHeader::LeaveObjMonitorHelper(Thread* pCurThread)
{
    CONTRACTL {
        SO_TOLERANT;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    } CONTRACTL_END;

    DWORD syncBlockValue = m_SyncBlockValue.LoadWithoutBarrier();

    if ((syncBlockValue & (BIT_SBLK_SPIN_LOCK + BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)) == 0)
    {
        if ((syncBlockValue & SBLK_MASK_LOCK_THREADID) != pCurThread->GetThreadId())
        {
            // This thread does not own the lock.
            return AwareLock::LeaveHelperAction_Error;
        }

        if (!(syncBlockValue & SBLK_MASK_LOCK_RECLEVEL))
        {
            // We are leaving the lock
            DWORD newValue = (syncBlockValue & (~SBLK_MASK_LOCK_THREADID));
            if (InterlockedCompareExchangeRelease((LONG*)&m_SyncBlockValue, newValue, syncBlockValue) != (LONG)syncBlockValue)
            {
                return AwareLock::LeaveHelperAction_Yield;
            }
            pCurThread->DecLockCount();
        }
        else
        {
            // Recursive exit of a thin lock; decrement the recursion level
            DWORD newValue = syncBlockValue - SBLK_LOCK_RECLEVEL_INC;
            if (InterlockedCompareExchangeRelease((LONG*)&m_SyncBlockValue, newValue, syncBlockValue) != (LONG)syncBlockValue)
            {
                return AwareLock::LeaveHelperAction_Yield;
            }
        }

        return AwareLock::LeaveHelperAction_None;
    }

    if ((syncBlockValue & (BIT_SBLK_SPIN_LOCK + BIT_SBLK_IS_HASHCODE)) == 0)
    {
        _ASSERTE((syncBlockValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) != 0);
        SyncBlock *syncBlock = g_pSyncTable[syncBlockValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
        _ASSERTE(syncBlock != NULL);
        return syncBlock->m_Monitor.LeaveHelper(pCurThread);
    }

    if (syncBlockValue & BIT_SBLK_SPIN_LOCK)
    {
        return AwareLock::LeaveHelperAction_Contention;
    }

    // This thread does not own the lock.
    return AwareLock::LeaveHelperAction_Error;
}

#endif // DACCESS_COMPILE

// Provide access to the object associated with this AwareLock, so the client can protect it.
inline OBJECTREF AwareLock::GetOwningObject()
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;

    // gcc on mac needs these intermediate casts to avoid some ambiguous overloading in the DAC case
    PTR_SyncTableEntry table = SyncTableEntry::GetSyncTableEntry();
    return (OBJECTREF)(Object*)(PTR_Object)table[(m_dwSyncIndex & ~SyncBlock::SyncBlockPrecious)].m_Object;
}

#endif // _SYNCBLK_INL_