1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4
5///////////////////////////////////////////////////////////////////////////////
6//
7// File:
8// cs.cpp
9//
10// Purpose:
11// Implementation of critical sections
12//
13///////////////////////////////////////////////////////////////////////////////
14
15#include "pal/thread.hpp"
16#include "pal/cs.hpp"
17#include "pal/malloc.hpp"
18#include "pal/list.h"
19#include "pal/dbgmsg.h"
20#include "pal/init.h"
21#include "pal/process.h"
22
23#include <sched.h>
24#include <pthread.h>
25
26using namespace CorUnix;
27
28//
29// Uncomment the following line to turn CS behavior from
30// unfair to fair lock
31//
32// #define PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
33
34//
35// Uncomment the following line to enable simple mutex based CSs
36// Note: when MUTEX_BASED_CSS is defined, PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
37// has no effect
38//
39// #define MUTEX_BASED_CSS
40
41//
42// Important notes on critical sections layout/semantics on Unix
43//
44// 1) The PAL_CRITICAL_SECTION structure below must match the size of the
45// CRITICAL_SECTION defined in pal.h. Besides the "windows part"
46// of both the structures must be identical.
47// 2) Both PAL_CRITICAL_SECTION and CRITICAL_SECTION currently do not match
48// the size of the Windows' CRITICAL_SECTION.
49// - From unmanaged code point of view, one should never make assumptions
50// on the size and layout of the CRITICAL_SECTION structure, and anyway
51// on Unix PAL's CRITICAL_SECTION extends the Windows one, so that some
52// assumptions may still work.
// - From the managed code point of view, one could try to interop directly
//    with the unmanaged critical section APIs (though that would be quite
//    meaningless). In order to do that, they would need to define a copy
//    of the CRITICAL_SECTION structure in their code, and that may lead
//    to accessing random data beyond the structure limit, if that managed
//    code is compiled on Unix.
59// In case such scenario should be supported, the current implementation
60// will have to be modified in a way to go back to the original Windows
61// CRITICAL_SECTION layout. That would require to dynamically allocate
62// the native data and use LockSemaphore as a pointer to it. The current
63// solution intentionally avoids that since an effort has been made to
64// make CSs objects completely independent from any other PAL subsystem,
65// so that they can be used during initialization and shutdown.
66// In case the "dynamically allocate native data" solution should be
67// implemented, CSs would acquire a dependency on memory allocation and
68// thread suspension subsystems, since the first contention on a specific
69// CS would trigger the native data allocation.
70// 3) The semantics of the LockCount field has not been kept compatible with
71// the Windows implementation.
72// Both on Windows and Unix the lower bit of LockCount indicates
73// whether or not the CS is locked (for both fair and unfair lock
74// solution), the second bit indicates whether or not currently there is a
75// waiter that has been awakened and that is trying to acquire the CS
76// (only unfair lock solution, unused in the fair one); starting from the
77// third bit, LockCount represents the number of waiter threads currently
78// waiting on the CS.
79// Windows, anyway, implements this semantics in negative logic, so that
80// an unlocked CS is represented by a LockCount == -1 (i.e. 0xFFFFFFFF,
81// all the bits set), while on Unix an unlocked CS has LockCount == 0.
82// Windows needs to use negative logic to support legacy code bad enough
83// to directly access CS's fields making the assumption that
84// LockCount == -1 means CS unlocked. Unix will not support that, and
85// it uses positive logic.
86// 4) The CRITICAL_SECTION_DEBUG_INFO layout on Unix is intentionally not
87// compatible with the Windows layout.
88// 5) For legacy code dependencies issues similar to those just described for
89// the LockCount field, Windows CS code maintains a per-process list of
90// debug info for all the CSs, both on debug and free/retail builds. On
91// Unix such a list is maintained only on debug builds, and no debug
92// info structure is allocated on free/retail builds
93//
94
95SET_DEFAULT_DEBUG_CHANNEL(CRITSEC);
96
// CS_TRACE: verbose tracing of the critical-section state machine.
// It maps to the regular TRACE macro only when TRACE_CS_LOGIC is
// defined; otherwise it expands to nothing (GNU named-variadic form
// under GCC, C99 variadic form elsewhere).
#ifdef TRACE_CS_LOGIC
#define CS_TRACE TRACE
#else
#ifdef __GNUC__
#define CS_TRACE(args...)
#else
#define CS_TRACE(...)
#endif
#endif // TRACE_CS_LOGIC
106
107//
108// Note: PALCS_LOCK_WAITER_INC must be 2 * PALCS_LOCK_AWAKENED_WAITER
109//
110#define PALCS_LOCK_INIT 0
111#define PALCS_LOCK_BIT 1
112#define PALCS_LOCK_AWAKENED_WAITER 2
113#define PALCS_LOCK_WAITER_INC 4
114
115#define PALCS_GETLBIT(val) ((int)(0!=(PALCS_LOCK_BIT&val)))
116#define PALCS_GETAWBIT(val) ((int)(0!=(PALCS_LOCK_AWAKENED_WAITER&val)))
117#define PALCS_GETWCOUNT(val) (val/PALCS_LOCK_WAITER_INC)
118
// Lifetime states of a PAL critical section's lazy initialization.
enum PalCsInitState
{
    PalCsNotInitialized,     // Critical section not initialized (InitializeCriticalSection
                             // has not yet been called, or DeleteCriticalSection has been
                             // called).
    PalCsUserInitialized,    // Critical section initialized from the user point of view,
                             // i.e. InitializeCriticalSection has been called.
    PalCsFullyInitializing,  // A thread found the CS locked, this is the first contention on
                             // this CS, and the thread is initializing the CS's native data.
    PalCsFullyInitialized    // Internal CS's native data has been fully initialized.
};
130
// Outcome reported by PALCS_WaitOnCS to the contention path in
// InternalEnterCriticalSection.
enum PalCsWaiterReturnState
{
    PalCsReturnWaiterAwakened, // the waiter blocked and was later awakened
    PalCsWaiterDidntWait       // the waiter returned without blocking
};
136
struct _PAL_CRITICAL_SECTION; // fwd declaration

// Debug-build-only bookkeeping attached to each CS; each instance is
// linked into the global g_PALCSList (see note 5 at the top of the file).
typedef struct _CRITICAL_SECTION_DEBUG_INFO
{
    LIST_ENTRY Link;                         // entry in the global CS debug-info list
    struct _PAL_CRITICAL_SECTION * pOwnerCS; // back pointer to the CS this info describes
    Volatile<ULONG> lAcquireCount;           // times the CS was acquired (non-recursive enters)
    Volatile<ULONG> lEnterCount;             // times the CS was entered (including recursion)
    Volatile<LONG> lContentionCount;         // presumably bumped when a thread must wait — updated outside this chunk
} CRITICAL_SECTION_DEBUG_INFO, *PCRITICAL_SECTION_DEBUG_INFO;
147
// Native pthread objects backing a contended CS. These are destroyed in
// InternalDeleteCriticalSection once the CS reached PalCsFullyInitialized.
typedef struct _PAL_CRITICAL_SECTION_NATIVE_DATA
{
    pthread_mutex_t mutex;     // native mutex paired with 'condition'
    pthread_cond_t condition;  // condition variable waiters block on
    int iPredicate;            // presumably the wakeup predicate guarding against
                               // spurious wakeups — set/tested in the wait helpers
                               // outside this chunk
} PAL_CRITICAL_SECTION_NATIVE_DATA, *PPAL_CRITICAL_SECTION_NATIVE_DATA;
154
// PAL-side view of a CRITICAL_SECTION. The "Windows part" must stay
// layout-compatible with the CRITICAL_SECTION declared in pal.h
// (see notes 1 and 2 at the top of the file).
typedef struct _PAL_CRITICAL_SECTION {
    // Windows part
    PCRITICAL_SECTION_DEBUG_INFO DebugInfo;  // debug builds only; NULL on free/retail builds
    Volatile<LONG> LockCount;                // lock bit + awakened-waiter bit + waiter count (positive logic)
    LONG RecursionCount;                     // recursive enter count of the owning thread
    SIZE_T OwningThread;                     // thread id of the current owner; NULL/0 when unowned
    HANDLE LockSemaphore;                    // always set to NULL here; kept for layout (see note 2)
    ULONG_PTR SpinCount;                     // spin iterations before blocking on contention
    // Private Unix part
    BOOL fInternal;                          // TRUE for PAL-internal CSs (suspension-unsafe holders)
    Volatile<PalCsInitState> cisInitState;   // lazy-initialization state machine
    PAL_CRITICAL_SECTION_NATIVE_DATA csndNativeData; // pthread mutex/condition backing store
} PAL_CRITICAL_SECTION, *PPAL_CRITICAL_SECTION, *LPPAL_CRITICAL_SECTION;
168
#ifdef _DEBUG
namespace CorUnix
{
    // Lock guarding g_PALCSList, plus the (initially empty, self-linked)
    // list head itself. The list links the debug info of every live CS.
    PAL_CRITICAL_SECTION g_csPALCSsListLock;
    LIST_ENTRY g_PALCSList = { &g_PALCSList, &g_PALCSList};
}
#endif // _DEBUG
176
177#define ObtainCurrentThreadId(thread) ObtainCurrentThreadIdImpl(thread, __func__)
178static SIZE_T ObtainCurrentThreadIdImpl(CPalThread *pCurrentThread, const char *callingFuncName)
179{
180 SIZE_T threadId;
181 if(pCurrentThread)
182 {
183 threadId = pCurrentThread->GetThreadId();
184 _ASSERTE(threadId == THREADSilentGetCurrentThreadId());
185 }
186 else
187 {
188 threadId = THREADSilentGetCurrentThreadId();
189 CS_TRACE("Early %s, no pthread data, getting TID internally\n", callingFuncName);
190 }
191 _ASSERTE(0 != threadId);
192
193 return threadId;
194}
195
196
197/*++
198Function:
199 InitializeCriticalSection
200
201See MSDN doc.
202--*/
203void InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
204{
205 PERF_ENTRY(InitializeCriticalSection);
206 ENTRY("InitializeCriticalSection(lpCriticalSection=%p)\n",
207 lpCriticalSection);
208
209 InternalInitializeCriticalSectionAndSpinCount(lpCriticalSection,
210 0, false);
211
212 LOGEXIT("InitializeCriticalSection returns void\n");
213 PERF_EXIT(InitializeCriticalSection);
214}
215
216/*++
217Function:
218 InitializeCriticalSectionEx - Flags is ignored.
219
220See MSDN doc.
221--*/
222BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD Flags)
223{
224 PERF_ENTRY(InitializeCriticalSection);
225 ENTRY("InitializeCriticalSectionEx(lpCriticalSection=%p, dwSpinCount=%d, Flags=%d)\n",
226 lpCriticalSection, dwSpinCount, Flags);
227
228 InternalInitializeCriticalSectionAndSpinCount(lpCriticalSection, dwSpinCount, false);
229
230 LOGEXIT("InitializeCriticalSectionEx returns TRUE\n");
231 PERF_EXIT(InitializeCriticalSection);
232 return true;
233}
234
235/*++
236Function:
237 InitializeCriticalSectionAndSpinCount
238
239See MSDN doc.
240--*/
241BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection,
242 DWORD dwSpinCount)
243{
244 BOOL bRet = TRUE;
245 PERF_ENTRY(InitializeCriticalSectionAndSpinCount);
246 ENTRY("InitializeCriticalSectionAndSpinCount(lpCriticalSection=%p, "
247 "dwSpinCount=%u)\n", lpCriticalSection, dwSpinCount);
248
249 InternalInitializeCriticalSectionAndSpinCount(lpCriticalSection,
250 dwSpinCount, false);
251
252 LOGEXIT("InitializeCriticalSectionAndSpinCount returns BOOL %d\n",
253 bRet);
254 PERF_EXIT(InitializeCriticalSectionAndSpinCount);
255 return bRet;
256}
257
258/*++
259Function:
260 DeleteCriticalSection
261
262See MSDN doc.
263--*/
264void DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
265{
266 PERF_ENTRY(DeleteCriticalSection);
267 ENTRY("DeleteCriticalSection(lpCriticalSection=%p)\n", lpCriticalSection);
268
269 InternalDeleteCriticalSection(lpCriticalSection);
270
271 LOGEXIT("DeleteCriticalSection returns void\n");
272 PERF_EXIT(DeleteCriticalSection);
273}
274
275/*++
276Function:
277 EnterCriticalSection
278
279See MSDN doc.
280--*/
281void EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
282{
283 PERF_ENTRY(EnterCriticalSection);
284 ENTRY("EnterCriticalSection(lpCriticalSection=%p)\n", lpCriticalSection);
285
286 CPalThread * pThread = InternalGetCurrentThread();
287
288 InternalEnterCriticalSection(pThread, lpCriticalSection);
289
290 LOGEXIT("EnterCriticalSection returns void\n");
291 PERF_EXIT(EnterCriticalSection);
292}
293
294/*++
295Function:
296 TryEnterCriticalSection
297
298See MSDN doc.
299--*/
300BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
301{
302 PERF_ENTRY(TryEnterCriticalSection);
303 ENTRY("TryEnterCriticalSection(lpCriticalSection=%p)\n", lpCriticalSection);
304
305 CPalThread * pThread = InternalGetCurrentThread();
306
307 bool fRet = InternalTryEnterCriticalSection(pThread,
308 lpCriticalSection);
309
310 LOGEXIT("TryEnterCriticalSection returns bool %d\n", (int)fRet);
311 PERF_EXIT(TryEnterCriticalSection);
312
313 return (BOOL)fRet;
314}
315
316/*++
317Function:
318 LeaveCriticalSection
319
320See MSDN doc.
321--*/
322VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
323{
324 PERF_ENTRY(LeaveCriticalSection);
325 ENTRY("LeaveCriticalSection(lpCriticalSection=%p)\n", lpCriticalSection);
326
327 CPalThread * pThread = InternalGetCurrentThread();
328
329 InternalLeaveCriticalSection(pThread, lpCriticalSection);
330
331 LOGEXIT("LeaveCriticalSection returns void\n");
332 PERF_EXIT(LeaveCriticalSection);
333}
334
335/*++
336Function:
337 InternalInitializeCriticalSection
338
339Initializes a critical section. It assumes the CS is an internal one,
340i.e. thread entering it will be marked unsafe for suspension
341--*/
342VOID InternalInitializeCriticalSection(CRITICAL_SECTION *pcs)
343{
344 InternalInitializeCriticalSectionAndSpinCount(pcs, 0, true);
345}
346
347/*++
348Function:
349 InternalDeleteCriticalSection
350
351Deletes a critical section
352--*/
353VOID InternalDeleteCriticalSection(
354 PCRITICAL_SECTION pCriticalSection)
355{
356 PAL_CRITICAL_SECTION * pPalCriticalSection =
357 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
358
359 _ASSERT_MSG(PalCsUserInitialized == pPalCriticalSection->cisInitState ||
360 PalCsFullyInitialized == pPalCriticalSection->cisInitState,
361 "CS %p is not initialized", pPalCriticalSection);
362
363#ifdef _DEBUG
364 CPalThread * pThread =
365 (PALIsThreadDataInitialized() ? GetCurrentPalThread() : NULL);
366
367 if (0 != pPalCriticalSection->LockCount)
368 {
369 SIZE_T tid;
370 tid = ObtainCurrentThreadId(pThread);
371 int iWaiterCount = (int)PALCS_GETWCOUNT(pPalCriticalSection->LockCount);
372
373 if (0 != (PALCS_LOCK_BIT & pPalCriticalSection->LockCount))
374 {
375 // CS is locked
376 if (tid != pPalCriticalSection->OwningThread)
377 {
378 // not owner
379 ASSERT("Thread tid=%u deleting a CS owned by thread tid=%u\n",
380 tid, pPalCriticalSection->OwningThread);
381 }
382 else
383 {
384 // owner
385 if (0 != iWaiterCount)
386 {
387 ERROR("Thread tid=%u is deleting a CS with %d threads waiting on it\n",
388 tid, iWaiterCount);
389 }
390 else
391 {
392 WARN("Thread tid=%u is deleting a critical section it still owns\n",
393 tid);
394 }
395 }
396 }
397 else
398 {
399 // CS is not locked
400 if (0 != iWaiterCount)
401 {
402 ERROR("Deleting a CS with %d threads waiting on it\n",
403 iWaiterCount);
404 }
405 else
406 {
407 ERROR("Thread tid=%u is deleting a critical section currently not "
408 "owned, but with one waiter awakened\n", tid);
409 }
410 }
411 }
412
413 if (NULL != pPalCriticalSection->DebugInfo)
414 {
415 if (pPalCriticalSection != &CorUnix::g_csPALCSsListLock)
416 {
417 InternalEnterCriticalSection(pThread,
418 reinterpret_cast<CRITICAL_SECTION*>(&g_csPALCSsListLock));
419 RemoveEntryList(&pPalCriticalSection->DebugInfo->Link);
420 InternalLeaveCriticalSection(pThread,
421 reinterpret_cast<CRITICAL_SECTION*>(&g_csPALCSsListLock));
422 }
423 else
424 {
425 RemoveEntryList(&pPalCriticalSection->DebugInfo->Link);
426 }
427
428#ifdef PAL_TRACK_CRITICAL_SECTIONS_DATA
429 LONG lVal, lNewVal;
430 Volatile<LONG> * plDest;
431
432 // Update delete count
433 InterlockedIncrement(pPalCriticalSection->fInternal ?
434 &g_lPALCSInternalDeleteCount : &g_lPALCSDeleteCount);
435
436 // Update acquire count
437 plDest = pPalCriticalSection->fInternal ?
438 &g_lPALCSInternalAcquireCount : &g_lPALCSAcquireCount;
439 do {
440 lVal = *plDest;
441 lNewVal = lVal + pPalCriticalSection->DebugInfo->lAcquireCount;
442 lNewVal = InterlockedCompareExchange(plDest, lNewVal, lVal);
443 } while (lVal != lNewVal);
444
445 // Update enter count
446 plDest = pPalCriticalSection->fInternal ?
447 &g_lPALCSInternalEnterCount : &g_lPALCSEnterCount;
448 do {
449 lVal = *plDest;
450 lNewVal = lVal + pPalCriticalSection->DebugInfo->lEnterCount;
451 lNewVal = InterlockedCompareExchange(plDest, lNewVal, lVal);
452 } while (lVal != lNewVal);
453
454 // Update contention count
455 plDest = pPalCriticalSection->fInternal ?
456 &g_lPALCSInternalContentionCount : &g_lPALCSContentionCount;
457 do {
458 lVal = *plDest;
459 lNewVal = lVal + pPalCriticalSection->DebugInfo->lContentionCount;
460 lNewVal = InterlockedCompareExchange(plDest, lNewVal, lVal);
461 } while (lVal != lNewVal);
462
463#endif // PAL_TRACK_CRITICAL_SECTIONS_DATA
464
465 InternalDelete(pPalCriticalSection->DebugInfo);
466 pPalCriticalSection->DebugInfo = NULL;
467 }
468#endif // _DEBUG
469
470 if (PalCsFullyInitialized == pPalCriticalSection->cisInitState)
471 {
472 int iRet;
473
474 // destroy condition
475 iRet = pthread_cond_destroy(&pPalCriticalSection->csndNativeData.condition);
476 _ASSERT_MSG(0 == iRet, "Failed destroying condition in CS @ %p "
477 "[err=%d]\n", pPalCriticalSection, iRet);
478
479 // destroy mutex
480 iRet = pthread_mutex_destroy(&pPalCriticalSection->csndNativeData.mutex);
481 _ASSERT_MSG(0 == iRet, "Failed destroying mutex in CS @ %p "
482 "[err=%d]\n", pPalCriticalSection, iRet);
483 }
484
485 // Reset critical section state
486 pPalCriticalSection->cisInitState = PalCsNotInitialized;
487}
488
489// The following PALCEnterCriticalSection and PALCLeaveCriticalSection
490// functions are intended to provide CorUnix's InternalEnterCriticalSection
491// and InternalLeaveCriticalSection functionalities to legacy C code,
492// which has no knowledge of CPalThread, classes and namespaces.
493
494/*++
495Function:
496 PALCEnterCriticalSection
497
498Provides CorUnix's InternalEnterCriticalSection functionality to legacy C code,
499which has no knowledge of CPalThread, classes and namespaces.
500--*/
501VOID PALCEnterCriticalSection(CRITICAL_SECTION * pcs)
502{
503 CPalThread * pThread =
504 (PALIsThreadDataInitialized() ? GetCurrentPalThread() : NULL);
505 CorUnix::InternalEnterCriticalSection(pThread, pcs);
506}
507
508/*++
509Function:
510 PALCLeaveCriticalSection
511
512Provides CorUnix's InternalLeaveCriticalSection functionality to legacy C code,
513which has no knowledge of CPalThread, classes and namespaces.
514--*/
515VOID PALCLeaveCriticalSection(CRITICAL_SECTION * pcs)
516{
517 CPalThread * pThread =
518 (PALIsThreadDataInitialized() ? GetCurrentPalThread() : NULL);
519 CorUnix::InternalLeaveCriticalSection(pThread, pcs);
520}
521
522namespace CorUnix
523{
    // Forward declarations of the internal wait/wakeup/lazy-init helpers
    // defined later in this file.
    static PalCsWaiterReturnState PALCS_WaitOnCS(
        PAL_CRITICAL_SECTION * pPalCriticalSection,
        LONG lInc);
    static PAL_ERROR PALCS_DoActualWait(PAL_CRITICAL_SECTION * pPalCriticalSection);
    static PAL_ERROR PALCS_WakeUpWaiter(PAL_CRITICAL_SECTION * pPalCriticalSection);
    static bool PALCS_FullyInitialize(PAL_CRITICAL_SECTION * pPalCriticalSection);

#ifdef _DEBUG
    // State of the debug-only CS subsystem initialization, driven by
    // CriticalSectionSubSysInitialize below.
    // (Note: "CSSubSysNotInitialzed" [sic] keeps its historical spelling.)
    enum CSSubSysInitState
    {
        CSSubSysNotInitialzed,
        CSSubSysInitializing,
        CSSubSysInitialized
    };
    static Volatile<CSSubSysInitState> csssInitState = CSSubSysNotInitialzed;

#ifdef PAL_TRACK_CRITICAL_SECTIONS_DATA
    // Process-wide CS activity counters, split between PAL-internal and
    // user critical sections; per-CS values are folded into these at
    // CS delete time (see InternalDeleteCriticalSection).
    static Volatile<LONG> g_lPALCSInitializeCount = 0;
    static Volatile<LONG> g_lPALCSDeleteCount = 0;
    static Volatile<LONG> g_lPALCSAcquireCount = 0;
    static Volatile<LONG> g_lPALCSEnterCount = 0;
    static Volatile<LONG> g_lPALCSContentionCount = 0;
    static Volatile<LONG> g_lPALCSInternalInitializeCount = 0;
    static Volatile<LONG> g_lPALCSInternalDeleteCount = 0;
    static Volatile<LONG> g_lPALCSInternalAcquireCount = 0;
    static Volatile<LONG> g_lPALCSInternalEnterCount = 0;
    static Volatile<LONG> g_lPALCSInternalContentionCount = 0;
#endif // PAL_TRACK_CRITICAL_SECTIONS_DATA
#endif // _DEBUG
553
554
555 /*++
556 Function:
557 CorUnix::CriticalSectionSubSysInitialize
558
559 Initializes CS subsystem
560 --*/
561 void CriticalSectionSubSysInitialize()
562 {
563 static_assert(sizeof(CRITICAL_SECTION) >= sizeof(PAL_CRITICAL_SECTION),
564 "PAL fatal internal error: sizeof(CRITICAL_SECTION) is "
565 "smaller than sizeof(PAL_CRITICAL_SECTION)");
566
567#ifdef _DEBUG
568 LONG lRet = InterlockedCompareExchange((LONG *)&csssInitState,
569 (LONG)CSSubSysInitializing,
570 (LONG)CSSubSysNotInitialzed);
571 if ((LONG)CSSubSysNotInitialzed == lRet)
572 {
573 InitializeListHead(&g_PALCSList);
574
575 InternalInitializeCriticalSectionAndSpinCount(
576 reinterpret_cast<CRITICAL_SECTION*>(&g_csPALCSsListLock),
577 0, true);
578 InterlockedExchange((LONG *)&csssInitState,
579 (LONG)CSSubSysInitialized);
580 }
581 else
582 {
583 while (csssInitState != CSSubSysInitialized)
584 {
585 sched_yield();
586 }
587 }
588#endif // _DEBUG
589 }
590
591 /*++
592 Function:
593 CorUnix::InternalInitializeCriticalSectionAndSpinCount
594
595 Initializes a CS with the given spin count. If 'fInternal' is true
596 the CS will be treatead as an internal one for its whole lifetime,
597 i.e. any thread that will enter it will be marked as unsafe for
598 suspension as long as it holds the CS
599 --*/
600 void InternalInitializeCriticalSectionAndSpinCount(
601 PCRITICAL_SECTION pCriticalSection,
602 DWORD dwSpinCount,
603 bool fInternal)
604 {
605 PAL_CRITICAL_SECTION * pPalCriticalSection =
606 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
607
608#ifndef PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
609 // Make sure bits are defined in a usable way
610 _ASSERTE(PALCS_LOCK_AWAKENED_WAITER * 2 == PALCS_LOCK_WAITER_INC);
611#endif // !PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
612
613 // Make sure structure sizes are compatible
614 _ASSERTE(sizeof(CRITICAL_SECTION) >= sizeof(PAL_CRITICAL_SECTION));
615
616#ifdef _DEBUG
617 if (sizeof(CRITICAL_SECTION) > sizeof(PAL_CRITICAL_SECTION))
618 {
619 WARN("PAL_CS_NATIVE_DATA_SIZE appears to be defined to a value (%d) "
620 "larger than needed on this platform (%d).\n",
621 sizeof(CRITICAL_SECTION), sizeof(PAL_CRITICAL_SECTION));
622 }
623#endif // _DEBUG
624
625 // Init CS data
626 pPalCriticalSection->DebugInfo = NULL;
627 pPalCriticalSection->LockCount = 0;
628 pPalCriticalSection->RecursionCount = 0;
629 pPalCriticalSection->SpinCount = dwSpinCount;
630 pPalCriticalSection->OwningThread = NULL;
631 pPalCriticalSection->LockSemaphore = NULL;
632 pPalCriticalSection->fInternal = fInternal;
633
634#ifdef _DEBUG
635 CPalThread * pThread =
636 (PALIsThreadDataInitialized() ? GetCurrentPalThread() : NULL);
637
638 pPalCriticalSection->DebugInfo = InternalNew<CRITICAL_SECTION_DEBUG_INFO>();
639 _ASSERT_MSG(NULL != pPalCriticalSection->DebugInfo,
640 "Failed to allocate debug info for new CS\n");
641
642 // Init debug info data
643 pPalCriticalSection->DebugInfo->lAcquireCount = 0;
644 pPalCriticalSection->DebugInfo->lEnterCount = 0;
645 pPalCriticalSection->DebugInfo->lContentionCount = 0;
646 pPalCriticalSection->DebugInfo->pOwnerCS = pPalCriticalSection;
647
648 // Insert debug info struct in global list
649 if (pPalCriticalSection != &g_csPALCSsListLock)
650 {
651 InternalEnterCriticalSection(pThread,
652 reinterpret_cast<CRITICAL_SECTION*>(&g_csPALCSsListLock));
653 InsertTailList(&g_PALCSList, &pPalCriticalSection->DebugInfo->Link);
654 InternalLeaveCriticalSection(pThread,
655 reinterpret_cast<CRITICAL_SECTION*>(&g_csPALCSsListLock));
656 }
657 else
658 {
659 InsertTailList(&g_PALCSList, &pPalCriticalSection->DebugInfo->Link);
660 }
661
662#ifdef PAL_TRACK_CRITICAL_SECTIONS_DATA
663 InterlockedIncrement(fInternal ?
664 &g_lPALCSInternalInitializeCount : &g_lPALCSInitializeCount);
665#endif // PAL_TRACK_CRITICAL_SECTIONS_DATA
666#endif // _DEBUG
667
668 // Set initializazion state
669 pPalCriticalSection->cisInitState = PalCsUserInitialized;
670
671#ifdef MUTEX_BASED_CSS
672 bool fInit;
673 do
674 {
675 fInit = PALCS_FullyInitialize(pPalCriticalSection);
676 _ASSERTE(fInit);
677 } while (!fInit && 0 == sched_yield());
678
679 if (fInit)
680 {
681 // Set initializazion state
682 pPalCriticalSection->cisInitState = PalCsFullyInitialized;
683 }
684#endif // MUTEX_BASED_CSS
685 }
686
687#ifndef MUTEX_BASED_CSS
688 /*++
689 Function:
690 CorUnix::InternalEnterCriticalSection
691
692 Enters a CS, causing the thread to block if the CS is owned by
693 another thread
694 --*/
695 void InternalEnterCriticalSection(
696 CPalThread * pThread,
697 PCRITICAL_SECTION pCriticalSection)
698 {
699 PAL_CRITICAL_SECTION * pPalCriticalSection =
700 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
701
702 LONG lSpinCount;
703 LONG lVal, lNewVal;
704 LONG lBitsToChange, lWaitInc;
705 PalCsWaiterReturnState cwrs;
706 SIZE_T threadId;
707
708 _ASSERTE(PalCsNotInitialized != pPalCriticalSection->cisInitState);
709
710 threadId = ObtainCurrentThreadId(pThread);
711
712
713 // Check if the current thread already owns the CS
714 //
715 // Note: there is no need for this double check to be atomic. In fact
716 // if the first check fails, the second doesn't count (and it's not
717 // even executed). If the first one succeeds and the second one
718 // doesn't, it doesn't matter if LockCount has already changed by the
719 // time OwningThread is tested. Instead, if the first one succeeded,
720 // and the second also succeeds, LockCount cannot have changed in the
721 // meanwhile, since this is the owning thread and only the owning
722 // thread can change the lock bit when the CS is owned.
723 if ((pPalCriticalSection->LockCount & PALCS_LOCK_BIT) &&
724 (pPalCriticalSection->OwningThread == threadId))
725 {
726 pPalCriticalSection->RecursionCount += 1;
727#ifdef _DEBUG
728 if (NULL != pPalCriticalSection->DebugInfo)
729 {
730 pPalCriticalSection->DebugInfo->lEnterCount += 1;
731 }
732#endif // _DEBUG
733 goto IECS_exit;
734 }
735
736 // Set bits to change and waiter increment for an incoming thread
737 lBitsToChange = PALCS_LOCK_BIT;
738 lWaitInc = PALCS_LOCK_WAITER_INC;
739 lSpinCount = pPalCriticalSection->SpinCount;
740
741 while (TRUE)
742 {
743 // Either this is an incoming thread, and therefore lBitsToChange
744 // is just PALCS_LOCK_BIT, or this is an awakened waiter
745 _ASSERTE(PALCS_LOCK_BIT == lBitsToChange ||
746 (PALCS_LOCK_BIT | PALCS_LOCK_AWAKENED_WAITER) == lBitsToChange);
747
748 // Make sure the waiter increment is in a valid range
749 _ASSERTE(PALCS_LOCK_WAITER_INC == lWaitInc ||
750 PALCS_LOCK_AWAKENED_WAITER == lWaitInc);
751
752 do {
753 lVal = pPalCriticalSection->LockCount;
754
755 while (0 == (lVal & PALCS_LOCK_BIT))
756 {
757 // CS is not locked: try lo lock it
758
759 // Make sure that whether we are an incoming thread
760 // or the PALCS_LOCK_AWAKENED_WAITER bit is set
761 _ASSERTE((PALCS_LOCK_BIT == lBitsToChange) ||
762 (PALCS_LOCK_AWAKENED_WAITER & lVal));
763
764 lNewVal = lVal ^ lBitsToChange;
765
766 // Make sure we are actually trying to lock
767 _ASSERTE(lNewVal & PALCS_LOCK_BIT);
768
769 CS_TRACE("[ECS %p] Switching from {%d, %d, %d} to "
770 "{%d, %d, %d} ==>\n", pPalCriticalSection,
771 PALCS_GETWCOUNT(lVal), PALCS_GETAWBIT(lVal), PALCS_GETLBIT(lVal),
772 PALCS_GETWCOUNT(lNewVal), PALCS_GETAWBIT(lNewVal), PALCS_GETLBIT(lNewVal));
773
774 // Try to switch the value
775 lNewVal = InterlockedCompareExchange (&pPalCriticalSection->LockCount,
776 lNewVal, lVal);
777
778 CS_TRACE("[ECS %p] ==> %s LockCount={%d, %d, %d} "
779 "lVal={%d, %d, %d}\n", pPalCriticalSection,
780 (lNewVal == lVal) ? "OK" : "NO",
781 PALCS_GETWCOUNT(pPalCriticalSection->LockCount),
782 PALCS_GETAWBIT(pPalCriticalSection->LockCount),
783 PALCS_GETLBIT(pPalCriticalSection->LockCount),
784 PALCS_GETWCOUNT(lVal), PALCS_GETAWBIT(lVal), PALCS_GETLBIT(lVal));
785
786 if (lNewVal == lVal)
787 {
788 // CS successfully acquired
789 goto IECS_set_ownership;
790 }
791
792 // Acquisition failed, some thread raced with us;
793 // update value for next loop
794 lVal = lNewVal;
795 }
796
797 if (0 < lSpinCount)
798 {
799 sched_yield();
800 }
801 } while (0 <= --lSpinCount);
802
803 cwrs = PALCS_WaitOnCS(pPalCriticalSection, lWaitInc);
804
805 if (PalCsReturnWaiterAwakened == cwrs)
806 {
807#ifdef PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
808 //
809 // Fair Critical Sections
810 //
811 // In the fair lock case, when a waiter wakes up the CS
812 // must be locked (i.e. ownership passed on to the waiter)
813 _ASSERTE(0 != (PALCS_LOCK_BIT & pPalCriticalSection->LockCount));
814
815 // CS successfully acquired
816 goto IECS_set_ownership;
817
818#else // PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
819 //
820 // Unfair Critical Sections
821 //
822 _ASSERTE(PALCS_LOCK_AWAKENED_WAITER & pPalCriticalSection->LockCount);
823
824 lBitsToChange = PALCS_LOCK_BIT | PALCS_LOCK_AWAKENED_WAITER;
825 lWaitInc = PALCS_LOCK_AWAKENED_WAITER;
826#endif // PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
827 }
828 }
829
830 IECS_set_ownership:
831 // Critical section acquired: set ownership data
832 pPalCriticalSection->OwningThread = threadId;
833 pPalCriticalSection->RecursionCount = 1;
834#ifdef _DEBUG
835 if (NULL != pPalCriticalSection->DebugInfo)
836 {
837 pPalCriticalSection->DebugInfo->lAcquireCount += 1;
838 pPalCriticalSection->DebugInfo->lEnterCount += 1;
839 }
840#endif // _DEBUG
841
842 IECS_exit:
843 return;
844 }
845
846 /*++
847 Function:
848 CorUnix::InternalLeaveCriticalSection
849
850 Leaves a currently owned CS
851 --*/
852 void InternalLeaveCriticalSection(CPalThread * pThread,
853 PCRITICAL_SECTION pCriticalSection)
854 {
855 PAL_CRITICAL_SECTION * pPalCriticalSection =
856 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
857 LONG lVal, lNewVal;
858
859#ifdef _DEBUG
860 SIZE_T threadId;
861
862 _ASSERTE(PalCsNotInitialized != pPalCriticalSection->cisInitState);
863
864 threadId = ObtainCurrentThreadId(pThread);
865 _ASSERTE(threadId == pPalCriticalSection->OwningThread);
866#endif // _DEBUG
867
868 _ASSERT_MSG(PALCS_LOCK_BIT & pPalCriticalSection->LockCount,
869 "Trying to release an unlocked CS\n");
870 _ASSERT_MSG(0 < pPalCriticalSection->RecursionCount,
871 "Trying to release an unlocked CS\n");
872
873 if (--pPalCriticalSection->RecursionCount > 0)
874 {
875 // Recursion was > 1, still owning the CS
876 goto ILCS_cs_exit;
877 }
878
879 // Reset CS ownership
880 pPalCriticalSection->OwningThread = NULL;
881
882 // Load the current LockCount value
883 lVal = pPalCriticalSection->LockCount;
884
885 while (true)
886 {
887 _ASSERT_MSG(0 != (PALCS_LOCK_BIT & lVal),
888 "Trying to release an unlocked CS\n");
889
890 // NB: In the fair lock case (PALCS_TRANSFER_OWNERSHIP_ON_RELEASE) the
891 // PALCS_LOCK_AWAKENED_WAITER bit is not used
892 if ( (PALCS_LOCK_BIT == lVal)
893#ifndef PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
894 || (PALCS_LOCK_AWAKENED_WAITER & lVal)
895#endif // !PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
896 )
897 {
898 // Whether there are no waiters (PALCS_LOCK_BIT == lVal)
899 // or a waiter has already been awakened, therefore we
900 // just need to reset the lock bit and return
901 lNewVal = lVal & ~PALCS_LOCK_BIT;
902 CS_TRACE("[LCS-UN %p] Switching from {%d, %d, %d} to "
903 "{%d, %d, %d} ==>\n", pPalCriticalSection,
904 PALCS_GETWCOUNT(lVal), PALCS_GETAWBIT(lVal), PALCS_GETLBIT(lVal),
905 PALCS_GETWCOUNT(lNewVal), PALCS_GETAWBIT(lNewVal), PALCS_GETLBIT(lNewVal));
906
907 lNewVal = InterlockedCompareExchange(&pPalCriticalSection->LockCount,
908 lNewVal, lVal);
909
910 CS_TRACE("[LCS-UN %p] ==> %s\n", pPalCriticalSection,
911 (lNewVal == lVal) ? "OK" : "NO");
912
913 if (lNewVal == lVal)
914 {
915 goto ILCS_cs_exit;
916 }
917 }
918 else
919 {
920 // There is at least one waiter, we need to wake it up
921
922#ifdef PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
923 // Fair lock case: passing ownership on to the first waiter.
924 // Here we need only to decrement the waiters count. CS will
925 // remain locked and ownership will be passed to the waiter,
926 // which will take care of setting ownership data as soon as
927 // it wakes up
928 lNewVal = lVal - PALCS_LOCK_WAITER_INC;
929#else // PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
930 // Unfair lock case: we need to atomically decrement the waiters
931 // count (we are about ot wake up one of them), set the
932 // "waiter awakened" bit and to reset the "CS locked" bit.
933 // Note that, since we know that at this time PALCS_LOCK_BIT
934 // is set and PALCS_LOCK_AWAKENED_WAITER is not set, none of
935 // the addenda will affect bits other than its target bit(s),
936 // i.e. PALCS_LOCK_BIT will not affect PALCS_LOCK_AWAKENED_WAITER,
937 // PALCS_LOCK_AWAKENED_WAITER will not affect the actual
938 // count of waiters, and the latter will not change the two
939 // former ones
940 lNewVal = lVal - PALCS_LOCK_WAITER_INC +
941 PALCS_LOCK_AWAKENED_WAITER - PALCS_LOCK_BIT;
942#endif // PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
943 CS_TRACE("[LCS-CN %p] Switching from {%d, %d, %d} to {%d, %d, %d} ==>\n",
944 pPalCriticalSection,
945 PALCS_GETWCOUNT(lVal), PALCS_GETAWBIT(lVal), PALCS_GETLBIT(lVal),
946 PALCS_GETWCOUNT(lNewVal), PALCS_GETAWBIT(lNewVal), PALCS_GETLBIT(lNewVal));
947
948 lNewVal = InterlockedCompareExchange(&pPalCriticalSection->LockCount,
949 lNewVal, lVal);
950
951 CS_TRACE("[LCS-CN %p] ==> %s\n", pPalCriticalSection,
952 (lNewVal == lVal) ? "OK" : "NO");
953
954 if (lNewVal == lVal)
955 {
956 // Wake up the waiter
957 PALCS_WakeUpWaiter (pPalCriticalSection);
958
959#ifdef PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
960 // In the fair lock case, we need to yield here to defeat
961 // the inherently unfair nature of the condition/predicate
962 // construct
963 sched_yield();
964#endif // PALCS_TRANSFER_OWNERSHIP_ON_RELEASE
965
966 goto ILCS_cs_exit;
967 }
968 }
969
970 // CS unlock failed due to race with another thread trying to
971 // register as waiter on it. We need to keep on looping. We
972 // intentionally do not yield here in order to reserve higher
973 // priority for the releasing thread.
974 //
975 // At this point lNewVal contains the latest LockCount value
976 // retrieved by one of the two InterlockedCompareExchange above;
977 // we can use this value as expected LockCount for the next loop,
978 // without the need to fetch it again.
979 lVal = lNewVal;
980 }
981
982 ILCS_cs_exit:
983 return;
984 }
985
986 /*++
987 Function:
988 CorUnix::InternalTryEnterCriticalSection
989
990 Tries to acquire a CS. It returns true on success, false if the CS is
991 locked by another thread
992 --*/
993 bool InternalTryEnterCriticalSection(
994 CPalThread * pThread,
995 PCRITICAL_SECTION pCriticalSection)
996 {
997 PAL_CRITICAL_SECTION * pPalCriticalSection =
998 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
999
1000 LONG lNewVal;
1001 SIZE_T threadId;
1002 bool fRet = true;
1003
1004 _ASSERTE(PalCsNotInitialized != pPalCriticalSection->cisInitState);
1005
1006 threadId = ObtainCurrentThreadId(pThread);
1007
1008 lNewVal = InterlockedCompareExchange (&pPalCriticalSection->LockCount,
1009 (LONG)PALCS_LOCK_BIT,
1010 (LONG)PALCS_LOCK_INIT);
1011 if (lNewVal == PALCS_LOCK_INIT)
1012 {
1013 // CS successfully acquired: setting ownership data
1014 pPalCriticalSection->OwningThread = threadId;
1015 pPalCriticalSection->RecursionCount = 1;
1016#ifdef _DEBUG
1017 if (NULL != pPalCriticalSection->DebugInfo)
1018 {
1019 pPalCriticalSection->DebugInfo->lAcquireCount += 1;
1020 pPalCriticalSection->DebugInfo->lEnterCount += 1;
1021 }
1022#endif // _DEBUG
1023
1024 goto ITECS_exit;
1025 }
1026
1027 // check if the current thread already owns the criticalSection
1028 if ((lNewVal & PALCS_LOCK_BIT) &&
1029 (pPalCriticalSection->OwningThread == threadId))
1030 {
1031 pPalCriticalSection->RecursionCount += 1;
1032#ifdef _DEBUG
1033 if (NULL != pPalCriticalSection->DebugInfo)
1034 {
1035 pPalCriticalSection->DebugInfo->lEnterCount += 1;
1036 }
1037#endif // _DEBUG
1038
1039 goto ITECS_exit;
1040 }
1041
1042 // Failed to acquire the CS
1043 fRet = false;
1044
1045 ITECS_exit:
1046 return fRet;
1047 }
1048#endif // MUTEX_BASED_CSS
1049
1050 /*++
1051 Function:
1052 CorUnix::PALCS_FullyInitialize
1053
1054 Fully initializes a CS previously initialied true InitializeCriticalSection.
1055 This method is called at the first contention on the target CS
1056 --*/
1057 bool PALCS_FullyInitialize(PAL_CRITICAL_SECTION * pPalCriticalSection)
1058 {
1059 LONG lVal, lNewVal;
1060 bool fRet = true;
1061
1062 lVal = pPalCriticalSection->cisInitState;
1063 if (PalCsFullyInitialized == lVal)
1064 {
1065 goto PCDI_exit;
1066 }
1067 if (PalCsUserInitialized == lVal)
1068 {
1069 int iRet;
1070 lNewVal = (LONG)PalCsFullyInitializing;
1071 lNewVal = InterlockedCompareExchange(
1072 (LONG *)&pPalCriticalSection->cisInitState, lNewVal, lVal);
1073 if (lNewVal != lVal)
1074 {
1075 if (PalCsFullyInitialized == lNewVal)
1076 {
1077 // Another thread did initialize this CS: we can
1078 // safely return 'true'
1079 goto PCDI_exit;
1080 }
1081
1082 // Another thread is still initializing this CS: yield and
1083 // spin by returning 'false'
1084 sched_yield();
1085 fRet = false;
1086 goto PCDI_exit;
1087 }
1088
1089 //
1090 // Actual native initialization
1091 //
1092 // Mutex
1093 iRet = pthread_mutex_init(&pPalCriticalSection->csndNativeData.mutex, NULL);
1094 if (0 != iRet)
1095 {
1096 ASSERT("Failed initializing mutex in CS @ %p [err=%d]\n",
1097 pPalCriticalSection, iRet);
1098 pPalCriticalSection->cisInitState = PalCsUserInitialized;
1099 fRet = false;
1100 goto PCDI_exit;
1101 }
1102#ifndef MUTEX_BASED_CSS
1103 // Condition
1104 iRet = pthread_cond_init(&pPalCriticalSection->csndNativeData.condition, NULL);
1105 if (0 != iRet)
1106 {
1107 ASSERT("Failed initializing condition in CS @ %p [err=%d]\n",
1108 pPalCriticalSection, iRet);
1109 pthread_mutex_destroy(&pPalCriticalSection->csndNativeData.mutex);
1110 pPalCriticalSection->cisInitState = PalCsUserInitialized;
1111 fRet = false;
1112 goto PCDI_exit;
1113 }
1114 // Predicate
1115 pPalCriticalSection->csndNativeData.iPredicate = 0;
1116#endif
1117
1118 pPalCriticalSection->cisInitState = PalCsFullyInitialized;
1119 }
1120 else if (PalCsFullyInitializing == lVal)
1121 {
1122 // Another thread is still initializing this CS: yield and
1123 // spin by returning 'false'
1124 sched_yield();
1125 fRet = false;
1126 goto PCDI_exit;
1127 }
1128 else
1129 {
1130 ASSERT("CS %p is not initialized", pPalCriticalSection);
1131 fRet = false;
1132 goto PCDI_exit;
1133 }
1134
1135 PCDI_exit:
1136 return fRet;
1137 }
1138
1139
1140 /*++
1141 Function:
1142 CorUnix::PALCS_WaitOnCS
1143
1144 Waits on a CS owned by another thread. It returns PalCsReturnWaiterAwakened
1145 if the thread actually waited on the CS and it has been awakened on CS
1146 release. It returns PalCsWaiterDidntWait if another thread is currently
1147 fully-initializing the CS and therefore the current thread couldn't wait
1148 on it
1149 --*/
1150 PalCsWaiterReturnState PALCS_WaitOnCS(PAL_CRITICAL_SECTION * pPalCriticalSection,
1151 LONG lInc)
1152 {
1153 DWORD lVal, lNewVal;
1154 PAL_ERROR palErr = NO_ERROR;
1155
1156 if (PalCsFullyInitialized != pPalCriticalSection->cisInitState)
1157 {
1158 // First contention, the CS native wait support need to be
1159 // initialized at this time
1160 if (!PALCS_FullyInitialize(pPalCriticalSection))
1161 {
1162 // The current thread failed the full initialization of the CS,
1163 // whether because another thread is race-initializing it, or
1164 // there are no enough memory/resources at this time, or
1165 // InitializeCriticalSection has never been called. By
1166 // returning we will cause the thread to spin on CS trying
1167 // again until the CS is initialized
1168 return PalCsWaiterDidntWait;
1169 }
1170 }
1171
1172 // Make sure we have a valid waiter increment
1173 _ASSERTE(PALCS_LOCK_WAITER_INC == lInc ||
1174 PALCS_LOCK_AWAKENED_WAITER == lInc);
1175
1176 do {
1177 lVal = pPalCriticalSection->LockCount;
1178
1179 // Make sure the waiter increment is compatible with the
1180 // awakened waiter bit value
1181 _ASSERTE(PALCS_LOCK_WAITER_INC == lInc ||
1182 PALCS_LOCK_AWAKENED_WAITER & lVal);
1183
1184 if (0 == (lVal & PALCS_LOCK_BIT))
1185 {
1186 // the CS is no longer locked, let's bail out
1187 return PalCsWaiterDidntWait;
1188 }
1189
1190 lNewVal = lVal + lInc;
1191
1192 // Make sure that this thread was whether an incoming one or it
1193 // was an awakened waiter and, in this case, we are now going to
1194 // turn off the awakened waiter bit
1195 _ASSERT_MSG(PALCS_LOCK_WAITER_INC == lInc ||
1196 0 == (PALCS_LOCK_AWAKENED_WAITER & lNewVal));
1197
1198 CS_TRACE("[WCS %p] Switching from {%d, %d, %d} to "
1199 "{%d, %d, %d} ==> ", pPalCriticalSection,
1200 PALCS_GETWCOUNT(lVal), PALCS_GETAWBIT(lVal), PALCS_GETLBIT(lVal),
1201 PALCS_GETWCOUNT(lNewVal), PALCS_GETAWBIT(lNewVal), PALCS_GETLBIT(lNewVal));
1202
1203 lNewVal = InterlockedCompareExchange (&pPalCriticalSection->LockCount,
1204 lNewVal, lVal);
1205
1206 CS_TRACE("[WCS %p] ==> %s\n", pPalCriticalSection,
1207 (lNewVal == lVal) ? "OK" : "NO");
1208
1209 } while (lNewVal != lVal);
1210
1211#ifdef _DEBUG
1212 if (NULL != pPalCriticalSection->DebugInfo)
1213 {
1214 pPalCriticalSection->DebugInfo->lContentionCount += 1;
1215 }
1216#endif // _DEBUG
1217
1218 // Do the actual native wait
1219 palErr = PALCS_DoActualWait(pPalCriticalSection);
1220 _ASSERT_MSG(NO_ERROR == palErr, "Native CS wait failed\n");
1221
1222 return PalCsReturnWaiterAwakened;
1223 }
1224
1225 /*++
1226 Function:
1227 CorUnix::PALCS_DoActualWait
1228
1229 Performs the actual native wait on the CS
1230 --*/
1231 PAL_ERROR PALCS_DoActualWait(PAL_CRITICAL_SECTION * pPalCriticalSection)
1232 {
1233 int iRet;
1234 PAL_ERROR palErr = NO_ERROR;
1235
1236 CS_TRACE("Trying to go to sleep [CS=%p]\n", pPalCriticalSection);
1237
1238 // Lock the mutex
1239 iRet = pthread_mutex_lock(&pPalCriticalSection->csndNativeData.mutex);
1240 if (0 != iRet)
1241 {
1242 palErr = ERROR_INTERNAL_ERROR;
1243 goto PCDAW_exit;
1244 }
1245
1246 CS_TRACE("Actually Going to sleep [CS=%p]\n", pPalCriticalSection);
1247
1248 while (0 == pPalCriticalSection->csndNativeData.iPredicate)
1249 {
1250 // Wait on the condition
1251 iRet = pthread_cond_wait(&pPalCriticalSection->csndNativeData.condition,
1252 &pPalCriticalSection->csndNativeData.mutex);
1253
1254 CS_TRACE("Got a signal on condition [pred=%d]!\n",
1255 pPalCriticalSection->csndNativeData.iPredicate);
1256 if (0 != iRet)
1257 {
1258 // Failed: unlock the mutex and bail out
1259 ASSERT("Failed waiting on condition in CS %p [err=%d]\n",
1260 pPalCriticalSection, iRet);
1261 pthread_mutex_unlock(&pPalCriticalSection->csndNativeData.mutex);
1262 palErr = ERROR_INTERNAL_ERROR;
1263 goto PCDAW_exit;
1264 }
1265 }
1266
1267 // Reset the predicate
1268 pPalCriticalSection->csndNativeData.iPredicate = 0;
1269
1270 // Unlock the mutex
1271 iRet = pthread_mutex_unlock(&pPalCriticalSection->csndNativeData.mutex);
1272 if (0 != iRet)
1273 {
1274 palErr = ERROR_INTERNAL_ERROR;
1275 goto PCDAW_exit;
1276 }
1277
1278 PCDAW_exit:
1279
1280 CS_TRACE("Just woken up [CS=%p]\n", pPalCriticalSection);
1281
1282 return palErr;
1283 }
1284
1285 /*++
1286 Function:
1287 CorUnix::PALCS_WakeUpWaiter
1288
1289 Wakes up the first thread waiting on the CS
1290 --*/
1291 PAL_ERROR PALCS_WakeUpWaiter(PAL_CRITICAL_SECTION * pPalCriticalSection)
1292 {
1293 int iRet;
1294 PAL_ERROR palErr = NO_ERROR;
1295
1296 _ASSERT_MSG(PalCsFullyInitialized == pPalCriticalSection->cisInitState,
1297 "Trying to wake up a waiter on CS not fully initialized\n");
1298
1299 // Lock the mutex
1300 iRet = pthread_mutex_lock(&pPalCriticalSection->csndNativeData.mutex);
1301 if (0 != iRet)
1302 {
1303 palErr = ERROR_INTERNAL_ERROR;
1304 goto PCWUW_exit;
1305 }
1306
1307 // Set the predicate
1308 pPalCriticalSection->csndNativeData.iPredicate = 1;
1309
1310 CS_TRACE("Signaling condition/predicate [pred=%d]!\n",
1311 pPalCriticalSection->csndNativeData.iPredicate);
1312
1313 // Signal the condition
1314 iRet = pthread_cond_signal(&pPalCriticalSection->csndNativeData.condition);
1315 if (0 != iRet)
1316 {
1317 // Failed: set palErr, but continue in order to unlock
1318 // the mutex anyway
1319 ASSERT("Failed setting condition in CS %p [ret=%d]\n",
1320 pPalCriticalSection, iRet);
1321 palErr = ERROR_INTERNAL_ERROR;
1322 }
1323
1324 // Unlock the mutex
1325 iRet = pthread_mutex_unlock(&pPalCriticalSection->csndNativeData.mutex);
1326 if (0 != iRet)
1327 {
1328 palErr = ERROR_INTERNAL_ERROR;
1329 goto PCWUW_exit;
1330 }
1331
1332 PCWUW_exit:
1333 return palErr;
1334 }
1335
1336#ifdef _DEBUG
1337 /*++
1338 Function:
1339 CorUnix::PALCS_ReportStatisticalData
1340
1341 Report creation/acquisition/contention statistical data for the all the
1342 CSs so far existed and no longer existing in the current process
1343 --*/
1344 void PALCS_ReportStatisticalData()
1345 {
1346#ifdef PAL_TRACK_CRITICAL_SECTIONS_DATA
1347 CPalThread * pThread = InternalGetCurrentThread();
1348
1349 if (NULL == pThread) DebugBreak();
1350
1351 // Take the lock for the global list of CS debug infos
1352 InternalEnterCriticalSection(pThread, (CRITICAL_SECTION*)&g_csPALCSsListLock);
1353
1354 LONG lPALCSInitializeCount = g_lPALCSInitializeCount;
1355 LONG lPALCSDeleteCount = g_lPALCSDeleteCount;
1356 LONG lPALCSAcquireCount = g_lPALCSAcquireCount;
1357 LONG lPALCSEnterCount = g_lPALCSEnterCount;
1358 LONG lPALCSContentionCount = g_lPALCSContentionCount;
1359 LONG lPALCSInternalInitializeCount = g_lPALCSInternalInitializeCount;
1360 LONG lPALCSInternalDeleteCount = g_lPALCSInternalDeleteCount;
1361 LONG lPALCSInternalAcquireCount = g_lPALCSInternalAcquireCount;
1362 LONG lPALCSInternalEnterCount = g_lPALCSInternalEnterCount;
1363 LONG lPALCSInternalContentionCount = g_lPALCSInternalContentionCount;
1364
1365 PLIST_ENTRY pItem = g_PALCSList.Flink;
1366 while (&g_PALCSList != pItem)
1367 {
1368 PCRITICAL_SECTION_DEBUG_INFO pDebugInfo =
1369 (PCRITICAL_SECTION_DEBUG_INFO)pItem;
1370
1371 if (pDebugInfo->pOwnerCS->fInternal)
1372 {
1373 lPALCSInternalAcquireCount += pDebugInfo->lAcquireCount;
1374 lPALCSInternalEnterCount += pDebugInfo->lEnterCount;
1375 lPALCSInternalContentionCount += pDebugInfo->lContentionCount;
1376 }
1377 else
1378 {
1379 lPALCSAcquireCount += pDebugInfo->lAcquireCount;
1380 lPALCSEnterCount += pDebugInfo->lEnterCount;
1381 lPALCSContentionCount += pDebugInfo->lContentionCount;
1382 }
1383
1384 pItem = pItem->Flink;
1385 }
1386
1387 // Release the lock for the global list of CS debug infos
1388 InternalLeaveCriticalSection(pThread, (CRITICAL_SECTION*)&g_csPALCSsListLock);
1389
1390 TRACE("Critical Sections Statistical Data:\n");
1391 TRACE("{\n");
1392 TRACE(" Client code CSs:\n");
1393 TRACE(" {\n");
1394 TRACE(" Initialize Count: %d\n", lPALCSInitializeCount);
1395 TRACE(" Delete Count: %d\n", lPALCSDeleteCount);
1396 TRACE(" Acquire Count: %d\n", lPALCSAcquireCount);
1397 TRACE(" Enter Count: %d\n", lPALCSEnterCount);
1398 TRACE(" Contention Count: %d\n", lPALCSContentionCount);
1399 TRACE(" }\n");
1400 TRACE(" Internal PAL CSs:\n");
1401 TRACE(" {\n");
1402 TRACE(" Initialize Count: %d\n", lPALCSInternalInitializeCount);
1403 TRACE(" Delete Count: %d\n", lPALCSInternalDeleteCount);
1404 TRACE(" Acquire Count: %d\n", lPALCSInternalAcquireCount);
1405 TRACE(" Enter Count: %d\n", lPALCSInternalEnterCount);
1406 TRACE(" Contention Count: %d\n", lPALCSInternalContentionCount);
1407 TRACE(" }\n");
1408 TRACE("}\n");
1409#endif // PAL_TRACK_CRITICAL_SECTIONS_DATA
1410 }
1411
1412 /*++
1413 Function:
1414 CorUnix::PALCS_DumpCSList
1415
1416 Dumps the list of all the CS currently existing in this process.
1417 --*/
1418 void PALCS_DumpCSList()
1419 {
1420 CPalThread * pThread = InternalGetCurrentThread();
1421
1422 // Take the lock for the global list of CS debug infos
1423 InternalEnterCriticalSection(pThread, (CRITICAL_SECTION*)&g_csPALCSsListLock);
1424
1425 PLIST_ENTRY pItem = g_PALCSList.Flink;
1426 while (&g_PALCSList != pItem)
1427 {
1428 PCRITICAL_SECTION_DEBUG_INFO pDebugInfo =
1429 (PCRITICAL_SECTION_DEBUG_INFO)pItem;
1430 PPAL_CRITICAL_SECTION pCS = pDebugInfo->pOwnerCS;
1431
1432 printf("CS @ %p \n"
1433 "{\tDebugInfo = %p -> \n",
1434 pCS, pDebugInfo);
1435
1436 printf("\t{\n\t\t[Link]\n\t\tpOwnerCS = %p\n"
1437 "\t\tAcquireCount \t= %d\n"
1438 "\t\tEnterCount \t= %d\n"
1439 "\t\tContentionCount = %d\n",
1440 pDebugInfo->pOwnerCS, pDebugInfo->lAcquireCount.Load(),
1441 pDebugInfo->lEnterCount.Load(), pDebugInfo->lContentionCount.Load());
1442 printf("\t}\n");
1443
1444 printf("\tLockCount \t= %#x\n"
1445 "\tRecursionCount \t= %d\n"
1446 "\tOwningThread \t= %p\n"
1447 "\tLockSemaphore \t= %p\n"
1448 "\tSpinCount \t= %u\n"
1449 "\tfInternal \t= %d\n"
1450 "\teInitState \t= %u\n"
1451 "\tpNativeData \t= %p ->\n",
1452 pCS->LockCount.Load(), pCS->RecursionCount, (void *)pCS->OwningThread,
1453 pCS->LockSemaphore, (unsigned)pCS->SpinCount, (int)pCS->fInternal,
1454 pCS->cisInitState.Load(), &pCS->csndNativeData);
1455
1456 printf("\t{\n\t\t[mutex]\n\t\t[condition]\n"
1457 "\t\tPredicate \t= %d\n"
1458 "\t}\n}\n",pCS->csndNativeData.iPredicate);
1459
1460 printf("}\n");
1461
1462 pItem = pItem->Flink;
1463 }
1464
1465 // Release the lock for the global list of CS debug infos
1466 InternalLeaveCriticalSection(pThread, (CRITICAL_SECTION*)&g_csPALCSsListLock);
1467 }
1468#endif // _DEBUG
1469
1470
1471#if defined(MUTEX_BASED_CSS) || defined(_DEBUG)
1472 /*++
1473 Function:
1474 CorUnix::InternalEnterCriticalSection
1475
1476 Enters a CS, causing the thread to block if the CS is owned by
1477 another thread
1478 --*/
1479#ifdef MUTEX_BASED_CSS
1480 void InternalEnterCriticalSection(
1481 CPalThread * pThread,
1482 PCRITICAL_SECTION pCriticalSection)
1483#else // MUTEX_BASED_CSS
1484 void MTX_InternalEnterCriticalSection(
1485 CPalThread * pThread,
1486 PCRITICAL_SECTION pCriticalSection)
1487#endif // MUTEX_BASED_CSS
1488
1489 {
1490 PAL_CRITICAL_SECTION * pPalCriticalSection =
1491 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
1492 int iRet;
1493 SIZE_T threadId;
1494
1495 _ASSERTE(PalCsNotInitialized != pPalCriticalSection->cisInitState);
1496
1497 threadId = ObtainCurrentThreadId(pThread);
1498
1499 /* check if the current thread already owns the criticalSection */
1500 if (pPalCriticalSection->OwningThread == threadId)
1501 {
1502 _ASSERTE(0 < pPalCriticalSection->RecursionCount);
1503 pPalCriticalSection->RecursionCount += 1;
1504 return;
1505 }
1506
1507 iRet = pthread_mutex_lock(&pPalCriticalSection->csndNativeData.mutex);
1508 _ASSERTE(0 == iRet);
1509
1510 pPalCriticalSection->OwningThread = threadId;
1511 pPalCriticalSection->RecursionCount = 1;
1512 }
1513
1514
1515 /*++
1516 Function:
1517 CorUnix::InternalLeaveCriticalSection
1518
1519 Leaves a currently owned CS
1520 --*/
1521#ifdef MUTEX_BASED_CSS
1522 void InternalLeaveCriticalSection(
1523 CPalThread * pThread,
1524 PCRITICAL_SECTION pCriticalSection)
1525#else // MUTEX_BASED_CSS
1526 void MTX_InternalLeaveCriticalSection(
1527 CPalThread * pThread,
1528 PCRITICAL_SECTION pCriticalSection)
1529#endif // MUTEX_BASED_CSS
1530 {
1531 PAL_CRITICAL_SECTION * pPalCriticalSection =
1532 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
1533 int iRet;
1534#ifdef _DEBUG
1535 SIZE_T threadId;
1536
1537 _ASSERTE(PalCsNotInitialized != pPalCriticalSection->cisInitState);
1538
1539 threadId = ObtainCurrentThreadId(pThread);
1540 _ASSERTE(threadId == pPalCriticalSection->OwningThread);
1541
1542 if (0 >= pPalCriticalSection->RecursionCount)
1543 DebugBreak();
1544
1545 _ASSERTE(0 < pPalCriticalSection->RecursionCount);
1546#endif // _DEBUG
1547
1548 if (0 < --pPalCriticalSection->RecursionCount)
1549 return;
1550
1551 pPalCriticalSection->OwningThread = 0;
1552
1553 iRet = pthread_mutex_unlock(&pPalCriticalSection->csndNativeData.mutex);
1554 _ASSERTE(0 == iRet);
1555 }
1556
1557 /*++
1558 Function:
1559 CorUnix::InternalTryEnterCriticalSection
1560
1561 Tries to acquire a CS. It returns true on success, false if the CS is
1562 locked by another thread
1563 --*/
1564#ifdef MUTEX_BASED_CSS
1565 bool InternalTryEnterCriticalSection(
1566 CPalThread * pThread,
1567 PCRITICAL_SECTION pCriticalSection)
1568#else // MUTEX_BASED_CSS
1569 bool MTX_InternalTryEnterCriticalSection(
1570 CPalThread * pThread,
1571 PCRITICAL_SECTION pCriticalSection)
1572#endif // MUTEX_BASED_CSS
1573 {
1574 PAL_CRITICAL_SECTION * pPalCriticalSection =
1575 reinterpret_cast<PAL_CRITICAL_SECTION*>(pCriticalSection);
1576 bool fRet;
1577 SIZE_T threadId;
1578
1579 _ASSERTE(PalCsNotInitialized != pPalCriticalSection->cisInitState);
1580
1581 threadId = ObtainCurrentThreadId(pThread);
1582
1583 /* check if the current thread already owns the criticalSection */
1584 if (pPalCriticalSection->OwningThread == threadId)
1585 {
1586 pPalCriticalSection->RecursionCount += 1;
1587 fRet = true;
1588 goto ITECS_exit;
1589 }
1590
1591 fRet = (0 == pthread_mutex_trylock(&pPalCriticalSection->csndNativeData.mutex));
1592
1593 if (fRet)
1594 {
1595 pPalCriticalSection->OwningThread = threadId;
1596 pPalCriticalSection->RecursionCount = 1;
1597 }
1598
1599 ITECS_exit:
1600 return fRet;
1601 }
1602#endif // MUTEX_BASED_CSS || _DEBUG
1603}
1604