1 | /* |
2 | Copyright (c) 2005-2019 Intel Corporation |
3 | |
4 | Licensed under the Apache License, Version 2.0 (the "License"); |
5 | you may not use this file except in compliance with the License. |
6 | You may obtain a copy of the License at |
7 | |
8 | http://www.apache.org/licenses/LICENSE-2.0 |
9 | |
10 | Unless required by applicable law or agreed to in writing, software |
11 | distributed under the License is distributed on an "AS IS" BASIS, |
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | See the License for the specific language governing permissions and |
14 | limitations under the License. |
15 | */ |
16 | |
17 | #ifndef __TBB_queuing_mutex_H |
18 | #define __TBB_queuing_mutex_H |
19 | |
20 | #include <cstring> |
21 | #include "atomic.h" |
22 | #include "tbb_profiling.h" |
23 | |
24 | namespace tbb { |
25 | |
26 | //! Queuing mutex with local-only spinning. |
27 | /** @ingroup synchronization */ |
//! Queuing mutex with local-only spinning.
/** A fair (FIFO) mutex: waiting threads link themselves into a queue via
    their scoped_lock nodes (the `next` pointer below) and each spins only on
    its own `going` flag rather than on a shared word — the classic MCS-style
    scheme, which avoids cache-line contention among waiters.
    Copy construction/assignment is disabled via the deprecated-and-disabled
    base class.  See the mutex traits below: fair, non-recursive, not rw.
    @ingroup synchronization */
class queuing_mutex : internal::mutex_copy_deprecated_and_disabled {
public:
    //! Construct unacquired mutex.
    /** The mutex is free when q_tail is NULL (no competitors queued). */
    queuing_mutex() {
        q_tail = NULL;
#if TBB_USE_THREADING_TOOLS
        // Register the mutex with threading analysis tools (e.g. Intel Inspector).
        internal_construct();
#endif
    }

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release lock.
        It also nicely provides the "node" for queuing locks: each scoped_lock
        is the queue entry its owning thread spins on. */
    class scoped_lock: internal::no_copy {
        //! Initialize fields to mean "no lock held".
        void initialize() {
            mutex = NULL;
            going = 0;
#if TBB_USE_ASSERT
            // In debug builds, fill `next` with a poison value so that any use
            // before it is legitimately set trips an assertion.
            internal::poison_pointer(next);
#endif /* TBB_USE_ASSERT */
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() {initialize();}

        //! Acquire lock on given mutex.
        /** Blocks (spins on this node's `going` flag) until the lock is granted. */
        scoped_lock( queuing_mutex& m ) {
            initialize();
            acquire(m);
        }

        //! Release lock (if lock is held).
        /** `mutex` is non-NULL exactly while this scoped_lock holds a mutex. */
        ~scoped_lock() {
            if( mutex ) release();
        }

        // NOTE: acquire/try_acquire/release are defined out-of-line in the
        // TBB runtime library (hence __TBB_EXPORTED_METHOD), so this header
        // fixes their signatures and the member layout they rely on.

        //! Acquire lock on given mutex.
        void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        /** Returns true if the lock was obtained, false otherwise. */
        bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );

        //! Release lock.
        void __TBB_EXPORTED_METHOD release();

    private:
        //! The pointer to the mutex owned, or NULL if not holding a mutex.
        queuing_mutex* mutex;

        //! The pointer to the next competitor for a mutex
        /** Valid only while this node is enqueued; poisoned in debug builds
            when the lock is not held (see initialize()). */
        scoped_lock *next;

        //! The local spin-wait variable
        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
            zero-initialization.  Defining it as an entire word instead of
            a byte seems to help performance slightly. */
        uintptr_t going;
    };

    //! Hook for threading analysis tools; defined in the TBB runtime library.
    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock
    /** NULL when the mutex is free; otherwise points to the tail of the
        waiter queue, which new competitors swap themselves onto. */
    atomic<scoped_lock*> q_tail;

};
102 | |
103 | __TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) |
104 | |
105 | } // namespace tbb |
106 | |
107 | #endif /* __TBB_queuing_mutex_H */ |
108 | |