1 | /* |
2 | Copyright (c) 2005-2019 Intel Corporation |
3 | |
4 | Licensed under the Apache License, Version 2.0 (the "License"); |
5 | you may not use this file except in compliance with the License. |
6 | You may obtain a copy of the License at |
7 | |
8 | http://www.apache.org/licenses/LICENSE-2.0 |
9 | |
10 | Unless required by applicable law or agreed to in writing, software |
11 | distributed under the License is distributed on an "AS IS" BASIS, |
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | See the License for the specific language governing permissions and |
14 | limitations under the License. |
15 | */ |
16 | |
17 | #ifndef __TBB_recursive_mutex_H |
18 | #define __TBB_recursive_mutex_H |
19 | |
20 | #if _WIN32||_WIN64 |
21 | #include "machine/windows_api.h" |
22 | #else |
23 | #include <pthread.h> |
24 | #endif /* _WIN32||_WIN64 */ |
25 | |
26 | #include <new> |
27 | #include "aligned_space.h" |
28 | #include "tbb_stddef.h" |
29 | #include "tbb_profiling.h" |
30 | |
31 | namespace tbb { |
32 | //! Mutex that allows recursive mutex acquisition. |
33 | /** Mutex that allows recursive mutex acquisition. |
34 | @ingroup synchronization */ |
class recursive_mutex : internal::mutex_copy_deprecated_and_disabled {
public:
    //! Construct unacquired recursive_mutex.
    recursive_mutex() {
#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS
        // Debug/profiling builds route through the exported library routine so
        // the mutex state can be tracked by assertions and threading tools.
        internal_construct();
#else
#if _WIN32||_WIN64
        // A Win32 CRITICAL_SECTION is reentrant by design, so it provides the
        // recursive semantics directly.  4000 is the spin count tried before
        // blocking; 0 means no extra flags.
        InitializeCriticalSectionEx(&impl, 4000, 0);
#else
        // POSIX mutexes are not recursive by default: the recursive type must
        // be requested explicitly through a mutex attribute object.
        pthread_mutexattr_t mtx_attr;
        int error_code = pthread_mutexattr_init( &mtx_attr );
        if( error_code )
            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed" );

        pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE );
        error_code = pthread_mutex_init( &impl, &mtx_attr );
        if( error_code )
            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed" );

        // The attribute object is only needed while initializing the mutex.
        pthread_mutexattr_destroy( &mtx_attr );
#endif /* _WIN32||_WIN64*/
#endif /* TBB_USE_ASSERT */
    };

    //! Destroy an unacquired recursive_mutex.
    // NOTE(review): the constructor's guard is TBB_USE_ASSERT||TBB_USE_THREADING_TOOLS
    // while the destructor's is TBB_USE_ASSERT only, so a threading-tools-only
    // build constructs via internal_construct() but destroys inline — presumably
    // internal_construct() leaves the native mutex in a state these calls can
    // tear down; verify against the library implementation.
    ~recursive_mutex() {
#if TBB_USE_ASSERT
        internal_destroy();
#else
#if _WIN32||_WIN64
        DeleteCriticalSection(&impl);
#else
        pthread_mutex_destroy(&impl);

#endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    };

    class scoped_lock;
    friend class scoped_lock;

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release lock.
        It also nicely provides the "node" for queuing locks. */
    class scoped_lock: internal::no_copy {
    public:
        //! Construct lock that has not acquired a recursive_mutex.
        scoped_lock() : my_mutex(NULL) {};

        //! Acquire lock on given mutex.
        scoped_lock( recursive_mutex& mutex ) {
#if TBB_USE_ASSERT
            // In debug builds my_mutex is set up front so that internal_acquire
            // can validate it; in release builds acquire() sets it itself.
            my_mutex = &mutex;
#endif /* TBB_USE_ASSERT */
            acquire( mutex );
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            // my_mutex doubles as the "is held" flag: non-NULL means this
            // scoped_lock currently owns a lock that must be released.
            if( my_mutex )
                release();
        }

        //! Acquire lock on given mutex.
        void acquire( recursive_mutex& mutex ) {
#if TBB_USE_ASSERT
            internal_acquire( mutex );
#else
            my_mutex = &mutex;
            mutex.lock();
#endif /* TBB_USE_ASSERT */
        }

        //! Try acquire lock on given recursive_mutex.
        /** Returns true if the lock was acquired; false otherwise.
            my_mutex is recorded only on success, so a failed attempt leaves
            this scoped_lock unattached. */
        bool try_acquire( recursive_mutex& mutex ) {
#if TBB_USE_ASSERT
            return internal_try_acquire( mutex );
#else
            bool result = mutex.try_lock();
            if( result )
                my_mutex = &mutex;
            return result;
#endif /* TBB_USE_ASSERT */
        }

        //! Release lock
        void release() {
#if TBB_USE_ASSERT
            internal_release();
#else
            my_mutex->unlock();
            // Clear the pointer so the destructor does not release twice.
            my_mutex = NULL;
#endif /* TBB_USE_ASSERT */
        }

    private:
        //! The pointer to the current recursive_mutex to work
        /** NULL when this scoped_lock does not hold a lock. */
        recursive_mutex* my_mutex;

        //! All checks from acquire using mutex.state were moved here
        void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m );

        //! All checks from try_acquire using mutex.state were moved here
        bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m );

        //! All checks from release using mutex.state were moved here
        void __TBB_EXPORTED_METHOD internal_release();

        friend class recursive_mutex;
    };

    // Mutex traits (compile-time properties used by generic mutex code)
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = true;
    static const bool is_fair_mutex = false;

    // C++0x compatibility interface

    //! Acquire lock
    void lock() {
#if TBB_USE_ASSERT
        // Placement-new a scoped_lock into raw storage and deliberately never
        // run its destructor: the destructor would release the lock, but here
        // the mutex must stay held until a matching unlock().
        aligned_space<scoped_lock> tmp;
        new(tmp.begin()) scoped_lock(*this);
#else
#if _WIN32||_WIN64
        EnterCriticalSection(&impl);
#else
        int error_code = pthread_mutex_lock(&impl);
        if( error_code )
            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_lock failed" );
#endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }

    //! Try acquiring lock (non-blocking)
    /** Return true if lock acquired; false otherwise. */
    bool try_lock() {
#if TBB_USE_ASSERT
        // Same never-destroyed scoped_lock trick as lock(): on success the
        // mutex remains held after tmp's storage goes out of scope.
        aligned_space<scoped_lock> tmp;
        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
#else
#if _WIN32||_WIN64
        return TryEnterCriticalSection(&impl)!=0;
#else
        return pthread_mutex_trylock(&impl)==0;
#endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }

    //! Release lock
    void unlock() {
#if TBB_USE_ASSERT
        // Fabricate a scoped_lock attached to this mutex so the checked
        // release path (internal_release) can be reused for the C++0x API.
        aligned_space<scoped_lock> tmp;
        scoped_lock& s = *tmp.begin();
        s.my_mutex = this;
        s.internal_release();
#else
#if _WIN32||_WIN64
        LeaveCriticalSection(&impl);
#else
        pthread_mutex_unlock(&impl);
#endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }

    //! Return native_handle
    /** Exposes the underlying OS primitive (LPCRITICAL_SECTION on Windows,
        pthread_mutex_t* elsewhere) for interoperability with native APIs. */
#if _WIN32||_WIN64
    typedef LPCRITICAL_SECTION native_handle_type;
#else
    typedef pthread_mutex_t* native_handle_type;
#endif
    native_handle_type native_handle() { return (native_handle_type) &impl; }

private:
#if _WIN32||_WIN64
    //! The underlying OS synchronization object.
    CRITICAL_SECTION impl;
    //! Lifetime marker checked by the internal_* routines in debug builds.
    /** The distinctive magic values make an uninitialized or already-destroyed
        mutex detectable. */
    enum state_t {
        INITIALIZED=0x1234,
        DESTROYED=0x789A,
    } state;
#else
    //! The underlying OS synchronization object.
    pthread_mutex_t impl;
#endif /* _WIN32||_WIN64 */

    //! All checks from mutex constructor using mutex.state were moved here
    void __TBB_EXPORTED_METHOD internal_construct();

    //! All checks from mutex destructor using mutex.state were moved here
    void __TBB_EXPORTED_METHOD internal_destroy();
};
225 | |
226 | __TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex) |
227 | |
228 | } // namespace tbb |
229 | |
230 | #endif /* __TBB_recursive_mutex_H */ |
231 | |