/*
    Copyright 2005-2013 Intel Corporation. All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction. Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#ifndef __TBB_spin_mutex_H
#define __TBB_spin_mutex_H

#include <cstddef>
#include <new>
#include "aligned_space.h"
#include "tbb_stddef.h"
#include "tbb_machine.h"
#include "tbb_profiling.h"
#include "internal/_mutex_padding.h"

namespace tbb {

//! A lock that occupies a single byte.
/** A spin_mutex is a spin lock that fits in a single byte.
    It should be used only for locking short critical sections
    (typically less than 20 instructions) when fairness is not an issue.
    If zero-initialized, the mutex is considered unheld.
    @ingroup synchronization */
class spin_mutex {
    //! 0 if lock is released, 1 if lock is acquired.
    __TBB_atomic_flag flag;

public:
    //! Construct unacquired lock.
    /** Equivalent to zero-initialization of *this. */
    spin_mutex() : flag(0) {
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

    //! Represents acquisition of a mutex.
    class scoped_lock : internal::no_copy {
    private:
        //! Points to currently held mutex, or NULL if no lock is held.
        spin_mutex* my_mutex;

        //! Value to store into spin_mutex::flag to unlock the mutex.
        /** This variable is no longer used. Instead, 0 and 1 are used to
            represent that the lock is free and acquired, respectively.
            We keep the member variable here to ensure backward compatibility. */
        __TBB_Flag my_unlock_value;

        //! Like acquire, but with ITT instrumentation.
        void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m );

        //! Like try_acquire, but with ITT instrumentation.
        bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m );

        //! Like release, but with ITT instrumentation.
        void __TBB_EXPORTED_METHOD internal_release();

        friend class spin_mutex;

    public:
        //! Construct without acquiring a mutex.
        scoped_lock() : my_mutex(NULL), my_unlock_value(0) {}

        //! Construct and acquire lock on a mutex.
        scoped_lock( spin_mutex& m ) : my_unlock_value(0) {
            internal::suppress_unused_warning(my_unlock_value);
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            my_mutex=NULL;
            internal_acquire(m);
#else
            my_mutex=&m;
            __TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Acquire lock.
        void acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            internal_acquire(m);
#else
            my_mutex = &m;
            __TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Try acquiring lock (non-blocking)
        /** Return true if lock acquired; false otherwise. */
        bool try_acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            return internal_try_acquire(m);
#else
            bool result = __TBB_TryLockByte(m.flag);
            if( result )
                my_mutex = &m;
            return result;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Release lock
        void release() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            internal_release();
#else
            __TBB_UnlockByte(my_mutex->flag);
            my_mutex = NULL;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
        }

        //! Destroy lock. If holding a lock, releases the lock first.
        ~scoped_lock() {
            if( my_mutex ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
                internal_release();
#else
                __TBB_UnlockByte(my_mutex->flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
            }
        }
    };
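
    /* A minimal usage sketch of scoped_lock (illustrative only, not part of
       the interface declared in this header). The names my_mutex_obj,
       my_counter, increment, and try_increment are hypothetical; the include
       path follows the usual TBB convention.

           #include "tbb/spin_mutex.h"

           tbb::spin_mutex my_mutex_obj;
           long my_counter = 0;

           void increment() {
               // The lock is acquired on construction and released when the
               // scoped_lock goes out of scope (RAII).
               tbb::spin_mutex::scoped_lock lock(my_mutex_obj);
               ++my_counter;
           }

           bool try_increment() {
               tbb::spin_mutex::scoped_lock lock;
               if( lock.try_acquire(my_mutex_obj) ) {
                   ++my_counter;
                   return true;   // released by the scoped_lock destructor
               }
               return false;      // mutex was busy; nothing was done
           }
    */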

    //! Internal constructor with ITT instrumentation.
    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = false;

    // ISO C++0x compatibility methods

    //! Acquire lock
    void lock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock,1> tmp;
        new(tmp.begin()) scoped_lock(*this);
#else
        __TBB_LockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS*/
    }

    //! Try acquiring lock (non-blocking)
    /** Return true if lock acquired; false otherwise. */
    bool try_lock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock,1> tmp;
        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
#else
        return __TBB_TryLockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS*/
    }

    //! Release lock
    void unlock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock,1> tmp;
        scoped_lock& s = *tmp.begin();
        s.my_mutex = this;
        s.internal_release();
#else
        __TBB_store_with_release(flag, 0);
#endif /* TBB_USE_THREADING_TOOLS */
    }

    friend class scoped_lock;
}; // end of spin_mutex
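
/* A minimal sketch of the ISO C++0x-style interface above (illustrative
   only), useful when the mutex is driven directly or through a generic lock
   adaptor. The names m, update, and try_update are hypothetical.

       tbb::spin_mutex m;

       void update() {
           m.lock();            // spin until the byte flag is acquired
           // ... short critical section ...
           m.unlock();          // release with a store-with-release
       }

       bool try_update() {
           if( !m.try_lock() )  // non-blocking attempt
               return false;
           // ... short critical section ...
           m.unlock();
           return true;
       }
*/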

__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)

} // namespace tbb

#if ( __TBB_x86_32 || __TBB_x86_64 )
#include "internal/_x86_eliding_mutex_impl.h"
#endif

namespace tbb {
//! A cross-platform spin mutex with speculative lock acquisition.
/** On platforms with proper HW support, this lock may speculatively execute
    its critical sections, using HW mechanisms to detect real data races and
    ensure atomicity of the critical sections. In particular, it uses
    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).
    Without such HW support, it behaves like a spin_mutex.
    It should be used for locking short critical sections where the lock is
    contended but the data it protects are not. If zero-initialized, the
    mutex is considered unheld.
    @ingroup synchronization */

#if ( __TBB_x86_32 || __TBB_x86_64 )
typedef interface7::internal::padded_mutex<interface7::internal::x86_eliding_mutex> speculative_spin_mutex;
#else
typedef interface7::internal::padded_mutex<spin_mutex> speculative_spin_mutex;
#endif
__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex)
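
/* A minimal usage sketch (illustrative only): speculative_spin_mutex is used
   through its scoped_lock, just like spin_mutex. On Intel TSX-capable
   hardware the critical section may execute speculatively; otherwise it
   falls back to ordinary spinning. The names sm, shared_value, and touch are
   hypothetical.

       tbb::speculative_spin_mutex sm;
       int shared_value = 0;

       void touch() {
           tbb::speculative_spin_mutex::scoped_lock lock(sm);
           ++shared_value;
       }
*/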

} // namespace tbb

#endif /* __TBB_spin_mutex_H */