/*
    Copyright 2005-2013 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
28 | |
29 | #ifndef __TBB__x86_eliding_mutex_impl_H |
30 | #define __TBB__x86_eliding_mutex_impl_H |
31 | |
32 | #ifndef __TBB_spin_mutex_H |
33 | #error Do not #include this internal file directly; use public TBB headers instead. |
34 | #endif |
35 | |
36 | #if ( __TBB_x86_32 || __TBB_x86_64 ) |
37 | |
38 | namespace tbb { |
39 | namespace interface7 { |
40 | namespace internal { |
41 | |
// Forward declaration only: x86_eliding_mutex below befriends
// padded_mutex<x86_eliding_mutex>, so the template name must be visible here.
// The definition lives elsewhere in the library.
template<typename Mutex>
class padded_mutex;
44 | |
//! An eliding lock that occupies a single byte.
/** A x86_eliding_mutex is an HLE-enabled spin mutex. It is recommended to
    put the mutex on a cache line that is not shared by the data it protects.
    It should be used for locking short critical sections where the lock is
    contended but the data it protects are not. If zero-initialized, the
    mutex is considered unheld.
    @ingroup synchronization */
class x86_eliding_mutex {

    //! 0 if lock is released, 1 if lock is acquired.
    // All lock/unlock traffic goes through the __TBB_*ByteElided machine-layer
    // macros below, which are expected to apply hardware lock elision to this
    // byte (NOTE(review): exact HLE encoding lives in the machine headers —
    // confirm there).
    __TBB_atomic_flag flag;

    // padded_mutex wraps this type and needs access to scoped_lock even when
    // the preprocessor logic below makes scoped_lock private.
    friend class padded_mutex<x86_eliding_mutex>;

public:
    //! Construct unacquired lock.
    /** Equivalent to zero-initialization of *this. */
    x86_eliding_mutex() : flag(0) {}

    // bug in gcc 3.x.x causes syntax error in spite of the friend declaration above.
    // Make the scoped_lock public in that case.
#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000
    // scoped_lock remains public: either the eliding mutex is enabled for
    // direct use, or we are working around the gcc 3.x friend bug above.
    // (Intentionally empty — the default access of the enclosing section,
    // public, stays in effect.)
#else
    // by default we will not provide the scoped_lock interface. The user
    // should use the padded version of the mutex. scoped_lock is used in
    // padded_mutex template.
private:
#endif
    // scoped_lock in padded_mutex<> is the interface to use.
    //! Represents acquisition of a mutex.
    /** RAII guard: releases the held mutex (if any) on destruction. */
    class scoped_lock : tbb::internal::no_copy {
    private:
        //! Points to currently held mutex, or NULL if no lock is held.
        x86_eliding_mutex* my_mutex;

    public:
        //! Construct without acquiring a mutex.
        scoped_lock() : my_mutex(NULL) {}

        //! Construct and acquire lock on a mutex.
        scoped_lock( x86_eliding_mutex& m ) : my_mutex(NULL) { acquire(m); }

        //! Acquire lock.
        /** Blocks (spins) until the mutex is acquired. Asserts that this
            scoped_lock is not already holding a mutex. */
        void acquire( x86_eliding_mutex& m ) {
            __TBB_ASSERT( !my_mutex, "already holding a lock" );

            my_mutex=&m;
            my_mutex->lock();
        }

        //! Try acquiring lock (non-blocking)
        /** Return true if lock acquired; false otherwise.
            my_mutex is set only on success, so a failed attempt leaves this
            scoped_lock reusable. */
        bool try_acquire( x86_eliding_mutex& m ) {
            __TBB_ASSERT( !my_mutex, "already holding a lock" );

            bool result = m.try_lock();
            if( result ) {
                my_mutex = &m;
            }
            return result;
        }

        //! Release lock
        /** Asserts that a mutex is currently held; resets my_mutex so the
            destructor does not release twice. */
        void release() {
            __TBB_ASSERT( my_mutex, "release on scoped_lock that is not holding a lock" );

            my_mutex->unlock();
            my_mutex = NULL;
        }

        //! Destroy lock. If holding a lock, releases the lock first.
        ~scoped_lock() {
            if( my_mutex ) {
                release();
            }
        }
    };
#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000
    // Access level was never changed above; nothing to restore.
#else
public:
#endif /* __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000 */

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = false;

    // ISO C++0x compatibility methods

    //! Acquire lock
    /** Spins until the byte is acquired; elision (if any) is handled inside
        the machine-layer macro. */
    void lock() {
        __TBB_LockByteElided(flag);
    }

    //! Try acquiring lock (non-blocking)
    /** Return true if lock acquired; false otherwise. */
    bool try_lock() {
        return __TBB_TryLockByteElided(flag);
    }

    //! Release lock
    /** Must pair with a preceding lock()/successful try_lock() on the same
        byte so an elided acquire is matched by an elided release. */
    void unlock() {
        __TBB_UnlockByteElided( flag );
    }
}; // end of x86_eliding_mutex
150 | |
151 | } // namespace internal |
152 | } // namespace interface7 |
153 | } // namespace tbb |
154 | |
155 | #endif /* ( __TBB_x86_32 || __TBB_x86_64 ) */ |
156 | |
157 | #endif /* __TBB__x86_eliding_mutex_impl_H */ |
158 | |