/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file atomic/detail/ops_gcc_atomic.hpp
 *
 * This header contains an implementation of the \c operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_

#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
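
// On clang targeting x86 with cmpxchg8b/cmpxchg16b available, the CAS-based fallback
// machinery from these headers is needed by the clang bug workarounds further down in this file.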
#if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif

#if __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE ||\
    __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE || __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE ||\
    __GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE || __GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE ||\
    __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE
// There are platforms where we need to use larger storage types
#include <boost/atomic/detail/int_sizes.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#endif

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

#if defined(__INTEL_COMPILER)
// This pragma is used to suppress warning #32013, described below, for the Intel Compiler.
// In debug builds the compiler does not inline any functions, so basically
// every atomic function call results in this warning. I don't know any other
// way to selectively disable just this one warning.
#pragma system_header
#endif

namespace boost {
namespace atomics {
namespace detail {

/*!
 * The function converts \c boost::memory_order values to the compiler-specific constants.
 *
 * NOTE: The intention is that the function is optimized away by the compiler, and the
 * compiler-specific constants are passed to the intrinsics. I know constexpr doesn't
 * work in this case because the standard atomics interface requires memory ordering
 * constants to be passed as function arguments, at which point they stop being constexpr.
 * However, it is crucial that the compiler sees constants and not runtime values,
 * because otherwise it just ignores the ordering value and always uses seq_cst.
 * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
 * gcc 4.8.2. Intel Compiler issues a warning in this case:
 *
 * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
 *
 * while gcc acts silently.
 *
 * To mitigate the problem ALL functions, including the atomic<> members, must be
 * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
 * all functions are called with constant orderings and call the intrinsics properly.
 *
 * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
 * inline functions even when marked with BOOST_FORCEINLINE. In this case all atomic
 * operations will be executed with seq_cst semantics.
 */
BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
{
    return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
        (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
        (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
}
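
// Illustration (hypothetical call, not part of this header): once force-inlined with a
// constant ordering, an expression such as
//
//     __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(memory_order_acquire))
//
// folds down to __atomic_load_n(&storage, __ATOMIC_ACQUIRE), so the intrinsic receives the
// compile-time constant it requires.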

template< typename T >
struct gcc_atomic_operations
{
    typedef T storage_type;

    // Note: In the current implementation, gcc_atomic_operations is only used when the __atomic
    // intrinsics of the particular size are always lock-free (i.e. the corresponding LOCK_FREE
    // macro is 2). Therefore it is safe to always set is_always_lock_free to true here.
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, false,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, true,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
    }
};
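
// The operations< Size, Signed > specializations below select an implementation for each
// storage size: gcc_atomic_operations when the sized __atomic intrinsics are lock-free,
// a CAS-based implementation to work around the clang DCAS bugs referenced below, and
// extending_cas_based_operations layered over the next larger lock-free storage type when
// the sized intrinsics are not lock-free.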

#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)

// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
template< bool Signed >
struct operations< 16u, Signed > :
    public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
};

#else

template< bool Signed >
struct operations< 16u, Signed > :
    public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >
{
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};

#endif
#endif


#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)

// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
template< bool Signed >
struct operations< 8u, Signed > :
    public cas_based_operations< gcc_dcas_x86< Signed > >
{
};

#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT64_EXTENDED

template< bool Signed >
struct operations< 8u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
{
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};

#else

template< bool Signed >
struct operations< 8u, Signed > :
    public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >
{
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};

#endif
#endif

#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT32_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
{
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};

#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
{
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};

#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

#else

template< bool Signed >
struct operations< 4u, Signed > :
    public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >
{
    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
};

#endif
#endif

#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT16_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
{
    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
{
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
{
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};

#endif

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >
{
    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
};

#endif
#endif

#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE) ||\
    (__GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE) ||\
    (__GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE)

#if !defined(BOOST_ATOMIC_DETAIL_INT16_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
{
    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
{
    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
{
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
{
    typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};

#endif

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type >
{
    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
};

#endif
#endif

#undef BOOST_ATOMIC_DETAIL_INT16_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT32_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT64_EXTENDED

BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
}

BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
}

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_