1 | /* |
2 | Copyright (c) 2005-2019 Intel Corporation |
3 | |
4 | Licensed under the Apache License, Version 2.0 (the "License"); |
5 | you may not use this file except in compliance with the License. |
6 | You may obtain a copy of the License at |
7 | |
8 | http://www.apache.org/licenses/LICENSE-2.0 |
9 | |
10 | Unless required by applicable law or agreed to in writing, software |
11 | distributed under the License is distributed on an "AS IS" BASIS, |
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | See the License for the specific language governing permissions and |
14 | limitations under the License. |
15 | */ |
16 | |
17 | #ifndef __TBB_atomic_H |
18 | #define __TBB_atomic_H |
19 | |
20 | #include <cstddef> |
21 | |
22 | #if _MSC_VER |
23 | #define __TBB_LONG_LONG __int64 |
24 | #else |
25 | #define __TBB_LONG_LONG long long |
26 | #endif /* _MSC_VER */ |
27 | |
28 | #include "tbb_machine.h" |
29 | |
30 | #if _MSC_VER && !__INTEL_COMPILER |
31 | // Suppress overzealous compiler warnings till the end of the file |
32 | #pragma warning (push) |
33 | #pragma warning (disable: 4244 4267 4512) |
34 | #endif |
35 | |
36 | namespace tbb { |
37 | |
38 | //! Specifies memory semantics. |
39 | enum memory_semantics { |
40 | //! Sequential consistency |
41 | full_fence, |
42 | //! Acquire |
43 | acquire, |
44 | //! Release |
45 | release, |
46 | //! No ordering |
47 | relaxed |
48 | }; |
49 | |
50 | //! @cond INTERNAL |
51 | namespace internal { |
52 | |
53 | #if __TBB_ALIGNAS_PRESENT |
54 | #define __TBB_DECL_ATOMIC_FIELD(t,f,a) alignas(a) t f; |
55 | #elif __TBB_ATTRIBUTE_ALIGNED_PRESENT |
56 | #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a))); |
57 | #elif __TBB_DECLSPEC_ALIGN_PRESENT |
58 | #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f; |
59 | #else |
60 | #error Do not know syntax for forcing alignment. |
61 | #endif |
62 | |
63 | template<size_t S> |
64 | struct atomic_rep; // Primary template declared, but never defined. |
65 | |
66 | template<> |
67 | struct atomic_rep<1> { // Specialization |
68 | typedef int8_t word; |
69 | }; |
70 | template<> |
71 | struct atomic_rep<2> { // Specialization |
72 | typedef int16_t word; |
73 | }; |
74 | template<> |
75 | struct atomic_rep<4> { // Specialization |
76 | #if _MSC_VER && !_WIN64 |
77 | // Work-around that avoids spurious /Wp64 warnings |
78 | typedef intptr_t word; |
79 | #else |
80 | typedef int32_t word; |
81 | #endif |
82 | }; |
83 | #if __TBB_64BIT_ATOMICS |
84 | template<> |
85 | struct atomic_rep<8> { // Specialization |
86 | typedef int64_t word; |
87 | }; |
88 | #endif |
89 | |
90 | template<typename value_type, size_t size> |
91 | struct aligned_storage; |
92 | |
// The specializations are needed because MSVC's __declspec(align()) accepts _literal_ constants only.
94 | #if __TBB_ATOMIC_CTORS |
95 | #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ |
96 | template<typename value_type> \ |
97 | struct aligned_storage<value_type,S> { \ |
98 | __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ |
99 | aligned_storage() = default ; \ |
100 | constexpr aligned_storage(value_type value):my_value(value){} \ |
101 | }; \ |
102 | |
103 | #else |
104 | #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ |
105 | template<typename value_type> \ |
106 | struct aligned_storage<value_type,S> { \ |
107 | __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ |
108 | }; \ |
109 | |
110 | #endif |
111 | |
112 | template<typename value_type> |
113 | struct aligned_storage<value_type,1> { |
114 | value_type my_value; |
115 | #if __TBB_ATOMIC_CTORS |
116 | aligned_storage() = default ; |
117 | constexpr aligned_storage(value_type value):my_value(value){} |
118 | #endif |
119 | }; |
120 | |
121 | ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2) |
122 | ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4) |
123 | #if __TBB_64BIT_ATOMICS |
124 | ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8) |
125 | #endif |
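
// For example, with alignas support ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4) yields
//     template<typename value_type>
//     struct aligned_storage<value_type,4> { alignas(4) value_type my_value; /* ctors */ };
// so the stored value meets the alignment assumed by the machine-level primitives below.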
126 | |
127 | template<size_t Size, memory_semantics M> |
128 | struct atomic_traits; // Primary template declared, but not defined. |
129 | |
130 | #define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \ |
131 | template<> struct atomic_traits<S,M> { \ |
132 | typedef atomic_rep<S>::word word; \ |
133 | inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ |
134 | return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \ |
135 | } \ |
136 | inline static word fetch_and_add( volatile void* location, word addend ) { \ |
137 | return __TBB_machine_fetchadd##S##M(location,addend); \ |
138 | } \ |
139 | inline static word fetch_and_store( volatile void* location, word value ) { \ |
140 | return __TBB_machine_fetchstore##S##M(location,value); \ |
141 | } \ |
142 | }; |
143 | |
144 | #define __TBB_DECL_ATOMIC_PRIMITIVES(S) \ |
145 | template<memory_semantics M> \ |
146 | struct atomic_traits<S,M> { \ |
147 | typedef atomic_rep<S>::word word; \ |
148 | inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ |
149 | return __TBB_machine_cmpswp##S(location,new_value,comparand); \ |
150 | } \ |
151 | inline static word fetch_and_add( volatile void* location, word addend ) { \ |
152 | return __TBB_machine_fetchadd##S(location,addend); \ |
153 | } \ |
154 | inline static word fetch_and_store( volatile void* location, word value ) { \ |
155 | return __TBB_machine_fetchstore##S(location,value); \ |
156 | } \ |
157 | }; |
158 | |
159 | template<memory_semantics M> |
160 | struct atomic_load_store_traits; // Primary template declaration |
161 | |
162 | #define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \ |
163 | template<> struct atomic_load_store_traits<M> { \ |
164 | template <typename T> \ |
165 | inline static T load( const volatile T& location ) { \ |
166 | return __TBB_load_##M( location ); \ |
167 | } \ |
168 | template <typename T> \ |
169 | inline static void store( volatile T& location, T value ) { \ |
170 | __TBB_store_##M( location, value ); \ |
171 | } \ |
172 | } |
173 | |
174 | #if __TBB_USE_FENCED_ATOMICS |
175 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence) |
176 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence) |
177 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence) |
178 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire) |
179 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire) |
180 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) |
181 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release) |
182 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release) |
183 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release) |
184 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed) |
185 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed) |
186 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed) |
187 | #if __TBB_64BIT_ATOMICS |
188 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence) |
189 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire) |
190 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release) |
191 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed) |
192 | #endif |
193 | #else /* !__TBB_USE_FENCED_ATOMICS */ |
194 | __TBB_DECL_ATOMIC_PRIMITIVES(1) |
195 | __TBB_DECL_ATOMIC_PRIMITIVES(2) |
196 | __TBB_DECL_ATOMIC_PRIMITIVES(4) |
197 | #if __TBB_64BIT_ATOMICS |
198 | __TBB_DECL_ATOMIC_PRIMITIVES(8) |
199 | #endif |
200 | #endif /* !__TBB_USE_FENCED_ATOMICS */ |
201 | |
202 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence); |
203 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire); |
204 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release); |
205 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed); |
206 | |
207 | //! Additive inverse of 1 for type T. |
208 | /** Various compilers issue various warnings if -1 is used with various integer types. |
209 | The baroque expression below avoids all the warnings (we hope). */ |
210 | #define __TBB_MINUS_ONE(T) (T(T(0)-T(1))) |
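
// For example, __TBB_MINUS_ONE(size_t) expands to (size_t(size_t(0)-size_t(1))), i.e. the
// additive inverse of 1 computed entirely in size_t arithmetic, so no signed/unsigned
// conversion warning is emitted.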
211 | |
212 | //! Base class that provides basic functionality for atomic<T> without fetch_and_add. |
213 | /** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, |
214 | and can be copied/compared by memcpy/memcmp. */ |
215 | template<typename T> |
216 | struct atomic_impl { |
217 | protected: |
218 | aligned_storage<T,sizeof(T)> my_storage; |
219 | private: |
//TODO: recheck on recent versions of gcc whether a union is still the _only_ way to do the conversion without warnings
221 | //! Union type used to convert type T to underlying integral type. |
222 | template<typename value_type> |
223 | union converter { |
224 | typedef typename atomic_rep<sizeof(value_type)>::word bits_type; |
225 | converter(){} |
226 | converter(value_type a_value) : value(a_value) {} |
227 | value_type value; |
228 | bits_type bits; |
229 | }; |
230 | |
231 | template<typename value_t> |
232 | static typename converter<value_t>::bits_type to_bits(value_t value){ |
233 | return converter<value_t>(value).bits; |
234 | } |
235 | template<typename value_t> |
236 | static value_t to_value(typename converter<value_t>::bits_type bits){ |
237 | converter<value_t> u; |
238 | u.bits = bits; |
239 | return u.value; |
240 | } |
241 | |
242 | template<typename value_t> |
243 | union ptr_converter; //Primary template declared, but never defined. |
244 | |
245 | template<typename value_t> |
246 | union ptr_converter<value_t *> { |
247 | ptr_converter(){} |
248 | ptr_converter(value_t* a_value) : value(a_value) {} |
249 | value_t* value; |
250 | uintptr_t bits; |
251 | }; |
//TODO: check whether making to_bits accept a reference (thus unifying it with to_bits_ref)
//hurts performance
254 | template<typename value_t> |
255 | static typename converter<value_t>::bits_type & to_bits_ref(value_t& value){ |
//TODO: this #ifdef is a temporary workaround, as union conversion seems to fail
//on SunCC for 64-bit types when targeting 32 bits
258 | #if !__SUNPRO_CC |
259 | return *(typename converter<value_t>::bits_type*)ptr_converter<value_t*>(&value).bits; |
260 | #else |
261 | return *(typename converter<value_t>::bits_type*)(&value); |
262 | #endif |
263 | } |
264 | |
265 | |
266 | public: |
267 | typedef T value_type; |
268 | |
269 | #if __TBB_ATOMIC_CTORS |
270 | atomic_impl() = default ; |
271 | constexpr atomic_impl(value_type value):my_storage(value){} |
272 | #endif |
273 | template<memory_semantics M> |
274 | value_type fetch_and_store( value_type value ) { |
275 | return to_value<value_type>( |
276 | internal::atomic_traits<sizeof(value_type),M>::fetch_and_store( &my_storage.my_value, to_bits(value) ) |
277 | ); |
278 | } |
279 | |
280 | value_type fetch_and_store( value_type value ) { |
281 | return fetch_and_store<full_fence>(value); |
282 | } |
283 | |
284 | template<memory_semantics M> |
285 | value_type compare_and_swap( value_type value, value_type comparand ) { |
286 | return to_value<value_type>( |
287 | internal::atomic_traits<sizeof(value_type),M>::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) ) |
288 | ); |
289 | } |
290 | |
291 | value_type compare_and_swap( value_type value, value_type comparand ) { |
292 | return compare_and_swap<full_fence>(value,comparand); |
293 | } |
294 | |
295 | operator value_type() const volatile { // volatile qualifier here for backwards compatibility |
296 | return to_value<value_type>( |
297 | __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) ) |
298 | ); |
299 | } |
300 | |
301 | template<memory_semantics M> |
302 | value_type load () const { |
303 | return to_value<value_type>( |
304 | internal::atomic_load_store_traits<M>::load( to_bits_ref(my_storage.my_value) ) |
305 | ); |
306 | } |
307 | |
308 | value_type load () const { |
309 | return load<acquire>(); |
310 | } |
311 | |
312 | template<memory_semantics M> |
313 | void store ( value_type value ) { |
314 | internal::atomic_load_store_traits<M>::store( to_bits_ref(my_storage.my_value), to_bits(value)); |
315 | } |
316 | |
317 | void store ( value_type value ) { |
318 | store<release>( value ); |
319 | } |
320 | |
321 | protected: |
322 | value_type store_with_release( value_type rhs ) { |
323 | //TODO: unify with store<release> |
324 | __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) ); |
325 | return rhs; |
326 | } |
327 | }; |
328 | |
329 | //! Base class that provides basic functionality for atomic<T> with fetch_and_add. |
330 | /** I is the underlying type. |
331 | D is the difference type. |
332 | StepType should be char if I is an integral type, and T if I is a T*. */ |
333 | template<typename I, typename D, typename StepType> |
334 | struct atomic_impl_with_arithmetic: atomic_impl<I> { |
335 | public: |
336 | typedef I value_type; |
337 | #if __TBB_ATOMIC_CTORS |
338 | atomic_impl_with_arithmetic() = default ; |
339 | constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl<I>(value){} |
340 | #endif |
341 | template<memory_semantics M> |
342 | value_type fetch_and_add( D addend ) { |
343 | return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) )); |
344 | } |
345 | |
346 | value_type fetch_and_add( D addend ) { |
347 | return fetch_and_add<full_fence>(addend); |
348 | } |
349 | |
350 | template<memory_semantics M> |
351 | value_type fetch_and_increment() { |
352 | return fetch_and_add<M>(1); |
353 | } |
354 | |
355 | value_type fetch_and_increment() { |
356 | return fetch_and_add(1); |
357 | } |
358 | |
359 | template<memory_semantics M> |
360 | value_type fetch_and_decrement() { |
361 | return fetch_and_add<M>(__TBB_MINUS_ONE(D)); |
362 | } |
363 | |
364 | value_type fetch_and_decrement() { |
365 | return fetch_and_add(__TBB_MINUS_ONE(D)); |
366 | } |
367 | |
368 | public: |
369 | value_type operator+=( D value ) { |
370 | return fetch_and_add(value)+value; |
371 | } |
372 | |
373 | value_type operator-=( D value ) { |
374 | // Additive inverse of value computed using binary minus, |
375 | // instead of unary minus, for sake of avoiding compiler warnings. |
376 | return operator+=(D(0)-value); |
377 | } |
378 | |
379 | value_type operator++() { |
380 | return fetch_and_add(1)+1; |
381 | } |
382 | |
383 | value_type operator--() { |
384 | return fetch_and_add(__TBB_MINUS_ONE(D))-1; |
385 | } |
386 | |
387 | value_type operator++(int) { |
388 | return fetch_and_add(1); |
389 | } |
390 | |
391 | value_type operator--(int) { |
392 | return fetch_and_add(__TBB_MINUS_ONE(D)); |
393 | } |
394 | }; |
395 | |
} /* internal */
397 | //! @endcond |
398 | |
399 | //! Primary template for atomic. |
400 | /** See the Reference for details. |
401 | @ingroup synchronization */ |
402 | template<typename T> |
403 | struct atomic: internal::atomic_impl<T> { |
404 | #if __TBB_ATOMIC_CTORS |
405 | atomic() = default; |
406 | constexpr atomic(T arg): internal::atomic_impl<T>(arg) {} |
407 | #endif |
408 | T operator=( T rhs ) { |
409 | // "this" required here in strict ISO C++ because store_with_release is a dependent name |
410 | return this->store_with_release(rhs); |
411 | } |
412 | atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;} |
413 | }; |
414 | |
415 | #if __TBB_ATOMIC_CTORS |
416 | #define __TBB_DECL_ATOMIC(T) \ |
417 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
418 | atomic() = default; \ |
419 | constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \ |
420 | \ |
421 | T operator=( T rhs ) {return store_with_release(rhs);} \ |
422 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
423 | }; |
424 | #else |
425 | #define __TBB_DECL_ATOMIC(T) \ |
426 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
427 | T operator=( T rhs ) {return store_with_release(rhs);} \ |
428 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
429 | }; |
430 | #endif |
431 | |
432 | #if __TBB_64BIT_ATOMICS |
//TODO: consider adding a non-default (and atomic) copy constructor for 32-bit platforms
434 | __TBB_DECL_ATOMIC(__TBB_LONG_LONG) |
435 | __TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) |
436 | #else |
437 | // test_atomic will verify that sizeof(long long)==8 |
438 | #endif |
439 | __TBB_DECL_ATOMIC(long) |
440 | __TBB_DECL_ATOMIC(unsigned long) |
441 | |
442 | #if _MSC_VER && !_WIN64 |
443 | #if __TBB_ATOMIC_CTORS |
444 | /* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. |
445 | It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) |
446 | with an operator=(U) that explicitly converts the U to a T. Types T and U should be |
447 | type synonyms on the platform. Type U should be the wider variant of T from the |
448 | perspective of /Wp64. */ |
449 | #define __TBB_DECL_ATOMIC_ALT(T,U) \ |
450 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
451 | atomic() = default ; \ |
452 | constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \ |
453 | T operator=( U rhs ) {return store_with_release(T(rhs));} \ |
454 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
455 | }; |
456 | #else |
457 | #define __TBB_DECL_ATOMIC_ALT(T,U) \ |
458 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
459 | T operator=( U rhs ) {return store_with_release(T(rhs));} \ |
460 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
461 | }; |
462 | #endif |
463 | __TBB_DECL_ATOMIC_ALT(unsigned,size_t) |
464 | __TBB_DECL_ATOMIC_ALT(int,ptrdiff_t) |
465 | #else |
466 | __TBB_DECL_ATOMIC(unsigned) |
467 | __TBB_DECL_ATOMIC(int) |
468 | #endif /* _MSC_VER && !_WIN64 */ |
469 | |
470 | __TBB_DECL_ATOMIC(unsigned short) |
471 | __TBB_DECL_ATOMIC(short) |
472 | __TBB_DECL_ATOMIC(char) |
473 | __TBB_DECL_ATOMIC(signed char) |
474 | __TBB_DECL_ATOMIC(unsigned char) |
475 | |
476 | #if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) |
477 | __TBB_DECL_ATOMIC(wchar_t) |
#endif /* !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) */
479 | |
480 | //! Specialization for atomic<T*> with arithmetic and operator->. |
481 | template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> { |
482 | #if __TBB_ATOMIC_CTORS |
483 | atomic() = default ; |
484 | constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {} |
485 | #endif |
486 | T* operator=( T* rhs ) { |
487 | // "this" required here in strict ISO C++ because store_with_release is a dependent name |
488 | return this->store_with_release(rhs); |
489 | } |
490 | atomic<T*>& operator=( const atomic<T*>& rhs ) { |
491 | this->store_with_release(rhs); return *this; |
492 | } |
493 | T* operator->() const { |
494 | return (*this); |
495 | } |
496 | }; |
497 | |
498 | //! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->. |
499 | template<> struct atomic<void*>: internal::atomic_impl<void*> { |
500 | #if __TBB_ATOMIC_CTORS |
501 | atomic() = default ; |
502 | constexpr atomic(void* arg): internal::atomic_impl<void*>(arg) {} |
503 | #endif |
504 | void* operator=( void* rhs ) { |
505 | // "this" required here in strict ISO C++ because store_with_release is a dependent name |
506 | return this->store_with_release(rhs); |
507 | } |
508 | atomic<void*>& operator=( const atomic<void*>& rhs ) { |
509 | this->store_with_release(rhs); return *this; |
510 | } |
511 | }; |
512 | |
// Helpers that work around the awkward syntax of calling a template member function
// of a template class when the template argument depends on template parameters.
515 | |
516 | template <memory_semantics M, typename T> |
517 | T load ( const atomic<T>& a ) { return a.template load<M>(); } |
518 | |
519 | template <memory_semantics M, typename T> |
520 | void store ( atomic<T>& a, T value ) { a.template store<M>(value); } |
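
// Illustrative sketch (not part of the interface; names are made up): these helpers avoid
// the "a.template load<M>()" spelling when the atomic's value type is itself a template
// parameter.
//
//     template<typename T>
//     T read_relaxed( const tbb::atomic<T>& a ) {
//         return tbb::load<tbb::relaxed>(a);        // instead of a.template load<tbb::relaxed>()
//     }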
521 | |
522 | namespace interface6{ |
//! Make an atomic for use in an initialization (e.g. an initializer list), as an alternative to zero-initialization or normal assignment.
524 | template<typename T> |
525 | atomic<T> make_atomic(T t) { |
526 | atomic<T> a; |
527 | store<relaxed>(a,t); |
528 | return a; |
529 | } |
530 | } |
531 | using interface6::make_atomic; |
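
// Illustrative sketch (not part of the interface; names are made up): make_atomic allows an
// atomic member to be initialized from a constructor initializer list even on compilers
// without constexpr atomic constructors.
//
//     struct limiter {
//         tbb::atomic<int> remaining;
//         explicit limiter( int n ) : remaining( tbb::make_atomic(n) ) {}
//     };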
532 | |
533 | namespace internal { |
534 | template<memory_semantics M, typename T > |
535 | void swap(atomic<T> & lhs, atomic<T> & rhs){ |
536 | T tmp = load<M>(lhs); |
537 | store<M>(lhs,load<M>(rhs)); |
538 | store<M>(rhs,tmp); |
539 | } |
540 | |
541 | // only to aid in the gradual conversion of ordinary variables to proper atomics |
542 | template<typename T> |
543 | inline atomic<T>& as_atomic( T& t ) { |
544 | return (atomic<T>&)t; |
545 | } |
546 | } // namespace tbb::internal |
547 | |
548 | } // namespace tbb |
549 | |
550 | #if _MSC_VER && !__INTEL_COMPILER |
551 | #pragma warning (pop) |
552 | #endif // warnings are restored |
553 | |
554 | #endif /* __TBB_atomic_H */ |
555 | |