1 | /* |
2 | Copyright 2005-2013 Intel Corporation. All Rights Reserved. |
3 | |
4 | This file is part of Threading Building Blocks. |
5 | |
6 | Threading Building Blocks is free software; you can redistribute it |
7 | and/or modify it under the terms of the GNU General Public License |
8 | version 2 as published by the Free Software Foundation. |
9 | |
10 | Threading Building Blocks is distributed in the hope that it will be |
11 | useful, but WITHOUT ANY WARRANTY; without even the implied warranty |
12 | of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | GNU General Public License for more details. |
14 | |
15 | You should have received a copy of the GNU General Public License |
16 | along with Threading Building Blocks; if not, write to the Free Software |
17 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
18 | |
19 | As a special exception, you may use this file as part of a free software |
20 | library without restriction. Specifically, if other files instantiate |
21 | templates or use macros or inline functions from this file, or you compile |
22 | this file and link it with other files to produce an executable, this |
23 | file does not by itself cause the resulting executable to be covered by |
24 | the GNU General Public License. This exception does not however |
25 | invalidate any other reasons why the executable file might be covered by |
26 | the GNU General Public License. |
27 | */ |
28 | |
29 | #ifndef __TBB_atomic_H |
30 | #define __TBB_atomic_H |
31 | |
32 | #include <cstddef> |
33 | |
34 | #if _MSC_VER |
35 | #define __TBB_LONG_LONG __int64 |
36 | #else |
37 | #define __TBB_LONG_LONG long long |
38 | #endif /* _MSC_VER */ |
39 | |
40 | #include "tbb_machine.h" |
41 | |
42 | #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) |
43 | // Workaround for overzealous compiler warnings |
44 | #pragma warning (push) |
45 | #pragma warning (disable: 4244 4267 4512) |
46 | #endif |
47 | |
48 | namespace tbb { |
49 | |
50 | //! Specifies memory semantics. |
51 | enum memory_semantics { |
52 | //! Sequential consistency |
53 | full_fence, |
54 | //! Acquire |
55 | acquire, |
56 | //! Release |
57 | release, |
58 | //! No ordering |
59 | relaxed |
60 | }; |
61 | |
62 | //! @cond INTERNAL |
63 | namespace internal { |
64 | |
65 | #if __TBB_ATTRIBUTE_ALIGNED_PRESENT |
66 | #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a))); |
67 | #elif __TBB_DECLSPEC_ALIGN_PRESENT |
68 | #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f; |
69 | #else |
70 | #error Do not know syntax for forcing alignment. |
71 | #endif |
72 | |
73 | template<size_t S> |
74 | struct atomic_rep; // Primary template declared, but never defined. |
75 | |
76 | template<> |
77 | struct atomic_rep<1> { // Specialization |
78 | typedef int8_t word; |
79 | }; |
80 | template<> |
81 | struct atomic_rep<2> { // Specialization |
82 | typedef int16_t word; |
83 | }; |
84 | template<> |
85 | struct atomic_rep<4> { // Specialization |
86 | #if _MSC_VER && !_WIN64 |
87 | // Work-around that avoids spurious /Wp64 warnings |
88 | typedef intptr_t word; |
89 | #else |
90 | typedef int32_t word; |
91 | #endif |
92 | }; |
93 | #if __TBB_64BIT_ATOMICS |
94 | template<> |
95 | struct atomic_rep<8> { // Specialization |
96 | typedef int64_t word; |
97 | }; |
98 | #endif |
99 | |
100 | template<typename value_type, size_t size> |
101 | struct aligned_storage; |
102 | |
// The specializations are needed to satisfy the MSVC syntax of __declspec(align()), which accepts _literal_ constants only.
104 | #if __TBB_ATOMIC_CTORS |
105 | #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ |
106 | template<typename value_type> \ |
107 | struct aligned_storage<value_type,S> { \ |
108 | __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ |
109 | aligned_storage() = default ; \ |
110 | constexpr aligned_storage(value_type value):my_value(value){} \ |
111 | }; \ |
112 | |
113 | #else |
114 | #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ |
115 | template<typename value_type> \ |
116 | struct aligned_storage<value_type,S> { \ |
117 | __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ |
118 | }; \ |
119 | |
120 | #endif |
121 | |
122 | template<typename value_type> |
123 | struct aligned_storage<value_type,1> { |
124 | value_type my_value; |
125 | #if __TBB_ATOMIC_CTORS |
126 | aligned_storage() = default ; |
127 | constexpr aligned_storage(value_type value):my_value(value){} |
128 | #endif |
129 | }; |
130 | |
131 | ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2) |
132 | ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4) |
133 | #if __TBB_64BIT_ATOMICS |
134 | ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8) |
135 | #endif |
136 | |
137 | template<size_t Size, memory_semantics M> |
138 | struct atomic_traits; // Primary template declared, but not defined. |
139 | |
140 | #define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \ |
141 | template<> struct atomic_traits<S,M> { \ |
142 | typedef atomic_rep<S>::word word; \ |
143 | inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ |
144 | return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \ |
145 | } \ |
146 | inline static word fetch_and_add( volatile void* location, word addend ) { \ |
147 | return __TBB_machine_fetchadd##S##M(location,addend); \ |
148 | } \ |
149 | inline static word fetch_and_store( volatile void* location, word value ) { \ |
150 | return __TBB_machine_fetchstore##S##M(location,value); \ |
151 | } \ |
152 | }; |
153 | |
154 | #define __TBB_DECL_ATOMIC_PRIMITIVES(S) \ |
155 | template<memory_semantics M> \ |
156 | struct atomic_traits<S,M> { \ |
157 | typedef atomic_rep<S>::word word; \ |
158 | inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ |
159 | return __TBB_machine_cmpswp##S(location,new_value,comparand); \ |
160 | } \ |
161 | inline static word fetch_and_add( volatile void* location, word addend ) { \ |
162 | return __TBB_machine_fetchadd##S(location,addend); \ |
163 | } \ |
164 | inline static word fetch_and_store( volatile void* location, word value ) { \ |
165 | return __TBB_machine_fetchstore##S(location,value); \ |
166 | } \ |
167 | }; |
168 | |
169 | template<memory_semantics M> |
170 | struct atomic_load_store_traits; // Primary template declaration |
171 | |
172 | #define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \ |
173 | template<> struct atomic_load_store_traits<M> { \ |
174 | template <typename T> \ |
175 | inline static T load( const volatile T& location ) { \ |
176 | return __TBB_load_##M( location ); \ |
177 | } \ |
178 | template <typename T> \ |
179 | inline static void store( volatile T& location, T value ) { \ |
180 | __TBB_store_##M( location, value ); \ |
181 | } \ |
182 | } |
183 | |
184 | #if __TBB_USE_FENCED_ATOMICS |
185 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence) |
186 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence) |
187 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence) |
188 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire) |
189 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire) |
190 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) |
191 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release) |
192 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release) |
193 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release) |
194 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed) |
195 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed) |
196 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed) |
197 | #if __TBB_64BIT_ATOMICS |
198 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence) |
199 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire) |
200 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release) |
201 | __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed) |
202 | #endif |
203 | #else /* !__TBB_USE_FENCED_ATOMICS */ |
204 | __TBB_DECL_ATOMIC_PRIMITIVES(1) |
205 | __TBB_DECL_ATOMIC_PRIMITIVES(2) |
206 | __TBB_DECL_ATOMIC_PRIMITIVES(4) |
207 | #if __TBB_64BIT_ATOMICS |
208 | __TBB_DECL_ATOMIC_PRIMITIVES(8) |
209 | #endif |
210 | #endif /* !__TBB_USE_FENCED_ATOMICS */ |
211 | |
212 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence); |
213 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire); |
214 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release); |
215 | __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed); |
216 | |
217 | //! Additive inverse of 1 for type T. |
218 | /** Various compilers issue various warnings if -1 is used with various integer types. |
219 | The baroque expression below avoids all the warnings (we hope). */ |
220 | #define __TBB_MINUS_ONE(T) (T(T(0)-T(1))) |
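
// For example, __TBB_MINUS_ONE(size_t) expands to (size_t(size_t(0)-size_t(1))),
// which wraps around to the maximum size_t value without a signed -1 literal
// ever being converted to an unsigned type.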
221 | |
222 | //! Base class that provides basic functionality for atomic<T> without fetch_and_add. |
223 | /** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, |
224 | and can be copied/compared by memcpy/memcmp. */ |
225 | template<typename T> |
226 | struct atomic_impl { |
227 | protected: |
228 | aligned_storage<T,sizeof(T)> my_storage; |
229 | private: |
    //TODO: recheck on recent versions of gcc whether a union is still the _only_ way to do the conversion without warnings
231 | //! Union type used to convert type T to underlying integral type. |
232 | template<typename value_type> |
233 | union converter { |
234 | typedef typename atomic_rep<sizeof(value_type)>::word bits_type; |
235 | converter(){} |
236 | converter(value_type a_value) : value(a_value) {} |
237 | value_type value; |
238 | bits_type bits; |
239 | }; |
240 | |
241 | template<typename value_t> |
242 | static typename converter<value_t>::bits_type to_bits(value_t value){ |
243 | return converter<value_t>(value).bits; |
244 | } |
245 | template<typename value_t> |
246 | static value_t to_value(typename converter<value_t>::bits_type bits){ |
247 | converter<value_t> u; |
248 | u.bits = bits; |
249 | return u.value; |
250 | } |
251 | |
252 | template<typename value_t> |
253 | union ptr_converter; //Primary template declared, but never defined. |
254 | |
255 | template<typename value_t> |
256 | union ptr_converter<value_t *> { |
257 | ptr_converter(){} |
258 | ptr_converter(value_t* a_value) : value(a_value) {} |
259 | value_t* value; |
260 | uintptr_t bits; |
261 | }; |
    //TODO: check whether making to_bits accept a reference (thus unifying it with to_bits_ref)
    //hurts performance
264 | template<typename value_t> |
265 | static typename converter<value_t>::bits_type & to_bits_ref(value_t& value){ |
        //TODO: this #ifdef is a temporary workaround, as the union conversion seems to fail
        //on suncc for 64-bit types on a 32-bit target
268 | #if !__SUNPRO_CC |
269 | return *(typename converter<value_t>::bits_type*)ptr_converter<value_t*>(&value).bits; |
270 | #else |
271 | return *(typename converter<value_t>::bits_type*)(&value); |
272 | #endif |
273 | } |
274 | |
275 | |
276 | public: |
277 | typedef T value_type; |
278 | |
279 | #if __TBB_ATOMIC_CTORS |
280 | atomic_impl() = default ; |
281 | constexpr atomic_impl(value_type value):my_storage(value){} |
282 | #endif |
283 | template<memory_semantics M> |
284 | value_type fetch_and_store( value_type value ) { |
285 | return to_value<value_type>( |
286 | internal::atomic_traits<sizeof(value_type),M>::fetch_and_store( &my_storage.my_value, to_bits(value) ) |
287 | ); |
288 | } |
289 | |
290 | value_type fetch_and_store( value_type value ) { |
291 | return fetch_and_store<full_fence>(value); |
292 | } |
293 | |
294 | template<memory_semantics M> |
295 | value_type compare_and_swap( value_type value, value_type comparand ) { |
296 | return to_value<value_type>( |
297 | internal::atomic_traits<sizeof(value_type),M>::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) ) |
298 | ); |
299 | } |
300 | |
301 | value_type compare_and_swap( value_type value, value_type comparand ) { |
302 | return compare_and_swap<full_fence>(value,comparand); |
303 | } |
304 | |
305 | operator value_type() const volatile { // volatile qualifier here for backwards compatibility |
306 | return to_value<value_type>( |
307 | __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) ) |
308 | ); |
309 | } |
310 | |
311 | template<memory_semantics M> |
312 | value_type load () const { |
313 | return to_value<value_type>( |
314 | internal::atomic_load_store_traits<M>::load( to_bits_ref(my_storage.my_value) ) |
315 | ); |
316 | } |
317 | |
318 | value_type load () const { |
319 | return load<acquire>(); |
320 | } |
321 | |
322 | template<memory_semantics M> |
323 | void store ( value_type value ) { |
324 | internal::atomic_load_store_traits<M>::store( to_bits_ref(my_storage.my_value), to_bits(value)); |
325 | } |
326 | |
327 | void store ( value_type value ) { |
328 | store<release>( value ); |
329 | } |
330 | |
331 | protected: |
332 | value_type store_with_release( value_type rhs ) { |
333 | //TODO: unify with store<release> |
334 | __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) ); |
335 | return rhs; |
336 | } |
337 | }; |
338 | |
339 | //! Base class that provides basic functionality for atomic<T> with fetch_and_add. |
340 | /** I is the underlying type. |
341 | D is the difference type. |
342 | StepType should be char if I is an integral type, and T if I is a T*. */ |
343 | template<typename I, typename D, typename StepType> |
344 | struct atomic_impl_with_arithmetic: atomic_impl<I> { |
345 | public: |
346 | typedef I value_type; |
347 | #if __TBB_ATOMIC_CTORS |
348 | atomic_impl_with_arithmetic() = default ; |
349 | constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl<I>(value){} |
350 | #endif |
351 | template<memory_semantics M> |
352 | value_type fetch_and_add( D addend ) { |
353 | return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) )); |
354 | } |
355 | |
356 | value_type fetch_and_add( D addend ) { |
357 | return fetch_and_add<full_fence>(addend); |
358 | } |
359 | |
360 | template<memory_semantics M> |
361 | value_type fetch_and_increment() { |
362 | return fetch_and_add<M>(1); |
363 | } |
364 | |
365 | value_type fetch_and_increment() { |
366 | return fetch_and_add(1); |
367 | } |
368 | |
369 | template<memory_semantics M> |
370 | value_type fetch_and_decrement() { |
371 | return fetch_and_add<M>(__TBB_MINUS_ONE(D)); |
372 | } |
373 | |
374 | value_type fetch_and_decrement() { |
375 | return fetch_and_add(__TBB_MINUS_ONE(D)); |
376 | } |
377 | |
378 | public: |
379 | value_type operator+=( D value ) { |
380 | return fetch_and_add(value)+value; |
381 | } |
382 | |
383 | value_type operator-=( D value ) { |
384 | // Additive inverse of value computed using binary minus, |
385 | // instead of unary minus, for sake of avoiding compiler warnings. |
386 | return operator+=(D(0)-value); |
387 | } |
388 | |
389 | value_type operator++() { |
390 | return fetch_and_add(1)+1; |
391 | } |
392 | |
393 | value_type operator--() { |
394 | return fetch_and_add(__TBB_MINUS_ONE(D))-1; |
395 | } |
396 | |
397 | value_type operator++(int) { |
398 | return fetch_and_add(1); |
399 | } |
400 | |
401 | value_type operator--(int) { |
402 | return fetch_and_add(__TBB_MINUS_ONE(D)); |
403 | } |
404 | }; |
405 | |
} // namespace internal
407 | //! @endcond |
408 | |
409 | //! Primary template for atomic. |
410 | /** See the Reference for details. |
411 | @ingroup synchronization */ |
412 | template<typename T> |
413 | struct atomic: internal::atomic_impl<T> { |
414 | #if __TBB_ATOMIC_CTORS |
415 | atomic() = default; |
416 | constexpr atomic(T arg): internal::atomic_impl<T>(arg) {} |
417 | #endif |
418 | T operator=( T rhs ) { |
419 | // "this" required here in strict ISO C++ because store_with_release is a dependent name |
420 | return this->store_with_release(rhs); |
421 | } |
422 | atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;} |
423 | }; |
424 | |
425 | #if __TBB_ATOMIC_CTORS |
426 | #define __TBB_DECL_ATOMIC(T) \ |
427 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
428 | atomic() = default; \ |
429 | constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \ |
430 | \ |
431 | T operator=( T rhs ) {return store_with_release(rhs);} \ |
432 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
433 | }; |
434 | #else |
435 | #define __TBB_DECL_ATOMIC(T) \ |
436 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
437 | T operator=( T rhs ) {return store_with_release(rhs);} \ |
438 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
439 | }; |
440 | #endif |
441 | |
442 | #if __TBB_64BIT_ATOMICS |
443 | //TODO: consider adding non-default (and atomic) copy constructor for 32bit platform |
444 | __TBB_DECL_ATOMIC(__TBB_LONG_LONG) |
445 | __TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) |
446 | #else |
447 | // test_atomic will verify that sizeof(long long)==8 |
448 | #endif |
449 | __TBB_DECL_ATOMIC(long) |
450 | __TBB_DECL_ATOMIC(unsigned long) |
451 | |
452 | #if _MSC_VER && !_WIN64 |
453 | #if __TBB_ATOMIC_CTORS |
454 | /* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. |
455 | It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) |
456 | with an operator=(U) that explicitly converts the U to a T. Types T and U should be |
457 | type synonyms on the platform. Type U should be the wider variant of T from the |
458 | perspective of /Wp64. */ |
459 | #define __TBB_DECL_ATOMIC_ALT(T,U) \ |
460 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
461 | atomic() = default ; \ |
462 | constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \ |
463 | T operator=( U rhs ) {return store_with_release(T(rhs));} \ |
464 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
465 | }; |
466 | #else |
467 | #define __TBB_DECL_ATOMIC_ALT(T,U) \ |
468 | template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \ |
469 | T operator=( U rhs ) {return store_with_release(T(rhs));} \ |
470 | atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \ |
471 | }; |
472 | #endif |
473 | __TBB_DECL_ATOMIC_ALT(unsigned,size_t) |
474 | __TBB_DECL_ATOMIC_ALT(int,ptrdiff_t) |
475 | #else |
476 | __TBB_DECL_ATOMIC(unsigned) |
477 | __TBB_DECL_ATOMIC(int) |
478 | #endif /* _MSC_VER && !_WIN64 */ |
479 | |
480 | __TBB_DECL_ATOMIC(unsigned short) |
481 | __TBB_DECL_ATOMIC(short) |
482 | __TBB_DECL_ATOMIC(char) |
483 | __TBB_DECL_ATOMIC(signed char) |
484 | __TBB_DECL_ATOMIC(unsigned char) |
485 | |
486 | #if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) |
487 | __TBB_DECL_ATOMIC(wchar_t) |
#endif /* !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) */
489 | |
490 | //! Specialization for atomic<T*> with arithmetic and operator->. |
491 | template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> { |
492 | #if __TBB_ATOMIC_CTORS |
493 | atomic() = default ; |
494 | constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {} |
495 | #endif |
496 | T* operator=( T* rhs ) { |
497 | // "this" required here in strict ISO C++ because store_with_release is a dependent name |
498 | return this->store_with_release(rhs); |
499 | } |
500 | atomic<T*>& operator=( const atomic<T*>& rhs ) { |
501 | this->store_with_release(rhs); return *this; |
502 | } |
503 | T* operator->() const { |
504 | return (*this); |
505 | } |
506 | }; |
507 | |
508 | //! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->. |
509 | template<> struct atomic<void*>: internal::atomic_impl<void*> { |
510 | #if __TBB_ATOMIC_CTORS |
511 | atomic() = default ; |
512 | constexpr atomic(void* arg): internal::atomic_impl<void*>(arg) {} |
513 | #endif |
514 | void* operator=( void* rhs ) { |
515 | // "this" required here in strict ISO C++ because store_with_release is a dependent name |
516 | return this->store_with_release(rhs); |
517 | } |
518 | atomic<void*>& operator=( const atomic<void*>& rhs ) { |
519 | this->store_with_release(rhs); return *this; |
520 | } |
521 | }; |
522 | |
// Helpers to work around the ugly syntax of calling a template member function of a
// template class with a template argument that depends on template parameters.
525 | |
526 | template <memory_semantics M, typename T> |
527 | T load ( const atomic<T>& a ) { return a.template load<M>(); } |
528 | |
529 | template <memory_semantics M, typename T> |
530 | void store ( atomic<T>& a, T value ) { return a.template store<M>(value); } |
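
// For example, in generic code where 'a' has type tbb::atomic<U> with U a
// template parameter (a hypothetical sketch; 'peek' is not part of the library):
//     template<typename U>
//     U peek( const tbb::atomic<U>& a ) {
//         return tbb::load<tbb::relaxed>(a);   // vs. a.template load<tbb::relaxed>()
//     }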
531 | |
532 | namespace interface6{ |
//! Make an atomic for use in an initialization (list), as an alternative to zero-initialization or normal assignment.
534 | template<typename T> |
535 | atomic<T> make_atomic(T t) { |
536 | atomic<T> a; |
537 | store<relaxed>(a,t); |
538 | return a; |
539 | } |
540 | } |
541 | using interface6::make_atomic; |
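
// A minimal usage sketch ('x' is a hypothetical variable); the relaxed store
// makes the construction cheap, so the result suits initialization lists:
//     tbb::atomic<int> x = tbb::make_atomic(42);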
542 | |
543 | namespace internal { |
544 | |
545 | // only to aid in the gradual conversion of ordinary variables to proper atomics |
546 | template<typename T> |
547 | inline atomic<T>& as_atomic( T& t ) { |
548 | return (atomic<T>&)t; |
549 | } |
550 | } // namespace tbb::internal |
551 | |
552 | } // namespace tbb |
553 | |
554 | #if _MSC_VER && !__INTEL_COMPILER |
555 | #pragma warning (pop) |
#endif // warnings 4244, 4267, 4512 are back
557 | |
558 | #endif /* __TBB_atomic_H */ |
559 | |