1 | /* |
2 | Copyright (c) 2005-2019 Intel Corporation |
3 | |
4 | Licensed under the Apache License, Version 2.0 (the "License"); |
5 | you may not use this file except in compliance with the License. |
6 | You may obtain a copy of the License at |
7 | |
8 | http://www.apache.org/licenses/LICENSE-2.0 |
9 | |
10 | Unless required by applicable law or agreed to in writing, software |
11 | distributed under the License is distributed on an "AS IS" BASIS, |
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | See the License for the specific language governing permissions and |
14 | limitations under the License. |
15 | */ |
16 | |
17 | #ifndef __TBB_malloc_Synchronize_H_ |
18 | #define __TBB_malloc_Synchronize_H_ |
19 | |
20 | #include "tbb/tbb_machine.h" |
21 | |
//! Stripped-down version of spin_mutex.
/** Instances of MallocMutex must reside in zero-initialized memory and have no
    constructor. This lets the mutex be used even while file-scope constructors
    are still running.

    There are no "acquire" or "release" methods; the mutex can be locked only
    through a scoped_lock, which enforces a strict block-scoped locking pattern.
    Omitting explicit acquire/release permits further simplification.
    A usage sketch follows the class definition. */
31 | class MallocMutex : tbb::internal::no_copy { |
32 | __TBB_atomic_flag flag; |
33 | |
34 | public: |
35 | class scoped_lock : tbb::internal::no_copy { |
36 | MallocMutex& mutex; |
37 | bool taken; |
38 | public: |
        //! Acquire the lock; blocks until the lock is available.
        scoped_lock( MallocMutex& m ) : mutex(m), taken(true) { __TBB_LockByte(m.flag); }
        //! Acquire the lock, or merely try to acquire it if block==false.
        /** If locked is non-NULL, *locked reports whether the lock was taken. */
        scoped_lock( MallocMutex& m, bool block, bool *locked ) : mutex(m), taken(false) {
            if (block) {
                __TBB_LockByte(m.flag);
                taken = true;
            } else {
                taken = __TBB_TryLockByte(m.flag);
            }
            if (locked) *locked = taken;
        }
49 | ~scoped_lock() { |
50 | if (taken) __TBB_UnlockByte(mutex.flag); |
51 | } |
52 | }; |
53 | friend class scoped_lock; |
54 | }; |
55 | |
56 | // TODO: use signed/unsigned in atomics more consistently |
//! Atomically increments counter; returns the new (incremented) value.
inline intptr_t AtomicIncrement( volatile intptr_t& counter ) {
    return __TBB_FetchAndAddW( &counter, 1 )+1;
}

//! Atomically adds value to counter; returns the value counter held before the addition.
inline uintptr_t AtomicAdd( volatile intptr_t& counter, intptr_t value ) {
    return __TBB_FetchAndAddW( &counter, value );
}

//! Atomically stores new_value at location if it currently holds comparand;
//! returns the value observed at location (equal to comparand on success).
inline intptr_t AtomicCompareExchange( volatile intptr_t& location, intptr_t new_value, intptr_t comparand) {
    return __TBB_CompareAndSwapW( &location, new_value, comparand );
}
68 | |
//! Atomically stores value at location; returns the value previously stored there.
inline uintptr_t AtomicFetchStore(volatile void* location, uintptr_t value) {
    return __TBB_FetchAndStoreW(location, value);
}
72 | |
//! Atomically ORs the bits of mask into the word at operand.
inline void AtomicOr(volatile void *operand, uintptr_t mask) {
    __TBB_AtomicOR(operand, mask);
}

//! Atomically ANDs the word at operand with mask (i.e. clears the bits absent from mask).
inline void AtomicAnd(volatile void *operand, uintptr_t mask) {
    __TBB_AtomicAND(operand, mask);
}
80 | |
//! Load with acquire semantics: later memory accesses cannot be reordered before it.
inline intptr_t FencedLoad( const volatile intptr_t &location ) {
    return __TBB_load_with_acquire(location);
}

//! Store with release semantics: earlier memory accesses cannot be reordered after it.
inline void FencedStore( volatile intptr_t &location, intptr_t value ) {
    __TBB_store_with_release(location, value);
}
88 | |
//! Spin (with exponential backoff) while location equals value.
inline void SpinWaitWhileEq(const volatile intptr_t &location, const intptr_t value) {
    tbb::internal::spin_wait_while_eq(location, value);
}
92 | |
//! Thin wrapper over tbb::internal::atomic_backoff for hand-written spin loops.
/** Each call to pause() spins progressively longer, eventually yielding the thread. */
class AtomicBackoff {
    tbb::internal::atomic_backoff backoff;
public:
    AtomicBackoff() {}
    void pause() { backoff.pause(); }
};
99 | |
//! Spin (with exponential backoff) until location equals value.
inline void SpinWaitUntilEq(const volatile intptr_t &location, const intptr_t value) {
    tbb::internal::spin_wait_until_eq(location, value);
}
103 | |
104 | #endif /* __TBB_malloc_Synchronize_H_ */ |
105 | |