#ifndef MY_ATOMIC_INCLUDED
#define MY_ATOMIC_INCLUDED

/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/*
  This header defines five atomic operations:

  my_atomic_add#(&var, what)
  my_atomic_add#_explicit(&var, what, memory_order)
    'Fetch and Add'
    add 'what' to *var, and return the old value of *var
    All memory orders are valid.
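
    A minimal usage sketch, assuming a shared variable declared elsewhere as
    "int32 counter" (the name is illustrative only, not part of this header):

      int32 old= my_atomic_add32(&counter, 1);

    counter is atomically incremented by 1, and 'old' receives the value it
    held just before the increment.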

  my_atomic_fas#(&var, what)
  my_atomic_fas#_explicit(&var, what, memory_order)
    'Fetch And Store'
    store 'what' in *var, and return the old value of *var
    All memory orders are valid.
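
    For example, with a hypothetical shared "int32 accumulated", the current
    value can be grabbed and the variable reset in one atomic step:

      int32 grabbed= my_atomic_fas32(&accumulated, 0);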

  my_atomic_cas#(&var, &old, new)
  my_atomic_cas#_weak_explicit(&var, &old, new, succ, fail)
  my_atomic_cas#_strong_explicit(&var, &old, new, succ, fail)
    'Compare And Swap'
    if *var is equal to *old, then store 'new' in *var, and return TRUE;
    otherwise store *var in *old, and return FALSE
    succ - the memory synchronization ordering for the read-modify-write
           operation if the comparison succeeds. All memory orders are valid.
    fail - the memory synchronization ordering for the load operation if the
           comparison fails. Cannot be MY_MEMORY_ORDER_RELEASE or
           MY_MEMORY_ORDER_ACQ_REL, and cannot specify stronger ordering
           than succ.

    The weak form is allowed to fail spuriously, that is, to act as if
    *var != *old even when they are equal. When a compare-and-exchange is
    used in a loop, the weak version yields better performance on some
    platforms. When a weak compare-and-exchange would require a loop and a
    strong one would not, the strong one is preferable.
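
    Typical usage is a retry loop. A sketch, assuming a hypothetical shared
    "int32 value" that should be atomically doubled:

      int32 expected= my_atomic_load32(&value);
      while (!my_atomic_cas32_weak_explicit(&value, &expected, expected * 2,
                                            MY_MEMORY_ORDER_RELAXED,
                                            MY_MEMORY_ORDER_RELAXED))
      { }

    On failure the CAS refreshes 'expected' with the current contents of
    'value', so each iteration retries with an up-to-date expected value;
    the weak form is appropriate here because a spurious failure only costs
    one extra iteration.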

  my_atomic_load#(&var)
  my_atomic_load#_explicit(&var, memory_order)
    return *var
    Order must be one of MY_MEMORY_ORDER_RELAXED, MY_MEMORY_ORDER_CONSUME,
    MY_MEMORY_ORDER_ACQUIRE, MY_MEMORY_ORDER_SEQ_CST.

  my_atomic_store#(&var, what)
  my_atomic_store#_explicit(&var, what, memory_order)
    store 'what' in *var
    Order must be one of MY_MEMORY_ORDER_RELAXED, MY_MEMORY_ORDER_RELEASE,
    MY_MEMORY_ORDER_SEQ_CST.
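
    The plain forms order memory accesses with MY_MEMORY_ORDER_SEQ_CST, e.g.
    for a hypothetical shared "int32 status" flag:

      my_atomic_store32(&status, 1);          one thread publishes the flag
      int32 s= my_atomic_load32(&status);     another thread observes it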

  '#' is substituted by a size suffix - 8, 16, 32, 64, or ptr
  (e.g. my_atomic_add8, my_atomic_fas32, my_atomic_casptr).

  The first version orders memory accesses according to
  MY_MEMORY_ORDER_SEQ_CST; the second version (with the _explicit suffix)
  orders memory accesses according to the given memory order.

  memory_order specifies how non-atomic memory accesses are to be ordered
  around an atomic operation:

  MY_MEMORY_ORDER_RELAXED - there are no constraints on reordering of memory
                            accesses around the atomic variable.
  MY_MEMORY_ORDER_CONSUME - no reads in the current thread dependent on the
                            value currently loaded can be reordered before
                            this load. This ensures that writes to dependent
                            variables in other threads that release the same
                            atomic variable are visible in the current
                            thread. On most platforms, this affects compiler
                            optimization only.
  MY_MEMORY_ORDER_ACQUIRE - no reads in the current thread can be reordered
                            before this load. This ensures that all writes in
                            other threads that release the same atomic
                            variable are visible in the current thread.
  MY_MEMORY_ORDER_RELEASE - no writes in the current thread can be reordered
                            after this store. This ensures that all writes in
                            the current thread are visible in other threads
                            that acquire the same atomic variable.
  MY_MEMORY_ORDER_ACQ_REL - the operation is a read-modify-write operation:
                            no reads in the current thread can be reordered
                            before this load, and no writes in the current
                            thread can be reordered after this store. All
                            writes in other threads that release the same
                            atomic variable are visible before the
                            modification, and the modification is visible in
                            other threads that acquire the same atomic
                            variable.
  MY_MEMORY_ORDER_SEQ_CST - the operation has the same semantics as an
                            acquire-release operation, and additionally all
                            such operations are ordered in a single total
                            (sequentially consistent) order.
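
  For instance, a release store paired with an acquire load can publish data
  prepared by one thread to another (the variables and helper functions below
  are hypothetical, not part of this header):

    producer thread:
      payload= compute_payload();
      my_atomic_store32_explicit(&ready, 1, MY_MEMORY_ORDER_RELEASE);

    consumer thread:
      if (my_atomic_load32_explicit(&ready, MY_MEMORY_ORDER_ACQUIRE) == 1)
        use(payload);

  The release/acquire pair guarantees that the write to 'payload' is visible
  to the consumer once it observes ready == 1.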

  We choose the implementation as follows: on Windows with Visual C++ the
  native implementation is preferred. With gcc we prefer the Solaris atomics
  over the gcc ones for stability reasons; failing that, we use the gcc C11
  atomic builtins if available, and fall back to the legacy gcc (sync)
  builtins otherwise.
*/

#if defined(_MSC_VER)
#include "atomic/generic-msvc.h"
#elif defined(HAVE_SOLARIS_ATOMIC)
#include "atomic/solaris.h"
#elif defined(HAVE_GCC_C11_ATOMICS)
#include "atomic/gcc_builtins.h"
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
#include "atomic/gcc_sync.h"
#endif

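/*
  Convenience wrappers for variables of type long/ulong: depending on the
  platform's long size they map onto the 32-bit or the 64-bit primitives
  above.
*/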
#if SIZEOF_LONG == 4
#define my_atomic_addlong(A,B) my_atomic_add32((int32*) (A), (B))
#define my_atomic_loadlong(A) my_atomic_load32((int32*) (A))
#define my_atomic_loadlong_explicit(A,O) my_atomic_load32_explicit((int32*) (A), (O))
#define my_atomic_storelong(A,B) my_atomic_store32((int32*) (A), (B))
#define my_atomic_faslong(A,B) my_atomic_fas32((int32*) (A), (B))
#define my_atomic_caslong(A,B,C) my_atomic_cas32((int32*) (A), (int32*) (B), (C))
#else
#define my_atomic_addlong(A,B) my_atomic_add64((int64*) (A), (B))
#define my_atomic_loadlong(A) my_atomic_load64((int64*) (A))
#define my_atomic_loadlong_explicit(A,O) my_atomic_load64_explicit((int64*) (A), (O))
#define my_atomic_storelong(A,B) my_atomic_store64((int64*) (A), (B))
#define my_atomic_faslong(A,B) my_atomic_fas64((int64*) (A), (B))
#define my_atomic_caslong(A,B,C) my_atomic_cas64((int64*) (A), (int64*) (B), (C))
#endif

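/*
  If the selected implementation does not provide the _explicit variants and
  the MY_MEMORY_ORDER_* constants, fall back to the plain (sequentially
  consistent) operations and ignore the requested memory order.
*/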
#ifndef MY_MEMORY_ORDER_SEQ_CST
#define MY_MEMORY_ORDER_RELAXED
#define MY_MEMORY_ORDER_CONSUME
#define MY_MEMORY_ORDER_ACQUIRE
#define MY_MEMORY_ORDER_RELEASE
#define MY_MEMORY_ORDER_ACQ_REL
#define MY_MEMORY_ORDER_SEQ_CST

#define my_atomic_store32_explicit(P, D, O) my_atomic_store32((P), (D))
#define my_atomic_store64_explicit(P, D, O) my_atomic_store64((P), (D))
#define my_atomic_storeptr_explicit(P, D, O) my_atomic_storeptr((P), (D))

#define my_atomic_load32_explicit(P, O) my_atomic_load32((P))
#define my_atomic_load64_explicit(P, O) my_atomic_load64((P))
#define my_atomic_loadptr_explicit(P, O) my_atomic_loadptr((P))

#define my_atomic_fas32_explicit(P, D, O) my_atomic_fas32((P), (D))
#define my_atomic_fas64_explicit(P, D, O) my_atomic_fas64((P), (D))
#define my_atomic_fasptr_explicit(P, D, O) my_atomic_fasptr((P), (D))

#define my_atomic_add32_explicit(P, A, O) my_atomic_add32((P), (A))
#define my_atomic_add64_explicit(P, A, O) my_atomic_add64((P), (A))
#define my_atomic_addptr_explicit(P, A, O) my_atomic_addptr((P), (A))

#define my_atomic_cas32_weak_explicit(P, E, D, S, F) \
  my_atomic_cas32((P), (E), (D))
#define my_atomic_cas64_weak_explicit(P, E, D, S, F) \
  my_atomic_cas64((P), (E), (D))
#define my_atomic_casptr_weak_explicit(P, E, D, S, F) \
  my_atomic_casptr((P), (E), (D))

#define my_atomic_cas32_strong_explicit(P, E, D, S, F) \
  my_atomic_cas32((P), (E), (D))
#define my_atomic_cas64_strong_explicit(P, E, D, S, F) \
  my_atomic_cas64((P), (E), (D))
#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
  my_atomic_casptr((P), (E), (D))
#endif

#endif /* MY_ATOMIC_INCLUDED */