/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2014-2016 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RDATOMIC_H_
#define _RDATOMIC_H_

#include "tinycthread.h"

#ifdef __SUNPRO_C
#include <atomic.h> /* Solaris atomic_add_32_nv()/atomic_add_64_nv() */
#endif

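/*
 * Portability note: HAVE_ATOMICS_32/HAVE_ATOMICS_64 and the
 * ATOMIC_OP32()/ATOMIC_OP64() helper macros are expected to be provided
 * by the build configuration (e.g. the configure-generated config and
 * rd.h); that is an assumption about the surrounding build, not something
 * this header defines. On MSVC the Interlocked*() intrinsics are assumed
 * to be declared through a windows.h pulled in elsewhere in the build.
 * When no native atomics are available, each atomic type carries a
 * tinycthread mutex and the operations are emulated under that lock;
 * note there is no matching _destroy() API, so the fallback mutex is
 * never explicitly destroyed.
 */
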
typedef struct {
        int32_t val;
#if !defined(_MSC_VER) && !HAVE_ATOMICS_32
        mtx_t lock; /* Emulation lock; only present (and initialized) when
                     * native 32-bit atomics are unavailable. The guard
                     * matches the one in rd_atomic32_init(), since the
                     * MSVC paths use Interlocked*() and never take it. */
#endif
} rd_atomic32_t;

typedef struct {
        int64_t val;
#if !defined(_MSC_VER) && !HAVE_ATOMICS_64
        mtx_t lock; /* Emulation lock; only present (and initialized) when
                     * native 64-bit atomics are unavailable. */
#endif
} rd_atomic64_t;

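/*
 * Example usage: a thread-safe reference count (an illustrative sketch
 * only; 'obj' and obj_destroy() are hypothetical, not part of this API):
 *
 *   rd_atomic32_t refcnt;
 *   rd_atomic32_init(&refcnt, 1);
 *
 *   rd_atomic32_add(&refcnt, 1);            // grab a reference
 *   if (rd_atomic32_sub(&refcnt, 1) == 0)   // drop it; 0 => last holder
 *           obj_destroy(obj);
 */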

/**
 * Initializes an atomic int32 to value v and, when atomics are emulated,
 * its accompanying mutex. Must be called before any other rd_atomic32_*()
 * operation on ra.
 */
static RD_INLINE RD_UNUSED void rd_atomic32_init (rd_atomic32_t *ra, int32_t v) {
        ra->val = v;
#if !defined(_MSC_VER) && !HAVE_ATOMICS_32
        mtx_init(&ra->lock, mtx_plain);
#endif
}


/**
 * Atomically adds v to ra and returns the resulting (post-add) value.
 * All code paths return the new value: atomic_add_32_nv() and
 * InterlockedAdd() return the result of the addition, as do the
 * add+fetch form of ATOMIC_OP32() and the mutex-protected fallback.
 */
static RD_INLINE int32_t RD_UNUSED rd_atomic32_add (rd_atomic32_t *ra, int32_t v) {
#ifdef __SUNPRO_C
        return atomic_add_32_nv(&ra->val, v);
#elif defined(_MSC_VER)
        return InterlockedAdd(&ra->val, v);
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        ra->val += v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP32(add, fetch, &ra->val, v);
#endif
}

/**
 * Atomically subtracts v from ra and returns the resulting value
 * (implemented as an add of -v on the Sun and MSVC paths).
 */
static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v) {
#ifdef __SUNPRO_C
        return atomic_add_32_nv(&ra->val, -v);
#elif defined(_MSC_VER)
        return InterlockedAdd(&ra->val, -v);
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        ra->val -= v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP32(sub, fetch, &ra->val, v);
#endif
}

/**
 * Atomically reads the current value of ra.
 * On MSVC and Sun Studio this is a plain load of a naturally aligned
 * 32-bit word; on the generic path it is a fetch-and-add of 0, which
 * amounts to a full-barrier load.
 */
static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) {
#if defined(_MSC_VER) || defined(__SUNPRO_C)
        return ra->val;
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP32(fetch, add, &ra->val, 0);
#endif
}

/**
 * Atomically sets ra to v; returns v on all paths.
 * (InterlockedExchange() returns the previous value, so its result is
 * discarded to keep the return semantics uniform across platforms.)
 * The final fallback is a plain, non-atomic store; see the FIXME.
 */
static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) {
#ifdef _MSC_VER
        (void)InterlockedExchange(&ra->val, v);
        return v;
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        r = ra->val = v;
        mtx_unlock(&ra->lock);
        return r;
#elif HAVE_ATOMICS_32_ATOMIC
        __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
        return v;
#elif HAVE_ATOMICS_32_SYNC
        (void)__sync_lock_test_and_set(&ra->val, v);
        return v;
#else
        return ra->val = v; /* FIXME: plain store, not atomic */
#endif
}
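/*
 * For reference, a minimal sketch of how the build is assumed to define
 * ATOMIC_OP32() for gcc/clang-style compilers (the real definition lives
 * in the build configuration, e.g. rd.h, and may differ):
 *
 *   #define ATOMIC_OP32(OP1, OP2, PTR, VAL) \
 *           __atomic_##OP1##_##OP2(PTR, VAL, __ATOMIC_SEQ_CST)
 *
 * Under that assumption, ATOMIC_OP32(add, fetch, p, v) expands to
 * __atomic_add_fetch(p, v, __ATOMIC_SEQ_CST) (returning the new value)
 * and ATOMIC_OP32(fetch, add, p, 0) expands to __atomic_fetch_add(p, 0,
 * __ATOMIC_SEQ_CST) (effectively a sequentially consistent load).
 */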


/**
 * Initializes an atomic int64 to value v and, when atomics are emulated,
 * its accompanying mutex. Must be called before any other rd_atomic64_*()
 * operation on ra.
 */
static RD_INLINE RD_UNUSED void rd_atomic64_init (rd_atomic64_t *ra, int64_t v) {
        ra->val = v;
#if !defined(_MSC_VER) && !HAVE_ATOMICS_64
        mtx_init(&ra->lock, mtx_plain);
#endif
}

/**
 * Atomically adds v to ra and returns the resulting (post-add) value.
 */
static RD_INLINE int64_t RD_UNUSED rd_atomic64_add (rd_atomic64_t *ra, int64_t v) {
#ifdef __SUNPRO_C
        return atomic_add_64_nv(&ra->val, v);
#elif defined(_MSC_VER)
        return InterlockedAdd64(&ra->val, v);
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        ra->val += v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP64(add, fetch, &ra->val, v);
#endif
}

/**
 * Atomically subtracts v from ra and returns the resulting value
 * (implemented as an add of -v on the Sun and MSVC paths).
 */
static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v) {
#ifdef __SUNPRO_C
        return atomic_add_64_nv(&ra->val, -v);
#elif defined(_MSC_VER)
        return InterlockedAdd64(&ra->val, -v);
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        ra->val -= v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP64(sub, fetch, &ra->val, v);
#endif
}

/**
 * Atomically reads the current value of ra.
 * On MSVC and Sun Studio this is a plain load, which assumes the target
 * reads naturally aligned 64-bit words atomically (true on 64-bit
 * platforms, not guaranteed on 32-bit ones); on the generic path it is a
 * fetch-and-add of 0, which amounts to a full-barrier load.
 */
static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) {
#if defined(_MSC_VER) || defined(__SUNPRO_C)
        return ra->val;
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP64(fetch, add, &ra->val, 0);
#endif
}


/**
 * Atomically sets ra to v; returns v on all paths.
 * (InterlockedExchange64() returns the previous value, so its result is
 * discarded to keep the return semantics uniform across platforms.)
 * The final fallback is a plain, non-atomic store; see the FIXME.
 */
static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) {
#ifdef _MSC_VER
        (void)InterlockedExchange64(&ra->val, v);
        return v;
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        ra->val = v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#elif HAVE_ATOMICS_64_ATOMIC
        __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
        return v;
#elif HAVE_ATOMICS_64_SYNC
        (void)__sync_lock_test_and_set(&ra->val, v);
        return v;
#else
        return ra->val = v; /* FIXME: plain store, not atomic */
#endif
}
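/*
 * Example: a process-wide statistics counter (an illustrative sketch;
 * 'msgs_sent' is a hypothetical variable, not part of this API):
 *
 *   static rd_atomic64_t msgs_sent;
 *
 *   rd_atomic64_init(&msgs_sent, 0);             // once, before use
 *   rd_atomic64_add(&msgs_sent, 1);              // from any thread
 *   printf("sent: %"PRId64"\n", rd_atomic64_get(&msgs_sent));
 */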

#endif /* _RDATOMIC_H_ */