#ifndef AWS_COMMON_ATOMICS_H
#define AWS_COMMON_ATOMICS_H

#include <aws/common/common.h>

/*
 * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

/**
 * struct aws_atomic_var represents an atomic variable - a value that can hold an integer or pointer
 * and that can be manipulated atomically. Instances of struct aws_atomic_var should normally be
 * manipulated only with the atomic functions defined in this header.
 */
struct aws_atomic_var {
    void *value;
};
/* Helpers for extracting the integer and pointer values from aws_atomic_var. */
#define AWS_ATOMIC_VAR_PTRVAL(var) ((var)->value)
#define AWS_ATOMIC_VAR_INTVAL(var) (*(aws_atomic_impl_int_t *)(var))

/*
 * This enumeration specifies the memory ordering properties requested for a particular
 * atomic operation. The atomic operation may provide stricter ordering than requested.
 * Note that, within a single thread, all operations are still sequenced (that is, a thread
 * sees its own atomic writes and reads happening in program order, but other threads may
 * disagree on this ordering).
 *
 * The behavior of these memory orderings is the same as in the C11 atomics API; however,
 * we only implement a subset that can be portably implemented on the compilers we target.
 */

enum aws_memory_order {
    /**
     * No particular ordering constraints are guaranteed relative to other
     * operations at all; we merely ensure that the operation itself is atomic.
     */
    aws_memory_order_relaxed = 0,
    /* aws_memory_order_consume - not currently implemented */

    /**
     * Specifies acquire ordering. No reads or writes on the current thread can be
     * reordered to happen before this operation. This is typically paired with a release
     * operation; any writes that happened before the paired release operation will be
     * visible after this acquire operation.
     *
     * Acquire ordering is only meaningful on load or load-store operations.
     */
    aws_memory_order_acquire = 2, /* leave a spot for consume if we ever add it */

    /**
     * Specifies release ordering. No reads or writes on the current thread can be
     * reordered to come after this operation. Typically paired with an acquire operation.
     *
     * Release ordering is only meaningful on store or load-store operations.
     */
    aws_memory_order_release,

    /**
     * Specifies acquire-release ordering; if this operation acts as a load, it acts as an
     * acquire operation; if it acts as a store, it acts as a release operation; if it's
     * a load-store, it does both.
     */
    aws_memory_order_acq_rel,

    /*
     * Specifies sequentially consistent ordering. This behaves as acq_rel, but in addition,
     * all seq_cst operations appear to occur in some globally consistent order.
     *
     * TODO: Figure out how to correctly implement this in MSVC. It appears that interlocked
     * functions provide only acq_rel ordering.
     */
    aws_memory_order_seq_cst
};
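
/*
 * Illustrative sketch of acquire/release pairing (not part of this API; the flag, the shared
 * value, and the two functions below are invented for the example). The release store publishes
 * the plain write to s_shared_value; the paired acquire load makes that write visible to the
 * reading thread.
 *
 *     static struct aws_atomic_var s_data_ready = AWS_ATOMIC_INIT_INT(0);
 *     static int s_shared_value;
 *
 *     void producer(void) {
 *         s_shared_value = 42; // plain, non-atomic write
 *         aws_atomic_store_int_explicit(&s_data_ready, 1, aws_memory_order_release);
 *     }
 *
 *     void consumer(void) {
 *         while (aws_atomic_load_int_explicit(&s_data_ready, aws_memory_order_acquire) == 0) {
 *             ; // spin until the producer sets the flag
 *         }
 *         // the write to s_shared_value is guaranteed to be visible here
 *     }
 */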

/**
 * Statically initializes an aws_atomic_var to a given size_t value.
 */
#define AWS_ATOMIC_INIT_INT(x) \
    { .value = (void *)(uintptr_t)(x) }

/**
 * Statically initializes an aws_atomic_var to a given void * value.
 */
#define AWS_ATOMIC_INIT_PTR(x) \
    { .value = (void *)(x) }
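
/*
 * Illustrative sketch of static initialization (the variable names are invented for the example).
 * These macros expand to struct initializers, so they are only usable where an initializer is
 * valid, not as an assignment; at runtime, use aws_atomic_init_int / aws_atomic_init_ptr instead.
 *
 *     static struct aws_atomic_var s_request_count = AWS_ATOMIC_INIT_INT(0);
 *     static struct aws_atomic_var s_current_config = AWS_ATOMIC_INIT_PTR(NULL);
 */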

AWS_EXTERN_C_BEGIN

/*
 * Note: We do not use the C11 atomics API; this is because we want to make sure the representation
 * (and behavior) of atomic values is consistent, regardless of what --std= flag you pass to your compiler.
 * Since C11 atomics can silently introduce locks, we run the risk of creating such ABI inconsistencies
 * if we choose which atomics API to use based on compiler features, and in practice we expect to have
 * either the GNU or MSVC atomics available anyway.
 *
 * As future work, we could test whether the C11 atomics API on a given platform behaves consistently
 * with the other APIs and use it if it does.
 */

/**
 * Initializes an atomic variable with an integer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 *
 * This operation does not imply a barrier. Ensure that you use an acquire-release barrier (or stronger)
 * when communicating the fact that initialization is complete to other threads. Launching a thread
 * implies a sufficiently strong barrier.
 */
AWS_STATIC_IMPL
void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n);

/**
 * Initializes an atomic variable with a pointer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 *
 * This operation does not imply a barrier. Ensure that you use an acquire-release barrier (or stronger)
 * when communicating the fact that initialization is complete to other threads. Launching a thread
 * implies a sufficiently strong barrier.
 */
AWS_STATIC_IMPL
void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p);
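
/*
 * Illustrative sketch of runtime initialization before sharing the variable with other threads
 * (struct my_state and start_workers() are hypothetical, not part of this library). Initialization
 * itself is not a barrier, but launching the worker threads afterwards provides a strong enough
 * barrier to publish the initialized values.
 *
 *     struct my_state {
 *         struct aws_atomic_var pending_jobs;
 *         struct aws_atomic_var user_data;
 *     };
 *
 *     void my_state_init(struct my_state *state, void *user_data) {
 *         aws_atomic_init_int(&state->pending_jobs, 0);
 *         aws_atomic_init_ptr(&state->user_data, user_data);
 *         start_workers(state); // thread launch implies the needed barrier
 *     }
 */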

/**
 * Reads an atomic var as an integer, using the specified ordering, and returns the result.
 */
AWS_STATIC_IMPL
size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order);

/**
 * Reads an atomic var as an integer, using sequentially consistent ordering, and returns the result.
 */
AWS_STATIC_IMPL
size_t aws_atomic_load_int(volatile const struct aws_atomic_var *var);

/**
 * Reads an atomic var as a pointer, using the specified ordering, and returns the result.
 */
AWS_STATIC_IMPL
void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order);

/**
 * Reads an atomic var as a pointer, using sequentially consistent ordering, and returns the result.
 */
AWS_STATIC_IMPL
void *aws_atomic_load_ptr(volatile const struct aws_atomic_var *var);

/**
 * Stores an integer into an atomic var, using the specified ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order);

/**
 * Stores an integer into an atomic var, using sequentially consistent ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_int(volatile struct aws_atomic_var *var, size_t n);

/**
 * Stores a pointer into an atomic var, using the specified ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order);

/**
 * Stores a pointer into an atomic var, using sequentially consistent ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_ptr(volatile struct aws_atomic_var *var, void *p);
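
/*
 * Illustrative sketch of publishing a heap object through an atomic pointer (struct my_config and
 * the two functions are invented for the example). The release store pairs with the acquire load,
 * so a reader sees either NULL or a fully initialized object, never a partially written one.
 *
 *     static struct aws_atomic_var s_config = AWS_ATOMIC_INIT_PTR(NULL);
 *
 *     void publish_config(struct my_config *config) {
 *         // ... fill in *config ...
 *         aws_atomic_store_ptr_explicit(&s_config, config, aws_memory_order_release);
 *     }
 *
 *     struct my_config *get_config(void) {
 *         return aws_atomic_load_ptr_explicit(&s_config, aws_memory_order_acquire);
 *     }
 */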

/**
 * Exchanges an integer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t n,
    enum aws_memory_order memory_order);

/**
 * Exchanges an integer with the value in an atomic_var, using sequentially consistent ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int(volatile struct aws_atomic_var *var, size_t n);

/**
 * Exchanges a pointer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void *p,
    enum aws_memory_order memory_order);

/**
 * Exchanges a pointer with the value in an atomic_var, using sequentially consistent ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr(volatile struct aws_atomic_var *var, void *p);
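
/*
 * Illustrative sketch of using exchange to take ownership of a shared pointer exactly once (the
 * slot layout and helper function are invented for the example). Only the caller that observes
 * the non-NULL previous value releases the object, even if several threads race on the same slot.
 *
 *     void release_slot(struct aws_atomic_var *slot, struct aws_allocator *allocator) {
 *         void *object = aws_atomic_exchange_ptr(slot, NULL);
 *         if (object != NULL) {
 *             aws_mem_release(allocator, object);
 *         }
 *     }
 */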

/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is
 * set to the value in *var. On success, the memory ordering used is order_success; otherwise, it is order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 * Returns true if the compare was successful and the variable was updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t *expected,
    size_t desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure);

/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is
 * set to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure.
 * Returns true if the compare was successful and the variable was updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int(volatile struct aws_atomic_var *var, size_t *expected, size_t desired);

/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is
 * set to the value in *var. On success, the memory ordering used is order_success; otherwise, it is order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 * Returns true if the compare was successful and the variable was updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void **expected,
    void *desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure);

/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is
 * set to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure.
 * Returns true if the compare was successful and the variable was updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr(volatile struct aws_atomic_var *var, void **expected, void *desired);
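
/*
 * Illustrative sketch of a compare-and-swap loop (record_max is invented for the example). On
 * failure, *expected is overwritten with the value currently in the variable, so the loop simply
 * re-tests against the freshly observed value instead of reloading by hand.
 *
 *     void record_max(struct aws_atomic_var *max, size_t sample) {
 *         size_t observed = aws_atomic_load_int(max);
 *         while (observed < sample) {
 *             if (aws_atomic_compare_exchange_int(max, &observed, sample)) {
 *                 break; // we installed the new maximum
 *             }
 *             // another thread won the race; observed now holds its value, so re-check
 *         }
 *     }
 */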

/**
 * Atomically adds n to *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);

/**
 * Atomically subtracts n from *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);

/**
 * Atomically ORs n with *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);

/**
 * Atomically ANDs n with *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);

/**
 * Atomically XORs n with *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);
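
/*
 * Illustrative sketch of reference counting with the explicit fetch operations (struct my_object
 * and my_object_destroy are hypothetical). Taking a reference can be relaxed; dropping one uses
 * acq_rel so the thread that destroys the object observes every write made while it was shared.
 *
 *     void my_object_acquire(struct my_object *object) {
 *         aws_atomic_fetch_add_explicit(&object->ref_count, 1, aws_memory_order_relaxed);
 *     }
 *
 *     void my_object_release(struct my_object *object) {
 *         if (aws_atomic_fetch_sub_explicit(&object->ref_count, 1, aws_memory_order_acq_rel) == 1) {
 *             my_object_destroy(object); // we just dropped the last reference
 *         }
 *     }
 */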

/**
 * Atomically adds n to *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add(volatile struct aws_atomic_var *var, size_t n);

/**
 * Atomically subtracts n from *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub(volatile struct aws_atomic_var *var, size_t n);

/**
 * Atomically ANDs n into *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and(volatile struct aws_atomic_var *var, size_t n);

/**
 * Atomically ORs n into *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or(volatile struct aws_atomic_var *var, size_t n);

/**
 * Atomically XORs n into *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor(volatile struct aws_atomic_var *var, size_t n);
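
/*
 * Illustrative sketch of keeping a bit mask of status flags with the bitwise fetch operations
 * (the flag constants and helper functions are invented for the example).
 *
 *     enum { MY_FLAG_SHUTDOWN = 1u << 0, MY_FLAG_ERROR = 1u << 1 };
 *
 *     // sets a flag; returns true if it was already set
 *     bool set_flag(struct aws_atomic_var *flags, size_t flag) {
 *         return (aws_atomic_fetch_or(flags, flag) & flag) != 0;
 *     }
 *
 *     void clear_flag(struct aws_atomic_var *flags, size_t flag) {
 *         aws_atomic_fetch_and(flags, ~flag);
 *     }
 */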

/**
 * Provides the same reordering guarantees as an atomic operation with the specified memory order, without
 * needing to actually perform an atomic operation.
 */
AWS_STATIC_IMPL
void aws_atomic_thread_fence(enum aws_memory_order order);
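
/*
 * Illustrative sketch of pairing standalone fences with relaxed operations (the payload and flag
 * variables are invented for the example). The release fence before the relaxed store, paired with
 * the acquire fence after the relaxed load, gives the same publication guarantee as release/acquire
 * orderings on the flag operations themselves.
 *
 *     // writer
 *     s_payload = compute_payload();                      // plain write
 *     aws_atomic_thread_fence(aws_memory_order_release);
 *     aws_atomic_store_int_explicit(&s_flag, 1, aws_memory_order_relaxed);
 *
 *     // reader
 *     if (aws_atomic_load_int_explicit(&s_flag, aws_memory_order_relaxed) == 1) {
 *         aws_atomic_thread_fence(aws_memory_order_acquire);
 *         consume_payload(s_payload);                      // the plain write is visible here
 *     }
 */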

#ifndef AWS_NO_STATIC_IMPL
#    include <aws/common/atomics.inl>
#endif /* AWS_NO_STATIC_IMPL */

AWS_EXTERN_C_END

#endif /* AWS_COMMON_ATOMICS_H */