/*-------------------------------------------------------------------------
 *
 * generic.h
 *	  Implement higher-level operations based on lower-level atomic
 *	  operations.
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics/generic.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#	error "should be included via atomics.h"
#endif

/*
 * If read or write barriers are undefined, we upgrade them to full memory
 * barriers.
 */
#if !defined(pg_read_barrier_impl)
#	define pg_read_barrier_impl pg_memory_barrier_impl
#endif
#if !defined(pg_write_barrier_impl)
#	define pg_write_barrier_impl pg_memory_barrier_impl
#endif
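
/*
 * Illustrative sketch (comment only, not compiled): how the public wrappers
 * pg_write_barrier()/pg_read_barrier() from atomics.h are typically paired.
 * The writer publishes data before setting a flag, the reader checks the
 * flag before consuming the data.  shared, data, flag and consume() are
 * hypothetical names used only for this example.
 *
 *		// writer
 *		shared->data = 42;
 *		pg_write_barrier();		// order the data write before the flag write
 *		shared->flag = true;
 *
 *		// reader
 *		if (shared->flag)
 *		{
 *			pg_read_barrier();	// order the flag read before the data read
 *			consume(shared->data);
 *		}
 */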

#ifndef PG_HAVE_SPIN_DELAY
#define PG_HAVE_SPIN_DELAY
#define pg_spin_delay_impl()	((void)0)
#endif


/* provide fallback: implement pg_atomic_flag on top of pg_atomic_uint32 */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && defined(PG_HAVE_ATOMIC_U32_SUPPORT)
#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef pg_atomic_uint32 pg_atomic_flag;
#endif

#ifndef PG_HAVE_ATOMIC_READ_U32
#define PG_HAVE_ATOMIC_READ_U32
static inline uint32
pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
{
	return ptr->value;
}
#endif

#ifndef PG_HAVE_ATOMIC_WRITE_U32
#define PG_HAVE_ATOMIC_WRITE_U32
static inline void
pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	ptr->value = val;
}
#endif

#ifndef PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
#define PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
static inline void
pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	ptr->value = val;
}
#endif

/*
 * provide fallback for test_and_set using atomic_exchange if available
 */
#if !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)

#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_write_u32_impl(ptr, 0);
}

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_exchange_u32_impl(ptr, 1) == 0;
}

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_read_u32_impl(ptr) == 0;
}

#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* XXX: release semantics suffice? */
	pg_memory_barrier_impl();
	pg_atomic_write_u32_impl(ptr, 0);
}

/*
 * provide fallback for test_and_set using atomic_compare_exchange if
 * available.
 */
#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)

#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_write_u32_impl(ptr, 0);
}

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	uint32 value = 0;
	return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
}

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_read_u32_impl(ptr) == 0;
}

#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	/*
	 * Use a memory barrier + plain write if we have a native memory
	 * barrier. But don't do so if memory barriers use spinlocks - that'd lead
	 * to circularity if flags are used to implement spinlocks.
	 */
#ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
	/* XXX: release semantics suffice? */
	pg_memory_barrier_impl();
	pg_atomic_write_u32_impl(ptr, 0);
#else
	uint32 value = 1;
	pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
#endif
}

#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
#	error "No pg_atomic_test_and_set provided"
#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
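
/*
 * Illustrative sketch (comment only, not compiled): how the flag API is
 * typically used through the public wrappers from atomics.h, as a simple
 * test-and-set style lock.  my_flag is a hypothetical pg_atomic_flag living
 * in shared memory.
 *
 *		pg_atomic_init_flag(&my_flag);			// once, before first use
 *
 *		while (!pg_atomic_test_set_flag(&my_flag))
 *			pg_spin_delay();					// already set, wait and retry
 *		... critical section ...
 *		pg_atomic_clear_flag(&my_flag);			// release
 */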


#ifndef PG_HAVE_ATOMIC_INIT_U32
#define PG_HAVE_ATOMIC_INIT_U32
static inline void
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
{
	pg_atomic_write_u32_impl(ptr, val_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_EXCHANGE_U32
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
{
	uint32 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
		/* skip */;
	return old;
}
#endif
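
/*
 * Illustrative sketch (comment only, not compiled): the public
 * pg_atomic_exchange_u32() swaps in a new value and returns the previous one
 * in a single atomic step.  my_state, OLD_STATE and NEW_STATE are
 * hypothetical names used only for this example.
 *
 *		uint32	prev = pg_atomic_exchange_u32(&my_state, NEW_STATE);
 *
 *		if (prev == OLD_STATE)
 *			... exactly one caller observes this transition ...
 */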

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
		/* skip */;
	return old;
}
#endif
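
/*
 * Illustrative sketch (comment only, not compiled): the public
 * pg_atomic_fetch_add_u32() returns the value *before* the addition, so it
 * can hand out unique tickets from a shared counter.  my_counter is a
 * hypothetical pg_atomic_uint32 in shared memory.
 *
 *		uint32	ticket = pg_atomic_fetch_add_u32(&my_counter, 1);
 *		// ticket is unique per caller; ticket + 1 is the new counter value
 */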

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return pg_atomic_fetch_add_u32_impl(ptr, -sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	uint32 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
		/* skip */;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	uint32 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
		/* skip */;
	return old;
}
#endif
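
/*
 * Illustrative sketch (comment only, not compiled): the public
 * pg_atomic_fetch_or_u32()/pg_atomic_fetch_and_u32() can set and clear
 * individual bits in a shared status word, returning the bits as they were
 * before the change.  my_status and MY_BIT are hypothetical names.
 *
 *		uint32	before = pg_atomic_fetch_or_u32(&my_status, MY_BIT);
 *
 *		if ((before & MY_BIT) == 0)
 *			... this caller was the one that set the bit ...
 *
 *		pg_atomic_fetch_and_u32(&my_status, ~MY_BIT);	// clear it again
 */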

#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
#define PG_HAVE_ATOMIC_ADD_FETCH_U32
static inline uint32
pg_atomic_add_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	return pg_atomic_fetch_add_u32_impl(ptr, add_) + add_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U32)
#define PG_HAVE_ATOMIC_SUB_FETCH_U32
static inline uint32
pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return pg_atomic_fetch_sub_u32_impl(ptr, sub_) - sub_;
}
#endif
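
/*
 * Illustrative sketch (comment only, not compiled): the *_fetch variants
 * return the value *after* the update, which is convenient for reference
 * counting.  my_refcount is a hypothetical pg_atomic_uint32.
 *
 *		pg_atomic_add_fetch_u32(&my_refcount, 1);	// take a reference
 *
 *		if (pg_atomic_sub_fetch_u32(&my_refcount, 1) == 0)
 *			... last reference dropped, safe to clean up ...
 */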

#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
{
	uint64 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
		/* skip */;
	return old;
}
#endif

#ifndef PG_HAVE_ATOMIC_WRITE_U64
#define PG_HAVE_ATOMIC_WRITE_U64

#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
	!defined(PG_HAVE_ATOMIC_U64_SIMULATION)

static inline void
pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * On this platform aligned 64-bit writes are guaranteed to be atomic,
	 * except when using the fallback implementation, where we can't guarantee
	 * the required alignment.
	 */
	AssertPointerAlignment(ptr, 8);
	ptr->value = val;
}

#else

static inline void
pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * 64-bit writes aren't safe on all platforms. In the generic
	 * implementation, implement them as an atomic exchange.
	 */
	pg_atomic_exchange_u64_impl(ptr, val);
}

#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
#endif /* PG_HAVE_ATOMIC_WRITE_U64 */

#ifndef PG_HAVE_ATOMIC_READ_U64
#define PG_HAVE_ATOMIC_READ_U64

#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
	!defined(PG_HAVE_ATOMIC_U64_SIMULATION)

static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
	/*
	 * On this platform aligned 64-bit reads are guaranteed to be atomic.
	 */
	AssertPointerAlignment(ptr, 8);
	return ptr->value;
}

#else

static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
	uint64 old = 0;

	/*
	 * 64-bit reads aren't atomic on all platforms. In the generic
	 * implementation, implement them as a compare/exchange with 0. That will
	 * either fail or succeed, but always returns the old value. It might
	 * store a 0, but only if the previous value was also 0 - i.e. harmless.
	 */
	pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);

	return old;
}
#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
#endif /* PG_HAVE_ATOMIC_READ_U64 */
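
/*
 * Illustrative sketch (comment only, not compiled): callers always go
 * through the public pg_atomic_read_u64()/pg_atomic_write_u64() wrappers, so
 * they get an untorn 64-bit value even on platforms that use the
 * compare/exchange based fallbacks above.  my_position is a hypothetical
 * pg_atomic_uint64 in shared memory.
 *
 *		pg_atomic_write_u64(&my_position, new_position);
 *		...
 *		uint64	snapshot = pg_atomic_read_u64(&my_position);
 */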

#ifndef PG_HAVE_ATOMIC_INIT_U64
#define PG_HAVE_ATOMIC_INIT_U64
static inline void
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
{
	pg_atomic_write_u64_impl(ptr, val_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	uint64 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
		/* skip */;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return pg_atomic_fetch_add_u64_impl(ptr, -sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	uint64 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
		/* skip */;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	uint64 old;
	old = ptr->value;	/* ok if read is not atomic */
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
		/* skip */;
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
#define PG_HAVE_ATOMIC_ADD_FETCH_U64
static inline uint64
pg_atomic_add_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	return pg_atomic_fetch_add_u64_impl(ptr, add_) + add_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U64)
#define PG_HAVE_ATOMIC_SUB_FETCH_U64
static inline uint64
pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
}
#endif