/*-------------------------------------------------------------------------
 *
 * fallback.h
 *    Fallback for platforms without spinlock and/or atomics support. Slower
 *    than native atomics support, but not unusably slow.
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics/fallback.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#error "should be included via atomics.h"
#endif

#ifndef pg_memory_barrier_impl
/*
 * If we have no memory barrier implementation for this architecture, we
 * fall back to acquiring and releasing a spinlock. This might, in turn,
 * fall back to the semaphore-based spinlock implementation, which will be
 * amazingly slow.
 *
 * It's not self-evident that every possible legal implementation of a
 * spinlock acquire-and-release would be equivalent to a full memory barrier.
 * For example, I'm not sure that Itanium's acq and rel add up to a full
 * fence. But all of our actual implementations seem OK in this regard.
 */
#define PG_HAVE_MEMORY_BARRIER_EMULATION

extern void pg_spinlock_barrier(void);
#define pg_memory_barrier_impl pg_spinlock_barrier
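
/*
 * For illustration, the matching definition lives in the backend, not in
 * this header. A minimal sketch, assuming a process-local dummy spinlock
 * and the S_LOCK/S_UNLOCK primitives from storage/s_lock.h:
 *
 *      static slock_t dummy_spinlock;
 *
 *      void
 *      pg_spinlock_barrier(void)
 *      {
 *          S_LOCK(&dummy_spinlock);
 *          S_UNLOCK(&dummy_spinlock);
 *      }
 */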
#endif

#ifndef pg_compiler_barrier_impl
/*
 * If the compiler/arch combination does not provide compiler barriers,
 * provide a fallback. The fallback simply consists of a call to a function
 * defined in another translation unit. That should guarantee compiler
 * barrier semantics, except under compilers that do inter-translation-unit/
 * global optimization - those had better provide an actual compiler barrier.
 *
 * A native compiler barrier is, of course, a lot faster than this...
 */
#define PG_HAVE_COMPILER_BARRIER_EMULATION
extern void pg_extern_compiler_barrier(void);
#define pg_compiler_barrier_impl pg_extern_compiler_barrier
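
/*
 * For illustration, the out-of-line definition can simply be an empty
 * function in another translation unit; the opaque call is what prevents
 * the compiler from reordering loads and stores across it. A minimal
 * sketch, assuming it is compiled separately from its callers:
 *
 *      void
 *      pg_extern_compiler_barrier(void)
 *      {
 *      }
 */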
#endif


/*
 * If we have no atomics implementation for this platform, fall back to
 * providing the atomics API using a spinlock to protect the internal state.
 * Possibly the spinlock implementation uses semaphores internally...
 *
 * We have to be a bit careful here, as it's not guaranteed that atomic
 * variables are mapped to the same address in every process (e.g. in
 * dynamic shared memory segments). So we can't just hash the address and
 * use that to map to a spinlock. Instead, assign a spinlock to each atomic
 * variable when it is initialized.
 */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && !defined(PG_HAVE_ATOMIC_U32_SUPPORT)

#define PG_HAVE_ATOMIC_FLAG_SIMULATION
#define PG_HAVE_ATOMIC_FLAG_SUPPORT

typedef struct pg_atomic_flag
{
    /*
     * To avoid circular includes we can't use slock_t as the type here.
     * Instead just reserve enough space for all spinlock types. Some
     * platforms would be content with just one byte instead of four, but
     * that's not too much waste.
     */
#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP compilers */
    int         sema[4];
#else
    int         sema;
#endif
    volatile bool value;
} pg_atomic_flag;

#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */

#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)

#define PG_HAVE_ATOMIC_U32_SIMULATION

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
    /* Check pg_atomic_flag's definition above for an explanation */
#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP compilers */
    int         sema[4];
#else
    int         sema;
#endif
    volatile uint32 value;
} pg_atomic_uint32;

#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */

#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT)

#define PG_HAVE_ATOMIC_U64_SIMULATION

#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
    /* Check pg_atomic_flag's definition above for an explanation */
#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP compilers */
    int         sema[4];
#else
    int         sema;
#endif
    volatile uint64 value;
} pg_atomic_uint64;

#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */

#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION

#define PG_HAVE_ATOMIC_INIT_FLAG
extern void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
extern bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_CLEAR_FLAG
extern void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
extern bool pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr);
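
/*
 * For illustration, the backend definitions funnel every flag operation
 * through the spinlock embedded in the variable. A minimal sketch of the
 * init and test-and-set paths, assuming SpinLockInit/SpinLockAcquire/
 * SpinLockRelease from storage/spin.h and treating ptr->sema as an slock_t:
 *
 *      void
 *      pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 *      {
 *          SpinLockInit((slock_t *) &ptr->sema);
 *          ptr->value = false;
 *      }
 *
 *      bool
 *      pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 *      {
 *          bool        oldval;
 *
 *          SpinLockAcquire((slock_t *) &ptr->sema);
 *          oldval = ptr->value;
 *          ptr->value = true;
 *          SpinLockRelease((slock_t *) &ptr->sema);
 *
 *          return !oldval;
 *      }
 */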

#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */

#ifdef PG_HAVE_ATOMIC_U32_SIMULATION

#define PG_HAVE_ATOMIC_INIT_U32
extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_);

#define PG_HAVE_ATOMIC_WRITE_U32
extern void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                uint32 *expected, uint32 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U32
extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
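
/*
 * For illustration, a minimal sketch of the compare-exchange path, again
 * assuming SpinLockAcquire/SpinLockRelease from storage/spin.h. Performing
 * the whole read-compare-write cycle under the per-variable spinlock is
 * what makes the operation atomic:
 *
 *      bool
 *      pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 *                                          uint32 *expected, uint32 newval)
 *      {
 *          bool        ret;
 *
 *          SpinLockAcquire((slock_t *) &ptr->sema);
 *          if (ptr->value == *expected)
 *          {
 *              ptr->value = newval;
 *              ret = true;
 *          }
 *          else
 *          {
 *              *expected = ptr->value;
 *              ret = false;
 *          }
 *          SpinLockRelease((slock_t *) &ptr->sema);
 *
 *          return ret;
 *      }
 */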

#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */


#ifdef PG_HAVE_ATOMIC_U64_SIMULATION

#define PG_HAVE_ATOMIC_INIT_U64
extern void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
extern bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                                uint64 *expected, uint64 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U64
extern uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_);
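
/*
 * For illustration, the 64-bit fetch-and-add follows the same pattern; the
 * spinlock also has to protect plain reads and writes of the value, since
 * 64-bit loads and stores need not be atomic on 32-bit platforms. A minimal
 * sketch, under the same assumptions as above:
 *
 *      uint64
 *      pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 *      {
 *          uint64      oldval;
 *
 *          SpinLockAcquire((slock_t *) &ptr->sema);
 *          oldval = ptr->value;
 *          ptr->value += add_;
 *          SpinLockRelease((slock_t *) &ptr->sema);
 *
 *          return oldval;
 *      }
 */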

#endif /* PG_HAVE_ATOMIC_U64_SIMULATION */