| 1 | // -*- C++ -*- | 
| 2 | //===--------------------------- atomic -----------------------------------===// | 
| 3 | // | 
| 4 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | 
| 5 | // See https://llvm.org/LICENSE.txt for license information. | 
| 6 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | 
| 7 | // | 
| 8 | //===----------------------------------------------------------------------===// | 
| 9 |  | 
| 10 | #ifndef _LIBCPP_ATOMIC | 
| 11 | #define _LIBCPP_ATOMIC | 
| 12 |  | 
| 13 | /* | 
| 14 |     atomic synopsis | 
| 15 |  | 
| 16 | namespace std | 
| 17 | { | 
| 18 |  | 
| 19 | // feature test macro | 
| 20 |  | 
| 21 | #define __cpp_lib_atomic_is_always_lock_free // as specified by SG10 | 
| 22 |  | 
| 23 |  // order and consistency | 
| 24 |  | 
| 25 |  enum memory_order: unspecified // enum class in C++20 | 
| 26 |  { | 
| 27 |     relaxed, | 
| 28 |     consume, // load-consume | 
| 29 |     acquire, // load-acquire | 
| 30 |     release, // store-release | 
| 31 |     acq_rel, // store-release load-acquire | 
    seq_cst // store-release load-acquire, plus a single total order over all seq_cst operations
| 33 |  }; | 
| 34 |  | 
| 35 |  inline constexpr auto memory_order_relaxed = memory_order::relaxed; | 
| 36 |  inline constexpr auto memory_order_consume = memory_order::consume; | 
| 37 |  inline constexpr auto memory_order_acquire = memory_order::acquire; | 
| 38 |  inline constexpr auto memory_order_release = memory_order::release; | 
| 39 |  inline constexpr auto memory_order_acq_rel = memory_order::acq_rel; | 
| 40 |  inline constexpr auto memory_order_seq_cst = memory_order::seq_cst; | 
| 41 |  | 
| 42 | template <class T> T kill_dependency(T y) noexcept; | 
| 43 |  | 
| 44 | // lock-free property | 
| 45 |  | 
| 46 | #define ATOMIC_BOOL_LOCK_FREE unspecified | 
| 47 | #define ATOMIC_CHAR_LOCK_FREE unspecified | 
| 48 | #define ATOMIC_CHAR16_T_LOCK_FREE unspecified | 
| 49 | #define ATOMIC_CHAR32_T_LOCK_FREE unspecified | 
| 50 | #define ATOMIC_WCHAR_T_LOCK_FREE unspecified | 
| 51 | #define ATOMIC_SHORT_LOCK_FREE unspecified | 
| 52 | #define ATOMIC_INT_LOCK_FREE unspecified | 
| 53 | #define ATOMIC_LONG_LOCK_FREE unspecified | 
| 54 | #define ATOMIC_LLONG_LOCK_FREE unspecified | 
| 55 | #define ATOMIC_POINTER_LOCK_FREE unspecified | 
| 56 |  | 
| 57 | // flag type and operations | 
| 58 |  | 
| 59 | typedef struct atomic_flag | 
| 60 | { | 
| 61 |     bool test_and_set(memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 62 |     bool test_and_set(memory_order m = memory_order_seq_cst) noexcept; | 
| 63 |     void clear(memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 64 |     void clear(memory_order m = memory_order_seq_cst) noexcept; | 
| 65 |     atomic_flag()  noexcept = default; | 
| 66 |     atomic_flag(const atomic_flag&) = delete; | 
| 67 |     atomic_flag& operator=(const atomic_flag&) = delete; | 
| 68 |     atomic_flag& operator=(const atomic_flag&) volatile = delete; | 
| 69 | } atomic_flag; | 
| 70 |  | 
| 71 | bool | 
| 72 |     atomic_flag_test_and_set(volatile atomic_flag* obj) noexcept; | 
| 73 |  | 
| 74 | bool | 
| 75 |     atomic_flag_test_and_set(atomic_flag* obj) noexcept; | 
| 76 |  | 
| 77 | bool | 
| 78 |     atomic_flag_test_and_set_explicit(volatile atomic_flag* obj, | 
| 79 |                                       memory_order m) noexcept; | 
| 80 |  | 
| 81 | bool | 
| 82 |     atomic_flag_test_and_set_explicit(atomic_flag* obj, memory_order m) noexcept; | 
| 83 |  | 
| 84 | void | 
| 85 |     atomic_flag_clear(volatile atomic_flag* obj) noexcept; | 
| 86 |  | 
| 87 | void | 
| 88 |     atomic_flag_clear(atomic_flag* obj) noexcept; | 
| 89 |  | 
| 90 | void | 
| 91 |     atomic_flag_clear_explicit(volatile atomic_flag* obj, memory_order m) noexcept; | 
| 92 |  | 
| 93 | void | 
| 94 |     atomic_flag_clear_explicit(atomic_flag* obj, memory_order m) noexcept; | 
| 95 |  | 
| 96 | #define ATOMIC_FLAG_INIT see below | 
| 97 | #define ATOMIC_VAR_INIT(value) see below | 
| 98 |  | 
| 99 | template <class T> | 
| 100 | struct atomic | 
| 101 | { | 
| 102 |     static constexpr bool is_always_lock_free; | 
| 103 |     bool is_lock_free() const volatile noexcept; | 
| 104 |     bool is_lock_free() const noexcept; | 
| 105 |     void store(T desr, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 106 |     void store(T desr, memory_order m = memory_order_seq_cst) noexcept; | 
| 107 |     T load(memory_order m = memory_order_seq_cst) const volatile noexcept; | 
| 108 |     T load(memory_order m = memory_order_seq_cst) const noexcept; | 
| 109 |     operator T() const volatile noexcept; | 
| 110 |     operator T() const noexcept; | 
| 111 |     T exchange(T desr, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 112 |     T exchange(T desr, memory_order m = memory_order_seq_cst) noexcept; | 
| 113 |     bool compare_exchange_weak(T& expc, T desr, | 
| 114 |                                memory_order s, memory_order f) volatile noexcept; | 
| 115 |     bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) noexcept; | 
| 116 |     bool compare_exchange_strong(T& expc, T desr, | 
| 117 |                                  memory_order s, memory_order f) volatile noexcept; | 
| 118 |     bool compare_exchange_strong(T& expc, T desr, | 
| 119 |                                  memory_order s, memory_order f) noexcept; | 
| 120 |     bool compare_exchange_weak(T& expc, T desr, | 
| 121 |                                memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 122 |     bool compare_exchange_weak(T& expc, T desr, | 
| 123 |                                memory_order m = memory_order_seq_cst) noexcept; | 
| 124 |     bool compare_exchange_strong(T& expc, T desr, | 
| 125 |                                 memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 126 |     bool compare_exchange_strong(T& expc, T desr, | 
| 127 |                                  memory_order m = memory_order_seq_cst) noexcept; | 
| 128 |  | 
| 129 |     atomic() noexcept = default; | 
| 130 |     constexpr atomic(T desr) noexcept; | 
| 131 |     atomic(const atomic&) = delete; | 
| 132 |     atomic& operator=(const atomic&) = delete; | 
| 133 |     atomic& operator=(const atomic&) volatile = delete; | 
| 134 |     T operator=(T) volatile noexcept; | 
| 135 |     T operator=(T) noexcept; | 
| 136 | }; | 
| 137 |  | 
| 138 | template <> | 
| 139 | struct atomic<integral> | 
| 140 | { | 
| 141 |     static constexpr bool is_always_lock_free; | 
| 142 |     bool is_lock_free() const volatile noexcept; | 
| 143 |     bool is_lock_free() const noexcept; | 
| 144 |     void store(integral desr, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 145 |     void store(integral desr, memory_order m = memory_order_seq_cst) noexcept; | 
| 146 |     integral load(memory_order m = memory_order_seq_cst) const volatile noexcept; | 
| 147 |     integral load(memory_order m = memory_order_seq_cst) const noexcept; | 
| 148 |     operator integral() const volatile noexcept; | 
| 149 |     operator integral() const noexcept; | 
| 150 |     integral exchange(integral desr, | 
| 151 |                       memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 152 |     integral exchange(integral desr, memory_order m = memory_order_seq_cst) noexcept; | 
| 153 |     bool compare_exchange_weak(integral& expc, integral desr, | 
| 154 |                                memory_order s, memory_order f) volatile noexcept; | 
| 155 |     bool compare_exchange_weak(integral& expc, integral desr, | 
| 156 |                                memory_order s, memory_order f) noexcept; | 
| 157 |     bool compare_exchange_strong(integral& expc, integral desr, | 
| 158 |                                  memory_order s, memory_order f) volatile noexcept; | 
| 159 |     bool compare_exchange_strong(integral& expc, integral desr, | 
| 160 |                                  memory_order s, memory_order f) noexcept; | 
| 161 |     bool compare_exchange_weak(integral& expc, integral desr, | 
| 162 |                                memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 163 |     bool compare_exchange_weak(integral& expc, integral desr, | 
| 164 |                                memory_order m = memory_order_seq_cst) noexcept; | 
| 165 |     bool compare_exchange_strong(integral& expc, integral desr, | 
| 166 |                                 memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 167 |     bool compare_exchange_strong(integral& expc, integral desr, | 
| 168 |                                  memory_order m = memory_order_seq_cst) noexcept; | 
| 169 |  | 
| 170 |     integral | 
| 171 |         fetch_add(integral op, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 172 |     integral fetch_add(integral op, memory_order m = memory_order_seq_cst) noexcept; | 
| 173 |     integral | 
| 174 |         fetch_sub(integral op, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 175 |     integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) noexcept; | 
| 176 |     integral | 
| 177 |         fetch_and(integral op, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 178 |     integral fetch_and(integral op, memory_order m = memory_order_seq_cst) noexcept; | 
| 179 |     integral | 
| 180 |         fetch_or(integral op, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 181 |     integral fetch_or(integral op, memory_order m = memory_order_seq_cst) noexcept; | 
| 182 |     integral | 
| 183 |         fetch_xor(integral op, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 184 |     integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) noexcept; | 
| 185 |  | 
| 186 |     atomic() noexcept = default; | 
| 187 |     constexpr atomic(integral desr) noexcept; | 
| 188 |     atomic(const atomic&) = delete; | 
| 189 |     atomic& operator=(const atomic&) = delete; | 
| 190 |     atomic& operator=(const atomic&) volatile = delete; | 
| 191 |     integral operator=(integral desr) volatile noexcept; | 
| 192 |     integral operator=(integral desr) noexcept; | 
| 193 |  | 
| 194 |     integral operator++(int) volatile noexcept; | 
| 195 |     integral operator++(int) noexcept; | 
| 196 |     integral operator--(int) volatile noexcept; | 
| 197 |     integral operator--(int) noexcept; | 
| 198 |     integral operator++() volatile noexcept; | 
| 199 |     integral operator++() noexcept; | 
| 200 |     integral operator--() volatile noexcept; | 
| 201 |     integral operator--() noexcept; | 
| 202 |     integral operator+=(integral op) volatile noexcept; | 
| 203 |     integral operator+=(integral op) noexcept; | 
| 204 |     integral operator-=(integral op) volatile noexcept; | 
| 205 |     integral operator-=(integral op) noexcept; | 
| 206 |     integral operator&=(integral op) volatile noexcept; | 
| 207 |     integral operator&=(integral op) noexcept; | 
| 208 |     integral operator|=(integral op) volatile noexcept; | 
| 209 |     integral operator|=(integral op) noexcept; | 
| 210 |     integral operator^=(integral op) volatile noexcept; | 
| 211 |     integral operator^=(integral op) noexcept; | 
| 212 | }; | 
| 213 |  | 
| 214 | template <class T> | 
| 215 | struct atomic<T*> | 
| 216 | { | 
| 217 |     static constexpr bool is_always_lock_free; | 
| 218 |     bool is_lock_free() const volatile noexcept; | 
| 219 |     bool is_lock_free() const noexcept; | 
| 220 |     void store(T* desr, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 221 |     void store(T* desr, memory_order m = memory_order_seq_cst) noexcept; | 
| 222 |     T* load(memory_order m = memory_order_seq_cst) const volatile noexcept; | 
| 223 |     T* load(memory_order m = memory_order_seq_cst) const noexcept; | 
| 224 |     operator T*() const volatile noexcept; | 
| 225 |     operator T*() const noexcept; | 
| 226 |     T* exchange(T* desr, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 227 |     T* exchange(T* desr, memory_order m = memory_order_seq_cst) noexcept; | 
| 228 |     bool compare_exchange_weak(T*& expc, T* desr, | 
| 229 |                                memory_order s, memory_order f) volatile noexcept; | 
| 230 |     bool compare_exchange_weak(T*& expc, T* desr, | 
| 231 |                                memory_order s, memory_order f) noexcept; | 
| 232 |     bool compare_exchange_strong(T*& expc, T* desr, | 
| 233 |                                  memory_order s, memory_order f) volatile noexcept; | 
| 234 |     bool compare_exchange_strong(T*& expc, T* desr, | 
| 235 |                                  memory_order s, memory_order f) noexcept; | 
| 236 |     bool compare_exchange_weak(T*& expc, T* desr, | 
| 237 |                                memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 238 |     bool compare_exchange_weak(T*& expc, T* desr, | 
| 239 |                                memory_order m = memory_order_seq_cst) noexcept; | 
| 240 |     bool compare_exchange_strong(T*& expc, T* desr, | 
| 241 |                                 memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 242 |     bool compare_exchange_strong(T*& expc, T* desr, | 
| 243 |                                  memory_order m = memory_order_seq_cst) noexcept; | 
| 244 |     T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 245 |     T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) noexcept; | 
| 246 |     T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) volatile noexcept; | 
| 247 |     T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) noexcept; | 
| 248 |  | 
| 249 |     atomic() noexcept = default; | 
| 250 |     constexpr atomic(T* desr) noexcept; | 
| 251 |     atomic(const atomic&) = delete; | 
| 252 |     atomic& operator=(const atomic&) = delete; | 
| 253 |     atomic& operator=(const atomic&) volatile = delete; | 
| 254 |  | 
| 255 |     T* operator=(T*) volatile noexcept; | 
| 256 |     T* operator=(T*) noexcept; | 
| 257 |     T* operator++(int) volatile noexcept; | 
| 258 |     T* operator++(int) noexcept; | 
| 259 |     T* operator--(int) volatile noexcept; | 
| 260 |     T* operator--(int) noexcept; | 
| 261 |     T* operator++() volatile noexcept; | 
| 262 |     T* operator++() noexcept; | 
| 263 |     T* operator--() volatile noexcept; | 
| 264 |     T* operator--() noexcept; | 
| 265 |     T* operator+=(ptrdiff_t op) volatile noexcept; | 
| 266 |     T* operator+=(ptrdiff_t op) noexcept; | 
| 267 |     T* operator-=(ptrdiff_t op) volatile noexcept; | 
| 268 |     T* operator-=(ptrdiff_t op) noexcept; | 
| 269 | }; | 
| 270 |  | 
| 271 |  | 
| 272 | template <class T> | 
| 273 |     bool | 
| 274 |     atomic_is_lock_free(const volatile atomic<T>* obj) noexcept; | 
| 275 |  | 
| 276 | template <class T> | 
| 277 |     bool | 
| 278 |     atomic_is_lock_free(const atomic<T>* obj) noexcept; | 
| 279 |  | 
| 280 | template <class T> | 
| 281 |     void | 
| 282 |     atomic_init(volatile atomic<T>* obj, T desr) noexcept; | 
| 283 |  | 
| 284 | template <class T> | 
| 285 |     void | 
| 286 |     atomic_init(atomic<T>* obj, T desr) noexcept; | 
| 287 |  | 
| 288 | template <class T> | 
| 289 |     void | 
| 290 |     atomic_store(volatile atomic<T>* obj, T desr) noexcept; | 
| 291 |  | 
| 292 | template <class T> | 
| 293 |     void | 
| 294 |     atomic_store(atomic<T>* obj, T desr) noexcept; | 
| 295 |  | 
| 296 | template <class T> | 
| 297 |     void | 
| 298 |     atomic_store_explicit(volatile atomic<T>* obj, T desr, memory_order m) noexcept; | 
| 299 |  | 
| 300 | template <class T> | 
| 301 |     void | 
| 302 |     atomic_store_explicit(atomic<T>* obj, T desr, memory_order m) noexcept; | 
| 303 |  | 
| 304 | template <class T> | 
| 305 |     T | 
| 306 |     atomic_load(const volatile atomic<T>* obj) noexcept; | 
| 307 |  | 
| 308 | template <class T> | 
| 309 |     T | 
| 310 |     atomic_load(const atomic<T>* obj) noexcept; | 
| 311 |  | 
| 312 | template <class T> | 
| 313 |     T | 
| 314 |     atomic_load_explicit(const volatile atomic<T>* obj, memory_order m) noexcept; | 
| 315 |  | 
| 316 | template <class T> | 
| 317 |     T | 
| 318 |     atomic_load_explicit(const atomic<T>* obj, memory_order m) noexcept; | 
| 319 |  | 
| 320 | template <class T> | 
| 321 |     T | 
| 322 |     atomic_exchange(volatile atomic<T>* obj, T desr) noexcept; | 
| 323 |  | 
| 324 | template <class T> | 
| 325 |     T | 
| 326 |     atomic_exchange(atomic<T>* obj, T desr) noexcept; | 
| 327 |  | 
| 328 | template <class T> | 
| 329 |     T | 
| 330 |     atomic_exchange_explicit(volatile atomic<T>* obj, T desr, memory_order m) noexcept; | 
| 331 |  | 
| 332 | template <class T> | 
| 333 |     T | 
| 334 |     atomic_exchange_explicit(atomic<T>* obj, T desr, memory_order m) noexcept; | 
| 335 |  | 
| 336 | template <class T> | 
| 337 |     bool | 
| 338 |     atomic_compare_exchange_weak(volatile atomic<T>* obj, T* expc, T desr) noexcept; | 
| 339 |  | 
| 340 | template <class T> | 
| 341 |     bool | 
| 342 |     atomic_compare_exchange_weak(atomic<T>* obj, T* expc, T desr) noexcept; | 
| 343 |  | 
| 344 | template <class T> | 
| 345 |     bool | 
| 346 |     atomic_compare_exchange_strong(volatile atomic<T>* obj, T* expc, T desr) noexcept; | 
| 347 |  | 
| 348 | template <class T> | 
| 349 |     bool | 
| 350 |     atomic_compare_exchange_strong(atomic<T>* obj, T* expc, T desr) noexcept; | 
| 351 |  | 
| 352 | template <class T> | 
| 353 |     bool | 
| 354 |     atomic_compare_exchange_weak_explicit(volatile atomic<T>* obj, T* expc, | 
| 355 |                                           T desr, | 
| 356 |                                           memory_order s, memory_order f) noexcept; | 
| 357 |  | 
| 358 | template <class T> | 
| 359 |     bool | 
| 360 |     atomic_compare_exchange_weak_explicit(atomic<T>* obj, T* expc, T desr, | 
| 361 |                                           memory_order s, memory_order f) noexcept; | 
| 362 |  | 
| 363 | template <class T> | 
| 364 |     bool | 
| 365 |     atomic_compare_exchange_strong_explicit(volatile atomic<T>* obj, | 
| 366 |                                             T* expc, T desr, | 
| 367 |                                             memory_order s, memory_order f) noexcept; | 
| 368 |  | 
| 369 | template <class T> | 
| 370 |     bool | 
| 371 |     atomic_compare_exchange_strong_explicit(atomic<T>* obj, T* expc, | 
| 372 |                                             T desr, | 
| 373 |                                             memory_order s, memory_order f) noexcept; | 
| 374 |  | 
| 375 | template <class Integral> | 
| 376 |     Integral | 
| 377 |     atomic_fetch_add(volatile atomic<Integral>* obj, Integral op) noexcept; | 
| 378 |  | 
| 379 | template <class Integral> | 
| 380 |     Integral | 
| 381 |     atomic_fetch_add(atomic<Integral>* obj, Integral op) noexcept; | 
| 382 |  | 
| 383 | template <class Integral> | 
| 384 |     Integral | 
| 385 |     atomic_fetch_add_explicit(volatile atomic<Integral>* obj, Integral op, | 
| 386 |                               memory_order m) noexcept; | 
| 387 | template <class Integral> | 
| 388 |     Integral | 
| 389 |     atomic_fetch_add_explicit(atomic<Integral>* obj, Integral op, | 
| 390 |                               memory_order m) noexcept; | 
| 391 | template <class Integral> | 
| 392 |     Integral | 
| 393 |     atomic_fetch_sub(volatile atomic<Integral>* obj, Integral op) noexcept; | 
| 394 |  | 
| 395 | template <class Integral> | 
| 396 |     Integral | 
| 397 |     atomic_fetch_sub(atomic<Integral>* obj, Integral op) noexcept; | 
| 398 |  | 
| 399 | template <class Integral> | 
| 400 |     Integral | 
| 401 |     atomic_fetch_sub_explicit(volatile atomic<Integral>* obj, Integral op, | 
| 402 |                               memory_order m) noexcept; | 
| 403 | template <class Integral> | 
| 404 |     Integral | 
| 405 |     atomic_fetch_sub_explicit(atomic<Integral>* obj, Integral op, | 
| 406 |                               memory_order m) noexcept; | 
| 407 | template <class Integral> | 
| 408 |     Integral | 
| 409 |     atomic_fetch_and(volatile atomic<Integral>* obj, Integral op) noexcept; | 
| 410 |  | 
| 411 | template <class Integral> | 
| 412 |     Integral | 
| 413 |     atomic_fetch_and(atomic<Integral>* obj, Integral op) noexcept; | 
| 414 |  | 
| 415 | template <class Integral> | 
| 416 |     Integral | 
| 417 |     atomic_fetch_and_explicit(volatile atomic<Integral>* obj, Integral op, | 
| 418 |                               memory_order m) noexcept; | 
| 419 | template <class Integral> | 
| 420 |     Integral | 
| 421 |     atomic_fetch_and_explicit(atomic<Integral>* obj, Integral op, | 
| 422 |                               memory_order m) noexcept; | 
| 423 | template <class Integral> | 
| 424 |     Integral | 
| 425 |     atomic_fetch_or(volatile atomic<Integral>* obj, Integral op) noexcept; | 
| 426 |  | 
| 427 | template <class Integral> | 
| 428 |     Integral | 
| 429 |     atomic_fetch_or(atomic<Integral>* obj, Integral op) noexcept; | 
| 430 |  | 
| 431 | template <class Integral> | 
| 432 |     Integral | 
| 433 |     atomic_fetch_or_explicit(volatile atomic<Integral>* obj, Integral op, | 
| 434 |                              memory_order m) noexcept; | 
| 435 | template <class Integral> | 
| 436 |     Integral | 
| 437 |     atomic_fetch_or_explicit(atomic<Integral>* obj, Integral op, | 
| 438 |                              memory_order m) noexcept; | 
| 439 | template <class Integral> | 
| 440 |     Integral | 
| 441 |     atomic_fetch_xor(volatile atomic<Integral>* obj, Integral op) noexcept; | 
| 442 |  | 
| 443 | template <class Integral> | 
| 444 |     Integral | 
| 445 |     atomic_fetch_xor(atomic<Integral>* obj, Integral op) noexcept; | 
| 446 |  | 
| 447 | template <class Integral> | 
| 448 |     Integral | 
| 449 |     atomic_fetch_xor_explicit(volatile atomic<Integral>* obj, Integral op, | 
| 450 |                               memory_order m) noexcept; | 
| 451 | template <class Integral> | 
| 452 |     Integral | 
| 453 |     atomic_fetch_xor_explicit(atomic<Integral>* obj, Integral op, | 
| 454 |                               memory_order m) noexcept; | 
| 455 |  | 
| 456 | template <class T> | 
| 457 |     T* | 
| 458 |     atomic_fetch_add(volatile atomic<T*>* obj, ptrdiff_t op) noexcept; | 
| 459 |  | 
| 460 | template <class T> | 
| 461 |     T* | 
| 462 |     atomic_fetch_add(atomic<T*>* obj, ptrdiff_t op) noexcept; | 
| 463 |  | 
| 464 | template <class T> | 
| 465 |     T* | 
| 466 |     atomic_fetch_add_explicit(volatile atomic<T*>* obj, ptrdiff_t op, | 
| 467 |                               memory_order m) noexcept; | 
| 468 | template <class T> | 
| 469 |     T* | 
| 470 |     atomic_fetch_add_explicit(atomic<T*>* obj, ptrdiff_t op, memory_order m) noexcept; | 
| 471 |  | 
| 472 | template <class T> | 
| 473 |     T* | 
| 474 |     atomic_fetch_sub(volatile atomic<T*>* obj, ptrdiff_t op) noexcept; | 
| 475 |  | 
| 476 | template <class T> | 
| 477 |     T* | 
| 478 |     atomic_fetch_sub(atomic<T*>* obj, ptrdiff_t op) noexcept; | 
| 479 |  | 
| 480 | template <class T> | 
| 481 |     T* | 
| 482 |     atomic_fetch_sub_explicit(volatile atomic<T*>* obj, ptrdiff_t op, | 
| 483 |                               memory_order m) noexcept; | 
| 484 | template <class T> | 
| 485 |     T* | 
| 486 |     atomic_fetch_sub_explicit(atomic<T*>* obj, ptrdiff_t op, memory_order m) noexcept; | 
| 487 |  | 
| 488 | // Atomics for standard typedef types | 
| 489 |  | 
| 490 | typedef atomic<bool>               atomic_bool; | 
| 491 | typedef atomic<char>               atomic_char; | 
| 492 | typedef atomic<signed char>        atomic_schar; | 
| 493 | typedef atomic<unsigned char>      atomic_uchar; | 
| 494 | typedef atomic<short>              atomic_short; | 
| 495 | typedef atomic<unsigned short>     atomic_ushort; | 
| 496 | typedef atomic<int>                atomic_int; | 
| 497 | typedef atomic<unsigned int>       atomic_uint; | 
| 498 | typedef atomic<long>               atomic_long; | 
| 499 | typedef atomic<unsigned long>      atomic_ulong; | 
| 500 | typedef atomic<long long>          atomic_llong; | 
| 501 | typedef atomic<unsigned long long> atomic_ullong; | 
| 502 | typedef atomic<char16_t>           atomic_char16_t; | 
| 503 | typedef atomic<char32_t>           atomic_char32_t; | 
| 504 | typedef atomic<wchar_t>            atomic_wchar_t; | 
| 505 |  | 
| 506 | typedef atomic<int_least8_t>   atomic_int_least8_t; | 
| 507 | typedef atomic<uint_least8_t>  atomic_uint_least8_t; | 
| 508 | typedef atomic<int_least16_t>  atomic_int_least16_t; | 
| 509 | typedef atomic<uint_least16_t> atomic_uint_least16_t; | 
| 510 | typedef atomic<int_least32_t>  atomic_int_least32_t; | 
| 511 | typedef atomic<uint_least32_t> atomic_uint_least32_t; | 
| 512 | typedef atomic<int_least64_t>  atomic_int_least64_t; | 
| 513 | typedef atomic<uint_least64_t> atomic_uint_least64_t; | 
| 514 |  | 
| 515 | typedef atomic<int_fast8_t>   atomic_int_fast8_t; | 
| 516 | typedef atomic<uint_fast8_t>  atomic_uint_fast8_t; | 
| 517 | typedef atomic<int_fast16_t>  atomic_int_fast16_t; | 
| 518 | typedef atomic<uint_fast16_t> atomic_uint_fast16_t; | 
| 519 | typedef atomic<int_fast32_t>  atomic_int_fast32_t; | 
| 520 | typedef atomic<uint_fast32_t> atomic_uint_fast32_t; | 
| 521 | typedef atomic<int_fast64_t>  atomic_int_fast64_t; | 
| 522 | typedef atomic<uint_fast64_t> atomic_uint_fast64_t; | 
| 523 |  | 
| 524 | typedef atomic<int8_t>   atomic_int8_t; | 
| 525 | typedef atomic<uint8_t>  atomic_uint8_t; | 
| 526 | typedef atomic<int16_t>  atomic_int16_t; | 
| 527 | typedef atomic<uint16_t> atomic_uint16_t; | 
| 528 | typedef atomic<int32_t>  atomic_int32_t; | 
| 529 | typedef atomic<uint32_t> atomic_uint32_t; | 
| 530 | typedef atomic<int64_t>  atomic_int64_t; | 
| 531 | typedef atomic<uint64_t> atomic_uint64_t; | 
| 532 |  | 
| 533 | typedef atomic<intptr_t>  atomic_intptr_t; | 
| 534 | typedef atomic<uintptr_t> atomic_uintptr_t; | 
| 535 | typedef atomic<size_t>    atomic_size_t; | 
| 536 | typedef atomic<ptrdiff_t> atomic_ptrdiff_t; | 
| 537 | typedef atomic<intmax_t>  atomic_intmax_t; | 
| 538 | typedef atomic<uintmax_t> atomic_uintmax_t; | 
| 539 |  | 
| 540 | // fences | 
| 541 |  | 
| 542 | void atomic_thread_fence(memory_order m) noexcept; | 
| 543 | void atomic_signal_fence(memory_order m) noexcept; | 
| 544 |  | 
| 545 | }  // std | 
| 546 |  | 
| 547 | */ | 
| 548 |  | 
| 549 | #include <__config> | 
| 550 | #include <cstddef> | 
| 551 | #include <cstdint> | 
| 552 | #include <type_traits> | 
| 553 | #include <version> | 
| 554 |  | 
| 555 | #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) | 
| 556 | #pragma GCC system_header | 
| 557 | #endif | 
| 558 |  | 
| 559 | #ifdef _LIBCPP_HAS_NO_THREADS | 
| 560 | # error <atomic> is not supported on this single threaded system | 
| 561 | #endif | 
| 562 | #ifdef _LIBCPP_HAS_NO_ATOMIC_HEADER | 
| 563 | # error <atomic> is not implemented | 
| 564 | #endif | 
| 565 | #ifdef kill_dependency | 
| 566 | # error C++ standard library is incompatible with <stdatomic.h> | 
| 567 | #endif | 
| 568 |  | 
// Compile-time diagnostics (via _LIBCPP_DIAGNOSE_WARNING) for memory_order
// arguments that the standard forbids for a given operation. These only warn
// when the argument is a constant the compiler can see.

// Atomic stores may not use consume, acquire, or acq_rel ordering.
#define _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) \
  _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_consume || \
                           __m == memory_order_acquire || \
                           __m == memory_order_acq_rel,   \
                        "memory order argument to atomic operation is invalid")

// Atomic loads may not use release or acq_rel ordering.
#define _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) \
  _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || \
                           __m == memory_order_acq_rel,   \
                        "memory order argument to atomic operation is invalid")

// For compare-exchange, the *failure* order __f may not be release or acq_rel
// (the success order __m is not checked here).
#define _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__m, __f) \
  _LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || \
                           __f == memory_order_acq_rel,   \
                        "memory order argument to atomic operation is invalid")
| 584 |  | 
| 585 | _LIBCPP_BEGIN_NAMESPACE_STD | 
| 586 |  | 
| 587 | // Figure out what the underlying type for `memory_order` would be if it were | 
| 588 | // declared as an unscoped enum (accounting for -fshort-enums). Use this result | 
| 589 | // to pin the underlying type in C++20. | 
// Mirror of the six standard memory orders as a plain unscoped enum. Its only
// purpose is to let the compiler tell us what underlying type it would choose
// for such an enum (which can vary, e.g. under -fshort-enums).
enum __legacy_memory_order {
    __mo_relaxed,
    __mo_consume,
    __mo_acquire,
    __mo_release,
    __mo_acq_rel,
    __mo_seq_cst
};

// The underlying type the compiler picked for the unscoped enum above. Used
// below to pin the underlying type of the C++20 scoped memory_order so the
// ABI matches the historical unscoped enum.
typedef underlying_type<__legacy_memory_order>::type __memory_order_underlying_t;
| 600 |  | 
#if _LIBCPP_STD_VER > 17

// C++20: memory_order is a scoped enumeration. The underlying type is pinned
// to __memory_order_underlying_t so object layout and calling convention are
// identical to the pre-C++20 unscoped enum (checked by the static_assert
// below).
enum class memory_order : __memory_order_underlying_t {
  relaxed = __mo_relaxed,
  consume = __mo_consume,
  acquire = __mo_acquire,
  release = __mo_release,
  acq_rel = __mo_acq_rel,
  seq_cst = __mo_seq_cst
};

// Standard-mandated constants so the pre-C++20 spellings keep working.
inline constexpr auto memory_order_relaxed = memory_order::relaxed;
inline constexpr auto memory_order_consume = memory_order::consume;
inline constexpr auto memory_order_acquire = memory_order::acquire;
inline constexpr auto memory_order_release = memory_order::release;
inline constexpr auto memory_order_acq_rel = memory_order::acq_rel;
inline constexpr auto memory_order_seq_cst = memory_order::seq_cst;

#else

// Pre-C++20: memory_order is an unscoped enumeration; the enumerators
// themselves provide the memory_order_* names.
typedef enum memory_order {
  memory_order_relaxed = __mo_relaxed,
  memory_order_consume = __mo_consume,
  memory_order_acquire = __mo_acquire,
  memory_order_release = __mo_release,
  memory_order_acq_rel = __mo_acq_rel,
  memory_order_seq_cst = __mo_seq_cst,
} memory_order;

#endif // _LIBCPP_STD_VER > 17

// Guard the ABI pin: both definitions must agree on the underlying type, or
// every function taking a memory_order would differ between standard modes.
static_assert((is_same<underlying_type<memory_order>::type, __memory_order_underlying_t>::value),
  "unexpected underlying type for std::memory_order" );
| 634 |  | 
| 635 | #if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) || \ | 
| 636 | 	defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS) | 
| 637 |  | 
| 638 | // [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because | 
| 639 | // the default operator= in an object is not volatile, a byte-by-byte copy | 
| 640 | // is required. | 
// Non-volatile overload: a plain assignment suffices.
template <typename _Tp, typename _Tv> _LIBCPP_INLINE_VISIBILITY
typename enable_if<is_assignable<_Tp&, _Tv>::value>::type
__cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
  __a_value = __val;
}
| 646 | template <typename _Tp, typename _Tv> _LIBCPP_INLINE_VISIBILITY | 
| 647 | typename enable_if<is_assignable<_Tp&, _Tv>::value>::type | 
| 648 | __cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) { | 
| 649 |   volatile char* __to = reinterpret_cast<volatile char*>(&__a_value); | 
| 650 |   volatile char* __end = __to + sizeof(_Tp); | 
| 651 |   volatile const char* __from = reinterpret_cast<volatile const char*>(&__val); | 
| 652 |   while (__to != __end) | 
| 653 |     *__to++ = *__from++; | 
| 654 | } | 
| 655 |  | 
| 656 | #endif | 
| 657 |  | 
| 658 | #if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) | 
| 659 |  | 
// Storage type used by the GCC/Clang __atomic builtins implementation: a
// trivial wrapper around the value the builtins operate on.
template <typename _Tp>
struct __cxx_atomic_base_impl {

  _LIBCPP_INLINE_VISIBILITY
#ifndef _LIBCPP_CXX03_LANG
    __cxx_atomic_base_impl() _NOEXCEPT = default;
#else
    // C++03 has no defaulted members; value-initialize explicitly.
    __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
#endif // _LIBCPP_CXX03_LANG
  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT
    : __a_value(value) {}
  _Tp __a_value;
};
| 673 |  | 
| 674 | _LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) { | 
| 675 |   // Avoid switch statement to make this a constexpr. | 
| 676 |   return __order == memory_order_relaxed ? __ATOMIC_RELAXED: | 
| 677 |          (__order == memory_order_acquire ? __ATOMIC_ACQUIRE: | 
| 678 |           (__order == memory_order_release ? __ATOMIC_RELEASE: | 
| 679 |            (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST: | 
| 680 |             (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL: | 
| 681 |               __ATOMIC_CONSUME)))); | 
| 682 | } | 
| 683 |  | 
| 684 | _LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) { | 
| 685 |   // Avoid switch statement to make this a constexpr. | 
| 686 |   return __order == memory_order_relaxed ? __ATOMIC_RELAXED: | 
| 687 |          (__order == memory_order_acquire ? __ATOMIC_ACQUIRE: | 
| 688 |           (__order == memory_order_release ? __ATOMIC_RELAXED: | 
| 689 |            (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST: | 
| 690 |             (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE: | 
| 691 |               __ATOMIC_CONSUME)))); | 
| 692 | } | 
| 693 |  | 
// Non-atomic initialization of the storage. The volatile overload performs a
// byte-wise volatile copy; the non-volatile one a plain assignment.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a,  _Tp __val) {
  __cxx_atomic_assign_volatile(__a->__a_value, __val);
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a,  _Tp __val) {
  __a->__a_value = __val;
}
| 705 |  | 
// Fences map directly onto the corresponding GCC builtins.
_LIBCPP_INLINE_VISIBILITY inline
void __cxx_atomic_thread_fence(memory_order __order) {
  __atomic_thread_fence(__to_gcc_order(__order));
}

_LIBCPP_INLINE_VISIBILITY inline
void __cxx_atomic_signal_fence(memory_order __order) {
  __atomic_signal_fence(__to_gcc_order(__order));
}
| 715 |  | 
// Atomic store. Uses the generic (pointer-argument) form of __atomic_store
// so any trivially copyable _Tp is supported, not only integral types.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a,  _Tp __val,
                        memory_order __order) {
  __atomic_store(&__a->__a_value, &__val,
                 __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a,  _Tp __val,
                        memory_order __order) {
  __atomic_store(&__a->__a_value, &__val,
                 __to_gcc_order(__order));
}
| 731 |  | 
// Atomic load: the generic builtin writes the loaded value into a local,
// which is then returned by value.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a,
                      memory_order __order) {
  _Tp __ret;
  __atomic_load(&__a->__a_value, &__ret,
                __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
  _Tp __ret;
  __atomic_load(&__a->__a_value, &__ret,
                __to_gcc_order(__order));
  return __ret;
}
| 750 |  | 
// Atomic exchange: stores __value and returns the previous value via the
// builtin's out-parameter.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a,
                          _Tp __value, memory_order __order) {
  _Tp __ret;
  __atomic_exchange(&__a->__a_value, &__value, &__ret,
                    __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value,
                          memory_order __order) {
  _Tp __ret;
  __atomic_exchange(&__a->__a_value, &__value, &__ret,
                    __to_gcc_order(__order));
  return __ret;
}
| 770 |  | 
// Strong compare-exchange: the 'false' argument selects the strong variant
// of the builtin (no spurious failure). On failure, the builtin updates
// *__expected with the observed value; the failure order is filtered
// through __to_gcc_failure_order to strip any release component.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_strong(
    volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
    memory_order __success, memory_order __failure) {
  return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                   false,
                                   __to_gcc_order(__success),
                                   __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_strong(
    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
    memory_order __failure) {
  return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                   false,
                                   __to_gcc_order(__success),
                                   __to_gcc_failure_order(__failure));
}
| 792 |  | 
// Weak compare-exchange: the 'true' argument selects the weak variant of
// the builtin, which may fail spuriously (callers retry in a loop).
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_weak(
    volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
    memory_order __success, memory_order __failure) {
  return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                   true,
                                   __to_gcc_order(__success),
                                   __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_weak(
    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
    memory_order __failure) {
  return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                   true,
                                   __to_gcc_order(__success),
                                   __to_gcc_failure_order(__failure));
}
| 814 |  | 
// Scale factor for fetch_add/fetch_sub deltas: 1 for non-pointer types,
// sizeof(_Tp) for _Tp*, so that a delta expressed in elements becomes the
// byte count the builtin expects.
template <typename _Tp>
struct __skip_amt { enum {value = 1}; };

template <typename _Tp>
struct __skip_amt<_Tp*> { enum {value = sizeof(_Tp)}; };

// FIXME: Haven't figured out what the spec says about using arrays with
// atomic_fetch_add. Force a failure rather than creating bad behavior.
// (No ::value member means the expressions below fail to compile.)
template <typename _Tp>
struct __skip_amt<_Tp[]> { };
template <typename _Tp, int n>
struct __skip_amt<_Tp[n]> { };
| 827 |  | 
| 828 | template <typename _Tp, typename _Td> | 
| 829 | _LIBCPP_INLINE_VISIBILITY | 
| 830 | _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, | 
| 831 |                            _Td __delta, memory_order __order) { | 
| 832 |   return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value, | 
| 833 |                             __to_gcc_order(__order)); | 
| 834 | } | 
| 835 |  | 
| 836 | template <typename _Tp, typename _Td> | 
| 837 | _LIBCPP_INLINE_VISIBILITY | 
| 838 | _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, | 
| 839 |                            memory_order __order) { | 
| 840 |   return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value, | 
| 841 |                             __to_gcc_order(__order)); | 
| 842 | } | 
| 843 |  | 
| 844 | template <typename _Tp, typename _Td> | 
| 845 | _LIBCPP_INLINE_VISIBILITY | 
| 846 | _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, | 
| 847 |                            _Td __delta, memory_order __order) { | 
| 848 |   return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value, | 
| 849 |                             __to_gcc_order(__order)); | 
| 850 | } | 
| 851 |  | 
| 852 | template <typename _Tp, typename _Td> | 
| 853 | _LIBCPP_INLINE_VISIBILITY | 
| 854 | _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, | 
| 855 |                            memory_order __order) { | 
| 856 |   return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value, | 
| 857 |                             __to_gcc_order(__order)); | 
| 858 | } | 
| 859 |  | 
// fetch_and: bitwise AND of the stored value with __pattern; returns the
// previous value. No delta scaling is needed for bitwise operations.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a,
                           _Tp __pattern, memory_order __order) {
  return __atomic_fetch_and(&__a->__a_value, __pattern,
                            __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a,
                           _Tp __pattern, memory_order __order) {
  return __atomic_fetch_and(&__a->__a_value, __pattern,
                            __to_gcc_order(__order));
}
| 875 |  | 
// fetch_or: bitwise OR of the stored value with __pattern; returns the
// previous value.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a,
                          _Tp __pattern, memory_order __order) {
  return __atomic_fetch_or(&__a->__a_value, __pattern,
                           __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
                          memory_order __order) {
  return __atomic_fetch_or(&__a->__a_value, __pattern,
                           __to_gcc_order(__order));
}
| 891 |  | 
// fetch_xor: bitwise XOR of the stored value with __pattern; returns the
// previous value.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a,
                           _Tp __pattern, memory_order __order) {
  return __atomic_fetch_xor(&__a->__a_value, __pattern,
                            __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
                           memory_order __order) {
  return __atomic_fetch_xor(&__a->__a_value, __pattern,
                            __to_gcc_order(__order));
}
| 907 |  | 
// Lock-free query by size; the null second argument asks the builtin about
// a typically-aligned object of that size rather than a specific object.
#define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
| 909 |  | 
| 910 | #elif defined(_LIBCPP_HAS_C_ATOMIC_IMP) | 
| 911 |  | 
// Storage type for the Clang C11 implementation: the member is declared
// _Atomic(_Tp) so the __c11_atomic_* builtins can be applied directly.
template <typename _Tp>
struct __cxx_atomic_base_impl {

  _LIBCPP_INLINE_VISIBILITY
#ifndef _LIBCPP_CXX03_LANG
    __cxx_atomic_base_impl() _NOEXCEPT = default;
#else
    // C++03 has no "= default"; value-initialize the member explicitly.
    __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
#endif // _LIBCPP_CXX03_LANG
  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT
    : __a_value(value) {}
  _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
};
| 925 |  | 
// Lock-free query by size via the Clang C11 builtin.
#define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
| 927 |  | 
// Fences map onto the __c11_atomic_* builtins; the order is passed as its
// underlying integer value.
_LIBCPP_INLINE_VISIBILITY inline
void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
    __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
}

_LIBCPP_INLINE_VISIBILITY inline
void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
    __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
}
| 937 |  | 
// Non-atomic initialization of the _Atomic member via __c11_atomic_init.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
    __c11_atomic_init(&__a->__a_value, __val);
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val) _NOEXCEPT {
    __c11_atomic_init(&__a->__a_value, __val);
}
| 948 |  | 
// Atomic store via __c11_atomic_store.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
    __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) _NOEXCEPT {
    __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
}
| 959 |  | 
// Atomic load. The const_cast strips const from the member pointer because
// the __c11_atomic_load builtin takes a pointer to non-const _Atomic.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
    using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
    return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
    using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
    return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
}
| 972 |  | 
// Atomic exchange: returns the previous value.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
    return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NOEXCEPT {
    return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
}
| 983 |  | 
// Strong compare-exchange: on failure the builtin writes the observed value
// into *__expected and returns false.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
    return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
    return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
}
| 994 |  | 
// Weak compare-exchange: may fail spuriously; callers retry in a loop.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
    return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
    return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value,  static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
}
| 1005 |  | 
// fetch_add. The pointer overloads take the delta as ptrdiff_t (in
// elements); the C11 builtin handles the element scaling for _Atomic(_Tp*).
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}

template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}
| 1027 |  | 
// fetch_sub; mirrors fetch_add, including the pointer overloads.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
}
| 1048 |  | 
// fetch_and: bitwise AND; returns the previous value.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
}
| 1059 |  | 
// fetch_or: bitwise OR; returns the previous value.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
}
| 1070 |  | 
// fetch_xor: bitwise XOR; returns the previous value.
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template<class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
    return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
}
| 1081 |  | 
| 1082 | #endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP | 
| 1083 |  | 
// std::kill_dependency [atomics.order]: returns its argument; passing a
// value through it terminates any memory_order_consume dependency chain
// the value carried.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp kill_dependency(_Tp __y) _NOEXCEPT
{
    return __y;
}
| 1090 |  | 
// Map the standard ATOMIC_*_LOCK_FREE macros onto the compiler-predefined
// values: Clang spells them __CLANG_ATOMIC_*, GCC __GCC_ATOMIC_*.
#if defined(__CLANG_ATOMIC_BOOL_LOCK_FREE)
# define ATOMIC_BOOL_LOCK_FREE      __CLANG_ATOMIC_BOOL_LOCK_FREE
# define ATOMIC_CHAR_LOCK_FREE      __CLANG_ATOMIC_CHAR_LOCK_FREE
# define ATOMIC_CHAR16_T_LOCK_FREE  __CLANG_ATOMIC_CHAR16_T_LOCK_FREE
# define ATOMIC_CHAR32_T_LOCK_FREE  __CLANG_ATOMIC_CHAR32_T_LOCK_FREE
# define ATOMIC_WCHAR_T_LOCK_FREE   __CLANG_ATOMIC_WCHAR_T_LOCK_FREE
# define ATOMIC_SHORT_LOCK_FREE     __CLANG_ATOMIC_SHORT_LOCK_FREE
# define ATOMIC_INT_LOCK_FREE       __CLANG_ATOMIC_INT_LOCK_FREE
# define ATOMIC_LONG_LOCK_FREE      __CLANG_ATOMIC_LONG_LOCK_FREE
# define ATOMIC_LLONG_LOCK_FREE     __CLANG_ATOMIC_LLONG_LOCK_FREE
# define ATOMIC_POINTER_LOCK_FREE   __CLANG_ATOMIC_POINTER_LOCK_FREE
#elif defined(__GCC_ATOMIC_BOOL_LOCK_FREE)
# define ATOMIC_BOOL_LOCK_FREE      __GCC_ATOMIC_BOOL_LOCK_FREE
# define ATOMIC_CHAR_LOCK_FREE      __GCC_ATOMIC_CHAR_LOCK_FREE
# define ATOMIC_CHAR16_T_LOCK_FREE  __GCC_ATOMIC_CHAR16_T_LOCK_FREE
# define ATOMIC_CHAR32_T_LOCK_FREE  __GCC_ATOMIC_CHAR32_T_LOCK_FREE
# define ATOMIC_WCHAR_T_LOCK_FREE   __GCC_ATOMIC_WCHAR_T_LOCK_FREE
# define ATOMIC_SHORT_LOCK_FREE     __GCC_ATOMIC_SHORT_LOCK_FREE
# define ATOMIC_INT_LOCK_FREE       __GCC_ATOMIC_INT_LOCK_FREE
# define ATOMIC_LONG_LOCK_FREE      __GCC_ATOMIC_LONG_LOCK_FREE
# define ATOMIC_LLONG_LOCK_FREE     __GCC_ATOMIC_LLONG_LOCK_FREE
# define ATOMIC_POINTER_LOCK_FREE   __GCC_ATOMIC_POINTER_LOCK_FREE
#endif
| 1114 |  | 
| 1115 | #ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS | 
| 1116 |  | 
// Lock-based fallback used under _LIBCPP_ATOMIC_ONLY_USE_BUILTINS for types
// the builtins cannot handle lock-free: a plain value guarded by a spinlock
// built from an atomic flag.
template<typename _Tp>
struct __cxx_atomic_lock_impl {

  _LIBCPP_INLINE_VISIBILITY
  __cxx_atomic_lock_impl() _NOEXCEPT
    : __a_value(), __a_lock(0) {}
  _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR explicit
  __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT
    : __a_value(value), __a_lock(0) {}

  _Tp __a_value;  // the stored value; only accessed while __a_lock is held
  // mutable so const/volatile member functions (__read) can still lock.
  mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_lock;

  // Spin until the acquire-exchange observes the flag previously clear.
  _LIBCPP_INLINE_VISIBILITY void __lock() const volatile {
    while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
        /*spin*/;
  }
  _LIBCPP_INLINE_VISIBILITY void __lock() const {
    while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
        /*spin*/;
  }
  // Release-store clears the flag, publishing writes made under the lock.
  _LIBCPP_INLINE_VISIBILITY void __unlock() const volatile {
    __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
  }
  _LIBCPP_INLINE_VISIBILITY void __unlock() const {
    __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
  }
  // Copy the value out under the lock; the volatile overload needs the
  // byte-wise volatile copy helper.
  _LIBCPP_INLINE_VISIBILITY _Tp __read() const volatile {
    __lock();
    _Tp __old;
    __cxx_atomic_assign_volatile(__old, __a_value);
    __unlock();
    return __old;
  }
  _LIBCPP_INLINE_VISIBILITY _Tp __read() const {
    __lock();
    _Tp __old = __a_value;
    __unlock();
    return __old;
  }
};
| 1158 |  | 
// Initialization is not atomic, so no locking is performed here.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp>* __a,  _Tp __val) {
  __cxx_atomic_assign_volatile(__a->__a_value, __val);
}
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a,  _Tp __val) {
  __a->__a_value = __val;
}
| 1169 |  | 
// Store under the spinlock. The memory_order argument is ignored: the
// lock's acquire/release already order the access.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp>* __a,  _Tp __val, memory_order) {
  __a->__lock();
  __cxx_atomic_assign_volatile(__a->__a_value, __val);
  __a->__unlock();
}
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp>* __a,  _Tp __val, memory_order) {
  __a->__lock();
  __a->__a_value = __val;
  __a->__unlock();
}
| 1184 |  | 
// Load under the spinlock; delegates to the impl's __read() helper.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
  return __a->__read();
}
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
  return __a->__read();
}
| 1195 |  | 
// Exchange under the spinlock: save the old value, write the new one,
// return the old.
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, __value);
  __a->__unlock();
  return __old;
}
template <typename _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
  __a->__lock();
  _Tp __old = __a->__a_value;
  __a->__a_value = __value;
  __a->__unlock();
  return __old;
}
| 1215 |  | 
| 1216 | template <typename _Tp> | 
| 1217 | _LIBCPP_INLINE_VISIBILITY | 
| 1218 | bool __cxx_atomic_compare_exchange_strong(volatile __cxx_atomic_lock_impl<_Tp>* __a, | 
| 1219 |                                           _Tp* __expected, _Tp __value, memory_order, memory_order) { | 
| 1220 |   __a->__lock(); | 
| 1221 |   _Tp temp; | 
| 1222 |   __cxx_atomic_assign_volatile(temp, __a->__a_value); | 
| 1223 |   bool __ret = temp == *__expected; | 
| 1224 |   if(__ret) | 
| 1225 |     __cxx_atomic_assign_volatile(__a->__a_value, __value); | 
| 1226 |   else | 
| 1227 |     __cxx_atomic_assign_volatile(*__expected, __a->__a_value); | 
| 1228 |   __a->__unlock(); | 
| 1229 |   return __ret; | 
| 1230 | } | 
| 1231 | template <typename _Tp> | 
| 1232 | _LIBCPP_INLINE_VISIBILITY | 
| 1233 | bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_lock_impl<_Tp>* __a, | 
| 1234 |                                           _Tp* __expected, _Tp __value, memory_order, memory_order) { | 
| 1235 |   __a->__lock(); | 
| 1236 |   bool __ret = __a->__a_value == *__expected; | 
| 1237 |   if(__ret) | 
| 1238 |     __a->__a_value = __value; | 
| 1239 |   else | 
| 1240 |     *__expected = __a->__a_value; | 
| 1241 |   __a->__unlock(); | 
| 1242 |   return __ret; | 
| 1243 | } | 
| 1244 |  | 
| 1245 | template <typename _Tp> | 
| 1246 | _LIBCPP_INLINE_VISIBILITY | 
| 1247 | bool __cxx_atomic_compare_exchange_weak(volatile __cxx_atomic_lock_impl<_Tp>* __a, | 
| 1248 |                                         _Tp* __expected, _Tp __value, memory_order, memory_order) { | 
| 1249 |   __a->__lock(); | 
| 1250 |   _Tp temp; | 
| 1251 |   __cxx_atomic_assign_volatile(temp, __a->__a_value); | 
| 1252 |   bool __ret = temp == *__expected; | 
| 1253 |   if(__ret) | 
| 1254 |     __cxx_atomic_assign_volatile(__a->__a_value, __value); | 
| 1255 |   else | 
| 1256 |     __cxx_atomic_assign_volatile(*__expected, __a->__a_value); | 
| 1257 |   __a->__unlock(); | 
| 1258 |   return __ret; | 
| 1259 | } | 
| 1260 | template <typename _Tp> | 
| 1261 | _LIBCPP_INLINE_VISIBILITY | 
| 1262 | bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_lock_impl<_Tp>* __a, | 
| 1263 |                                         _Tp* __expected, _Tp __value, memory_order, memory_order) { | 
| 1264 |   __a->__lock(); | 
| 1265 |   bool __ret = __a->__a_value == *__expected; | 
| 1266 |   if(__ret) | 
| 1267 |     __a->__a_value = __value; | 
| 1268 |   else | 
| 1269 |     *__expected = __a->__a_value; | 
| 1270 |   __a->__unlock(); | 
| 1271 |   return __ret; | 
| 1272 | } | 
| 1273 |  | 
// Lock-based fetch_add for non-pointer types: read-modify-write under the
// spinlock, returning the previous value.
template <typename _Tp, typename _Td>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp>* __a,
                           _Td __delta, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old + __delta));
  __a->__unlock();
  return __old;
}
template <typename _Tp, typename _Td>
_LIBCPP_INLINE_VISIBILITY
_Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp>* __a,
                           _Td __delta, memory_order) {
  __a->__lock();
  _Tp __old = __a->__a_value;
  __a->__a_value += __delta;
  __a->__unlock();
  return __old;
}
| 1295 |  | 
| 1296 | template <typename _Tp, typename _Td> | 
| 1297 | _LIBCPP_INLINE_VISIBILITY | 
| 1298 | _Tp* __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*>* __a, | 
| 1299 |                            ptrdiff_t __delta, memory_order) { | 
| 1300 |   __a->__lock(); | 
| 1301 |   _Tp* __old; | 
| 1302 |   __cxx_atomic_assign_volatile(__old, __a->__a_value); | 
| 1303 |   __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta); | 
| 1304 |   __a->__unlock(); | 
| 1305 |   return __old; | 
| 1306 | } | 
| 1307 | template <typename _Tp, typename _Td> | 
| 1308 | _LIBCPP_INLINE_VISIBILITY | 
| 1309 | _Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*>* __a, | 
| 1310 |                            ptrdiff_t __delta, memory_order) { | 
| 1311 |   __a->__lock(); | 
| 1312 |   _Tp* __old = __a->__a_value; | 
| 1313 |   __a->__a_value += __delta; | 
| 1314 |   __a->__unlock(); | 
| 1315 |   return __old; | 
| 1316 | } | 
| 1317 |  | 
| 1318 | template <typename _Tp, typename _Td> | 
| 1319 | _LIBCPP_INLINE_VISIBILITY | 
| 1320 | _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp>* __a, | 
| 1321 |                            _Td __delta, memory_order) { | 
| 1322 |   __a->__lock(); | 
| 1323 |   _Tp __old; | 
| 1324 |   __cxx_atomic_assign_volatile(__old, __a->__a_value); | 
| 1325 |   __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta)); | 
| 1326 |   __a->__unlock(); | 
| 1327 |   return __old; | 
| 1328 | } | 
| 1329 | template <typename _Tp, typename _Td> | 
| 1330 | _LIBCPP_INLINE_VISIBILITY | 
| 1331 | _Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp>* __a, | 
| 1332 |                            _Td __delta, memory_order) { | 
| 1333 |   __a->__lock(); | 
| 1334 |   _Tp __old = __a->__a_value; | 
| 1335 |   __a->__a_value -= __delta; | 
| 1336 |   __a->__unlock(); | 
| 1337 |   return __old; | 
| 1338 | } | 
| 1339 |  | 
| 1340 | template <typename _Tp> | 
| 1341 | _LIBCPP_INLINE_VISIBILITY | 
| 1342 | _Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp>* __a, | 
| 1343 |                            _Tp __pattern, memory_order) { | 
| 1344 |   __a->__lock(); | 
| 1345 |   _Tp __old; | 
| 1346 |   __cxx_atomic_assign_volatile(__old, __a->__a_value); | 
| 1347 |   __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern)); | 
| 1348 |   __a->__unlock(); | 
| 1349 |   return __old; | 
| 1350 | } | 
| 1351 | template <typename _Tp> | 
| 1352 | _LIBCPP_INLINE_VISIBILITY | 
| 1353 | _Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp>* __a, | 
| 1354 |                            _Tp __pattern, memory_order) { | 
| 1355 |   __a->__lock(); | 
| 1356 |   _Tp __old = __a->__a_value; | 
| 1357 |   __a->__a_value &= __pattern; | 
| 1358 |   __a->__unlock(); | 
| 1359 |   return __old; | 
| 1360 | } | 
| 1361 |  | 
| 1362 | template <typename _Tp> | 
| 1363 | _LIBCPP_INLINE_VISIBILITY | 
| 1364 | _Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp>* __a, | 
| 1365 |                           _Tp __pattern, memory_order) { | 
| 1366 |   __a->__lock(); | 
| 1367 |   _Tp __old; | 
| 1368 |   __cxx_atomic_assign_volatile(__old, __a->__a_value); | 
| 1369 |   __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern)); | 
| 1370 |   __a->__unlock(); | 
| 1371 |   return __old; | 
| 1372 | } | 
| 1373 | template <typename _Tp> | 
| 1374 | _LIBCPP_INLINE_VISIBILITY | 
| 1375 | _Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp>* __a, | 
| 1376 |                           _Tp __pattern, memory_order) { | 
| 1377 |   __a->__lock(); | 
| 1378 |   _Tp __old = __a->__a_value; | 
| 1379 |   __a->__a_value |= __pattern; | 
| 1380 |   __a->__unlock(); | 
| 1381 |   return __old; | 
| 1382 | } | 
| 1383 |  | 
| 1384 | template <typename _Tp> | 
| 1385 | _LIBCPP_INLINE_VISIBILITY | 
| 1386 | _Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp>* __a, | 
| 1387 |                            _Tp __pattern, memory_order) { | 
| 1388 |   __a->__lock(); | 
| 1389 |   _Tp __old; | 
| 1390 |   __cxx_atomic_assign_volatile(__old, __a->__a_value); | 
| 1391 |   __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern)); | 
| 1392 |   __a->__unlock(); | 
| 1393 |   return __old; | 
| 1394 | } | 
| 1395 | template <typename _Tp> | 
| 1396 | _LIBCPP_INLINE_VISIBILITY | 
| 1397 | _Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp>* __a, | 
| 1398 |                            _Tp __pattern, memory_order) { | 
| 1399 |   __a->__lock(); | 
| 1400 |   _Tp __old = __a->__a_value; | 
| 1401 |   __a->__a_value ^= __pattern; | 
| 1402 |   __a->__unlock(); | 
| 1403 |   return __old; | 
| 1404 | } | 
| 1405 |  | 
#ifdef __cpp_lib_atomic_is_always_lock_free

// Compile-time "is this type always lock-free?" predicate.  When the
// feature-test macro is available, ask the compiler builtin directly
// (second argument 0 requests the typical alignment for an object of
// that size — see the GCC __atomic builtins documentation).
template<typename _Tp> struct __cxx_is_always_lock_free {
    enum { __value = __atomic_always_lock_free(sizeof(_Tp), 0) }; };

#else

// Fallback: derive the answer from the C ATOMIC_*_LOCK_FREE macros.
// A macro value of 2 means "always lock-free"; 0 or 1 is treated as
// not always lock-free.  Default is false for types with no mapping.
template<typename _Tp> struct __cxx_is_always_lock_free { enum { __value = false }; };
// Implementations must match the C ATOMIC_*_LOCK_FREE macro values.
template<> struct __cxx_is_always_lock_free<bool> { enum { __value = 2 == ATOMIC_BOOL_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<char> { enum { __value = 2 == ATOMIC_CHAR_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<signed char> { enum { __value = 2 == ATOMIC_CHAR_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<unsigned char> { enum { __value = 2 == ATOMIC_CHAR_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<char16_t> { enum { __value = 2 == ATOMIC_CHAR16_T_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<char32_t> { enum { __value = 2 == ATOMIC_CHAR32_T_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<wchar_t> { enum { __value = 2 == ATOMIC_WCHAR_T_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<short> { enum { __value = 2 == ATOMIC_SHORT_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<unsigned short> { enum { __value = 2 == ATOMIC_SHORT_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<int> { enum { __value = 2 == ATOMIC_INT_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<unsigned int> { enum { __value = 2 == ATOMIC_INT_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<long> { enum { __value = 2 == ATOMIC_LONG_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<unsigned long> { enum { __value = 2 == ATOMIC_LONG_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<long long> { enum { __value = 2 == ATOMIC_LLONG_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<unsigned long long> { enum { __value = 2 == ATOMIC_LLONG_LOCK_FREE }; };
template<typename _Tp> struct __cxx_is_always_lock_free<_Tp*> { enum { __value = 2 == ATOMIC_POINTER_LOCK_FREE }; };
template<> struct __cxx_is_always_lock_free<std::nullptr_t> { enum { __value = 2 == ATOMIC_POINTER_LOCK_FREE }; };

#endif //__cpp_lib_atomic_is_always_lock_free
| 1434 |  | 
// Storage selector for std::atomic.  In this branch (the matching #if, per
// the #endif comment, tests _LIBCPP_ATOMIC_ONLY_USE_BUILTINS and opens above
// this chunk), always-lock-free types use the compiler-builtin backend and
// all other types fall back to the lock-based backend.
template <typename _Tp,
          typename _Base = typename conditional<__cxx_is_always_lock_free<_Tp>::__value,
                                                __cxx_atomic_base_impl<_Tp>,
                                                __cxx_atomic_lock_impl<_Tp> >::type>
#else
// Builtins-only configuration: every type uses the compiler-builtin backend.
template <typename _Tp,
          typename _Base = __cxx_atomic_base_impl<_Tp> >
#endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS
struct __cxx_atomic_impl : public _Base {

#if _GNUC_VER >= 501
    // Version-gated on GCC >= 5.1 — presumably because is_trivially_copyable
    // is unavailable on older GCC; TODO confirm against __config.
    static_assert(is_trivially_copyable<_Tp>::value,
      "std::atomic<Tp> requires that 'Tp' be a trivially copyable type" );
#endif

  // Defaulted default constructor; the constexpr constructor forwards the
  // initial value to the selected backend's constructor.
  _LIBCPP_INLINE_VISIBILITY __cxx_atomic_impl() _NOEXCEPT _LIBCPP_DEFAULT
  _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp value) _NOEXCEPT
    : _Base(value) {}
};
| 1454 |  | 
| 1455 | // general atomic<T> | 
| 1456 |  | 
// Common implementation of std::atomic's operations.  The bool template
// parameter selects this general version (false) or the integral
// specialization below (true, for integral types other than bool).
template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
struct __atomic_base  // false
{
    // mutable: the const member functions (load, compare_exchange on a const
    // object, ...) still hand a non-const pointer to the backend.
    mutable __cxx_atomic_impl<_Tp> __a_;

#if defined(__cpp_lib_atomic_is_always_lock_free)
  static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0);
#endif

    _LIBCPP_INLINE_VISIBILITY
    bool is_lock_free() const volatile _NOEXCEPT
        {return __cxx_atomic_is_lock_free(sizeof(_Tp));}
    _LIBCPP_INLINE_VISIBILITY
    bool is_lock_free() const _NOEXCEPT
        // Delegate to the volatile overload so both share one implementation.
        {return static_cast<__atomic_base const volatile*>(this)->is_lock_free();}
    // The _LIBCPP_CHECK_*_MEMORY_ORDER macros (defined elsewhere in the
    // library) appear to attach diagnostics for memory orders that are
    // invalid for the operation (e.g. release orders on a load).
    _LIBCPP_INLINE_VISIBILITY
    void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
        {__cxx_atomic_store(&__a_, __d, __m);}
    _LIBCPP_INLINE_VISIBILITY
    void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
        {__cxx_atomic_store(&__a_, __d, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp load(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
        {return __cxx_atomic_load(&__a_, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
        {return __cxx_atomic_load(&__a_, __m);}
    // Implicit conversion is a seq_cst load.
    _LIBCPP_INLINE_VISIBILITY
    operator _Tp() const volatile _NOEXCEPT {return load();}
    _LIBCPP_INLINE_VISIBILITY
    operator _Tp() const _NOEXCEPT          {return load();}
    _LIBCPP_INLINE_VISIBILITY
    _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_exchange(&__a_, __d, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_exchange(&__a_, __d, __m);}
    // Two-order CAS forms: __s is the success order, __f the failure order.
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __s, memory_order __f) volatile _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __s, memory_order __f) _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __s, memory_order __f) volatile _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __s, memory_order __f) _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
    // Single-order CAS forms: the same order __m is used for success and
    // failure.
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                              memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                              memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}

    _LIBCPP_INLINE_VISIBILITY
    __atomic_base() _NOEXCEPT _LIBCPP_DEFAULT

    _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
    __atomic_base(_Tp __d) _NOEXCEPT : __a_(__d) {}

    // std::atomic is neither copyable nor copy-assignable.
#ifndef _LIBCPP_CXX03_LANG
    __atomic_base(const __atomic_base&) = delete;
    __atomic_base& operator=(const __atomic_base&) = delete;
    __atomic_base& operator=(const __atomic_base&) volatile = delete;
#else
private:
    __atomic_base(const __atomic_base&);
    __atomic_base& operator=(const __atomic_base&);
    __atomic_base& operator=(const __atomic_base&) volatile;
#endif
};
| 1552 |  | 
#if defined(__cpp_lib_atomic_is_always_lock_free)
// Out-of-class definition for the static data member declared in
// __atomic_base, so that odr-uses of is_always_lock_free link.
template <class _Tp, bool __b>
_LIBCPP_CONSTEXPR bool __atomic_base<_Tp, __b>::is_always_lock_free;
#endif
| 1557 |  | 
| 1558 | // atomic<Integral> | 
| 1559 |  | 
// Specialization for integral types (except bool): adds the arithmetic and
// bitwise fetch operations plus the operator shorthands on top of the
// general base.
template <class _Tp>
struct __atomic_base<_Tp, true>
    : public __atomic_base<_Tp, false>
{
    typedef __atomic_base<_Tp, false> __base;
    _LIBCPP_INLINE_VISIBILITY
    __atomic_base() _NOEXCEPT _LIBCPP_DEFAULT
    _LIBCPP_INLINE_VISIBILITY
    _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __base(__d) {}

    // Each fetch_* applies the operation atomically and returns the value
    // held immediately before the operation.
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);}

    // Post-inc/dec return the old value; pre-inc/dec and the compound
    // operators recompute the updated value locally from the fetch result.
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++(int) volatile _NOEXCEPT      {return fetch_add(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++(int) _NOEXCEPT               {return fetch_add(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--(int) volatile _NOEXCEPT      {return fetch_sub(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--(int) _NOEXCEPT               {return fetch_sub(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++() volatile _NOEXCEPT         {return fetch_add(_Tp(1)) + _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++() _NOEXCEPT                  {return fetch_add(_Tp(1)) + _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--() volatile _NOEXCEPT         {return fetch_sub(_Tp(1)) - _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--() _NOEXCEPT                  {return fetch_sub(_Tp(1)) - _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator+=(_Tp __op) volatile _NOEXCEPT {return fetch_add(__op) + __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator+=(_Tp __op) _NOEXCEPT          {return fetch_add(__op) + __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator-=(_Tp __op) volatile _NOEXCEPT {return fetch_sub(__op) - __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator-=(_Tp __op) _NOEXCEPT          {return fetch_sub(__op) - __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator&=(_Tp __op) volatile _NOEXCEPT {return fetch_and(__op) & __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator&=(_Tp __op) _NOEXCEPT          {return fetch_and(__op) & __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator|=(_Tp __op) volatile _NOEXCEPT {return fetch_or(__op) | __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator|=(_Tp __op) _NOEXCEPT          {return fetch_or(__op) | __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator^=(_Tp __op) volatile _NOEXCEPT {return fetch_xor(__op) ^ __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator^=(_Tp __op) _NOEXCEPT          {return fetch_xor(__op) ^ __op;}
};
| 1638 |  | 
| 1639 | // atomic<T> | 
| 1640 |  | 
// std::atomic primary template: inherits all operations from __atomic_base
// and adds assignment from _Tp, which performs a seq_cst store and returns
// the stored value (per the standard, assignment yields __d, not *this).
template <class _Tp>
struct atomic
    : public __atomic_base<_Tp>
{
    typedef __atomic_base<_Tp> __base;
    _LIBCPP_INLINE_VISIBILITY
    atomic() _NOEXCEPT _LIBCPP_DEFAULT
    _LIBCPP_INLINE_VISIBILITY
    _LIBCPP_CONSTEXPR atomic(_Tp __d) _NOEXCEPT : __base(__d) {}

    _LIBCPP_INLINE_VISIBILITY
    _Tp operator=(_Tp __d) volatile _NOEXCEPT
        {__base::store(__d); return __d;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator=(_Tp __d) _NOEXCEPT
        {__base::store(__d); return __d;}
};
| 1658 |  | 
| 1659 | // atomic<T*> | 
| 1660 |  | 
// Partial specialization for pointers: adds ptrdiff_t-based fetch_add /
// fetch_sub and pointer-arithmetic operator shorthands.
template <class _Tp>
struct atomic<_Tp*>
    : public __atomic_base<_Tp*>
{
    typedef __atomic_base<_Tp*> __base;
    _LIBCPP_INLINE_VISIBILITY
    atomic() _NOEXCEPT _LIBCPP_DEFAULT
    _LIBCPP_INLINE_VISIBILITY
    _LIBCPP_CONSTEXPR atomic(_Tp* __d) _NOEXCEPT : __base(__d) {}

    // Assignment stores with seq_cst ordering and returns the stored pointer.
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator=(_Tp* __d) volatile _NOEXCEPT
        {__base::store(__d); return __d;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator=(_Tp* __d) _NOEXCEPT
        {__base::store(__d); return __d;}

    // fetch_add/fetch_sub return the pointer value held before the update.
    _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst)
                                                                        volatile _NOEXCEPT
        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst)
                                                                        volatile _NOEXCEPT
        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}

    // Post-inc/dec return the old pointer; pre-forms and compound operators
    // recompute the updated pointer locally from the fetch result.
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator++(int) volatile _NOEXCEPT            {return fetch_add(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator++(int) _NOEXCEPT                     {return fetch_add(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator--(int) volatile _NOEXCEPT            {return fetch_sub(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator--(int) _NOEXCEPT                     {return fetch_sub(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator++() volatile _NOEXCEPT               {return fetch_add(1) + 1;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator++() _NOEXCEPT                        {return fetch_add(1) + 1;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator--() volatile _NOEXCEPT               {return fetch_sub(1) - 1;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator--() _NOEXCEPT                        {return fetch_sub(1) - 1;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator+=(ptrdiff_t __op) volatile _NOEXCEPT {return fetch_add(__op) + __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator+=(ptrdiff_t __op) _NOEXCEPT          {return fetch_add(__op) + __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator-=(ptrdiff_t __op) volatile _NOEXCEPT {return fetch_sub(__op) - __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp* operator-=(ptrdiff_t __op) _NOEXCEPT          {return fetch_sub(__op) - __op;}
};
| 1718 |  | 
| 1719 | // atomic_is_lock_free | 
| 1720 |  | 
// Free-function form of atomic<T>::is_lock_free (C-compatibility API);
// volatile and non-volatile overloads.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_is_lock_free(const volatile atomic<_Tp>* __o) _NOEXCEPT
{
    return __o->is_lock_free();
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_is_lock_free(const atomic<_Tp>* __o) _NOEXCEPT
{
    return __o->is_lock_free();
}
| 1736 |  | 
| 1737 | // atomic_init | 
| 1738 |  | 
// Non-atomically initializes *__o with __d by calling the backend's
// __cxx_atomic_init directly (bypasses store()'s ordering machinery).
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
void
atomic_init(volatile atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
{
    __cxx_atomic_init(&__o->__a_, __d);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
void
atomic_init(atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
{
    __cxx_atomic_init(&__o->__a_, __d);
}
| 1754 |  | 
| 1755 | // atomic_store | 
| 1756 |  | 
// Free-function form of atomic<T>::store with the default seq_cst ordering.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
void
atomic_store(volatile atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
{
    __o->store(__d);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
void
atomic_store(atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
{
    __o->store(__d);
}
| 1772 |  | 
| 1773 | // atomic_store_explicit | 
| 1774 |  | 
// Free-function form of atomic<T>::store with an explicit memory order;
// the check macro flags orders invalid for a store.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
void
atomic_store_explicit(volatile atomic<_Tp>* __o, _Tp __d, memory_order __m) _NOEXCEPT
  _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
{
    __o->store(__d, __m);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
void
atomic_store_explicit(atomic<_Tp>* __o, _Tp __d, memory_order __m) _NOEXCEPT
  _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
{
    __o->store(__d, __m);
}
| 1792 |  | 
| 1793 | // atomic_load | 
| 1794 |  | 
// Free-function form of atomic<T>::load with the default seq_cst ordering.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_load(const volatile atomic<_Tp>* __o) _NOEXCEPT
{
    return __o->load();
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_load(const atomic<_Tp>* __o) _NOEXCEPT
{
    return __o->load();
}
| 1810 |  | 
| 1811 | // atomic_load_explicit | 
| 1812 |  | 
// Free-function form of atomic<T>::load with an explicit memory order;
// the check macro flags orders invalid for a load.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_load_explicit(const volatile atomic<_Tp>* __o, memory_order __m) _NOEXCEPT
  _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
{
    return __o->load(__m);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_load_explicit(const atomic<_Tp>* __o, memory_order __m) _NOEXCEPT
  _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
{
    return __o->load(__m);
}
| 1830 |  | 
| 1831 | // atomic_exchange | 
| 1832 |  | 
// Free-function form of atomic<T>::exchange (seq_cst); returns the value
// held before the exchange.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_exchange(volatile atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
{
    return __o->exchange(__d);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_exchange(atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
{
    return __o->exchange(__d);
}
| 1848 |  | 
| 1849 | // atomic_exchange_explicit | 
| 1850 |  | 
// Free-function form of atomic<T>::exchange with an explicit memory order.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_exchange_explicit(volatile atomic<_Tp>* __o, _Tp __d, memory_order __m) _NOEXCEPT
{
    return __o->exchange(__d, __m);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp
atomic_exchange_explicit(atomic<_Tp>* __o, _Tp __d, memory_order __m) _NOEXCEPT
{
    return __o->exchange(__d, __m);
}
| 1866 |  | 
| 1867 | // atomic_compare_exchange_weak | 
| 1868 |  | 
// Free-function weak CAS (seq_cst); note the expected value is passed by
// pointer here, matching the C interface, and is updated on failure.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_weak(volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d) _NOEXCEPT
{
    return __o->compare_exchange_weak(*__e, __d);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_weak(atomic<_Tp>* __o, _Tp* __e, _Tp __d) _NOEXCEPT
{
    return __o->compare_exchange_weak(*__e, __d);
}
| 1884 |  | 
| 1885 | // atomic_compare_exchange_strong | 
| 1886 |  | 
// Free-function strong CAS (seq_cst); *__e receives the observed value on
// failure.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_strong(volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d) _NOEXCEPT
{
    return __o->compare_exchange_strong(*__e, __d);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_strong(atomic<_Tp>* __o, _Tp* __e, _Tp __d) _NOEXCEPT
{
    return __o->compare_exchange_strong(*__e, __d);
}
| 1902 |  | 
| 1903 | // atomic_compare_exchange_weak_explicit | 
| 1904 |  | 
// Free-function weak CAS with explicit success (__s) and failure (__f)
// orders; the check macro flags invalid order combinations.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_weak_explicit(volatile atomic<_Tp>* __o, _Tp* __e,
                                      _Tp __d,
                                      memory_order __s, memory_order __f) _NOEXCEPT
  _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
{
    return __o->compare_exchange_weak(*__e, __d, __s, __f);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_weak_explicit(atomic<_Tp>* __o, _Tp* __e, _Tp __d,
                                      memory_order __s, memory_order __f) _NOEXCEPT
  _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
{
    return __o->compare_exchange_weak(*__e, __d, __s, __f);
}
| 1925 |  | 
| 1926 | // atomic_compare_exchange_strong_explicit | 
| 1927 |  | 
// Free-function strong CAS with explicit success (__s) and failure (__f)
// orders; the check macro flags invalid order combinations.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_strong_explicit(volatile atomic<_Tp>* __o,
                                        _Tp* __e, _Tp __d,
                                        memory_order __s, memory_order __f) _NOEXCEPT
  _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
{
    return __o->compare_exchange_strong(*__e, __d, __s, __f);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
bool
atomic_compare_exchange_strong_explicit(atomic<_Tp>* __o, _Tp* __e,
                                        _Tp __d,
                                        memory_order __s, memory_order __f) _NOEXCEPT
  _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
{
    return __o->compare_exchange_strong(*__e, __d, __s, __f);
}
| 1949 |  | 
| 1950 | // atomic_fetch_add | 
| 1951 |  | 
// Free-function fetch_add (seq_cst).  The integral overloads are restricted
// via enable_if to integral types other than bool (atomic<bool> has no
// fetch_add); the pointer overloads take a ptrdiff_t offset.
template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
typename enable_if
<
    is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
    _Tp
>::type
atomic_fetch_add(volatile atomic<_Tp>* __o, _Tp __op) _NOEXCEPT
{
    return __o->fetch_add(__op);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
typename enable_if
<
    is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
    _Tp
>::type
atomic_fetch_add(atomic<_Tp>* __o, _Tp __op) _NOEXCEPT
{
    return __o->fetch_add(__op);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp*
atomic_fetch_add(volatile atomic<_Tp*>* __o, ptrdiff_t __op) _NOEXCEPT
{
    return __o->fetch_add(__op);
}

template <class _Tp>
_LIBCPP_INLINE_VISIBILITY
_Tp*
atomic_fetch_add(atomic<_Tp*>* __o, ptrdiff_t __op) _NOEXCEPT
{
    return __o->fetch_add(__op);
}
| 1991 |  | 
| 1992 | // atomic_fetch_add_explicit | 
| 1993 |  | 
| 1994 | template <class _Tp> | 
| 1995 | _LIBCPP_INLINE_VISIBILITY | 
| 1996 | typename enable_if | 
| 1997 | < | 
| 1998 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 1999 |     _Tp | 
| 2000 | >::type | 
| 2001 | atomic_fetch_add_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2002 | { | 
| 2003 |     return __o->fetch_add(__op, __m); | 
| 2004 | } | 
| 2005 |  | 
| 2006 | template <class _Tp> | 
| 2007 | _LIBCPP_INLINE_VISIBILITY | 
| 2008 | typename enable_if | 
| 2009 | < | 
| 2010 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2011 |     _Tp | 
| 2012 | >::type | 
| 2013 | atomic_fetch_add_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2014 | { | 
| 2015 |     return __o->fetch_add(__op, __m); | 
| 2016 | } | 
| 2017 |  | 
| 2018 | template <class _Tp> | 
| 2019 | _LIBCPP_INLINE_VISIBILITY | 
| 2020 | _Tp* | 
| 2021 | atomic_fetch_add_explicit(volatile atomic<_Tp*>* __o, ptrdiff_t __op, | 
| 2022 |                           memory_order __m) _NOEXCEPT | 
| 2023 | { | 
| 2024 |     return __o->fetch_add(__op, __m); | 
| 2025 | } | 
| 2026 |  | 
| 2027 | template <class _Tp> | 
| 2028 | _LIBCPP_INLINE_VISIBILITY | 
| 2029 | _Tp* | 
| 2030 | atomic_fetch_add_explicit(atomic<_Tp*>* __o, ptrdiff_t __op, memory_order __m) _NOEXCEPT | 
| 2031 | { | 
| 2032 |     return __o->fetch_add(__op, __m); | 
| 2033 | } | 
| 2034 |  | 
| 2035 | // atomic_fetch_sub | 
| 2036 |  | 
| 2037 | template <class _Tp> | 
| 2038 | _LIBCPP_INLINE_VISIBILITY | 
| 2039 | typename enable_if | 
| 2040 | < | 
| 2041 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2042 |     _Tp | 
| 2043 | >::type | 
| 2044 | atomic_fetch_sub(volatile atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2045 | { | 
| 2046 |     return __o->fetch_sub(__op); | 
| 2047 | } | 
| 2048 |  | 
| 2049 | template <class _Tp> | 
| 2050 | _LIBCPP_INLINE_VISIBILITY | 
| 2051 | typename enable_if | 
| 2052 | < | 
| 2053 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2054 |     _Tp | 
| 2055 | >::type | 
| 2056 | atomic_fetch_sub(atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2057 | { | 
| 2058 |     return __o->fetch_sub(__op); | 
| 2059 | } | 
| 2060 |  | 
| 2061 | template <class _Tp> | 
| 2062 | _LIBCPP_INLINE_VISIBILITY | 
| 2063 | _Tp* | 
| 2064 | atomic_fetch_sub(volatile atomic<_Tp*>* __o, ptrdiff_t __op) _NOEXCEPT | 
| 2065 | { | 
| 2066 |     return __o->fetch_sub(__op); | 
| 2067 | } | 
| 2068 |  | 
| 2069 | template <class _Tp> | 
| 2070 | _LIBCPP_INLINE_VISIBILITY | 
| 2071 | _Tp* | 
| 2072 | atomic_fetch_sub(atomic<_Tp*>* __o, ptrdiff_t __op) _NOEXCEPT | 
| 2073 | { | 
| 2074 |     return __o->fetch_sub(__op); | 
| 2075 | } | 
| 2076 |  | 
| 2077 | // atomic_fetch_sub_explicit | 
| 2078 |  | 
| 2079 | template <class _Tp> | 
| 2080 | _LIBCPP_INLINE_VISIBILITY | 
| 2081 | typename enable_if | 
| 2082 | < | 
| 2083 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2084 |     _Tp | 
| 2085 | >::type | 
| 2086 | atomic_fetch_sub_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2087 | { | 
| 2088 |     return __o->fetch_sub(__op, __m); | 
| 2089 | } | 
| 2090 |  | 
| 2091 | template <class _Tp> | 
| 2092 | _LIBCPP_INLINE_VISIBILITY | 
| 2093 | typename enable_if | 
| 2094 | < | 
| 2095 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2096 |     _Tp | 
| 2097 | >::type | 
| 2098 | atomic_fetch_sub_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2099 | { | 
| 2100 |     return __o->fetch_sub(__op, __m); | 
| 2101 | } | 
| 2102 |  | 
| 2103 | template <class _Tp> | 
| 2104 | _LIBCPP_INLINE_VISIBILITY | 
| 2105 | _Tp* | 
| 2106 | atomic_fetch_sub_explicit(volatile atomic<_Tp*>* __o, ptrdiff_t __op, | 
| 2107 |                           memory_order __m) _NOEXCEPT | 
| 2108 | { | 
| 2109 |     return __o->fetch_sub(__op, __m); | 
| 2110 | } | 
| 2111 |  | 
| 2112 | template <class _Tp> | 
| 2113 | _LIBCPP_INLINE_VISIBILITY | 
| 2114 | _Tp* | 
| 2115 | atomic_fetch_sub_explicit(atomic<_Tp*>* __o, ptrdiff_t __op, memory_order __m) _NOEXCEPT | 
| 2116 | { | 
| 2117 |     return __o->fetch_sub(__op, __m); | 
| 2118 | } | 
| 2119 |  | 
| 2120 | // atomic_fetch_and | 
| 2121 |  | 
| 2122 | template <class _Tp> | 
| 2123 | _LIBCPP_INLINE_VISIBILITY | 
| 2124 | typename enable_if | 
| 2125 | < | 
| 2126 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2127 |     _Tp | 
| 2128 | >::type | 
| 2129 | atomic_fetch_and(volatile atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2130 | { | 
| 2131 |     return __o->fetch_and(__op); | 
| 2132 | } | 
| 2133 |  | 
| 2134 | template <class _Tp> | 
| 2135 | _LIBCPP_INLINE_VISIBILITY | 
| 2136 | typename enable_if | 
| 2137 | < | 
| 2138 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2139 |     _Tp | 
| 2140 | >::type | 
| 2141 | atomic_fetch_and(atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2142 | { | 
| 2143 |     return __o->fetch_and(__op); | 
| 2144 | } | 
| 2145 |  | 
| 2146 | // atomic_fetch_and_explicit | 
| 2147 |  | 
| 2148 | template <class _Tp> | 
| 2149 | _LIBCPP_INLINE_VISIBILITY | 
| 2150 | typename enable_if | 
| 2151 | < | 
| 2152 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2153 |     _Tp | 
| 2154 | >::type | 
| 2155 | atomic_fetch_and_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2156 | { | 
| 2157 |     return __o->fetch_and(__op, __m); | 
| 2158 | } | 
| 2159 |  | 
| 2160 | template <class _Tp> | 
| 2161 | _LIBCPP_INLINE_VISIBILITY | 
| 2162 | typename enable_if | 
| 2163 | < | 
| 2164 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2165 |     _Tp | 
| 2166 | >::type | 
| 2167 | atomic_fetch_and_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2168 | { | 
| 2169 |     return __o->fetch_and(__op, __m); | 
| 2170 | } | 
| 2171 |  | 
| 2172 | // atomic_fetch_or | 
| 2173 |  | 
| 2174 | template <class _Tp> | 
| 2175 | _LIBCPP_INLINE_VISIBILITY | 
| 2176 | typename enable_if | 
| 2177 | < | 
| 2178 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2179 |     _Tp | 
| 2180 | >::type | 
| 2181 | atomic_fetch_or(volatile atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2182 | { | 
| 2183 |     return __o->fetch_or(__op); | 
| 2184 | } | 
| 2185 |  | 
| 2186 | template <class _Tp> | 
| 2187 | _LIBCPP_INLINE_VISIBILITY | 
| 2188 | typename enable_if | 
| 2189 | < | 
| 2190 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2191 |     _Tp | 
| 2192 | >::type | 
| 2193 | atomic_fetch_or(atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2194 | { | 
| 2195 |     return __o->fetch_or(__op); | 
| 2196 | } | 
| 2197 |  | 
| 2198 | // atomic_fetch_or_explicit | 
| 2199 |  | 
| 2200 | template <class _Tp> | 
| 2201 | _LIBCPP_INLINE_VISIBILITY | 
| 2202 | typename enable_if | 
| 2203 | < | 
| 2204 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2205 |     _Tp | 
| 2206 | >::type | 
| 2207 | atomic_fetch_or_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2208 | { | 
| 2209 |     return __o->fetch_or(__op, __m); | 
| 2210 | } | 
| 2211 |  | 
| 2212 | template <class _Tp> | 
| 2213 | _LIBCPP_INLINE_VISIBILITY | 
| 2214 | typename enable_if | 
| 2215 | < | 
| 2216 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2217 |     _Tp | 
| 2218 | >::type | 
| 2219 | atomic_fetch_or_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2220 | { | 
| 2221 |     return __o->fetch_or(__op, __m); | 
| 2222 | } | 
| 2223 |  | 
| 2224 | // atomic_fetch_xor | 
| 2225 |  | 
| 2226 | template <class _Tp> | 
| 2227 | _LIBCPP_INLINE_VISIBILITY | 
| 2228 | typename enable_if | 
| 2229 | < | 
| 2230 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2231 |     _Tp | 
| 2232 | >::type | 
| 2233 | atomic_fetch_xor(volatile atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2234 | { | 
| 2235 |     return __o->fetch_xor(__op); | 
| 2236 | } | 
| 2237 |  | 
| 2238 | template <class _Tp> | 
| 2239 | _LIBCPP_INLINE_VISIBILITY | 
| 2240 | typename enable_if | 
| 2241 | < | 
| 2242 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2243 |     _Tp | 
| 2244 | >::type | 
| 2245 | atomic_fetch_xor(atomic<_Tp>* __o, _Tp __op) _NOEXCEPT | 
| 2246 | { | 
| 2247 |     return __o->fetch_xor(__op); | 
| 2248 | } | 
| 2249 |  | 
| 2250 | // atomic_fetch_xor_explicit | 
| 2251 |  | 
| 2252 | template <class _Tp> | 
| 2253 | _LIBCPP_INLINE_VISIBILITY | 
| 2254 | typename enable_if | 
| 2255 | < | 
| 2256 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2257 |     _Tp | 
| 2258 | >::type | 
| 2259 | atomic_fetch_xor_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2260 | { | 
| 2261 |     return __o->fetch_xor(__op, __m); | 
| 2262 | } | 
| 2263 |  | 
| 2264 | template <class _Tp> | 
| 2265 | _LIBCPP_INLINE_VISIBILITY | 
| 2266 | typename enable_if | 
| 2267 | < | 
| 2268 |     is_integral<_Tp>::value && !is_same<_Tp, bool>::value, | 
| 2269 |     _Tp | 
| 2270 | >::type | 
| 2271 | atomic_fetch_xor_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) _NOEXCEPT | 
| 2272 | { | 
| 2273 |     return __o->fetch_xor(__op, __m); | 
| 2274 | } | 
| 2275 |  | 
| 2276 | // flag type and operations | 
| 2277 |  | 
typedef struct atomic_flag
{
    // Underlying storage; _LIBCPP_ATOMIC_FLAG_TYPE is selected by
    // configuration earlier in this header.
    __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_;

    // Atomically sets the flag to true and returns the value it held
    // immediately before (via an atomic exchange).
    _LIBCPP_INLINE_VISIBILITY
    bool test_and_set(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);}
    _LIBCPP_INLINE_VISIBILITY
    bool test_and_set(memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);}
    // Atomically stores false into the flag.
    _LIBCPP_INLINE_VISIBILITY
    void clear(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);}
    _LIBCPP_INLINE_VISIBILITY
    void clear(memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);}

    // Default constructor; _LIBCPP_DEFAULT expands per dialect
    // (defined elsewhere in this header).
    _LIBCPP_INLINE_VISIBILITY
    atomic_flag() _NOEXCEPT _LIBCPP_DEFAULT

    // Construction from bool is a libc++ extension, as marked; the
    // standard atomic_flag is only default-constructible.
    _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
    atomic_flag(bool __b) _NOEXCEPT : __a_(__b) {} // EXTENSION

    // atomic_flag is neither copyable nor copy-assignable.  In C++03,
    // where "= delete" is unavailable, the same effect is achieved by
    // declaring the members private and leaving them undefined.
#ifndef _LIBCPP_CXX03_LANG
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;
#else
private:
    atomic_flag(const atomic_flag&);
    atomic_flag& operator=(const atomic_flag&);
    atomic_flag& operator=(const atomic_flag&) volatile;
#endif
} atomic_flag;
| 2312 |  | 
| 2313 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2314 | bool | 
| 2315 | atomic_flag_test_and_set(volatile atomic_flag* __o) _NOEXCEPT | 
| 2316 | { | 
| 2317 |     return __o->test_and_set(); | 
| 2318 | } | 
| 2319 |  | 
| 2320 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2321 | bool | 
| 2322 | atomic_flag_test_and_set(atomic_flag* __o) _NOEXCEPT | 
| 2323 | { | 
| 2324 |     return __o->test_and_set(); | 
| 2325 | } | 
| 2326 |  | 
| 2327 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2328 | bool | 
| 2329 | atomic_flag_test_and_set_explicit(volatile atomic_flag* __o, memory_order __m) _NOEXCEPT | 
| 2330 | { | 
| 2331 |     return __o->test_and_set(__m); | 
| 2332 | } | 
| 2333 |  | 
| 2334 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2335 | bool | 
| 2336 | atomic_flag_test_and_set_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT | 
| 2337 | { | 
| 2338 |     return __o->test_and_set(__m); | 
| 2339 | } | 
| 2340 |  | 
| 2341 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2342 | void | 
| 2343 | atomic_flag_clear(volatile atomic_flag* __o) _NOEXCEPT | 
| 2344 | { | 
| 2345 |     __o->clear(); | 
| 2346 | } | 
| 2347 |  | 
| 2348 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2349 | void | 
| 2350 | atomic_flag_clear(atomic_flag* __o) _NOEXCEPT | 
| 2351 | { | 
| 2352 |     __o->clear(); | 
| 2353 | } | 
| 2354 |  | 
| 2355 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2356 | void | 
| 2357 | atomic_flag_clear_explicit(volatile atomic_flag* __o, memory_order __m) _NOEXCEPT | 
| 2358 | { | 
| 2359 |     __o->clear(__m); | 
| 2360 | } | 
| 2361 |  | 
| 2362 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2363 | void | 
| 2364 | atomic_flag_clear_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT | 
| 2365 | { | 
| 2366 |     __o->clear(__m); | 
| 2367 | } | 
| 2368 |  | 
| 2369 | // fences | 
| 2370 |  | 
| 2371 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2372 | void | 
| 2373 | atomic_thread_fence(memory_order __m) _NOEXCEPT | 
| 2374 | { | 
| 2375 |     __cxx_atomic_thread_fence(__m); | 
| 2376 | } | 
| 2377 |  | 
| 2378 | inline _LIBCPP_INLINE_VISIBILITY | 
| 2379 | void | 
| 2380 | atomic_signal_fence(memory_order __m) _NOEXCEPT | 
| 2381 | { | 
| 2382 |     __cxx_atomic_signal_fence(__m); | 
| 2383 | } | 
| 2384 |  | 
// Atomics for standard typedef types

// Named aliases for atomic specializations of the fundamental types,
// as required by [atomics.syn].
typedef atomic<bool>               atomic_bool;
typedef atomic<char>               atomic_char;
typedef atomic<signed char>        atomic_schar;
typedef atomic<unsigned char>      atomic_uchar;
typedef atomic<short>              atomic_short;
typedef atomic<unsigned short>     atomic_ushort;
typedef atomic<int>                atomic_int;
typedef atomic<unsigned int>       atomic_uint;
typedef atomic<long>               atomic_long;
typedef atomic<unsigned long>      atomic_ulong;
typedef atomic<long long>          atomic_llong;
typedef atomic<unsigned long long> atomic_ullong;
typedef atomic<char16_t>           atomic_char16_t;
typedef atomic<char32_t>           atomic_char32_t;
typedef atomic<wchar_t>            atomic_wchar_t;

// Aliases for the <cstdint> least-width integer types.
typedef atomic<int_least8_t>   atomic_int_least8_t;
typedef atomic<uint_least8_t>  atomic_uint_least8_t;
typedef atomic<int_least16_t>  atomic_int_least16_t;
typedef atomic<uint_least16_t> atomic_uint_least16_t;
typedef atomic<int_least32_t>  atomic_int_least32_t;
typedef atomic<uint_least32_t> atomic_uint_least32_t;
typedef atomic<int_least64_t>  atomic_int_least64_t;
typedef atomic<uint_least64_t> atomic_uint_least64_t;

// Aliases for the <cstdint> fastest-width integer types.
typedef atomic<int_fast8_t>   atomic_int_fast8_t;
typedef atomic<uint_fast8_t>  atomic_uint_fast8_t;
typedef atomic<int_fast16_t>  atomic_int_fast16_t;
typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
typedef atomic<int_fast32_t>  atomic_int_fast32_t;
typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
typedef atomic<int_fast64_t>  atomic_int_fast64_t;
typedef atomic<uint_fast64_t> atomic_uint_fast64_t;

// Aliases for the exact-width integer types.
typedef atomic< int8_t>  atomic_int8_t;
typedef atomic<uint8_t>  atomic_uint8_t;
typedef atomic< int16_t> atomic_int16_t;
typedef atomic<uint16_t> atomic_uint16_t;
typedef atomic< int32_t> atomic_int32_t;
typedef atomic<uint32_t> atomic_uint32_t;
typedef atomic< int64_t> atomic_int64_t;
typedef atomic<uint64_t> atomic_uint64_t;

// Aliases for pointer-sized and maximum-width integer types.
typedef atomic<intptr_t>  atomic_intptr_t;
typedef atomic<uintptr_t> atomic_uintptr_t;
typedef atomic<size_t>    atomic_size_t;
typedef atomic<ptrdiff_t> atomic_ptrdiff_t;
typedef atomic<intmax_t>  atomic_intmax_t;
typedef atomic<uintmax_t> atomic_uintmax_t;

// Aggregate-initialization macros kept for C compatibility.
// NOTE(review): ATOMIC_VAR_INIT (and ATOMIC_FLAG_INIT's necessity) were
// deprecated by C++20 (P0883) -- verify against the dialect this header
// version targets before changing them.
#define ATOMIC_FLAG_INIT {false}
#define ATOMIC_VAR_INIT(__v) {__v}
| 2439 |  | 
| 2440 | _LIBCPP_END_NAMESPACE_STD | 
| 2441 |  | 
| 2442 | #endif  // _LIBCPP_ATOMIC | 
| 2443 |  |