| 1 | /* |
| 2 | * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_RUNTIME_ATOMIC_HPP |
| 26 | #define SHARE_RUNTIME_ATOMIC_HPP |
| 27 | |
| 28 | #include "memory/allocation.hpp" |
| 29 | #include "metaprogramming/conditional.hpp" |
| 30 | #include "metaprogramming/enableIf.hpp" |
| 31 | #include "metaprogramming/isIntegral.hpp" |
| 32 | #include "metaprogramming/isPointer.hpp" |
|  | #include "metaprogramming/isRegisteredEnum.hpp" |
| 33 | #include "metaprogramming/isSame.hpp" |
|  | #include "metaprogramming/isSigned.hpp" |
| 34 | #include "metaprogramming/primitiveConversions.hpp" |
| 35 | #include "metaprogramming/removeCV.hpp" |
| 36 | #include "metaprogramming/removePointer.hpp" |
| 37 | #include "utilities/align.hpp" |
| 38 | #include "utilities/macros.hpp" |
| 39 | |
| 40 | enum atomic_memory_order { |
| 41 | // The modes that align with C++11 are intended to |
| 42 | // follow the same semantics. |
| 43 | memory_order_relaxed = 0, |
| 44 | memory_order_acquire = 2, |
| 45 | memory_order_release = 3, |
| 46 | memory_order_acq_rel = 4, |
| 47 | // Strong two-way memory barrier. |
| 48 | memory_order_conservative = 8 |
| 49 | }; |
| 50 | |
| 51 | class Atomic : AllStatic { |
| 52 | public: |
| 53 | // Atomic operations on int64 types are not available on all 32-bit |
| 54 | // platforms. If atomic ops on int64 are defined here they must only |
| 55 | // be used from code that verifies they are available at runtime and |
| 56 | // can provide an alternative action if not - see supports_cx8() for |
| 57 | // a means to test availability. |
| 58 | |
| 59 | // The memory operations that are mentioned with each of the atomic |
| 60 | // function families come from src/share/vm/runtime/orderAccess.hpp, |
| 61 | // e.g., <fence> is described in that file and is implemented by the |
| 62 | // OrderAccess::fence() function. See that file for the gory details |
| 63 | // on the Memory Access Ordering Model. |
| 64 | |
| 65 | // All of the atomic operations that imply a read-modify-write action |
| 66 | // guarantee a two-way memory barrier across that operation. Historically |
| 67 | // these semantics reflect the strength of atomic operations that are |
| 68 | // provided on SPARC/X86. We assume that strength is necessary unless |
| 69 | // we can prove that a weaker form is sufficiently safe. |
| 70 | |
| 71 | // Atomically store to a location |
| 72 | // The type T must be either a pointer type convertible to or equal |
| 73 | // to D, an integral/enum type equal to D, or a type equal to D that |
| 74 | // is primitive convertible using PrimitiveConversions. |
| 75 | template<typename T, typename D> |
| 76 | inline static void store(T store_value, volatile D* dest); |
| 77 | |
| 78 | // Atomically load from a location |
| 79 | // The type T must be either a pointer type, an integral/enum type, |
| 80 | // or a type that is primitive convertible using PrimitiveConversions. |
| 81 | template<typename T> |
| 82 | inline static T load(const volatile T* dest); |
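|  | |
|  | // For example (illustrative only; _count is a hypothetical field): |
|  | //   volatile int _count; |
|  | //   Atomic::store(0, &_count);       // note: value first, destination second |
|  | //   int v = Atomic::load(&_count); |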
| 83 | |
| 84 | // Atomically add to a location. Returns the updated value. add*() provides: |
| 85 | // <fence> add-value-to-dest <membar StoreLoad|StoreStore> |
| 86 | |
| 87 | template<typename I, typename D> |
| 88 | inline static D add(I add_value, D volatile* dest, |
| 89 | atomic_memory_order order = memory_order_conservative); |
| 90 | |
| 91 | template<typename I, typename D> |
| 92 | inline static D sub(I sub_value, D volatile* dest, |
| 93 | atomic_memory_order order = memory_order_conservative); |
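|  | |
|  | // For example (illustrative only; _ref_count is a hypothetical field): |
|  | //   volatile int _ref_count; |
|  | //   int after_inc = Atomic::add(1, &_ref_count);   // returns the updated value |
|  | //   int after_dec = Atomic::sub(1, &_ref_count); |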
| 94 | |
| 95 | // Atomically increment a location. inc() provides: |
| 96 | // <fence> increment-dest <membar StoreLoad|StoreStore> |
| 97 | // The type D may be either a pointer type, or an integral |
| 98 | // type. If it is a pointer type, then the increment is |
| 99 | // scaled to the size of the type pointed to by the pointer. |
| 100 | template<typename D> |
| 101 | inline static void inc(D volatile* dest, |
| 102 | atomic_memory_order order = memory_order_conservative); |
| 103 | |
| 104 | // Atomically decrement a location. dec() provides: |
| 105 | // <fence> decrement-dest <membar StoreLoad|StoreStore> |
| 106 | // The type D may be either a pointer type, or an integral |
| 107 | // type. If it is a pointer type, then the decrement is |
| 108 | // scaled to the size of the type pointed to by the pointer. |
| 109 | template<typename D> |
| 110 | inline static void dec(D volatile* dest, |
| 111 | atomic_memory_order order = memory_order_conservative); |
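|  | |
|  | // For example (illustrative only; _top is a hypothetical field): |
|  | //   HeapWord* volatile _top; |
|  | //   Atomic::inc(&_top);   // advances _top by sizeof(HeapWord) |
|  | //   Atomic::dec(&_top);   // moves _top back by sizeof(HeapWord) |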
| 112 | |
| 113 | // Performs atomic exchange of *dest with exchange_value. Returns the |
| 114 | // prior value of *dest. xchg*() provides: |
| 115 | // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore> |
| 116 | // The type T must be either a pointer type convertible to or equal |
| 117 | // to D, an integral/enum type equal to D, or a type equal to D that |
| 118 | // is primitive convertible using PrimitiveConversions. |
| 119 | template<typename T, typename D> |
| 120 | inline static D xchg(T exchange_value, volatile D* dest, |
| 121 | atomic_memory_order order = memory_order_conservative); |
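|  | |
|  | // For example (illustrative only; _claimed is a hypothetical field): |
|  | //   volatile int _claimed; |
|  | //   int prev = Atomic::xchg(1, &_claimed);   // set to 1, return the prior value |
|  | //   bool we_claimed_it = (prev == 0); |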
| 122 | |
| 123 | // Performs atomic compare of *dest and compare_value, and exchanges |
| 124 | // *dest with exchange_value if the comparison succeeded. Returns the |
| 125 | // prior value of *dest. cmpxchg*() provides: |
| 126 | // <fence> compare-and-exchange <membar StoreLoad|StoreStore> |
| 127 | |
| 128 | template<typename T, typename D, typename U> |
| 129 | inline static D cmpxchg(T exchange_value, |
| 130 | D volatile* dest, |
| 131 | U compare_value, |
| 132 | atomic_memory_order order = memory_order_conservative); |
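|  | |
|  | // A typical retry loop looks like this (illustrative only; _bits and |
|  | // mask are hypothetical): |
|  | //   volatile intptr_t _bits; |
|  | //   intptr_t old_bits = Atomic::load(&_bits); |
|  | //   while (true) { |
|  | //     intptr_t prev = Atomic::cmpxchg(old_bits | mask, &_bits, old_bits); |
|  | //     if (prev == old_bits) break;   // success |
|  | //     old_bits = prev;               // lost a race; retry with the observed value |
|  | //   } |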
| 133 | |
| 134 | // Performs atomic compare of *dest and NULL, and replaces *dest |
| 135 | // with exchange_value if the comparison succeeded. Returns true if |
| 136 | // the comparison succeeded and the exchange occurred. This is |
| 137 | // often used as part of lazy initialization, as a lock-free |
| 138 | // alternative to the Double-Checked Locking Pattern. |
| 139 | template<typename T, typename D> |
| 140 | inline static bool replace_if_null(T* value, D* volatile* dest, |
| 141 | atomic_memory_order order = memory_order_conservative); |
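|  | |
|  | // For example (illustrative only; Foo and _cache are hypothetical): |
|  | //   Foo* volatile _cache; |
|  | //   Foo* foo = new Foo(); |
|  | //   if (!Atomic::replace_if_null(foo, &_cache)) { |
|  | //     delete foo;   // another thread won the race and installed its object |
|  | //   } |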
| 142 | |
| 143 | private: |
| 144 | WINDOWS_ONLY(public:) // VS2017 reports C2027 (use of undefined type) if IsPointerConvertible is declared private |
| 145 | // Test whether From is implicitly convertible to To. |
| 146 | // From and To must be pointer types. |
| 147 | // Note: Provides the limited subset of C++11 std::is_convertible |
| 148 | // that is needed here. |
| 149 | template<typename From, typename To> struct IsPointerConvertible; |
| 150 | |
| 151 | protected: |
| 152 | // Dispatch handler for store. Provides type-based validity |
| 153 | // checking and limited conversions around calls to the platform- |
| 154 | // specific implementation layer provided by PlatformOp. |
| 155 | template<typename T, typename D, typename PlatformOp, typename Enable = void> |
| 156 | struct StoreImpl; |
| 157 | |
| 158 | // Platform-specific implementation of store. Support for sizes |
| 159 | // of 1, 2, 4, and (if different) pointer size bytes is required. |
| 160 | // The class is a function object that must be default constructible, |
| 161 | // with these requirements: |
| 162 | // |
| 163 | // either: |
| 164 | // - dest is of type D*, an integral, enum or pointer type. |
| 165 | // - new_value is of type T, an integral, enum or pointer type D, or a |
| 166 | // pointer type convertible to D. |
| 167 | // or: |
| 168 | // - T and D are the same and are primitive convertible using PrimitiveConversions |
| 169 | // and either way: |
| 170 | // - platform_store is an object of type PlatformStore<sizeof(T)>. |
| 171 | // |
| 172 | // Then |
| 173 | // platform_store(new_value, dest) |
| 174 | // must be a valid expression. |
| 175 | // |
| 176 | // The default implementation is a volatile store. If a platform |
| 177 | // requires more, e.g. for 64-bit stores, a specialization is required. |
| 178 | template<size_t byte_size> struct PlatformStore; |
| 179 | |
| 180 | // Dispatch handler for load. Provides type-based validity |
| 181 | // checking and limited conversions around calls to the platform- |
| 182 | // specific implementation layer provided by PlatformOp. |
| 183 | template<typename T, typename PlatformOp, typename Enable = void> |
| 184 | struct LoadImpl; |
| 185 | |
| 186 | // Platform-specific implementation of load. Support for sizes of |
| 187 | // 1, 2, 4 bytes and (if different) pointer size bytes is required. |
| 188 | // The class is a function object that must be default |
| 189 | // constructible, with these requirements: |
| 190 | // |
| 191 | // - src is of type T*, an integral, enum or pointer type, or |
| 192 | // T is convertible to a primitive type using PrimitiveConversions |
| 193 | // - platform_load is an object of type PlatformLoad<sizeof(T)>. |
| 194 | // |
| 195 | // Then |
| 196 | // platform_load(src) |
| 197 | // must be a valid expression, returning a result convertible to T. |
| 198 | // |
| 199 | // The default implementation is a volatile load. If a platform |
| 200 | // requires more, e.g. for 64-bit loads, a specialization is required. |
| 201 | template<size_t byte_size> struct PlatformLoad; |
| 202 | |
| 203 | private: |
| 204 | // Dispatch handler for add. Provides type-based validity checking |
| 205 | // and limited conversions around calls to the platform-specific |
| 206 | // implementation layer provided by PlatformAdd. |
| 207 | template<typename I, typename D, typename Enable = void> |
| 208 | struct AddImpl; |
| 209 | |
| 210 | // Platform-specific implementation of add. Support for sizes of 4 |
| 211 | // bytes and (if different) pointer size bytes is required. The |
| 212 | // class is a function object that must be default constructible, |
| 213 | // with these requirements: |
| 214 | // |
| 215 | // - dest is of type D*, an integral or pointer type. |
| 216 | // - add_value is of type I, an integral type. |
| 217 | // - sizeof(I) == sizeof(D). |
| 218 | // - if D is an integral type, I == D. |
| 219 | // - platform_add is an object of type PlatformAdd<sizeof(D)>. |
| 220 | // |
| 221 | // Then |
| 222 | // platform_add(add_value, dest) |
| 223 | // must be a valid expression, returning a result convertible to D. |
| 224 | // |
| 225 | // No definition is provided; all platforms must explicitly define |
| 226 | // this class and any needed specializations. |
| 227 | template<size_t byte_size> struct PlatformAdd; |
| 228 | |
| 229 | // Helper base classes for defining PlatformAdd. To use, define |
| 230 | // PlatformAdd or a specialization that derives from one of these, |
| 231 | // and include in the PlatformAdd definition the support function |
| 232 | // (described below) required by the base class. |
| 233 | // |
| 234 | // These classes implement the required function object protocol for |
| 235 | // PlatformAdd, using a support function template provided by the |
| 236 | // derived class. Let add_value (of type I) and dest (of type D) be |
| 237 | // the arguments the object is called with. If D is a pointer type |
| 238 | // P*, then let addend (of type I) be add_value * sizeof(P); |
| 239 | // otherwise, addend is add_value. |
| 240 | // |
| 241 | // FetchAndAdd requires the derived class to provide |
| 242 | // fetch_and_add(addend, dest) |
| 243 | // atomically adding addend to the value of dest, and returning the |
| 244 | // old value. |
| 245 | // |
| 246 | // AddAndFetch requires the derived class to provide |
| 247 | // add_and_fetch(addend, dest) |
| 248 | // atomically adding addend to the value of dest, and returning the |
| 249 | // new value. |
| 250 | // |
| 251 | // When D is a pointer type P*, both fetch_and_add and add_and_fetch |
| 252 | // treat it as if it were a uintptr_t; they do not perform any |
| 253 | // scaling of the addend, as that has already been done by the |
| 254 | // caller. |
| 255 | public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11. |
| 256 | template<typename Derived> struct FetchAndAdd; |
| 257 | template<typename Derived> struct AddAndFetch; |
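|  | |
|  | // A platform file typically defines PlatformAdd by deriving from one of |
|  | // these helpers and supplying the support function, roughly (sketch only; |
|  | // the real definitions live in the platform-specific atomic headers): |
|  | //   template<size_t byte_size> |
|  | //   struct Atomic::PlatformAdd |
|  | //     : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > |
|  | //   { |
|  | //     template<typename I, typename D> |
|  | //     D add_and_fetch(I add_value, D volatile* dest, |
|  | //                     atomic_memory_order order) const; |
|  | //   }; |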
| 258 | private: |
| 259 | |
| 260 | // Support for platforms that implement some variants of add using a |
| 261 | // (typically out of line) non-template helper function. The |
| 262 | // generic arguments passed to PlatformAdd need to be translated to |
| 263 | // the appropriate type for the helper function, the helper function |
| 264 | // invoked on the translated arguments, and the result translated |
| 265 | // back. Type is the parameter / return type of the helper |
| 266 | // function. No scaling of add_value is performed when D is a pointer |
| 267 | // type, so this function can be used to implement the support function |
| 268 | // required by AddAndFetch. |
| 269 | template<typename Type, typename Fn, typename I, typename D> |
| 270 | static D add_using_helper(Fn fn, I add_value, D volatile* dest); |
| 271 | |
| 272 | // Dispatch handler for cmpxchg. Provides type-based validity |
| 273 | // checking and limited conversions around calls to the |
| 274 | // platform-specific implementation layer provided by |
| 275 | // PlatformCmpxchg. |
| 276 | template<typename T, typename D, typename U, typename Enable = void> |
| 277 | struct CmpxchgImpl; |
| 278 | |
| 279 | // Platform-specific implementation of cmpxchg. Support for sizes |
| 280 | // of 1, 4, and 8 bytes is required. The class is a function object that |
| 281 | // must be default constructible, with these requirements: |
| 282 | // |
| 283 | // - dest is of type T*. |
| 284 | // - exchange_value and compare_value are of type T. |
| 285 | // - order is of type atomic_memory_order. |
| 286 | // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>. |
| 287 | // |
| 288 | // Then |
| 289 | // platform_cmpxchg(exchange_value, dest, compare_value, order) |
| 290 | // must be a valid expression, returning a result convertible to T. |
| 291 | // |
| 292 | // A default definition is provided, which declares a function template |
| 293 | // T operator()(T, T volatile*, T, atomic_memory_order) const |
| 294 | // |
| 295 | // For each required size, a platform must either provide an |
| 296 | // appropriate definition of that function, or must entirely |
| 297 | // specialize the class template for that size. |
| 298 | template<size_t byte_size> struct PlatformCmpxchg; |
| 299 | |
| 300 | // Support for platforms that implement some variants of cmpxchg |
| 301 | // using a (typically out of line) non-template helper function. |
| 302 | // The generic arguments passed to PlatformCmpxchg need to be |
| 303 | // translated to the appropriate type for the helper function, the |
| 304 | // helper invoked on the translated arguments, and the result |
| 305 | // translated back. Type is the parameter / return type of the |
| 306 | // helper function. |
| 307 | template<typename Type, typename Fn, typename T> |
| 308 | static T cmpxchg_using_helper(Fn fn, |
| 309 | T exchange_value, |
| 310 | T volatile* dest, |
| 311 | T compare_value); |
| 312 | |
| 313 | // Support platforms that do not provide Read-Modify-Write |
| 314 | // byte-level atomic access. To use, derive PlatformCmpxchg<1> from |
| 315 | // this class. |
| 316 | public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11. |
| 317 | struct CmpxchgByteUsingInt; |
| 318 | private: |
| 319 | |
| 320 | // Dispatch handler for xchg. Provides type-based validity |
| 321 | // checking and limited conversions around calls to the |
| 322 | // platform-specific implementation layer provided by |
| 323 | // PlatformXchg. |
| 324 | template<typename T, typename D, typename Enable = void> |
| 325 | struct XchgImpl; |
| 326 | |
| 327 | // Platform-specific implementation of xchg. Support for sizes |
| 328 | // of 4 and sizeof(intptr_t) bytes is required. The class is a function |
| 329 | // object that must be default constructible, with these requirements: |
| 330 | // |
| 331 | // - dest is of type T*. |
| 332 | // - exchange_value is of type T. |
| 333 | // - platform_xchg is an object of type PlatformXchg<sizeof(T)>. |
| 334 | // |
| 335 | // Then |
| 336 | // platform_xchg(exchange_value, dest) |
| 337 | // must be a valid expression, returning a result convertible to T. |
| 338 | // |
| 339 | // A default definition is provided, which declares a function template |
| 340 | // T operator()(T, T volatile*, atomic_memory_order) const |
| 341 | // |
| 342 | // For each required size, a platform must either provide an |
| 343 | // appropriate definition of that function, or must entirely |
| 344 | // specialize the class template for that size. |
| 345 | template<size_t byte_size> struct PlatformXchg; |
| 346 | |
| 347 | // Support for platforms that implement some variants of xchg |
| 348 | // using a (typically out of line) non-template helper function. |
| 349 | // The generic arguments passed to PlatformXchg need to be |
| 350 | // translated to the appropriate type for the helper function, the |
| 351 | // helper invoked on the translated arguments, and the result |
| 352 | // translated back. Type is the parameter / return type of the |
| 353 | // helper function. |
| 354 | template<typename Type, typename Fn, typename T> |
| 355 | static T xchg_using_helper(Fn fn, |
| 356 | T exchange_value, |
| 357 | T volatile* dest); |
| 358 | }; |
| 359 | |
| 360 | template<typename From, typename To> |
| 361 | struct Atomic::IsPointerConvertible<From*, To*> : AllStatic { |
| 362 | // Determine whether From* is implicitly convertible to To*, using |
| 363 | // the "sizeof trick". |
| 364 | typedef char yes; |
| 365 | typedef char (&no)[2]; |
| 366 | |
| 367 | static yes test(To*); |
| 368 | static no test(...); |
| 369 | static From* test_value; |
| 370 | |
| 371 | static const bool value = (sizeof(yes) == sizeof(test(test_value))); |
| 372 | }; |
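|  | |
|  | // For example, IsPointerConvertible<Derived*, Base*>::value is true when |
|  | // Derived publicly derives from Base, because overload resolution picks |
|  | // test(To*); IsPointerConvertible<int*, char*>::value is false, so the |
|  | // test(...) overload is chosen instead. |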
| 373 | |
| 374 | // Handle load for pointer, integral and enum types. |
| 375 | template<typename T, typename PlatformOp> |
| 376 | struct Atomic::LoadImpl< |
| 377 | T, |
| 378 | PlatformOp, |
| 379 | typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value || IsPointer<T>::value>::type> |
| 380 | { |
| 381 | T operator()(T const volatile* dest) const { |
| 382 | // Forward to the platform handler for the size of T. |
| 383 | return PlatformOp()(dest); |
| 384 | } |
| 385 | }; |
| 386 | |
| 387 | // Handle load for types that have a translator. |
| 388 | // |
| 389 | // All the involved types must be identical. |
| 390 | // |
| 391 | // This translates the original call into a call on the decayed |
| 392 | // arguments, and returns the recovered result of that translated |
| 393 | // call. |
| 394 | template<typename T, typename PlatformOp> |
| 395 | struct Atomic::LoadImpl< |
| 396 | T, |
| 397 | PlatformOp, |
| 398 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
| 399 | { |
| 400 | T operator()(T const volatile* dest) const { |
| 401 | typedef PrimitiveConversions::Translate<T> Translator; |
| 402 | typedef typename Translator::Decayed Decayed; |
| 403 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed)); |
| 404 | Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest)); |
| 405 | return Translator::recover(result); |
| 406 | } |
| 407 | }; |
| 408 | |
| 409 | // Default implementation of atomic load if a specific platform |
| 410 | // does not provide a specialization for a certain size class. |
| 411 | // For increased safety, the default implementation only allows |
| 412 | // load types that are pointer sized or smaller. If a platform still |
| 413 | // supports wide atomics, then it has to use specialization |
| 414 | // of Atomic::PlatformLoad for that wider size class. |
| 415 | template<size_t byte_size> |
| 416 | struct Atomic::PlatformLoad { |
| 417 | template<typename T> |
| 418 | T operator()(T const volatile* dest) const { |
| 419 | STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization |
| 420 | return *dest; |
| 421 | } |
| 422 | }; |
| 423 | |
| 424 | // Handle store for integral and enum types. |
| 425 | // |
| 426 | // All the involved types must be identical. |
| 427 | template<typename T, typename PlatformOp> |
| 428 | struct Atomic::StoreImpl< |
| 429 | T, T, |
| 430 | PlatformOp, |
| 431 | typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type> |
| 432 | { |
| 433 | void operator()(T new_value, T volatile* dest) const { |
| 434 | // Forward to the platform handler for the size of T. |
| 435 | PlatformOp()(new_value, dest); |
| 436 | } |
| 437 | }; |
| 438 | |
| 439 | // Handle store for pointer types. |
| 440 | // |
| 441 | // The new_value must be implicitly convertible to the |
| 442 | // destination's type; it must be type-correct to store the |
| 443 | // new_value in the destination. |
| 444 | template<typename T, typename D, typename PlatformOp> |
| 445 | struct Atomic::StoreImpl< |
| 446 | T*, D*, |
| 447 | PlatformOp, |
| 448 | typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type> |
| 449 | { |
| 450 | void operator()(T* new_value, D* volatile* dest) const { |
| 451 | // Allow derived to base conversion, and adding cv-qualifiers. |
| 452 | D* value = new_value; |
| 453 | PlatformOp()(value, dest); |
| 454 | } |
| 455 | }; |
| 456 | |
| 457 | // Handle store for types that have a translator. |
| 458 | // |
| 459 | // All the involved types must be identical. |
| 460 | // |
| 461 | // This translates the original call into a call on the decayed |
| 462 | // arguments. |
| 463 | template<typename T, typename PlatformOp> |
| 464 | struct Atomic::StoreImpl< |
| 465 | T, T, |
| 466 | PlatformOp, |
| 467 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
| 468 | { |
| 469 | void operator()(T new_value, T volatile* dest) const { |
| 470 | typedef PrimitiveConversions::Translate<T> Translator; |
| 471 | typedef typename Translator::Decayed Decayed; |
| 472 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed)); |
| 473 | PlatformOp()(Translator::decay(new_value), |
| 474 | reinterpret_cast<Decayed volatile*>(dest)); |
| 475 | } |
| 476 | }; |
| 477 | |
| 478 | // Default implementation of atomic store if a specific platform |
| 479 | // does not provide a specialization for a certain size class. |
| 480 | // For increased safety, the default implementation only allows |
| 481 | // storing types that are pointer sized or smaller. If a platform still |
| 482 | // supports wide atomics, then it has to use specialization |
| 483 | // of Atomic::PlatformStore for that wider size class. |
| 484 | template<size_t byte_size> |
| 485 | struct Atomic::PlatformStore { |
| 486 | template<typename T> |
| 487 | void operator()(T new_value, |
| 488 | T volatile* dest) const { |
| 489 | STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization |
| 490 | (void)const_cast<T&>(*dest = new_value); |
| 491 | } |
| 492 | }; |
| 493 | |
| 494 | // Define FetchAndAdd and AddAndFetch helper classes before including |
| 495 | // platform file, which may use these as base classes, requiring they |
| 496 | // be complete. |
| 497 | |
| 498 | template<typename Derived> |
| 499 | struct Atomic::FetchAndAdd { |
| 500 | template<typename I, typename D> |
| 501 | D operator()(I add_value, D volatile* dest, atomic_memory_order order) const; |
| 502 | }; |
| 503 | |
| 504 | template<typename Derived> |
| 505 | struct Atomic::AddAndFetch { |
| 506 | template<typename I, typename D> |
| 507 | D operator()(I add_value, D volatile* dest, atomic_memory_order order) const; |
| 508 | }; |
| 509 | |
| 510 | template<typename D> |
| 511 | inline void Atomic::inc(D volatile* dest, atomic_memory_order order) { |
| 512 | STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value); |
| 513 | typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I; |
| 514 | Atomic::add(I(1), dest, order); |
| 515 | } |
| 516 | |
| 517 | template<typename D> |
| 518 | inline void Atomic::dec(D volatile* dest, atomic_memory_order order) { |
| 519 | STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value); |
| 520 | typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I; |
| 521 | // Assumes two's complement integer representation. |
| 522 | #pragma warning(suppress: 4146) |
| 523 | Atomic::add(I(-1), dest, order); |
| 524 | } |
| 525 | |
| 526 | template<typename I, typename D> |
| 527 | inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) { |
| 528 | STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value); |
| 529 | STATIC_ASSERT(IsIntegral<I>::value); |
| 530 | // If D is a pointer type, use [u]intptr_t as the addend type, |
| 531 | // matching signedness of I. Otherwise, use D as the addend type. |
| 532 | typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI; |
| 533 | typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType; |
| 534 | // Only allow conversions that can't change the value. |
| 535 | STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value); |
| 536 | STATIC_ASSERT(sizeof(I) <= sizeof(AddendType)); |
| 537 | AddendType addend = sub_value; |
| 538 | // Assumes two's complement integer representation. |
| 539 | #pragma warning(suppress: 4146) // In case AddendType is not signed. |
| 540 | return Atomic::add(-addend, dest, order); |
| 541 | } |
| 542 | |
| 543 | // Define the class before including platform file, which may specialize |
| 544 | // the operator definition. No generic definition of specializations |
| 545 | // of the operator template is provided, nor are there any generic |
| 546 | // specializations of the class. The platform file is responsible for |
| 547 | // providing those. |
| 548 | template<size_t byte_size> |
| 549 | struct Atomic::PlatformCmpxchg { |
| 550 | template<typename T> |
| 551 | T operator()(T exchange_value, |
| 552 | T volatile* dest, |
| 553 | T compare_value, |
| 554 | atomic_memory_order order) const; |
| 555 | }; |
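|  | |
|  | // A platform then defines the operator for each required size, roughly |
|  | // (sketch only; real definitions live in the platform-specific headers): |
|  | //   template<> |
|  | //   template<typename T> |
|  | //   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, |
|  | //                                                   T volatile* dest, |
|  | //                                                   T compare_value, |
|  | //                                                   atomic_memory_order order) const { |
|  | //     STATIC_ASSERT(4 == sizeof(T)); |
|  | //     // ... platform-specific instruction sequence or intrinsic ... |
|  | //   } |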
| 556 | |
| 557 | // Define the class before including platform file, which may use this |
| 558 | // as a base class, requiring it be complete. The definition is later |
| 559 | // in this file, near the other definitions related to cmpxchg. |
| 560 | struct Atomic::CmpxchgByteUsingInt { |
| 561 | template<typename T> |
| 562 | T operator()(T exchange_value, |
| 563 | T volatile* dest, |
| 564 | T compare_value, |
| 565 | atomic_memory_order order) const; |
| 566 | }; |
| 567 | |
| 568 | // Define the class before including platform file, which may specialize |
| 569 | // the operator definition. No generic definition of specializations |
| 570 | // of the operator template is provided, nor are there any generic |
| 571 | // specializations of the class. The platform file is responsible for |
| 572 | // providing those. |
| 573 | template<size_t byte_size> |
| 574 | struct Atomic::PlatformXchg { |
| 575 | template<typename T> |
| 576 | T operator()(T exchange_value, |
| 577 | T volatile* dest, |
| 578 | atomic_memory_order order) const; |
| 579 | }; |
| 580 | |
| 581 | // platform specific in-line definitions - must come before shared definitions |
| 582 | |
| 583 | #include OS_CPU_HEADER(atomic) |
| 584 | |
| 585 | // shared in-line definitions |
| 586 | |
| 587 | // size_t casts... |
| 588 | #if (SIZE_MAX != UINTPTR_MAX) |
| 589 | #error size_t is not WORD_SIZE, interesting platform, but missing implementation here |
| 590 | #endif |
| 591 | |
| 592 | template<typename T> |
| 593 | inline T Atomic::load(const volatile T* dest) { |
| 594 | return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest); |
| 595 | } |
| 596 | |
| 597 | template<typename T, typename D> |
| 598 | inline void Atomic::store(T store_value, volatile D* dest) { |
| 599 | StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest); |
| 600 | } |
| 601 | |
| 602 | template<typename I, typename D> |
| 603 | inline D Atomic::add(I add_value, D volatile* dest, |
| 604 | atomic_memory_order order) { |
| 605 | return AddImpl<I, D>()(add_value, dest, order); |
| 606 | } |
| 607 | |
| 608 | template<typename I, typename D> |
| 609 | struct Atomic::AddImpl< |
| 610 | I, D, |
| 611 | typename EnableIf<IsIntegral<I>::value && |
| 612 | IsIntegral<D>::value && |
| 613 | (sizeof(I) <= sizeof(D)) && |
| 614 | (IsSigned<I>::value == IsSigned<D>::value)>::type> |
| 615 | { |
| 616 | D operator()(I add_value, D volatile* dest, atomic_memory_order order) const { |
| 617 | D addend = add_value; |
| 618 | return PlatformAdd<sizeof(D)>()(addend, dest, order); |
| 619 | } |
| 620 | }; |
| 621 | |
| 622 | template<typename I, typename P> |
| 623 | struct Atomic::AddImpl< |
| 624 | I, P*, |
| 625 | typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type> |
| 626 | { |
| 627 | P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const { |
| 628 | STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*)); |
| 629 | STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*)); |
| 630 | typedef typename Conditional<IsSigned<I>::value, |
| 631 | intptr_t, |
| 632 | uintptr_t>::type CI; |
| 633 | CI addend = add_value; |
| 634 | return PlatformAdd<sizeof(P*)>()(addend, dest, order); |
| 635 | } |
| 636 | }; |
| 637 | |
| 638 | template<typename Derived> |
| 639 | template<typename I, typename D> |
| 640 | inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest, |
| 641 | atomic_memory_order order) const { |
| 642 | I addend = add_value; |
| 643 | // If D is a pointer type P*, scale by sizeof(P). |
| 644 | if (IsPointer<D>::value) { |
| 645 | addend *= sizeof(typename RemovePointer<D>::type); |
| 646 | } |
| 647 | D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order); |
| 648 | return old + add_value; |
| 649 | } |
| 650 | |
| 651 | template<typename Derived> |
| 652 | template<typename I, typename D> |
| 653 | inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest, |
| 654 | atomic_memory_order order) const { |
| 655 | // If D is a pointer type P*, scale by sizeof(P). |
| 656 | if (IsPointer<D>::value) { |
| 657 | add_value *= sizeof(typename RemovePointer<D>::type); |
| 658 | } |
| 659 | return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order); |
| 660 | } |
| 661 | |
| 662 | template<typename Type, typename Fn, typename I, typename D> |
| 663 | inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) { |
| 664 | return PrimitiveConversions::cast<D>( |
| 665 | fn(PrimitiveConversions::cast<Type>(add_value), |
| 666 | reinterpret_cast<Type volatile*>(dest))); |
| 667 | } |
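|  | |
|  | // For example, a platform whose 4-byte add is an out-of-line stub might |
|  | // implement its support function roughly as (sketch only; add_func is a |
|  | // hypothetical helper of type int32_t (*)(int32_t, int32_t volatile*)): |
|  | //   template<typename I, typename D> |
|  | //   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const { |
|  | //     return add_using_helper<int32_t>(add_func, add_value, dest); |
|  | //   } |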
| 668 | |
| 669 | template<typename T, typename D, typename U> |
| 670 | inline D Atomic::cmpxchg(T exchange_value, |
| 671 | D volatile* dest, |
| 672 | U compare_value, |
| 673 | atomic_memory_order order) { |
| 674 | return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order); |
| 675 | } |
| 676 | |
| 677 | template<typename T, typename D> |
| 678 | inline bool Atomic::replace_if_null(T* value, D* volatile* dest, |
| 679 | atomic_memory_order order) { |
| 680 | // Presently using a trivial implementation in terms of cmpxchg. |
| 681 | // Consider adding platform support, to permit the use of compiler |
| 682 | // intrinsics like gcc's __sync_bool_compare_and_swap. |
| 683 | D* expected_null = NULL; |
| 684 | return expected_null == cmpxchg(value, dest, expected_null, order); |
| 685 | } |
| 686 | |
| 687 | // Handle cmpxchg for integral and enum types. |
| 688 | // |
| 689 | // All the involved types must be identical. |
| 690 | template<typename T> |
| 691 | struct Atomic::CmpxchgImpl< |
| 692 | T, T, T, |
| 693 | typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type> |
| 694 | { |
| 695 | T operator()(T exchange_value, T volatile* dest, T compare_value, |
| 696 | atomic_memory_order order) const { |
| 697 | // Forward to the platform handler for the size of T. |
| 698 | return PlatformCmpxchg<sizeof(T)>()(exchange_value, |
| 699 | dest, |
| 700 | compare_value, |
| 701 | order); |
| 702 | } |
| 703 | }; |
| 704 | |
| 705 | // Handle cmpxchg for pointer types. |
| 706 | // |
| 707 | // The destination's type and the compare_value type must be the same, |
| 708 | // ignoring cv-qualifiers; we don't care about the cv-qualifiers of |
| 709 | // the compare_value. |
| 710 | // |
| 711 | // The exchange_value must be implicitly convertible to the |
| 712 | // destination's type; it must be type-correct to store the |
| 713 | // exchange_value in the destination. |
| 714 | template<typename T, typename D, typename U> |
| 715 | struct Atomic::CmpxchgImpl< |
| 716 | T*, D*, U*, |
| 717 | typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value && |
| 718 | IsSame<typename RemoveCV<D>::type, |
| 719 | typename RemoveCV<U>::type>::value>::type> |
| 720 | { |
| 721 | D* operator()(T* exchange_value, D* volatile* dest, U* compare_value, |
| 722 | atomic_memory_order order) const { |
| 723 | // Allow derived to base conversion, and adding cv-qualifiers. |
| 724 | D* new_value = exchange_value; |
| 725 | // Don't care what the CV qualifiers for compare_value are, |
| 726 | // but we need to match D* when calling platform support. |
| 727 | D* old_value = const_cast<D*>(compare_value); |
| 728 | return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order); |
| 729 | } |
| 730 | }; |
| 731 | |
| 732 | // Handle cmpxchg for types that have a translator. |
| 733 | // |
| 734 | // All the involved types must be identical. |
| 735 | // |
| 736 | // This translates the original call into a call on the decayed |
| 737 | // arguments, and returns the recovered result of that translated |
| 738 | // call. |
| 739 | template<typename T> |
| 740 | struct Atomic::CmpxchgImpl< |
| 741 | T, T, T, |
| 742 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
| 743 | { |
| 744 | T operator()(T exchange_value, T volatile* dest, T compare_value, |
| 745 | atomic_memory_order order) const { |
| 746 | typedef PrimitiveConversions::Translate<T> Translator; |
| 747 | typedef typename Translator::Decayed Decayed; |
| 748 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed)); |
| 749 | return Translator::recover( |
| 750 | cmpxchg(Translator::decay(exchange_value), |
| 751 | reinterpret_cast<Decayed volatile*>(dest), |
| 752 | Translator::decay(compare_value), |
| 753 | order)); |
| 754 | } |
| 755 | }; |
| 756 | |
| 757 | template<typename Type, typename Fn, typename T> |
| 758 | inline T Atomic::cmpxchg_using_helper(Fn fn, |
| 759 | T exchange_value, |
| 760 | T volatile* dest, |
| 761 | T compare_value) { |
| 762 | STATIC_ASSERT(sizeof(Type) == sizeof(T)); |
| 763 | return PrimitiveConversions::cast<T>( |
| 764 | fn(PrimitiveConversions::cast<Type>(exchange_value), |
| 765 | reinterpret_cast<Type volatile*>(dest), |
| 766 | PrimitiveConversions::cast<Type>(compare_value))); |
| 767 | } |
| 768 | |
| 769 | template<typename T> |
| 770 | inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value, |
| 771 | T volatile* dest, |
| 772 | T compare_value, |
| 773 | atomic_memory_order order) const { |
| 774 | STATIC_ASSERT(sizeof(T) == sizeof(uint8_t)); |
| 775 | uint8_t canon_exchange_value = exchange_value; |
| 776 | uint8_t canon_compare_value = compare_value; |
| 777 | volatile uint32_t* aligned_dest |
| 778 | = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t))); |
| 779 | size_t offset = pointer_delta(dest, aligned_dest, 1); |
| 780 | uint32_t cur = *aligned_dest; |
| 781 | uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur); |
| 782 | |
| 783 | // The byte in our local copy may not be what we are looking for, so force |
| 784 | // it to the compare value so the initial cmpxchg fails if memory differs. |
| 785 | cur_as_bytes[offset] = canon_compare_value; |
| 786 | |
| 787 | // always execute a real cmpxchg so that we get the required memory |
| 788 | // barriers even on initial failure |
| 789 | do { |
| 790 | // value to swap in matches current value ... |
| 791 | uint32_t new_value = cur; |
| 792 | // ... except for the one byte we want to update |
| 793 | reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value; |
| 794 | |
| 795 | uint32_t res = cmpxchg(new_value, aligned_dest, cur, order); |
| 796 | if (res == cur) break; // success |
| 797 | |
| 798 | // at least one byte in the int changed value, so update |
| 799 | // our view of the current int |
| 800 | cur = res; |
| 801 | // if our byte is still as cur we loop and try again |
| 802 | } while (cur_as_bytes[offset] == canon_compare_value); |
| 803 | |
| 804 | return PrimitiveConversions::cast<T>(cur_as_bytes[offset]); |
| 805 | } |
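|  | |
|  | // For example (illustrative): if dest is aligned_dest + 3, then offset is 3 |
|  | // and only byte 3 of the surrounding 4-byte word is replaced; the word-wide |
|  | // cmpxchg still detects concurrent updates to the other three bytes and the |
|  | // loop retries with the newly observed word. |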
| 806 | |
| 807 | // Handle xchg for integral and enum types. |
| 808 | // |
| 809 | // All the involved types must be identical. |
| 810 | template<typename T> |
| 811 | struct Atomic::XchgImpl< |
| 812 | T, T, |
| 813 | typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type> |
| 814 | { |
| 815 | T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const { |
| 816 | // Forward to the platform handler for the size of T. |
| 817 | return PlatformXchg<sizeof(T)>()(exchange_value, dest, order); |
| 818 | } |
| 819 | }; |
| 820 | |
| 821 | // Handle xchg for pointer types. |
| 822 | // |
| 823 | // The exchange_value must be implicitly convertible to the |
| 824 | // destination's type; it must be type-correct to store the |
| 825 | // exchange_value in the destination. |
| 826 | template<typename T, typename D> |
| 827 | struct Atomic::XchgImpl< |
| 828 | T*, D*, |
| 829 | typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type> |
| 830 | { |
| 831 | D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const { |
| 832 | // Allow derived to base conversion, and adding cv-qualifiers. |
| 833 | D* new_value = exchange_value; |
| 834 | return PlatformXchg<sizeof(D*)>()(new_value, dest, order); |
| 835 | } |
| 836 | }; |
| 837 | |
| 838 | // Handle xchg for types that have a translator. |
| 839 | // |
| 840 | // All the involved types must be identical. |
| 841 | // |
| 842 | // This translates the original call into a call on the decayed |
| 843 | // arguments, and returns the recovered result of that translated |
| 844 | // call. |
| 845 | template<typename T> |
| 846 | struct Atomic::XchgImpl< |
| 847 | T, T, |
| 848 | typename EnableIf<PrimitiveConversions::Translate<T>::value>::type> |
| 849 | { |
| 850 | T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const { |
| 851 | typedef PrimitiveConversions::Translate<T> Translator; |
| 852 | typedef typename Translator::Decayed Decayed; |
| 853 | STATIC_ASSERT(sizeof(T) == sizeof(Decayed)); |
| 854 | return Translator::recover( |
| 855 | xchg(Translator::decay(exchange_value), |
| 856 | reinterpret_cast<Decayed volatile*>(dest), |
| 857 | order)); |
| 858 | } |
| 859 | }; |
| 860 | |
| 861 | template<typename Type, typename Fn, typename T> |
| 862 | inline T Atomic::xchg_using_helper(Fn fn, |
| 863 | T exchange_value, |
| 864 | T volatile* dest) { |
| 865 | STATIC_ASSERT(sizeof(Type) == sizeof(T)); |
| 866 | return PrimitiveConversions::cast<T>( |
| 867 | fn(PrimitiveConversions::cast<Type>(exchange_value), |
| 868 | reinterpret_cast<Type volatile*>(dest))); |
| 869 | } |
| 870 | |
| 871 | template<typename T, typename D> |
| 872 | inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) { |
| 873 | return XchgImpl<T, D>()(exchange_value, dest, order); |
| 874 | } |
| 875 | |
| 876 | #endif // SHARE_RUNTIME_ATOMIC_HPP |
| 877 | |