/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_BARRIERSET_HPP
#define SHARE_GC_SHARED_BARRIERSET_HPP

#include "gc/shared/barrierSetConfig.hpp"
#include "memory/memRegion.hpp"
#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/fakeRttiSupport.hpp"
#include "utilities/macros.hpp"

class BarrierSetAssembler;
class BarrierSetC1;
class BarrierSetC2;
class BarrierSetNMethod;
class JavaThread;

// This class provides the interface between a barrier implementation and
// the rest of the system.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;

  static BarrierSet* _barrier_set;

public:
  enum Name {
#define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
    FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
#undef BARRIER_SET_DECLARE_BS_ENUM
    UnknownBS
  };

protected:
  // Fake RTTI support.  For a derived class T to participate
  // - T must have a corresponding Name entry.
  // - GetName<T> must be specialized to return the corresponding Name
  //   entry.
  // - If T is a base class, the constructor must have a FakeRtti
  //   parameter and pass it up to its base class, with the tag set
  //   augmented with the corresponding Name entry.
  // - If T is a concrete class, the constructor must create a
  //   FakeRtti object whose tag set includes the corresponding Name
  //   entry, and pass it up to its base class.
  typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;
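  //
  // A minimal sketch of the two constructor conventions above, using
  // hypothetical classes FooBarrierSet (a base class) and BarBarrierSet
  // (a concrete class), each with a matching Name entry:
  //
  //   // Base class: augments the tag set handed up by a derived class.
  //   FooBarrierSet::FooBarrierSet(const FakeRtti& fake_rtti /* , ... */) :
  //     BarrierSet(/* ..., */ fake_rtti.add_tag(BarrierSet::FooBarrierSet)) {}
  //
  //   // Concrete class: creates the FakeRtti object with its own tag.
  //   BarBarrierSet::BarBarrierSet() :
  //     FooBarrierSet(FakeRtti(BarrierSet::BarBarrierSet) /* , ... */) {}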

private:
  FakeRtti _fake_rtti;
  BarrierSetAssembler* _barrier_set_assembler;
  BarrierSetC1* _barrier_set_c1;
  BarrierSetC2* _barrier_set_c2;
  BarrierSetNMethod* _barrier_set_nmethod;

public:
  // Metafunction mapping a class derived from BarrierSet to the
  // corresponding Name enum tag.
  template<typename T> struct GetName;

  // Metafunction mapping a Name enum type to the corresponding
  // class derived from BarrierSet.
  template<BarrierSet::Name T> struct GetType;
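  //
  // A sketch of the expected specializations, for a hypothetical concrete
  // class FooBarrierSet with Name entry FooBarrierSet:
  //
  //   template<> struct BarrierSet::GetName<FooBarrierSet> {
  //     static const BarrierSet::Name value = BarrierSet::FooBarrierSet;
  //   };
  //
  //   template<> struct BarrierSet::GetType<BarrierSet::FooBarrierSet> {
  //     typedef ::FooBarrierSet type;
  //   };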

  // Note: This is not presently the Name corresponding to the
  // concrete class of this object.
  BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }

  // Test whether this object is of the type corresponding to bsn.
  bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }

  // End of fake RTTI support.

protected:
  BarrierSet(BarrierSetAssembler* barrier_set_assembler,
             BarrierSetC1* barrier_set_c1,
             BarrierSetC2* barrier_set_c2,
             BarrierSetNMethod* barrier_set_nmethod,
             const FakeRtti& fake_rtti) :
    _fake_rtti(fake_rtti),
    _barrier_set_assembler(barrier_set_assembler),
    _barrier_set_c1(barrier_set_c1),
    _barrier_set_c2(barrier_set_c2),
    _barrier_set_nmethod(barrier_set_nmethod) {}
  ~BarrierSet() { }

  template <class BarrierSetAssemblerT>
  static BarrierSetAssembler* make_barrier_set_assembler() {
    return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
  }

  template <class BarrierSetC1T>
  static BarrierSetC1* make_barrier_set_c1() {
    return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
  }

  template <class BarrierSetC2T>
  static BarrierSetC2* make_barrier_set_c2() {
    return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
  }
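  //
  // These factories return NULL when the corresponding compiler or CPU
  // support is not built in. A sketch of their typical use, assuming a
  // hypothetical FooBarrierSet and its compiler support classes:
  //
  //   FooBarrierSet::FooBarrierSet() :
  //     BarrierSet(make_barrier_set_assembler<FooBarrierSetAssembler>(),
  //                make_barrier_set_c1<FooBarrierSetC1>(),
  //                make_barrier_set_c2<FooBarrierSetC2>(),
  //                NULL /* barrier_set_nmethod */,
  //                FakeRtti(BarrierSet::FooBarrierSet)) {}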

public:
  // Support for optimizing compilers to call the barrier set on slow-path
  // allocations that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path
  // allocation is redone until it succeeds. This can e.g. prevent slow-path
  // allocations from ending up in the old generation.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
  virtual void on_thread_create(Thread* thread) {}
  virtual void on_thread_destroy(Thread* thread) {}

  // These perform BarrierSet-related initialization/cleanup before the thread
  // is added to or removed from the corresponding set of threads. The
  // argument thread is the current thread. These are called either holding
  // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
  // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
  // caller. That locking ensures the operation is "atomic" with the list
  // modification with respect to operations that hold the
  // NonJavaThreadsList_lock and either also hold the Threads_lock or are at
  // a safepoint.
  virtual void on_thread_attach(Thread* thread) {}
  virtual void on_thread_detach(Thread* thread) {}

  virtual void make_parsable(JavaThread* thread) {}

#ifdef CHECK_UNHANDLED_OOPS
  virtual bool oop_equals_operator_allowed() { return true; }
#endif

public:
  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;

  static BarrierSet* barrier_set() { return _barrier_set; }
  static void set_barrier_set(BarrierSet* barrier_set);

  BarrierSetAssembler* barrier_set_assembler() {
    assert(_barrier_set_assembler != NULL, "should be set");
    return _barrier_set_assembler;
  }

  BarrierSetC1* barrier_set_c1() {
    assert(_barrier_set_c1 != NULL, "should be set");
    return _barrier_set_c1;
  }

  BarrierSetC2* barrier_set_c2() {
    assert(_barrier_set_c2 != NULL, "should be set");
    return _barrier_set_c2;
  }

  BarrierSetNMethod* barrier_set_nmethod() {
    return _barrier_set_nmethod;
  }

  // The AccessBarrier of a BarrierSet subclass is called by the Access API
  // (cf. oops/access.hpp) to perform decorated accesses. GC implementations
  // may override these default access operations by declaring an
  // AccessBarrier class in their BarrierSet. Their accessors will then be
  // automatically resolved at runtime.
  //
  // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
  // the following steps should be taken:
  // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
  // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
  // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
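  //
  // A sketch (with hypothetical names) of what the overriding class might
  // look like once the steps above are done:
  //
  //   class FooBarrierSet: public BarrierSet {
  //     // ...
  //   public:
  //     template <DecoratorSet decorators, typename BarrierSetT = FooBarrierSet>
  //     class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
  //       typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
  //     public:
  //       template <typename T>
  //       static void oop_store_in_heap(T* addr, oop value) {
  //         // GC-specific pre/post write barrier work goes around the raw store.
  //         Raw::oop_store_in_heap(addr, value);
  //       }
  //     };
  //   };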
  template <DecoratorSet decorators, typename BarrierSetT>
  class AccessBarrier: protected RawAccessBarrier<decorators> {
  private:
    typedef RawAccessBarrier<decorators> Raw;

  public:
    // Primitive heap accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // not an oop_* overload, and the barrier strength is AS_NORMAL.
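    // For example (illustrative only), a call such as
    // HeapAccess<>::store_at(obj, offset, (jint)0) resolves to
    // store_in_heap_at below.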
    template <typename T>
    static T load_in_heap(T* addr) {
      return Raw::template load<T>(addr);
    }

    template <typename T>
    static T load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template load_at<T>(base, offset);
    }

    template <typename T>
    static void store_in_heap(T* addr, T value) {
      Raw::store(addr, value);
    }

    template <typename T>
    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
      Raw::store_at(base, offset, value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
      return Raw::atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap(T new_value, T* addr) {
      return Raw::atomic_xchg(new_value, addr);
    }

    template <typename T>
    static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
      return Raw::atomic_xchg_at(new_value, base, offset);
    }

    template <typename T>
    static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                  size_t length) {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }

    // Heap oop accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template oop_load_at<oop>(base, offset);
    }

    template <typename T>
    static void oop_store_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
      Raw::oop_store_at(base, offset, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
      return Raw::oop_atomic_xchg_at(new_value, base, offset);
    }

    template <typename T>
    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                      size_t length);

    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    // Clone barrier support
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }

    static oop resolve(oop obj) {
      return Raw::resolve(obj);
    }

    static bool equals(oop o1, oop o2) {
      return Raw::equals(o1, o2);
    }
  };
};

template<typename T>
inline T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
  return static_cast<T*>(bs);
}
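
// A sketch of typical use (FooBarrierSet is hypothetical); the downcast is
// checked against the fake RTTI tag set in debug builds:
//
//   FooBarrierSet* bs = barrier_set_cast<FooBarrierSet>(BarrierSet::barrier_set());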

#endif // SHARE_GC_SHARED_BARRIERSET_HPP