/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "runtime/atomic.hpp"

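// The generic barrier. The fast path predicate checks the loaded address
// and, if it passes, the oop is returned as-is. Otherwise the slow path
// produces a good address, which is then installed back into the field
// (self healing) with a CAS so that subsequent loads take the fast path.
// For example, the load barrier instantiates this template as:
//
//   barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);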
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  uintptr_t addr = ZOop::to_address(o);

retry:
  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Self heal, but only if the address was actually updated by the slow path,
  // which might not be the case, e.g. when marking through an already good oop.
  if (p != NULL && good_addr != addr) {
    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. If this oop was updated by a
      // weak barrier the new oop might not be good, in which case we need
      // to re-apply this barrier.
      addr = prev_addr;
      goto retry;
    }
  }

  return ZOop::from_address(good_addr);
}

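// The weak barrier returns a good oop to the caller, but self heals the
// field with the remapped (weak good) address, since weak barriers never
// mark oops. A null address returned from the slow path means resurrection
// is blocked, in which case the field is left untouched for the reference
// processor to clear.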
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  uintptr_t good_addr = slow_path(addr);

  // Self heal unless the address returned from the slow path is null,
  // in which case resurrection was blocked and we must let the reference
  // processor clear the oop. Mutators are not allowed to clear oops in
  // these cases, since that would be similar to calling Reference.clear(),
  // which would make the reference non-discoverable or silently dropped
  // by the reference processor.
  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier so we always self heal with the remapped address.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. The new
      // oop is guaranteed to be weak good or null.
      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");

      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      good_addr = ZAddress::good_or_null(prev_addr);
    }
  }

  return ZOop::from_address(good_addr);
}

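// The root barrier only heals the root slot; it returns nothing, since
// the caller already holds the oop. Roots are healed with a plain store
// instead of a CAS, which is safe for the reasons given below.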
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, or under a lock,
  // which ensures we are never racing with mutators modifying roots while
  // we are healing them. It's also safe in case multiple GC threads try
  // to heal the same root if it is aligned, since they would always heal
  // the root in the same way and it does not matter in which order it
  // happens. For misaligned oops, there needs to be mutual exclusion.
  *p = ZOop::from_address(good_addr);
}

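//
// Fast paths
//
// These predicates are the fast path checks plugged into the barrier
// templates above. Returning true means the loaded address is already
// in an acceptable state and the slow path can be skipped.
//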
inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
  return ZAddress::is_null(addr);
}

inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}

inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}

inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
  const bool is_blocked = ZResurrection::is_blocked();

  // Reload oop after checking the resurrection blocked state. This is
  // done to prevent a race where we first load an oop, which is logically
  // null but not yet cleared, then this oop is cleared by the reference
  // processor and resurrection is unblocked. At this point the mutator
  // would see the unblocked state and pass this invalid oop through the
  // normal barrier path, which would incorrectly try to mark this oop.
  if (p != NULL) {
    // First assign to reloaded_o to avoid compiler warning about
    // implicit dereference of volatile oop.
    const oop reloaded_o = *p;
    *o = reloaded_o;
  }

  return is_blocked;
}

//
// Load barrier
//
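// The load barrier is applied when loading an oop from the heap. It
// returns a good oop and, when given the field address, heals the field
// so that subsequent loads take the fast path.
//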
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = *p;
  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    load_barrier_on_oop_field(p);
  }
}

inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

//
// Weak load barrier
//
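// The weak load barrier never marks the loaded object, so it can be used
// to inspect a weakly reachable object without keeping it alive. Fields
// are healed with the weak good (remapped) address.
//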
inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
  assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
  const oop o = *p;
  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}

inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_phantom_oop(o) != NULL;
}

//
// Keep alive barrier
//
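// The keep alive barrier is applied when a weakly reachable object must
// be kept alive, and is only valid while resurrection is blocked.
//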
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

//
// Mark barrier
//
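// The mark barrier is applied by GC worker threads during marking. The
// finalizable flag selects between the regular and the finalizable
// marking slow path.
//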
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  // The fast path only checks for null since the GC worker
  // threads doing marking want to mark through good oops.
  const oop o = *p;

  if (finalizable) {
    barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
  } else {
    barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
  }
}

inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    mark_barrier_on_oop_field(p, finalizable);
  }
}

inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}

//
// Relocate barrier
//
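// The relocate barrier heals root oops with the relocated (good) address
// returned by the slow path.
//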
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}

#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP