/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_BIASEDLOCKING_HPP
#define SHARE_RUNTIME_BIASEDLOCKING_HPP

#include "runtime/handles.hpp"
#include "utilities/growableArray.hpp"

// This class describes operations to implement Store-Free Biased
// Locking. The high-level properties of the scheme are similar to
// IBM's lock reservation, Dice-Moir-Scherer QR locks, and other biased
// locking mechanisms. The principal difference is in the handling of
// recursive locking, which is how this technique achieves a more
// efficient fast path than those other schemes.
//
// The basic observation is that in HotSpot's current fast locking
// scheme, recursive locking (in the fast path) causes no update to
// the object header. The recursion is described simply by stack
// records containing a specific value (NULL). Only the last unlock by
// a given thread causes an update to the object header.
//
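// A minimal sketch of that fast path, using hypothetical helper names
// (the real sequences are emitted by the interpreter and compiler
// macro assemblers):
//
//   BasicLock* lock = ...;               // lock record in the frame
//   markOop mark = obj->mark();
//   if (mark->is_neutral()) {
//     lock->set_displaced_header(mark);  // save the unlocked header
//     // ... CAS the lock record's address into the object header ...
//   } else if (stack_lock_owned_by_self(obj, thread)) {  // hypothetical
//     lock->set_displaced_header(NULL);  // recursive: no header update
//   }
//   // On unlock, only a non-NULL displaced header is written back.
//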
// This observation, coupled with the fact that HotSpot only compiles
// methods for which monitor matching is obeyed (and which therefore
// cannot throw IllegalMonitorStateException), implies that we can
// completely eliminate modifications to the object header for
// recursive locking in compiled code, and can perform equivalent
// recursion checks (throwing IllegalMonitorStateException as needed)
// in the interpreter with little or no impact on the performance of
// the fast path.
//
// The basic algorithm is as follows (see below for more details). A
// pattern in the low three bits is reserved in the object header to
// indicate whether biasing of a given object's lock is currently
// being done or is allowed at all. If the bias pattern is present,
// the contents of the rest of the header are either the JavaThread*
// of the thread to which the lock is biased, or NULL, indicating that
// the lock is "anonymously biased". The first thread which locks an
// anonymously biased object biases the lock toward that thread. If
// another thread subsequently attempts to lock the same object, the
// bias is revoked.
//
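// For reference, the biased variant of the 64-bit mark word layout
// documented in markOop.hpp, and a sketch of the bit test it enables:
//
//   [JavaThread*:54 | epoch:2 | unused:1 | age:4 | biased_lock:1 | lock:2]
//
//   enum { biased_lock_pattern = 0x5 };            // low bits 0b101
//   bool has_bias_pattern(uintptr_t mark) {        // sketch of the test
//     return (mark & 0x7) == biased_lock_pattern;
//   }
//
// A header carrying the bias pattern but zero thread bits is the
// "anonymously biased" state described above.
//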
// Because there are no updates to the object header at all during
// recursive locking while the lock is biased, the biased lock entry
// code is simply a test of the object header's value. If this test
// succeeds, the lock has been acquired by the thread. If this test
// fails, a bit test is done to see whether the bias bit is still
// set. If not, we fall back to HotSpot's original CAS-based locking
// scheme. If it is set, we attempt to CAS in a bias toward this
// thread. The latter operation is expected to be the rarest operation
// performed on these locks. We optimistically expect the biased lock
// entry to hit most of the time, and want the CAS-based fallthrough
// to occur quickly in the situations where the bias has been revoked.
//
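// Condensed into pseudocode, the entry sequence is (hypothetical
// names; the real emission is in the platform macro assemblers):
//
//   uintptr_t mark = (uintptr_t)obj->mark();
//   uintptr_t anticipated = (uintptr_t)thread | current_epoch | 0x5;
//   if (mark == anticipated) {
//     // biased toward this thread: lock acquired with no store
//   } else if ((mark & 0x7) != 0x5) {
//     // bias bit clear: fall back to CAS-based stack locking
//   } else {
//     // bias pattern present but owner/epoch mismatch: try to CAS a
//     // bias toward this thread; failure enters the slow path, which
//     // may revoke the existing bias
//   }
//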
// Revocation of the lock's bias is fairly straightforward. We want to
// restore the object's header and stack-based BasicObjectLocks and
// BasicLocks to the state they would have been in had the object been
// locked by HotSpot's usual fast locking scheme. To do this, we bring
// the system to a safepoint and walk the stack of the thread toward
// which the lock is biased. We find all of the lock records on the
// stack corresponding to this object, in particular the first /
// "highest" record. We fill in the highest lock record with the
// object's displaced header (which is a well-known value given that
// we don't maintain an identity hash nor age bits for the object
// while it's in the biased state) and all other lock records with 0,
// the value for recursive locks. When the safepoint is released, the
// formerly-biased thread and all other threads revert to HotSpot's
// CAS-based locking.
//
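// A sketch of that walk (hypothetical names; the real implementation
// lives in biasedLocking.cpp):
//
//   // Lock records are found youngest frame first, so the last one
//   // collected is the first/"highest" record.
//   GrowableArray<BasicLock*>* locks = lock_records_for(thread, obj);
//   for (int i = 0; i < locks->length() - 1; i++) {
//     locks->at(i)->set_displaced_header(NULL);        // recursive
//   }
//   BasicLock* highest = locks->at(locks->length() - 1);
//   highest->set_displaced_header(unbiased_prototype); // well-known value
//   obj->set_mark(markOopDesc::encode(highest));       // now stack-locked
//
//   // (If no lock records are found, the object is biased but not
//   // currently locked, and its header is simply reset to the
//   // unbiased prototype.)
//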
// This scheme cannot handle transfers of biases of single objects
// from thread to thread efficiently, but it can handle bulk transfers
// of such biases, which is a usage pattern showing up in some
// applications and benchmarks. We implement "bulk rebias" and "bulk
// revoke" operations using a "bias epoch" on a per-data-type basis.
// If too many bias revocations are occurring for a particular data
// type, the bias epoch for the data type is incremented at a
// safepoint, effectively meaning that all previous biases are
// invalid. The fast path locking case checks for an invalid epoch in
// the object header and attempts to rebias the object with a CAS if
// found, avoiding safepoints or bulk heap sweeps (the latter of which
// was used in a prior version of this algorithm and did not scale
// well). If too many bias revocations persist, biasing is completely
// disabled for the data type by resetting the prototype header to the
// unbiased markOop. The fast-path locking code checks to see whether
// the instance's bias pattern differs from the prototype header's
// and, if so, revokes the bias without reaching a safepoint or,
// again, performing a bulk heap sweep.
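//
// The epoch test reduces to comparing two bit fields of the mark word
// and the klass's prototype header (sketch; shift/mask names follow
// markOop.hpp):
//
//   uintptr_t proto = (uintptr_t)k->prototype_header();
//   bool epoch_expired = ((mark  >> epoch_shift) & epoch_mask) !=
//                        ((proto >> epoch_shift) & epoch_mask);
//   if (epoch_expired) {
//     // stale bias: CAS in a fresh bias toward this thread rather
//     // than taking the revocation slow path
//   }
//   if ((proto & 0x7) != biased_lock_pattern) {
//     // biasing disabled for this type: revoke this instance's bias
//     // with a simple CAS back to the unbiased header
//   }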

// Biased locking counters
class BiasedLockingCounters {
 private:
  int _total_entry_count;
  int _biased_lock_entry_count;
  int _anonymously_biased_lock_entry_count;
  int _rebiased_lock_entry_count;
  int _revoked_lock_entry_count;
  int _fast_path_entry_count;
  int _slow_path_entry_count;

 public:
  BiasedLockingCounters() :
    _total_entry_count(0),
    _biased_lock_entry_count(0),
    _anonymously_biased_lock_entry_count(0),
    _rebiased_lock_entry_count(0),
    _revoked_lock_entry_count(0),
    _fast_path_entry_count(0),
    _slow_path_entry_count(0) {}

  int slow_path_entry_count() const; // Compute this field if necessary

  int* total_entry_count_addr()                   { return &_total_entry_count; }
  int* biased_lock_entry_count_addr()             { return &_biased_lock_entry_count; }
  int* anonymously_biased_lock_entry_count_addr() { return &_anonymously_biased_lock_entry_count; }
  int* rebiased_lock_entry_count_addr()           { return &_rebiased_lock_entry_count; }
  int* revoked_lock_entry_count_addr()            { return &_revoked_lock_entry_count; }
  int* fast_path_entry_count_addr()               { return &_fast_path_entry_count; }
  int* slow_path_entry_count_addr()               { return &_slow_path_entry_count; }

  bool nonzero() const { return _total_entry_count > 0; }

  void print_on(outputStream* st) const;
  void print() const;
};
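
// The *_addr() accessors above exist so that generated code (and the
// BiasedLocking wrappers below) can bump the counters in place.
// Equivalent C++ (sketch):
//
//   BiasedLockingCounters* c = BiasedLocking::counters();
//   (*c->total_entry_count_addr())++;        // every monitor entry
//   (*c->biased_lock_entry_count_addr())++;  // biased fast-path hit
//   if (c->nonzero()) c->print();            // e.g. when the VM exits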


class BiasedLocking : AllStatic {
 private:
  static BiasedLockingCounters _counters;

 public:
  static int* total_entry_count_addr();
  static int* biased_lock_entry_count_addr();
  static int* anonymously_biased_lock_entry_count_addr();
  static int* rebiased_lock_entry_count_addr();
  static int* revoked_lock_entry_count_addr();
  static int* fast_path_entry_count_addr();
  static int* slow_path_entry_count_addr();

  enum Condition {
    NOT_BIASED = 1,
    BIAS_REVOKED = 2,
    BIAS_REVOKED_AND_REBIASED = 3
  };

  // This initialization routine should be called only once; it
  // schedules a PeriodicTask to turn on biased locking a few seconds
  // into the VM run to avoid startup time regressions
  static void init();

  // This provides a global switch for leaving biased locking disabled
  // for the first part of a run and enabling it later
  static bool enabled();

  // This should be called by JavaThreads to revoke the bias of an object
  static Condition revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS);

  // These do not allow rebiasing; they are used by deoptimization to
  // ensure that monitors on the stack can be migrated
  static void revoke(GrowableArray<Handle>* objs);
  static void revoke_at_safepoint(Handle obj);
  static void revoke_at_safepoint(GrowableArray<Handle>* objs);

  static void print_counters() { _counters.print(); }
  static BiasedLockingCounters* counters() { return &_counters; }

  // These routines are GC-related and should not be called by end
  // users. GCs that do not preserve mark words do not need to call
  // these routines.
  static void preserve_marks();
  static void restore_marks();
};
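
// Typical call shapes (sketch; real callers include the synchronizer
// and deoptimization code):
//
//   // A JavaThread that must lock obj but finds it biased elsewhere:
//   BiasedLocking::Condition cond =
//       BiasedLocking::revoke_and_rebias(obj, true /* attempt_rebias */,
//                                        THREAD);
//   if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
//     // obj is now biased toward the current thread
//   }
//
//   // Deoptimization, already at a safepoint, before migrating
//   // monitors off compiled frames:
//   BiasedLocking::revoke_at_safepoint(objects_to_revoke);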

#endif // SHARE_RUNTIME_BIASEDLOCKING_HPP