1 | /* |
2 | * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_RUNTIME_SYNCHRONIZER_HPP |
26 | #define SHARE_RUNTIME_SYNCHRONIZER_HPP |
27 | |
28 | #include "memory/padded.hpp" |
29 | #include "oops/markOop.hpp" |
30 | #include "runtime/basicLock.hpp" |
31 | #include "runtime/handles.hpp" |
32 | #include "runtime/perfData.hpp" |
33 | |
34 | class ObjectMonitor; |
35 | class ThreadsList; |
36 | |
37 | struct DeflateMonitorCounters { |
38 | int nInuse; // currently associated with objects |
39 | int nInCirculation; // extant |
40 | int nScavenged; // reclaimed (global and per-thread) |
41 | int perThreadScavenged; // per-thread scavenge total |
42 | double perThreadTimes; // per-thread scavenge times |
43 | }; |
44 | |
45 | class ObjectSynchronizer : AllStatic { |
46 | friend class VMStructs; |
47 | public: |
48 | typedef enum { |
49 | owner_self, |
50 | owner_none, |
51 | owner_other |
52 | } LockOwnership; |
53 | |
54 | typedef enum { |
55 | inflate_cause_vm_internal = 0, |
56 | inflate_cause_monitor_enter = 1, |
57 | inflate_cause_wait = 2, |
58 | inflate_cause_notify = 3, |
59 | inflate_cause_hash_code = 4, |
60 | inflate_cause_jni_enter = 5, |
61 | inflate_cause_jni_exit = 6, |
62 | inflate_cause_nof = 7 // Number of causes |
63 | } InflateCause; |
64 | |
65 | // exit must be implemented non-blocking, since the compiler cannot easily handle |
66 | // deoptimization at monitor exit. Hence, it does not take a Handle argument. |
67 | |
  // These are the full versions of monitor enter and exit. We did not
  // name them enter() and exit() in order to make sure users are aware
  // of their performance and semantic differences. They are normally
  // used by ObjectLocker etc. The interpreter and compiler use
  // assembly copies of these routines. Please keep them synchronized.
  //
  // The attempt_rebias flag is used by the UseBiasedLocking implementation.
75 | static void fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, |
76 | TRAPS); |
77 | static void fast_exit(oop obj, BasicLock* lock, Thread* THREAD); |
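  //
  // Illustrative sketch (not part of this interface): a balanced-locking
  // caller such as ObjectLocker pairs these roughly as follows, where 'obj'
  // is a Handle and '_lock' is a stack-allocated BasicLock:
  //
  //   ObjectSynchronizer::fast_enter(obj, &_lock, /*attempt_rebias*/ false, THREAD);
  //   ...   // critical section
  //   ObjectSynchronizer::fast_exit(obj(), &_lock, THREAD);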
78 | |
  // WARNING: These are ONLY for the slow cases. They should be used
  // only after the fast cases have failed. Calling them without a
  // preceding fast-case check may cause a fatal error.
82 | static void slow_enter(Handle obj, BasicLock* lock, TRAPS); |
83 | static void slow_exit(oop obj, BasicLock* lock, Thread* THREAD); |
84 | |
  // Used only to handle JNI locks or other unmatched monitor enter/exit.
  // Internally they use the heavyweight monitor.
87 | static void jni_enter(Handle obj, TRAPS); |
88 | static void jni_exit(oop obj, Thread* THREAD); |
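  //
  // Illustrative sketch: the JNI MonitorEnter/MonitorExit entry points reach
  // these after resolving the jobject (simplified, locals are hypothetical):
  //
  //   Handle h_obj(THREAD, JNIHandles::resolve_non_null(jobj));
  //   ObjectSynchronizer::jni_enter(h_obj, THREAD);
  //   ...
  //   ObjectSynchronizer::jni_exit(h_obj(), THREAD);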
89 | |
  // Handle all interpreter, compiler and JNI cases
91 | static int wait(Handle obj, jlong millis, TRAPS); |
92 | static void notify(Handle obj, TRAPS); |
93 | static void notifyall(Handle obj, TRAPS); |
94 | |
95 | static bool quick_notify(oopDesc* obj, Thread* Self, bool All); |
96 | static bool quick_enter(oop obj, Thread* Self, BasicLock* Lock); |
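  //
  // Illustrative sketch: Object.wait()/notify()/notifyAll() funnel into the
  // three calls above from the JVM entry points, e.g. (simplified):
  //
  //   Handle h_obj(THREAD, obj);
  //   ObjectSynchronizer::wait(h_obj, millis, THREAD);
  //
  // quick_notify()/quick_enter() are fast paths used by compiled code; they
  // return true if the operation completed without needing the slow path.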
97 | |
98 | // Special internal-use-only method for use by JVM infrastructure |
99 | // that needs to wait() on a java-level object but that can't risk |
  // throwing unexpected InterruptedExceptions.
101 | static void waitUninterruptibly(Handle obj, jlong Millis, Thread * THREAD); |
102 | |
  // Used by class loading to release the class loader object lock,
  // wait on an internal lock, and then reclaim the original lock
  // with its original recursion count.
106 | static intptr_t complete_exit(Handle obj, TRAPS); |
107 | static void reenter (Handle obj, intptr_t recursion, TRAPS); |
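  //
  // Illustrative sketch: class loading can temporarily give up a possibly
  // recursive lock and later restore it (simplified; the wait on the
  // internal lock is elided):
  //
  //   intptr_t recursions = ObjectSynchronizer::complete_exit(h_loader, THREAD);
  //   ...   // wait on an internal lock while another thread finishes loading
  //   ObjectSynchronizer::reenter(h_loader, recursions, THREAD);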
108 | |
  // Thread-specific and global ObjectMonitor free-list accessors
110 | static ObjectMonitor * omAlloc(Thread * Self); |
111 | static void omRelease(Thread * Self, ObjectMonitor * m, |
112 | bool FromPerThreadAlloc); |
113 | static void omFlush(Thread * Self); |
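  //
  // Roughly: inflate() obtains a free ObjectMonitor via omAlloc(), which
  // first tries the calling thread's local free list and then the global
  // gFreeList, allocating a new block of monitors if both are empty.
  // Monitors that lose an inflation race are returned with omRelease();
  // omFlush() hands a terminating thread's lists back to the global lists.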
114 | |
  // Inflate a lightweight monitor into a heavyweight monitor
116 | static ObjectMonitor* inflate(Thread * Self, oop obj, const InflateCause cause); |
117 | // This version is only for internal use |
118 | static void inflate_helper(oop obj); |
119 | static const char* inflate_cause_name(const InflateCause cause); |
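  //
  // Illustrative sketch: callers pass the reason for inflation so that it
  // can be reported by monitor inflation events/logging, e.g. (simplified):
  //
  //   ObjectMonitor* monitor =
  //     ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_wait);
  //   monitor->wait(millis, true /* interruptible */, THREAD);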
120 | |
121 | // Returns the identity hash value for an oop |
122 | // NOTE: It may cause monitor inflation |
123 | static intptr_t identity_hash_value_for(Handle obj); |
124 | static intptr_t FastHashCode(Thread * Self, oop obj); |
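  //
  // Illustrative sketch: System.identityHashCode() and the default
  // Object.hashCode() ultimately reach FastHashCode(), which returns the
  // hash already stored in the mark word (or monitor) when present and
  // installs a new one otherwise, possibly inflating the monitor
  // (see NOTE above):
  //
  //   intptr_t hash = ObjectSynchronizer::FastHashCode(THREAD, obj);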
125 | |
126 | // java.lang.Thread support |
127 | static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj); |
128 | static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj); |
129 | |
130 | static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj); |
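  //
  // Illustrative sketch: Thread.holdsLock(obj) maps onto
  // current_thread_holds_lock(), while other ownership queries use
  // query_lock_ownership()/get_lock_owner(), e.g. (simplified):
  //
  //   if (ObjectSynchronizer::current_thread_holds_lock(thread, h_obj)) { ... }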
131 | |
132 | // JNI detach support |
133 | static void release_monitors_owned_by_thread(TRAPS); |
134 | static void monitors_iterate(MonitorClosure* m); |
135 | |
  // GC: we currently use an aggressive monitor deflation policy;
  // basically we deflate all monitors that are not busy.
  // An adaptive, profile-based deflation policy could be used if needed.
139 | static void deflate_idle_monitors(DeflateMonitorCounters* counters); |
140 | static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters); |
141 | static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters); |
142 | static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters); |
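  //
  // Illustrative sketch: safepoint cleanup runs a full deflation cycle
  // roughly as follows (simplified):
  //
  //   DeflateMonitorCounters counters;
  //   ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
  //   ObjectSynchronizer::deflate_idle_monitors(&counters);   // global in-use list
  //   // for each JavaThread jt: deflate_thread_local_monitors(jt, &counters)
  //   ObjectSynchronizer::finish_deflate_idle_monitors(&counters);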
143 | |
  // For a given monitor list (global or per-thread), deflate idle monitors
145 | static int deflate_monitor_list(ObjectMonitor** listheadp, |
146 | ObjectMonitor** freeHeadp, |
147 | ObjectMonitor** freeTailp); |
148 | static bool deflate_monitor(ObjectMonitor* mid, oop obj, |
149 | ObjectMonitor** freeHeadp, |
150 | ObjectMonitor** freeTailp); |
151 | static bool is_cleanup_needed(); |
152 | static void oops_do(OopClosure* f); |
  // Process oops in thread-local used monitors
154 | static void thread_local_used_oops_do(Thread* thread, OopClosure* f); |
155 | |
156 | // debugging |
157 | static void audit_and_print_stats(bool on_exit); |
158 | static void chk_free_entry(JavaThread * jt, ObjectMonitor * n, |
159 | outputStream * out, int *error_cnt_p); |
160 | static void chk_global_free_list_and_count(outputStream * out, |
161 | int *error_cnt_p); |
162 | static void chk_global_in_use_list_and_count(outputStream * out, |
163 | int *error_cnt_p); |
164 | static void chk_in_use_entry(JavaThread * jt, ObjectMonitor * n, |
165 | outputStream * out, int *error_cnt_p); |
166 | static void chk_per_thread_in_use_list_and_count(JavaThread *jt, |
167 | outputStream * out, |
168 | int *error_cnt_p); |
169 | static void chk_per_thread_free_list_and_count(JavaThread *jt, |
170 | outputStream * out, |
171 | int *error_cnt_p); |
172 | static void log_in_use_monitor_details(outputStream * out, bool on_exit); |
173 | static int log_monitor_list_counts(outputStream * out); |
174 | static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0; |
175 | |
176 | private: |
177 | friend class SynchronizerTest; |
178 | |
179 | enum { _BLOCKSIZE = 128 }; |
180 | // global list of blocks of monitors |
181 | static PaddedEnd<ObjectMonitor> * volatile gBlockList; |
182 | // global monitor free list |
183 | static ObjectMonitor * volatile gFreeList; |
  // global monitor in-use list. Used for moribund threads; the monitors
  // they inflated still need to be scanned for deflation.
186 | static ObjectMonitor * volatile gOmInUseList; |
187 | // count of entries in gOmInUseList |
188 | static int gOmInUseCount; |
189 | |
  // Process oops in all global used monitors (i.e. moribund threads' monitors)
191 | static void global_used_oops_do(OopClosure* f); |
192 | // Process oops in monitors on the given list |
193 | static void list_oops_do(ObjectMonitor* list, OopClosure* f); |
194 | |
195 | // Support for SynchronizerTest access to GVars fields: |
196 | static u_char* get_gvars_addr(); |
197 | static u_char* get_gvars_hcSequence_addr(); |
198 | static size_t get_gvars_size(); |
199 | static u_char* get_gvars_stwRandom_addr(); |
200 | }; |
201 | |
202 | // ObjectLocker enforces balanced locking and can never throw an |
203 | // IllegalMonitorStateException. However, a pending exception may |
204 | // have to pass through, and we must also be able to deal with |
205 | // asynchronous exceptions. The caller is responsible for checking |
206 | // the thread's pending exception if needed. |
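//
// Typical usage (illustrative): lock a Java-level object from VM code with
// automatic unlock at scope exit:
//
//   {
//     ObjectLocker ol(h_obj, THREAD);
//     ...   // h_obj is locked; ol.wait(THREAD) / ol.notify_all(THREAD) available
//   }       // unlocked in ~ObjectLocker()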
207 | class ObjectLocker : public StackObj { |
208 | private: |
209 | Thread* _thread; |
210 | Handle _obj; |
211 | BasicLock _lock; |
212 | bool _dolock; // default true |
213 | public: |
214 | ObjectLocker(Handle obj, Thread* thread, bool doLock = true); |
215 | ~ObjectLocker(); |
216 | |
217 | // Monitor behavior |
218 | void wait(TRAPS) { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever |
219 | void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); } |
220 | void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); } |
221 | // complete_exit gives up lock completely, returning recursion count |
222 | // reenter reclaims lock with original recursion count |
223 | intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, THREAD); } |
224 | void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); } |
225 | }; |
226 | |
227 | #endif // SHARE_RUNTIME_SYNCHRONIZER_HPP |
228 | |