/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

// Keep the parameter order consistent with the DTRACE_ENABLED variants above.
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround for dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list. Monitors inflated by moribund threads are
// moved here so they can still be scanned for deflation.
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount = 0;   // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden.  Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
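//
// A minimal sketch of the intended fast/slow contract, assuming a
// hypothetical runtime entry point slow_notify() (not a real HotSpot
// function) that performs the thread-state transition first:
//
//   void runtime_notify(oopDesc* obj, Thread* self, bool all) {
//     if (ObjectSynchronizer::quick_notify(obj, self, all)) {
//       return;                      // satisfied on the fast-path
//     }
//     slow_notify(obj, self, all);   // transition state, take the slow-path
//   }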

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compilers use
// assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code. Simply using the
// heavyweight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
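//
// A minimal sketch of that pattern, assuming Handles lock1 and lock2 and a
// JavaThread THREAD; unlock()/lock() stand in for the monitorexit/monitorenter
// performed by the caller (hypothetical helpers, not HotSpot functions):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // (1)
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                       // (2)
//   unlock(lock2);                                                    // (3)
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                  // (4)
//   lock(lock2);                                                      // (5)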
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
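
// A minimal usage sketch for ObjectLocker (hypothetical VM-internal caller):
//
//   {
//     ObjectLocker ol(h_obj, THREAD);   // fast_enter() in the constructor
//     ... operate on the object while it is locked ...
//   }                                   // fast_exit() in the destructor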


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
}

// NOTE: see the comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};
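
// Equivalent manual layout, as a sketch assuming a 64-byte cache line
// (DEFINE_PAD_MINUS_SIZE expands to a char array of the residual size):
//
//   struct SharedGlobalsManual {
//     char pad0[64];                    // keep predecessors off our line
//     volatile int stwRandom;           // mostly-read pair shares a line
//     volatile int stwCycle;
//     char pad1[64 - 2 * sizeof(int)];  // fill out the rest of that line
//     volatile int hcSequence;          // hot RW word gets a line to itself
//     char pad2[64 - sizeof(int)];
//   };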

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses the global Park-Miller RNG.
    // On an MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  return value;
}
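
// A self-contained sketch of the xor-shift update used above (a standalone
// form with hypothetical names; the per-thread _hashStateX..W seeding is
// not shown):
//
//   static unsigned marsaglia_next(unsigned& x, unsigned& y,
//                                  unsigned& z, unsigned& w) {
//     unsigned t = x;
//     t ^= t << 11;
//     x = y; y = z; z = w;
//     w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//     return w;
//   }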

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
    hash = temp->hash();
    if (hash != 0) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash != 0) {                  // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header in the BasicLock on a thread's stack
    // is strictly immutable. It CANNOT be changed in ANY case.
    // So we have to inflate the stack lock into an ObjectMonitor
    // even if the current thread owns the lock. The BasicLock on
    // a thread's stack can be asynchronously read by other threads
    // during an inflate() call so any change to that stack memory
    // may not propagate to other threads correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check it for a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the ObjectMonitor's header/dmw field
      // is to merge in the hash code. If someone adds a new usage
      // of the header/dmw field, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}
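
// A minimal walking sketch using next() (hypothetical caller):
//
//   for (PaddedEnd<ObjectMonitor>* b = OrderAccess::load_acquire(&gBlockList);
//        b != NULL; b = next(b)) {
//     // b is a block header; its monitors are b[1] .. b[_BLOCKSIZE - 1]
//   }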

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}
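
// Worked example: with gMonitorPopulation == 10000 and gMonitorFreeCount
// == 500, monitor_usage == (9500 * 100) / 10000 == 95. Assuming the default
// MonitorUsedDeflationThreshold of 90, this returns true and a cleanup
// (deflation) pass will be requested.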

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop.  In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.


static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be enqueued
    // and posted to the VMThread, and has a lifespan longer than that of this
    // activation record.  The VMThread will delete the op when completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  stringStream ss;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      guarantee(m->object() == NULL, "invariant");
      m->FreeNext = Self->omInUseList;
      Self->omInUseList = m;
      Self->omInUseCount++;
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc(1)");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its successor,
    // forming the singly linked free list, and the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc(2)");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}
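
// Resulting layout of a freshly formatted block (a sketch; N == _BLOCKSIZE):
//
//   temp[0]    -- reserved block header: object == CHAINMARKER,
//                 FreeNext -> previous gBlockList head
//   temp[1]    -- FreeNext -> temp[2]
//   ...
//   temp[N-1]  -- FreeNext -> previous gFreeList head
//
// so each new block adds one link to gBlockList and N-1 monitors to gFreeList.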

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease() is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->header() == NULL, "invariant");
  guarantee(m->object() == NULL, "invariant");
  stringStream ss;
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
            "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
            m->_recursions);
  // Remove from omInUseList
  if (fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the deflate_idle_monitors scavenge operator. In particular,
// this ensures that the thread's monitors are scanned by a GC safepoint,
// either via Thread::oops_do() (if safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away. Set 'tail' to the last per-thread free
    // monitor which will be linked to gFreeList below under the gListLock.
    stringStream ss;
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(tail != NULL, "invariant");
    assert(Self->omFreeCount == tally, "free-count off");
    Self->omFreeList = NULL;
    Self->omFreeCount = 0;
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    guarantee(inUseTail != NULL, "invariant");
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseList = NULL;
    Self->omInUseCount = 0;
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((tally != 0 || inUseTally != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("omFlush: jt=" INTPTR_FORMAT ", free_monitor_tally=%d"
                 ", in_use_monitor_tally=%d" ", omFreeProvision=%d",
                 p2i(Self), tally, inUseTally, Self->omFreeProvision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return;
  }
  inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this
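    //
    // For reference, a sketch of the low-order mark word bits that
    // distinguish these states (per markOop.hpp):
    //
    //   [ptr             | 00]  stack-locked: ptr is the owner's BasicLock
    //   [header          | 01]  neutral: unlocked header, may carry a hash
    //   [ptr             | 10]  inflated: ptr is the ObjectMonitor
    //   [ptr             | 11]  marked: used by GC
    //   [JavaThread* ... |101]  biased toward the given thread
    //   all zero bits          INFLATING: transient busy state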

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      markOop dmw = inf->header();
      assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().
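    //
    // In outline, omAlloc() draws from three sources, trying each in turn
    // (its definition earlier in this file has the details):
    //   1. Pop the head of Self's thread-local omFreeList -- no lock needed.
    //   2. Under gListLock, move up to omFreeProvision monitors from the
    //      global gFreeList onto the thread-local list, then retry.
    //   3. Allocate a new padded block of _BLOCKSIZE monitors, publish it
    //      on gBlockList, splice the block's monitors onto the global
    //      gFreeList, and retry.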

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate a successful CAS.
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor. The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress. This protocol avoids races that would
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
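      //
      // A sketch of the interleaving the 0 value guards against, with T1
      // inflating while the stack-lock owner T2 attempts a fast unlock:
      //
      //   T1: object->cas_set_mark(INFLATING, mark)          // mark is now 0
      //   T2: CAS displaced header back into object header   // fails: mark != expected
      //   T2: slow_exit() -> inflate() -> ReadStableMark()   // spins while mark == 0
      //   T1: m->set_header(dmw); release_set_mark(encode(m))
      //   T2: observes the inflated mark and exits via the ObjectMonitor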

      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL ... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(Self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     p2i(object->mark()), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
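    //
    // Such a hypothetical inflateTry() might look like the sketch below
    // (not implemented; the name and the fallback are illustrative only):
    //
    //   ObjectMonitor* inflateTry(Thread* Self, oop object, markOop mark) {
    //     ObjectMonitor* m = omAlloc(Self);
    //     m->Recycle();
    //     m->set_header(mark);        // mark must be neutral here
    //     m->set_object(object);
    //     m->set_owner(Self);         // pre-locked for the inflating thread
    //     if (object->cas_set_mark(markOopDesc::encode(m), mark) == mark) {
    //       return m;                 // inflated *and* owned in a single CAS
    //     }
    //     <release m and fall back to the 2-step inflate/CAS-_owner path>
    //   }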

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
    ObjectMonitor * m = omAlloc(Self);
    // Prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_object(object);
    m->_Responsible = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
      // Interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
      m->set_header(NULL);
      m->set_object(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(Self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   p2i(object->mark()), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}


// We maintain a list of in-use monitors for each thread.
//
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors that
// is populated only as a thread dies (see omFlush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in-use could negatively impact performance. We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.
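//
// For orientation, a safepoint cleanup pass drives these operations roughly
// as follows (see SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp
// for the authoritative sequencing and the parallel variants):
//
//   DeflateMonitorCounters counters;
//   prepare_deflate_idle_monitors(&counters);
//   for each JavaThread* jt:                        // possibly in parallel
//     deflate_thread_local_monitors(jt, &counters);
//   deflate_idle_monitors(&counters);               // global in-use list
//   finish_deflate_idle_monitors(&counters);        // counters and logging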

// Deflate a single monitor if it is not in-use.
// Returns true if deflated, false if in-use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markOop mark = obj->mark();
  guarantee(mark == markOopDesc::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
            p2i(markOopDesc::encode(mid)));
  // Make sure that mark->monitor() and markOopDesc::encode() agree:
  guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
  const markOop dmw = mid->header();
  guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));

  if (mid->is_busy()) {
    deflated = false;
  } else {
    // The monitor is idle, so deflate it: scavenge it and return it to
    // the working free list (the caller later splices that list back
    // onto the global free list).
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  p2i(mark), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));

    // Move the deflated monitor to the working free list defined by
    // freeHeadp and freeTailp.
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list and deflate idle monitors.
// The given list could be a per-thread list or the global list.
// The caller acquires gListLock as needed.
//
// In the case of parallel processing of thread-local monitor lists,
// the work is done by Threads::parallel_threads_do(), which ensures
// that each Java thread is processed by exactly one worker thread, and
// thus avoids the conflicts that would arise if multiple worker threads
// processed the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // deflate_monitor() succeeded, so unlink mid from the in-use list:
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
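
// A sketch of the unlink performed in the loop above when mid gets
// deflated, with cur_mid_in_use preceding mid on the in-use list:
//
//   before:  *listHeadp -> ... -> cur_mid_in_use -> mid -> next -> ...
//   after:   *listHeadp -> ... -> cur_mid_in_use -> next -> ...
//
// and mid, with its FreeNext cleared, becomes the new tail of the working
// free list headed by *freeHeadp.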

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->nInuse = 0;              // currently associated with objects
  counters->nInCirculation = 0;      // extant
  counters->nScavenged = 0;          // reclaimed (global and per-thread)
  counters->perThreadScavenged = 0;  // per-thread scavenge total
  counters->perThreadTimes = 0.0;    // per-thread scavenge times
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread acquires a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");

  // Note: the thread-local monitor lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan gOmInUseList
  int deflated_count = 0;
  if (gOmInUseList) {
    counters->nInCirculation += gOmInUseCount;
    deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
    gOmInUseCount -= deflated_count;
    counters->nScavenged += deflated_count;
    counters->nInuse += gOmInUseCount;
  }

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning-to-end measurement of the phase.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);

  gMonitorFreeCount += counters->nScavenged;

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
                               "gMonitorFreeCount=%d", gMonitorPopulation,
                               gOmInUseCount, gMonitorFreeCount);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0;    // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));

  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");

  // Adjust counters
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
  counters->nScavenged += deflated_count;
  counters->nInuse += thread->omInUseCount;
  counters->perThreadScavenged += deflated_count;

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");

    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }

  timer.stop();
  // Safepoint logging cares about cumulative perThreadTimes; this captures
  // most of the cost, but not the muxRelease(), which should be cheap.
  counters->perThreadTimes += timer.seconds();

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's
// monitors. Gives up on a particular monitor if an exception occurs, but
// continues the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates whether
// the thread called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  return (u_char*)&GVars.hcSequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  return (u_char*)&GVars.stwRandom;
}

void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream * ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  }

  // Log counts for the global and per-thread monitor lists:
  int chkMonitorPopulation = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check gMonitorPopulation:
  if (gMonitorPopulation == chkMonitorPopulation) {
    ls->print_cr("gMonitorPopulation=%d equals chkMonitorPopulation=%d",
                 gMonitorPopulation, chkMonitorPopulation);
  } else {
    ls->print_cr("ERROR: gMonitorPopulation=%d is not equal to "
                 "chkMonitorPopulation=%d", gMonitorPopulation,
                 chkMonitorPopulation);
    error_cnt++;
  }

  // Check gOmInUseList and gOmInUseCount:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check gFreeList and gMonitorFreeCount:
  chk_global_free_list_and_count(ls, &error_cnt);

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check omInUseList and omInUseCount:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check omFreeList and omFreeCount:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls, on_exit);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->header()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), p2i(n->header()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chkMonitorFreeCount = 0;
  for (ObjectMonitor * n = gFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
    chkMonitorFreeCount++;
  }
  if (gMonitorFreeCount == chkMonitorFreeCount) {
    out->print_cr("gMonitorFreeCount=%d equals chkMonitorFreeCount=%d",
                  gMonitorFreeCount, chkMonitorFreeCount);
  } else {
    out->print_cr("ERROR: gMonitorFreeCount=%d is not equal to "
                  "chkMonitorFreeCount=%d", gMonitorFreeCount,
                  chkMonitorFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (gOmInUseCount == chkOmInUseCount) {
    out->print_cr("gOmInUseCount=%d equals chkOmInUseCount=%d", gOmInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: gOmInUseCount=%d is not equal to chkOmInUseCount=%d",
                  gOmInUseCount, chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = (oop)n->object();
  const markOop mark = obj->mark();
  if (!mark->has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), p2i(mark));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), p2i(mark));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor * const obj_mon = mark->monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chkOmFreeCount = 0;
  for (ObjectMonitor * n = jt->omFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(jt, n, out, error_cnt_p);
    chkOmFreeCount++;
  }
  if (jt->omFreeCount == chkOmFreeCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals "
                  "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not "
                  "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount,
                  chkOmFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(jt, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (jt->omInUseCount == chkOmInUseCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals "
                  "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
                  "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use: 'B' for is_busy, 'H' for a
// non-zero hash code, 'L' for a non-NULL owner. 'object' and
// 'object type' identify the associated object and its type.
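//
// For example, a line in the global section might look roughly like this
// (illustrative values only):
//
//              monitor BHL             object object type
//   ================== === ================== ==================
//   0x00007f1af4008800 010 0x00000000e1d302f8 java.lang.String
//
// i.e. an inflated monitor that is not busy, whose object carries a hash
// code in the displaced header, and that currently has no owner.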
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
                                                    bool on_exit) {
  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  }

  stringStream ss;
  if (gOmInUseCount > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");
    for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markOop mark = n->header();
      ResourceMark rm;
      out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n),
                 n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL,
                 p2i(obj), obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %18s %18s",
                "jt", "monitor", "BHL", "object", "object type");
  out->print_cr("================== ================== === ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markOop mark = n->header();
      ResourceMark rm;
      out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
                 " %s", p2i(jt), p2i(n), n->is_busy() != 0,
                 mark->hash() != 0, n->owner() != NULL, p2i(obj),
                 obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int popCount = 0;
  out->print_cr("%18s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Total");
  out->print_cr("================== ========== ========== ==========");
  out->print_cr("%18s %10d %10d %10d", "",
                gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
  popCount += gOmInUseCount + gMonitorFreeCount;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision);
    popCount += jt->omInUseCount + jt->omFreeCount;
  }
  return popCount;
}
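
// The resulting output looks roughly like this (illustrative values only):
//
//        Global Lists:      InUse       Free      Total
//   ================== ========== ========== ==========
//                               2        126        128
//
//    Per-Thread Lists:      InUse       Free  Provision
//   ================== ========== ========== ==========
//   0x00007f1af4019000          1         31         32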

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif