/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrFlush.hpp"
#endif
#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.


#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                \
  char* bytes = NULL;                                           \
  int len = 0;                                                  \
  jlong jtid = SharedRuntime::get_java_tid(thread);             \
  Symbol* klassname = ((oop)obj)->klass()->name();              \
  if (klassname != NULL) {                                      \
    bytes = (char*)klassname->bytes();                          \
    len = klassname->utf8_length();                             \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
  {                                                             \
    if (DTraceMonitorProbes) {                                  \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                 \
      HOTSPOT_MONITOR_WAIT(jtid,                                \
                           (monitor), bytes, len, (millis));    \
    }                                                           \
  }

#define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
#define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
#define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
#define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       \
  {                                                             \
    if (DTraceMonitorProbes) {                                  \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                 \
      HOTSPOT_MONITOR_##probe(jtid,                             \
                              (uintptr_t)(monitor), bytes, len); \
    }                                                           \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED
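
// For reference, a probe call such as
//   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
// token-pastes to HOTSPOT_MONITOR_notify, which the aliases above map to the
// HOTSPOT_MONITOR_NOTIFY probe, passing the caller's thread id, the monitor
// address, and the UTF-8 class name gathered by DTRACE_MONITOR_PROBE_COMMON.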

// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.

int ObjectMonitor::Knob_SpinLimit = 5000;   // derived by an external tool

static int Knob_Bonus     = 100;            // spin success bonus
static int Knob_BonusB    = 100;            // spin success bonus
static int Knob_Penalty   = 200;            // spin failure penalty
static int Knob_Poverty   = 1000;
static int Knob_FixedSpin = 0;
static int Knob_PreSpin   = 10;             // 20-100 likely better
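
// A sketch of how these knobs interact (see TrySpin(), defined elsewhere in
// this file): a successful spin raises _SpinDuration toward Knob_SpinLimit
// by Knob_Bonus, while a failed spin lowers it by Knob_Penalty, with
// Knob_Poverty serving as a floor -- so the profitability of spinning on
// this monitor is learned adaptively.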

DEBUG_ONLY(static volatile bool InitDone = false;)

// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.  The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
//
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   For a description of 'Methods and apparatus providing non-blocking access
//   to a resource,' see U.S. Pat. No. 7844973.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilogue) and
//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock hold times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq is != null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned on 256-byte address boundaries.  A thread
//   would try to acquire the lock or enqueue itself with CAS, but exiting
//   threads could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that this is *not* word-tearing, but it does presume that full-word
//   CAS operations are coherent when intermixed with STB operations.  That's
//   true on most common processors.
//
// * See also http://blogs.sun.com/dave

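// As an illustrative sketch only -- this file does not implement it -- the
// (List,LockByte) encoding above could pack the lock state and the list head
// into a single word, given list nodes aligned on 256-byte boundaries:
//
//   word layout:  [ 256-byte-aligned node address | LockByte ]
//   acquire:      CAS the LockByte from 0 to nonzero
//   enqueue:      CAS a new node address into the high-order bits
//   release:      a single byte store (STB) of 0 to the LockByte -- the
//                 1-0 exit discussed later in this file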

void* ObjectMonitor::operator new (size_t size) throw() {
  return AllocateHeap(size, mtInternal);
}
void* ObjectMonitor::operator new[] (size_t size) throw() {
  return operator new (size);
}
void ObjectMonitor::operator delete(void* p) {
  FreeHeap(p);
}
void ObjectMonitor::operator delete[] (void *p) {
  operator delete(p);
}
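
// ObjectMonitors are thus allocated from the C heap (mtInternal) rather than
// the Java heap, so the collector never moves or directly reclaims them; idle
// monitors are instead deflated and freed by the runtime (see the
// deflate_idle_monitors() reference below).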

// -----------------------------------------------------------------------------
// Enter support

void ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;

  void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
  if (cur == NULL) {
    assert(_recursions == 0, "invariant");
    return;
  }

  if (cur == Self) {
    // TODO-FIXME: check for integer overflow!  BUGID 6557169.
    _recursions++;
    return;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert(_recursions == 0, "internal state error");
    _recursions = 1;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self;
    return;
  }

  // We've encountered genuine contention.
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
    assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
           _recursions);
    assert(((oop)object())->mark() == markOopDesc::encode(this),
           "object mark must match encoded this: mark=" INTPTR_FORMAT
           ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
           p2i(markOopDesc::encode(this)));
    Self->_Stalled = 0;
    return;
  }

  assert(_owner != Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(Self->is_Java_thread(), "invariant");
  JavaThread * jt = (JavaThread *) Self;
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(jt->thread_state() != _thread_blocked, "invariant");
  assert(this->object() != NULL, "invariant");
  assert(_contentions >= 0, "invariant");

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc(&_contentions);

  JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
  EventJavaMonitorEnter event;
  if (event.should_commit()) {
    event.set_monitorClass(((oop)this->object())->klass());
    event.set_address((uintptr_t)(this->object_addr()));
  }

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    Self->set_current_pending_monitor(this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // handler cannot accidentally consume an unpark() meant for the
      // ParkEvent associated with this ObjectMonitor.
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI(THREAD);

      if (!ExitSuspendEquivalent(jt)) break;

      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us.  We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0;
      _succ = NULL;
      exit(false, Self);

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);

    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending.  The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block.  If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }

  Atomic::dec(&_contentions);
  assert(_contentions >= 0, "invariant");
  Self->_Stalled = 0;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert(_recursions == 0, "invariant");
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While spinning that thread could
  // increment JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }
  if (event.should_commit()) {
    event.set_previousOwner((uintptr_t)_previous_owner_tid);
    event.commit();
  }
  OM_PERFDATA_OP(ContendedLockAttempts, inc());
}

// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.

int ObjectMonitor::TryLock(Thread * Self) {
  void * own = _owner;
  if (own != NULL) return 0;
  if (Atomic::replace_if_null(Self, &_owner)) {
    assert(_recursions == 0, "invariant");
    return 1;
  }
  // The lock had been free momentarily, but we lost the race to the lock.
  // Interference -- the CAS failed.
  // We can either return -1 or retry.
  // Retry doesn't make as much sense because the lock was just acquired.
  return -1;
}
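
// Callers interpret the three-way return value as follows: > 0 means the lock
// was acquired, 0 means the lock was visibly held by another thread, and -1
// means the lock appeared free but the CAS lost a race.  The enter paths below
// simply test TryLock(Self) > 0 and otherwise fall back to spinning or parking.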

// Convert the fields used by is_busy() to a string that can be
// used for diagnostic output.
const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
  ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
            ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
            _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
  return ss->base();
}

#define MAX_RECHECK_INTERVAL 1000

void ObjectMonitor::EnterI(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  assert(InitDone, "Unexpectedly not initialized");

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner.  This has subtle but beneficial affinity
  // effects.

  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert(_succ != Self, "invariant");
  assert(_owner != Self, "invariant");
  assert(_Responsible != Self, "invariant");

  // Enqueue "Self" on ObjectMonitor's _cxq.
  //
  // Node acts as a proxy for Self.
  // As an aside, if we were ever to rewrite the synchronization code mostly
  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
  // Java objects.  This would avoid awkward lifecycle and liveness issues,
  // as well as eliminate a subset of ABA issues.
  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.

  ObjectWaiter node(Self);
  Self->_ParkEvent->reset();
  node._prev  = (ObjectWaiter *) 0xBAD;
  node.TState = ObjectWaiter::TS_CXQ;

  // Push "Self" onto the front of the _cxq.
  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
  // Note that spinning tends to reduce the rate at which threads
  // enqueue and dequeue on EntryList|cxq.
  ObjectWaiter * nxt;
  for (;;) {
    node._next = nxt = _cxq;
    if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;

    // Interference - the CAS failed because _cxq changed.  Just retry.
    // As an optional optimization we retry the lock.
    if (TryLock (Self) > 0) {
      assert(_succ != Self, "invariant");
      assert(_owner == Self, "invariant");
      assert(_Responsible != Self, "invariant");
      return;
    }
  }

  // Check for cxq|EntryList edge transition to non-null.  This indicates
  // the onset of contention.  While contention persists exiting threads
  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
  // operations revert to the faster 1-0 mode.  This enter operation may interleave
  // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
  // arrange for one of the contending threads to use a timed park() operation
  // to detect and recover from the race.  (Stranding is a form of progress
  // failure where the monitor is unlocked but all the contending threads
  // remain parked.)
  // That is, at least one of the contended threads will periodically poll _owner.
  // One of the contending threads will become the designated "Responsible" thread.
  // The Responsible thread uses a timed park instead of a normal indefinite park
  // operation -- it periodically wakes and checks for and recovers from potential
  // strandings admitted by 1-0 exit operations.  We need at most one Responsible
  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
  // be responsible for a monitor.
  //
  // Currently, one of the contended threads takes on the added role of "Responsible".
  // A viable alternative would be to use a dedicated "stranding checker" thread
  // that periodically iterated over all the threads (or active monitors) and unparked
  // successors where there was risk of stranding.  This would help eliminate the
  // timer scalability issues we see on some platforms as we'd only have one thread
  // -- the checker -- parked on a timer.

  if (nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
    Atomic::replace_if_null(Self, &_Responsible);
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq.  To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.
  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
  // In this case the ST-MEMBAR is accomplished with CAS().
  //
  // TODO: Defer all thread state transitions until park-time.
  // Since state transitions are heavy and inefficient we'd like
  // to defer the state transitions until absolutely necessary,
  // and in doing so avoid some transitions ...

  int nWakeups = 0;
  int recheckInterval = 1;

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    // park self
    if (_Responsible == Self) {
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
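      // With the factor-of-8 growth and the clamp above, the successive
      // timed-park intervals for the Responsible thread are 1, 8, 64, 512,
      // and then MAX_RECHECK_INTERVAL (1000) ms thereafter.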
    } else {
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if (TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning.  That's harmless.  If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
    // just spin again.  This pattern can repeat, leaving _succ to simply
    // spin on a CPU.

    if (_succ == Self) _succ = NULL;

    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
    OrderAccess::fence();
  }

  // Egress :
  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList.
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.
  // The head of cxq is volatile but the interior is stable.
  // In addition, Self.TState is stable.

  assert(_owner == Self, "invariant");
  assert(object() != NULL, "invariant");
  // I'd like to write:
  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  // but as we're at a safepoint that's not safe.

  UnlinkAfterAcquire(Self, &node);
  if (_succ == Self) _succ = NULL;

  assert(_succ != Self, "invariant");
  if (_Responsible == Self) {
    _Responsible = NULL;
    OrderAccess::fence(); // Dekker pivot-point

    // We may leave threads on cxq|EntryList without a designated
    // "Responsible" thread.  This is benign.  When this thread subsequently
    // exits the monitor it can "see" such preexisting "old" threads --
    // threads that arrived on the cxq|EntryList before the fence, above --
    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
    // non-null and elect a new "Responsible" timer thread.
    //
    // This thread executes:
    //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
    //    LD cxq|EntryList               (in subsequent exit)
    //
    // Entering threads in the slow/contended path execute:
    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
    //    The (ST cxq; MEMBAR) is accomplished with CAS().
    //
    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
    // exit operation from floating above the ST Responsible=null.
  }

  // We've acquired ownership with CAS().
  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
  // But since the CAS() this thread may have also stored into _succ,
  // EntryList, cxq or Responsible.  These meta-data updates must be
  // visible __before this thread subsequently drops the lock.
  // Consider what could occur if we didn't enforce this constraint --
  // STs to monitor meta-data and user-data could reorder with (become
  // visible after) the ST in exit that drops ownership of the lock.
  // Some other thread could then acquire the lock, but observe inconsistent
  // or old monitor meta-data and heap data.  That violates the JMM.
  // To that end, the 1-0 exit() operation must have at least STST|LDST
  // "release" barrier semantics.  Specifically, there must be at least a
  // STST|LDST barrier in exit() before the ST of null into _owner that drops
  // the lock.  The barrier ensures that changes to monitor meta-data and data
  // protected by the lock will be visible before we release the lock, and
  // therefore before some other thread (CPU) has a chance to acquire the lock.
  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  //
  // Critically, any prior STs to _succ or EntryList must be visible before
  // the ST of null into _owner in the *subsequent* (following) corresponding
  // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
  // execute a serializing instruction.

  return;
}

// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI().

void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
  assert(Self != NULL, "invariant");
  assert(SelfNode != NULL, "invariant");
  assert(SelfNode->_thread == Self, "invariant");
  assert(_waiters > 0, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  JavaThread * jt = (JavaThread *) Self;

  int nWakeups = 0;
  for (;;) {
    ObjectWaiter::TStates v = SelfNode->TState;
    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
    assert(_owner != Self, "invariant");

    if (TryLock(Self) > 0) break;
    if (TrySpin(Self) > 0) break;

    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
      ThreadBlockInVM tbivm(jt);

      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      jt->set_suspend_equivalent();
      Self->_ParkEvent->park();

      // were we externally suspended while we were waiting?
      for (;;) {
        if (!ExitSuspendEquivalent(jt)) break;
        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
        jt->java_suspend_self();
        jt->set_suspend_equivalent();
      }
    }

    // Try again, but just so we distinguish between futile wakeups and
    // successful wakeups.  The following test isn't algorithmically
    // necessary, but it helps us maintain sensible statistics.
    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally
    // find that _succ == Self.
    if (_succ == Self) _succ = NULL;

    // Invariant: after clearing _succ a contending thread
    // *must* retry _owner before parking.
    OrderAccess::fence();

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
  }

  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList.
  // Unlinking from the EntryList is constant-time and atomic-free.
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.
  // The head of cxq is volatile but the interior is stable.
  // In addition, Self.TState is stable.

  assert(_owner == Self, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  UnlinkAfterAcquire(Self, SelfNode);
  if (_succ == Self) _succ = NULL;
  assert(_succ != Self, "invariant");
  SelfNode->TState = ObjectWaiter::TS_RUN;
  OrderAccess::fence();      // see comments at the end of EnterI()
}

// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
  assert(_owner == Self, "invariant");
  assert(SelfNode->_thread == Self, "invariant");

  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
    // Normal case: remove Self from the DLL EntryList.
    // This is a constant-time operation.
    ObjectWaiter * nxt = SelfNode->_next;
    ObjectWaiter * prv = SelfNode->_prev;
    if (nxt != NULL) nxt->_prev = prv;
    if (prv != NULL) prv->_next = nxt;
    if (SelfNode == _EntryList) _EntryList = nxt;
    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
  } else {
    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
    // Inopportune interleaving -- Self is still on the cxq.
    // This usually means the enqueue of self raced an exiting thread.
    // Normally we'll find Self near the front of the cxq, so
    // dequeueing is typically fast.  If need be we can accelerate
    // this with some MCS/CHL-like bidirectional list hints and advisory
    // back-links so dequeueing from the interior will normally operate
    // in constant-time.
    // Dequeue Self from either the head (with CAS) or from the interior
    // with a linear-time scan and normal non-atomic memory operations.
    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
    // and then unlink Self from EntryList.  We have to drain eventually,
    // so it might as well be now.

    ObjectWaiter * v = _cxq;
    assert(v != NULL, "invariant");
    if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
      // The CAS above can fail from interference IFF a "RAT" arrived.
      // In that case Self must be in the interior and can no longer be
      // at the head of cxq.
      if (v == SelfNode) {
        assert(_cxq != v, "invariant");
        v = _cxq;          // CAS above failed - start scan at head of list
      }
      ObjectWaiter * p;
      ObjectWaiter * q = NULL;
      for (p = v; p != NULL && p != SelfNode; p = p->_next) {
        q = p;
        assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
      }
      assert(v != SelfNode, "invariant");
      assert(p == SelfNode, "Node not found on cxq");
      assert(p != _cxq, "invariant");
      assert(q != NULL, "invariant");
      assert(q->_next == p, "invariant");
      q->_next = p->_next;
    }
  }

#ifdef ASSERT
  // Diagnostic hygiene ...
  SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
  SelfNode->_next  = (ObjectWaiter *) 0xBAD;
  SelfNode->TState = ObjectWaiter::TS_RUN;
#endif
}

// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state == _thread_blocked,
// but the monitor's _contentions field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.  Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load on the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (More precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.
//
// There is also the risk of a futile wake-up.  If we drop the lock
// another thread can reacquire the lock immediately, and we can
// then wake a thread unnecessarily.  This is benign, and we've
// structured the code so the windows are short and the frequency
// of such futile wakeups is low.
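//
// As a sketch, the two exit flavors differ only in what stands between
// dropping the lock and inspecting the entry queues:
//
//   1-1 exit (this slow path):      1-0 exit (fast_unlock()):
//     ST _owner = NULL                ST _owner = NULL
//     MEMBAR #storeload               (no MEMBAR -- the window this opens
//     LD _EntryList|_cxq               is what admits stranding, recovered
//     unpark a successor if needed     by the Responsible thread's timed park)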

void ObjectMonitor::exit(bool not_suspended, TRAPS) {
  Thread * const Self = THREAD;
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned((address) _owner)) {
      // Transmute _owner from a BasicLock pointer to a Thread address.
      // We don't need to hold _mutex for this transition.
      // Non-null to Non-null is safe as long as all readers can
      // tolerate either flavor.
      assert(_recursions == 0, "invariant");
      _owner = THREAD;
      _recursions = 0;
    } else {
      // Apparent unbalanced locking ...
      // Naively we'd like to throw IllegalMonitorStateException.
      // As a practical matter we can neither allocate nor throw an
      // exception as ::exit() can be called from leaf routines.
      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
      // Upon deeper reflection, however, in a properly run JVM the only
      // way we should encounter this situation is in the presence of
      // unbalanced JNI locking.  TODO: CheckJNICalls.
      // See also: CR4414101
      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
      return;
    }
  }

  if (_recursions != 0) {
    _recursions--;        // this is simple recursive enter
    return;
  }

  // Invariant: after setting Responsible=null a thread must execute
  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
  _Responsible = NULL;

#if INCLUDE_JFR
  // get the owner's thread id for the MonitorEnter event
  // if it is enabled and the thread isn't suspended
  if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
    _previous_owner_tid = JFR_THREAD_ID(Self);
  }
#endif

  for (;;) {
    assert(THREAD == _owner, "invariant");

    // release semantics: prior loads and stores from within the critical section
    // must not float (reorder) past the following store that drops the lock.
    // On SPARC that requires MEMBAR #loadstore|#storestore.
    // But of course in TSO #loadstore|#storestore is not required.
    OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
    OrderAccess::storeload();                           // See if we need to wake a successor
    if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
      return;
    }
    // Other threads are blocked trying to acquire the lock.

    // Normally the exiting thread is responsible for ensuring succession,
    // but if other successors are ready or other entering threads are spinning
    // then this thread can simply store NULL into _owner and exit without
    // waking a successor.  The existence of spinners or ready successors
    // guarantees proper succession (liveness).  Responsibility passes to the
    // ready or running successors.  The exiting thread delegates the duty.
    // More precisely, if a successor already exists this thread is absolved
    // of the responsibility of waking (unparking) one.
    //
    // The _succ variable is critical to reducing futile wakeup frequency.
    // _succ identifies the "heir presumptive" thread that has been made
    // ready (unparked) but that has not yet run.  We need only one such
    // successor thread to guarantee progress.
    // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
    // section 3.3 "Futile Wakeup Throttling" for details.
    //
    // Note that spinners in Enter() also set _succ non-null.
    // In the current implementation spinners opportunistically set
    // _succ so that exiting threads might avoid waking a successor.
    // Another less appealing alternative would be for the exiting thread
    // to drop the lock and then spin briefly to see if a spinner managed
    // to acquire the lock.  If so, the exiting thread could exit
    // immediately without waking a successor, otherwise the exiting
    // thread would need to dequeue and wake a successor.
    // (Note that we'd need to make the post-drop spin short, but no
    // shorter than the worst-case round-trip cache-line migration time.
    // The dropped lock needs to become visible to the spinner, and then
    // the acquisition of the lock by the spinner must become visible to
    // the exiting thread).

    // It appears that an heir-presumptive (successor) must be made ready.
    // Only the current lock owner can manipulate the EntryList or
    // drain _cxq, so we need to reacquire the lock.  If we fail
    // to reacquire the lock the responsibility for ensuring succession
    // falls to the new owner.
    //
    if (!Atomic::replace_if_null(THREAD, &_owner)) {
      return;
    }

    guarantee(_owner == THREAD, "invariant");

    ObjectWaiter * w = NULL;

    w = _EntryList;
    if (w != NULL) {
      // I'd like to write: guarantee (w->_thread != Self).
      // But in practice an exiting thread may find itself on the EntryList.
      // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
      // then calls exit().  Exit releases the lock by setting O._owner to NULL.
      // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
      // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
      // releases the lock "O".  T2 resumes immediately after the ST of null into
      // _owner, above.  T2 notices that the EntryList is populated, so it
      // reacquires the lock and then finds itself on the EntryList.
      // Given all that, we have to tolerate the circumstance where "w" is
      // associated with Self.
      assert(w->TState == ObjectWaiter::TS_ENTER, "invariant");
      ExitEpilog(Self, w);
      return;
    }

    // If we find that both _cxq and EntryList are null then just
    // re-run the exit protocol from the top.
    w = _cxq;
    if (w == NULL) continue;

    // Drain _cxq into EntryList - bulk transfer.
    // First, detach _cxq.
    // The following loop is tantamount to: w = swap(&cxq, NULL)
    for (;;) {
      assert(w != NULL, "Invariant");
      ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }

    assert(w != NULL, "invariant");
    assert(_EntryList == NULL, "invariant");

    // Convert the LIFO SLL anchored by _cxq into a DLL.
    // The list reorganization step operates in O(LENGTH(w)) time.
    // It's critical that this step operate quickly as
    // "Self" still holds the outer-lock, restricting parallelism
    // and effectively lengthening the critical section.
    // Invariant: s chases t chases u.
    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
    // we have faster access to the tail.

    _EntryList = w;
    ObjectWaiter * q = NULL;
    ObjectWaiter * p;
    for (p = w; p != NULL; p = p->_next) {
      guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
      p->TState = ObjectWaiter::TS_ENTER;
      p->_prev = q;
      q = p;
    }
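    // The loop above leaves the detached cxq nodes as a DLL rooted at
    // _EntryList, preserving their LIFO (most-recently-arrived-first)
    // order; q is left pointing at the tail.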

    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().

    // See if we can abdicate to a spinner instead of waking a thread.
    // A primary goal of the implementation is to reduce the
    // context-switch rate.
    if (_succ != NULL) continue;

    w = _EntryList;
    if (w != NULL) {
      guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
      ExitEpilog(Self, w);
      return;
    }
  }
}

// ExitSuspendEquivalent:
// A faster alternative to handle_special_suspend_equivalent_condition()
//
// handle_special_suspend_equivalent_condition() unconditionally
// acquires the SR_lock.  On some platforms uncontended MutexLocker()
// operations have high latency.  Note that in ::enter() we call HSSEC
// while holding the monitor, so we effectively lengthen the critical sections.
//
// There are a number of possible solutions:
//
// A.  To ameliorate the problem we might also defer state transitions
//     to as late as possible -- just prior to parking.
//     Given that, we'd call HSSEC after having returned from park(),
//     but before attempting to acquire the monitor.  This is only a
//     partial solution.  It avoids calling HSSEC while holding the
//     monitor (good), but it still increases successor reacquisition latency --
//     the interval between unparking a successor and the time the successor
//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
//     If we use this technique we can also avoid the EnterI()-exit() loop
//     in ::enter() where we iteratively drop the lock and then attempt
//     to reacquire it after suspending.
//
// B.  In the future we might fold all the suspend bits into a
//     composite per-thread suspend flag and then update it with CAS().
//     Alternately, a Dekker-like mechanism with multiple variables
//     would suffice:
//       ST Self->_suspend_equivalent = false
//       MEMBAR
//       LD Self->_suspend_flags

bool ObjectMonitor::ExitSuspendEquivalent(JavaThread * jSelf) {
  return jSelf->handle_special_suspend_equivalent_condition();
}


void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
  assert(_owner == Self, "invariant");

  // Exit protocol:
  // 1. ST _succ = wakee
  // 2. membar #loadstore|#storestore;
  // 3. ST _owner = NULL
  // 4. unpark(wakee)

  _succ = Wakee->_thread;
  ParkEvent * Trigger = Wakee->_event;

  // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  // out-of-scope (non-extant).
  Wakee = NULL;

  // Drop the lock
  OrderAccess::release_store(&_owner, (void*)NULL);
  OrderAccess::fence();                               // ST _owner vs LD in unpark()

  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
}


// -----------------------------------------------------------------------------
// Class Loader deadlock handling.
//
// complete_exit exits a lock returning recursion count
// complete_exit/reenter operate as a wait without waiting
// complete_exit requires an inflated monitor
// The _owner field is not always the Thread addr even with an
// inflated monitor, e.g. the monitor can be inflated by a non-owning
// thread due to contention.
intptr_t ObjectMonitor::complete_exit(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "Must be Java thread!");
  JavaThread *jt = (JavaThread *)THREAD;

  assert(InitDone, "Unexpectedly not initialized");

  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
      assert(_recursions == 0, "internal state error");
      _owner = THREAD;   // Convert from basiclock addr to Thread addr
      _recursions = 0;
    }
  }

  guarantee(Self == _owner, "complete_exit not owner");
  intptr_t save = _recursions; // record the old recursion count
  _recursions = 0;             // set the recursion level to be 0
  exit(true, Self);            // exit the monitor
  guarantee(_owner != Self, "invariant");
  return save;
}

// reenter() enters a lock and sets recursion count
// complete_exit/reenter operate as a wait without waiting
void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "Must be Java thread!");
  JavaThread *jt = (JavaThread *)THREAD;

  guarantee(_owner != Self, "reenter already owner");
  enter(THREAD);       // enter the monitor
  guarantee(_recursions == 0, "reenter recursion");
  _recursions = recursions;
  return;
}
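
// A sketch of the intended pairing (hypothetical caller-side code; real
// callers reach these through ObjectSynchronizer):
//
//   intptr_t save = monitor->complete_exit(THREAD);  // fully release the lock
//   ... perform work that must not hold this monitor ...
//   monitor->reenter(save, THREAD);   // reacquire, restoring the saved
//                                     // recursion count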


// -----------------------------------------------------------------------------
// A macro is used below because there may already be a pending
// exception which should not abort the execution of the routines
// which use this (which is why we don't put this into check_slow and
// call it with a CHECK argument).

#define CHECK_OWNER()                                                       \
  do {                                                                      \
    if (THREAD != _owner) {                                                 \
      if (THREAD->is_lock_owned((address) _owner)) {                        \
        _owner = THREAD;  /* Convert from basiclock addr to Thread addr */  \
        _recursions = 0;                                                    \
      } else {                                                              \
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
      }                                                                     \
    }                                                                       \
  } while (false)

// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
// TODO-FIXME: remove check_slow() -- it's likely dead.

void ObjectMonitor::check_slow(TRAPS) {
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}

static void post_monitor_wait_event(EventJavaMonitorWait* event,
                                    ObjectMonitor* monitor,
                                    jlong notifier_tid,
                                    jlong timeout,
                                    bool timedout) {
  assert(event != NULL, "invariant");
  assert(monitor != NULL, "invariant");
  event->set_monitorClass(((oop)monitor->object())->klass());
  event->set_timeout(timeout);
  event->set_address((uintptr_t)monitor->object_addr());
  event->set_notifier(notifier_tid);
  event->set_timedOut(timedout);
  event->commit();
}

// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
//
// Note: a subset of changes to ObjectMonitor::wait()
// will need to be replicated in complete_exit
void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "Must be Java thread!");
  JavaThread *jt = (JavaThread *)THREAD;

  assert(InitDone, "Unexpectedly not initialized");

  // Throw IMSX or IEX.
  CHECK_OWNER();

  EventJavaMonitorWait event;

  // check for a pending interrupt
  if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
    // post monitor waited event.  Note that this is past-tense, we are done waiting.
    if (JvmtiExport::should_post_monitor_waited()) {
      // Note: 'false' parameter is passed here because the
      // wait was not timed out due to thread interrupt.
      JvmtiExport::post_monitor_waited(jt, this, false);

      // In this short circuit of the monitor wait protocol, the
      // current thread never drops ownership of the monitor and
      // never gets added to the wait queue so the current thread
      // cannot be made the successor.  This means that the
      // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
      // consume an unpark() meant for the ParkEvent associated with
      // this ObjectMonitor.
    }
    if (event.should_commit()) {
      post_monitor_wait_event(&event, this, 0, millis, false);
    }
    THROW(vmSymbols::java_lang_InterruptedException());
    return;
  }

  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);
  jt->set_current_waiting_monitor(this);

  // create a node to be put into the queue
  // Critically, after we reset() the event but prior to park(), we must check
  // for a pending interrupt.
  ObjectWaiter node(Self);
  node.TState = ObjectWaiter::TS_WAIT;
  Self->_ParkEvent->reset();
  OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag

  // Enter the waiting queue, which is a circular doubly linked list in this case
  // but it could be a priority queue or any data structure.
  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
  // by the owner of the monitor *except* in the case where park()
  // returns because of a timeout or interrupt.  Contention is exceptionally rare
  // so we use a simple spin-lock instead of a heavier-weight blocking lock.

  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
  AddWaiter(&node);
  Thread::SpinRelease(&_WaitSetLock);

  _Responsible = NULL;

  intptr_t save = _recursions;     // record the old recursion count
  _waiters++;                      // increment the number of waiters
  _recursions = 0;                 // set the recursion level to be 0
  exit(true, Self);                // exit the monitor
  guarantee(_owner != Self, "invariant");

  // The thread is on the WaitSet list - now park() it.
  // On MP systems it's conceivable that a brief spin before we park
  // could be profitable.
  //
  // TODO-FIXME: change the following logic to a loop of the form
  //   while (!timeout && !interrupted && _notified == 0) park()

  int ret = OS_OK;
  int WasNotified = 0;
  { // State transition wrappers
    OSThread* osthread = Self->osthread();
    OSThreadWaitState osts(osthread, true);
    {
      ThreadBlockInVM tbivm(jt);
      // Thread is in thread_blocked state and oop access is unsafe.
      jt->set_suspend_equivalent();

      if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
        // Intentionally empty
      } else if (node._notified == 0) {
        if (millis <= 0) {
          Self->_ParkEvent->park();
        } else {
          ret = Self->_ParkEvent->park(millis);
        }
      }

      // were we externally suspended while we were waiting?
      if (ExitSuspendEquivalent (jt)) {
        // TODO-FIXME: add -- if succ == Self then succ = null.
        jt->java_suspend_self();
      }

    } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm

    // Node may be on the WaitSet, the EntryList (or cxq), or in transition
    // from the WaitSet to the EntryList.
    // See if we need to remove Node from the WaitSet.
    // We use double-checked locking to avoid grabbing _WaitSetLock
    // if the thread is not on the wait queue.
    //
    // Note that we don't need a fence before the fetch of TState.
    // In the worst case we'll fetch an old, stale value of TS_WAIT previously
    // written by this thread.  (Perhaps the fetch might even be satisfied
    // by a look-aside into the processor's own store buffer, although given
    // the length of the code path between the prior ST and this load that's
    // highly unlikely.)  If the following LD fetches a stale TS_WAIT value
    // then we'll acquire the lock and then re-fetch a fresh TState value.
    // That is, we fail toward safety.
1308
1309 if (node.TState == ObjectWaiter::TS_WAIT) {
1310 Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
1311 if (node.TState == ObjectWaiter::TS_WAIT) {
1312 DequeueSpecificWaiter(&node); // unlink from WaitSet
1313 assert(node._notified == 0, "invariant");
1314 node.TState = ObjectWaiter::TS_RUN;
1315 }
1316 Thread::SpinRelease(&_WaitSetLock);
1317 }
1318
1319 // The thread is now either on off-list (TS_RUN),
1320 // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
1321 // The Node's TState variable is stable from the perspective of this thread.
1322 // No other threads will asynchronously modify TState.
1323 guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
1324 OrderAccess::loadload();
1325 if (_succ == Self) _succ = NULL;
1326 WasNotified = node._notified;
1327
1328 // Reentry phase -- reacquire the monitor.
1329 // re-enter contended monitor after object.wait().
1330 // retain OBJECT_WAIT state until re-enter successfully completes
1331 // Thread state is thread_in_vm and oop access is again safe,
1332 // although the raw address of the object may have changed.
1333 // (Don't cache naked oops over safepoints, of course).
1334
    // Post the monitor waited event. Note that this is past-tense: we are done waiting.
1336 if (JvmtiExport::should_post_monitor_waited()) {
1337 JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
1338
1339 if (node._notified != 0 && _succ == Self) {
1340 // In this part of the monitor wait-notify-reenter protocol it
1341 // is possible (and normal) for another thread to do a fastpath
1342 // monitor enter-exit while this thread is still trying to get
1343 // to the reenter portion of the protocol.
1344 //
1345 // The ObjectMonitor was notified and the current thread is
1346 // the successor which also means that an unpark() has already
1347 // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
1348 // consume the unpark() that was done when the successor was
1349 // set because the same ParkEvent is shared between Java
1350 // monitors and JVM/TI RawMonitors (for now).
1351 //
1352 // We redo the unpark() to ensure forward progress, i.e., we
1353 // don't want all pending threads hanging (parked) with none
1354 // entering the unlocked monitor.
1355 node._event->unpark();
1356 }
1357 }
1358
1359 if (event.should_commit()) {
1360 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1361 }
1362
1363 OrderAccess::fence();
1364
1365 assert(Self->_Stalled != 0, "invariant");
1366 Self->_Stalled = 0;
1367
1368 assert(_owner != Self, "invariant");
1369 ObjectWaiter::TStates v = node.TState;
1370 if (v == ObjectWaiter::TS_RUN) {
1371 enter(Self);
1372 } else {
1373 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1374 ReenterI(Self, &node);
1375 node.wait_reenter_end(this);
1376 }
1377
1378 // Self has reacquired the lock.
1379 // Lifecycle - the node representing Self must not appear on any queues.
1380 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1381 // want residual elements associated with this thread left on any lists.
1382 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1383 assert(_owner == Self, "invariant");
1384 assert(_succ != Self, "invariant");
1385 } // OSThreadWaitState()
1386
1387 jt->set_current_waiting_monitor(NULL);
1388
1389 guarantee(_recursions == 0, "invariant");
1390 _recursions = save; // restore the old recursion count
1391 _waiters--; // decrement the number of waiters
1392
1393 // Verify a few postconditions
1394 assert(_owner == Self, "invariant");
1395 assert(_succ != Self, "invariant");
1396 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
1397
1398 // check if the notification happened
1399 if (!WasNotified) {
1400 // no, it could be timeout or Thread.interrupt() or both
1401 // check for interrupt event, otherwise it is timeout
1402 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1403 THROW(vmSymbols::java_lang_InterruptedException());
1404 }
1405 }
1406
  // NOTE: A spurious wake-up will be treated as a timeout.
1408 // Monitor notify has precedence over thread interrupt.
1409}
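
// For reference, the Java-level idiom that the wait/notify machinery above
// implements (illustrative):
//
//   synchronized (lock) {
//     while (!condition) {
//       lock.wait();   // may return on notify, interrupt, timeout,
//     }                // or spurious wake-up -- hence the loop
//     // condition now holds while the lock is still held
//   }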
1410
1411
1412// Consider:
1413// If the lock is cool (cxq == null && succ == null) and we're on an MP system
1414// then instead of transferring a thread from the WaitSet to the EntryList
1415// we might just dequeue a thread from the WaitSet and directly unpark() it.
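//
// A sketch of that alternative in terms of names used in this file
// (illustrative only; this is not the implemented path, and the JVMTI/JFR
// bookkeeping is elided):
//
//   Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
//   ObjectWaiter * w = DequeueWaiter();
//   Thread::SpinRelease(&_WaitSetLock);
//   if (w != NULL) {
//     w->TState = ObjectWaiter::TS_RUN;
//     w->_notified = 1;
//     w->_event->unpark();   // wake the waiter to contend for the lock directly
//   }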
1416
1417void ObjectMonitor::INotify(Thread * Self) {
1418 Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
1419 ObjectWaiter * iterator = DequeueWaiter();
1420 if (iterator != NULL) {
1421 guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
1422 guarantee(iterator->_notified == 0, "invariant");
1423 // Disposition - what might we do with iterator ?
1424 // a. add it directly to the EntryList - either tail (policy == 1)
1425 // or head (policy == 0).
1426 // b. push it onto the front of the _cxq (policy == 2).
1427 // For now we use (b).
1428
1429 iterator->TState = ObjectWaiter::TS_ENTER;
1430
1431 iterator->_notified = 1;
1432 iterator->_notifier_tid = JFR_THREAD_ID(Self);
1433
1434 ObjectWaiter * list = _EntryList;
1435 if (list != NULL) {
1436 assert(list->_prev == NULL, "invariant");
1437 assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
1438 assert(list != iterator, "invariant");
1439 }
1440
    // Prepend to the cxq, or install the iterator as the sole element
    // of the EntryList if the EntryList is empty.
1442 if (list == NULL) {
1443 iterator->_next = iterator->_prev = NULL;
1444 _EntryList = iterator;
1445 } else {
1446 iterator->TState = ObjectWaiter::TS_CXQ;
1447 for (;;) {
1448 ObjectWaiter * front = _cxq;
1449 iterator->_next = front;
1450 if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
1451 break;
1452 }
1453 }
1454 }
1455
1456 // _WaitSetLock protects the wait queue, not the EntryList. We could
1457 // move the add-to-EntryList operation, above, outside the critical section
1458 // protected by _WaitSetLock. In practice that's not useful. With the
1459 // exception of wait() timeouts and interrupts the monitor owner
1460 // is the only thread that grabs _WaitSetLock. There's almost no contention
1461 // on _WaitSetLock so it's not profitable to reduce the length of the
1462 // critical section.
1463
1464 iterator->wait_reenter_begin(this);
1465 }
1466 Thread::SpinRelease(&_WaitSetLock);
1467}
1468
1469// Consider: a not-uncommon synchronization bug is to use notify() when
1470// notifyAll() is more appropriate, potentially resulting in stranded
1471// threads; this is one example of a lost wakeup. A useful diagnostic
1472// option is to force all notify() operations to behave as notifyAll().
1473//
1474// Note: We can also detect many such problems with a "minimum wait".
// If the program no longer hangs when the "minimum wait" is set to a
// small non-zero timeout value, whereas it hung without it, that
// suggests a lost-wakeup bug.
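//
// A sketch of that "minimum wait" diagnostic (MinimumWaitMillis is a
// hypothetical knob, not an existing flag):
//
//   // in ObjectMonitor::wait(), before parking:
//   if (millis == 0 && MinimumWaitMillis > 0) {
//     millis = MinimumWaitMillis;  // turn untimed waits into short timed waits
//   }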
1478
1479void ObjectMonitor::notify(TRAPS) {
1480 CHECK_OWNER();
1481 if (_WaitSet == NULL) {
1482 return;
1483 }
1484 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1485 INotify(THREAD);
1486 OM_PERFDATA_OP(Notifications, inc(1));
1487}
1488
1489
1490// The current implementation of notifyAll() transfers the waiters one-at-a-time
1491// from the waitset to the EntryList. This could be done more efficiently with a
1492// single bulk transfer but in practice it's not time-critical. Beware too,
1493// that in prepend-mode we invert the order of the waiters. Let's say that the
1494// waitset is "ABCD" and the EntryList is "XYZ". After a notifyAll() in prepend
1495// mode the waitset will be empty and the EntryList will be "DCBAXYZ".
1496
1497void ObjectMonitor::notifyAll(TRAPS) {
1498 CHECK_OWNER();
1499 if (_WaitSet == NULL) {
1500 return;
1501 }
1502
1503 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1504 int tally = 0;
1505 while (_WaitSet != NULL) {
1506 tally++;
1507 INotify(THREAD);
1508 }
1509
1510 OM_PERFDATA_OP(Notifications, inc(tally));
1511}
1512
1513// -----------------------------------------------------------------------------
1514// Adaptive Spinning Support
1515//
1516// Adaptive spin-then-block - rational spinning
1517//
1518// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
// algorithm. On high-order SMP systems it would be better to start with
1520// a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH,
1521// a contending thread could enqueue itself on the cxq and then spin locally
1522// on a thread-specific variable such as its ParkEvent._Event flag.
1523// That's left as an exercise for the reader. Note that global spinning is
1524// not problematic on Niagara, as the L2 cache serves the interconnect and
1525// has both low latency and massive bandwidth.
1526//
1527// Broadly, we can fix the spin frequency -- that is, the % of contended lock
1528// acquisition attempts where we opt to spin -- at 100% and vary the spin count
1529// (duration) or we can fix the count at approximately the duration of
1530// a context switch and vary the frequency. Of course we could also
// vary both, satisfying K == Frequency * Duration, where K is adaptive per monitor.
1532// For a description of 'Adaptive spin-then-block mutual exclusion in
1533// multi-threaded processing,' see U.S. Pat. No. 8046758.
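//
// For example, with K held constant, halving the spin frequency doubles
// the permitted spin duration: 100% * D == 50% * (2 * D) == K.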
1534//
1535// This implementation varies the duration "D", where D varies with
1536// the success rate of recent spin attempts. (D is capped at approximately
// the length of a round-trip context switch). The success rate for recent
1538// spin attempts is a good predictor of the success rate of future spin
1539// attempts. The mechanism adapts automatically to varying critical
1540// section length (lock modality), system load and degree of parallelism.
1541// D is maintained per-monitor in _SpinDuration and is initialized
1542// optimistically. Spin frequency is fixed at 100%.
1543//
1544// Note that _SpinDuration is volatile, but we update it without locks
1545// or atomics. The code is designed so that _SpinDuration stays within
1546// a reasonable range even in the presence of races. The arithmetic
1547// operations on _SpinDuration are closed over the domain of legal values,
// so at worst a race will install an older but still legal value.
1549// At the very worst this introduces some apparent non-determinism.
// We might spin when we shouldn't or vice-versa, but since the spin
// counts are relatively short, even in the worst case, the effect is harmless.
1552//
// Care must be taken that a low "D" value does not become
// an absorbing state. Transient spinning failures -- when spinning
1555// is overall profitable -- should not cause the system to converge
1556// on low "D" values. We want spinning to be stable and predictable
1557// and fairly responsive to change and at the same time we don't want
1558// it to oscillate, become metastable, be "too" non-deterministic,
1559// or converge on or enter undesirable stable absorbing states.
1560//
1561// We implement a feedback-based control system -- using past behavior
1562// to predict future behavior. We face two issues: (a) if the
1563// input signal is random then the spin predictor won't provide optimal
1564// results, and (b) if the signal frequency is too high then the control
1565// system, which has some natural response lag, will "chase" the signal.
1566// (b) can arise from multimodal lock hold times. Transient preemption
1567// can also result in apparent bimodal lock hold times.
1568// Although sub-optimal, neither condition is particularly harmful, as
1569// in the worst-case we'll spin when we shouldn't or vice-versa.
1570// The maximum spin duration is rather short so the failure modes aren't bad.
// To be conservative, I've tuned the gain in the system to bias toward
// _not spinning. Relatedly, the system can sometimes enter a mode where it
1573// "rings" or oscillates between spinning and not spinning. This happens
1574// when spinning is just on the cusp of profitability, however, so the
1575// situation is not dire. The state is benign -- there's no need to add
1576// hysteresis control to damp the transition rate between spinning and
1577// not spinning.
1578
1579// Spinning: Fixed frequency (100%), vary duration
1580int ObjectMonitor::TrySpin(Thread * Self) {
1581 // Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
1582 int ctr = Knob_FixedSpin;
1583 if (ctr != 0) {
1584 while (--ctr >= 0) {
1585 if (TryLock(Self) > 0) return 1;
1586 SpinPause();
1587 }
1588 return 0;
1589 }
1590
1591 for (ctr = Knob_PreSpin + 1; --ctr >= 0;) {
1592 if (TryLock(Self) > 0) {
1593 // Increase _SpinDuration ...
1594 // Note that we don't clamp SpinDuration precisely at SpinLimit.
      // Raising _SpinDuration to the poverty line is key.
1596 int x = _SpinDuration;
1597 if (x < Knob_SpinLimit) {
1598 if (x < Knob_Poverty) x = Knob_Poverty;
1599 _SpinDuration = x + Knob_BonusB;
1600 }
1601 return 1;
1602 }
1603 SpinPause();
1604 }
1605
1606 // Admission control - verify preconditions for spinning
1607 //
1608 // We always spin a little bit, just to prevent _SpinDuration == 0 from
1609 // becoming an absorbing state. Put another way, we spin briefly to
1610 // sample, just in case the system load, parallelism, contention, or lock
1611 // modality changed.
1612 //
1613 // Consider the following alternative:
1614 // Periodically set _SpinDuration = _SpinLimit and try a long/full
1615 // spin attempt. "Periodically" might mean after a tally of
1616 // the # of failed spin attempts (or iterations) reaches some threshold.
1617 // This takes us into the realm of 1-out-of-N spinning, where we
1618 // hold the duration constant but vary the frequency.
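  //
  // A sketch of that 1-out-of-N scheme (_FailedSpins and SpinRetryThreshold
  // are hypothetical names, not existing fields or flags):
  //
  //   if (++_FailedSpins >= SpinRetryThreshold) {
  //     _FailedSpins = 0;
  //     _SpinDuration = Knob_SpinLimit;   // periodic long/full spin attempt
  //   }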
1619
1620 ctr = _SpinDuration;
1621 if (ctr <= 0) return 0;
1622
1623 if (NotRunnable(Self, (Thread *) _owner)) {
1624 return 0;
1625 }
1626
1627 // We're good to spin ... spin ingress.
1628 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1629 // when preparing to LD...CAS _owner, etc and the CAS is likely
1630 // to succeed.
1631 if (_succ == NULL) {
1632 _succ = Self;
1633 }
1634 Thread * prv = NULL;
1635
1636 // There are three ways to exit the following loop:
1637 // 1. A successful spin where this thread has acquired the lock.
1638 // 2. Spin failure with prejudice
1639 // 3. Spin failure without prejudice
1640
1641 while (--ctr >= 0) {
1642
1643 // Periodic polling -- Check for pending GC
1644 // Threads may spin while they're unsafe.
1645 // We don't want spinning threads to delay the JVM from reaching
1646 // a stop-the-world safepoint or to steal cycles from GC.
1647 // If we detect a pending safepoint we abort in order that
1648 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1649 // this thread, if safe, doesn't steal cycles from GC.
1650 // This is in keeping with the "no loitering in runtime" rule.
1651 // We periodically check to see if there's a safepoint pending.
1652 if ((ctr & 0xFF) == 0) {
1653 if (SafepointMechanism::should_block(Self)) {
1654 goto Abort; // abrupt spin egress
1655 }
1656 SpinPause();
1657 }
1658
1659 // Probe _owner with TATAS
1660 // If this thread observes the monitor transition or flicker
1661 // from locked to unlocked to locked, then the odds that this
1662 // thread will acquire the lock in this spin attempt go down
1663 // considerably. The same argument applies if the CAS fails
1664 // or if we observe _owner change from one non-null value to
1665 // another non-null value. In such cases we might abort
1666 // the spin without prejudice or apply a "penalty" to the
1667 // spin count-down variable "ctr", reducing it by 100, say.
1668
1669 Thread * ox = (Thread *) _owner;
1670 if (ox == NULL) {
1671 ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1672 if (ox == NULL) {
1673 // The CAS succeeded -- this thread acquired ownership
1674 // Take care of some bookkeeping to exit spin state.
1675 if (_succ == Self) {
1676 _succ = NULL;
1677 }
1678
1679 // Increase _SpinDuration :
1680 // The spin was successful (profitable) so we tend toward
1681 // longer spin attempts in the future.
1682 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1683 // If we acquired the lock early in the spin cycle it
1684 // makes sense to increase _SpinDuration proportionally.
1685 // Note that we don't clamp SpinDuration precisely at SpinLimit.
1686 int x = _SpinDuration;
1687 if (x < Knob_SpinLimit) {
1688 if (x < Knob_Poverty) x = Knob_Poverty;
1689 _SpinDuration = x + Knob_Bonus;
1690 }
1691 return 1;
1692 }
1693
1694 // The CAS failed ... we can take any of the following actions:
1695 // * penalize: ctr -= CASPenalty
1696 // * exit spin with prejudice -- goto Abort;
1697 // * exit spin without prejudice.
      // * Since CAS is high-latency, retry immediately.
1699 prv = ox;
1700 goto Abort;
1701 }
1702
1703 // Did lock ownership change hands ?
1704 if (ox != prv && prv != NULL) {
1705 goto Abort;
1706 }
1707 prv = ox;
1708
1709 // Abort the spin if the owner is not executing.
1710 // The owner must be executing in order to drop the lock.
1711 // Spinning while the owner is OFFPROC is idiocy.
1712 // Consider: ctr -= RunnablePenalty ;
1713 if (NotRunnable(Self, ox)) {
1714 goto Abort;
1715 }
1716 if (_succ == NULL) {
1717 _succ = Self;
1718 }
1719 }
1720
1721 // Spin failed with prejudice -- reduce _SpinDuration.
1722 // TODO: Use an AIMD-like policy to adjust _SpinDuration.
1723 // AIMD is globally stable.
1724 {
1725 int x = _SpinDuration;
1726 if (x > 0) {
1727 // Consider an AIMD scheme like: x -= (x >> 3) + 100
      // This is globally stable and tends to damp the response.
1729 x -= Knob_Penalty;
1730 if (x < 0) x = 0;
1731 _SpinDuration = x;
1732 }
1733 }
1734
1735 Abort:
1736 if (_succ == Self) {
1737 _succ = NULL;
1738 // Invariant: after setting succ=null a contending thread
1739 // must recheck-retry _owner before parking. This usually happens
1740 // in the normal usage of TrySpin(), but it's safest
1741 // to make TrySpin() as foolproof as possible.
1742 OrderAccess::fence();
1743 if (TryLock(Self) > 0) return 1;
1744 }
1745 return 0;
1746}
1747
1748// NotRunnable() -- informed spinning
1749//
1750// Don't bother spinning if the owner is not eligible to drop the lock.
1751// Spin only if the owner thread is _thread_in_Java or _thread_in_vm.
1752// The thread must be runnable in order to drop the lock in timely fashion.
1753// If the _owner is not runnable then spinning will not likely be
1754// successful (profitable).
1755//
1756// Beware -- the thread referenced by _owner could have died
// so a simple fetch from _owner->_thread_state might trap.
1758// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
1759// Because of the lifecycle issues, the _thread_state values
1760// observed by NotRunnable() might be garbage. NotRunnable must
1761// tolerate this and consider the observed _thread_state value
1762// as advisory.
1763//
1764// Beware too, that _owner is sometimes a BasicLock address and sometimes
1765// a thread pointer.
// Alternatively, we might tag the type (thread pointer vs basiclock pointer)
1767// with the LSB of _owner. Another option would be to probabilistically probe
1768// the putative _owner->TypeTag value.
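// For example (an illustrative tagged encoding, not what this code does):
//   _owner = (void *) ((uintptr_t) basic_lock | 1);     // tag BasicLock addresses
//   bool owner_is_thread = (((uintptr_t) _owner) & 1) == 0;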
1769//
1770// Checking _thread_state isn't perfect. Even if the thread is
1771// in_java it might be blocked on a page-fault or have been preempted
1772// and sitting on a ready/dispatch queue.
1773//
1774// The return value from NotRunnable() is *advisory* -- the
1775// result is based on sampling and is not necessarily coherent.
1776// The caller must tolerate false-negative and false-positive errors.
1777// Spinning, in general, is probabilistic anyway.
1778
1779
1780int ObjectMonitor::NotRunnable(Thread * Self, Thread * ox) {
1781 // Check ox->TypeTag == 2BAD.
1782 if (ox == NULL) return 0;
1783
1784 // Avoid transitive spinning ...
1785 // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L.
1786 // Immediately after T1 acquires L it's possible that T2, also
1787 // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
1788 // This occurs transiently after T1 acquired L but before
1789 // T1 managed to clear T1.Stalled. T2 does not need to abort
1790 // its spin in this circumstance.
1791 intptr_t BlockedOn = SafeFetchN((intptr_t *) &ox->_Stalled, intptr_t(1));
1792
1793 if (BlockedOn == 1) return 1;
1794 if (BlockedOn != 0) {
1795 return BlockedOn != intptr_t(this) && _owner == ox;
1796 }
1797
  assert(sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant");
  int jst = SafeFetch32((int *) &((JavaThread *) ox)->_thread_state, -1);
1800 // consider also: jst != _thread_in_Java -- but that's overspecific.
1801 return jst == _thread_blocked || jst == _thread_in_native;
1802}
1803
1804
1805// -----------------------------------------------------------------------------
1806// WaitSet management ...
1807
1808ObjectWaiter::ObjectWaiter(Thread* thread) {
1809 _next = NULL;
1810 _prev = NULL;
1811 _notified = 0;
1812 _notifier_tid = 0;
1813 TState = TS_RUN;
1814 _thread = thread;
1815 _event = thread->_ParkEvent;
1816 _active = false;
1817 assert(_event != NULL, "invariant");
1818}
1819
1820void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
1821 JavaThread *jt = (JavaThread *)this->_thread;
1822 _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
1823}
1824
1825void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
1826 JavaThread *jt = (JavaThread *)this->_thread;
1827 JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
1828}
1829
1830inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
1831 assert(node != NULL, "should not add NULL node");
1832 assert(node->_prev == NULL, "node already in list");
1833 assert(node->_next == NULL, "node already in list");
1834 // put node at end of queue (circular doubly linked list)
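  // For example, after enqueueing A, B, and C in that order:
  //   _WaitSet -> A <-> B <-> C, circularly, with C->_next == A and
  //   A->_prev == C; head->_prev is thus the tail, giving O(1) append.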
1835 if (_WaitSet == NULL) {
1836 _WaitSet = node;
1837 node->_prev = node;
1838 node->_next = node;
1839 } else {
1840 ObjectWaiter* head = _WaitSet;
1841 ObjectWaiter* tail = head->_prev;
1842 assert(tail->_next == head, "invariant check");
1843 tail->_next = node;
1844 head->_prev = node;
1845 node->_next = head;
1846 node->_prev = tail;
1847 }
1848}
1849
1850inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
1851 // dequeue the very first waiter
1852 ObjectWaiter* waiter = _WaitSet;
1853 if (waiter) {
1854 DequeueSpecificWaiter(waiter);
1855 }
1856 return waiter;
1857}
1858
1859inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
1860 assert(node != NULL, "should not dequeue NULL node");
1861 assert(node->_prev != NULL, "node already removed from list");
1862 assert(node->_next != NULL, "node already removed from list");
  // When the waiter has woken up because of interrupt,
  // timeout, or other spurious wake-up, dequeue the
  // waiter from the waiting list.
1866 ObjectWaiter* next = node->_next;
1867 if (next == node) {
1868 assert(node->_prev == node, "invariant check");
1869 _WaitSet = NULL;
1870 } else {
1871 ObjectWaiter* prev = node->_prev;
1872 assert(prev->_next == node, "invariant check");
1873 assert(next->_prev == node, "invariant check");
1874 next->_prev = prev;
1875 prev->_next = next;
1876 if (_WaitSet == node) {
1877 _WaitSet = next;
1878 }
1879 }
1880 node->_next = NULL;
1881 node->_prev = NULL;
1882}
1883
1884// -----------------------------------------------------------------------------
1885// PerfData support
1886PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL;
1887PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL;
1888PerfCounter * ObjectMonitor::_sync_Parks = NULL;
1889PerfCounter * ObjectMonitor::_sync_Notifications = NULL;
1890PerfCounter * ObjectMonitor::_sync_Inflations = NULL;
1891PerfCounter * ObjectMonitor::_sync_Deflations = NULL;
1892PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL;
1893
1894// One-shot global initialization for the sync subsystem.
1895// We could also defer initialization and initialize on-demand
1896// the first time we call ObjectSynchronizer::inflate().
1897// Initialization would be protected - like so many things - by
1898// the MonitorCache_lock.
1899
1900void ObjectMonitor::Initialize() {
1901 assert(!InitDone, "invariant");
1902
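  // On a uniprocessor the lock owner cannot make progress while a
  // contending thread spins, so disable both adaptive and fixed spinning.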
1903 if (!os::is_MP()) {
1904 Knob_SpinLimit = 0;
1905 Knob_PreSpin = 0;
1906 Knob_FixedSpin = -1;
1907 }
1908
1909 if (UsePerfData) {
1910 EXCEPTION_MARK;
1911#define NEWPERFCOUNTER(n) \
1912 { \
1913 n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events, \
1914 CHECK); \
1915 }
1916#define NEWPERFVARIABLE(n) \
1917 { \
1918 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
1919 CHECK); \
1920 }
1921 NEWPERFCOUNTER(_sync_Inflations);
1922 NEWPERFCOUNTER(_sync_Deflations);
1923 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1924 NEWPERFCOUNTER(_sync_FutileWakeups);
1925 NEWPERFCOUNTER(_sync_Parks);
1926 NEWPERFCOUNTER(_sync_Notifications);
1927 NEWPERFVARIABLE(_sync_MonExtant);
1928#undef NEWPERFCOUNTER
1929#undef NEWPERFVARIABLE
1930 }
1931
1932 DEBUG_ONLY(InitDone = true;)
1933}
1934
1935void ObjectMonitor::print_on(outputStream* st) const {
1936 // The minimal things to print for markOop printing, more can be added for debugging and logging.
1937 st->print("{contentions=0x%08x,waiters=0x%08x"
1938 ",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}",
1939 contentions(), waiters(), recursions(),
1940 p2i(owner()));
1941}
1942void ObjectMonitor::print() const { print_on(tty); }
1943