/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"


static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

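// Enabling biased locking for a klass means installing the biased locking
// prototype (an anonymously biased mark word with epoch 0) as the klass's
// prototype header -- the template mark word stamped into newly allocated
// instances -- so that new instances of the klass start out anonymously
// biased and can be biased toward the first thread that locks them.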
static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}

class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the class loader data dictionaries enabling biased locking for all
    // currently loaded classes.
    ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    log_info(biasedlocking)("Biased locking enabled");
  }

  bool allow_nested_vm_operations() const { return false; }
};


// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourself
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones.  This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}


bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
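// The result is cached in the JavaThread; callers that revoke biases must
// clear the cache again (see clean_up_cached_monitor_info()) because the
// stack walk is only valid for the duration of the current operation.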
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}

// After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL
// and that thread is still alive. Otherwise it is left untouched (i.e., the caller is responsible for
// initializing it).
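// Note: this routine walks the stack of the thread toward which the object is
// biased without synchronization, so it is only safe to call at a safepoint or
// when the requesting thread is itself the biased thread.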
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), (intptr_t) mark,
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return BiasedLocking::NOT_BIASED;
  }

  uint age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            (intptr_t) mark,
                            obj->klass()->external_name(),
                            (intptr_t) obj->klass()->prototype_header(),
                            (allow_rebias ? 1 : 0),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             (intptr_t) mark,
                             obj->klass()->external_name(),
                             (intptr_t) obj->klass()->prototype_header(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
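  // The net effect is to leave the object in exactly the state that ordinary
  // stack-locking would have produced: any recursive lock records get a NULL
  // displaced header, and only the oldest lock record ends up holding the real
  // displaced (unbiased) header that the object's mark word points to.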
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (oopDesc::equals(mon_info->owner(), obj)) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release-store the lock address on platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }

  return BiasedLocking::BIAS_REVOKED;
}


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
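  // The stage transitions are driven by a per-klass revocation count: each
  // single revocation bumps the count, reaching BiasedLockingBulkRebiasThreshold
  // (20 by default) triggers stage 1, and reaching BiasedLockingBulkRevokeThreshold
  // (40 by default) triggers stage 2. The count is reset below if more than
  // BiasedLockingDecayTime milliseconds (25 seconds by default) have passed
  // since the last bulk rebias of this klass.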
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}


static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.
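      // A bias is only valid while the epoch in the object's mark word matches
      // the epoch in its klass's prototype header, so bumping the prototype
      // epoch implicitly invalidates every existing (unlocked) bias for this
      // type in one step.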

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header()->has_bias_pattern()) {
        int prev_epoch = klass->prototype_header()->bias_epoch();
        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
        int cur_epoch = klass->prototype_header()->bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markOopDesc::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            revoke_bias(owner, false, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must also be forcibly revoked
      // to ensure the guarantees made to callers.
      revoke_bias(o, false, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}


static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    thr->set_cached_monitor_info(NULL);
  }
}

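// VM operation that revokes the bias of a single object, or of each object in
// a list, at a safepoint. doit_prologue() checks whether any of the objects
// are still biased so that no safepoint is scheduled when there is nothing
// to do.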
class VM_RevokeBias : public VM_Operation {
protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;
  uint64_t _safepoint_id;

public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0)
    , _safepoint_id(0) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0)
    , _safepoint_id(0) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:");
      JavaThread* biased_locker = NULL;
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
      if (biased_locker != NULL) {
        _biased_locker_id = JFR_THREAD_ID(biased_locker);
      }
      _safepoint_id = SafepointSynchronize::safepoint_id();
      clean_up_cached_monitor_info();
      return;
    } else {
      log_info(biasedlocking)("Revoking bias with global safepoint:");
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }

  uint64_t safepoint_id() const {
    return _safepoint_id;
  }
};

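// Bulk variant: rebiases or revokes all instances of the object's klass at a
// safepoint. Unlike VM_RevokeBias, its prologue always returns true, because
// the heuristics have already decided that a bulk operation is required.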
class VM_BulkRevokeBias : public VM_RevokeBias {
private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    _safepoint_id = SafepointSynchronize::safepoint_id();
    clean_up_cached_monitor_info();
  }

  bool is_bulk_rebias() const {
    return _bulk_rebias;
  }
};

static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->commit();
}

static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, VM_RevokeBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->set_safepointId(op->safepoint_id());
  event->set_previousOwner(op->biased_locker());
  event->commit();
}

static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_revokedClass(k);
  event->set_disableBiasing(!op->is_bulk_rebias());
  event->set_safepointId(op->safepoint_id());
  event->commit();
}

BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = obj->cas_set_mark(prototype_header, mark);
      assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass *k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      log_info(biasedlocking)("Revoking bias by walking my own stack:");
      EventBiasedLockSelfRevocation event;
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      if (event.should_commit()) {
        post_self_revocation_event(&event, k);
      }
      return cond;
    } else {
      EventBiasedLockRevocation event;
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      if (event.should_commit() && revoke.status_code() != NOT_BIASED) {
        post_revocation_event(&event, k, &revoke);
      }
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  EventBiasedLockClassRevocation event;
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  if (event.should_commit()) {
    post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
  }
  return bulk_revoke.status_code();
}


void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

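// If the dedicated slow path counter was never updated directly, approximate
// it as the total entry count minus every other (more specific) entry count.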
int BiasedLockingCounters::slow_path_entry_count() const {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) const {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}

void BiasedLockingCounters::print() const { print_on(tty); }