1 | /* |
2 | * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "classfile/systemDictionary.hpp" |
27 | #include "memory/allocation.hpp" |
28 | #include "memory/heapInspection.hpp" |
29 | #include "memory/oopFactory.hpp" |
30 | #include "memory/resourceArea.hpp" |
31 | #include "oops/instanceKlass.hpp" |
32 | #include "oops/objArrayKlass.hpp" |
33 | #include "oops/objArrayOop.inline.hpp" |
34 | #include "oops/oop.inline.hpp" |
35 | #include "runtime/atomic.hpp" |
36 | #include "runtime/handles.inline.hpp" |
37 | #include "runtime/init.hpp" |
38 | #include "runtime/objectMonitor.inline.hpp" |
39 | #include "runtime/thread.inline.hpp" |
40 | #include "runtime/threadSMR.inline.hpp" |
41 | #include "runtime/vframe.hpp" |
42 | #include "runtime/vmThread.hpp" |
43 | #include "runtime/vmOperations.hpp" |
44 | #include "services/threadService.hpp" |
45 | |
// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR 174 use
//   - HotSpot extension (public and committed)
//   - HotSpot extension (private/internal and uncommitted)
51 | |
52 | // Default is disabled. |
53 | bool ThreadService::_thread_monitoring_contention_enabled = false; |
54 | bool ThreadService::_thread_cpu_time_enabled = false; |
55 | bool ThreadService::_thread_allocated_memory_enabled = false; |
56 | |
57 | PerfCounter* ThreadService::_total_threads_count = NULL; |
58 | PerfVariable* ThreadService::_live_threads_count = NULL; |
59 | PerfVariable* ThreadService::_peak_threads_count = NULL; |
60 | PerfVariable* ThreadService::_daemon_threads_count = NULL; |
61 | volatile int ThreadService::_atomic_threads_count = 0; |
62 | volatile int ThreadService::_atomic_daemon_threads_count = 0; |
63 | |
64 | ThreadDumpResult* ThreadService::_threaddump_list = NULL; |
65 | |
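// Initial capacity of the C-heap GrowableArrays used by the thread dump
// data structures below (stack frames, locked monitors, owned locks).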
66 | static const int INITIAL_ARRAY_SIZE = 10; |
67 | |
68 | void ThreadService::init() { |
69 | EXCEPTION_MARK; |
70 | |
  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set; in that
  // case they are allocated on the C heap.
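  // Assuming the usual PerfData name space (JAVA_THREADS is expected to
  // expand to "java.threads"), these counters surface externally as
  // java.threads.started, java.threads.live, java.threads.livePeak and
  // java.threads.daemon, e.g. in "jcmd <pid> PerfCounter.print" output.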
74 | |
  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);
90 | |
91 | if (os::is_thread_cpu_time_supported()) { |
92 | _thread_cpu_time_enabled = true; |
93 | } |
94 | |
95 | _thread_allocated_memory_enabled = true; // Always on, so enable it |
96 | } |
97 | |
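// Resets the peak thread count to the current number of live threads.
// This is the VM side of the peak-thread-count reset operation exposed
// through java.lang.management (ThreadMXBean.resetPeakThreadCount()).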
98 | void ThreadService::reset_peak_thread_count() { |
99 | // Acquire the lock to update the peak thread count |
100 | // to synchronize with thread addition and removal. |
101 | MutexLocker mu(Threads_lock); |
102 | _peak_threads_count->set_value(get_live_thread_count()); |
103 | } |
104 | |
105 | static bool is_hidden_thread(JavaThread *thread) { |
106 | // hide VM internal or JVMTI agent threads |
107 | return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread(); |
108 | } |
109 | |
110 | void ThreadService::add_thread(JavaThread* thread, bool daemon) { |
  assert(Threads_lock->owned_by_self(), "must have threads lock");
112 | |
113 | // Do not count hidden threads |
114 | if (is_hidden_thread(thread)) { |
115 | return; |
116 | } |
117 | |
118 | _total_threads_count->inc(); |
119 | _live_threads_count->inc(); |
120 | Atomic::inc(&_atomic_threads_count); |
121 | int count = _atomic_threads_count; |
122 | |
123 | if (count > _peak_threads_count->get_value()) { |
124 | _peak_threads_count->set_value(count); |
125 | } |
126 | |
127 | if (daemon) { |
128 | _daemon_threads_count->inc(); |
129 | Atomic::inc(&_atomic_daemon_threads_count); |
130 | } |
131 | } |
132 | |
133 | void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) { |
134 | Atomic::dec(&_atomic_threads_count); |
135 | |
136 | if (daemon) { |
137 | Atomic::dec(&_atomic_daemon_threads_count); |
138 | } |
139 | } |
140 | |
141 | void ThreadService::remove_thread(JavaThread* thread, bool daemon) { |
  assert(Threads_lock->owned_by_self(), "must have threads lock");
143 | |
144 | // Do not count hidden threads |
145 | if (is_hidden_thread(thread)) { |
146 | return; |
147 | } |
148 | |
  assert(!thread->is_terminated(), "must not be terminated");
150 | if (!thread->is_exiting()) { |
151 | // JavaThread::exit() skipped calling current_thread_exiting() |
152 | decrement_thread_counts(thread, daemon); |
153 | } |
154 | |
155 | int daemon_count = _atomic_daemon_threads_count; |
156 | int count = _atomic_threads_count; |
157 | |
158 | // Counts are incremented at the same time, but atomic counts are |
159 | // decremented earlier than perf counts. |
  assert(_live_threads_count->get_value() > count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);
163 | |
164 | _live_threads_count->dec(1); |
165 | if (daemon) { |
    assert(_daemon_threads_count->get_value() > daemon_count,
           "thread count mismatch %d : %d",
           (int)_daemon_threads_count->get_value(), daemon_count);
169 | |
170 | _daemon_threads_count->dec(1); |
171 | } |
172 | |
173 | // Counts are incremented at the same time, but atomic counts are |
174 | // decremented earlier than perf counts. |
  assert(_daemon_threads_count->get_value() >= daemon_count,
         "thread count mismatch %d : %d",
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
         (_live_threads_count->get_value() == 0 && count == 0 &&
          _daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
         (int)_live_threads_count->get_value(), count,
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
         (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, daemon %d,%d",
         (int)_daemon_threads_count->get_value(), daemon_count);
191 | } |
192 | |
193 | void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) { |
194 | // Do not count hidden threads |
195 | if (is_hidden_thread(jt)) { |
196 | return; |
197 | } |
198 | |
  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");
201 | |
202 | decrement_thread_counts(jt, daemon); |
203 | } |
204 | |
205 | // FIXME: JVMTI should call this function |
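// Returns a Handle to the object whose monitor the given thread is waiting
// on (Object.wait) or is blocked trying to enter. Returns a null Handle if
// the thread is not contending for a monitor, or if the pending monitor is
// a raw monitor with no associated Java object.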
206 | Handle ThreadService::get_current_contended_monitor(JavaThread* thread) { |
  assert(thread != NULL, "should be non-NULL");
208 | debug_only(Thread::check_for_dangling_thread_pointer(thread);) |
209 | |
210 | ObjectMonitor *wait_obj = thread->current_waiting_monitor(); |
211 | |
212 | oop obj = NULL; |
213 | if (wait_obj != NULL) { |
214 | // thread is doing an Object.wait() call |
215 | obj = (oop) wait_obj->object(); |
    assert(obj != NULL, "Object.wait() should have an object");
217 | } else { |
218 | ObjectMonitor *enter_obj = thread->current_pending_monitor(); |
219 | if (enter_obj != NULL) { |
220 | // thread is trying to enter() or raw_enter() an ObjectMonitor. |
221 | obj = (oop) enter_obj->object(); |
222 | } |
    // If obj == NULL, then the ObjectMonitor is a raw monitor, which doesn't count.
224 | } |
225 | |
226 | Handle h(Thread::current(), obj); |
227 | return h; |
228 | } |
229 | |
230 | bool ThreadService::set_thread_monitoring_contention(bool flag) { |
231 | MutexLocker m(Management_lock); |
232 | |
233 | bool prev = _thread_monitoring_contention_enabled; |
234 | _thread_monitoring_contention_enabled = flag; |
235 | |
236 | return prev; |
237 | } |
238 | |
239 | bool ThreadService::set_thread_cpu_time_enabled(bool flag) { |
240 | MutexLocker m(Management_lock); |
241 | |
242 | bool prev = _thread_cpu_time_enabled; |
243 | _thread_cpu_time_enabled = flag; |
244 | |
245 | return prev; |
246 | } |
247 | |
248 | bool ThreadService::set_thread_allocated_memory_enabled(bool flag) { |
249 | MutexLocker m(Management_lock); |
250 | |
251 | bool prev = _thread_allocated_memory_enabled; |
252 | _thread_allocated_memory_enabled = flag; |
253 | |
254 | return prev; |
255 | } |
256 | |
257 | // GC support |
258 | void ThreadService::oops_do(OopClosure* f) { |
259 | for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) { |
260 | dump->oops_do(f); |
261 | } |
262 | } |
263 | |
264 | void ThreadService::metadata_do(void f(Metadata*)) { |
265 | for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) { |
266 | dump->metadata_do(f); |
267 | } |
268 | } |
269 | |
270 | void ThreadService::add_thread_dump(ThreadDumpResult* dump) { |
271 | MutexLocker ml(Management_lock); |
272 | if (_threaddump_list == NULL) { |
273 | _threaddump_list = dump; |
274 | } else { |
275 | dump->set_next(_threaddump_list); |
276 | _threaddump_list = dump; |
277 | } |
278 | } |
279 | |
280 | void ThreadService::remove_thread_dump(ThreadDumpResult* dump) { |
281 | MutexLocker ml(Management_lock); |
282 | |
283 | ThreadDumpResult* prev = NULL; |
284 | bool found = false; |
285 | for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) { |
286 | if (d == dump) { |
287 | if (prev == NULL) { |
288 | _threaddump_list = dump->next(); |
289 | } else { |
290 | prev->set_next(dump->next()); |
291 | } |
292 | found = true; |
293 | break; |
294 | } |
295 | } |
  assert(found, "The threaddump result to be removed must exist.");
297 | } |
298 | |
// Dump the stack traces of the threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
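// Entries are NULL for threads that have no stack trace snapshot. If an
// exception occurs (CHECK_NH), an empty Handle is returned instead.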
302 | Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads, |
303 | int num_threads, |
304 | TRAPS) { |
  assert(num_threads > 0, "just checking");
306 | |
307 | ThreadDumpResult dump_result; |
308 | VM_ThreadDump op(&dump_result, |
309 | threads, |
310 | num_threads, |
311 | -1, /* entire stack */ |
312 | false, /* with locked monitors */ |
313 | false /* with locked synchronizers */); |
314 | VMThread::execute(&op); |
315 | |
316 | // Allocate the resulting StackTraceElement[][] object |
317 | |
318 | ResourceMark rm(THREAD); |
319 | Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH); |
320 | ObjArrayKlass* ik = ObjArrayKlass::cast(k); |
321 | objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH); |
322 | objArrayHandle result_obj(THREAD, r); |
323 | |
324 | int num_snapshots = dump_result.num_snapshots(); |
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
327 | int i = 0; |
328 | for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) { |
329 | ThreadStackTrace* stacktrace = ts->get_stack_trace(); |
330 | if (stacktrace == NULL) { |
331 | // No stack trace |
332 | result_obj->obj_at_put(i, NULL); |
333 | } else { |
334 | // Construct an array of java/lang/StackTraceElement object |
335 | Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH); |
336 | result_obj->obj_at_put(i, backtrace_h()); |
337 | } |
338 | } |
339 | |
340 | return result_obj; |
341 | } |
342 | |
343 | void ThreadService::reset_contention_count_stat(JavaThread* thread) { |
344 | ThreadStatistics* stat = thread->get_thread_stat(); |
345 | if (stat != NULL) { |
346 | stat->reset_count_stat(); |
347 | } |
348 | } |
349 | |
350 | void ThreadService::reset_contention_time_stat(JavaThread* thread) { |
351 | ThreadStatistics* stat = thread->get_thread_stat(); |
352 | if (stat != NULL) { |
353 | stat->reset_time_stat(); |
354 | } |
355 | } |
356 | |
// Find deadlocks involving object monitors, and concurrent locks as well if
// concurrent_locks is true.
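//
// The search assigns depth-first numbers to threads and, for each thread not
// yet visited, follows the "waiting-to-lock monitor/synchronizer -> owning
// thread" edge. A cycle (and therefore a deadlock) is recorded when the walk
// reaches a thread that was already numbered during the current walk. An
// owner that cannot be found at the safepoint is also recorded as a
// deadlock, since the waiting thread is then blocked permanently.
//
// Illustrative example (not taken from any particular run): if T1 is pending
// on a monitor owned by T2 and T2 is pending on a monitor owned by T1, the
// walk starting at T1 comes back to T1 and records the cycle {T1, T2}.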
358 | DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) { |
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
360 | |
361 | // This code was modified from the original Threads::find_deadlocks code. |
362 | int globalDfn = 0, thisDfn; |
363 | ObjectMonitor* waitingToLockMonitor = NULL; |
364 | oop waitingToLockBlocker = NULL; |
365 | bool blocked_on_monitor = false; |
366 | JavaThread *currentThread, *previousThread; |
367 | int num_deadlocks = 0; |
368 | |
369 | // Initialize the depth-first-number for each JavaThread. |
370 | JavaThreadIterator jti(t_list); |
371 | for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) { |
372 | jt->set_depth_first_number(-1); |
373 | } |
374 | |
375 | DeadlockCycle* deadlocks = NULL; |
376 | DeadlockCycle* last = NULL; |
377 | DeadlockCycle* cycle = new DeadlockCycle(); |
378 | for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) { |
379 | if (jt->depth_first_number() >= 0) { |
380 | // this thread was already visited |
381 | continue; |
382 | } |
383 | |
384 | thisDfn = globalDfn; |
385 | jt->set_depth_first_number(globalDfn++); |
386 | previousThread = jt; |
387 | currentThread = jt; |
388 | |
389 | cycle->reset(); |
390 | |
391 | // When there is a deadlock, all the monitors involved in the dependency |
392 | // cycle must be contended and heavyweight. So we only care about the |
393 | // heavyweight monitor a thread is waiting to lock. |
394 | waitingToLockMonitor = (ObjectMonitor*)jt->current_pending_monitor(); |
395 | if (concurrent_locks) { |
396 | waitingToLockBlocker = jt->current_park_blocker(); |
397 | } |
398 | while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) { |
399 | cycle->add_thread(currentThread); |
400 | if (waitingToLockMonitor != NULL) { |
401 | address currentOwner = (address)waitingToLockMonitor->owner(); |
402 | if (currentOwner != NULL) { |
403 | currentThread = Threads::owning_thread_from_monitor_owner(t_list, |
404 | currentOwner); |
405 | if (currentThread == NULL) { |
406 | // This function is called at a safepoint so the JavaThread |
407 | // that owns waitingToLockMonitor should be findable, but |
408 | // if it is not findable, then the previous currentThread is |
409 | // blocked permanently. We record this as a deadlock. |
410 | num_deadlocks++; |
411 | |
412 | cycle->set_deadlock(true); |
413 | |
414 | // add this cycle to the deadlocks list |
415 | if (deadlocks == NULL) { |
416 | deadlocks = cycle; |
417 | } else { |
418 | last->set_next(cycle); |
419 | } |
420 | last = cycle; |
421 | cycle = new DeadlockCycle(); |
422 | break; |
423 | } |
424 | } |
425 | } else { |
426 | if (concurrent_locks) { |
427 | if (waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) { |
428 | oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker); |
429 | // This JavaThread (if there is one) is protected by the |
430 | // ThreadsListSetter in VM_FindDeadlocks::doit(). |
431 | currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL; |
432 | } else { |
433 | currentThread = NULL; |
434 | } |
435 | } |
436 | } |
437 | |
438 | if (currentThread == NULL) { |
439 | // No dependency on another thread |
440 | break; |
441 | } |
442 | if (currentThread->depth_first_number() < 0) { |
443 | // First visit to this thread |
444 | currentThread->set_depth_first_number(globalDfn++); |
445 | } else if (currentThread->depth_first_number() < thisDfn) { |
446 | // Thread already visited, and not on a (new) cycle |
447 | break; |
448 | } else if (currentThread == previousThread) { |
449 | // Self-loop, ignore |
450 | break; |
451 | } else { |
452 | // We have a (new) cycle |
453 | num_deadlocks++; |
454 | |
455 | cycle->set_deadlock(true); |
456 | |
457 | // add this cycle to the deadlocks list |
458 | if (deadlocks == NULL) { |
459 | deadlocks = cycle; |
460 | } else { |
461 | last->set_next(cycle); |
462 | } |
463 | last = cycle; |
464 | cycle = new DeadlockCycle(); |
465 | break; |
466 | } |
467 | previousThread = currentThread; |
468 | waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor(); |
469 | if (concurrent_locks) { |
470 | waitingToLockBlocker = currentThread->current_park_blocker(); |
471 | } |
472 | } |
473 | |
474 | } |
475 | delete cycle; |
476 | return deadlocks; |
477 | } |
478 | |
479 | ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() { |
480 | |
481 | // Create a new ThreadDumpResult object and append to the list. |
482 | // If GC happens before this function returns, Method* |
483 | // in the stack trace will be visited. |
484 | ThreadService::add_thread_dump(this); |
485 | } |
486 | |
487 | ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() { |
488 | // Create a new ThreadDumpResult object and append to the list. |
489 | // If GC happens before this function returns, oops |
490 | // will be visited. |
491 | ThreadService::add_thread_dump(this); |
492 | } |
493 | |
494 | ThreadDumpResult::~ThreadDumpResult() { |
495 | ThreadService::remove_thread_dump(this); |
496 | |
497 | // free all the ThreadSnapshot objects created during |
498 | // the VM_ThreadDump operation |
499 | ThreadSnapshot* ts = _snapshots; |
500 | while (ts != NULL) { |
501 | ThreadSnapshot* p = ts; |
502 | ts = ts->next(); |
503 | delete p; |
504 | } |
505 | } |
506 | |
507 | ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() { |
508 | ThreadSnapshot* ts = new ThreadSnapshot(); |
509 | link_thread_snapshot(ts); |
510 | return ts; |
511 | } |
512 | |
513 | ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) { |
514 | // Note: it is very important that the ThreadSnapshot* gets linked before |
515 | // ThreadSnapshot::initialize gets called. This is to ensure that |
516 | // ThreadSnapshot::oops_do can get called prior to the field |
517 | // ThreadSnapshot::_threadObj being assigned a value (to prevent a dangling |
518 | // oop). |
519 | ThreadSnapshot* ts = new ThreadSnapshot(); |
520 | link_thread_snapshot(ts); |
521 | ts->initialize(t_list(), thread); |
522 | return ts; |
523 | } |
524 | |
525 | void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) { |
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
528 | _num_snapshots++; |
529 | if (_snapshots == NULL) { |
530 | _snapshots = ts; |
531 | } else { |
532 | _last->set_next(ts); |
533 | } |
534 | _last = ts; |
535 | } |
536 | |
537 | void ThreadDumpResult::oops_do(OopClosure* f) { |
538 | for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) { |
539 | ts->oops_do(f); |
540 | } |
541 | } |
542 | |
543 | void ThreadDumpResult::metadata_do(void f(Metadata*)) { |
544 | for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) { |
545 | ts->metadata_do(f); |
546 | } |
547 | } |
548 | |
549 | ThreadsList* ThreadDumpResult::t_list() { |
550 | return _setter.list(); |
551 | } |
552 | |
553 | StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) { |
554 | _method = jvf->method(); |
555 | _bci = jvf->bci(); |
556 | _class_holder = _method->method_holder()->klass_holder(); |
557 | _locked_monitors = NULL; |
558 | if (with_lock_info) { |
559 | ResourceMark rm; |
560 | GrowableArray<MonitorInfo*>* list = jvf->locked_monitors(); |
561 | int length = list->length(); |
562 | if (length > 0) { |
563 | _locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(length, true); |
564 | for (int i = 0; i < length; i++) { |
565 | MonitorInfo* monitor = list->at(i); |
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
567 | _locked_monitors->append(monitor->owner()); |
568 | } |
569 | } |
570 | } |
571 | } |
572 | |
573 | void StackFrameInfo::oops_do(OopClosure* f) { |
574 | if (_locked_monitors != NULL) { |
575 | int length = _locked_monitors->length(); |
576 | for (int i = 0; i < length; i++) { |
577 | f->do_oop((oop*) _locked_monitors->adr_at(i)); |
578 | } |
579 | } |
580 | f->do_oop(&_class_holder); |
581 | } |
582 | |
583 | void StackFrameInfo::metadata_do(void f(Metadata*)) { |
584 | f(_method); |
585 | } |
586 | |
587 | void StackFrameInfo::print_on(outputStream* st) const { |
588 | ResourceMark rm; |
589 | java_lang_Throwable::print_stack_element(st, method(), bci()); |
590 | int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0); |
591 | for (int i = 0; i < len; i++) { |
592 | oop o = _locked_monitors->at(i); |
593 | st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)" , p2i(o), o->klass()->external_name()); |
594 | } |
595 | |
596 | } |
597 | |
598 | // Iterate through monitor cache to find JNI locked monitors |
599 | class InflatedMonitorsClosure: public MonitorClosure { |
600 | private: |
601 | ThreadStackTrace* _stack_trace; |
602 | Thread* _thread; |
603 | public: |
604 | InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) { |
605 | _thread = t; |
606 | _stack_trace = st; |
607 | } |
608 | void do_monitor(ObjectMonitor* mid) { |
609 | if (mid->owner() == _thread) { |
610 | oop object = (oop) mid->object(); |
611 | if (!_stack_trace->is_owned_monitor_on_stack(object)) { |
612 | _stack_trace->add_jni_locked_monitor(object); |
613 | } |
614 | } |
615 | } |
616 | }; |
617 | |
618 | ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) { |
619 | _thread = t; |
620 | _frames = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, true); |
621 | _depth = 0; |
622 | _with_locked_monitors = with_locked_monitors; |
623 | if (_with_locked_monitors) { |
624 | _jni_locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true); |
625 | } else { |
626 | _jni_locked_monitors = NULL; |
627 | } |
628 | } |
629 | |
630 | ThreadStackTrace::~ThreadStackTrace() { |
631 | for (int i = 0; i < _frames->length(); i++) { |
632 | delete _frames->at(i); |
633 | } |
634 | delete _frames; |
635 | if (_jni_locked_monitors != NULL) { |
636 | delete _jni_locked_monitors; |
637 | } |
638 | } |
639 | |
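// Walk the target thread's vframes at a safepoint and record up to maxDepth
// Java frames (a negative maxDepth means the entire stack). If requested,
// also scan the inflated monitors for objects locked via JNI that do not
// appear in any recorded frame.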
640 | void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) { |
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
642 | |
643 | if (_thread->has_last_Java_frame()) { |
644 | RegisterMap reg_map(_thread); |
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
646 | int count = 0; |
647 | for (vframe* f = start_vf; f; f = f->sender() ) { |
648 | if (maxDepth >= 0 && count == maxDepth) { |
649 | // Skip frames if more than maxDepth |
650 | break; |
651 | } |
652 | if (f->is_java_frame()) { |
653 | javaVFrame* jvf = javaVFrame::cast(f); |
654 | add_stack_frame(jvf); |
655 | count++; |
656 | } else { |
657 | // Ignore non-Java frames |
658 | } |
659 | } |
660 | } |
661 | |
662 | if (_with_locked_monitors) { |
663 | // Iterate inflated monitors and find monitors locked by this thread |
664 | // not found in the stack |
665 | InflatedMonitorsClosure imc(_thread, this); |
666 | ObjectSynchronizer::monitors_iterate(&imc); |
667 | } |
668 | } |
669 | |
670 | |
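// Returns true if 'object' is recorded as a locked monitor in one of the
// stack frames of this trace. Used to avoid reporting a JNI-locked monitor
// twice.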
671 | bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) { |
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
673 | |
674 | bool found = false; |
675 | int num_frames = get_stack_depth(); |
676 | for (int depth = 0; depth < num_frames; depth++) { |
677 | StackFrameInfo* frame = stack_frame_at(depth); |
678 | int len = frame->num_locked_monitors(); |
679 | GrowableArray<oop>* locked_monitors = frame->locked_monitors(); |
680 | for (int j = 0; j < len; j++) { |
681 | oop monitor = locked_monitors->at(j); |
      assert(monitor != NULL, "must be a Java object");
683 | if (oopDesc::equals(monitor, object)) { |
684 | found = true; |
685 | break; |
686 | } |
687 | } |
688 | } |
689 | return found; |
690 | } |
691 | |
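// Allocates a java.lang.StackTraceElement[] of length _depth and fills it
// from the recorded frames. Returns an empty Handle if allocation or element
// creation throws (CHECK_NH).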
692 | Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) { |
693 | InstanceKlass* ik = SystemDictionary::StackTraceElement_klass(); |
  assert(ik != NULL, "must be loaded in 1.4+");
695 | |
696 | // Allocate an array of java/lang/StackTraceElement object |
697 | objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH); |
698 | objArrayHandle backtrace(THREAD, ste); |
699 | for (int j = 0; j < _depth; j++) { |
700 | StackFrameInfo* frame = _frames->at(j); |
701 | methodHandle mh(THREAD, frame->method()); |
702 | oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH); |
703 | backtrace->obj_at_put(j, element); |
704 | } |
705 | return backtrace; |
706 | } |
707 | |
708 | void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) { |
709 | StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors); |
710 | _frames->append(frame); |
711 | _depth++; |
712 | } |
713 | |
714 | void ThreadStackTrace::oops_do(OopClosure* f) { |
715 | int length = _frames->length(); |
716 | for (int i = 0; i < length; i++) { |
717 | _frames->at(i)->oops_do(f); |
718 | } |
719 | |
720 | length = (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0); |
721 | for (int j = 0; j < length; j++) { |
722 | f->do_oop((oop*) _jni_locked_monitors->adr_at(j)); |
723 | } |
724 | } |
725 | |
726 | void ThreadStackTrace::metadata_do(void f(Metadata*)) { |
727 | int length = _frames->length(); |
728 | for (int i = 0; i < length; i++) { |
729 | _frames->at(i)->metadata_do(f); |
730 | } |
731 | } |
732 | |
733 | |
734 | ConcurrentLocksDump::~ConcurrentLocksDump() { |
735 | if (_retain_map_on_free) { |
736 | return; |
737 | } |
738 | |
739 | for (ThreadConcurrentLocks* t = _map; t != NULL;) { |
740 | ThreadConcurrentLocks* tcl = t; |
741 | t = t->next(); |
742 | delete tcl; |
743 | } |
744 | } |
745 | |
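// Walks the heap for java.util.concurrent.locks.AbstractOwnableSynchronizer
// instances and records, per owning JavaThread, the synchronizers it
// currently owns.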
746 | void ConcurrentLocksDump::dump_at_safepoint() { |
747 | // dump all locked concurrent locks |
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
749 | |
750 | GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true /* C_heap */); |
751 | |
752 | // Find all instances of AbstractOwnableSynchronizer |
753 | HeapInspection::find_instances_at_safepoint(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(), |
754 | aos_objects); |
755 | // Build a map of thread to its owned AQS locks |
756 | build_map(aos_objects); |
757 | |
758 | delete aos_objects; |
759 | } |
760 | |
761 | |
762 | // build a map of JavaThread to all its owned AbstractOwnableSynchronizer |
763 | void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) { |
764 | int length = aos_objects->length(); |
765 | for (int i = 0; i < length; i++) { |
766 | oop o = aos_objects->at(i); |
767 | oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o); |
768 | if (owner_thread_obj != NULL) { |
769 | // See comments in ThreadConcurrentLocks to see how this |
770 | // JavaThread* is protected. |
771 | JavaThread* thread = java_lang_Thread::thread(owner_thread_obj); |
      assert(o->is_instance(), "Must be an instanceOop");
773 | add_lock(thread, (instanceOop) o); |
774 | } |
775 | } |
776 | } |
777 | |
778 | void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) { |
779 | ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread); |
780 | if (tcl != NULL) { |
781 | tcl->add_lock(o); |
782 | return; |
783 | } |
784 | |
785 | // First owned lock found for this thread |
786 | tcl = new ThreadConcurrentLocks(thread); |
787 | tcl->add_lock(o); |
788 | if (_map == NULL) { |
789 | _map = tcl; |
790 | } else { |
791 | _last->set_next(tcl); |
792 | } |
793 | _last = tcl; |
794 | } |
795 | |
796 | ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) { |
797 | for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) { |
798 | if (tcl->java_thread() == thread) { |
799 | return tcl; |
800 | } |
801 | } |
802 | return NULL; |
803 | } |
804 | |
805 | void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) { |
806 | st->print_cr(" Locked ownable synchronizers:" ); |
807 | ThreadConcurrentLocks* tcl = thread_concurrent_locks(t); |
808 | GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL); |
809 | if (locks == NULL || locks->is_empty()) { |
810 | st->print_cr("\t- None" ); |
811 | st->cr(); |
812 | return; |
813 | } |
814 | |
815 | for (int i = 0; i < locks->length(); i++) { |
816 | instanceOop obj = locks->at(i); |
817 | st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)" , p2i(obj), obj->klass()->external_name()); |
818 | } |
819 | st->cr(); |
820 | } |
821 | |
822 | ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) { |
823 | _thread = thread; |
824 | _owned_locks = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, true); |
825 | _next = NULL; |
826 | } |
827 | |
828 | ThreadConcurrentLocks::~ThreadConcurrentLocks() { |
829 | delete _owned_locks; |
830 | } |
831 | |
832 | void ThreadConcurrentLocks::add_lock(instanceOop o) { |
833 | _owned_locks->append(o); |
834 | } |
835 | |
836 | void ThreadConcurrentLocks::oops_do(OopClosure* f) { |
837 | int length = _owned_locks->length(); |
838 | for (int i = 0; i < length; i++) { |
839 | f->do_oop((oop*) _owned_locks->adr_at(i)); |
840 | } |
841 | } |
842 | |
843 | ThreadStatistics::ThreadStatistics() { |
844 | _contended_enter_count = 0; |
845 | _monitor_wait_count = 0; |
846 | _sleep_count = 0; |
847 | _count_pending_reset = false; |
848 | _timer_pending_reset = false; |
849 | memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts)); |
850 | } |
851 | |
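// Captures a point-in-time view of 'thread': its java.lang.Thread oop,
// contention and sleep statistics, thread status, and (when the thread is
// blocked, waiting or parked) the blocker object plus, if known, the
// blocker's owner.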
852 | void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) { |
853 | _thread = thread; |
854 | _threadObj = thread->threadObj(); |
855 | |
856 | ThreadStatistics* stat = thread->get_thread_stat(); |
857 | _contended_enter_ticks = stat->contended_enter_ticks(); |
858 | _contended_enter_count = stat->contended_enter_count(); |
859 | _monitor_wait_ticks = stat->monitor_wait_ticks(); |
860 | _monitor_wait_count = stat->monitor_wait_count(); |
861 | _sleep_ticks = stat->sleep_ticks(); |
862 | _sleep_count = stat->sleep_count(); |
863 | |
864 | _thread_status = java_lang_Thread::get_thread_status(_threadObj); |
865 | _is_ext_suspended = thread->is_being_ext_suspended(); |
866 | _is_in_native = (thread->thread_state() == _thread_in_native); |
867 | |
868 | if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER || |
869 | _thread_status == java_lang_Thread::IN_OBJECT_WAIT || |
870 | _thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) { |
871 | |
872 | Handle obj = ThreadService::get_current_contended_monitor(thread); |
873 | if (obj() == NULL) { |
874 | // monitor no longer exists; thread is not blocked |
875 | _thread_status = java_lang_Thread::RUNNABLE; |
876 | } else { |
877 | _blocker_object = obj(); |
878 | JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj); |
879 | if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER) |
880 | || (owner != NULL && owner->is_attaching_via_jni())) { |
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or it may be in the middle of being released to
        // another thread), so report this thread as RUNNABLE.
        // Also, when the owner thread is still attaching, its java.lang.Thread
        // object is not completely initialized (for example, the thread name
        // and id may not be set yet), so hide the attaching thread.
887 | _thread_status = java_lang_Thread::RUNNABLE; |
888 | _blocker_object = NULL; |
889 | } else if (owner != NULL) { |
890 | _blocker_object_owner = owner->threadObj(); |
891 | } |
892 | } |
893 | } |
894 | |
895 | // Support for JSR-166 locks |
896 | if (_thread_status == java_lang_Thread::PARKED || _thread_status == java_lang_Thread::PARKED_TIMED) { |
897 | _blocker_object = thread->current_park_blocker(); |
898 | if (_blocker_object != NULL && _blocker_object->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) { |
899 | _blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object); |
900 | } |
901 | } |
902 | } |
903 | |
904 | ThreadSnapshot::~ThreadSnapshot() { |
905 | delete _stack_trace; |
906 | delete _concurrent_locks; |
907 | } |
908 | |
909 | void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) { |
910 | _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors); |
911 | _stack_trace->dump_stack_at_safepoint(max_depth); |
912 | } |
913 | |
914 | |
915 | void ThreadSnapshot::oops_do(OopClosure* f) { |
916 | f->do_oop(&_threadObj); |
917 | f->do_oop(&_blocker_object); |
918 | f->do_oop(&_blocker_object_owner); |
919 | if (_stack_trace != NULL) { |
920 | _stack_trace->oops_do(f); |
921 | } |
922 | if (_concurrent_locks != NULL) { |
923 | _concurrent_locks->oops_do(f); |
924 | } |
925 | } |
926 | |
927 | void ThreadSnapshot::metadata_do(void f(Metadata*)) { |
928 | if (_stack_trace != NULL) { |
929 | _stack_trace->metadata_do(f); |
930 | } |
931 | } |
932 | |
933 | |
934 | DeadlockCycle::DeadlockCycle() { |
935 | _is_deadlock = false; |
936 | _threads = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true); |
937 | _next = NULL; |
938 | } |
939 | |
940 | DeadlockCycle::~DeadlockCycle() { |
941 | delete _threads; |
942 | } |
943 | |
944 | void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const { |
945 | st->cr(); |
946 | st->print_cr("Found one Java-level deadlock:" ); |
947 | st->print("=============================" ); |
948 | |
949 | JavaThread* currentThread; |
950 | ObjectMonitor* waitingToLockMonitor; |
951 | oop waitingToLockBlocker; |
952 | int len = _threads->length(); |
953 | for (int i = 0; i < len; i++) { |
954 | currentThread = _threads->at(i); |
955 | waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor(); |
956 | waitingToLockBlocker = currentThread->current_park_blocker(); |
957 | st->cr(); |
958 | st->print_cr("\"%s\":" , currentThread->get_thread_name()); |
959 | const char* owner_desc = ",\n which is held by" ; |
960 | if (waitingToLockMonitor != NULL) { |
961 | st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor)); |
962 | oop obj = (oop)waitingToLockMonitor->object(); |
963 | if (obj != NULL) { |
964 | st->print(" (object " INTPTR_FORMAT ", a %s)" , p2i(obj), |
965 | obj->klass()->external_name()); |
966 | |
967 | if (!currentThread->current_pending_monitor_is_from_java()) { |
968 | owner_desc = "\n in JNI, which is held by" ; |
969 | } |
970 | } else { |
971 | // No Java object associated - a JVMTI raw monitor |
972 | owner_desc = " (JVMTI raw monitor),\n which is held by" ; |
973 | } |
974 | currentThread = Threads::owning_thread_from_monitor_owner(t_list, |
975 | (address)waitingToLockMonitor->owner()); |
976 | if (currentThread == NULL) { |
977 | // The deadlock was detected at a safepoint so the JavaThread |
978 | // that owns waitingToLockMonitor should be findable, but |
979 | // if it is not findable, then the previous currentThread is |
980 | // blocked permanently. |
981 | st->print("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc, |
982 | p2i(waitingToLockMonitor->owner())); |
983 | continue; |
984 | } |
985 | } else { |
986 | st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)" , |
987 | p2i(waitingToLockBlocker), |
988 | waitingToLockBlocker->klass()->external_name()); |
989 | assert(waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()), |
990 | "Must be an AbstractOwnableSynchronizer" ); |
991 | oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker); |
992 | currentThread = java_lang_Thread::thread(ownerObj); |
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
994 | } |
995 | st->print("%s \"%s\"" , owner_desc, currentThread->get_thread_name()); |
996 | } |
997 | |
998 | st->cr(); |
999 | st->cr(); |
1000 | |
1001 | // Print stack traces |
1002 | bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace; |
1003 | JavaMonitorsInStackTrace = true; |
1004 | st->print_cr("Java stack information for the threads listed above:" ); |
1005 | st->print_cr("===================================================" ); |
1006 | for (int j = 0; j < len; j++) { |
1007 | currentThread = _threads->at(j); |
1008 | st->print_cr("\"%s\":" , currentThread->get_thread_name()); |
1009 | currentThread->print_stack_on(st); |
1010 | } |
1011 | JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace; |
1012 | } |
1013 | |
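// Builds a snapshot array of java.lang.Thread handles for all live,
// externally visible JavaThreads, optionally including JVMTI agent threads
// and threads still in the process of attaching via JNI.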
1014 | ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread, |
1015 | bool include_jvmti_agent_threads, |
1016 | bool include_jni_attaching_threads) { |
  assert(cur_thread == Thread::current(), "Check current thread");
1018 | |
1019 | int init_size = ThreadService::get_live_thread_count(); |
1020 | _threads_array = new GrowableArray<instanceHandle>(init_size); |
1021 | |
1022 | for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { |
    // Skip JavaThreads in the process of exiting and VM internal JavaThreads.
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
1027 | if (jt->threadObj() == NULL || |
1028 | jt->is_exiting() || |
1029 | !java_lang_Thread::is_alive(jt->threadObj()) || |
1030 | jt->is_hidden_from_external_view()) { |
1031 | continue; |
1032 | } |
1033 | |
1034 | // skip agent threads |
1035 | if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) { |
1036 | continue; |
1037 | } |
1038 | |
1039 | // skip jni threads in the process of attaching |
1040 | if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) { |
1041 | continue; |
1042 | } |
1043 | |
1044 | instanceHandle h(cur_thread, (instanceOop) jt->threadObj()); |
1045 | _threads_array->append(h); |
1046 | } |
1047 | } |
1048 | |