/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/periodic/sampling/jfrCallTrace.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "logging/log.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/semaphore.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

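//
// The JFR thread sampler periodically walks the list of JavaThreads and emits
// ExecutionSample events for threads executing Java code and NativeMethodSample
// events for threads executing native code. Threads in Java are stopped briefly
// (os::SuspendedThreadTask) while their stacks are walked; threads in native are
// not stopped, but the trace flag keeps them from completing a transition back
// into Java while their Java frames are recorded. Stack walking runs under
// optional crash protection (JfrOptionSet::sample_protection()) since a sampled
// thread may be stopped at an arbitrary instruction.
//
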
enum JfrSampleType {
  NO_SAMPLE = 0,
  JAVA_SAMPLE = 1,
  NATIVE_SAMPLE = 2
};

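// Sample eligibility: only a thread currently executing Java code (_thread_in_Java)
// qualifies for an ExecutionSample, and only a thread in _thread_in_native qualifies
// for a NativeMethodSample. Transitional and blocked states are never sampled.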
static bool thread_state_in_java(JavaThread* thread) {
  assert(thread != NULL, "invariant");
  switch(thread->thread_state()) {
    case _thread_new:
    case _thread_uninitialized:
    case _thread_new_trans:
    case _thread_in_vm_trans:
    case _thread_blocked_trans:
    case _thread_in_native_trans:
    case _thread_blocked:
    case _thread_in_vm:
    case _thread_in_native:
    case _thread_in_Java_trans:
      break;
    case _thread_in_Java:
      return true;
    default:
      ShouldNotReachHere();
      break;
  }
  return false;
}

static bool thread_state_in_native(JavaThread* thread) {
  assert(thread != NULL, "invariant");
  switch(thread->thread_state()) {
    case _thread_new:
    case _thread_uninitialized:
    case _thread_new_trans:
    case _thread_blocked_trans:
    case _thread_blocked:
    case _thread_in_vm:
    case _thread_in_vm_trans:
    case _thread_in_Java_trans:
    case _thread_in_Java:
    case _thread_in_native_trans:
      break;
    case _thread_in_native:
      return true;
    default:
      ShouldNotReachHere();
      break;
  }
  return false;
}

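// Collects the samples for one sampling round. The event arrays are supplied by the
// caller (stack allocated in task_stacktrace()) so that no heap allocation is needed
// while a sampled thread is stopped.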
class JfrThreadSampleClosure {
 public:
  JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native);
  ~JfrThreadSampleClosure() {}
  EventExecutionSample* next_event() { return &_events[_added_java++]; }
  EventNativeMethodSample* next_event_native() { return &_events_native[_added_native++]; }
  void commit_events(JfrSampleType type);
  bool do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type);
  uint java_entries() { return _added_java; }
  uint native_entries() { return _added_native; }

 private:
  bool sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
  bool sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
  EventExecutionSample* _events;
  EventNativeMethodSample* _events_native;
  Thread* _self;
  uint _added_java;
  uint _added_native;
};

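// Suspends the target thread (os::SuspendedThreadTask) and walks its stack while it
// is stopped. The actual walk happens in protected_task(), invoked from do_task()
// either directly or through crash protection.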
class OSThreadSampler : public os::SuspendedThreadTask {
 public:
  OSThreadSampler(JavaThread* thread,
                  JfrThreadSampleClosure& closure,
                  JfrStackFrame *frames,
                  u4 max_frames) : os::SuspendedThreadTask((Thread*)thread),
    _success(false),
    _stacktrace(frames, max_frames),
    _closure(closure),
    _suspend_time() {}

  void take_sample();
  void do_task(const os::SuspendedThreadTaskContext& context);
  void protected_task(const os::SuspendedThreadTaskContext& context);
  bool success() const { return _success; }
  const JfrStackTrace& stacktrace() const { return _stacktrace; }

 private:
  bool _success;
  JfrStackTrace _stacktrace;
  JfrThreadSampleClosure& _closure;
  JfrTicks _suspend_time;
};

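// Adapter that lets protected_task() be invoked through os::ThreadCrashProtection,
// which expects an os::CrashProtectionCallback.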
class OSThreadSamplerCallback : public os::CrashProtectionCallback {
 public:
  OSThreadSamplerCallback(OSThreadSampler& sampler, const os::SuspendedThreadTaskContext &context) :
    _sampler(sampler), _context(context) {
  }
  virtual void call() {
    _sampler.protected_task(_context);
  }
 private:
  OSThreadSampler& _sampler;
  const os::SuspendedThreadTaskContext& _context;
};

void OSThreadSampler::do_task(const os::SuspendedThreadTaskContext& context) {
#ifndef ASSERT
  guarantee(JfrOptionSet::sample_protection(), "Sample Protection should be on in product builds");
#endif
  assert(_suspend_time.value() == 0, "already timestamped!");
  _suspend_time = JfrTicks::now();

  if (JfrOptionSet::sample_protection()) {
    OSThreadSamplerCallback cb(*this, context);
    os::ThreadCrashProtection crash_protection;
    if (!crash_protection.call(cb)) {
      log_error(jfr)("Thread method sampler crashed");
    }
  } else {
    protected_task(context);
  }
}

/*
 * From this method and down the call tree we attempt to protect against crashes
 * using a signal handler / __try block. Don't take locks, don't rely on destructors,
 * and don't leave memory (in case of a signal / exception) in an inconsistent state. */
void OSThreadSampler::protected_task(const os::SuspendedThreadTaskContext& context) {
  JavaThread* jth = (JavaThread*)context.thread();
  // Skip the sample if we signaled a thread that has since moved to another state
  if (!thread_state_in_java(jth)) {
    return;
  }
  JfrGetCallTrace trace(true, jth);
  frame topframe;
  if (trace.get_topframe(context.ucontext(), topframe)) {
    if (_stacktrace.record_thread(*jth, topframe)) {
      /* If we managed to get a topframe and a stacktrace, create an event
       * and put it into our array. We can't call Jfr::_stacktraces.add()
       * here since it would allocate memory using malloc. Doing so while
       * the stopped thread is inside malloc would deadlock. */
      _success = true;
      EventExecutionSample *ev = _closure.next_event();
      ev->set_starttime(_suspend_time);
      ev->set_endtime(_suspend_time); // fake end time; we don't take another timestamp
      ev->set_sampledThread(JFR_THREAD_ID(jth));
      ev->set_state(java_lang_Thread::get_thread_status(jth->threadObj()));
    }
  }
}

void OSThreadSampler::take_sample() {
  run();
}

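// Performs the stack walk for a NativeMethodSample. Unlike the Java case, the target
// thread is not suspended; it keeps running in native code while its Java frames,
// anchored at the last Java frame, are walked. The trace flag set in
// do_sample_thread() blocks the thread in on_javathread_suspend() should it attempt
// a state transition, keeping those frames stable for the duration of the walk.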
class JfrNativeSamplerCallback : public os::CrashProtectionCallback {
 public:
  JfrNativeSamplerCallback(JfrThreadSampleClosure& closure, JavaThread* jt, JfrStackFrame* frames, u4 max_frames) :
    _closure(closure), _jt(jt), _stacktrace(frames, max_frames), _success(false) {
  }
  virtual void call();
  bool success() { return _success; }
  JfrStackTrace& stacktrace() { return _stacktrace; }

 private:
  JfrThreadSampleClosure& _closure;
  JavaThread* _jt;
  JfrStackTrace _stacktrace;
  bool _success;
};

static void write_native_event(JfrThreadSampleClosure& closure, JavaThread* jt) {
  EventNativeMethodSample *ev = closure.next_event_native();
  ev->set_starttime(JfrTicks::now());
  ev->set_sampledThread(JFR_THREAD_ID(jt));
  ev->set_state(java_lang_Thread::get_thread_status(jt->threadObj()));
}

void JfrNativeSamplerCallback::call() {
  // When a thread has only just attached, it will be in native without a last Java frame
  if (!_jt->has_last_Java_frame()) {
    return;
  }

  frame topframe = _jt->last_frame();
  frame first_java_frame;
  Method* method = NULL;
  JfrGetCallTrace gct(false, _jt);
  if (!gct.find_top_frame(topframe, &method, first_java_frame)) {
    return;
  }
  if (method == NULL) {
    return;
  }
  topframe = first_java_frame;
  _success = _stacktrace.record_thread(*_jt, topframe);
  if (_success) {
    write_native_event(_closure, _jt);
  }
}

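// Sample a thread executing Java code by suspending it and walking its stack while it
// is stopped, then publish the stacktrace once the thread has been resumed.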
bool JfrThreadSampleClosure::sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
  OSThreadSampler sampler(thread, *this, frames, max_frames);
  sampler.take_sample();
  /* We don't want to allocate any memory using malloc/etc. while the thread
   * is stopped, so everything is stored in stack-allocated memory until this
   * point, where the thread has been resumed again. If the sampling was a success,
   * we need to store the stacktrace in the stacktrace repository and update
   * the event with the id that was returned. */
  if (!sampler.success()) {
    return false;
  }
  EventExecutionSample *event = &_events[_added_java - 1];
  traceid id = JfrStackTraceRepository::add(sampler.stacktrace());
  assert(id != 0, "Stacktrace id should not be 0");
  event->set_stackTrace(id);
  return true;
}

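// The native path runs the stack walk on the sampler thread itself, optionally under
// crash protection; the target thread is expected to remain in _thread_in_native
// (held there by the trace flag if it attempts to transition).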
bool JfrThreadSampleClosure::sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
  JfrNativeSamplerCallback cb(*this, thread, frames, max_frames);
  if (JfrOptionSet::sample_protection()) {
    os::ThreadCrashProtection crash_protection;
    if (!crash_protection.call(cb)) {
      log_error(jfr)("Thread method sampler crashed for native");
    }
  } else {
    cb.call();
  }
  if (!cb.success()) {
    return false;
  }
  EventNativeMethodSample *event = &_events_native[_added_native - 1];
  traceid id = JfrStackTraceRepository::add(cb.stacktrace());
  assert(id != 0, "Stacktrace id should not be 0");
  event->set_stackTrace(id);
  return true;
}

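// Upper bounds on the number of events collected in one sampling round. The event
// arrays in task_stacktrace() are stack allocated, so these are kept small.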
static const uint MAX_NR_OF_JAVA_SAMPLES = 5;
static const uint MAX_NR_OF_NATIVE_SAMPLES = 1;

void JfrThreadSampleClosure::commit_events(JfrSampleType type) {
  if (JAVA_SAMPLE == type) {
    assert(_added_java > 0 && _added_java <= MAX_NR_OF_JAVA_SAMPLES, "invariant");
    for (uint i = 0; i < _added_java; ++i) {
      _events[i].commit();
    }
  } else {
    assert(NATIVE_SAMPLE == type, "invariant");
    assert(_added_native > 0 && _added_native <= MAX_NR_OF_NATIVE_SAMPLES, "invariant");
    for (uint i = 0; i < _added_native; ++i) {
      _events_native[i].commit();
    }
  }
}

JfrThreadSampleClosure::JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native) :
  _events(events),
  _events_native(events_native),
  _self(Thread::current()),
  _added_java(0),
  _added_native(0) {
}

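// The dedicated sampler thread. It sleeps until the next Java or native sampling
// deadline, then takes a round of samples by walking the thread list. Enrollment is
// controlled through a semaphore so the thread can be parked while sampling is off.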
class JfrThreadSampler : public NonJavaThread {
  friend class JfrThreadSampling;
 private:
  Semaphore _sample;
  Thread* _sampler_thread;
  JfrStackFrame* const _frames;
  JavaThread* _last_thread_java;
  JavaThread* _last_thread_native;
  size_t _interval_java;
  size_t _interval_native;
  int _cur_index;
  const u4 _max_frames;
  volatile bool _disenrolled;

  JavaThread* next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current);
  void task_stacktrace(JfrSampleType type, JavaThread** last_thread);
  JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames);
  ~JfrThreadSampler();

  void start_thread();

  void enroll();
  void disenroll();
  void set_java_interval(size_t interval) { _interval_java = interval; };
  void set_native_interval(size_t interval) { _interval_native = interval; };
  size_t get_java_interval() { return _interval_java; };
  size_t get_native_interval() { return _interval_native; };
 protected:
  virtual void post_run();
 public:
  void run();
  static Monitor* transition_block() { return JfrThreadSampler_lock; }
  static void on_javathread_suspend(JavaThread* thread);
};

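// Release a thread marked with the trace flag: clear the flag and, if the thread has
// already parked itself in on_javathread_suspend(), wake it up.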
static void clear_transition_block(JavaThread* jt) {
  jt->clear_trace_flag();
  JfrThreadLocal* const tl = jt->jfr_thread_local();
  if (tl->is_trace_block()) {
    MutexLocker ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag);
    JfrThreadSampler::transition_block()->notify_all();
  }
}

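// Attempt to sample a single thread: mark it with the trace flag, verify that it is
// still in the state required for the requested sample type, dispatch to the Java or
// native path, and finally release it again.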
bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type) {
  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
  if (thread->is_hidden_from_external_view() || thread->in_deopt_handler()) {
    return false;
  }

  bool ret = false;
  thread->set_trace_flag(); // Provides StoreLoad, needed to keep read of thread state from floating up.
  if (JAVA_SAMPLE == type) {
    if (thread_state_in_java(thread)) {
      ret = sample_thread_in_java(thread, frames, max_frames);
    }
  } else {
    assert(NATIVE_SAMPLE == type, "invariant");
    if (thread_state_in_native(thread)) {
      ret = sample_thread_in_native(thread, frames, max_frames);
    }
  }
  clear_transition_block(thread);
  return ret;
}

JfrThreadSampler::JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames) :
  _sample(),
  _sampler_thread(NULL),
  _frames(JfrCHeapObj::new_array<JfrStackFrame>(max_frames)),
  _last_thread_java(NULL),
  _last_thread_native(NULL),
  _interval_java(interval_java),
  _interval_native(interval_native),
  _cur_index(-1),
  _max_frames(max_frames),
  _disenrolled(true) {
}

JfrThreadSampler::~JfrThreadSampler() {
  JfrCHeapObj::free(_frames, sizeof(JfrStackFrame) * _max_frames);
}

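// Executed by the target thread when it observes the trace flag during a thread state
// transition. It blocks on the transition_block() monitor until the sampler calls
// clear_transition_block(), which keeps the thread's stack stable while it is sampled.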
void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) {
  JfrThreadLocal* const tl = thread->jfr_thread_local();
  tl->set_trace_block();
  {
    MonitorLocker ml(transition_block(), Mutex::_no_safepoint_check_flag);
    while (thread->is_trace_suspend()) {
      ml.wait();
    }
    tl->clear_trace_block();
  }
}

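// Round-robin advance over the thread list, wrapping at the end. Returns NULL once the
// iteration arrives back at the thread where this sampling round started.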
JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) {
  assert(t_list != NULL, "invariant");
  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
  assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant");
  assert((current == NULL && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
  if ((uint)_cur_index + 1 == t_list->length()) {
    // wrap
    _cur_index = 0;
  } else {
    _cur_index++;
  }
  assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant");
  JavaThread* const next = t_list->thread_at(_cur_index);
  return next != first_sampled ? next : NULL;
}

void JfrThreadSampler::start_thread() {
  if (os::create_thread(this, os::os_thread)) {
    os::start_thread(this);
  } else {
    log_error(jfr)("Failed to create thread for thread sampling");
  }
}

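// _sample acts as a binary "enrolled" permit: enroll() posts it so the loop in run()
// can proceed, disenroll() takes it so the loop parks in wait() until re-enrolled.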
void JfrThreadSampler::enroll() {
  if (_disenrolled) {
    log_info(jfr)("Enrolling thread sampler");
    _sample.signal();
    _disenrolled = false;
  }
}

void JfrThreadSampler::disenroll() {
  if (!_disenrolled) {
    _sample.wait();
    _disenrolled = true;
    log_info(jfr)("Disenrolling thread sampler");
  }
}

static jlong get_monotonic_ms() {
  return os::javaTimeNanos() / 1000000;
}

void JfrThreadSampler::run() {
  assert(_sampler_thread == NULL, "invariant");

  _sampler_thread = this;

  jlong last_java_ms = get_monotonic_ms();
  jlong last_native_ms = last_java_ms;
  while (true) {
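    // If the permit is gone we have been disenrolled: park until enroll() signals
    // again, then reset the sampling baselines. The signal() below re-posts the
    // permit so the next trywait() succeeds and disenroll() has something to take.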
    if (!_sample.trywait()) {
      // disenrolled
      _sample.wait();
      last_java_ms = get_monotonic_ms();
      last_native_ms = last_java_ms;
    }
    _sample.signal();
    jlong java_interval = _interval_java == 0 ? max_jlong : MAX2<jlong>(_interval_java, 1);
    jlong native_interval = _interval_native == 0 ? max_jlong : MAX2<jlong>(_interval_native, 1);

    jlong now_ms = get_monotonic_ms();

    /*
     * Let I be java_interval or native_interval.
     * Let L be last_java_ms or last_native_ms.
     * Let N be now_ms.
     *
     * Interval, I, might be max_jlong so the addition
     * could potentially overflow without parentheses (UB). Also note that
     * L - N < 0. Avoid UB by adding parentheses.
     */
    jlong next_j = java_interval + (last_java_ms - now_ms);
    jlong next_n = native_interval + (last_native_ms - now_ms);

    jlong sleep_to_next = MIN2<jlong>(next_j, next_n);

    if (sleep_to_next > 0) {
      os::naked_short_sleep(sleep_to_next);
    }

    if ((next_j - sleep_to_next) <= 0) {
      task_stacktrace(JAVA_SAMPLE, &_last_thread_java);
      last_java_ms = get_monotonic_ms();
    }
    if ((next_n - sleep_to_next) <= 0) {
      task_stacktrace(NATIVE_SAMPLE, &_last_thread_native);
      last_native_ms = get_monotonic_ms();
    }
  }
}

void JfrThreadSampler::post_run() {
  this->NonJavaThread::post_run();
  delete this;
}


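// One sampling round: under the Threads_lock, resume iteration from the thread that
// was sampled last, attempt up to sample_limit samples (skipping compiler threads),
// remember where the round stopped, and commit the collected events after the lock
// has been released.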
void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thread) {
  ResourceMark rm;
  EventExecutionSample samples[MAX_NR_OF_JAVA_SAMPLES];
  EventNativeMethodSample samples_native[MAX_NR_OF_NATIVE_SAMPLES];
  JfrThreadSampleClosure sample_task(samples, samples_native);

  const uint sample_limit = JAVA_SAMPLE == type ? MAX_NR_OF_JAVA_SAMPLES : MAX_NR_OF_NATIVE_SAMPLES;
  uint num_samples = 0;
  JavaThread* start = NULL;

  {
    elapsedTimer sample_time;
    sample_time.start();
    {
      MutexLocker tlock(Threads_lock, Mutex::_no_safepoint_check_flag);
      ThreadsListHandle tlh;
      // Resolve a sample session relative start position index into the thread list array.
      // In cases where the last sampled thread is NULL or non-NULL but stale,
      // find_index_of_JavaThread() returns -1.
      _cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
      JavaThread* current = _cur_index != -1 ? *last_thread : NULL;

      while (num_samples < sample_limit) {
        current = next_thread(tlh.list(), start, current);
        if (current == NULL) {
          break;
        }
        if (start == NULL) {
          start = current; // remember the thread where we started to attempt sampling
        }
        if (current->is_Compiler_thread()) {
          continue;
        }
        if (sample_task.do_sample_thread(current, _frames, _max_frames, type)) {
          num_samples++;
        }
      }
      *last_thread = current; // remember the thread we last attempted to sample
    }
    sample_time.stop();
    log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples",
                   sample_time.seconds(), sample_task.java_entries(), sample_task.native_entries());
  }
  if (num_samples > 0) {
    sample_task.commit_events(type);
  }
}

static JfrThreadSampling* _instance = NULL;

JfrThreadSampling& JfrThreadSampling::instance() {
  return *_instance;
}

JfrThreadSampling* JfrThreadSampling::create() {
  assert(_instance == NULL, "invariant");
  _instance = new JfrThreadSampling();
  return _instance;
}

void JfrThreadSampling::destroy() {
  if (_instance != NULL) {
    delete _instance;
    _instance = NULL;
  }
}

JfrThreadSampling::JfrThreadSampling() : _sampler(NULL) {}

JfrThreadSampling::~JfrThreadSampling() {
  if (_sampler != NULL) {
    _sampler->disenroll();
  }
}

static void log(size_t interval_java, size_t interval_native) {
  log_info(jfr)("Updated thread sampler for java: " SIZE_FORMAT " ms, native " SIZE_FORMAT " ms", interval_java, interval_native);
}

void JfrThreadSampling::start_sampler(size_t interval_java, size_t interval_native) {
  assert(_sampler == NULL, "invariant");
  log_info(jfr)("Enrolling thread sampler");
  _sampler = new JfrThreadSampler(interval_java, interval_native, JfrOptionSet::stackdepth());
  _sampler->start_thread();
  _sampler->enroll();
}

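// A period of 0 disables the corresponding sample type. The sampler thread is created
// lazily on the first non-zero period and is disenrolled, but not destroyed, when both
// periods are 0.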
void JfrThreadSampling::set_sampling_interval(bool java_interval, size_t period) {
  size_t interval_java = 0;
  size_t interval_native = 0;
  if (_sampler != NULL) {
    interval_java = _sampler->get_java_interval();
    interval_native = _sampler->get_native_interval();
  }
  if (java_interval) {
    interval_java = period;
  } else {
    interval_native = period;
  }
  if (interval_java > 0 || interval_native > 0) {
    if (_sampler == NULL) {
| 605 | log_info(jfr)("Creating thread sampler for java:%zu ms, native %zu ms" , interval_java, interval_native); |
      start_sampler(interval_java, interval_native);
    } else {
      _sampler->set_java_interval(interval_java);
      _sampler->set_native_interval(interval_native);
      _sampler->enroll();
    }
    assert(_sampler != NULL, "invariant");
    log(interval_java, interval_native);
  } else if (_sampler != NULL) {
    _sampler->disenroll();
  }
}

void JfrThreadSampling::set_java_sample_interval(size_t period) {
  if (_instance == NULL && 0 == period) {
    return;
  }
  instance().set_sampling_interval(true, period);
}

void JfrThreadSampling::set_native_sample_interval(size_t period) {
  if (_instance == NULL && 0 == period) {
    return;
  }
  instance().set_sampling_interval(false, period);
}

void JfrThreadSampling::on_javathread_suspend(JavaThread* thread) {
  JfrThreadSampler::on_javathread_suspend(thread);
}