1 | /* |
2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "classfile/classLoaderDataGraph.inline.hpp" |
27 | #include "classfile/dictionary.hpp" |
28 | #include "classfile/stringTable.hpp" |
29 | #include "classfile/symbolTable.hpp" |
30 | #include "classfile/systemDictionary.hpp" |
31 | #include "code/codeCache.hpp" |
32 | #include "code/icBuffer.hpp" |
33 | #include "code/nmethod.hpp" |
34 | #include "code/pcDesc.hpp" |
35 | #include "code/scopeDesc.hpp" |
36 | #include "gc/shared/collectedHeap.hpp" |
37 | #include "gc/shared/gcLocker.hpp" |
38 | #include "gc/shared/oopStorage.hpp" |
39 | #include "gc/shared/strongRootsScope.hpp" |
40 | #include "gc/shared/workgroup.hpp" |
41 | #include "interpreter/interpreter.hpp" |
42 | #include "jfr/jfrEvents.hpp" |
43 | #include "logging/log.hpp" |
44 | #include "logging/logStream.hpp" |
45 | #include "memory/resourceArea.hpp" |
46 | #include "memory/universe.hpp" |
47 | #include "oops/oop.inline.hpp" |
48 | #include "oops/symbol.hpp" |
49 | #include "runtime/atomic.hpp" |
50 | #include "runtime/compilationPolicy.hpp" |
51 | #include "runtime/deoptimization.hpp" |
52 | #include "runtime/frame.inline.hpp" |
53 | #include "runtime/handles.inline.hpp" |
54 | #include "runtime/interfaceSupport.inline.hpp" |
55 | #include "runtime/mutexLocker.hpp" |
56 | #include "runtime/orderAccess.hpp" |
57 | #include "runtime/osThread.hpp" |
58 | #include "runtime/safepoint.hpp" |
59 | #include "runtime/safepointMechanism.inline.hpp" |
60 | #include "runtime/signature.hpp" |
61 | #include "runtime/stubCodeGenerator.hpp" |
62 | #include "runtime/stubRoutines.hpp" |
63 | #include "runtime/sweeper.hpp" |
64 | #include "runtime/synchronizer.hpp" |
65 | #include "runtime/thread.inline.hpp" |
66 | #include "runtime/threadSMR.hpp" |
67 | #include "runtime/timerTrace.hpp" |
68 | #include "services/runtimeService.hpp" |
69 | #include "utilities/events.hpp" |
70 | #include "utilities/macros.hpp" |
71 | |
72 | static void post_safepoint_begin_event(EventSafepointBegin& event, |
73 | uint64_t safepoint_id, |
74 | int thread_count, |
75 | int critical_thread_count) { |
76 | if (event.should_commit()) { |
77 | event.set_safepointId(safepoint_id); |
78 | event.set_totalThreadCount(thread_count); |
79 | event.set_jniCriticalThreadCount(critical_thread_count); |
80 | event.commit(); |
81 | } |
82 | } |
83 | |
84 | static void post_safepoint_cleanup_event(EventSafepointCleanup& event, uint64_t safepoint_id) { |
85 | if (event.should_commit()) { |
86 | event.set_safepointId(safepoint_id); |
87 | event.commit(); |
88 | } |
89 | } |
90 | |
91 | static void post_safepoint_synchronize_event(EventSafepointStateSynchronization& event, |
92 | uint64_t safepoint_id, |
93 | int initial_number_of_threads, |
94 | int threads_waiting_to_block, |
95 | uint64_t iterations) { |
96 | if (event.should_commit()) { |
97 | event.set_safepointId(safepoint_id); |
98 | event.set_initialThreadCount(initial_number_of_threads); |
99 | event.set_runningThreadCount(threads_waiting_to_block); |
100 | event.set_iterations(iterations); |
101 | event.commit(); |
102 | } |
103 | } |
104 | |
105 | static void post_safepoint_cleanup_task_event(EventSafepointCleanupTask& event, |
106 | uint64_t safepoint_id, |
107 | const char* name) { |
108 | if (event.should_commit()) { |
109 | event.set_safepointId(safepoint_id); |
110 | event.set_name(name); |
111 | event.commit(); |
112 | } |
113 | } |
114 | |
115 | static void post_safepoint_end_event(EventSafepointEnd& event, uint64_t safepoint_id) { |
116 | if (event.should_commit()) { |
117 | event.set_safepointId(safepoint_id); |
118 | event.commit(); |
119 | } |
120 | } |
121 | |
// SafepointStateTracker
123 | SafepointStateTracker::SafepointStateTracker(uint64_t safepoint_id, bool at_safepoint) |
124 | : _safepoint_id(safepoint_id), _at_safepoint(at_safepoint) {} |
125 | |
126 | bool SafepointStateTracker::safepoint_state_changed() { |
127 | return _safepoint_id != SafepointSynchronize::safepoint_id() || |
128 | _at_safepoint != SafepointSynchronize::is_at_safepoint(); |
129 | } |
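
// Illustrative usage sketch only (a hypothetical caller, not code from this
// file): a tracker can be snapshotted before a lengthy operation and checked
// afterwards to detect that a safepoint began or ended in the meantime:
//
//   SafepointStateTracker tracker(SafepointSynchronize::safepoint_id(),
//                                 SafepointSynchronize::is_at_safepoint());
//   // ... perform work that must be redone if a safepoint intervened ...
//   if (tracker.safepoint_state_changed()) { /* retry or bail out */ }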
130 | |
131 | // -------------------------------------------------------------------------------------------------- |
132 | // Implementation of Safepoint begin/end |
133 | |
134 | SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized; |
135 | int SafepointSynchronize::_waiting_to_block = 0; |
136 | volatile uint64_t SafepointSynchronize::_safepoint_counter = 0; |
137 | uint64_t SafepointSynchronize::_safepoint_id = 0; |
138 | const uint64_t SafepointSynchronize::InactiveSafepointCounter = 0; |
139 | int SafepointSynchronize::_current_jni_active_count = 0; |
140 | |
141 | WaitBarrier* SafepointSynchronize::_wait_barrier; |
142 | |
143 | static volatile bool PageArmed = false; // safepoint polling page is RO|RW vs PROT_NONE |
144 | static bool timeout_error_printed = false; |
145 | |
// Statistics related
147 | static jlong _safepoint_begin_time = 0; |
148 | static volatile int _nof_threads_hit_polling_page = 0; |
149 | |
150 | void SafepointSynchronize::init(Thread* vmthread) { |
151 | // WaitBarrier should never be destroyed since we will have |
152 | // threads waiting on it while exiting. |
153 | _wait_barrier = new WaitBarrier(vmthread); |
154 | SafepointTracing::init(); |
155 | } |
156 | |
157 | void SafepointSynchronize::increment_jni_active_count() { |
158 | assert(Thread::current()->is_VM_thread(), "Only VM thread may increment" ); |
159 | ++_current_jni_active_count; |
160 | } |
161 | |
162 | void SafepointSynchronize::decrement_waiting_to_block() { |
163 | assert(_waiting_to_block > 0, "sanity check" ); |
164 | assert(Thread::current()->is_VM_thread(), "Only VM thread may decrement" ); |
165 | --_waiting_to_block; |
166 | } |
167 | |
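// Returns true once the given thread's safepoint state shows it is no longer
// running (i.e. it does not need to be waited for), re-examining the thread
// if the cached answer still says "running".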
168 | bool SafepointSynchronize::thread_not_running(ThreadSafepointState *cur_state) { |
169 | if (!cur_state->is_running()) { |
170 | return true; |
171 | } |
172 | cur_state->examine_state_of_thread(SafepointSynchronize::safepoint_counter()); |
173 | if (!cur_state->is_running()) { |
174 | return true; |
175 | } |
176 | LogTarget(Trace, safepoint) lt; |
177 | if (lt.is_enabled()) { |
178 | ResourceMark rm; |
179 | LogStream ls(lt); |
180 | cur_state->print_on(&ls); |
181 | } |
182 | return false; |
183 | } |
184 | |
185 | #ifdef ASSERT |
186 | static void assert_list_is_valid(const ThreadSafepointState* tss_head, int still_running) { |
187 | int a = 0; |
188 | const ThreadSafepointState *tmp_tss = tss_head; |
189 | while (tmp_tss != NULL) { |
190 | ++a; |
191 | assert(tmp_tss->is_running(), "Illegal initial state" ); |
192 | tmp_tss = tmp_tss->get_next(); |
193 | } |
194 | assert(a == still_running, "Must be the same" ); |
195 | } |
196 | #endif // ASSERT |
197 | |
198 | static void back_off(int64_t start_time) { |
199 | // We start with fine-grained nanosleeping until a millisecond has |
200 | // passed, at which point we resort to plain naked_short_sleep. |
201 | if (os::javaTimeNanos() - start_time < NANOSECS_PER_MILLISEC) { |
202 | os::naked_short_nanosleep(10 * (NANOUNITS / MICROUNITS)); |
203 | } else { |
204 | os::naked_short_sleep(1); |
205 | } |
206 | } |
207 | |
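// Spin until every JavaThread has reached a safepoint-safe state. The number
// of threads still running after the first pass is reported through
// *initial_running; the return value is the number of iterations it took.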
208 | int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int nof_threads, int* initial_running) |
209 | { |
210 | JavaThreadIteratorWithHandle jtiwh; |
211 | |
212 | #ifdef ASSERT |
213 | for (; JavaThread *cur = jtiwh.next(); ) { |
214 | assert(cur->safepoint_state()->is_running(), "Illegal initial state" ); |
215 | } |
216 | jtiwh.rewind(); |
217 | #endif // ASSERT |
218 | |
219 | // Iterate through all threads until it has been determined how to stop them all at a safepoint. |
220 | int still_running = nof_threads; |
221 | ThreadSafepointState *tss_head = NULL; |
222 | ThreadSafepointState **p_prev = &tss_head; |
223 | for (; JavaThread *cur = jtiwh.next(); ) { |
224 | ThreadSafepointState *cur_tss = cur->safepoint_state(); |
225 | assert(cur_tss->get_next() == NULL, "Must be NULL" ); |
226 | if (thread_not_running(cur_tss)) { |
227 | --still_running; |
228 | } else { |
229 | *p_prev = cur_tss; |
230 | p_prev = cur_tss->next_ptr(); |
231 | } |
232 | } |
233 | *p_prev = NULL; |
234 | |
235 | DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);) |
236 | |
237 | *initial_running = still_running; |
238 | |
239 | // If there is no thread still running, we are already done. |
240 | if (still_running <= 0) { |
241 | assert(tss_head == NULL, "Must be empty" ); |
242 | return 1; |
243 | } |
244 | |
245 | int iterations = 1; // The first iteration is above. |
246 | int64_t start_time = os::javaTimeNanos(); |
247 | |
248 | do { |
249 | // Check if this has taken too long: |
250 | if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) { |
251 | print_safepoint_timeout(); |
252 | } |
253 | if (int(iterations) == -1) { // overflow - something is wrong. |
254 | // We can only overflow here when we are using global |
255 | // polling pages. We keep this guarantee in its original |
256 | // form so that searches of the bug database for this |
257 | // failure mode find the right bugs. |
258 | guarantee (!PageArmed, "invariant" ); |
259 | } |
260 | |
261 | p_prev = &tss_head; |
262 | ThreadSafepointState *cur_tss = tss_head; |
263 | while (cur_tss != NULL) { |
264 | assert(cur_tss->is_running(), "Illegal initial state" ); |
265 | if (thread_not_running(cur_tss)) { |
266 | --still_running; |
267 | *p_prev = NULL; |
268 | ThreadSafepointState *tmp = cur_tss; |
269 | cur_tss = cur_tss->get_next(); |
270 | tmp->set_next(NULL); |
271 | } else { |
272 | *p_prev = cur_tss; |
273 | p_prev = cur_tss->next_ptr(); |
274 | cur_tss = cur_tss->get_next(); |
275 | } |
276 | } |
277 | |
278 | DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);) |
279 | |
280 | if (still_running > 0) { |
281 | back_off(start_time); |
282 | } |
283 | |
284 | iterations++; |
285 | } while (still_running > 0); |
286 | |
287 | assert(tss_head == NULL, "Must be empty" ); |
288 | |
289 | return iterations; |
290 | } |
291 | |
292 | void SafepointSynchronize::arm_safepoint() { |
293 | // Begin the process of bringing the system to a safepoint. |
294 | // Java threads can be in several different states and are |
295 | // stopped by different mechanisms: |
296 | // |
297 | // 1. Running interpreted |
// When executing branching/returning bytecodes, the interpreter
// checks whether the poll is armed and, if so, blocks in SS::block().
300 | // When using global polling the interpreter dispatch table |
301 | // is changed to force it to check for a safepoint condition |
302 | // between bytecodes. |
303 | // 2. Running in native code |
// When returning from native code, a Java thread must check
// the safepoint _state to see if it must block. If the
306 | // VM thread sees a Java thread in native, it does |
307 | // not wait for this thread to block. The order of the memory |
308 | // writes and reads of both the safepoint state and the Java |
309 | // threads state is critical. In order to guarantee that the |
310 | // memory writes are serialized with respect to each other, |
311 | // the VM thread issues a memory barrier instruction. |
// 3. Running compiled code
313 | // Compiled code reads the local polling page that |
314 | // is set to fault if we are trying to get to a safepoint. |
315 | // 4. Blocked |
316 | // A thread which is blocked will not be allowed to return from the |
317 | // block condition until the safepoint operation is complete. |
318 | // 5. In VM or Transitioning between states |
319 | // If a Java thread is currently running in the VM or transitioning |
320 | // between states, the safepointing code will poll the thread state |
// until the thread blocks itself when it attempts to transition to a
// new state or to lock a safepoint-checked monitor.
323 | |
// We must never miss a thread with the correct safepoint id, so we must make sure we arm
325 | // the wait barrier for the next safepoint id/counter. |
326 | // Arming must be done after resetting _current_jni_active_count, _waiting_to_block. |
327 | _wait_barrier->arm(static_cast<int>(_safepoint_counter + 1)); |
328 | |
329 | assert((_safepoint_counter & 0x1) == 0, "must be even" ); |
330 | // The store to _safepoint_counter must happen after any stores in arming. |
331 | OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1); |
332 | |
333 | // We are synchronizing |
334 | OrderAccess::storestore(); // Ordered with _safepoint_counter |
335 | _state = _synchronizing; |
336 | |
337 | if (SafepointMechanism::uses_thread_local_poll()) { |
// Arming the per-thread poll while _state != _not_synchronized means we are safepointing.
339 | log_trace(safepoint)("Setting thread local yield flag for threads" ); |
340 | OrderAccess::storestore(); // storestore, global state -> local state |
341 | for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) { |
342 | // Make sure the threads start polling, it is time to yield. |
343 | SafepointMechanism::arm_local_poll(cur); |
344 | } |
345 | } |
346 | OrderAccess::fence(); // storestore|storeload, global state -> local state |
347 | |
348 | if (SafepointMechanism::uses_global_page_poll()) { |
349 | // Make interpreter safepoint aware |
350 | Interpreter::notice_safepoints(); |
351 | |
352 | // Make polling safepoint aware |
353 | guarantee (!PageArmed, "invariant" ) ; |
354 | PageArmed = true; |
355 | os::make_polling_page_unreadable(); |
356 | } |
357 | } |
358 | |
359 | // Roll all threads forward to a safepoint and suspend them all |
360 | void SafepointSynchronize::begin() { |
361 | assert(Thread::current()->is_VM_thread(), "Only VM thread may execute a safepoint" ); |
362 | |
363 | EventSafepointBegin begin_event; |
364 | SafepointTracing::begin(VMThread::vm_op_type()); |
365 | |
366 | Universe::heap()->safepoint_synchronize_begin(); |
367 | |
// By acquiring the Threads_lock, we ensure that no threads are about to start or
369 | // exit. It is released again in SafepointSynchronize::end(). |
370 | Threads_lock->lock(); |
371 | |
372 | assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state" ); |
373 | |
374 | int nof_threads = Threads::number_of_threads(); |
375 | |
376 | _nof_threads_hit_polling_page = 0; |
377 | |
378 | log_debug(safepoint)("Safepoint synchronization initiated using %s wait barrier. (%d threads)" , _wait_barrier->description(), nof_threads); |
379 | |
380 | // Reset the count of active JNI critical threads |
381 | _current_jni_active_count = 0; |
382 | |
383 | // Set number of threads to wait for |
384 | _waiting_to_block = nof_threads; |
385 | |
386 | jlong safepoint_limit_time = 0; |
387 | if (SafepointTimeout) { |
388 | // Set the limit time, so that it can be compared to see if this has taken |
389 | // too long to complete. |
390 | safepoint_limit_time = SafepointTracing::start_of_safepoint() + (jlong)SafepointTimeoutDelay * (NANOUNITS / MILLIUNITS); |
391 | timeout_error_printed = false; |
392 | } |
393 | |
394 | EventSafepointStateSynchronization sync_event; |
395 | int initial_running = 0; |
396 | |
397 | // Arms the safepoint, _current_jni_active_count and _waiting_to_block must be set before. |
398 | arm_safepoint(); |
399 | |
400 | // Will spin until all threads are safe. |
401 | int iterations = synchronize_threads(safepoint_limit_time, nof_threads, &initial_running); |
402 | assert(_waiting_to_block == 0, "No thread should be running" ); |
403 | |
404 | #ifndef PRODUCT |
405 | if (safepoint_limit_time != 0) { |
406 | jlong current_time = os::javaTimeNanos(); |
407 | if (safepoint_limit_time < current_time) { |
408 | log_warning(safepoint)("# SafepointSynchronize: Finished after " |
409 | INT64_FORMAT_W(6) " ms" , |
410 | (int64_t)(current_time - SafepointTracing::start_of_safepoint()) / (NANOUNITS / MILLIUNITS)); |
411 | } |
412 | } |
413 | #endif |
414 | |
415 | assert(Threads_lock->owned_by_self(), "must hold Threads_lock" ); |
416 | |
417 | // Record state |
418 | _state = _synchronized; |
419 | |
420 | OrderAccess::fence(); |
421 | |
422 | // Set the new id |
423 | ++_safepoint_id; |
424 | |
425 | #ifdef ASSERT |
426 | // Make sure all the threads were visited. |
427 | for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) { |
428 | assert(cur->was_visited_for_critical_count(_safepoint_counter), "missed a thread" ); |
429 | } |
430 | #endif // ASSERT |
431 | |
432 | // Update the count of active JNI critical regions |
433 | GCLocker::set_jni_lock_count(_current_jni_active_count); |
434 | |
435 | post_safepoint_synchronize_event(sync_event, |
436 | _safepoint_id, |
437 | initial_running, |
438 | _waiting_to_block, iterations); |
439 | |
440 | SafepointTracing::synchronized(nof_threads, initial_running, _nof_threads_hit_polling_page); |
441 | |
442 | // We do the safepoint cleanup first since a GC related safepoint |
443 | // needs cleanup to be completed before running the GC op. |
444 | EventSafepointCleanup cleanup_event; |
445 | do_cleanup_tasks(); |
446 | post_safepoint_cleanup_event(cleanup_event, _safepoint_id); |
447 | |
448 | post_safepoint_begin_event(begin_event, _safepoint_id, nof_threads, _current_jni_active_count); |
449 | SafepointTracing::cleanup(); |
450 | } |
451 | |
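// Undo the work of arm_safepoint(): restore the polling page and interpreter
// dispatch table (when global polling is used), flip _state back to
// _not_synchronized, restart every thread's safepoint state and disarm its
// per-thread poll, release the Threads_lock, and finally wake the threads
// parked on the wait barrier.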
452 | void SafepointSynchronize::disarm_safepoint() { |
453 | uint64_t active_safepoint_counter = _safepoint_counter; |
454 | { |
455 | JavaThreadIteratorWithHandle jtiwh; |
456 | #ifdef ASSERT |
// A pending_exception cannot be installed during a safepoint. A thread
// may install an async exception into pending_exception after it comes
// back from a safepoint and unblocks, but that should happen later.
460 | for (; JavaThread *cur = jtiwh.next(); ) { |
461 | assert (!(cur->has_pending_exception() && |
462 | cur->safepoint_state()->is_at_poll_safepoint()), |
463 | "safepoint installed a pending exception" ); |
464 | } |
465 | #endif // ASSERT |
466 | |
467 | if (SafepointMechanism::uses_global_page_poll()) { |
468 | guarantee (PageArmed, "invariant" ); |
469 | // Make polling safepoint aware |
470 | os::make_polling_page_readable(); |
471 | PageArmed = false; |
472 | // Remove safepoint check from interpreter |
473 | Interpreter::ignore_safepoints(); |
474 | } |
475 | |
476 | OrderAccess::fence(); // keep read and write of _state from floating up |
477 | assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization" ); |
478 | |
479 | // Change state first to _not_synchronized. |
480 | // No threads should see _synchronized when running. |
481 | _state = _not_synchronized; |
482 | |
483 | // Set the next dormant (even) safepoint id. |
484 | assert((_safepoint_counter & 0x1) == 1, "must be odd" ); |
485 | OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1); |
486 | |
487 | OrderAccess::fence(); // Keep the local state from floating up. |
488 | |
489 | jtiwh.rewind(); |
490 | for (; JavaThread *current = jtiwh.next(); ) { |
491 | // Clear the visited flag to ensure that the critical counts are collected properly. |
492 | DEBUG_ONLY(current->reset_visited_for_critical_count(active_safepoint_counter);) |
493 | ThreadSafepointState* cur_state = current->safepoint_state(); |
494 | assert(!cur_state->is_running(), "Thread not suspended at safepoint" ); |
495 | cur_state->restart(); // TSS _running |
496 | assert(cur_state->is_running(), "safepoint state has not been reset" ); |
497 | |
498 | SafepointMechanism::disarm_if_needed(current, false /* NO release */); |
499 | } |
500 | } // ~JavaThreadIteratorWithHandle |
501 | |
502 | // Release threads lock, so threads can be created/destroyed again. |
503 | Threads_lock->unlock(); |
504 | |
505 | // Wake threads after local state is correctly set. |
506 | _wait_barrier->disarm(); |
507 | } |
508 | |
509 | // Wake up all threads, so they are ready to resume execution after the safepoint |
510 | // operation has been carried out |
511 | void SafepointSynchronize::end() { |
512 | assert(Threads_lock->owned_by_self(), "must hold Threads_lock" ); |
513 | EventSafepointEnd event; |
514 | assert(Thread::current()->is_VM_thread(), "Only VM thread can execute a safepoint" ); |
515 | |
516 | disarm_safepoint(); |
517 | |
518 | Universe::heap()->safepoint_synchronize_end(); |
519 | |
520 | SafepointTracing::end(); |
521 | |
522 | post_safepoint_end_event(event, safepoint_id()); |
523 | } |
524 | |
525 | bool SafepointSynchronize::is_cleanup_needed() { |
526 | // Need a safepoint if there are many monitors to deflate. |
527 | if (ObjectSynchronizer::is_cleanup_needed()) return true; |
// Need a safepoint if the inline cache buffer is non-empty
529 | if (!InlineCacheBuffer::is_empty()) return true; |
530 | if (StringTable::needs_rehashing()) return true; |
531 | if (SymbolTable::needs_rehashing()) return true; |
532 | return false; |
533 | } |
534 | |
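// Per-thread cleanup work: deflates the thread's local monitors and, when
// UseCodeAging is enabled, applies the sweeper's hotness-counter reset closure
// to Java threads (the code cache sweeper thread itself is excluded).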
535 | class ParallelSPCleanupThreadClosure : public ThreadClosure { |
536 | private: |
537 | CodeBlobClosure* _nmethod_cl; |
538 | DeflateMonitorCounters* _counters; |
539 | |
540 | public: |
541 | ParallelSPCleanupThreadClosure(DeflateMonitorCounters* counters) : |
542 | _nmethod_cl(UseCodeAging ? NMethodSweeper::prepare_reset_hotness_counters() : NULL), |
543 | _counters(counters) {} |
544 | |
545 | void do_thread(Thread* thread) { |
546 | ObjectSynchronizer::deflate_thread_local_monitors(thread, _counters); |
547 | if (_nmethod_cl != NULL && thread->is_Java_thread() && |
548 | ! thread->is_Code_cache_sweeper_thread()) { |
549 | JavaThread* jt = (JavaThread*) thread; |
550 | jt->nmethods_do(_nmethod_cl); |
551 | } |
552 | } |
553 | }; |
554 | |
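// Gang task distributing the safepoint cleanup work: every worker walks the
// thread list with the closure above, while each of the remaining subtasks is
// claimed at most once through SubTasksDone and therefore runs exactly once,
// no matter how many workers participate.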
555 | class ParallelSPCleanupTask : public AbstractGangTask { |
556 | private: |
557 | SubTasksDone _subtasks; |
558 | ParallelSPCleanupThreadClosure _cleanup_threads_cl; |
559 | uint _num_workers; |
560 | DeflateMonitorCounters* _counters; |
561 | public: |
562 | ParallelSPCleanupTask(uint num_workers, DeflateMonitorCounters* counters) : |
563 | AbstractGangTask("Parallel Safepoint Cleanup" ), |
564 | _subtasks(SubTasksDone(SafepointSynchronize::SAFEPOINT_CLEANUP_NUM_TASKS)), |
565 | _cleanup_threads_cl(ParallelSPCleanupThreadClosure(counters)), |
566 | _num_workers(num_workers), |
567 | _counters(counters) {} |
568 | |
569 | void work(uint worker_id) { |
570 | uint64_t safepoint_id = SafepointSynchronize::safepoint_id(); |
571 | // All threads deflate monitors and mark nmethods (if necessary). |
572 | Threads::possibly_parallel_threads_do(true, &_cleanup_threads_cl); |
573 | |
574 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_DEFLATE_MONITORS)) { |
575 | const char* name = "deflating global idle monitors" ; |
576 | EventSafepointCleanupTask event; |
577 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
578 | ObjectSynchronizer::deflate_idle_monitors(_counters); |
579 | |
580 | post_safepoint_cleanup_task_event(event, safepoint_id, name); |
581 | } |
582 | |
583 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) { |
584 | const char* name = "updating inline caches" ; |
585 | EventSafepointCleanupTask event; |
586 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
587 | InlineCacheBuffer::update_inline_caches(); |
588 | |
589 | post_safepoint_cleanup_task_event(event, safepoint_id, name); |
590 | } |
591 | |
592 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_COMPILATION_POLICY)) { |
593 | const char* name = "compilation policy safepoint handler" ; |
594 | EventSafepointCleanupTask event; |
595 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
596 | CompilationPolicy::policy()->do_safepoint_work(); |
597 | |
598 | post_safepoint_cleanup_task_event(event, safepoint_id, name); |
599 | } |
600 | |
601 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH)) { |
602 | if (SymbolTable::needs_rehashing()) { |
603 | const char* name = "rehashing symbol table" ; |
604 | EventSafepointCleanupTask event; |
605 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
606 | SymbolTable::rehash_table(); |
607 | |
608 | post_safepoint_cleanup_task_event(event, safepoint_id, name); |
609 | } |
610 | } |
611 | |
612 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_STRING_TABLE_REHASH)) { |
613 | if (StringTable::needs_rehashing()) { |
614 | const char* name = "rehashing string table" ; |
615 | EventSafepointCleanupTask event; |
616 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
617 | StringTable::rehash_table(); |
618 | |
619 | post_safepoint_cleanup_task_event(event, safepoint_id, name); |
620 | } |
621 | } |
622 | |
623 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_CLD_PURGE)) { |
624 | if (ClassLoaderDataGraph::should_purge_and_reset()) { |
// CMS delays purging the CLDG until the beginning of the next safepoint to
// make sure its concurrent sweep is done.
627 | const char* name = "purging class loader data graph" ; |
628 | EventSafepointCleanupTask event; |
629 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
630 | ClassLoaderDataGraph::purge(); |
631 | |
632 | post_safepoint_cleanup_task_event(event, safepoint_id, name); |
633 | } |
634 | } |
635 | |
636 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) { |
637 | if (Dictionary::does_any_dictionary_needs_resizing()) { |
638 | const char* name = "resizing system dictionaries" ; |
639 | EventSafepointCleanupTask event; |
640 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
641 | ClassLoaderDataGraph::resize_dictionaries(); |
642 | |
643 | post_safepoint_cleanup_task_event(event, safepoint_id, name); |
644 | } |
645 | } |
646 | |
647 | if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP)) { |
// Don't bother reporting an event or time for this very short operation.
// To have any utility we'd also want to report whether it was needed.
650 | OopStorage::trigger_cleanup_if_needed(); |
651 | } |
652 | |
653 | _subtasks.all_tasks_completed(_num_workers); |
654 | } |
655 | }; |
656 | |
657 | // Various cleaning tasks that should be done periodically at safepoints. |
658 | void SafepointSynchronize::do_cleanup_tasks() { |
659 | |
660 | TraceTime timer("safepoint cleanup tasks" , TRACETIME_LOG(Info, safepoint, cleanup)); |
661 | |
662 | // Prepare for monitor deflation. |
663 | DeflateMonitorCounters deflate_counters; |
664 | ObjectSynchronizer::prepare_deflate_idle_monitors(&deflate_counters); |
665 | |
666 | CollectedHeap* heap = Universe::heap(); |
667 | assert(heap != NULL, "heap not initialized yet?" ); |
668 | WorkGang* cleanup_workers = heap->get_safepoint_workers(); |
669 | if (cleanup_workers != NULL) { |
670 | // Parallel cleanup using GC provided thread pool. |
671 | uint num_cleanup_workers = cleanup_workers->active_workers(); |
672 | ParallelSPCleanupTask cleanup(num_cleanup_workers, &deflate_counters); |
673 | StrongRootsScope srs(num_cleanup_workers); |
674 | cleanup_workers->run_task(&cleanup); |
675 | } else { |
676 | // Serial cleanup using VMThread. |
677 | ParallelSPCleanupTask cleanup(1, &deflate_counters); |
678 | StrongRootsScope srs(1); |
679 | cleanup.work(0); |
680 | } |
681 | |
682 | // Needs to be done single threaded by the VMThread. This walks |
683 | // the thread stacks looking for references to metadata before |
684 | // deciding to remove it from the metaspaces. |
685 | if (ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) { |
686 | const char* name = "cleanup live ClassLoaderData metaspaces" ; |
687 | TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup)); |
688 | ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces(); |
689 | } |
690 | |
691 | // Finish monitor deflation. |
692 | ObjectSynchronizer::finish_deflate_idle_monitors(&deflate_counters); |
693 | |
694 | assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer" ); |
695 | } |
696 | |
697 | // Methods for determining if a JavaThread is safepoint safe. |
698 | |
// False means unsafe with an undetermined state.
// True means a determined state, but it may be an unsafe state.
// If called from a non-safepoint context, safepoint_count MUST be InactiveSafepointCounter.
702 | bool SafepointSynchronize::try_stable_load_state(JavaThreadState *state, JavaThread *thread, uint64_t safepoint_count) { |
703 | assert((safepoint_count != InactiveSafepointCounter && |
704 | Thread::current() == (Thread*)VMThread::vm_thread() && |
705 | SafepointSynchronize::_state != _not_synchronized) |
706 | || safepoint_count == InactiveSafepointCounter, "Invalid check" ); |
707 | |
708 | // To handle the thread_blocked state on the backedge of the WaitBarrier from |
709 | // previous safepoint and reading the reset value (0/InactiveSafepointCounter) we |
710 | // re-read state after we read thread safepoint id. The JavaThread changes its |
711 | // thread state from thread_blocked before resetting safepoint id to 0. |
712 | // This guarantees the second read will be from an updated thread state. It can |
// either be a different state, making this an unsafe state, or it can see blocked
714 | // again. When we see blocked twice with a 0 safepoint id, either: |
715 | // - It is normally blocked, e.g. on Mutex, TBIVM. |
// - It was in SS::block(), looped around to SS::block() and is blocked on the WaitBarrier.
// - It was in SS::block() but now on a Mutex.
718 | // All of these cases are safe. |
719 | |
720 | *state = thread->thread_state(); |
721 | OrderAccess::loadload(); |
722 | uint64_t sid = thread->safepoint_state()->get_safepoint_id(); // Load acquire |
723 | if (sid != InactiveSafepointCounter && sid != safepoint_count) { |
724 | // In an old safepoint, state not relevant. |
725 | return false; |
726 | } |
727 | return *state == thread->thread_state(); |
728 | } |
729 | |
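// A thread observed in one of these (stable) states does not need to be
// stopped explicitly: it will check the safepoint state and block before it
// re-enters Java code.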
730 | static bool safepoint_safe_with(JavaThread *thread, JavaThreadState state) { |
731 | switch(state) { |
732 | case _thread_in_native: |
// Threads in native are safe if they have no Java stack or have a walkable stack.
734 | return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable(); |
735 | |
736 | case _thread_blocked: |
737 | // On wait_barrier or blocked. |
738 | // Blocked threads should already have walkable stack. |
739 | assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable" ); |
740 | return true; |
741 | |
742 | default: |
743 | return false; |
744 | } |
745 | } |
746 | |
747 | bool SafepointSynchronize::handshake_safe(JavaThread *thread) { |
// This function must be called with the Threads_lock held so that an externally
// suspended thread cannot be resumed and is therefore safe.
750 | assert(Threads_lock->owned_by_self() && Thread::current()->is_VM_thread(), |
751 | "Must hold Threads_lock and be VMThread" ); |
752 | if (thread->is_ext_suspended() || thread->is_terminated()) { |
753 | return true; |
754 | } |
755 | JavaThreadState stable_state; |
756 | if (try_stable_load_state(&stable_state, thread, InactiveSafepointCounter)) { |
757 | return safepoint_safe_with(thread, stable_state); |
758 | } |
759 | return false; |
760 | } |
761 | |
762 | // See if the thread is running inside a lazy critical native and |
763 | // update the thread critical count if so. Also set a suspend flag to |
764 | // cause the native wrapper to return into the JVM to do the unlock |
765 | // once the native finishes. |
766 | static void check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) { |
767 | if (state == _thread_in_native && |
768 | thread->has_last_Java_frame() && |
769 | thread->frame_anchor()->walkable()) { |
770 | // This thread might be in a critical native nmethod so look at |
771 | // the top of the stack and increment the critical count if it |
772 | // is. |
773 | frame wrapper_frame = thread->last_frame(); |
774 | CodeBlob* stub_cb = wrapper_frame.cb(); |
775 | if (stub_cb != NULL && |
776 | stub_cb->is_nmethod() && |
777 | stub_cb->as_nmethod_or_null()->is_lazy_critical_native()) { |
778 | // A thread could potentially be in a critical native across |
779 | // more than one safepoint, so only update the critical state on |
780 | // the first one. When it returns it will perform the unlock. |
781 | if (!thread->do_critical_native_unlock()) { |
782 | #ifdef ASSERT |
783 | if (!thread->in_critical()) { |
784 | GCLocker::increment_debug_jni_lock_count(); |
785 | } |
786 | #endif |
787 | thread->enter_critical(); |
788 | // Make sure the native wrapper calls back on return to |
789 | // perform the needed critical unlock. |
790 | thread->set_critical_native_unlock(); |
791 | } |
792 | } |
793 | } |
794 | } |
795 | |
796 | // ------------------------------------------------------------------------------------------------------- |
797 | // Implementation of Safepoint blocking point |
798 | |
799 | void SafepointSynchronize::block(JavaThread *thread) { |
800 | assert(thread != NULL, "thread must be set" ); |
801 | assert(thread->is_Java_thread(), "not a Java thread" ); |
802 | |
803 | // Threads shouldn't block if they are in the middle of printing, but... |
804 | ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id()); |
805 | |
806 | // Only bail from the block() call if the thread is gone from the |
807 | // thread list; starting to exit should still block. |
808 | if (thread->is_terminated()) { |
809 | // block current thread if we come here from native code when VM is gone |
810 | thread->block_if_vm_exited(); |
811 | |
812 | // otherwise do nothing |
813 | return; |
814 | } |
815 | |
816 | JavaThreadState state = thread->thread_state(); |
817 | thread->frame_anchor()->make_walkable(thread); |
818 | |
819 | uint64_t safepoint_id = SafepointSynchronize::safepoint_counter(); |
820 | // Check that we have a valid thread_state at this point |
821 | switch(state) { |
822 | case _thread_in_vm_trans: |
823 | case _thread_in_Java: // From compiled code |
824 | case _thread_in_native_trans: |
825 | case _thread_blocked_trans: |
826 | case _thread_new_trans: |
827 | |
// We have no idea where the VMThread is; it might even be at the next safepoint.
// So we can miss this poll, but we will stop at the next one.
830 | |
831 | // Load dependent store, it must not pass loading of safepoint_id. |
832 | thread->safepoint_state()->set_safepoint_id(safepoint_id); // Release store |
833 | |
// This part we can skip if we notice we missed the safepoint or are in a future one.
835 | OrderAccess::storestore(); |
// The load in the wait barrier should not float up.
837 | thread->set_thread_state_fence(_thread_blocked); |
838 | |
839 | _wait_barrier->wait(static_cast<int>(safepoint_id)); |
840 | assert(_state != _synchronized, "Can't be" ); |
841 | |
// If the barrier is disarmed, stop the store from floating above the loads in the barrier.
843 | OrderAccess::loadstore(); |
844 | thread->set_thread_state(state); |
845 | |
846 | // Then we reset the safepoint id to inactive. |
847 | thread->safepoint_state()->reset_safepoint_id(); // Release store |
848 | |
849 | OrderAccess::fence(); |
850 | |
851 | break; |
852 | |
853 | default: |
854 | fatal("Illegal threadstate encountered: %d" , state); |
855 | } |
856 | guarantee(thread->safepoint_state()->get_safepoint_id() == InactiveSafepointCounter, |
857 | "The safepoint id should be set only in block path" ); |
858 | |
// Check for pending async exceptions or suspends - except if the
860 | // thread was blocked inside the VM. has_special_runtime_exit_condition() |
861 | // is called last since it grabs a lock and we only want to do that when |
862 | // we must. |
863 | // |
864 | // Note: we never deliver an async exception at a polling point as the |
865 | // compiler may not have an exception handler for it. The polling |
866 | // code will notice the async and deoptimize and the exception will |
867 | // be delivered. (Polling at a return point is ok though). Sure is |
868 | // a lot of bother for a deprecated feature... |
869 | // |
870 | // We don't deliver an async exception if the thread state is |
871 | // _thread_in_native_trans so JNI functions won't be called with |
872 | // a surprising pending exception. If the thread state is going back to java, |
873 | // async exception is checked in check_special_condition_for_native_trans(). |
874 | |
875 | if (state != _thread_blocked_trans && |
876 | state != _thread_in_vm_trans && |
877 | thread->has_special_runtime_exit_condition()) { |
878 | thread->handle_special_runtime_exit_condition( |
879 | !thread->is_at_poll_safepoint() && (state != _thread_in_native_trans)); |
880 | } |
881 | |
882 | // cross_modify_fence is done by SafepointMechanism::block_if_requested_slow |
883 | // which is the only caller here. |
884 | } |
885 | |
886 | // ------------------------------------------------------------------------------------------------------ |
887 | // Exception handlers |
888 | |
889 | |
890 | void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) { |
891 | assert(thread->is_Java_thread(), "polling reference encountered by VM thread" ); |
892 | assert(thread->thread_state() == _thread_in_Java, "should come from Java code" ); |
893 | if (!ThreadLocalHandshakes) { |
894 | assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization" ); |
895 | } |
896 | |
897 | if (log_is_enabled(Info, safepoint, stats)) { |
898 | Atomic::inc(&_nof_threads_hit_polling_page); |
899 | } |
900 | |
901 | ThreadSafepointState* state = thread->safepoint_state(); |
902 | |
903 | state->handle_polling_page_exception(); |
904 | } |
905 | |
906 | |
907 | void SafepointSynchronize::print_safepoint_timeout() { |
908 | if (!timeout_error_printed) { |
909 | timeout_error_printed = true; |
910 | // Print out the thread info which didn't reach the safepoint for debugging |
911 | // purposes (useful when there are lots of threads in the debugger). |
912 | LogTarget(Warning, safepoint) lt; |
913 | if (lt.is_enabled()) { |
914 | ResourceMark rm; |
915 | LogStream ls(lt); |
916 | |
917 | ls.cr(); |
918 | ls.print_cr("# SafepointSynchronize::begin: Timeout detected:" ); |
919 | ls.print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint." ); |
920 | ls.print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:" ); |
921 | for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) { |
922 | if (cur_thread->safepoint_state()->is_running()) { |
923 | ls.print("# " ); |
924 | cur_thread->print_on(&ls); |
925 | ls.cr(); |
926 | } |
927 | } |
928 | ls.print_cr("# SafepointSynchronize::begin: (End of list)" ); |
929 | } |
930 | } |
931 | |
// To debug a long safepoint, specify both AbortVMOnSafepointTimeout &
933 | // ShowMessageBoxOnError. |
934 | if (AbortVMOnSafepointTimeout) { |
935 | // Send the blocking thread a signal to terminate and write an error file. |
936 | for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) { |
937 | if (cur_thread->safepoint_state()->is_running()) { |
938 | if (!os::signal_thread(cur_thread, SIGILL, "blocking a safepoint" )) { |
939 | break; // Could not send signal. Report fatal error. |
940 | } |
941 | // Give cur_thread a chance to report the error and terminate the VM. |
942 | os::sleep(Thread::current(), 3000, false); |
943 | } |
944 | } |
945 | fatal("Safepoint sync time longer than " INTX_FORMAT "ms detected when executing %s." , |
946 | SafepointTimeoutDelay, VMThread::vm_operation()->name()); |
947 | } |
948 | } |
949 | |
950 | // ------------------------------------------------------------------------------------------------------- |
951 | // Implementation of ThreadSafepointState |
952 | |
953 | ThreadSafepointState::ThreadSafepointState(JavaThread *thread) |
954 | : _at_poll_safepoint(false), _thread(thread), _safepoint_safe(false), |
955 | _safepoint_id(SafepointSynchronize::InactiveSafepointCounter), |
956 | _orig_thread_state(_thread_uninitialized), _next(NULL) { |
957 | } |
958 | |
959 | void ThreadSafepointState::create(JavaThread *thread) { |
960 | ThreadSafepointState *state = new ThreadSafepointState(thread); |
961 | thread->set_safepoint_state(state); |
962 | } |
963 | |
964 | void ThreadSafepointState::destroy(JavaThread *thread) { |
965 | if (thread->safepoint_state()) { |
966 | delete(thread->safepoint_state()); |
967 | thread->set_safepoint_state(NULL); |
968 | } |
969 | } |
970 | |
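// The safepoint id is published with release/acquire semantics so that a
// reader which observes the id also observes the thread state stored before
// it (see SafepointSynchronize::block() and try_stable_load_state()).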
971 | uint64_t ThreadSafepointState::get_safepoint_id() const { |
972 | return OrderAccess::load_acquire(&_safepoint_id); |
973 | } |
974 | |
975 | void ThreadSafepointState::reset_safepoint_id() { |
976 | OrderAccess::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter); |
977 | } |
978 | |
979 | void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) { |
980 | OrderAccess::release_store(&_safepoint_id, safepoint_id); |
981 | } |
982 | |
983 | void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) { |
984 | assert(is_running(), "better be running or just have hit safepoint poll" ); |
985 | |
986 | JavaThreadState stable_state; |
987 | if (!SafepointSynchronize::try_stable_load_state(&stable_state, _thread, safepoint_count)) { |
988 | // We could not get stable state of the JavaThread. |
989 | // Consider it running and just return. |
990 | return; |
991 | } |
992 | |
993 | // Save the state at the start of safepoint processing. |
994 | _orig_thread_state = stable_state; |
995 | |
996 | // Check for a thread that is suspended. Note that thread resume tries |
997 | // to grab the Threads_lock which we own here, so a thread cannot be |
998 | // resumed during safepoint synchronization. |
999 | |
1000 | // We check to see if this thread is suspended without locking to |
1001 | // avoid deadlocking with a third thread that is waiting for this |
1002 | // thread to be suspended. The third thread can notice the safepoint |
1003 | // that we're trying to start at the beginning of its SR_lock->wait() |
1004 | // call. If that happens, then the third thread will block on the |
1005 | // safepoint while still holding the underlying SR_lock. We won't be |
1006 | // able to get the SR_lock and we'll deadlock. |
1007 | // |
1008 | // We don't need to grab the SR_lock here for two reasons: |
1009 | // 1) The suspend flags are both volatile and are set with an |
1010 | // Atomic::cmpxchg() call so we should see the suspended |
1011 | // state right away. |
1012 | // 2) We're being called from the safepoint polling loop; if |
1013 | // we don't see the suspended state on this iteration, then |
1014 | // we'll come around again. |
1015 | // |
1016 | bool is_suspended = _thread->is_ext_suspended(); |
1017 | if (is_suspended) { |
1018 | account_safe_thread(); |
1019 | return; |
1020 | } |
1021 | |
1022 | if (safepoint_safe_with(_thread, stable_state)) { |
1023 | check_for_lazy_critical_native(_thread, stable_state); |
1024 | account_safe_thread(); |
1025 | return; |
1026 | } |
1027 | |
1028 | // All other thread states will continue to run until they |
1029 | // transition and self-block in state _blocked |
1030 | // Safepoint polling in compiled code causes the Java threads to do the same. |
1031 | // Note: new threads may require a malloc so they must be allowed to finish |
1032 | |
1033 | assert(is_running(), "examine_state_of_thread on non-running thread" ); |
1034 | return; |
1035 | } |
1036 | |
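// Mark this thread as safepoint safe: it no longer needs to be waited for,
// and an open JNI critical section (if any) is counted towards the GCLocker.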
1037 | void ThreadSafepointState::account_safe_thread() { |
1038 | SafepointSynchronize::decrement_waiting_to_block(); |
1039 | if (_thread->in_critical()) { |
1040 | // Notice that this thread is in a critical section |
1041 | SafepointSynchronize::increment_jni_active_count(); |
1042 | } |
1043 | DEBUG_ONLY(_thread->set_visited_for_critical_count(SafepointSynchronize::safepoint_counter());) |
1044 | assert(!_safepoint_safe, "Must be unsafe before safe" ); |
1045 | _safepoint_safe = true; |
1046 | } |
1047 | |
1048 | void ThreadSafepointState::restart() { |
1049 | assert(_safepoint_safe, "Must be safe before unsafe" ); |
1050 | _safepoint_safe = false; |
1051 | } |
1052 | |
1053 | void ThreadSafepointState::print_on(outputStream *st) const { |
1054 | const char *s = _safepoint_safe ? "_at_safepoint" : "_running" ; |
1055 | |
1056 | st->print_cr("Thread: " INTPTR_FORMAT |
1057 | " [0x%2x] State: %s _at_poll_safepoint %d" , |
1058 | p2i(_thread), _thread->osthread()->thread_id(), s, _at_poll_safepoint); |
1059 | |
1060 | _thread->print_thread_state_on(st); |
1061 | } |
1062 | |
1063 | void ThreadSafepointState::print() const { print_on(tty); } |
1064 | |
1065 | // --------------------------------------------------------------------------------------------------------------------- |
1066 | |
1067 | // Block the thread at poll or poll return for safepoint/handshake. |
1068 | void ThreadSafepointState::handle_polling_page_exception() { |
1069 | |
1070 | // If we're using a global poll, then the thread should not be |
1071 | // marked as safepoint safe yet. |
1072 | assert(!SafepointMechanism::uses_global_page_poll() || !_safepoint_safe, |
1073 | "polling page exception on thread safepoint safe" ); |
1074 | |
1075 | // Step 1: Find the nmethod from the return address |
1076 | address real_return_addr = thread()->saved_exception_pc(); |
1077 | |
1078 | CodeBlob *cb = CodeCache::find_blob(real_return_addr); |
1079 | assert(cb != NULL && cb->is_compiled(), "return address should be in nmethod" ); |
1080 | CompiledMethod* nm = (CompiledMethod*)cb; |
1081 | |
1082 | // Find frame of caller |
1083 | frame stub_fr = thread()->last_frame(); |
1084 | CodeBlob* stub_cb = stub_fr.cb(); |
1085 | assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub" ); |
1086 | RegisterMap map(thread(), true); |
1087 | frame caller_fr = stub_fr.sender(&map); |
1088 | |
1089 | // Should only be poll_return or poll |
1090 | assert( nm->is_at_poll_or_poll_return(real_return_addr), "should not be at call" ); |
1091 | |
1092 | // This is a poll immediately before a return. The exception handling code |
1093 | // has already had the effect of causing the return to occur, so the execution |
1094 | // will continue immediately after the call. In addition, the oopmap at the |
1095 | // return point does not mark the return value as an oop (if it is), so |
1096 | // it needs a handle here to be updated. |
1097 | if( nm->is_at_poll_return(real_return_addr) ) { |
1098 | // See if return type is an oop. |
1099 | bool return_oop = nm->method()->is_returning_oop(); |
1100 | Handle return_value; |
1101 | if (return_oop) { |
1102 | // The oop result has been saved on the stack together with all |
1103 | // the other registers. In order to preserve it over GCs we need |
1104 | // to keep it in a handle. |
1105 | oop result = caller_fr.saved_oop_result(&map); |
1106 | assert(oopDesc::is_oop_or_null(result), "must be oop" ); |
1107 | return_value = Handle(thread(), result); |
1108 | assert(Universe::heap()->is_in_or_null(result), "must be heap pointer" ); |
1109 | } |
1110 | |
1111 | // Block the thread |
1112 | SafepointMechanism::block_if_requested(thread()); |
1113 | |
1114 | // restore oop result, if any |
1115 | if (return_oop) { |
1116 | caller_fr.set_saved_oop_result(&map, return_value()); |
1117 | } |
1118 | } |
1119 | |
1120 | // This is a safepoint poll. Verify the return address and block. |
1121 | else { |
1122 | set_at_poll_safepoint(true); |
1123 | |
1124 | // verify the blob built the "return address" correctly |
1125 | assert(real_return_addr == caller_fr.pc(), "must match" ); |
1126 | |
1127 | // Block the thread |
1128 | SafepointMechanism::block_if_requested(thread()); |
1129 | set_at_poll_safepoint(false); |
1130 | |
1131 | // If we have a pending async exception deoptimize the frame |
1132 | // as otherwise we may never deliver it. |
1133 | if (thread()->has_async_condition()) { |
1134 | ThreadInVMfromJavaNoAsyncException __tiv(thread()); |
1135 | Deoptimization::deoptimize_frame(thread(), caller_fr.id()); |
1136 | } |
1137 | |
1138 | // If an exception has been installed we must check for a pending deoptimization |
1139 | // Deoptimize frame if exception has been thrown. |
1140 | |
1141 | if (thread()->has_pending_exception() ) { |
1142 | RegisterMap map(thread(), true); |
1143 | frame caller_fr = stub_fr.sender(&map); |
1144 | if (caller_fr.is_deoptimized_frame()) { |
// The exception patch will destroy registers that are still
// live and will be needed during deoptimization. The async
// exception should have been deferred until the next safepoint,
// which will be detected when we get into the interpreter, so if
// we have an exception now things are messed up.
1151 | |
1152 | fatal("Exception installed and deoptimization is pending" ); |
1153 | } |
1154 | } |
1155 | } |
1156 | } |
1157 | |
1158 | |
1159 | // ------------------------------------------------------------------------------------------------------- |
1160 | // Implementation of SafepointTracing |
1161 | |
1162 | jlong SafepointTracing::_last_safepoint_begin_time_ns = 0; |
1163 | jlong SafepointTracing::_last_safepoint_sync_time_ns = 0; |
1164 | jlong SafepointTracing::_last_safepoint_cleanup_time_ns = 0; |
1165 | jlong SafepointTracing::_last_safepoint_end_time_ns = 0; |
1166 | jlong SafepointTracing::_last_safepoint_end_time_epoch_ms = 0; |
1167 | jlong SafepointTracing::_last_app_time_ns = 0; |
1168 | int SafepointTracing::_nof_threads = 0; |
1169 | int SafepointTracing::_nof_running = 0; |
1170 | int SafepointTracing::_page_trap = 0; |
1171 | VM_Operation::VMOp_Type SafepointTracing::_current_type; |
1172 | jlong SafepointTracing::_max_sync_time = 0; |
1173 | jlong SafepointTracing::_max_vmop_time = 0; |
1174 | uint64_t SafepointTracing::_op_count[VM_Operation::VMOp_Terminating] = {0}; |
1175 | |
1176 | void SafepointTracing::init() { |
1177 | // Application start |
1178 | _last_safepoint_end_time_ns = os::javaTimeNanos(); |
1179 | // amount of time since epoch |
1180 | _last_safepoint_end_time_epoch_ms = os::javaTimeMillis(); |
1181 | } |
1182 | |
1183 | // Helper method to print the header. |
static void print_header(outputStream* st) {
1185 | // The number of spaces is significant here, and should match the format |
1186 | // specifiers in print_statistics(). |
1187 | |
1188 | st->print("VM Operation " |
1189 | "[ threads: total initial_running ]" |
1190 | "[ time: sync cleanup vmop total ]" ); |
1191 | |
1192 | st->print_cr(" page_trap_count" ); |
1193 | } |
1194 | |
1195 | // This prints a nice table. To get the statistics to not shift due to the logging uptime |
1196 | // decorator, use the option as: -Xlog:safepoint+stats:[outputfile]:none |
1197 | void SafepointTracing::statistics_log() { |
1198 | LogTarget(Info, safepoint, stats) lt; |
1199 | assert (lt.is_enabled(), "should only be called when printing statistics is enabled" ); |
1200 | LogStream ls(lt); |
1201 | |
1202 | static int _cur_stat_index = 0; |
1203 | |
1204 | // Print header every 30 entries |
1205 | if ((_cur_stat_index % 30) == 0) { |
1206 | print_header(&ls); |
1207 | _cur_stat_index = 1; // wrap |
1208 | } else { |
1209 | _cur_stat_index++; |
1210 | } |
1211 | |
1212 | ls.print("%-28s [ " |
1213 | INT32_FORMAT_W(8) " " INT32_FORMAT_W(8) " " |
1214 | "]" , |
1215 | VM_Operation::name(_current_type), |
1216 | _nof_threads, |
1217 | _nof_running); |
1218 | ls.print("[ " |
1219 | INT64_FORMAT_W(10) " " INT64_FORMAT_W(10) " " |
1220 | INT64_FORMAT_W(10) " " INT64_FORMAT_W(10) " ]" , |
1221 | (int64_t)(_last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns), |
1222 | (int64_t)(_last_safepoint_cleanup_time_ns - _last_safepoint_sync_time_ns), |
1223 | (int64_t)(_last_safepoint_end_time_ns - _last_safepoint_cleanup_time_ns), |
1224 | (int64_t)(_last_safepoint_end_time_ns - _last_safepoint_begin_time_ns)); |
1225 | |
1226 | ls.print_cr(INT32_FORMAT_W(16), _page_trap); |
1227 | } |
1228 | |
// This method will be called when the VM exits. It tries to summarize the sampling.
1230 | // Current thread may already be deleted, so don't use ResourceMark. |
1231 | void SafepointTracing::statistics_exit_log() { |
1232 | if (!log_is_enabled(Info, safepoint, stats)) { |
1233 | return; |
1234 | } |
1235 | for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) { |
1236 | if (_op_count[index] != 0) { |
1237 | log_info(safepoint, stats)("%-28s" UINT64_FORMAT_W(10), VM_Operation::name(index), |
1238 | _op_count[index]); |
1239 | } |
1240 | } |
1241 | |
1242 | log_info(safepoint, stats)("VM operations coalesced during safepoint " INT64_FORMAT, |
1243 | VMThread::get_coalesced_count()); |
1244 | log_info(safepoint, stats)("Maximum sync time " INT64_FORMAT" ns" , |
1245 | (int64_t)(_max_sync_time)); |
1246 | log_info(safepoint, stats)("Maximum vm operation time (except for Exit VM operation) " |
1247 | INT64_FORMAT " ns" , |
1248 | (int64_t)(_max_vmop_time)); |
1249 | } |
1250 | |
1251 | void SafepointTracing::begin(VM_Operation::VMOp_Type type) { |
1252 | _op_count[type]++; |
1253 | _current_type = type; |
1254 | |
1255 | // update the time stamp to begin recording safepoint time |
1256 | _last_safepoint_begin_time_ns = os::javaTimeNanos(); |
1257 | _last_safepoint_sync_time_ns = 0; |
1258 | _last_safepoint_cleanup_time_ns = 0; |
1259 | |
1260 | _last_app_time_ns = _last_safepoint_begin_time_ns - _last_safepoint_end_time_ns; |
1261 | _last_safepoint_end_time_ns = 0; |
1262 | |
1263 | RuntimeService::record_safepoint_begin(_last_app_time_ns); |
1264 | } |
1265 | |
1266 | void SafepointTracing::synchronized(int nof_threads, int nof_running, int traps) { |
1267 | _last_safepoint_sync_time_ns = os::javaTimeNanos(); |
1268 | _nof_threads = nof_threads; |
1269 | _nof_running = nof_running; |
1270 | _page_trap = traps; |
1271 | RuntimeService::record_safepoint_synchronized(_last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns); |
1272 | } |
1273 | |
1274 | void SafepointTracing::cleanup() { |
1275 | _last_safepoint_cleanup_time_ns = os::javaTimeNanos(); |
1276 | } |
1277 | |
1278 | void SafepointTracing::end() { |
1279 | _last_safepoint_end_time_ns = os::javaTimeNanos(); |
1280 | // amount of time since epoch |
1281 | _last_safepoint_end_time_epoch_ms = os::javaTimeMillis(); |
1282 | |
1283 | if (_max_sync_time < (_last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns)) { |
1284 | _max_sync_time = _last_safepoint_sync_time_ns - _last_safepoint_begin_time_ns; |
1285 | } |
1286 | if (_max_vmop_time < (_last_safepoint_end_time_ns - _last_safepoint_sync_time_ns)) { |
1287 | _max_vmop_time = _last_safepoint_end_time_ns - _last_safepoint_sync_time_ns; |
1288 | } |
1289 | if (log_is_enabled(Info, safepoint, stats)) { |
1290 | statistics_log(); |
1291 | } |
1292 | |
1293 | log_info(safepoint)( |
1294 | "Safepoint \"%s\", " |
1295 | "Time since last: " JLONG_FORMAT " ns, " |
1296 | "Reaching safepoint: " JLONG_FORMAT " ns, " |
1297 | "At safepoint: " JLONG_FORMAT " ns, " |
1298 | "Total: " JLONG_FORMAT " ns" , |
1299 | VM_Operation::name(_current_type), |
1300 | _last_app_time_ns, |
1301 | _last_safepoint_cleanup_time_ns - _last_safepoint_begin_time_ns, |
1302 | _last_safepoint_end_time_ns - _last_safepoint_cleanup_time_ns, |
1303 | _last_safepoint_end_time_ns - _last_safepoint_begin_time_ns |
1304 | ); |
1305 | |
1306 | RuntimeService::record_safepoint_end(_last_safepoint_end_time_ns - _last_safepoint_cleanup_time_ns); |
1307 | } |
1308 | |