/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "logging/logConfiguration.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/runtimeService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"

VMOperationQueue::VMOperationQueue() {
  // The queue is a circular doubly-linked list, which always contains
  // one element (i.e., one element means empty).
  for(int i = 0; i < nof_priorities; i++) {
    _queue_length[i] = 0;
    _queue_counter = 0;
    _queue[i] = new VM_None("QueueHead");
    _queue[i]->set_next(_queue[i]);
    _queue[i]->set_prev(_queue[i]);
  }
  _drain_list = NULL;
}


bool VMOperationQueue::queue_empty(int prio) {
  // It is empty if there is exactly one element
  bool empty = (_queue[prio] == _queue[prio]->next());
  assert( (_queue_length[prio] == 0 && empty) ||
          (_queue_length[prio] > 0 && !empty), "sanity check");
  return _queue_length[prio] == 0;
}

// Inserts an element to the right of the q element
void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  n->set_prev(q);
  n->set_next(q->next());
  q->next()->set_prev(n);
  q->set_next(n);
}

void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->next(), op);
}

void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->prev(), op);
}


void VMOperationQueue::unlink(VM_Operation* q) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  q->prev()->set_next(q->next());
  q->next()->set_prev(q->prev());
}

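// Removes and returns the oldest operation of the given priority, or NULL
// if that priority queue is empty.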
VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
  if (queue_empty(prio)) return NULL;
  assert(_queue_length[prio] >= 0, "sanity check");
  _queue_length[prio]--;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  unlink(r);
  return r;
}

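// Detaches all queued operations of the given priority as a NULL-terminated
// list (linked via next()) and resets that priority queue to the empty state.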
VM_Operation* VMOperationQueue::queue_drain(int prio) {
  if (queue_empty(prio)) return NULL;
  DEBUG_ONLY(int length = _queue_length[prio];);
  assert(length >= 0, "sanity check");
  _queue_length[prio] = 0;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  // remove links to base element from head and tail
  r->set_prev(NULL);
  _queue[prio]->prev()->set_next(NULL);
  // restore queue to empty state
  _queue[prio]->set_next(_queue[prio]);
  _queue[prio]->set_prev(_queue[prio]);
  assert(queue_empty(prio), "drain corrupted queue");
#ifdef ASSERT
  int len = 0;
  VM_Operation* cur;
  for(cur = r; cur != NULL; cur=cur->next()) len++;
  assert(len == length, "drain lost some ops");
#endif
  return r;
}

void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
  VM_Operation* cur = _queue[queue];
  cur = cur->next();
  while (cur != _queue[queue]) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
  VM_Operation* cur = _drain_list;
  while (cur != NULL) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

//-----------------------------------------------------------------
// High-level interface
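// Adds the operation to the queue that matches its evaluation mode. Called
// with VMOperationQueue_lock held (see VMThread::execute()).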
void VMOperationQueue::add(VM_Operation *op) {

  HOTSPOT_VMOPS_REQUEST(
                   (char *) op->name(), strlen(op->name()),
                   op->evaluation_mode());

  // Encapsulates VM queue policy. Currently, that
  // only involves putting the operation on the right list
  queue_add_back(op->evaluate_at_safepoint() ? SafepointPriority : MediumPriority, op);
}

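// Selects the next operation for the VM thread to execute, or returns NULL if
// both queues are empty. Called from VMThread::loop() with
// VMOperationQueue_lock held.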
VM_Operation* VMOperationQueue::remove_next() {
  // Assuming the VMOperation queue is a two-level priority queue. If there are
  // more than two priorities, we need a different scheduling algorithm.
  assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
         "current algorithm does not work");

  // Simple counter-based scheduling to prevent starvation of the lower
  // priority queue -- see 4390175.
  int high_prio, low_prio;
  if (_queue_counter++ < 10) {
    high_prio = SafepointPriority;
    low_prio = MediumPriority;
  } else {
    _queue_counter = 0;
    high_prio = MediumPriority;
    low_prio = SafepointPriority;
  }

  return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
}

void VMOperationQueue::oops_do(OopClosure* f) {
  for(int i = 0; i < nof_priorities; i++) {
    queue_oops_do(i, f);
  }
  drain_list_oops_do(f);
}

//------------------------------------------------------------------------------------------------------------------
// Timeout machinery

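// Periodic watchdog, enrolled from VMThread::create() when
// AbortVMOnVMOperationTimeout is set: once armed, it aborts the VM with a
// fatal error if the current VM operation has been running for longer than
// AbortVMOnVMOperationTimeoutDelay milliseconds.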
void VMOperationTimeoutTask::task() {
  assert(AbortVMOnVMOperationTimeout, "only if enabled");
  if (is_armed()) {
    jlong delay = (os::javaTimeMillis() - _arm_time);
    if (delay > AbortVMOnVMOperationTimeoutDelay) {
      fatal("VM operation took too long: " JLONG_FORMAT " ms (timeout: " INTX_FORMAT " ms)",
            delay, AbortVMOnVMOperationTimeoutDelay);
    }
  }
}

bool VMOperationTimeoutTask::is_armed() {
  return OrderAccess::load_acquire(&_armed) != 0;
}

void VMOperationTimeoutTask::arm() {
  _arm_time = os::javaTimeMillis();
  OrderAccess::release_store_fence(&_armed, 1);
}

void VMOperationTimeoutTask::disarm() {
  OrderAccess::release_store_fence(&_armed, 0);
}

//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff

bool VMThread::_should_terminate = false;
bool VMThread::_terminated = false;
Monitor* VMThread::_terminate_lock = NULL;
VMThread* VMThread::_vm_thread = NULL;
VM_Operation* VMThread::_cur_vm_operation = NULL;
VMOperationQueue* VMThread::_vm_queue = NULL;
PerfCounter* VMThread::_perf_accumulated_vm_operation_time = NULL;
uint64_t VMThread::_coalesced_count = 0;
VMOperationTimeoutTask* VMThread::_timeout_task = NULL;


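// Creates the singleton VMThread, its operation queue, the termination lock,
// the optional timeout watchdog, and (with UsePerfData) the jvmstat counter.
// Called once during VM startup, before the thread itself is started.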
void VMThread::create() {
  assert(vm_thread() == NULL, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  if (AbortVMOnVMOperationTimeout) {
    // Make sure we call the timeout task frequently enough, but not too frequently.
    // Try to make the interval 10% of the timeout delay, so that we miss the timeout
    // by at most 10%. The periodic task also expects the interval to fit its min/max bounds.
    size_t interval = (size_t)AbortVMOnVMOperationTimeoutDelay / 10;
    interval = interval / PeriodicTask::interval_gran * PeriodicTask::interval_gran;
    interval = MAX2<size_t>(interval, PeriodicTask::min_interval);
    interval = MIN2<size_t>(interval, PeriodicTask::max_interval);

    _timeout_task = new VMOperationTimeoutTask(interval);
    _timeout_task->enroll();
  } else {
    assert(_timeout_task == NULL, "sanity");
  }

  // Create VM operation queue
  _vm_queue = new VMOperationQueue();
  guarantee(_vm_queue != NULL, "just checking");

  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true,
                                Monitor::_safepoint_check_never);

  if (UsePerfData) {
    // jvmstat performance counters
    Thread* THREAD = Thread::current();
    _perf_accumulated_vm_operation_time =
                 PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                                 PerfData::U_Ticks, CHECK);
  }
}

VMThread::VMThread() : NamedThread() {
  set_name("VM Thread");
}

void VMThread::destroy() {
  _vm_thread = NULL;      // VM thread is gone
}

static VM_None halt_op("Halt");

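// Entry point of the VM thread: publishes its JNI handle block, notifies the
// creating thread via Notify_lock, sets the native priority, runs the main
// operation loop, and finally brings the VM to a permanent safepoint for
// termination.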
void VMThread::run() {
  assert(this == vm_thread(), "check");

  // The waiter on Notify_lock re-checks active_handles() after a spurious
  // wakeup, so the handle block must be set to its final value before
  // the notify below.
  this->set_active_handles(JNIHandleBlock::allocate_block());

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that we cannot call os::set_priority because it expects Java
  // priorities and we are *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority( this, prio );

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565: This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887: let the VM thread exit at a safepoint
  _cur_vm_operation = &halt_op;
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // signal other threads that the VM process is gone
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // the VM thread to enter any lock at a safepoint as long as its _owner is NULL.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MonitorLocker ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    ml.notify();
  }

  // We are now racing with the VM termination being carried out in
  // another thread, so we don't "delete this". Numerous threads don't
  // get deleted when the VM terminates.

}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  assert(Thread::current()->is_Java_thread(), "Should be a JavaThread");
  assert(((JavaThread*)Thread::current())->is_terminated(), "Should be terminated");
  { MutexLocker mu(VMOperationQueue_lock, Mutex::_no_safepoint_check_flag);
    _should_terminate = true;
    VMOperationQueue_lock->notify();
  }

  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by Safepoint should not be used after this point,
  // otherwise we will hang, since there is no one left to end the safepoint.

  // Wait until VM thread is terminated
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at a safepoint. It's safer not to share a lock with other threads.
  { MonitorLocker ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while(!VMThread::is_terminated()) {
      ml.wait();
    }
  }
}

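// Fills in and commits a JFR ExecuteVMOperation event for the operation that
// was just evaluated.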
static void post_vm_operation_event(EventExecuteVMOperation* event, VM_Operation* op) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  assert(op != NULL, "invariant");
  const bool is_concurrent = op->evaluate_concurrently();
  const bool evaluate_at_safepoint = op->evaluate_at_safepoint();
  event->set_operation(op->type());
  event->set_safepoint(evaluate_at_safepoint);
  event->set_blocking(!is_concurrent);
  // Only write caller thread information for non-concurrent vm operations.
  // For concurrent vm operations, the caller thread id is set to 0, indicating
  // that the thread is unknown, because the caller could have exited already.
  event->set_caller(is_concurrent ? 0 : JFR_THREAD_ID(op->calling_thread()));
  event->set_safepointId(evaluate_at_safepoint ? SafepointSynchronize::safepoint_id() : 0);
  event->commit();
}

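// Evaluates a single VM operation: times it against the jvmstat counter, fires
// the dtrace/JFR events around op->evaluate(), bumps the calling thread's
// completed-operation count for blocking operations, and deletes the operation
// if it was C-heap allocated.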
void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
    HOTSPOT_VMOPS_BEGIN(
                     (char *) op->name(), strlen(op->name()),
                     op->evaluation_mode());

    EventExecuteVMOperation event;
    op->evaluate();
    if (event.should_commit()) {
      post_vm_operation_event(&event, op);
    }

    HOTSPOT_VMOPS_END(
                     (char *) op->name(), strlen(op->name()),
                     op->evaluation_mode());
  }

  // Last access of info in _cur_vm_operation!
  bool c_heap_allocated = op->is_cheap_allocated();

  // Mark as completed
  if (!op->evaluate_concurrently()) {
    op->calling_thread()->increment_vm_operation_completed_count();
  }
  // It is unsafe to access _cur_vm_operation after the
  // 'increment_vm_operation_completed_count' call: if the operation is stack
  // allocated, the calling thread might already have deallocated it.
  if (c_heap_allocated) {
    delete _cur_vm_operation;
  }
}

static VM_None safepointALot_op("SafepointALot");
static VM_Cleanup cleanup_op;

class HandshakeALotTC : public ThreadClosure {
 public:
  virtual void do_thread(Thread* thread) {
#ifdef ASSERT
    assert(thread->is_Java_thread(), "must be");
    JavaThread* jt = (JavaThread*)thread;
    jt->verify_states_for_handshake();
#endif
  }
};

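// Decides whether the VM thread should safepoint even though no operation is
// pending: forces handshakes when HandshakeALot is set, returns the cleanup op
// if the guaranteed safepoint interval has elapsed and cleanup is needed, the
// SafepointALot op if that flag is set, or NULL if nothing needs to be done.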
VM_Operation* VMThread::no_op_safepoint() {
  // Check for handshakes first since we may need to return a VMop.
  if (HandshakeALot) {
    HandshakeALotTC haltc;
    Handshake::execute(&haltc);
  }
  // Check for a cleanup before SafepointALot to keep stats correct.
  long interval_ms = SafepointTracing::time_since_last_safepoint_ms();
  bool max_time_exceeded = GuaranteedSafepointInterval != 0 &&
                           (interval_ms >= GuaranteedSafepointInterval);
  if (max_time_exceeded && SafepointSynchronize::is_cleanup_needed()) {
    return &cleanup_op;
  }
  if (SafepointALot) {
    return &safepointALot_op;
  }
  // Nothing to be done.
  return NULL;
}

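// Main loop of the VM thread: waits on VMOperationQueue_lock for operations,
// executes safepoint operations inside a safepoint (coalescing any other
// queued safepoint-priority operations into the same safepoint), executes
// non-safepoint operations directly, wakes up waiting requestors, and
// guarantees a safepoint at least every GuaranteedSafepointInterval ms.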
void VMThread::loop() {
  assert(_cur_vm_operation == NULL, "no current one should be executing");

  SafepointSynchronize::init(_vm_thread);

  while(true) {
    VM_Operation* safepoint_ops = NULL;
    //
    // Wait for VM operation
    //
    // use no_safepoint_check to get lock without attempting to "sneak"
    { MonitorLocker mu_queue(VMOperationQueue_lock,
                             Mutex::_no_safepoint_check_flag);

      // Look for new operation
      assert(_cur_vm_operation == NULL, "no current one should be executing");
      _cur_vm_operation = _vm_queue->remove_next();

      // Stall time tracking code
      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
          !_cur_vm_operation->evaluate_concurrently()) {
        long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
        if (stall > 0)
          tty->print_cr("%s stall: %ld", _cur_vm_operation->name(), stall);
      }

      while (!should_terminate() && _cur_vm_operation == NULL) {
        // wait with a timeout to guarantee safepoints at regular intervals
        bool timedout =
          mu_queue.wait(GuaranteedSafepointInterval);

        // Support for self destruction
        if ((SelfDestructTimer != 0) && !VMError::is_error_reported() &&
            (os::elapsedTime() > (double)SelfDestructTimer * 60.0)) {
          tty->print_cr("VM self-destructed");
          exit(-1);
        }

        if (timedout) {
          // Have to unlock VMOperationQueue_lock just in case no_op_safepoint()
          // has to do a handshake.
          MutexUnlocker mul(VMOperationQueue_lock, Mutex::_no_safepoint_check_flag);
          if ((_cur_vm_operation = VMThread::no_op_safepoint()) != NULL) {
            // Force a safepoint since we have not had one for at least
            // 'GuaranteedSafepointInterval' milliseconds and we need to clean
            // something up. This will run all the clean-up processing that needs
            // to be done at a safepoint.
            SafepointSynchronize::begin();
#ifdef ASSERT
            if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
#endif
            SafepointSynchronize::end();
            _cur_vm_operation = NULL;
          }
        }
        _cur_vm_operation = _vm_queue->remove_next();

        // If the operation requires a safepoint, also drain all other queued
        // operations that require one, so they can be evaluated under the same safepoint
        if (_cur_vm_operation != NULL &&
            _cur_vm_operation->evaluate_at_safepoint()) {
          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
        }
      }

      if (should_terminate()) break;
    } // Release mu_queue_lock

    //
    // Execute VM operation
    //
    { HandleMark hm(VMThread::vm_thread());

      EventMark em("Executing VM operation: %s", vm_operation()->name());
      assert(_cur_vm_operation != NULL, "we should have found an operation to execute");

      // If the operation requires a safepoint, evaluate it and all coalesced
      // safepoint-priority operations under a single safepoint
      if (_cur_vm_operation->evaluate_at_safepoint()) {
        log_debug(vmthread)("Evaluating safepoint VM operation: %s", _cur_vm_operation->name());

        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned

        SafepointSynchronize::begin();

        if (_timeout_task != NULL) {
          _timeout_task->arm();
        }

        evaluate_operation(_cur_vm_operation);
        // now process all queued safepoint ops, iteratively draining
        // the queue until there are none left
        do {
          _cur_vm_operation = safepoint_ops;
          if (_cur_vm_operation != NULL) {
            do {
              log_debug(vmthread)("Evaluating coalesced safepoint VM operation: %s", _cur_vm_operation->name());
              // evaluate_operation deletes the op object so we have
              // to grab the next op now
              VM_Operation* next = _cur_vm_operation->next();
              _vm_queue->set_drain_list(next);
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              _coalesced_count++;
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
          // optimization to try and reduce the number of safepoints.
          // As the safepoint synchronizes us with JavaThreads we will see
          // any enqueue made by a JavaThread, but the peek will not
          // necessarily detect a concurrent enqueue by a GC thread; that
          // simply means the op will wait for the next major cycle of the
          // VMThread, just as it would if the GC thread had lost the race
          // for the lock.
          if (_vm_queue->peek_at_safepoint_priority()) {
            // must hold lock while draining queue
            MutexLocker mu_queue(VMOperationQueue_lock,
                                 Mutex::_no_safepoint_check_flag);
            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
          } else {
            safepoint_ops = NULL;
          }
        } while(safepoint_ops != NULL);

        _vm_queue->set_drain_list(NULL);

        if (_timeout_task != NULL) {
          _timeout_task->disarm();
        }

        // Complete safepoint synchronization
        SafepointSynchronize::end();

      } else {  // not a safepoint operation
        log_debug(vmthread)("Evaluating non-safepoint VM operation: %s", _cur_vm_operation->name());
        if (TraceLongCompiles) {
          elapsedTimer t;
          t.start();
          evaluate_operation(_cur_vm_operation);
          t.stop();
          double secs = t.seconds();
          if (secs * 1e3 > LongCompileThreshold) {
            // XXX - _cur_vm_operation should not be accessed after
            // the completed count has been incremented; the waiting
            // thread may have already freed this memory.
            tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
          }
        } else {
          evaluate_operation(_cur_vm_operation);
        }

        _cur_vm_operation = NULL;
      }
    }

    //
    // Notify (potential) waiting Java thread(s)
    { MutexLocker mu(VMOperationRequest_lock, Mutex::_no_safepoint_check_flag);
      VMOperationRequest_lock->notify_all();
    }

    // We want to make sure that we get to a safepoint regularly
    // even when executing VMops that don't require safepoints.
    if ((_cur_vm_operation = VMThread::no_op_safepoint()) != NULL) {
      HandleMark hm(VMThread::vm_thread());
      SafepointSynchronize::begin();
      SafepointSynchronize::end();
      _cur_vm_operation = NULL;
    }
  }
}

// A SkipGCALot object is used to elide the usual effect of gc-a-lot
// over a section of execution by a thread. Currently, it's used only to
// prevent re-entrant calls to GC.
class SkipGCALot : public StackObj {
  private:
   bool _saved;
   Thread* _t;

  public:
#ifdef ASSERT
    SkipGCALot(Thread* t) : _t(t) {
      _saved = _t->skip_gcalot();
      _t->set_skip_gcalot(true);
    }

    ~SkipGCALot() {
      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
      _t->set_skip_gcalot(_saved);
    }
#else
    SkipGCALot(Thread* t) { }
    ~SkipGCALot() { }
#endif
};

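// Called by any thread to have op executed by the VM thread. A non-VM thread
// enqueues the operation (after running its prologue) and, for blocking
// operations, waits until its completion ticket is reached before running the
// epilogue. The VM thread itself evaluates the operation directly, possibly as
// a nested VM operation.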
void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();

  if (!t->is_VM_thread()) {
    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
    // JavaThread or WatcherThread
    bool concurrent = op->evaluate_concurrently();
    // only blocking VM operations need to verify the caller's safepoint state:
    if (!concurrent) {
      t->check_for_valid_safepoint_state(true);
    }

    // New request from Java thread, evaluate prologue
    if (!op->doit_prologue()) {
      return;   // op was cancelled
    }

    // Set up the VM operation for execution
    op->set_calling_thread(t, Thread::get_priority(t));

    // It does not make sense to execute the epilogue if the VM operation
    // object is getting deallocated by the VM thread.
    bool execute_epilog = !op->is_cheap_allocated();
    assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");

    // Get ticket number for non-concurrent VM operations
    int ticket = 0;
    if (!concurrent) {
      ticket = t->vm_operation_ticket();
    }

    // Queue the VM operation. We are guaranteed not to block while holding
    // VMOperationQueue_lock, so we can acquire it without a safepoint check.
    // This allows VM operation requests to be queued up during a safepoint
    // synchronization.
    {
      VMOperationQueue_lock->lock_without_safepoint_check();
      log_debug(vmthread)("Adding VM operation: %s", op->name());
      _vm_queue->add(op);
      op->set_timestamp(os::javaTimeMillis());
      VMOperationQueue_lock->notify();
      VMOperationQueue_lock->unlock();
    }

    if (!concurrent) {
      // Wait for completion of request (non-concurrent)
      // Note: only a JavaThread triggers the safepoint check when locking
      MonitorLocker ml(VMOperationRequest_lock,
                       t->is_Java_thread() ? Mutex::_safepoint_check_flag : Mutex::_no_safepoint_check_flag);
      while(t->vm_operation_completed_count() < ticket) {
        ml.wait();
      }
    }

    if (execute_epilog) {
      op->doit_epilogue();
    }
  } else {
    // invoked by VM thread; usually nested VM operation
    assert(t->is_VM_thread(), "must be a VM thread");
    VM_Operation* prev_vm_operation = vm_operation();
    if (prev_vm_operation != NULL) {
      // Check that the VM operation allows nested VM operations. This is normally
      // not the case; e.g., the compiler does not allow nested scavenges or compiles.
      if (!prev_vm_operation->allow_nested_vm_operations()) {
        fatal("Nested VM operation %s requested by operation %s",
              op->name(), vm_operation()->name());
      }
      op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
    }

    EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());

    // Release all internal handles after operation is evaluated
    HandleMark hm(t);
    _cur_vm_operation = op;

    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
      SafepointSynchronize::begin();
      op->evaluate();
      SafepointSynchronize::end();
    } else {
      op->evaluate();
    }

    // Free memory if needed
    if (op->is_cheap_allocated()) delete op;

    _cur_vm_operation = prev_vm_operation;
  }
}


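// GC support: scan oops reachable from the VM thread itself as well as from
// all currently queued VM operations.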
void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  Thread::oops_do(f, cf);
  _vm_queue->oops_do(f);
}

//------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT

void VMOperationQueue::verify_queue(int prio) {
  // Check that list is correctly linked
  int length = _queue_length[prio];
  VM_Operation *cur = _queue[prio];
  int i;

  // Check forward links
  for(i = 0; i < length; i++) {
    cur = cur->next();
    assert(cur != _queue[prio], "list too short (forward)");
  }
  assert(cur->next() == _queue[prio], "list too long (forward)");

  // Check backwards links
  cur = _queue[prio];
  for(i = 0; i < length; i++) {
    cur = cur->prev();
    assert(cur != _queue[prio], "list too short (backwards)");
  }
  assert(cur->prev() == _queue[prio], "list too long (backwards)");
}

#endif

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop, NULL);
}