/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "logging/log.hpp"
| 33 | #include "logging/logStream.hpp" |
| 34 | #include "logging/logConfiguration.hpp" |
| 35 | #include "memory/heapInspection.hpp" |
| 36 | #include "memory/resourceArea.hpp" |
| 37 | #include "memory/universe.hpp" |
| 38 | #include "oops/symbol.hpp" |
| 39 | #include "runtime/arguments.hpp" |
| 40 | #include "runtime/deoptimization.hpp" |
| 41 | #include "runtime/frame.inline.hpp" |
| 42 | #include "runtime/interfaceSupport.inline.hpp" |
| 43 | #include "runtime/sweeper.hpp" |
| 44 | #include "runtime/thread.inline.hpp" |
| 45 | #include "runtime/threadSMR.inline.hpp" |
| 46 | #include "runtime/vmOperations.hpp" |
| 47 | #include "services/threadService.hpp" |
| 48 | |
| 49 | #define VM_OP_NAME_INITIALIZE(name) #name, |
| 50 | |
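// Table of human-readable names for each VM operation, built by stringizing
// every operation listed in the VM_OPS_DO x-macro.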
const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

void VM_Operation::set_calling_thread(Thread* thread, ThreadPriority priority) {
  _calling_thread = thread;
  assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
  _priority = priority;
}

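// Wraps the operation's doit() with optional begin/end trace output,
// emitted when the "vmoperation" log tag is enabled at debug level.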
void VM_Operation::evaluate() {
  ResourceMark rm;
  LogTarget(Debug, vmoperation) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("begin ");
    print_on_error(&ls);
    ls.cr();
  }
  doit();
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("end ");
    print_on_error(&ls);
    ls.cr();
  }
}

const char* VM_Operation::mode_to_string(Mode mode) {
  switch(mode) {
    case _safepoint      : return "safepoint";
    case _no_safepoint   : return "no safepoint";
    case _concurrent     : return "concurrent";
    case _async_safepoint: return "async safepoint";
    default              : return "unknown";
  }
}
// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
  st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
  st->print("%s", name());

  const char* mode = mode_to_string(evaluation_mode());
  st->print(", mode: %s", mode);

  if (calling_thread()) {
    st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
  }
}

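// Delivers the supplied Throwable to the target thread (the mechanism
// behind java.lang.Thread.stop()). Runs at a safepoint so the target
// cannot exit while it is being examined.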
void VM_ThreadStop::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  ThreadsListHandle tlh;
  JavaThread* target = java_lang_Thread::thread(target_thread());
  // Note that this now allows multiple ThreadDeath exceptions to be
  // thrown at a thread.
  if (target != NULL && (!EnableThreadSMRExtraValidityChecks || tlh.includes(target))) {
    // The target thread has run and has not exited yet.
    target->send_thread_stop(throwable());
  }
}

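// Flushes compiled-code inline caches: either a full clear, or a cleanup
// that preserves static call stubs when requested.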
void VM_ClearICs::doit() {
  if (_preserve_static_stubs) {
    CodeCache::cleanup_inline_caches();
  } else {
    CodeCache::clear_inline_caches();
  }
}

void VM_Deoptimize::doit() {
  // We do not want any GCs to happen while we are in the middle of this VM operation
  ResourceMark rm;
  DeoptimizationMarker dm;

  // Deoptimize all activations depending on marked nmethods
  Deoptimization::deoptimize_dependents();

  // Make the dependent methods not entrant
  CodeCache::make_marked_nmethods_not_entrant();
}

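// Has the sweeper mark the nmethods that are currently active on thread
// stacks, typically as a prelude to sweeping.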
void VM_MarkActiveNMethods::doit() {
  NMethodSweeper::mark_active_nmethods();
}

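// Deoptimizes the single frame identified by 'id' in the given thread.
// Callers normally allocate the operation on their own stack and hand it
// to the VM thread, roughly:
//   VM_DeoptimizeFrame op(thread, frame_id, Deoptimization::Reason_constraint);
//   VMThread::execute(&op);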
VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id     = id;
  _reason = reason;
}


void VM_DeoptimizeFrame::doit() {
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}


#ifndef PRODUCT

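// Debug-only stress operation: deoptimizes either every Java thread
// (-XX:+DeoptimizeALot) or a pseudo-randomly chosen subset of threads and
// frames (-XX:+DeoptimizeRandom).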
void VM_DeoptimizeAll::doit() {
  DeoptimizationMarker dm;
  JavaThreadIteratorWithHandle jtiwh;
  // deoptimize all java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;
    int fnum = os::random() & 0x3;
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          // Biased locking wants an updated register map
          for(StackFrameStream fst(thread, UseBiasedLocking); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current(), fst.register_map());
              }
            }
          }
        }
      }
    }
  }
}

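// Debug-only operation: asks the calling Java thread to turn the compiled
// methods it is executing into zombies (see JavaThread::make_zombies()).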
void VM_ZombieAll::doit() {
  JavaThread *thread = (JavaThread *)calling_thread();
  assert(thread->is_Java_thread(), "must be a Java thread");
  thread->make_zombies();
}

#endif // !PRODUCT

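// Runs a full heap verification pass at a safepoint.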
void VM_Verify::doit() {
  Universe::heap()->prepare_for_verify();
  Universe::verify();
}

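// Heap_lock is acquired in the prologue (in the requesting thread, before
// the safepoint is reached) and released in the epilogue, since doit()
// itself runs at a safepoint and must not block acquiring it there.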
bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}

void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
}

void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_PrintJNI::doit() {
  JNIHandles::print_on(_out);
}

void VM_PrintMetadata::doit() {
  MetaspaceUtils::print_report(_out, _scale, _flags);
}

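// The deadlock cycles handed back by ThreadService are allocated with new;
// the destructor walks the list and frees each one.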
VM_FindDeadlocks::~VM_FindDeadlocks() {
  if (_deadlocks != NULL) {
    DeadlockCycle* cycle = _deadlocks;
    while (cycle != NULL) {
      DeadlockCycle* d = cycle;
      cycle = cycle->next();
      delete d;
    }
  }
}

void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection and those are the
  // JavaThreads we need to be protected when we return info to the
  // originating thread.
  _setter.set();

  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != NULL) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != NULL; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}

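// First form: dump all threads (_num_threads == 0). The second constructor
// below dumps only the threads supplied in the GrowableArray.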
VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = NULL;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}

void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump and those are the JavaThreads we need
  // to be protected when we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = NULL;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == NULL) {
        // skip if the thread doesn't exist
        // Add a dummy snapshot
        _result->add_thread_snapshot();
        continue;
      }

      // Dump thread stack only if the thread is alive and not exiting
      // and not VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != NULL && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = NULL;
      }
      if (jt == NULL || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // add a NULL snapshot if skipped
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = NULL;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl);
    }
  }
}

void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors);
  snapshot->set_concurrent_locks(tcl);
}

volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = NULL;

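// Marks the VM as exited and flags every other thread still running in
// native code; returns the number of such threads found.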
int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true;  // global flag
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited);  // per-thread flag
    }
  }

  return num_active;
}

int VM_Exit::wait_for_threads_in_native_to_block() {
  // VM exits at safepoint. This function must be called at the final safepoint
  // to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();
  Monitor timer(Mutex::leaf, "VM_Exit timer", true,
                Monitor::_safepoint_check_never);

  // Compiler threads need longer wait because they can access VM data directly
  // while in native. If they are active and some structures being used are
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate VM when current thread is the only active thread, so
  // wait for user threads too. Numbers are in 10 milliseconds.
  int max_wait_user_thread = 30;       // at least 300 milliseconds
  int max_wait_compiler_thread = 1000; // at least 10 seconds

  int max_wait = max_wait_compiler_thread;

  int attempts = 0;
  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == NULL || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    if (num_active == 0) {
      return 0;
    } else if (attempts > max_wait) {
      return num_active;
    } else if (num_active_compiler_thread == 0 && attempts > max_wait_user_thread) {
      return num_active;
    }

    attempts++;

    MonitorLocker ml(&timer, Mutex::_no_safepoint_check_flag);
    ml.wait(10);
  }
}

void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // Clean up global resources before exiting. exit_globals() currently
  // cleans up outputStream resources and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for exit hook
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != NULL) {
    // exit hook should exit.
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}

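// Entered by threads that were still running in native code when the VM
// exited: once _vm_exited is set, block forever on Threads_lock (which is
// never released on this path) so the thread never touches freed VM state.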
void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited &&
      Thread::current_or_null() != _shutdown_thread) {
    // _vm_exited is set at safepoint, and the Threads_lock is never released,
    // so we will block here until the process dies.
    Threads_lock->lock_without_safepoint_check();
    ShouldNotReachHere();
  }
}

void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}

#if INCLUDE_SERVICES
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}
#endif