| 1 | /* |
| 2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_RUNTIME_VMOPERATIONS_HPP |
| 26 | #define SHARE_RUNTIME_VMOPERATIONS_HPP |
| 27 | |
| 28 | #include "classfile/javaClasses.hpp" |
| 29 | #include "memory/allocation.hpp" |
| 30 | #include "oops/oop.hpp" |
| 31 | #include "runtime/thread.hpp" |
| 32 | #include "runtime/threadSMR.hpp" |
| 33 | #include "code/codeCache.hpp" |
| 34 | |
| 35 | // The following classes are used for operations |
| 36 | // initiated by a Java thread but that must |
| 37 | // take place in the VMThread. |
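|  | // |
|  | // Minimal usage sketch (illustrative only): the requesting thread creates a |
|  | // VM_Operation subclass instance and hands it to the VMThread, which runs |
|  | // doit() on its behalf. VM_ForceSafepoint is declared later in this file; |
|  | // VMThread::execute() is declared in runtime/vmThread.hpp. |
|  | // |
|  | //   VM_ForceSafepoint op;    // empty operation, see below |
|  | //   VMThread::execute(&op);  // returns once doit() has run at a safepoint |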
| 38 | |
| 39 | #define VM_OP_ENUM(type) VMOp_##type, |
| 40 | |
| 41 | // Note: When adding a new VM_XXX operation, add 'XXX' to the template table below. |
| 42 | #define VM_OPS_DO(template) \ |
| 43 | template(None) \ |
| 44 | template(Cleanup) \ |
| 45 | template(ThreadStop) \ |
| 46 | template(ThreadDump) \ |
| 47 | template(PrintThreads) \ |
| 48 | template(FindDeadlocks) \ |
| 49 | template(ClearICs) \ |
| 50 | template(ForceSafepoint) \ |
| 51 | template(ForceAsyncSafepoint) \ |
| 52 | template(Deoptimize) \ |
| 53 | template(DeoptimizeFrame) \ |
| 54 | template(DeoptimizeAll) \ |
| 55 | template(ZombieAll) \ |
| 56 | template(Verify) \ |
| 57 | template(PrintJNI) \ |
| 58 | template(HeapDumper) \ |
| 59 | template(DeoptimizeTheWorld) \ |
| 60 | template(CollectForMetadataAllocation) \ |
| 61 | template(GC_HeapInspection) \ |
| 62 | template(GenCollectFull) \ |
| 63 | template(GenCollectFullConcurrent) \ |
| 64 | template(GenCollectForAllocation) \ |
| 65 | template(ParallelGCFailedAllocation) \ |
| 66 | template(ParallelGCSystemGC) \ |
| 67 | template(CMS_Initial_Mark) \ |
| 68 | template(CMS_Final_Remark) \ |
| 69 | template(G1CollectForAllocation) \ |
| 70 | template(G1CollectFull) \ |
| 71 | template(G1Concurrent) \ |
| 72 | template(ZMarkStart) \ |
| 73 | template(ZMarkEnd) \ |
| 74 | template(ZRelocateStart) \ |
| 75 | template(ZVerify) \ |
| 76 | template(HandshakeOneThread) \ |
| 77 | template(HandshakeAllThreads) \ |
| 78 | template(HandshakeFallback) \ |
| 79 | template(EnableBiasedLocking) \ |
| 80 | template(RevokeBias) \ |
| 81 | template(BulkRevokeBias) \ |
| 82 | template(PopulateDumpSharedSpace) \ |
| 83 | template(JNIFunctionTableCopier) \ |
| 84 | template(RedefineClasses) \ |
| 85 | template(UpdateForPopTopFrame) \ |
| 86 | template(SetFramePop) \ |
| 87 | template(GetOwnedMonitorInfo) \ |
| 88 | template(GetObjectMonitorUsage) \ |
| 89 | template(GetCurrentContendedMonitor) \ |
| 90 | template(GetStackTrace) \ |
| 91 | template(GetMultipleStackTraces) \ |
| 92 | template(GetAllStackTraces) \ |
| 93 | template(GetThreadListStackTraces) \ |
| 94 | template(GetFrameCount) \ |
| 95 | template(GetFrameLocation) \ |
| 96 | template(ChangeBreakpoints) \ |
| 97 | template(GetOrSetLocal) \ |
| 98 | template(GetCurrentLocation) \ |
| 99 | template(EnterInterpOnlyMode) \ |
| 100 | template(ChangeSingleStep) \ |
| 101 | template(HeapWalkOperation) \ |
| 102 | template(HeapIterateOperation) \ |
| 103 | template(ReportJavaOutOfMemory) \ |
| 104 | template(JFRCheckpoint) \ |
| 105 | template(ShenandoahFullGC) \ |
| 106 | template(ShenandoahInitMark) \ |
| 107 | template(ShenandoahFinalMarkStartEvac) \ |
| 108 | template(ShenandoahFinalEvac) \ |
| 109 | template(ShenandoahInitTraversalGC) \ |
| 110 | template(ShenandoahFinalTraversalGC) \ |
| 111 | template(ShenandoahInitUpdateRefs) \ |
| 112 | template(ShenandoahFinalUpdateRefs) \ |
| 113 | template(ShenandoahDegeneratedGC) \ |
| 114 | template(Exit) \ |
| 115 | template(LinuxDllLoad) \ |
| 116 | template(RotateGCLog) \ |
| 117 | template(WhiteBoxOperation) \ |
| 118 | template(JVMCIResizeCounters) \ |
| 119 | template(ClassLoaderStatsOperation) \ |
| 120 | template(ClassLoaderHierarchyOperation) \ |
| 121 | template(DumpHashtable) \ |
| 122 | template(DumpTouchedMethods) \ |
| 123 | template(MarkActiveNMethods) \ |
| 124 | template(PrintCompileQueue) \ |
| 125 | template(PrintClassHierarchy) \ |
| 126 | template(ThreadSuspend) \ |
| 127 | template(ThreadsSuspendJVMTI) \ |
| 128 | template(ICBufferFull) \ |
| 129 | template(ScavengeMonitors) \ |
| 130 | template(PrintMetadata) \ |
| 131 | template(GTestExecuteAtSafepoint) \ |
| 132 | template(JFROldObject) \ |
| 133 | |
| 134 | class VM_Operation: public CHeapObj<mtInternal> { |
| 135 | public: |
| 136 | enum Mode { |
| 137 | _safepoint, // blocking, safepoint, vm_op C-heap allocated |
| 138 | _no_safepoint, // blocking, no safepoint, vm_op C-heap allocated |
| 139 | _concurrent, // non-blocking, no safepoint, vm_op C-heap allocated |
| 140 | _async_safepoint // non-blocking, safepoint, vm_op C-heap allocated |
| 141 | }; |
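|  | // For orientation: most operations in this file keep the default _safepoint |
|  | // mode (see evaluation_mode() below); VM_ThreadStop and VM_ScavengeMonitors |
|  | // are examples that override it to _async_safepoint. |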
| 142 | |
| 143 | enum VMOp_Type { |
| 144 | VM_OPS_DO(VM_OP_ENUM) |
| 145 | VMOp_Terminating |
| 146 | }; |
| 147 | |
| 148 | private: |
| 149 | Thread* _calling_thread; |
| 150 | ThreadPriority _priority; |
| 151 | long _timestamp; |
| 152 | VM_Operation* _next; |
| 153 | VM_Operation* _prev; |
| 154 | |
| 155 | // The VM operation name array |
| 156 | static const char* _names[]; |
| 157 | |
| 158 | public: |
| 159 | VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; } |
| 160 | virtual ~VM_Operation() {} |
| 161 | |
| 162 | // VM operation support (used by VM thread) |
| 163 | Thread* calling_thread() const { return _calling_thread; } |
| 164 | ThreadPriority priority() { return _priority; } |
| 165 | void set_calling_thread(Thread* thread, ThreadPriority priority); |
| 166 | |
| 167 | long timestamp() const { return _timestamp; } |
| 168 | void set_timestamp(long timestamp) { _timestamp = timestamp; } |
| 169 | |
| 170 | // Called by the VM thread; invokes doit() in turn. Do not override this. |
| 171 | void evaluate(); |
| 172 | |
| 173 | // evaluate() is called by the VMThread and in turn calls doit(). |
| 174 | // If the thread invoking VMThread::execute(VM_Operation*) is a JavaThread, |
| 175 | // doit_prologue() is called in that thread before transferring control to |
| 176 | // the VMThread. |
| 177 | // If doit_prologue() returns true the VM operation will proceed, and |
| 178 | // doit_epilogue() will be called by the JavaThread once the VM operation |
| 179 | // completes. If doit_prologue() returns false the VM operation is cancelled. |
| 180 | virtual void doit() = 0; |
| 181 | virtual bool doit_prologue() { return true; }; |
| 182 | virtual void doit_epilogue() {}; // Note: Not called if mode is: _concurrent |
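|  | |
|  | // Hedged sketch of a typical subclass (VM_ExampleOp is hypothetical and not |
|  | // part of HotSpot), showing which piece runs in which thread: |
|  | // |
|  | //   class VM_ExampleOp : public VM_Operation { |
|  | //    public: |
|  | //     VMOp_Type type() const { return VMOp_None; } // a real op has its own entry in VM_OPS_DO |
|  | //     bool doit_prologue()   { return true; }      // requesting JavaThread; returning false cancels the op |
|  | //     void doit()            { /* VMThread, at a safepoint by default */ } |
|  | //     void doit_epilogue()   { }                   // requesting JavaThread, after the op completes |
|  | //   }; |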
| 183 | |
| 184 | // Type test |
| 185 | virtual bool is_methodCompiler() const { return false; } |
| 186 | |
| 187 | // Linking |
| 188 | VM_Operation *next() const { return _next; } |
| 189 | VM_Operation *prev() const { return _prev; } |
| 190 | void set_next(VM_Operation *next) { _next = next; } |
| 191 | void set_prev(VM_Operation *prev) { _prev = prev; } |
| 192 | |
| 193 | // Configuration. Override these appropriately in subclasses. |
| 194 | virtual VMOp_Type type() const = 0; |
| 195 | virtual Mode evaluation_mode() const { return _safepoint; } |
| 196 | virtual bool allow_nested_vm_operations() const { return false; } |
| 197 | virtual bool is_cheap_allocated() const { return false; } |
| 198 | virtual void oops_do(OopClosure* f) { /* do nothing */ }; |
| 199 | |
| 200 | // CAUTION: <don't hang yourself with the following rope> |
| 201 | // If you override these methods, make sure that the evaluation |
| 202 | // of these methods is race-free and non-blocking, since these |
| 203 | // methods may be evaluated either by the mutators or by the |
| 204 | // VM thread, either concurrently with mutators or with the mutators |
| 205 | // stopped. In other words, taking locks is verboten, and if there |
| 206 | // are any races in evaluating the conditions, they'd better be benign. |
| 207 | virtual bool evaluate_at_safepoint() const { |
| 208 | return evaluation_mode() == _safepoint || |
| 209 | evaluation_mode() == _async_safepoint; |
| 210 | } |
| 211 | virtual bool evaluate_concurrently() const { |
| 212 | return evaluation_mode() == _concurrent || |
| 213 | evaluation_mode() == _async_safepoint; |
| 214 | } |
| 215 | |
| 216 | static const char* mode_to_string(Mode mode); |
| 217 | |
| 218 | // Debugging |
| 219 | virtual void print_on_error(outputStream* st) const; |
| 220 | virtual const char* name() const { return _names[type()]; } |
| 221 | static const char* name(int type) { |
| 222 | assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type"); |
| 223 | return _names[type]; |
| 224 | } |
| 225 | #ifndef PRODUCT |
| 226 | void print_on(outputStream* st) const { print_on_error(st); } |
| 227 | #endif |
| 228 | }; |
| 229 | |
| 230 | class VM_None: public VM_Operation { |
| 231 | const char* _reason; |
| 232 | public: |
| 233 | VM_None(const char* reason) : _reason(reason) {} |
| 234 | const char* name() const { return _reason; } |
| 235 | VMOp_Type type() const { return VMOp_None; } |
| 236 | void doit() {}; |
| 237 | }; |
| 238 | |
| 239 | class VM_Cleanup: public VM_Operation { |
| 240 | public: |
| 241 | VMOp_Type type() const { return VMOp_Cleanup; } |
| 242 | void doit() {}; |
| 243 | }; |
| 244 | |
| 245 | class VM_ThreadStop: public VM_Operation { |
| 246 | private: |
| 247 | oop _thread; // The Thread that the Throwable is thrown against |
| 248 | oop _throwable; // The Throwable thrown at the target Thread |
| 249 | public: |
| 250 | // The _thread and _throwable oops are kept alive (and updated) across a possible GC |
| 251 | // via oops_do() below, since a GC may happen before the VM operation is executed. |
| 252 | VM_ThreadStop(oop thread, oop throwable) { |
| 253 | _thread = thread; |
| 254 | _throwable = throwable; |
| 255 | } |
| 256 | VMOp_Type type() const { return VMOp_ThreadStop; } |
| 257 | oop target_thread() const { return _thread; } |
| 258 | oop throwable() const { return _throwable;} |
| 259 | void doit(); |
| 260 | // We deoptimize if the top-most frame is compiled - this might require a C2I adapter to be generated |
| 261 | bool allow_nested_vm_operations() const { return true; } |
| 262 | Mode evaluation_mode() const { return _async_safepoint; } |
| 263 | bool is_cheap_allocated() const { return true; } |
| 264 | |
| 265 | // GC support |
| 266 | void oops_do(OopClosure* f) { |
| 267 | f->do_oop(&_thread); f->do_oop(&_throwable); |
| 268 | } |
| 269 | }; |
| 270 | |
| 271 | class VM_ClearICs: public VM_Operation { |
| 272 | private: |
| 273 | bool _preserve_static_stubs; |
| 274 | public: |
| 275 | VM_ClearICs(bool preserve_static_stubs) { _preserve_static_stubs = preserve_static_stubs; } |
| 276 | void doit(); |
| 277 | VMOp_Type type() const { return VMOp_ClearICs; } |
| 278 | }; |
| 279 | |
| 280 | // empty vm op, evaluated just to force a safepoint |
| 281 | class VM_ForceSafepoint: public VM_Operation { |
| 282 | public: |
| 283 | void doit() {} |
| 284 | VMOp_Type type() const { return VMOp_ForceSafepoint; } |
| 285 | }; |
| 286 | |
| 287 | // empty vm op, when forcing a safepoint to suspend a thread |
| 288 | class VM_ThreadSuspend: public VM_ForceSafepoint { |
| 289 | public: |
| 290 | VMOp_Type type() const { return VMOp_ThreadSuspend; } |
| 291 | }; |
| 292 | |
| 293 | // empty vm op, when forcing a safepoint to suspend threads from jvmti |
| 294 | class VM_ThreadsSuspendJVMTI: public VM_ForceSafepoint { |
| 295 | public: |
| 296 | VMOp_Type type() const { return VMOp_ThreadsSuspendJVMTI; } |
| 297 | }; |
| 298 | |
| 299 | // empty vm op, when forcing a safepoint due to inline cache buffers being full |
| 300 | class VM_ICBufferFull: public VM_ForceSafepoint { |
| 301 | public: |
| 302 | VMOp_Type type() const { return VMOp_ICBufferFull; } |
| 303 | }; |
| 304 | |
| 305 | // empty asynchronous vm op, when forcing a safepoint to scavenge monitors |
| 306 | class VM_ScavengeMonitors: public VM_ForceSafepoint { |
| 307 | public: |
| 308 | VMOp_Type type() const { return VMOp_ScavengeMonitors; } |
| 309 | Mode evaluation_mode() const { return _async_safepoint; } |
| 310 | bool is_cheap_allocated() const { return true; } |
| 311 | }; |
| 312 | |
| 313 | // Base class for invoking parts of a gtest in a safepoint. |
| 314 | // Derived classes provide the doit method. |
| 315 | // The calling gtest thread typically also needs to transition from native to VM (see the sketch below). |
| 316 | class VM_GTestExecuteAtSafepoint: public VM_Operation { |
| 317 | public: |
| 318 | VMOp_Type type() const { return VMOp_GTestExecuteAtSafepoint; } |
| 319 | |
| 320 | protected: |
| 321 | VM_GTestExecuteAtSafepoint() {} |
| 322 | }; |
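|  | |
|  | // Hedged usage sketch for a gtest (VM_GTestStub and the calling code are |
|  | // illustrative only). ThreadInVMfromNative, declared in |
|  | // runtime/interfaceSupport.inline.hpp, performs the native->VM transition |
|  | // mentioned above: |
|  | // |
|  | //   class VM_GTestStub : public VM_GTestExecuteAtSafepoint { |
|  | //    public: |
|  | //     void doit() { /* checks that must run inside a safepoint */ } |
|  | //   }; |
|  | // |
|  | //   ThreadInVMfromNative transition(JavaThread::current()); |
|  | //   VM_GTestStub op; |
|  | //   VMThread::execute(&op); |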
| 323 | |
| 324 | class VM_Deoptimize: public VM_Operation { |
| 325 | public: |
| 326 | VM_Deoptimize() {} |
| 327 | VMOp_Type type() const { return VMOp_Deoptimize; } |
| 328 | void doit(); |
| 329 | bool allow_nested_vm_operations() const { return true; } |
| 330 | }; |
| 331 | |
| 332 | class VM_MarkActiveNMethods: public VM_Operation { |
| 333 | public: |
| 334 | VM_MarkActiveNMethods() {} |
| 335 | VMOp_Type type() const { return VMOp_MarkActiveNMethods; } |
| 336 | void doit(); |
| 337 | bool allow_nested_vm_operations() const { return true; } |
| 338 | }; |
| 339 | |
| 340 | // Deopt helper that can deoptimize frames in threads other than the |
| 341 | // current thread. Only used through Deoptimization::deoptimize_frame. |
| 342 | class VM_DeoptimizeFrame: public VM_Operation { |
| 343 | friend class Deoptimization; |
| 344 | |
| 345 | private: |
| 346 | JavaThread* _thread; |
| 347 | intptr_t* _id; |
| 348 | int _reason; |
| 349 | VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason); |
| 350 | |
| 351 | public: |
| 352 | VMOp_Type type() const { return VMOp_DeoptimizeFrame; } |
| 353 | void doit(); |
| 354 | bool allow_nested_vm_operations() const { return true; } |
| 355 | }; |
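|  | |
|  | // Illustrative call path (hedged; see runtime/deoptimization.hpp): external code |
|  | // does not construct this operation directly, it goes through something like |
|  | // |
|  | //   Deoptimization::deoptimize_frame(thread, frame_id); |
|  | // |
|  | // which builds and executes a VM_DeoptimizeFrame on the caller's behalf. |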
| 356 | |
| 357 | #ifndef PRODUCT |
| 358 | class VM_DeoptimizeAll: public VM_Operation { |
| 359 | private: |
| 360 | Klass* _dependee; |
| 361 | public: |
| 362 | VM_DeoptimizeAll() {} |
| 363 | VMOp_Type type() const { return VMOp_DeoptimizeAll; } |
| 364 | void doit(); |
| 365 | bool allow_nested_vm_operations() const { return true; } |
| 366 | }; |
| 367 | |
| 368 | |
| 369 | class VM_ZombieAll: public VM_Operation { |
| 370 | public: |
| 371 | VM_ZombieAll() {} |
| 372 | VMOp_Type type() const { return VMOp_ZombieAll; } |
| 373 | void doit(); |
| 374 | bool allow_nested_vm_operations() const { return true; } |
| 375 | }; |
| 376 | #endif // PRODUCT |
| 377 | |
| 378 | class VM_Verify: public VM_Operation { |
| 379 | public: |
| 380 | VMOp_Type type() const { return VMOp_Verify; } |
| 381 | void doit(); |
| 382 | }; |
| 383 | |
| 384 | |
| 385 | class VM_PrintThreads: public VM_Operation { |
| 386 | private: |
| 387 | outputStream* _out; |
| 388 | bool _print_concurrent_locks; |
| 389 | bool _print_extended_info; |
| 390 | public: |
| 391 | VM_PrintThreads() |
| 392 | : _out(tty), _print_concurrent_locks(PrintConcurrentLocks), _print_extended_info(false) |
| 393 | {} |
| 394 | VM_PrintThreads(outputStream* out, bool print_concurrent_locks, bool print_extended_info) |
| 395 | : _out(out), _print_concurrent_locks(print_concurrent_locks), _print_extended_info(print_extended_info) |
| 396 | {} |
| 397 | VMOp_Type type() const { |
| 398 | return VMOp_PrintThreads; |
| 399 | } |
| 400 | void doit(); |
| 401 | bool doit_prologue(); |
| 402 | void doit_epilogue(); |
| 403 | }; |
| 404 | |
| 405 | class VM_PrintJNI: public VM_Operation { |
| 406 | private: |
| 407 | outputStream* _out; |
| 408 | public: |
| 409 | VM_PrintJNI() { _out = tty; } |
| 410 | VM_PrintJNI(outputStream* out) { _out = out; } |
| 411 | VMOp_Type type() const { return VMOp_PrintJNI; } |
| 412 | void doit(); |
| 413 | }; |
| 414 | |
| 415 | class VM_PrintMetadata : public VM_Operation { |
| 416 | private: |
| 417 | outputStream* const _out; |
| 418 | const size_t _scale; |
| 419 | const int _flags; |
| 420 | |
| 421 | public: |
| 422 | VM_PrintMetadata(outputStream* out, size_t scale, int flags) |
| 423 | : _out(out), _scale(scale), _flags(flags) |
| 424 | {}; |
| 425 | |
| 426 | VMOp_Type type() const { return VMOp_PrintMetadata; } |
| 427 | void doit(); |
| 428 | }; |
| 429 | |
| 430 | class DeadlockCycle; |
| 431 | class VM_FindDeadlocks: public VM_Operation { |
| 432 | private: |
| 433 | bool _concurrent_locks; |
| 434 | DeadlockCycle* _deadlocks; |
| 435 | outputStream* _out; |
| 436 | ThreadsListSetter _setter; // Helper to set hazard ptr in the originating thread |
| 437 | // which protects the JavaThreads in _deadlocks. |
| 438 | |
| 439 | public: |
| 440 | VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _deadlocks(NULL), _out(NULL), _setter() {}; |
| 441 | VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _deadlocks(NULL), _out(st) {}; |
| 442 | ~VM_FindDeadlocks(); |
| 443 | |
| 444 | DeadlockCycle* result() { return _deadlocks; }; |
| 445 | VMOp_Type type() const { return VMOp_FindDeadlocks; } |
| 446 | void doit(); |
| 447 | }; |
| 448 | |
| 449 | class ThreadDumpResult; |
| 450 | class ThreadSnapshot; |
| 451 | class ThreadConcurrentLocks; |
| 452 | |
| 453 | class VM_ThreadDump : public VM_Operation { |
| 454 | private: |
| 455 | ThreadDumpResult* _result; |
| 456 | int _num_threads; |
| 457 | GrowableArray<instanceHandle>* _threads; |
| 458 | int _max_depth; |
| 459 | bool _with_locked_monitors; |
| 460 | bool _with_locked_synchronizers; |
| 461 | |
| 462 | void snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl); |
| 463 | |
| 464 | public: |
| 465 | VM_ThreadDump(ThreadDumpResult* result, |
| 466 | int max_depth, // -1 indicates entire stack |
| 467 | bool with_locked_monitors, |
| 468 | bool with_locked_synchronizers); |
| 469 | |
| 470 | VM_ThreadDump(ThreadDumpResult* result, |
| 471 | GrowableArray<instanceHandle>* threads, |
| 472 | int num_threads, |
| 473 | int max_depth, // -1 indicates entire stack |
| 474 | bool with_locked_monitors, |
| 475 | bool with_locked_synchronizers); |
| 476 | |
| 477 | VMOp_Type type() const { return VMOp_ThreadDump; } |
| 478 | void doit(); |
| 479 | bool doit_prologue(); |
| 480 | void doit_epilogue(); |
| 481 | }; |
| 482 | |
| 483 | |
| 484 | class VM_Exit: public VM_Operation { |
| 485 | private: |
| 486 | int _exit_code; |
| 487 | static volatile bool _vm_exited; |
| 488 | static Thread * volatile _shutdown_thread; |
| 489 | static void wait_if_vm_exited(); |
| 490 | public: |
| 491 | VM_Exit(int exit_code) { |
| 492 | _exit_code = exit_code; |
| 493 | } |
| 494 | static int wait_for_threads_in_native_to_block(); |
| 495 | static int set_vm_exited(); |
| 496 | static bool vm_exited() { return _vm_exited; } |
| 497 | static Thread * shutdown_thread() { return _shutdown_thread; } |
| 498 | static void block_if_vm_exited() { |
| 499 | if (_vm_exited) { |
| 500 | wait_if_vm_exited(); |
| 501 | } |
| 502 | } |
| 503 | VMOp_Type type() const { return VMOp_Exit; } |
| 504 | void doit(); |
| 505 | }; |
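|  | |
|  | // Hedged usage note: block_if_vm_exited() is meant for threads still running |
|  | // native code while the VM shuts down; once _vm_exited is set, such a thread |
|  | // parks in wait_if_vm_exited() instead of re-entering VM state that is being |
|  | // (or has already been) torn down. |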
| 506 | |
| 507 | class VM_PrintCompileQueue: public VM_Operation { |
| 508 | private: |
| 509 | outputStream* _out; |
| 510 | |
| 511 | public: |
| 512 | VM_PrintCompileQueue(outputStream* st) : _out(st) {} |
| 513 | VMOp_Type type() const { return VMOp_PrintCompileQueue; } |
| 514 | Mode evaluation_mode() const { return _safepoint; } |
| 515 | void doit(); |
| 516 | }; |
| 517 | |
| 518 | #if INCLUDE_SERVICES |
| 519 | class VM_PrintClassHierarchy: public VM_Operation { |
| 520 | private: |
| 521 | outputStream* _out; |
| 522 | bool _print_interfaces; |
| 523 | bool _print_subclasses; |
| 524 | char* _classname; |
| 525 | |
| 526 | public: |
| 527 | VM_PrintClassHierarchy(outputStream* st, bool print_interfaces, bool print_subclasses, char* classname) : |
| 528 | _out(st), _print_interfaces(print_interfaces), _print_subclasses(print_subclasses), |
| 529 | _classname(classname) {} |
| 530 | VMOp_Type type() const { return VMOp_PrintClassHierarchy; } |
| 531 | void doit(); |
| 532 | }; |
| 533 | #endif // INCLUDE_SERVICES |
| 534 | |
| 535 | #endif // SHARE_RUNTIME_VMOPERATIONS_HPP |
| 536 | |