/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "jni.h"
#include "code/compiledMethod.hpp"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/frame.hpp"
#include "runtime/globals.hpp"
#include "runtime/handshake.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/park.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/align.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#ifdef ZERO
# include "stack_zero.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif


class SafeThreadsListPtr;
class ThreadSafepointState;
class ThreadsList;
class ThreadsSMRSupport;

class JvmtiThreadState;
class ThreadStatistics;
class ConcurrentLocksDump;
class ParkEvent;
class Parker;
class MonitorInfo;

class ciEnv;
class CompileThread;
class CompileLog;
class CompileTask;
class CompileQueue;
class CompilerCounters;

class vframeArray;
class vframe;
class javaVFrame;

class DeoptResourceMark;
class jvmtiDeferredLocalVariableSet;

class GCTaskQueue;
class ThreadClosure;
class ICRefillVerifier;
class IdealGraphPrinter;

class JVMCIEnv;
class JVMCIPrimitiveArray;

class Metadata;
class ResourceArea;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses, e.g. CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//         - GangWorker
//         - GCTaskThread
//     - WatcherThread
//     - JfrThreadSampler
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.
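//
// For example (an illustrative sketch, not part of this header), code
// holding a Thread* of unknown type can rely on that invariant for a
// two-way dispatch:
//
//   void demo_describe(Thread* t) {          // demo_describe is hypothetical
//     if (t->is_Java_thread()) {
//       JavaThread* jt = (JavaThread*)t;     // safe given the invariant above
//       // ... JavaThread-specific handling ...
//     } else {
//       // t is a NonJavaThread (or a partially constructed/destroyed Thread)
//     }
//   }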

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//    - stack initialization
//    - other OS-level initialization (signal masks etc)
//    - handshake with creating thread (if not started suspended)
//    - this->call_run()  // common shared entry point
//      - shared common initialization
//      - this->pre_run()  // virtual per-thread-type initialization
//      - this->run()  // virtual per-thread-type "main" logic
//      - shared common tear-down
//      - this->post_run()  // virtual per-thread-type tear-down
//      - // 'this' no longer referenceable
//    - OS-level tear-down (minimal)
//    - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread
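//
// As an illustrative sketch (DemoThread is hypothetical, not part of
// HotSpot), a minimal NonJavaThread subclass plugs its "main" logic into
// the sequence above by overriding run(); NonJavaThread already provides
// pre_run()/post_run():
//
//   class DemoThread : public NonJavaThread {
//    public:
//     virtual char* name() const { return (char*)"Demo Thread"; }
//    protected:
//     virtual void run() {
//       // per-thread-type "main" logic; reached via call_run() -> pre_run() -> run()
//     }
//   };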

class Thread: public ThreadShadow {
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL_DECL Thread* _thr_current;
#endif

  // Thread local data area available to the GC. The internal
  // structure and contents of this data area is GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }
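
  // Illustrative sketch of assumed GC-side usage (DemoGCThreadData and
  // demo_barrier are hypothetical): a collector defines its own per-thread
  // struct and views _gc_data as that type, with the STATIC_ASSERT above
  // keeping it within the opaque area.
  //
  //   struct DemoGCThreadData {
  //     HeapWord* _alloc_top;
  //     bool      _in_critical_region;
  //   };
  //
  //   void demo_barrier(Thread* t) {
  //     DemoGCThreadData* data = t->gc_data<DemoGCThreadData>();
  //     data->_in_critical_region = true;
  //   }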

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop _pending_exception;                // pending exception for current thread
  // const char* _exception_file;          // file information for exception (debugging only)
  // int         _exception_line;          // line information for exception (debugging only)
 protected:

  DEBUG_ONLY(static Thread* _starting_thread;)

  // Support for forcing alignment of thread objects for biased locking
  void* _real_malloc_address;

  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr* _threads_list_ptr;
  ThreadsList* cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList* get_threads_hazard_ptr();
  void set_threads_hazard_ptr(ThreadsList* new_list);
  static bool is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList* tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList* untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
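
  // Tagging uses the low address bit, which is always zero for a real
  // (aligned) ThreadsList*. A sketch of the round trip:
  //
  //   ThreadsList* list = ...;                     // aligned, low bit is 0
  //   ThreadsList* tagged = tag_hazard_ptr(list);  // low bit is now 1
  //   assert(is_hazard_ptr_tagged(tagged), "must be tagged");
  //   assert(untag_hazard_ptr(tagged) == list, "round trip restores pointer");
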
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  void* operator new(size_t size) throw() { return allocate(size, true); }
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    return allocate(size, false); }
  void  operator delete(void* p);

 protected:
  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 private:

  // ***************************************************************
  // Suspend and resume support
  // ***************************************************************
  //
  // VM suspend/resume no longer exists - it was once used for various
  // things including safepoints but was deprecated and finally removed
  // in Java 7. Because VM suspension was considered "internal", Java-level
  // suspension was considered "external", and this legacy naming scheme
  // remains.
  //
  // External suspend/resume requests come from JVM_SuspendThread,
  // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI
  // ResumeThread. External suspend requests cause _external_suspend to
  // be set and external resume requests cause _external_suspend to be
  // cleared. External suspend requests do not nest on top of other
  // external suspend requests. The higher level APIs reject suspend
  // requests for already suspended threads.
  //
  // The external_suspend flag is checked by
  // has_special_runtime_exit_condition() and the JavaThread will
  // self-suspend when handle_special_runtime_exit_condition() is
  // called. Most uses of the _thread_blocked state in JavaThreads are
  // considered the same as being externally suspended; if the blocking
  // condition lifts, the JavaThread will self-suspend. Other places
  // where the VM checks for external_suspend include:
  //   + mutex granting (do not enter monitors when thread is suspended)
  //   + state transitions from _thread_in_native
  //
  // In general, java_suspend() does not wait for an external suspend
  // request to complete. When it returns, the only guarantee is that
  // the _external_suspend field is true.
  //
  // wait_for_ext_suspend_completion() is used to wait for an external
  // suspend request to complete. External suspend requests are usually
  // followed by some other interface call that requires the thread to
  // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
  // the interface that requires quiescence, we give the JavaThread a
  // chance to self-suspend before we need it to be quiescent. This
  // improves overall suspend/query performance.
  //
  // _suspend_flags controls the behavior of java_suspend()/java_resume().
  // It must be set under the protection of SR_lock. Reading the flag is
  // OK without SR_lock as long as the value is only used as a hint
  // (e.g., check _external_suspend first without the lock, then recheck
  // inside SR_lock to finish the suspension).
  //
  // _suspend_flags is also overloaded for other "special conditions" so
  // that a single check indicates whether any special action is needed,
  // e.g., for async exceptions.
  // -------------------------------------------------------------------
  // Notes:
  // 1. The suspend/resume logic no longer uses ThreadState in OSThread,
  // but we still update its value to keep other parts of the system
  // (mainly JVMTI) happy. ThreadState is legacy code (see notes in
  // osThread.hpp).
  //
  // 2. It would be more natural if set_external_suspend() were private
  // and part of java_suspend(), but that probably would affect the
  // suspend/query performance. This needs more investigation.
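  //
  // A sketch of that hint-then-lock pattern (illustrative only; it assumes
  // JavaThread's is_external_suspend() accessor and the SR_lock() declared
  // later in this file):
  //
  //   if (jt->is_external_suspend()) {        // racy read, used only as a hint
  //     MutexLocker ml(jt->SR_lock());        // take the lock...
  //     if (jt->is_external_suspend()) {      // ...and recheck under it
  //       // finish the suspension
  //     }
  //   }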

  // suspend/resume lock: used for self-suspend
  Monitor* _SR_lock;

 protected:
  enum SuspendFlags {
    // NOTE: avoid using the sign-bit as cc generates different test code
    // when the sign-bit is used, and sometimes incorrectly - see CR 6398077

    _external_suspend       = 0x20000000U, // thread is asked to self suspend
    _ext_suspended          = 0x40000000U, // thread has self-suspended

    _has_async_exception    = 0x00000001U, // there is a pending async exception
    _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock

    _trace_flag             = 0x00000004U  // call tracing backend
  };
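
  // The flag bits above are updated atomically. An illustrative sketch of
  // the setter, modeled on the inline definitions in thread.inline.hpp
  // (shown here only as a sketch of the technique):
  //
  //   inline void Thread::set_suspend_flag(SuspendFlags f) {
  //     uint32_t flags;
  //     do {
  //       flags = _suspend_flags;
  //     } while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags);
  //   }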

  // various suspension related flags - atomically updated
  // overloaded for async exception checking in check_special_condition_for_native_trans.
  volatile uint32_t _suspend_flags;

 private:
  int _num_nested_signal;

  DEBUG_ONLY(bool _suspendible_thread;)

 public:
  void enter_signal_handler() { _num_nested_signal++; }
  void leave_signal_handler() { _num_nested_signal--; }
  bool is_inside_signal_handler() const { return _num_nested_signal > 0; }

  // Determines if a heap allocation failure will be retried
  // (e.g., by deoptimizing and re-executing in the interpreter).
  // In this case, the failed allocation must raise
  // Universe::out_of_memory_error_retry() and omit side effects
  // such as JVMTI events and handling -XX:+HeapDumpOnOutOfMemoryError
  // and -XX:OnOutOfMemoryError.
  virtual bool in_retryable_allocation() const { return false; }

#ifdef ASSERT
  void set_suspendible_thread() {
    _suspendible_thread = true;
  }

  void clear_suspendible_thread() {
    _suspendible_thread = false;
  }

  bool is_suspendible_thread() { return _suspendible_thread; }
#endif

 private:
  // Active_handles points to a block of handles
  JNIHandleBlock* _active_handles;

  // One-element thread local free list
  JNIHandleBlock* _free_handle_block;

  // Point to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }

 public:
  void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const        { return _last_handle_mark; }
 private:

#ifdef ASSERT
  ICRefillVerifier* _missed_ic_stub_refill_verifier;

 public:
  ICRefillVerifier* missed_ic_stub_refill_verifier() {
    return _missed_ic_stub_refill_verifier;
  }

  void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
    _missed_ic_stub_refill_verifier = verifier;
  }
#endif

 private:

  // Debug support for checking if code does allow safepoints or not.
  // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
  // a mutex, or blocking on an object synchronizer (Java locking).
  // If !allow_safepoint(), then an assertion failure will happen in any of the above cases.
  // If !allow_allocation(), then an assertion failure will happen during allocation
  // (Hence, !allow_safepoint() => !allow_allocation()).
  //
  // The two classes NoSafepointVerifier and NoAllocVerifier are used to set these counters.
  //
  NOT_PRODUCT(int _allow_safepoint_count;)  // If 0, the thread allows a safepoint to happen
  debug_only(int _allow_allocation_count;)  // If 0, the thread is allowed to allocate oops.

  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)  // Should we elide gc-a-lot?

  friend class NoAllocVerifier;
  friend class NoSafepointVerifier;
  friend class PauseNoSafepointVerifier;
  friend class GCLocker;

  volatile void* _polling_page;                 // Thread local polling page

  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  ThreadHeapSampler _heap_sampler;              // For use when sampling the memory.

  ThreadStatisticalInfo _statistical_info;      // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  int _vm_operation_started_count;              // VM_Operation support
  int _vm_operation_completed_count;            // VM_Operation support

  ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
                                                // is waiting to lock
  bool _current_pending_monitor_is_from_java;   // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount;                              // length of omFreeList
  int omFreeProvision;                          // reload chunk size
  ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
  int omInUseCount;                             // length of omInUseList

#ifdef ASSERT
 private:
  volatile uint64_t _visited_for_critical_count;

 public:
  void set_visited_for_critical_count(uint64_t safepoint_id) {
    assert(_visited_for_critical_count == 0, "Must be reset before set");
    assert((safepoint_id & 0x1) == 1, "Must be odd");
    _visited_for_critical_count = safepoint_id;
  }
  void reset_visited_for_critical_count(uint64_t safepoint_id) {
    assert(_visited_for_critical_count == safepoint_id, "Was not visited");
    _visited_for_critical_count = 0;
  }
  bool was_visited_for_critical_count(uint64_t safepoint_id) const {
    return _visited_for_critical_count == safepoint_id;
  }
#endif

 public:
  enum {
    is_definitely_current_thread = true
  };

  // Constructor
  Thread();
  virtual ~Thread() = 0;        // Thread is abstract.

  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current();  // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by children.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif


 public:
  // invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();

  // Testers
  virtual bool is_VM_thread()       const            { return false; }
  virtual bool is_Java_thread()     const            { return false; }
  virtual bool is_Compiler_thread() const            { return false; }
  virtual bool is_Code_cache_sweeper_thread() const  { return false; }
  virtual bool is_hidden_from_external_view() const  { return false; }
  virtual bool is_jvmti_agent_thread() const         { return false; }
  // True iff the thread can perform GC operations at a safepoint.
  // Generally will be true only of VM thread and parallel GC WorkGang
  // threads.
  virtual bool is_GC_task_thread() const             { return false; }
  virtual bool is_Watcher_thread() const             { return false; }
  virtual bool is_ConcurrentGC_thread() const        { return false; }
  virtual bool is_Named_thread() const               { return false; }
  virtual bool is_Worker_thread() const              { return false; }

  // Can this thread make Java upcalls
  virtual bool can_call_java() const                 { return false; }

  // Casts
  virtual WorkerThread* as_Worker_thread() const     { return NULL; }

  virtual char* name() const { return (char*)"Unknown thread"; }

  // Returns the current thread (ASSERTS if NULL)
  static inline Thread* current();
  // Returns the current thread, or NULL if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or NULL if not attached, and is
  // safe for use from signal-handlers
  static inline Thread* current_or_null_safe();

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static ThreadPriority get_priority(const Thread* const thread);
  static void start(Thread* thread);
  static void interrupt(Thread* thr);
  static bool is_interrupted(Thread* thr, bool clear_interrupted);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
  Monitor* SR_lock() const                       { return _SR_lock; }

  bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }

  inline void set_suspend_flag(SuspendFlags f);
  inline void clear_suspend_flag(SuspendFlags f);

  inline void set_has_async_exception();
  inline void clear_has_async_exception();

  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

  inline void set_critical_native_unlock();
  inline void clear_critical_native_unlock();

  inline void set_trace_flag();
  inline void clear_trace_flag();

  // Support for Unhandled Oop detection
  // Add the field for both, fastdebug and debug, builds to keep
  // Thread's fields layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug build.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc. It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v; }
#endif

  // Installs a pending exception to be inserted later
  static void send_async_exception(oop thread_oop, oop java_throwable);

  // Resource area
  ResourceArea* resource_area() const            { return _resource_area; }
  void set_resource_area(ResourceArea* area)     { _resource_area = area; }

  OSThread* osthread() const                     { return _osthread; }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const         { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  void initialize_tlab() {
    if (UseTLAB) {
      tlab().initialize();
    }
  }

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  bool is_trace_suspend()               { return (_suspend_flags & _trace_flag) != 0; }

  // VM operation support
  int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
  int vm_operation_completed_count()             { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
  void set_current_pending_monitor_is_from_java(bool from_java) {
    _current_pending_monitor_is_from_java = from_java;
  }
  bool current_pending_monitor_is_from_java() {
    return _current_pending_monitor_is_from_java;
  }

  // For tracking the ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* current_waiting_monitor() {
    return _current_waiting_monitor;
  }
  void set_current_waiting_monitor(ObjectMonitor* monitor) {
    _current_waiting_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  // Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
  virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true". If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already. Returns "true" iff the
  // calling thread does the update; this indicates that the calling
  // thread has claimed the thread in the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }
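
  // An illustrative sketch of the caller pattern (hypothetical helper, in
  // the spirit of Threads::possibly_parallel_threads_do()):
  //
  //   void demo_parallel_do(bool is_par, uintx token, ThreadClosure* cl) {
  //     for (Thread* t = /* first thread */; t != NULL; t = /* next */) {
  //       if (t->claim_threads_do(is_par, token)) {
  //         cl->do_thread(t);   // we won the claim - only we process t
  //       }
  //     }
  //   }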

  uintx threads_do_token() const { return _threads_do_token; }

  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

  // Used by fast lock support
  virtual bool is_lock_owned(address adr) const;

  // Check if address is in the stack of the thread (not just for locks).
  // Warning: the method can only be used on the running thread
  bool is_in_stack(address adr) const;
  // Check if address is in the usable part of the stack (excludes protected
  // guard pages)
  bool is_in_usable_stack(address adr) const;

  // Sets this thread as starting thread. Returns failure if thread
  // creation fails due to lack of memory, too many threads etc.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address   _stack_base;
  size_t    _stack_size;
  uintptr_t _self_raw_id;  // used by get_thread (mutable)
  int       _lgrp_id;

  volatile void** polling_page_addr() { return &_polling_page; }

 public:
  // Stack overflow support
  address stack_base() const           { assert(_stack_base != NULL, "Sanity check"); return _stack_base; }
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end() const            { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT() NOT_NMT_RETURN;

  bool on_local_stack(address adr) const {
    // QQQ this has knowledge of direction, ought to be a stack method
    return (_stack_base >= adr && adr >= stack_end());
  }

  uintptr_t self_raw_id()                    { return _self_raw_id; }
  void      set_self_raw_id(uintptr_t value) { _self_raw_id = value; }

  int  lgrp_id() const        { return _lgrp_id; }
  void set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by this thread.
  Monitor* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // thus the friendship
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const  { print_owned_locks_on(tty); }
  Monitor* owned_locks() const    { return _owned_locks; }
  bool owns_locks() const         { return owned_locks() != NULL; }
  bool owns_locks_but_compiled_lock() const;

  // Deadlock detection
  bool allow_allocation()                          { return _allow_allocation_count == 0; }
  ResourceMark* current_resource_mark()            { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif

  void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration()  { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()   { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset()  { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset()  { return byte_offset_of(Thread, _exception_line); }
  static ByteSize active_handles_offset()  { return byte_offset_of(Thread, _active_handles); }

  static ByteSize stack_base_offset()      { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()      { return byte_offset_of(Thread, _stack_size); }

  static ByteSize polling_page_offset()    { return byte_offset_of(Thread, _polling_page); }

  static ByteSize tlab_start_offset()      { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()        { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()        { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset()     { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  volatile intptr_t _Stalled;
  volatile int _TypeTag;
  ParkEvent * _ParkEvent;   // for synchronized()
  ParkEvent * _SleepEvent;  // for Thread.sleep
  ParkEvent * _MuxEvent;    // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion;  // diagnostic

  volatile int _OnTrap;     // Resume-at IP delta
  jint _hashStateW;         // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;         // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;

  // Low-level leaf-lock primitives used to implement synchronization
  // and native monitor-mutex infrastructure.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);
  static void muxAcquire(volatile intptr_t * Lock, const char * Name);
  static void muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev);
  static void muxRelease(volatile intptr_t * Lock);
};

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != NULL, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
}

class NonJavaThread: public Thread {
  friend class VMStructs;

  NonJavaThread* volatile _next;

  class List;
  static List _the_list;

  void add_to_the_list();
  void remove_from_the_list();

 protected:
  virtual void pre_run();
  virtual void post_run();

 public:
  NonJavaThread();
  ~NonJavaThread();

  class Iterator;
};

// Provides iteration over the list of NonJavaThreads.
// List addition occurs in pre_run(), and removal occurs in post_run(),
// so that only live fully-initialized threads can be found in the list.
// Threads created after an iterator is constructed will not be visited
// by the iterator. The scope of an iterator is a critical section; there
// must be no safepoint checks in that scope.
class NonJavaThread::Iterator : public StackObj {
  uint _protect_enter;
  NonJavaThread* _current;

  // Noncopyable.
  Iterator(const Iterator&);
  Iterator& operator=(const Iterator&);

 public:
  Iterator();
  ~Iterator();

  bool end() const { return _current == NULL; }
  NonJavaThread* current() const { return _current; }
  void step();
};
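
// Typical loop shape (an illustrative sketch; the loop body is hypothetical):
//
//   for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) {
//     NonJavaThread* t = njti.current();
//     // examine t - remember: no safepoint checks in this scope
//   }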

// Name support for threads. non-JavaThread subclasses with multiple
// uniquely named instances should derive from this.
class NamedThread: public NonJavaThread {
  friend class VMStructs;
  enum {
    max_name_len = 64
  };
 private:
  char* _name;
  // log JavaThread being processed by oops_do
  JavaThread* _processed_thread;
  uint _gc_id;  // The current GC id when a thread takes part in GC

 public:
  NamedThread();
  ~NamedThread();
  // May only be called once per thread.
  void set_name(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3);
  virtual bool is_Named_thread() const { return true; }
  virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
  JavaThread *processed_thread() { return _processed_thread; }
  void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
  virtual void print_on(outputStream* st) const;

  void set_gc_id(uint gc_id) { _gc_id = gc_id; }
  uint gc_id() { return _gc_id; }
};
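
// set_name() takes printf-style arguments. An illustrative call (the name
// and id below are hypothetical):
//
//   NamedThread* nt = ...;
//   nt->set_name("Demo Worker#%u", 3);  // formatted into a max_name_len buffer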

// Worker threads are named and have an id of an assigned work.
class WorkerThread: public NamedThread {
 private:
  uint _id;
 public:
  WorkerThread() : _id(0)               { }
  virtual bool is_Worker_thread() const { return true; }

  virtual WorkerThread* as_Worker_thread() const {
    assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
    return (WorkerThread*) this;
  }

  void set_id(uint work_id) { _id = work_id; }
  uint id() const           { return _id; }
};

// A single WatcherThread is used for simulating timer interrupts.
class WatcherThread: public NonJavaThread {
  friend class VMStructs;
 protected:
  virtual void run();

 private:
  static WatcherThread* _watcher_thread;

  static bool _startable;
  // volatile due to at least one lock-free read
  volatile static bool _should_terminate;
 public:
  enum SomeConstants {
    delay_interval = 10  // interrupt delay in milliseconds
  };

  // Constructor
  WatcherThread();

  // No destruction allowed
  ~WatcherThread() {
    guarantee(false, "WatcherThread deletion must fix the race with VM termination");
  }

  // Tester
  bool is_Watcher_thread() const { return true; }

  // Printing
  char* name() const { return (char*)"VM Periodic Task Thread"; }
  void print_on(outputStream* st) const;
  void unpark();

  // Returns the single instance of WatcherThread
  static WatcherThread* watcher_thread() { return _watcher_thread; }

  // Create and start the single instance of WatcherThread, or stop it on shutdown
  static void start();
  static void stop();
  // Only allow start once the VM is sufficiently initialized
  // Otherwise the first task to enroll will trigger the start
  static void make_startable();
 private:
  int sleep() const;
};


class CompilerThread;

typedef void (*ThreadFunction)(JavaThread*, TRAPS);

class JavaThread: public Thread {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class WhiteBox;
 private:
  bool _on_thread_list;  // Is set when this JavaThread is added to the Threads list
  oop  _threadObj;       // The Java level thread object

#ifdef ASSERT
 private:
  int _java_call_counter;

 public:
  int  java_call_counter() { return _java_call_counter; }
  void inc_java_call_counter() { _java_call_counter++; }
  void dec_java_call_counter() {
    assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
    _java_call_counter--;
  }
 private:  // restore original namespace restriction
#endif  // ifdef ASSERT

#ifndef PRODUCT
 public:
  enum {
    jump_ring_buffer_size = 16
  };
 private:  // restore original namespace restriction
#endif

  JavaFrameAnchor _anchor;  // Encapsulation of the current java frame and its state

  ThreadFunction _entry_point;

  JNIEnv _jni_environment;

  // Deopt support
  DeoptResourceMark* _deopt_mark;   // Holds special ResourceMark for deoptimization

  intptr_t* _must_deopt_id;         // id of frame that needs to be deopted once we
                                    // transition out of native
  CompiledMethod* _deopt_nmethod;   // CompiledMethod that is currently being deoptimized
  vframeArray* _vframe_array_head;  // Holds the head of the list of active vframeArrays
  vframeArray* _vframe_array_last;  // Holds last vFrameArray we popped
  // Because deoptimization is lazy we must save jvmti requests to set locals
  // in compiled frames until we deoptimize and we have an interpreter frame.
  // This holds the pointer to array (yeah like there might be more than one) of
  // description of compiled vframes that have locals that need to be updated.
  GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;

  // Handshake value for fixing 6243940. We need a place for the i2c
  // adapter to store the callee Method*. This value is NEVER live
  // across a gc point so it does NOT have to be gc'd
  // The handshake is open ended since we can't be certain that it will
  // be NULLed. This is because we rarely ever see the race and end up
  // in handle_wrong_method which is the backend of the handshake. See
  // code in i2c adapters and handle_wrong_method.

  Method* _callee_target;

  // Used to pass back results to the interpreter or generated code running Java code.
  oop       _vm_result;    // oop result is GC-preserved
  Metadata* _vm_result_2;  // non-oop result

  // See ReduceInitialCardMarks: this holds the precise space interval of
  // the most recent slow path allocation for which compiled code has
  // elided card-marks for performance along the fast-path.
  MemRegion _deferred_card_mark;

  MonitorChunk* _monitor_chunks;  // Contains the off stack monitors
                                  // allocated during deoptimization
                                  // and by JNI_MonitorEnter/Exit

  // Async. requests support
  enum AsyncRequests {
    _no_async_condition = 0,
    _async_exception,
    _async_unsafe_access_error
  };
  AsyncRequests _special_runtime_exit_condition;  // Enum indicating pending async. request
  oop           _pending_async_exception;

  // Safepoint support
 public:                           // Expose _thread_state for SafeFetchInt()
  volatile JavaThreadState _thread_state;
 private:
  ThreadSafepointState* _safepoint_state;  // Holds information about a thread during a safepoint
  address               _saved_exception_pc;  // Saved pc of instruction where last implicit exception happened

  // JavaThread termination support
  enum TerminatedTypes {
    _not_terminated = 0xDEAD - 2,
    _thread_exiting,     // JavaThread::exit() has been called for this thread
    _thread_terminated,  // JavaThread is removed from thread list
    _vm_exited           // JavaThread is still executing native code, but VM is terminated
                         // only VM_Exit can set _vm_exited
  };

  // In general a JavaThread's _terminated field transitions as follows:
  //
  //   _not_terminated => _thread_exiting => _thread_terminated
  //
  // _vm_exited is a special value to cover the case of a JavaThread
  // executing native code after the VM itself is terminated.
  volatile TerminatedTypes _terminated;
  // suspend/resume support
  volatile bool _suspend_equivalent;   // Suspend equivalent condition
  jint          _in_deopt_handler;     // count of deoptimization
                                       // handlers thread is in
  volatile bool _doing_unsafe_access;  // Thread may fault due to unsafe access
  bool _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
                                       // never locked) when throwing an exception. Used by interpreter only.

  // JNI attach states:
  enum JNIAttachStates {
    _not_attaching_via_jni = 1,  // thread is not attaching via JNI
    _attaching_via_jni,          // thread is attaching via JNI
    _attached_via_jni            // thread has attached via JNI
  };

  // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
  // A native thread that is attaching via JNI starts with a value
  // of _attaching_via_jni and transitions to _attached_via_jni.
  volatile JNIAttachStates _jni_attach_state;

 public:
  // State of the stack guard pages for this thread.
  enum StackGuardState {
    stack_guard_unused,                   // not needed
    stack_guard_reserved_disabled,
    stack_guard_yellow_reserved_disabled, // disabled (temporarily) after stack overflow
    stack_guard_enabled                   // enabled
  };

 private:

#if INCLUDE_JVMCI
  // The _pending_* fields below are used to communicate extra information
  // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.

  // Communicates the DeoptReason and DeoptAction of the uncommon trap
  int   _pending_deoptimization;

  // Specifies whether the uncommon trap is to bci 0 of a synchronized method
  // before the monitor has been acquired.
  bool  _pending_monitorenter;

  // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
  bool  _pending_transfer_to_interpreter;

  // True if in a runtime call from compiled code that will deoptimize
  // and re-execute a failed heap allocation in the interpreter.
  bool  _in_retryable_allocation;

  // An id of a speculation that JVMCI compiled code can use to further describe and
  // uniquely identify the speculative optimization guarded by the uncommon trap
  jlong _pending_failed_speculation;

  // These fields are mutually exclusive in terms of live ranges.
  union {
    // Communicates the pc at which the most recent implicit exception occurred
    // from the signal handler to a deoptimization stub.
    address _implicit_exception_pc;

    // Communicates an alternative call target to an i2c stub from a JavaCall.
    address _alternate_call_target;
  } _jvmci;

  // Support for high precision, thread sensitive counters in JVMCI compiled code.
  jlong* _jvmci_counters;

 public:
  static jlong* _jvmci_old_thread_counters;
  static void collect_counters(jlong* array, int length);

  bool resize_counters(int current_size, int new_size);

  static bool resize_all_jvmci_counters(int new_size);

 private:
#endif // INCLUDE_JVMCI

  StackGuardState _stack_guard_state;

  // Precompute the limit of the stack as used in stack overflow checks.
  // We load it from here to simplify the stack overflow check in assembly.
  address _stack_overflow_limit;
  address _reserved_stack_activation;

  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
  // code)
  volatile oop     _exception_oop;           // Exception thrown in compiled code
  volatile address _exception_pc;            // PC where exception happened
  volatile address _exception_handler_pc;    // PC for handler of exception
  volatile int     _is_method_handle_return; // true (== 1) if the current exception PC is a MethodHandle call site.

 private:
  // support for JNI critical regions
  jint _jni_active_critical;  // count of entries into JNI critical region

  // Checked JNI: function name requires exception check
  char* _pending_jni_exception_check_fn;

  // For deadlock detection.
  int _depth_first_number;

  // JVMTI PopFrame support
  // This is set to popframe_pending to signal that top Java frame should be popped immediately
  int _popframe_condition;

  // If reallocation of scalar replaced objects fails, we throw OOM
  // and during exception propagation, pop the top
  // _frames_to_pop_failed_realloc frames, the ones that reference
  // failed reallocations.
  int _frames_to_pop_failed_realloc;

#ifndef PRODUCT
  int _jmp_ring_index;
  struct {
    // We use intptr_t instead of address so debugger doesn't try and display strings
    intptr_t _target;
    intptr_t _instruction;
    const char* _file;
    int _line;
  } _jmp_ring[jump_ring_buffer_size];
#endif // PRODUCT

  friend class VMThread;
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize();  // Initializes the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching_via_jni = false);  // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

#ifdef ASSERT
  // verify this JavaThread hasn't been published in the Threads::list yet
  void verify_not_published();
#endif

  // JNI function table getter/setter for JVMTI jni function table interception API.
  void set_jni_functions(struct JNINativeInterface_* functionTable) {
    _jni_environment.functions = functionTable;
  }
  struct JNINativeInterface_* get_jni_functions() {
    return (struct JNINativeInterface_ *)_jni_environment.functions;
  }
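
  // An illustrative sketch of the interception pattern (the agent-side code
  // is hypothetical; only set_jni_functions()/get_jni_functions() above are
  // part of this class):
  //
  //   static struct JNINativeInterface_ intercepted;  // must outlive its use
  //   intercepted = *jt->get_jni_functions();         // copy the current table
  //   intercepted.GetVersion = &my_GetVersion;        // my_GetVersion: user hook
  //   jt->set_jni_functions(&intercepted);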
| 1233 | |
| 1234 | // This function is called at thread creation to allow |
| 1235 | // platform specific thread variables to be initialized. |
| 1236 | void cache_global_variables(); |
| 1237 | |
| 1238 | // Executes Shutdown.shutdown() |
| 1239 | void invoke_shutdown_hooks(); |
| 1240 | |
| 1241 | // Cleanup on thread exit |
| 1242 | enum ExitType { |
| 1243 | normal_exit, |
| 1244 | jni_detach |
| 1245 | }; |
| 1246 | void exit(bool destroy_vm, ExitType exit_type = normal_exit); |
| 1247 | |
| 1248 | void cleanup_failed_attach_current_thread(bool is_daemon); |
| 1249 | |
| 1250 | // Testers |
| 1251 | virtual bool is_Java_thread() const { return true; } |
| 1252 | virtual bool can_call_java() const { return true; } |
| 1253 | |
| 1254 | // Thread oop. threadObj() can be NULL for initial JavaThread |
| 1255 | // (or for threads attached via JNI) |
| 1256 | oop threadObj() const { return _threadObj; } |
| 1257 | void set_threadObj(oop p) { _threadObj = p; } |
| 1258 | |
| 1259 | ThreadPriority java_priority() const; // Read from threadObj() |
| 1260 | |
| 1261 | // Prepare thread and add to priority queue. If a priority is |
| 1262 | // not specified, use the priority of the thread object. Threads_lock |
| 1263 | // must be held while this function is called. |
| 1264 | void prepare(jobject jni_thread, ThreadPriority prio=NoPriority); |
| 1265 | |
| 1266 | void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; } |
| 1267 | address saved_exception_pc() { return _saved_exception_pc; } |
| 1268 | |
| 1269 | |
| 1270 | ThreadFunction entry_point() const { return _entry_point; } |
| 1271 | |
| 1272 | // Allocates a new Java level thread object for this thread. thread_name may be NULL. |
| 1273 | void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS); |
| 1274 | |
| 1275 | // Last frame anchor routines |
| 1276 | |
| 1277 | JavaFrameAnchor* frame_anchor(void) { return &_anchor; } |
| 1278 | |
| 1279 | // last_Java_sp |
| 1280 | bool has_last_Java_frame() const { return _anchor.has_last_Java_frame(); } |
| 1281 | intptr_t* last_Java_sp() const { return _anchor.last_Java_sp(); } |
| 1282 | |
| 1283 | // last_Java_pc |
| 1284 | |
| 1285 | address last_Java_pc(void) { return _anchor.last_Java_pc(); } |
| 1286 | |
| 1287 | // Safepoint support |
| 1288 | inline JavaThreadState thread_state() const; |
| 1289 | inline void set_thread_state(JavaThreadState s); |
| 1290 | inline void set_thread_state_fence(JavaThreadState s); // fence after setting thread state |
| 1291 | inline ThreadSafepointState* safepoint_state() const; |
| 1292 | inline void set_safepoint_state(ThreadSafepointState* state); |
| 1293 | inline bool is_at_poll_safepoint(); |
| 1294 | |
| 1295 | // JavaThread termination and lifecycle support: |
| 1296 | void smr_delete(); |
| 1297 | bool on_thread_list() const { return _on_thread_list; } |
| 1298 | void set_on_thread_list() { _on_thread_list = true; } |
| 1299 | |
| 1300 | // thread has called JavaThread::exit() or is terminated |
| 1301 | bool is_exiting() const; |
| 1302 | // thread is terminated (no longer on the threads list); we compare |
| 1303 | // against the two non-terminated values so that a freed JavaThread |
| 1304 | // will also be considered terminated. |
| 1305 | bool check_is_terminated(TerminatedTypes l_terminated) const { |
| 1306 | return l_terminated != _not_terminated && l_terminated != _thread_exiting; |
| 1307 | } |
| 1308 | bool is_terminated() const; |
| 1309 | void set_terminated(TerminatedTypes t); |
| 1310 | // special for Threads::remove() which is static: |
| 1311 | void set_terminated_value(); |
| 1312 | void block_if_vm_exited(); |
| 1313 | |
| 1314 | bool doing_unsafe_access() { return _doing_unsafe_access; } |
| 1315 | void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; } |
| 1316 | |
| 1317 | bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; } |
| 1318 | void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; } |
| 1319 | |
| 1320 | inline void set_polling_page_release(void* poll_value); |
| 1321 | inline void set_polling_page(void* poll_value); |
| 1322 | inline volatile void* get_polling_page(); |
| 1323 | |
| 1324 | private: |
| 1325 | // Support for thread handshake operations |
| 1326 | HandshakeState _handshake; |
| 1327 | public: |
| 1328 | void set_handshake_operation(HandshakeOperation* op) { |
| 1329 | _handshake.set_operation(this, op); |
| 1330 | } |
| 1331 | |
| 1332 | bool has_handshake() const { |
| 1333 | return _handshake.has_operation(); |
| 1334 | } |
| 1335 | |
| 1336 | void handshake_process_by_self() { |
| 1337 | _handshake.process_by_self(this); |
| 1338 | } |
| 1339 | |
| 1340 | void handshake_process_by_vmthread() { |
| 1341 | _handshake.process_by_vmthread(this); |
| 1342 | } |
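  // A conceptual sketch of how these are driven (handshakes are normally set
  // up via Handshake::execute() elsewhere, not by calling these directly):
  //
  //   if (target->has_handshake()) {
  //     if (Thread::current() == target) {
  //       target->handshake_process_by_self();      // target at a handshake poll
  //     } else {
  //       target->handshake_process_by_vmthread();  // VM thread, target blocked
  //     }
  //   }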
| 1343 | |
| 1344 | // Suspend/resume support for JavaThread |
| 1345 | private: |
| 1346 | inline void set_ext_suspended(); |
| 1347 | inline void clear_ext_suspended(); |
| 1348 | |
| 1349 | public: |
| 1350 | void java_suspend(); // higher-level suspension logic called by the public APIs |
| 1351 | void java_resume(); // higher-level resume logic called by the public APIs |
| 1352 | int java_suspend_self(); // low-level self-suspension mechanics |
| 1353 | |
| 1354 | private: |
| 1355 | // mid-level wrapper around java_suspend_self to set up correct state and |
| 1356 | // check for a pending safepoint at the end |
| 1357 | void java_suspend_self_with_safepoint_check(); |
| 1358 | |
| 1359 | public: |
| 1360 | void check_and_wait_while_suspended() { |
| 1361 | assert(JavaThread::current() == this, "sanity check" ); |
| 1362 | |
| 1363 | bool do_self_suspend; |
| 1364 | do { |
| 1365 | // were we externally suspended while we were waiting? |
| 1366 | do_self_suspend = handle_special_suspend_equivalent_condition(); |
| 1367 | if (do_self_suspend) { |
| 1368 | // don't surprise the thread that suspended us by returning |
| 1369 | java_suspend_self(); |
| 1370 | set_suspend_equivalent(); |
| 1371 | } |
| 1372 | } while (do_self_suspend); |
| 1373 | } |
| 1374 | static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread); |
| 1375 | // Check for async exception in addition to safepoint and suspend request. |
| 1376 | static void check_special_condition_for_native_trans(JavaThread *thread); |
| 1377 | |
| 1378 | // Same as check_special_condition_for_native_trans but finishes the |
| 1379 | // transition into thread_in_Java mode so that it can potentially |
| 1380 | // block. |
| 1381 | static void check_special_condition_for_native_trans_and_transition(JavaThread *thread); |
| 1382 | |
| 1383 | bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits); |
| 1384 | bool is_ext_suspend_completed_with_lock(uint32_t *bits) { |
| 1385 | MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag); |
| 1386 | // Warning: is_ext_suspend_completed() may temporarily drop the |
| 1387 | // SR_lock to allow the thread to reach a stable thread state if |
| 1388 | // it is currently in a transient thread state. |
| 1389 | return is_ext_suspend_completed(false /* !called_by_wait */, |
| 1390 | SuspendRetryDelay, bits); |
| 1391 | } |
| 1392 | |
| 1393 | // We cannot allow wait_for_ext_suspend_completion() to run forever or |
| 1394 | // we could hang. SuspendRetryCount and SuspendRetryDelay are normally |
| 1395 | // passed as the count and delay parameters. Experiments with specific |
| 1396 | // calls to wait_for_ext_suspend_completion() can be done by passing |
| 1397 | // other values in the code. Experiments with all calls can be done |
| 1398 | // via the appropriate -XX options. |
| 1399 | bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits); |
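  // A sketch mirroring the typical call described above (debug_bits is a
  // caller-provided scratch word):
  //
  //   uint32_t debug_bits = 0;
  //   if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
  //                                                SuspendRetryDelay, &debug_bits)) {
  //     // the thread never reached a stable suspended state; caller must cope
  //   }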
| 1400 | |
| 1401 | // test for suspend - most (all?) of these should go away |
| 1402 | bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits); |
| 1403 | |
| 1404 | inline void set_external_suspend(); |
| 1405 | inline void clear_external_suspend(); |
| 1406 | |
| 1407 | bool is_external_suspend() const { |
| 1408 | return (_suspend_flags & _external_suspend) != 0; |
| 1409 | } |
  // Whenever a thread transitions from native to vm/java it must suspend
  // if an external or deopt suspend request is present.
| 1412 | bool is_suspend_after_native() const { |
| 1413 | return (_suspend_flags & (_external_suspend JFR_ONLY(| _trace_flag))) != 0; |
| 1414 | } |
| 1415 | |
| 1416 | // external suspend request is completed |
| 1417 | bool is_ext_suspended() const { |
| 1418 | return (_suspend_flags & _ext_suspended) != 0; |
| 1419 | } |
| 1420 | |
| 1421 | bool is_external_suspend_with_lock() const { |
| 1422 | MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag); |
| 1423 | return is_external_suspend(); |
| 1424 | } |
| 1425 | |
| 1426 | // Special method to handle a pending external suspend request |
| 1427 | // when a suspend equivalent condition lifts. |
| 1428 | bool handle_special_suspend_equivalent_condition() { |
| 1429 | assert(is_suspend_equivalent(), |
| 1430 | "should only be called in a suspend equivalence condition" ); |
| 1431 | MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag); |
| 1432 | bool ret = is_external_suspend(); |
| 1433 | if (!ret) { |
| 1434 | // not about to self-suspend so clear suspend equivalence |
| 1435 | clear_suspend_equivalent(); |
| 1436 | } |
| 1437 | // implied else: |
| 1438 | // We have a pending external suspend request so we leave the |
| 1439 | // suspend_equivalent flag set until java_suspend_self() sets |
| 1440 | // the ext_suspended flag and clears the suspend_equivalent |
    // flag. This ensures that wait_for_ext_suspend_completion()
| 1442 | // will return consistent values. |
| 1443 | return ret; |
| 1444 | } |
| 1445 | |
| 1446 | // utility methods to see if we are doing some kind of suspension |
| 1447 | bool is_being_ext_suspended() const { |
| 1448 | MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag); |
| 1449 | return is_ext_suspended() || is_external_suspend(); |
| 1450 | } |
| 1451 | |
| 1452 | bool is_suspend_equivalent() const { return _suspend_equivalent; } |
| 1453 | |
| 1454 | void set_suspend_equivalent() { _suspend_equivalent = true; } |
| 1455 | void clear_suspend_equivalent() { _suspend_equivalent = false; } |
| 1456 | |
| 1457 | // Thread.stop support |
| 1458 | void send_thread_stop(oop throwable); |
| 1459 | AsyncRequests clear_special_runtime_exit_condition() { |
| 1460 | AsyncRequests x = _special_runtime_exit_condition; |
| 1461 | _special_runtime_exit_condition = _no_async_condition; |
| 1462 | return x; |
| 1463 | } |
| 1464 | |
| 1465 | // Are any async conditions present? |
| 1466 | bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); } |
| 1467 | |
| 1468 | void check_and_handle_async_exceptions(bool check_unsafe_error = true); |
| 1469 | |
  // this is also used for self-suspension and async exception support
| 1471 | void handle_special_runtime_exit_condition(bool check_asyncs = true); |
| 1472 | |
| 1473 | // Return true if JavaThread has an asynchronous condition or |
| 1474 | // if external suspension is requested. |
| 1475 | bool has_special_runtime_exit_condition() { |
| 1476 | // Because we don't use is_external_suspend_with_lock |
| 1477 | // it is possible that we won't see an asynchronous external suspend |
| 1478 | // request that has just gotten started, i.e., SR_lock grabbed but |
| 1479 | // _external_suspend field change either not made yet or not visible |
| 1480 | // yet. However, this is okay because the request is asynchronous and |
| 1481 | // we will see the new flag value the next time through. It's also |
    // possible that the external suspend request is dropped after
    // we have checked is_external_suspend(); in that case we will recheck
    // its value under SR_lock in java_suspend_self().
| 1485 | return (_special_runtime_exit_condition != _no_async_condition) || |
| 1486 | is_external_suspend() || is_trace_suspend(); |
| 1487 | } |
| 1488 | |
| 1489 | void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; } |
| 1490 | |
| 1491 | inline void set_pending_async_exception(oop e); |
| 1492 | |
| 1493 | // Fast-locking support |
| 1494 | bool is_lock_owned(address adr) const; |
| 1495 | |
| 1496 | // Accessors for vframe array top |
  // The linked list of vframe arrays is sorted on sp. This means that when we
  // unpack, the head must contain the vframe array to unpack.
| 1499 | void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; } |
| 1500 | vframeArray* vframe_array_head() const { return _vframe_array_head; } |
| 1501 | |
| 1502 | // Side structure for deferring update of java frame locals until deopt occurs |
| 1503 | GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; } |
| 1504 | void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; } |
| 1505 | |
| 1506 | // These only really exist to make debugging deopt problems simpler |
| 1507 | |
| 1508 | void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; } |
| 1509 | vframeArray* vframe_array_last() const { return _vframe_array_last; } |
| 1510 | |
| 1511 | // The special resourceMark used during deoptimization |
| 1512 | |
| 1513 | void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; } |
| 1514 | DeoptResourceMark* deopt_mark(void) { return _deopt_mark; } |
| 1515 | |
| 1516 | intptr_t* must_deopt_id() { return _must_deopt_id; } |
| 1517 | void set_must_deopt_id(intptr_t* id) { _must_deopt_id = id; } |
| 1518 | void clear_must_deopt_id() { _must_deopt_id = NULL; } |
| 1519 | |
| 1520 | void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; } |
| 1521 | CompiledMethod* deopt_compiled_method() { return _deopt_nmethod; } |
| 1522 | |
| 1523 | Method* callee_target() const { return _callee_target; } |
| 1524 | void set_callee_target (Method* x) { _callee_target = x; } |
| 1525 | |
| 1526 | // Oop results of vm runtime calls |
| 1527 | oop vm_result() const { return _vm_result; } |
| 1528 | void set_vm_result (oop x) { _vm_result = x; } |
| 1529 | |
| 1530 | Metadata* vm_result_2() const { return _vm_result_2; } |
| 1531 | void set_vm_result_2 (Metadata* x) { _vm_result_2 = x; } |
| 1532 | |
| 1533 | MemRegion deferred_card_mark() const { return _deferred_card_mark; } |
| 1534 | void set_deferred_card_mark(MemRegion mr) { _deferred_card_mark = mr; } |
| 1535 | |
| 1536 | #if INCLUDE_JVMCI |
| 1537 | int pending_deoptimization() const { return _pending_deoptimization; } |
| 1538 | jlong pending_failed_speculation() const { return _pending_failed_speculation; } |
| 1539 | bool has_pending_monitorenter() const { return _pending_monitorenter; } |
| 1540 | void set_pending_monitorenter(bool b) { _pending_monitorenter = b; } |
| 1541 | void set_pending_deoptimization(int reason) { _pending_deoptimization = reason; } |
| 1542 | void set_pending_failed_speculation(jlong failed_speculation) { _pending_failed_speculation = failed_speculation; } |
| 1543 | void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; } |
| 1544 | void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be" ); _jvmci._alternate_call_target = a; } |
| 1545 | void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be" ); _jvmci._implicit_exception_pc = a; } |
| 1546 | |
| 1547 | virtual bool in_retryable_allocation() const { return _in_retryable_allocation; } |
| 1548 | void set_in_retryable_allocation(bool b) { _in_retryable_allocation = b; } |
| 1549 | #endif // INCLUDE_JVMCI |
| 1550 | |
| 1551 | // Exception handling for compiled methods |
| 1552 | oop exception_oop() const { return _exception_oop; } |
| 1553 | address exception_pc() const { return _exception_pc; } |
| 1554 | address exception_handler_pc() const { return _exception_handler_pc; } |
| 1555 | bool is_method_handle_return() const { return _is_method_handle_return == 1; } |
| 1556 | |
| 1557 | void set_exception_oop(oop o) { (void)const_cast<oop&>(_exception_oop = o); } |
| 1558 | void set_exception_pc(address a) { _exception_pc = a; } |
| 1559 | void set_exception_handler_pc(address a) { _exception_handler_pc = a; } |
| 1560 | void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; } |
| 1561 | |
| 1562 | void clear_exception_oop_and_pc() { |
| 1563 | set_exception_oop(NULL); |
| 1564 | set_exception_pc(NULL); |
| 1565 | } |
| 1566 | |
| 1567 | // Stack overflow support |
| 1568 | // |
| 1569 | // (small addresses) |
| 1570 | // |
| 1571 | // -- <-- stack_end() --- |
| 1572 | // | | |
| 1573 | // | red pages | |
| 1574 | // | | |
| 1575 | // -- <-- stack_red_zone_base() | |
| 1576 | // | | |
| 1577 | // | guard |
| 1578 | // | yellow pages zone |
| 1579 | // | | |
| 1580 | // | | |
| 1581 | // -- <-- stack_yellow_zone_base() | |
| 1582 | // | | |
| 1583 | // | | |
| 1584 | // | reserved pages | |
| 1585 | // | | |
| 1586 | // -- <-- stack_reserved_zone_base() --- --- |
| 1587 | // /|\ shadow <-- stack_overflow_limit() (somewhere in here) |
| 1588 | // | zone |
| 1589 | // \|/ size |
| 1590 | // some untouched memory --- |
| 1591 | // |
| 1592 | // |
| 1593 | // -- |
| 1594 | // | |
| 1595 | // | shadow zone |
| 1596 | // | |
| 1597 | // -- |
| 1598 | // x frame n |
| 1599 | // -- |
| 1600 | // x frame n-1 |
| 1601 | // x |
| 1602 | // -- |
| 1603 | // ... |
| 1604 | // |
| 1605 | // -- |
| 1606 | // x frame 0 |
| 1607 | // -- <-- stack_base() |
| 1608 | // |
| 1609 | // (large addresses) |
| 1610 | // |
| 1611 | |
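  // A sketch of how a platform signal handler might classify a faulting
  // address against these zones ('addr' being the fault address):
  //
  //   if (thread->in_stack_yellow_reserved_zone(addr)) {
  //     // recoverable overflow: throw StackOverflowError, possibly using
  //     // the reserved zone for annotated methods
  //   } else if (thread->in_stack_red_zone(addr)) {
  //     // unrecoverable overflow: report a fatal error
  //   }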
| 1612 | private: |
| 1613 | // These values are derived from flags StackRedPages, StackYellowPages, |
| 1614 | // StackReservedPages and StackShadowPages. The zone size is determined |
| 1615 | // ergonomically if page_size > 4K. |
| 1616 | static size_t _stack_red_zone_size; |
| 1617 | static size_t _stack_yellow_zone_size; |
| 1618 | static size_t _stack_reserved_zone_size; |
| 1619 | static size_t _stack_shadow_zone_size; |
| 1620 | public: |
| 1621 | inline size_t stack_available(address cur_sp); |
| 1622 | |
| 1623 | static size_t stack_red_zone_size() { |
| 1624 | assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized." ); |
| 1625 | return _stack_red_zone_size; |
| 1626 | } |
| 1627 | static void set_stack_red_zone_size(size_t s) { |
| 1628 | assert(is_aligned(s, os::vm_page_size()), |
| 1629 | "We can not protect if the red zone size is not page aligned." ); |
| 1630 | assert(_stack_red_zone_size == 0, "This should be called only once." ); |
| 1631 | _stack_red_zone_size = s; |
| 1632 | } |
| 1633 | address stack_red_zone_base() { |
| 1634 | return (address)(stack_end() + stack_red_zone_size()); |
| 1635 | } |
| 1636 | bool in_stack_red_zone(address a) { |
| 1637 | return a <= stack_red_zone_base() && a >= stack_end(); |
| 1638 | } |
| 1639 | |
| 1640 | static size_t stack_yellow_zone_size() { |
| 1641 | assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized." ); |
| 1642 | return _stack_yellow_zone_size; |
| 1643 | } |
| 1644 | static void set_stack_yellow_zone_size(size_t s) { |
| 1645 | assert(is_aligned(s, os::vm_page_size()), |
| 1646 | "We can not protect if the yellow zone size is not page aligned." ); |
| 1647 | assert(_stack_yellow_zone_size == 0, "This should be called only once." ); |
| 1648 | _stack_yellow_zone_size = s; |
| 1649 | } |
| 1650 | |
| 1651 | static size_t stack_reserved_zone_size() { |
| 1652 | // _stack_reserved_zone_size may be 0. This indicates the feature is off. |
| 1653 | return _stack_reserved_zone_size; |
| 1654 | } |
| 1655 | static void set_stack_reserved_zone_size(size_t s) { |
| 1656 | assert(is_aligned(s, os::vm_page_size()), |
| 1657 | "We can not protect if the reserved zone size is not page aligned." ); |
| 1658 | assert(_stack_reserved_zone_size == 0, "This should be called only once." ); |
| 1659 | _stack_reserved_zone_size = s; |
| 1660 | } |
| 1661 | address stack_reserved_zone_base() { |
| 1662 | return (address)(stack_end() + |
| 1663 | (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size())); |
| 1664 | } |
| 1665 | bool in_stack_reserved_zone(address a) { |
| 1666 | return (a <= stack_reserved_zone_base()) && |
| 1667 | (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size())); |
| 1668 | } |
| 1669 | |
| 1670 | static size_t stack_yellow_reserved_zone_size() { |
| 1671 | return _stack_yellow_zone_size + _stack_reserved_zone_size; |
| 1672 | } |
| 1673 | bool in_stack_yellow_reserved_zone(address a) { |
| 1674 | return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base()); |
| 1675 | } |
| 1676 | |
| 1677 | // Size of red + yellow + reserved zones. |
| 1678 | static size_t stack_guard_zone_size() { |
| 1679 | return stack_red_zone_size() + stack_yellow_reserved_zone_size(); |
| 1680 | } |
| 1681 | |
| 1682 | static size_t stack_shadow_zone_size() { |
| 1683 | assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized." ); |
| 1684 | return _stack_shadow_zone_size; |
| 1685 | } |
| 1686 | static void set_stack_shadow_zone_size(size_t s) { |
    // The shadow area is not allocated or protected, so
    // it need not be page aligned.
| 1689 | // But the stack bang currently assumes that it is a |
| 1690 | // multiple of page size. This guarantees that the bang |
| 1691 | // loop touches all pages in the shadow zone. |
| 1692 | // This can be guaranteed differently, as well. E.g., if |
| 1693 | // the page size is a multiple of 4K, banging in 4K steps |
| 1694 | // suffices to touch all pages. (Some pages are banged |
| 1695 | // several times, though.) |
| 1696 | assert(is_aligned(s, os::vm_page_size()), |
| 1697 | "Stack bang assumes multiple of page size." ); |
| 1698 | assert(_stack_shadow_zone_size == 0, "This should be called only once." ); |
| 1699 | _stack_shadow_zone_size = s; |
| 1700 | } |
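  // A conceptual sketch of the bang loop referred to above: touch one word
  // per page across the shadow zone ('sp' is the current stack pointer):
  //
  //   for (size_t off = 0; off < stack_shadow_zone_size(); off += os::vm_page_size()) {
  //     *(volatile char*)(sp - off) = 0;
  //   }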
| 1701 | |
| 1702 | void create_stack_guard_pages(); |
| 1703 | void remove_stack_guard_pages(); |
| 1704 | |
| 1705 | void enable_stack_reserved_zone(); |
| 1706 | void disable_stack_reserved_zone(); |
| 1707 | void enable_stack_yellow_reserved_zone(); |
| 1708 | void disable_stack_yellow_reserved_zone(); |
| 1709 | void enable_stack_red_zone(); |
| 1710 | void disable_stack_red_zone(); |
| 1711 | |
| 1712 | inline bool stack_guard_zone_unused(); |
| 1713 | inline bool stack_yellow_reserved_zone_disabled(); |
| 1714 | inline bool stack_reserved_zone_disabled(); |
| 1715 | inline bool stack_guards_enabled(); |
| 1716 | |
| 1717 | address reserved_stack_activation() const { return _reserved_stack_activation; } |
| 1718 | void set_reserved_stack_activation(address addr) { |
| 1719 | assert(_reserved_stack_activation == stack_base() |
| 1720 | || _reserved_stack_activation == NULL |
| 1721 | || addr == stack_base(), "Must not be set twice" ); |
| 1722 | _reserved_stack_activation = addr; |
| 1723 | } |
| 1724 | |
| 1725 | // Attempt to reguard the stack after a stack overflow may have occurred. |
| 1726 | // Returns true if (a) guard pages are not needed on this thread, (b) the |
| 1727 | // pages are already guarded, or (c) the pages were successfully reguarded. |
| 1728 | // Returns false if there is not enough stack space to reguard the pages, in |
| 1729 | // which case the caller should unwind a frame and try again. The argument |
| 1730 | // should be the caller's (approximate) sp. |
| 1731 | bool reguard_stack(address cur_sp); |
  // Similar to the above, but checks whether the current stack pointer is out
  // of the guard area, and reguards if possible.
| 1734 | bool reguard_stack(void); |
| 1735 | |
| 1736 | address stack_overflow_limit() { return _stack_overflow_limit; } |
| 1737 | void set_stack_overflow_limit() { |
| 1738 | _stack_overflow_limit = |
| 1739 | stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size()); |
| 1740 | } |
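  // A sketch of the check this limit enables in interpreter and compiled
  // prologues (conceptually; the real test is emitted as assembly):
  //
  //   if (current_sp < thread->stack_overflow_limit()) {
  //     // take the slow path and raise StackOverflowError
  //   }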
| 1741 | |
| 1742 | // Misc. accessors/mutators |
| 1743 | void set_do_not_unlock(void) { _do_not_unlock_if_synchronized = true; } |
| 1744 | void clr_do_not_unlock(void) { _do_not_unlock_if_synchronized = false; } |
| 1745 | bool do_not_unlock(void) { return _do_not_unlock_if_synchronized; } |
| 1746 | |
| 1747 | #ifndef PRODUCT |
| 1748 | void record_jump(address target, address instr, const char* file, int line); |
| 1749 | #endif // PRODUCT |
| 1750 | |
| 1751 | // For assembly stub generation |
| 1752 | static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj); } |
| 1753 | #ifndef PRODUCT |
| 1754 | static ByteSize jmp_ring_index_offset() { return byte_offset_of(JavaThread, _jmp_ring_index); } |
| 1755 | static ByteSize jmp_ring_offset() { return byte_offset_of(JavaThread, _jmp_ring); } |
| 1756 | #endif // PRODUCT |
| 1757 | static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment); } |
| 1758 | static ByteSize pending_jni_exception_check_fn_offset() { |
| 1759 | return byte_offset_of(JavaThread, _pending_jni_exception_check_fn); |
| 1760 | } |
| 1761 | static ByteSize last_Java_sp_offset() { |
| 1762 | return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset(); |
| 1763 | } |
| 1764 | static ByteSize last_Java_pc_offset() { |
| 1765 | return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset(); |
| 1766 | } |
| 1767 | static ByteSize frame_anchor_offset() { |
| 1768 | return byte_offset_of(JavaThread, _anchor); |
| 1769 | } |
| 1770 | static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target); } |
| 1771 | static ByteSize vm_result_offset() { return byte_offset_of(JavaThread, _vm_result); } |
| 1772 | static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); } |
| 1773 | static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); } |
| 1774 | static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); } |
| 1775 | static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); } |
| 1776 | #if INCLUDE_JVMCI |
| 1777 | static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); } |
| 1778 | static ByteSize pending_monitorenter_offset() { return byte_offset_of(JavaThread, _pending_monitorenter); } |
| 1779 | static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); } |
| 1780 | static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); } |
| 1781 | static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); } |
| 1782 | static ByteSize jvmci_counters_offset() { return byte_offset_of(JavaThread, _jvmci_counters); } |
| 1783 | #endif // INCLUDE_JVMCI |
| 1784 | static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop); } |
| 1785 | static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc); } |
| 1786 | static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); } |
| 1787 | static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); } |
| 1788 | static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); } |
| 1789 | static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); } |
| 1790 | static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); } |
| 1791 | static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); } |
| 1792 | |
| 1793 | static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); } |
| 1794 | static ByteSize should_post_on_exceptions_flag_offset() { |
| 1795 | return byte_offset_of(JavaThread, _should_post_on_exceptions_flag); |
| 1796 | } |
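  // A sketch of how stub generators use these offsets; illustrative x86-style
  // macro-assembler code, not taken verbatim from any stub:
  //
  //   __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
  //           _thread_in_native);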
| 1797 | |
| 1798 | // Returns the jni environment for this thread |
| 1799 | JNIEnv* jni_environment() { return &_jni_environment; } |
| 1800 | |
| 1801 | static JavaThread* thread_from_jni_environment(JNIEnv* env) { |
| 1802 | JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset())); |
| 1803 | // Only return NULL if thread is off the thread list; starting to |
| 1804 | // exit should not return NULL. |
| 1805 | if (thread_from_jni_env->is_terminated()) { |
| 1806 | thread_from_jni_env->block_if_vm_exited(); |
| 1807 | return NULL; |
| 1808 | } else { |
| 1809 | return thread_from_jni_env; |
| 1810 | } |
| 1811 | } |
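  // A sketch of typical use at a JNI entry point (error handling illustrative):
  //
  //   JavaThread* thread = JavaThread::thread_from_jni_environment(env);
  //   if (thread == NULL) return JNI_ERR;  // thread terminated / off the thread list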
| 1812 | |
| 1813 | // JNI critical regions. These can nest. |
| 1814 | bool in_critical() { return _jni_active_critical > 0; } |
| 1815 | bool in_last_critical() { return _jni_active_critical == 1; } |
| 1816 | inline void enter_critical(); |
| 1817 | void exit_critical() { |
| 1818 | assert(Thread::current() == this, "this must be current thread" ); |
| 1819 | _jni_active_critical--; |
| 1820 | assert(_jni_active_critical >= 0, "JNI critical nesting problem?" ); |
| 1821 | } |
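  // A conceptual sketch of bracketing a critical region (these may nest, as
  // in Get/ReleasePrimitiveArrayCritical-style code paths):
  //
  //   thread->enter_critical();    // in_critical() is now true
  //   // ... access raw data; no blocking or GC-unsafe operations here ...
  //   thread->exit_critical();     // leaving the last region may unblock GC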
| 1822 | |
  // Checked JNI: is the programmer required to check for exceptions, and if so,
  // which function name. Returning to a Java frame should implicitly clear the
  // pending check; this is done for Native->Java transitions (i.e. user JNI code).
  // VM->Java transitions are not cleared, as JNI code enclosed within a
  // ThreadToNativeFromVM is expected to make proper exception checks (i.e. VM internal).
| 1828 | bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; } |
| 1829 | void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; } |
| 1830 | const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; } |
| 1831 | void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; } |
| 1832 | |
| 1833 | // For deadlock detection |
| 1834 | int depth_first_number() { return _depth_first_number; } |
| 1835 | void set_depth_first_number(int dfn) { _depth_first_number = dfn; } |
| 1836 | |
| 1837 | private: |
| 1838 | void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; } |
| 1839 | |
| 1840 | public: |
| 1841 | MonitorChunk* monitor_chunks() const { return _monitor_chunks; } |
| 1842 | void add_monitor_chunk(MonitorChunk* chunk); |
| 1843 | void remove_monitor_chunk(MonitorChunk* chunk); |
| 1844 | bool in_deopt_handler() const { return _in_deopt_handler > 0; } |
| 1845 | void inc_in_deopt_handler() { _in_deopt_handler++; } |
| 1846 | void dec_in_deopt_handler() { |
| 1847 | assert(_in_deopt_handler > 0, "mismatched deopt nesting" ); |
| 1848 | if (_in_deopt_handler > 0) { // robustness |
| 1849 | _in_deopt_handler--; |
| 1850 | } |
| 1851 | } |
| 1852 | |
| 1853 | private: |
| 1854 | void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; } |
| 1855 | |
| 1856 | public: |
| 1857 | |
| 1858 | // Frame iteration; calls the function f for all frames on the stack |
| 1859 | void frames_do(void f(frame*, const RegisterMap*)); |
| 1860 | |
| 1861 | // Memory operations |
| 1862 | void oops_do(OopClosure* f, CodeBlobClosure* cf); |
| 1863 | |
| 1864 | // Sweeper operations |
| 1865 | virtual void nmethods_do(CodeBlobClosure* cf); |
| 1866 | |
| 1867 | // RedefineClasses Support |
| 1868 | void metadata_do(MetadataClosure* f); |
| 1869 | |
| 1870 | // Debug method asserting thread states are correct during a handshake operation. |
| 1871 | DEBUG_ONLY(void verify_states_for_handshake();) |
| 1872 | |
| 1873 | // Misc. operations |
| 1874 | char* name() const { return (char*)get_thread_name(); } |
| 1875 | void print_on(outputStream* st, bool print_extended_info) const; |
| 1876 | void print_on(outputStream* st) const { print_on(st, false); } |
| 1877 | void print() const; |
| 1878 | void print_value(); |
| 1879 | void print_thread_state_on(outputStream*) const PRODUCT_RETURN; |
| 1880 | void print_thread_state() const PRODUCT_RETURN; |
| 1881 | void print_on_error(outputStream* st, char* buf, int buflen) const; |
| 1882 | void print_name_on_error(outputStream* st, char* buf, int buflen) const; |
| 1883 | void verify(); |
| 1884 | const char* get_thread_name() const; |
| 1885 | protected: |
| 1886 | // factor out low-level mechanics for use in both normal and error cases |
| 1887 | virtual const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const; |
| 1888 | public: |
| 1889 | const char* get_threadgroup_name() const; |
| 1890 | const char* get_parent_name() const; |
| 1891 | |
| 1892 | // Accessing frames |
| 1893 | frame last_frame() { |
| 1894 | _anchor.make_walkable(this); |
| 1895 | return pd_last_frame(); |
| 1896 | } |
| 1897 | javaVFrame* last_java_vframe(RegisterMap* reg_map); |
| 1898 | |
| 1899 | // Returns method at 'depth' java or native frames down the stack |
| 1900 | // Used for security checks |
| 1901 | Klass* security_get_caller_class(int depth); |
| 1902 | |
| 1903 | // Print stack trace in external format |
| 1904 | void print_stack_on(outputStream* st); |
| 1905 | void print_stack() { print_stack_on(tty); } |
| 1906 | |
| 1907 | // Print stack traces in various internal formats |
| 1908 | void trace_stack() PRODUCT_RETURN; |
| 1909 | void trace_stack_from(vframe* start_vf) PRODUCT_RETURN; |
| 1910 | void trace_frames() PRODUCT_RETURN; |
| 1911 | void trace_oops() PRODUCT_RETURN; |
| 1912 | |
| 1913 | // Print an annotated view of the stack frames |
| 1914 | void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN; |
| 1915 | void validate_frame_layout() { |
| 1916 | print_frame_layout(0, true); |
| 1917 | } |
| 1918 | |
| 1919 | // Returns the number of stack frames on the stack |
| 1920 | int depth() const; |
| 1921 | |
| 1922 | // Function for testing deoptimization |
| 1923 | void deoptimize(); |
| 1924 | void make_zombies(); |
| 1925 | |
| 1926 | void deoptimized_wrt_marked_nmethods(); |
| 1927 | |
| 1928 | public: |
| 1929 | // Returns the running thread as a JavaThread |
| 1930 | static inline JavaThread* current(); |
| 1931 | |
| 1932 | // Returns the active Java thread. Do not use this if you know you are calling |
| 1933 | // from a JavaThread, as it's slower than JavaThread::current. If called from |
| 1934 | // the VMThread, it also returns the JavaThread that instigated the VMThread's |
| 1935 | // operation. You may not want that either. |
| 1936 | static JavaThread* active(); |
| 1937 | |
| 1938 | inline CompilerThread* as_CompilerThread(); |
| 1939 | |
| 1940 | protected: |
| 1941 | virtual void pre_run(); |
| 1942 | virtual void run(); |
| 1943 | void thread_main_inner(); |
| 1944 | virtual void post_run(); |
| 1945 | |
| 1946 | |
| 1947 | private: |
| 1948 | GrowableArray<oop>* _array_for_gc; |
| 1949 | public: |
| 1950 | |
| 1951 | void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; } |
| 1952 | |
| 1953 | public: |
| 1954 | // Thread local information maintained by JVMTI. |
| 1955 | void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; } |
| 1956 | // A JvmtiThreadState is lazily allocated. This jvmti_thread_state() |
  // getter is used to get this JavaThread's JvmtiThreadState if it has
  // one, which means NULL can be returned. JvmtiThreadState::state_for()
| 1959 | // is used to get the specified JavaThread's JvmtiThreadState if it has |
| 1960 | // one or it allocates a new JvmtiThreadState for the JavaThread and |
| 1961 | // returns it. JvmtiThreadState::state_for() will return NULL only if |
| 1962 | // the specified JavaThread is exiting. |
| 1963 | JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; } |
| 1964 | static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); } |
| 1965 | |
| 1966 | // JVMTI PopFrame support |
| 1967 | // Setting and clearing popframe_condition |
| 1968 | // All of these enumerated values are bits. popframe_pending |
| 1969 | // indicates that a PopFrame() has been requested and not yet been |
  // completed. popframe_processing indicates that the PopFrame() is in
| 1971 | // the process of being completed. popframe_force_deopt_reexecution_bit |
| 1972 | // indicates that special handling is required when returning to a |
| 1973 | // deoptimized caller. |
| 1974 | enum PopCondition { |
| 1975 | popframe_inactive = 0x00, |
| 1976 | popframe_pending_bit = 0x01, |
| 1977 | popframe_processing_bit = 0x02, |
| 1978 | popframe_force_deopt_reexecution_bit = 0x04 |
| 1979 | }; |
| 1980 | PopCondition popframe_condition() { return (PopCondition) _popframe_condition; } |
| 1981 | void set_popframe_condition(PopCondition c) { _popframe_condition = c; } |
| 1982 | void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; } |
| 1983 | void clear_popframe_condition() { _popframe_condition = popframe_inactive; } |
| 1984 | static ByteSize popframe_condition_offset() { return byte_offset_of(JavaThread, _popframe_condition); } |
| 1985 | bool has_pending_popframe() { return (popframe_condition() & popframe_pending_bit) != 0; } |
| 1986 | bool popframe_forcing_deopt_reexecution() { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; } |
| 1987 | void clear_popframe_forcing_deopt_reexecution() { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; } |
| 1988 | #ifdef CC_INTERP |
| 1989 | bool pop_frame_pending(void) { return ((_popframe_condition & popframe_pending_bit) != 0); } |
| 1990 | void clr_pop_frame_pending(void) { _popframe_condition = popframe_inactive; } |
| 1991 | bool pop_frame_in_process(void) { return ((_popframe_condition & popframe_processing_bit) != 0); } |
| 1992 | void set_pop_frame_in_process(void) { _popframe_condition |= popframe_processing_bit; } |
| 1993 | void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; } |
| 1994 | #endif |
| 1995 | |
| 1996 | int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; } |
| 1997 | void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; } |
| 1998 | void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; } |
| 1999 | |
| 2000 | private: |
| 2001 | // Saved incoming arguments to popped frame. |
| 2002 | // Used only when popped interpreted frame returns to deoptimized frame. |
| 2003 | void* _popframe_preserved_args; |
| 2004 | int _popframe_preserved_args_size; |
| 2005 | |
| 2006 | public: |
| 2007 | void popframe_preserve_args(ByteSize size_in_bytes, void* start); |
| 2008 | void* popframe_preserved_args(); |
| 2009 | ByteSize popframe_preserved_args_size(); |
| 2010 | WordSize popframe_preserved_args_size_in_words(); |
| 2011 | void popframe_free_preserved_args(); |
| 2012 | |
| 2013 | |
| 2014 | private: |
| 2015 | JvmtiThreadState *_jvmti_thread_state; |
| 2016 | |
| 2017 | // Used by the interpreter in fullspeed mode for frame pop, method |
| 2018 | // entry, method exit and single stepping support. This field is |
| 2019 | // only set to non-zero by the VM_EnterInterpOnlyMode VM operation. |
| 2020 | // It can be set to zero asynchronously (i.e., without a VM operation |
| 2021 | // or a lock) so we have to be very careful. |
| 2022 | int _interp_only_mode; |
| 2023 | |
| 2024 | public: |
| 2025 | // used by the interpreter for fullspeed debugging support (see above) |
| 2026 | static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); } |
| 2027 | bool is_interp_only_mode() { return (_interp_only_mode != 0); } |
| 2028 | int get_interp_only_mode() { return _interp_only_mode; } |
| 2029 | void increment_interp_only_mode() { ++_interp_only_mode; } |
| 2030 | void decrement_interp_only_mode() { --_interp_only_mode; } |
| 2031 | |
| 2032 | // support for cached flag that indicates whether exceptions need to be posted for this thread |
| 2033 | // if this is false, we can avoid deoptimizing when events are thrown |
| 2034 | // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything |
| 2035 | private: |
| 2036 | int _should_post_on_exceptions_flag; |
| 2037 | |
| 2038 | public: |
| 2039 | int should_post_on_exceptions_flag() { return _should_post_on_exceptions_flag; } |
| 2040 | void set_should_post_on_exceptions_flag(int val) { _should_post_on_exceptions_flag = val; } |
| 2041 | |
| 2042 | private: |
| 2043 | ThreadStatistics *_thread_stat; |
| 2044 | |
| 2045 | public: |
| 2046 | ThreadStatistics* get_thread_stat() const { return _thread_stat; } |
| 2047 | |
| 2048 | // Return a blocker object for which this thread is blocked parking. |
| 2049 | oop current_park_blocker(); |
| 2050 | |
| 2051 | private: |
| 2052 | static size_t _stack_size_at_create; |
| 2053 | |
| 2054 | public: |
| 2055 | static inline size_t stack_size_at_create(void) { |
| 2056 | return _stack_size_at_create; |
| 2057 | } |
| 2058 | static inline void set_stack_size_at_create(size_t value) { |
| 2059 | _stack_size_at_create = value; |
| 2060 | } |
| 2061 | |
| 2062 | // Machine dependent stuff |
| 2063 | #include OS_CPU_HEADER(thread) |
| 2064 | |
| 2065 | public: |
| 2066 | void set_blocked_on_compilation(bool value) { |
| 2067 | _blocked_on_compilation = value; |
| 2068 | } |
| 2069 | |
| 2070 | bool blocked_on_compilation() { |
| 2071 | return _blocked_on_compilation; |
| 2072 | } |
| 2073 | protected: |
| 2074 | bool _blocked_on_compilation; |
| 2075 | |
| 2076 | |
| 2077 | // JSR166 per-thread parker |
| 2078 | private: |
| 2079 | Parker* _parker; |
| 2080 | public: |
| 2081 | Parker* parker() { return _parker; } |
| 2082 | |
| 2083 | // Biased locking support |
| 2084 | private: |
| 2085 | GrowableArray<MonitorInfo*>* _cached_monitor_info; |
| 2086 | public: |
| 2087 | GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; } |
| 2088 | void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; } |
| 2089 | |
| 2090 | // clearing/querying jni attach status |
| 2091 | bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; } |
| 2092 | bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; } |
| 2093 | inline void set_done_attaching_via_jni(); |
| 2094 | |
| 2095 | // Stack dump assistance: |
| 2096 | // Track the class we want to initialize but for which we have to wait |
| 2097 | // on its init_lock() because it is already being initialized. |
| 2098 | void set_class_to_be_initialized(InstanceKlass* k); |
| 2099 | InstanceKlass* class_to_be_initialized() const; |
| 2100 | |
| 2101 | private: |
| 2102 | InstanceKlass* _class_to_be_initialized; |
| 2103 | |
| 2104 | }; |
| 2105 | |
| 2106 | // Inline implementation of JavaThread::current |
| 2107 | inline JavaThread* JavaThread::current() { |
| 2108 | Thread* thread = Thread::current(); |
| 2109 | assert(thread->is_Java_thread(), "just checking" ); |
| 2110 | return (JavaThread*)thread; |
| 2111 | } |
| 2112 | |
| 2113 | inline CompilerThread* JavaThread::as_CompilerThread() { |
| 2114 | assert(is_Compiler_thread(), "just checking" ); |
| 2115 | return (CompilerThread*)this; |
| 2116 | } |
| 2117 | |
| 2118 | // Dedicated thread to sweep the code cache |
| 2119 | class CodeCacheSweeperThread : public JavaThread { |
| 2120 | CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper |
| 2121 | public: |
| 2122 | CodeCacheSweeperThread(); |
| 2123 | // Track the nmethod currently being scanned by the sweeper |
| 2124 | void set_scanned_compiled_method(CompiledMethod* cm) { |
| 2125 | assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value" ); |
| 2126 | _scanned_compiled_method = cm; |
| 2127 | } |
| 2128 | |
| 2129 | // Hide sweeper thread from external view. |
| 2130 | bool is_hidden_from_external_view() const { return true; } |
| 2131 | |
| 2132 | bool is_Code_cache_sweeper_thread() const { return true; } |
| 2133 | |
| 2134 | // Prevent GC from unloading _scanned_compiled_method |
| 2135 | void oops_do(OopClosure* f, CodeBlobClosure* cf); |
| 2136 | void nmethods_do(CodeBlobClosure* cf); |
| 2137 | }; |
| 2138 | |
| 2139 | // A thread used for Compilation. |
| 2140 | class CompilerThread : public JavaThread { |
| 2141 | friend class VMStructs; |
| 2142 | private: |
| 2143 | CompilerCounters* _counters; |
| 2144 | |
| 2145 | ciEnv* _env; |
| 2146 | CompileLog* _log; |
| 2147 | CompileTask* volatile _task; // print_threads_compiling can read this concurrently. |
| 2148 | CompileQueue* _queue; |
| 2149 | BufferBlob* _buffer_blob; |
| 2150 | |
| 2151 | AbstractCompiler* _compiler; |
| 2152 | TimeStamp _idle_time; |
| 2153 | |
| 2154 | public: |
| 2155 | |
| 2156 | static CompilerThread* current(); |
| 2157 | |
| 2158 | CompilerThread(CompileQueue* queue, CompilerCounters* counters); |
| 2159 | ~CompilerThread(); |
| 2160 | |
| 2161 | bool is_Compiler_thread() const { return true; } |
| 2162 | |
| 2163 | virtual bool can_call_java() const; |
| 2164 | |
| 2165 | // Hide native compiler threads from external view. |
| 2166 | bool is_hidden_from_external_view() const { return !can_call_java(); } |
| 2167 | |
| 2168 | void set_compiler(AbstractCompiler* c) { _compiler = c; } |
| 2169 | AbstractCompiler* compiler() const { return _compiler; } |
| 2170 | |
| 2171 | CompileQueue* queue() const { return _queue; } |
| 2172 | CompilerCounters* counters() const { return _counters; } |
| 2173 | |
| 2174 | // Get/set the thread's compilation environment. |
| 2175 | ciEnv* env() { return _env; } |
| 2176 | void set_env(ciEnv* env) { _env = env; } |
| 2177 | |
| 2178 | BufferBlob* get_buffer_blob() const { return _buffer_blob; } |
| 2179 | void set_buffer_blob(BufferBlob* b) { _buffer_blob = b; } |
| 2180 | |
| 2181 | // Get/set the thread's logging information |
| 2182 | CompileLog* log() { return _log; } |
| 2183 | void init_log(CompileLog* log) { |
| 2184 | // Set once, for good. |
| 2185 | assert(_log == NULL, "set only once" ); |
| 2186 | _log = log; |
| 2187 | } |
| 2188 | |
| 2189 | void start_idle_timer() { _idle_time.update(); } |
| 2190 | jlong idle_time_millis() { |
| 2191 | return TimeHelper::counter_to_millis(_idle_time.ticks_since_update()); |
| 2192 | } |
| 2193 | |
| 2194 | #ifndef PRODUCT |
| 2195 | private: |
| 2196 | IdealGraphPrinter *_ideal_graph_printer; |
| 2197 | public: |
| 2198 | IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; } |
| 2199 | void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; } |
| 2200 | #endif |
| 2201 | |
| 2202 | // Get/set the thread's current task |
| 2203 | CompileTask* task() { return _task; } |
| 2204 | void set_task(CompileTask* task) { _task = task; } |
| 2205 | }; |
| 2206 | |
| 2207 | inline CompilerThread* CompilerThread::current() { |
| 2208 | return JavaThread::current()->as_CompilerThread(); |
| 2209 | } |
| 2210 | |
// The active thread queue. It also keeps track of the currently used
// thread priorities.
| 2213 | class Threads: AllStatic { |
| 2214 | friend class VMStructs; |
| 2215 | private: |
| 2216 | static int _number_of_threads; |
| 2217 | static int _number_of_non_daemon_threads; |
| 2218 | static int _return_code; |
| 2219 | static uintx _thread_claim_token; |
| 2220 | #ifdef ASSERT |
| 2221 | static bool _vm_complete; |
| 2222 | #endif |
| 2223 | |
| 2224 | static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS); |
| 2225 | static void initialize_jsr292_core_classes(TRAPS); |
| 2226 | |
| 2227 | public: |
| 2228 | // Thread management |
| 2229 | // force_daemon is a concession to JNI, where we may need to add a |
| 2230 | // thread to the thread list before allocating its thread object |
| 2231 | static void add(JavaThread* p, bool force_daemon = false); |
| 2232 | static void remove(JavaThread* p, bool is_daemon); |
| 2233 | static void non_java_threads_do(ThreadClosure* tc); |
| 2234 | static void java_threads_do(ThreadClosure* tc); |
| 2235 | static void java_threads_and_vm_thread_do(ThreadClosure* tc); |
| 2236 | static void threads_do(ThreadClosure* tc); |
| 2237 | static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc); |
| 2238 | |
| 2239 | // Initializes the vm and creates the vm thread |
| 2240 | static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain); |
| 2241 | static void convert_vm_init_libraries_to_agents(); |
| 2242 | static void create_vm_init_libraries(); |
| 2243 | static void create_vm_init_agents(); |
| 2244 | static void shutdown_vm_agents(); |
| 2245 | static bool destroy_vm(); |
| 2246 | // Supported VM versions via JNI |
| 2247 | // Includes JNI_VERSION_1_1 |
| 2248 | static jboolean is_supported_jni_version_including_1_1(jint version); |
| 2249 | // Does not include JNI_VERSION_1_1 |
| 2250 | static jboolean is_supported_jni_version(jint version); |
| 2251 | |
| 2252 | // The "thread claim token" provides a way for threads to be claimed |
| 2253 | // by parallel worker tasks. |
| 2254 | // |
| 2255 | // Each thread contains a "token" field. A task will claim the |
| 2256 | // thread only if its token is different from the global token, |
| 2257 | // which is updated by calling change_thread_claim_token(). When |
  // a thread is claimed, its token is set to the global token value
| 2259 | // so other threads in the same iteration pass won't claim it. |
| 2260 | // |
| 2261 | // For this to work change_thread_claim_token() needs to be called |
| 2262 | // exactly once in sequential code before starting parallel tasks |
| 2263 | // that should claim threads. |
| 2264 | // |
| 2265 | // New threads get their token set to 0 and change_thread_claim_token() |
| 2266 | // never sets the global token to 0. |
| 2267 | static uintx thread_claim_token() { return _thread_claim_token; } |
| 2268 | static void change_thread_claim_token(); |
| 2269 | static void assert_all_threads_claimed() NOT_DEBUG_RETURN; |
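  // A sketch of the claiming protocol described above, as sequential setup
  // code around a parallel phase might use it:
  //
  //   Threads::change_thread_claim_token();   // once, before starting workers
  //   // ... workers run and claim threads, e.g. via
  //   //     Threads::possibly_parallel_threads_do(true /* is_par */, &cl) ...
  //   Threads::assert_all_threads_claimed();  // debug-only post-condition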
| 2270 | |
| 2271 | // Apply "f->do_oop" to all root oops in all threads. |
| 2272 | // This version may only be called by sequential code. |
| 2273 | static void oops_do(OopClosure* f, CodeBlobClosure* cf); |
| 2274 | // This version may be called by sequential or parallel code. |
| 2275 | static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf); |
| 2276 | |
| 2277 | // Apply "f->do_oop" to roots in all threads that |
| 2278 | // are part of compiled frames |
| 2279 | static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf); |
| 2280 | |
| 2281 | static void convert_hcode_pointers(); |
| 2282 | static void restore_hcode_pointers(); |
| 2283 | |
| 2284 | // Sweeper |
| 2285 | static void nmethods_do(CodeBlobClosure* cf); |
| 2286 | |
| 2287 | // RedefineClasses support |
| 2288 | static void metadata_do(MetadataClosure* f); |
| 2289 | static void metadata_handles_do(void f(Metadata*)); |
| 2290 | |
| 2291 | #ifdef ASSERT |
| 2292 | static bool is_vm_complete() { return _vm_complete; } |
| 2293 | #endif // ASSERT |
| 2294 | |
| 2295 | // Verification |
| 2296 | static void verify(); |
| 2297 | static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks, bool print_extended_info); |
| 2298 | static void print(bool print_stacks, bool internal_format) { |
| 2299 | // this function is only used by debug.cpp |
| 2300 | print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */, false /* simple format */); |
| 2301 | } |
| 2302 | static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen); |
| 2303 | static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf, |
| 2304 | int buflen, bool* found_current); |
| 2305 | static void print_threads_compiling(outputStream* st, char* buf, int buflen, bool short_form = false); |
| 2306 | |
| 2307 | // Get Java threads that are waiting to enter a monitor. |
| 2308 | static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list, |
| 2309 | int count, address monitor); |
| 2310 | |
| 2311 | // Get owning Java thread from the monitor's owner field. |
| 2312 | static JavaThread *owning_thread_from_monitor_owner(ThreadsList * t_list, |
| 2313 | address owner); |
| 2314 | |
| 2315 | // Number of threads on the active threads list |
| 2316 | static int number_of_threads() { return _number_of_threads; } |
| 2317 | // Number of non-daemon threads on the active threads list |
| 2318 | static int number_of_non_daemon_threads() { return _number_of_non_daemon_threads; } |
| 2319 | |
| 2320 | // Deoptimizes all frames tied to marked nmethods |
| 2321 | static void deoptimized_wrt_marked_nmethods(); |
| 2322 | |
| 2323 | struct Test; // For private gtest access. |
| 2324 | }; |
| 2325 | |
| 2326 | |
| 2327 | // Thread iterator |
| 2328 | class ThreadClosure: public StackObj { |
| 2329 | public: |
| 2330 | virtual void do_thread(Thread* thread) = 0; |
| 2331 | }; |
| 2332 | |
| 2333 | class SignalHandlerMark: public StackObj { |
| 2334 | private: |
| 2335 | Thread* _thread; |
| 2336 | public: |
| 2337 | SignalHandlerMark(Thread* t) { |
| 2338 | _thread = t; |
| 2339 | if (_thread) _thread->enter_signal_handler(); |
| 2340 | } |
| 2341 | ~SignalHandlerMark() { |
| 2342 | if (_thread) _thread->leave_signal_handler(); |
| 2343 | _thread = NULL; |
| 2344 | } |
| 2345 | }; |
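// A sketch of typical use inside a platform signal handler (the handler
// signature is illustrative):
//
//   void my_signal_handler(int sig, siginfo_t* info, void* uc) {
//     SignalHandlerMark shm(Thread::current_or_null());
//     // ... handle the signal ...
//   }   // destructor calls leave_signal_handler()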
| 2346 | |
| 2347 | |
| 2348 | #endif // SHARE_RUNTIME_THREAD_HPP |
| 2349 | |