/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_INLINE_HPP
#define SHARE_RUNTIME_THREAD_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"

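// set_suspend_flag() and clear_suspend_flag() update the _suspend_flags bit set
// with a compare-and-swap loop, retrying until no other thread has modified the
// flags concurrently.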
inline void Thread::set_suspend_flag(SuspendFlags f) {
  uint32_t flags;
  do {
    flags = _suspend_flags;
  }
  while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags);
}
inline void Thread::clear_suspend_flag(SuspendFlags f) {
  uint32_t flags;
  do {
    flags = _suspend_flags;
  }
  while (Atomic::cmpxchg((flags & ~f), &_suspend_flags, flags) != flags);
}

inline void Thread::set_has_async_exception() {
  set_suspend_flag(_has_async_exception);
}
inline void Thread::clear_has_async_exception() {
  clear_suspend_flag(_has_async_exception);
}
inline void Thread::set_critical_native_unlock() {
  set_suspend_flag(_critical_native_unlock);
}
inline void Thread::clear_critical_native_unlock() {
  clear_suspend_flag(_critical_native_unlock);
}
inline void Thread::set_trace_flag() {
  set_suspend_flag(_trace_flag);
}
inline void Thread::clear_trace_flag() {
  clear_suspend_flag(_trace_flag);
}

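// Total bytes allocated by this thread, including bytes used in the current,
// not yet retired TLAB.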
inline jlong Thread::cooked_allocated_bytes() {
  jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
  if (UseTLAB) {
    size_t used_bytes = tlab().used_bytes();
    if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
      // Comparing used_bytes with the maximum allowed size ensures that we
      // don't add the used bytes from a semi-initialized TLAB and end up with
      // incorrect values. There is still a race between incrementing
      // _allocated_bytes and clearing the TLAB that might cause double
      // counting in rare cases.
      return allocated_bytes + used_bytes;
    }
  }
  return allocated_bytes;
}

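// Thread-SMR support: the hazard pointer publishes the ThreadsList this thread
// is currently using so that the list is not freed while it is still in use.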
inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
  return (ThreadsList*)Atomic::cmpxchg(exchange_value, &_threads_hazard_ptr, compare_value);
}

inline ThreadsList* Thread::get_threads_hazard_ptr() {
  return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr);
}

inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
  OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list);
}

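// External suspend/resume support, used for example by JVMTI SuspendThread.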
inline void JavaThread::set_ext_suspended() {
  set_suspend_flag(_ext_suspended);
}
inline void JavaThread::clear_ext_suspended() {
  clear_suspend_flag(_ext_suspended);
}

inline void JavaThread::set_external_suspend() {
  set_suspend_flag(_external_suspend);
}
inline void JavaThread::clear_external_suspend() {
  clear_suspend_flag(_external_suspend);
}

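// Install an asynchronous exception and flag the thread so that the pending
// exception is noticed and thrown at a later point.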
inline void JavaThread::set_pending_async_exception(oop e) {
  _pending_async_exception = e;
  _special_runtime_exit_condition = _async_exception;
  set_has_async_exception();
}

inline JavaThreadState JavaThread::thread_state() const {
#if defined(PPC64) || defined(AARCH64)
  // Use membars when accessing volatile _thread_state. See
  // Threads::create_vm() for size checks.
  return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
#else
  return _thread_state;
#endif
}

inline void JavaThread::set_thread_state(JavaThreadState s) {
#if defined(PPC64) || defined(AARCH64)
  // Use membars when accessing volatile _thread_state. See
  // Threads::create_vm() for size checks.
  OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
#else
  _thread_state = s;
#endif
}

inline void JavaThread::set_thread_state_fence(JavaThreadState s) {
  set_thread_state(s);
  OrderAccess::fence();
}

inline ThreadSafepointState* JavaThread::safepoint_state() const {
  return _safepoint_state;
}

inline void JavaThread::set_safepoint_state(ThreadSafepointState* state) {
  _safepoint_state = state;
}

inline bool JavaThread::is_at_poll_safepoint() {
  return _safepoint_state->is_at_poll_safepoint();
}

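// Track the nesting depth of JNI critical regions (GetPrimitiveArrayCritical /
// GetStringCritical) entered by this thread.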
inline void JavaThread::enter_critical() {
  assert(Thread::current() == this ||
         (Thread::current()->is_VM_thread() &&
          SafepointSynchronize::is_synchronizing()),
         "this must be current thread or synchronizing");
  _jni_active_critical++;
}

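// Mark the JNI attach of this thread as complete; the fence publishes the new
// attach state to other threads.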
inline void JavaThread::set_done_attaching_via_jni() {
  _jni_attach_state = _attached_via_jni;
  OrderAccess::fence();
}

inline bool JavaThread::stack_guard_zone_unused() {
  return _stack_guard_state == stack_guard_unused;
}

inline bool JavaThread::stack_yellow_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_yellow_reserved_disabled;
}

inline bool JavaThread::stack_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_reserved_disabled;
}

inline size_t JavaThread::stack_available(address cur_sp) {
  // This code assumes java stacks grow down
  address low_addr; // Limit on the address for deepest stack depth
  if (_stack_guard_state == stack_guard_unused) {
    low_addr = stack_end();
  } else {
    low_addr = stack_reserved_zone_base();
  }
  return cur_sp > low_addr ? cur_sp - low_addr : 0;
}

inline bool JavaThread::stack_guards_enabled() {
#ifdef ASSERT
  if (os::uses_stack_guard_pages() &&
      !(DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
  }
#endif
  return _stack_guard_state == stack_guard_enabled;
}

// The release makes sure this store is done after storing the handshake
// operation or global state.
inline void JavaThread::set_polling_page_release(void* poll_value) {
  OrderAccess::release_store(polling_page_addr(), poll_value);
}

// Caller is responsible for using a memory barrier if needed.
inline void JavaThread::set_polling_page(void* poll_value) {
  *polling_page_addr() = poll_value;
}

// The acquire makes sure reading of the polling page is done before reading
// the handshake operation or the global state.
inline volatile void* JavaThread::get_polling_page() {
  return OrderAccess::load_acquire(polling_page_addr());
}

inline bool JavaThread::is_exiting() const {
  // Use load-acquire so that setting of _terminated by
  // JavaThread::exit() is seen more quickly.
  TerminatedTypes l_terminated = (TerminatedTypes)
      OrderAccess::load_acquire((volatile jint *) &_terminated);
  return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
}

inline bool JavaThread::is_terminated() const {
  // Use load-acquire so that setting of _terminated by
  // JavaThread::exit() is seen more quickly.
  TerminatedTypes l_terminated = (TerminatedTypes)
      OrderAccess::load_acquire((volatile jint *) &_terminated);
  return check_is_terminated(l_terminated);
}

inline void JavaThread::set_terminated(TerminatedTypes t) {
  // use release-store so the setting of _terminated is seen more quickly
  OrderAccess::release_store((volatile jint *) &_terminated, (jint) t);
}

// special for Threads::remove() which is static:
inline void JavaThread::set_terminated_value() {
  // use release-store so the setting of _terminated is seen more quickly
  OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
}

// Allow tracking of class initialization monitor use
inline void JavaThread::set_class_to_be_initialized(InstanceKlass* k) {
  assert((k == NULL && _class_to_be_initialized != NULL) ||
         (k != NULL && _class_to_be_initialized == NULL), "incorrect usage");
  assert(this == Thread::current(), "Only the current thread can set this field");
  _class_to_be_initialized = k;
}

inline InstanceKlass* JavaThread::class_to_be_initialized() const {
  return _class_to_be_initialized;
}

#endif // SHARE_RUNTIME_THREAD_INLINE_HPP