| 1 | // Copyright (c) 2015, the Dart project authors.  Please see the AUTHORS file | 
|---|
| 2 | // for details. All rights reserved. Use of this source code is governed by a | 
|---|
| 3 | // BSD-style license that can be found in the LICENSE file. | 
|---|
| 4 |  | 
|---|
| 5 | #ifndef RUNTIME_VM_THREAD_H_ | 
|---|
| 6 | #define RUNTIME_VM_THREAD_H_ | 
|---|
| 7 |  | 
|---|
| 8 | #if defined(SHOULD_NOT_INCLUDE_RUNTIME) | 
|---|
| 9 | #error "Should not include runtime" | 
|---|
| 10 | #endif | 
|---|
| 11 |  | 
|---|
| 12 | #include "include/dart_api.h" | 
|---|
| 13 | #include "platform/assert.h" | 
|---|
| 14 | #include "platform/atomic.h" | 
|---|
| 15 | #include "platform/safe_stack.h" | 
|---|
| 16 | #include "vm/bitfield.h" | 
|---|
| 17 | #include "vm/compiler/runtime_api.h" | 
|---|
| 18 | #include "vm/constants.h" | 
|---|
| 19 | #include "vm/globals.h" | 
|---|
| 20 | #include "vm/handles.h" | 
|---|
| 21 | #include "vm/heap/pointer_block.h" | 
|---|
| 22 | #include "vm/os_thread.h" | 
|---|
| 23 | #include "vm/random.h" | 
|---|
| 24 | #include "vm/runtime_entry_list.h" | 
|---|
| 25 | #include "vm/thread_stack_resource.h" | 
|---|
| 26 | #include "vm/thread_state.h" | 
|---|
| 27 | namespace dart { | 
|---|
| 28 |  | 
|---|
| 29 | class AbstractType; | 
|---|
| 30 | class ApiLocalScope; | 
|---|
| 31 | class Array; | 
|---|
| 32 | class CompilerState; | 
|---|
| 33 | class Class; | 
|---|
| 34 | class Code; | 
|---|
| 35 | class Bytecode; | 
|---|
| 36 | class Error; | 
|---|
| 37 | class ExceptionHandlers; | 
|---|
| 38 | class Field; | 
|---|
| 39 | class FieldTable; | 
|---|
| 40 | class Function; | 
|---|
| 41 | class GrowableObjectArray; | 
|---|
| 42 | class HandleScope; | 
|---|
| 43 | class Heap; | 
|---|
| 44 | class HierarchyInfo; | 
|---|
| 45 | class Instance; | 
|---|
| 46 | class Interpreter; | 
|---|
| 47 | class Isolate; | 
|---|
| 48 | class IsolateGroup; | 
|---|
| 49 | class Library; | 
|---|
| 50 | class Object; | 
|---|
| 51 | class OSThread; | 
|---|
| 52 | class JSONObject; | 
|---|
| 53 | class PcDescriptors; | 
|---|
| 54 | class RuntimeEntry; | 
|---|
| 55 | class Smi; | 
|---|
| 56 | class StackResource; | 
|---|
| 57 | class StackTrace; | 
|---|
| 58 | class String; | 
|---|
| 59 | class TimelineStream; | 
|---|
| 60 | class TypeArguments; | 
|---|
| 61 | class TypeParameter; | 
|---|
| 62 | class TypeUsageInfo; | 
|---|
| 63 | class Zone; | 
|---|
| 64 |  | 
|---|
| 65 | namespace compiler { | 
|---|
| 66 | namespace target { | 
|---|
| 67 | class Thread; | 
|---|
| 68 | }  // namespace target | 
|---|
| 69 | }  // namespace compiler | 
|---|
| 70 |  | 
|---|
// X-macro list of object types for which each Thread keeps a reusable
// handle; V is applied once per type name.
#define REUSABLE_HANDLE_LIST(V)                                                \
  V(AbstractType)                                                              \
  V(Array)                                                                     \
  V(Class)                                                                     \
  V(Code)                                                                      \
  V(Bytecode)                                                                  \
  V(Error)                                                                     \
  V(ExceptionHandlers)                                                         \
  V(Field)                                                                     \
  V(Function)                                                                  \
  V(GrowableObjectArray)                                                       \
  V(Instance)                                                                  \
  V(Library)                                                                   \
  V(Object)                                                                    \
  V(PcDescriptors)                                                             \
  V(Smi)                                                                       \
  V(String)                                                                    \
  V(TypeArguments)                                                             \
  V(TypeParameter)
|---|
| 90 |  | 
|---|
// X-macro list of VM stub Code objects cached in each Thread, as
// V(type, member_name, init_expr, default_init_value).
// NOTE: re_throw_stub_ previously cached StubCode::Throw(); fixed to cache
// StubCode::ReThrow(), matching its member name like every other entry.
#define CACHED_VM_STUBS_LIST(V)                                                \
  V(CodePtr, write_barrier_code_, StubCode::WriteBarrier().raw(), nullptr)     \
  V(CodePtr, array_write_barrier_code_, StubCode::ArrayWriteBarrier().raw(),   \
    nullptr)                                                                   \
  V(CodePtr, fix_callers_target_code_, StubCode::FixCallersTarget().raw(),     \
    nullptr)                                                                   \
  V(CodePtr, fix_allocation_stub_code_,                                        \
    StubCode::FixAllocationStubTarget().raw(), nullptr)                        \
  V(CodePtr, invoke_dart_code_stub_, StubCode::InvokeDartCode().raw(),         \
    nullptr)                                                                   \
  V(CodePtr, invoke_dart_code_from_bytecode_stub_,                             \
    StubCode::InvokeDartCodeFromBytecode().raw(), nullptr)                     \
  V(CodePtr, call_to_runtime_stub_, StubCode::CallToRuntime().raw(), nullptr)  \
  V(CodePtr, null_error_shared_without_fpu_regs_stub_,                         \
    StubCode::NullErrorSharedWithoutFPURegs().raw(), nullptr)                  \
  V(CodePtr, null_error_shared_with_fpu_regs_stub_,                            \
    StubCode::NullErrorSharedWithFPURegs().raw(), nullptr)                     \
  V(CodePtr, null_arg_error_shared_without_fpu_regs_stub_,                     \
    StubCode::NullArgErrorSharedWithoutFPURegs().raw(), nullptr)               \
  V(CodePtr, null_arg_error_shared_with_fpu_regs_stub_,                        \
    StubCode::NullArgErrorSharedWithFPURegs().raw(), nullptr)                  \
  V(CodePtr, null_cast_error_shared_without_fpu_regs_stub_,                    \
    StubCode::NullCastErrorSharedWithoutFPURegs().raw(), nullptr)              \
  V(CodePtr, null_cast_error_shared_with_fpu_regs_stub_,                       \
    StubCode::NullCastErrorSharedWithFPURegs().raw(), nullptr)                 \
  V(CodePtr, range_error_shared_without_fpu_regs_stub_,                        \
    StubCode::RangeErrorSharedWithoutFPURegs().raw(), nullptr)                 \
  V(CodePtr, range_error_shared_with_fpu_regs_stub_,                           \
    StubCode::RangeErrorSharedWithFPURegs().raw(), nullptr)                    \
  V(CodePtr, allocate_mint_with_fpu_regs_stub_,                                \
    StubCode::AllocateMintSharedWithFPURegs().raw(), nullptr)                  \
  V(CodePtr, allocate_mint_without_fpu_regs_stub_,                             \
    StubCode::AllocateMintSharedWithoutFPURegs().raw(), nullptr)               \
  V(CodePtr, allocate_object_stub_, StubCode::AllocateObject().raw(), nullptr) \
  V(CodePtr, allocate_object_parameterized_stub_,                              \
    StubCode::AllocateObjectParameterized().raw(), nullptr)                    \
  V(CodePtr, allocate_object_slow_stub_, StubCode::AllocateObjectSlow().raw(), \
    nullptr)                                                                   \
  V(CodePtr, stack_overflow_shared_without_fpu_regs_stub_,                     \
    StubCode::StackOverflowSharedWithoutFPURegs().raw(), nullptr)              \
  V(CodePtr, stack_overflow_shared_with_fpu_regs_stub_,                        \
    StubCode::StackOverflowSharedWithFPURegs().raw(), nullptr)                 \
  V(CodePtr, switchable_call_miss_stub_, StubCode::SwitchableCallMiss().raw(), \
    nullptr)                                                                   \
  V(CodePtr, throw_stub_, StubCode::Throw().raw(), nullptr)                    \
  V(CodePtr, re_throw_stub_, StubCode::ReThrow().raw(), nullptr)               \
  V(CodePtr, assert_boolean_stub_, StubCode::AssertBoolean().raw(), nullptr)   \
  V(CodePtr, optimize_stub_, StubCode::OptimizeFunction().raw(), nullptr)      \
  V(CodePtr, deoptimize_stub_, StubCode::Deoptimize().raw(), nullptr)          \
  V(CodePtr, lazy_deopt_from_return_stub_,                                     \
    StubCode::DeoptimizeLazyFromReturn().raw(), nullptr)                       \
  V(CodePtr, lazy_deopt_from_throw_stub_,                                      \
    StubCode::DeoptimizeLazyFromThrow().raw(), nullptr)                        \
  V(CodePtr, slow_type_test_stub_, StubCode::SlowTypeTest().raw(), nullptr)    \
  V(CodePtr, lazy_specialize_type_test_stub_,                                  \
    StubCode::LazySpecializeTypeTest().raw(), nullptr)                         \
  V(CodePtr, enter_safepoint_stub_, StubCode::EnterSafepoint().raw(), nullptr) \
  V(CodePtr, exit_safepoint_stub_, StubCode::ExitSafepoint().raw(), nullptr)   \
  V(CodePtr, call_native_through_safepoint_stub_,                              \
    StubCode::CallNativeThroughSafepoint().raw(), nullptr)
|---|
| 151 |  | 
|---|
// X-macro list of VM-global objects (other than stubs) cached in each
// Thread, as V(type, member_name, init_expr, default_init_value).
#define CACHED_NON_VM_STUB_LIST(V)                                             \
  V(ObjectPtr, object_null_, Object::null(), nullptr)                          \
  V(BoolPtr, bool_true_, Object::bool_true().raw(), nullptr)                   \
  V(BoolPtr, bool_false_, Object::bool_false().raw(), nullptr)
|---|
| 156 |  | 
|---|
| 157 | // List of VM-global objects/addresses cached in each Thread object. | 
|---|
| 158 | // Important: constant false must immediately follow constant true. | 
|---|
| 159 | #define CACHED_VM_OBJECTS_LIST(V)                                              \ | 
|---|
| 160 | CACHED_NON_VM_STUB_LIST(V)                                                   \ | 
|---|
| 161 | CACHED_VM_STUBS_LIST(V) | 
|---|
| 162 |  | 
|---|
// This assertion marks places which assume that boolean false immediately
// follows bool true in the CACHED_VM_OBJECTS_LIST.
#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE()                                  \
  ASSERT((Thread::bool_true_offset() + kWordSize) ==                           \
         Thread::bool_false_offset());
|---|
| 168 |  | 
|---|
// X-macro list of VM stub entry-point addresses cached in each Thread, as
// V(type, member_name, init_expr, default_init_value).
#define CACHED_VM_STUBS_ADDRESSES_LIST(V)                                      \
  V(uword, write_barrier_entry_point_, StubCode::WriteBarrier().EntryPoint(),  \
    0)                                                                         \
  V(uword, array_write_barrier_entry_point_,                                   \
    StubCode::ArrayWriteBarrier().EntryPoint(), 0)                             \
  V(uword, call_to_runtime_entry_point_,                                       \
    StubCode::CallToRuntime().EntryPoint(), 0)                                 \
  V(uword, allocate_mint_with_fpu_regs_entry_point_,                           \
    StubCode::AllocateMintSharedWithFPURegs().EntryPoint(), 0)                 \
  V(uword, allocate_mint_without_fpu_regs_entry_point_,                        \
    StubCode::AllocateMintSharedWithoutFPURegs().EntryPoint(), 0)              \
  V(uword, allocate_object_entry_point_,                                       \
    StubCode::AllocateObject().EntryPoint(), 0)                                \
  V(uword, allocate_object_parameterized_entry_point_,                         \
    StubCode::AllocateObjectParameterized().EntryPoint(), 0)                   \
  V(uword, allocate_object_slow_entry_point_,                                  \
    StubCode::AllocateObjectSlow().EntryPoint(), 0)                            \
  V(uword, stack_overflow_shared_without_fpu_regs_entry_point_,                \
    StubCode::StackOverflowSharedWithoutFPURegs().EntryPoint(), 0)             \
  V(uword, stack_overflow_shared_with_fpu_regs_entry_point_,                   \
    StubCode::StackOverflowSharedWithFPURegs().EntryPoint(), 0)                \
  V(uword, megamorphic_call_checked_entry_,                                    \
    StubCode::MegamorphicCall().EntryPoint(), 0)                               \
  V(uword, switchable_call_miss_entry_,                                        \
    StubCode::SwitchableCallMiss().EntryPoint(), 0)                            \
  V(uword, optimize_entry_, StubCode::OptimizeFunction().EntryPoint(), 0)      \
  V(uword, deoptimize_entry_, StubCode::Deoptimize().EntryPoint(), 0)          \
  V(uword, call_native_through_safepoint_entry_point_,                         \
    StubCode::CallNativeThroughSafepoint().EntryPoint(), 0)                    \
  V(uword, slow_type_test_entry_point_, StubCode::SlowTypeTest().EntryPoint(), \
    0)
|---|
| 200 |  | 
|---|
// X-macro list of raw VM-global addresses cached in each Thread (stub entry
// points plus native wrappers and FP-constant addresses), as
// V(type, member_name, init_expr, default_init_value).
// Consistency fix: predefined_symbols_address_'s default init used NULL
// where every sibling entry in these lists uses nullptr/0.
#define CACHED_ADDRESSES_LIST(V)                                               \
  CACHED_VM_STUBS_ADDRESSES_LIST(V)                                            \
  V(uword, bootstrap_native_wrapper_entry_point_,                              \
    NativeEntry::BootstrapNativeCallWrapperEntry(), 0)                         \
  V(uword, no_scope_native_wrapper_entry_point_,                               \
    NativeEntry::NoScopeNativeCallWrapperEntry(), 0)                           \
  V(uword, auto_scope_native_wrapper_entry_point_,                             \
    NativeEntry::AutoScopeNativeCallWrapperEntry(), 0)                         \
  V(uword, interpret_call_entry_point_, RuntimeEntry::InterpretCallEntry(), 0) \
  V(StringPtr*, predefined_symbols_address_, Symbols::PredefinedAddress(),     \
    nullptr)                                                                   \
  V(uword, double_nan_address_, reinterpret_cast<uword>(&double_nan_constant), \
    0)                                                                         \
  V(uword, double_negate_address_,                                             \
    reinterpret_cast<uword>(&double_negate_constant), 0)                       \
  V(uword, double_abs_address_, reinterpret_cast<uword>(&double_abs_constant), \
    0)                                                                         \
  V(uword, float_not_address_, reinterpret_cast<uword>(&float_not_constant),   \
    0)                                                                         \
  V(uword, float_negate_address_,                                              \
    reinterpret_cast<uword>(&float_negate_constant), 0)                        \
  V(uword, float_absolute_address_,                                            \
    reinterpret_cast<uword>(&float_absolute_constant), 0)                      \
  V(uword, float_zerow_address_,                                               \
    reinterpret_cast<uword>(&float_zerow_constant), 0)
|---|
| 226 |  | 
|---|
// All per-Thread cached constants: VM objects followed by raw addresses.
#define CACHED_CONSTANTS_LIST(V)                                               \
  CACHED_VM_OBJECTS_LIST(V)                                                    \
  CACHED_ADDRESSES_LIST(V)
|---|
| 230 |  | 
|---|
// Policy controlling whether stack frames are validated when they are
// walked (e.g. during safepoint transitions).
enum class ValidationPolicy {
  kValidateFrames = 0,
  kDontValidateFrames = 1,
};
|---|
| 235 |  | 
|---|
| 236 | // A VM thread; may be executing Dart code or performing helper tasks like | 
|---|
| 237 | // garbage collection or compilation. The Thread structure associated with | 
|---|
| 238 | // a thread is allocated by EnsureInit before entering an isolate, and destroyed | 
|---|
| 239 | // automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp | 
|---|
| 240 | // must currently be called manually (issue 23474). | 
|---|
| 241 | class Thread : public ThreadState { | 
|---|
| 242 | public: | 
|---|
| 243 | // The kind of task this thread is performing. Sampled by the profiler. | 
|---|
| 244 | enum TaskKind { | 
|---|
| 245 | kUnknownTask = 0x0, | 
|---|
| 246 | kMutatorTask = 0x1, | 
|---|
| 247 | kCompilerTask = 0x2, | 
|---|
| 248 | kMarkerTask = 0x4, | 
|---|
| 249 | kSweeperTask = 0x8, | 
|---|
| 250 | kCompactorTask = 0x10, | 
|---|
| 251 | kScavengerTask = 0x20, | 
|---|
| 252 | }; | 
|---|
| 253 | // Converts a TaskKind to its corresponding C-String name. | 
|---|
| 254 | static const char* TaskKindToCString(TaskKind kind); | 
|---|
| 255 |  | 
|---|
| 256 | ~Thread(); | 
|---|
| 257 |  | 
|---|
| 258 | // The currently executing thread, or NULL if not yet initialized. | 
|---|
| 259 | static Thread* Current() { | 
|---|
| 260 | #if defined(HAS_C11_THREAD_LOCAL) | 
|---|
| 261 | return static_cast<Thread*>(OSThread::CurrentVMThread()); | 
|---|
| 262 | #else | 
|---|
| 263 | BaseThread* thread = OSThread::GetCurrentTLS(); | 
|---|
| 264 | if (thread == NULL || thread->is_os_thread()) { | 
|---|
| 265 | return NULL; | 
|---|
| 266 | } | 
|---|
| 267 | return static_cast<Thread*>(thread); | 
|---|
| 268 | #endif | 
|---|
| 269 | } | 
|---|
| 270 |  | 
|---|
| 271 | // Makes the current thread enter 'isolate'. | 
|---|
| 272 | static bool EnterIsolate(Isolate* isolate); | 
|---|
| 273 | // Makes the current thread exit its isolate. | 
|---|
| 274 | static void ExitIsolate(); | 
|---|
| 275 |  | 
|---|
| 276 | // A VM thread other than the main mutator thread can enter an isolate as a | 
|---|
| 277 | // "helper" to gain limited concurrent access to the isolate. One example is | 
|---|
| 278 | // SweeperTask (which uses the class table, which is copy-on-write). | 
|---|
| 279 | // TODO(koda): Properly synchronize heap access to expand allowed operations. | 
|---|
| 280 | static bool EnterIsolateAsHelper(Isolate* isolate, | 
|---|
| 281 | TaskKind kind, | 
|---|
| 282 | bool bypass_safepoint = false); | 
|---|
| 283 | static void ExitIsolateAsHelper(bool bypass_safepoint = false); | 
|---|
| 284 |  | 
|---|
| 285 | static bool EnterIsolateGroupAsHelper(IsolateGroup* isolate_group, | 
|---|
| 286 | TaskKind kind, | 
|---|
| 287 | bool bypass_safepoint); | 
|---|
| 288 | static void ExitIsolateGroupAsHelper(bool bypass_safepoint); | 
|---|
| 289 |  | 
|---|
| 290 | // Empties the store buffer block into the isolate. | 
|---|
| 291 | void ReleaseStoreBuffer(); | 
|---|
| 292 | void AcquireMarkingStack(); | 
|---|
| 293 | void ReleaseMarkingStack(); | 
|---|
| 294 |  | 
|---|
| 295 | void SetStackLimit(uword value); | 
|---|
| 296 | void ClearStackLimit(); | 
|---|
| 297 |  | 
|---|
| 298 | // Access to the current stack limit for generated code. Either the true OS | 
|---|
| 299 | // thread's stack limit minus some headroom, or a special value to trigger | 
|---|
| 300 | // interrupts. | 
|---|
| 301 | uword stack_limit_address() const { | 
|---|
| 302 | return reinterpret_cast<uword>(&stack_limit_); | 
|---|
| 303 | } | 
|---|
| 304 | static intptr_t stack_limit_offset() { | 
|---|
| 305 | return OFFSET_OF(Thread, stack_limit_); | 
|---|
| 306 | } | 
|---|
| 307 |  | 
|---|
| 308 | // The true stack limit for this OS thread. | 
|---|
| 309 | static intptr_t saved_stack_limit_offset() { | 
|---|
| 310 | return OFFSET_OF(Thread, saved_stack_limit_); | 
|---|
| 311 | } | 
|---|
| 312 | uword saved_stack_limit() const { return saved_stack_limit_; } | 
|---|
| 313 |  | 
|---|
| 314 | #if defined(USING_SAFE_STACK) | 
|---|
| 315 | uword saved_safestack_limit() const { return saved_safestack_limit_; } | 
|---|
| 316 | void set_saved_safestack_limit(uword limit) { | 
|---|
| 317 | saved_safestack_limit_ = limit; | 
|---|
| 318 | } | 
|---|
| 319 | #endif | 
|---|
| 320 | static uword saved_shadow_call_stack_offset() { | 
|---|
| 321 | return OFFSET_OF(Thread, saved_shadow_call_stack_); | 
|---|
| 322 | } | 
|---|
| 323 |  | 
|---|
| 324 | // Stack overflow flags | 
|---|
| 325 | enum { | 
|---|
| 326 | kOsrRequest = 0x1,  // Current stack overflow caused by OSR request. | 
|---|
| 327 | }; | 
|---|
| 328 |  | 
|---|
| 329 | uword write_barrier_mask() const { return write_barrier_mask_; } | 
|---|
| 330 |  | 
|---|
| 331 | static intptr_t write_barrier_mask_offset() { | 
|---|
| 332 | return OFFSET_OF(Thread, write_barrier_mask_); | 
|---|
| 333 | } | 
|---|
| 334 | static intptr_t stack_overflow_flags_offset() { | 
|---|
| 335 | return OFFSET_OF(Thread, stack_overflow_flags_); | 
|---|
| 336 | } | 
|---|
| 337 |  | 
|---|
| 338 | int32_t IncrementAndGetStackOverflowCount() { | 
|---|
| 339 | return ++stack_overflow_count_; | 
|---|
| 340 | } | 
|---|
| 341 |  | 
|---|
| 342 | static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) { | 
|---|
| 343 | return fpu_regs | 
|---|
| 344 | ? stack_overflow_shared_with_fpu_regs_entry_point_offset() | 
|---|
| 345 | : stack_overflow_shared_without_fpu_regs_entry_point_offset(); | 
|---|
| 346 | } | 
|---|
| 347 |  | 
|---|
| 348 | static intptr_t safepoint_state_offset() { | 
|---|
| 349 | return OFFSET_OF(Thread, safepoint_state_); | 
|---|
| 350 | } | 
|---|
| 351 |  | 
|---|
| 352 | static intptr_t callback_code_offset() { | 
|---|
| 353 | return OFFSET_OF(Thread, ffi_callback_code_); | 
|---|
| 354 | } | 
|---|
| 355 |  | 
|---|
  // Tag state is maintained on transitions.
  enum {
    // Always true in generated state.
    kDidNotExit = 0,
    // The VM exited the generated state through FFI.
    // This can be true in both native and VM state.
    kExitThroughFfi = 1,
    // The VM exited the generated state through a runtime call.
    // This can be true in both native and VM state.
    kExitThroughRuntimeCall = 2,
  };
|---|
| 367 |  | 
|---|
| 368 | static intptr_t exit_through_ffi_offset() { | 
|---|
| 369 | return OFFSET_OF(Thread, exit_through_ffi_); | 
|---|
| 370 | } | 
|---|
| 371 |  | 
|---|
| 372 | TaskKind task_kind() const { return task_kind_; } | 
|---|
| 373 |  | 
|---|
| 374 | // Retrieves and clears the stack overflow flags.  These are set by | 
|---|
| 375 | // the generated code before the slow path runtime routine for a | 
|---|
| 376 | // stack overflow is called. | 
|---|
| 377 | uword GetAndClearStackOverflowFlags(); | 
|---|
| 378 |  | 
|---|
| 379 | // Interrupt bits. | 
|---|
| 380 | enum { | 
|---|
| 381 | kVMInterrupt = 0x1,  // Internal VM checks: safepoints, store buffers, etc. | 
|---|
| 382 | kMessageInterrupt = 0x2,  // An interrupt to process an out of band message. | 
|---|
| 383 |  | 
|---|
| 384 | kInterruptsMask = (kVMInterrupt | kMessageInterrupt), | 
|---|
| 385 | }; | 
|---|
| 386 |  | 
|---|
| 387 | void ScheduleInterrupts(uword interrupt_bits); | 
|---|
| 388 | void ScheduleInterruptsLocked(uword interrupt_bits); | 
|---|
| 389 | ErrorPtr HandleInterrupts(); | 
|---|
| 390 | uword GetAndClearInterrupts(); | 
|---|
| 391 | bool HasScheduledInterrupts() const { | 
|---|
| 392 | return (stack_limit_ & kInterruptsMask) != 0; | 
|---|
| 393 | } | 
|---|
| 394 |  | 
|---|
| 395 | // Monitor corresponding to this thread. | 
|---|
| 396 | Monitor* thread_lock() const { return &thread_lock_; } | 
|---|
| 397 |  | 
|---|
| 398 | // The reusable api local scope for this thread. | 
|---|
| 399 | ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; } | 
|---|
| 400 | void set_api_reusable_scope(ApiLocalScope* value) { | 
|---|
| 401 | ASSERT(value == NULL || api_reusable_scope_ == NULL); | 
|---|
| 402 | api_reusable_scope_ = value; | 
|---|
| 403 | } | 
|---|
| 404 |  | 
|---|
| 405 | // The api local scope for this thread, this where all local handles | 
|---|
| 406 | // are allocated. | 
|---|
| 407 | ApiLocalScope* api_top_scope() const { return api_top_scope_; } | 
|---|
| 408 | void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; } | 
|---|
| 409 | static intptr_t api_top_scope_offset() { | 
|---|
| 410 | return OFFSET_OF(Thread, api_top_scope_); | 
|---|
| 411 | } | 
|---|
| 412 |  | 
|---|
| 413 | void EnterApiScope(); | 
|---|
| 414 | void ExitApiScope(); | 
|---|
| 415 |  | 
|---|
| 416 | // The isolate that this thread is operating on, or nullptr if none. | 
|---|
| 417 | Isolate* isolate() const { return isolate_; } | 
|---|
| 418 | static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); } | 
|---|
| 419 |  | 
|---|
| 420 | // The isolate group that this thread is operating on, or nullptr if none. | 
|---|
| 421 | IsolateGroup* isolate_group() const { return isolate_group_; } | 
|---|
| 422 |  | 
|---|
| 423 | static intptr_t field_table_values_offset() { | 
|---|
| 424 | return OFFSET_OF(Thread, field_table_values_); | 
|---|
| 425 | } | 
|---|
| 426 |  | 
|---|
| 427 | bool IsMutatorThread() const { return is_mutator_thread_; } | 
|---|
| 428 |  | 
|---|
| 429 | bool CanCollectGarbage() const; | 
|---|
| 430 |  | 
|---|
| 431 | // Offset of Dart TimelineStream object. | 
|---|
| 432 | static intptr_t dart_stream_offset() { | 
|---|
| 433 | return OFFSET_OF(Thread, dart_stream_); | 
|---|
| 434 | } | 
|---|
| 435 |  | 
|---|
| 436 | // Is |this| executing Dart code? | 
|---|
| 437 | bool IsExecutingDartCode() const; | 
|---|
| 438 |  | 
|---|
| 439 | // Has |this| exited Dart code? | 
|---|
| 440 | bool HasExitedDartCode() const; | 
|---|
| 441 |  | 
|---|
| 442 | CompilerState& compiler_state() { | 
|---|
| 443 | ASSERT(compiler_state_ != nullptr); | 
|---|
| 444 | return *compiler_state_; | 
|---|
| 445 | } | 
|---|
| 446 |  | 
|---|
| 447 | HierarchyInfo* hierarchy_info() const { | 
|---|
| 448 | ASSERT(isolate_ != NULL); | 
|---|
| 449 | return hierarchy_info_; | 
|---|
| 450 | } | 
|---|
| 451 |  | 
|---|
| 452 | void set_hierarchy_info(HierarchyInfo* value) { | 
|---|
| 453 | ASSERT(isolate_ != NULL); | 
|---|
| 454 | ASSERT((hierarchy_info_ == NULL && value != NULL) || | 
|---|
| 455 | (hierarchy_info_ != NULL && value == NULL)); | 
|---|
| 456 | hierarchy_info_ = value; | 
|---|
| 457 | } | 
|---|
| 458 |  | 
|---|
| 459 | TypeUsageInfo* type_usage_info() const { | 
|---|
| 460 | ASSERT(isolate_ != NULL); | 
|---|
| 461 | return type_usage_info_; | 
|---|
| 462 | } | 
|---|
| 463 |  | 
|---|
| 464 | void set_type_usage_info(TypeUsageInfo* value) { | 
|---|
| 465 | ASSERT(isolate_ != NULL); | 
|---|
| 466 | ASSERT((type_usage_info_ == NULL && value != NULL) || | 
|---|
| 467 | (type_usage_info_ != NULL && value == NULL)); | 
|---|
| 468 | type_usage_info_ = value; | 
|---|
| 469 | } | 
|---|
| 470 |  | 
|---|
| 471 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
|---|
| 472 | Interpreter* interpreter() const { return interpreter_; } | 
|---|
| 473 | void set_interpreter(Interpreter* value) { interpreter_ = value; } | 
|---|
| 474 | #endif | 
|---|
| 475 |  | 
|---|
| 476 | int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; } | 
|---|
| 477 |  | 
|---|
| 478 | void IncrementNoCallbackScopeDepth() { | 
|---|
| 479 | ASSERT(no_callback_scope_depth_ < INT_MAX); | 
|---|
| 480 | no_callback_scope_depth_ += 1; | 
|---|
| 481 | } | 
|---|
| 482 |  | 
|---|
| 483 | void DecrementNoCallbackScopeDepth() { | 
|---|
| 484 | ASSERT(no_callback_scope_depth_ > 0); | 
|---|
| 485 | no_callback_scope_depth_ -= 1; | 
|---|
| 486 | } | 
|---|
| 487 |  | 
|---|
| 488 | void StoreBufferAddObject(ObjectPtr obj); | 
|---|
| 489 | void StoreBufferAddObjectGC(ObjectPtr obj); | 
|---|
| 490 | #if defined(TESTING) | 
|---|
| 491 | bool StoreBufferContains(ObjectPtr obj) const { | 
|---|
| 492 | return store_buffer_block_->Contains(obj); | 
|---|
| 493 | } | 
|---|
| 494 | #endif | 
|---|
| 495 | void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy); | 
|---|
| 496 | static intptr_t store_buffer_block_offset() { | 
|---|
| 497 | return OFFSET_OF(Thread, store_buffer_block_); | 
|---|
| 498 | } | 
|---|
| 499 |  | 
|---|
| 500 | bool is_marking() const { return marking_stack_block_ != NULL; } | 
|---|
| 501 | void MarkingStackAddObject(ObjectPtr obj); | 
|---|
| 502 | void DeferredMarkingStackAddObject(ObjectPtr obj); | 
|---|
| 503 | void MarkingStackBlockProcess(); | 
|---|
| 504 | void DeferredMarkingStackBlockProcess(); | 
|---|
| 505 | static intptr_t marking_stack_block_offset() { | 
|---|
| 506 | return OFFSET_OF(Thread, marking_stack_block_); | 
|---|
| 507 | } | 
|---|
| 508 |  | 
|---|
| 509 | uword top_exit_frame_info() const { return top_exit_frame_info_; } | 
|---|
| 510 | void set_top_exit_frame_info(uword top_exit_frame_info) { | 
|---|
| 511 | top_exit_frame_info_ = top_exit_frame_info; | 
|---|
| 512 | } | 
|---|
| 513 | static intptr_t top_exit_frame_info_offset() { | 
|---|
| 514 | return OFFSET_OF(Thread, top_exit_frame_info_); | 
|---|
| 515 | } | 
|---|
| 516 |  | 
|---|
| 517 | // Heap of the isolate that this thread is operating on. | 
|---|
| 518 | Heap* heap() const { return heap_; } | 
|---|
| 519 | static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); } | 
|---|
| 520 |  | 
|---|
| 521 | uword top() const { return top_; } | 
|---|
| 522 | uword end() const { return end_; } | 
|---|
| 523 | void set_top(uword top) { top_ = top; } | 
|---|
| 524 | void set_end(uword end) { end_ = end; } | 
|---|
| 525 | static intptr_t top_offset() { return OFFSET_OF(Thread, top_); } | 
|---|
| 526 | static intptr_t end_offset() { return OFFSET_OF(Thread, end_); } | 
|---|
| 527 |  | 
|---|
| 528 | int32_t no_safepoint_scope_depth() const { | 
|---|
| 529 | #if defined(DEBUG) | 
|---|
| 530 | return no_safepoint_scope_depth_; | 
|---|
| 531 | #else | 
|---|
| 532 | return 0; | 
|---|
| 533 | #endif | 
|---|
| 534 | } | 
|---|
| 535 |  | 
|---|
| 536 | void IncrementNoSafepointScopeDepth() { | 
|---|
| 537 | #if defined(DEBUG) | 
|---|
| 538 | ASSERT(no_safepoint_scope_depth_ < INT_MAX); | 
|---|
| 539 | no_safepoint_scope_depth_ += 1; | 
|---|
| 540 | #endif | 
|---|
| 541 | } | 
|---|
| 542 |  | 
|---|
| 543 | void DecrementNoSafepointScopeDepth() { | 
|---|
| 544 | #if defined(DEBUG) | 
|---|
| 545 | ASSERT(no_safepoint_scope_depth_ > 0); | 
|---|
| 546 | no_safepoint_scope_depth_ -= 1; | 
|---|
| 547 | #endif | 
|---|
| 548 | } | 
|---|
| 549 |  | 
|---|
// Generates a <member_name>offset() accessor for each cached constant
// (member names end in '_', so this yields e.g. write_barrier_code_offset()).
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
  static intptr_t member_name##offset() {                                      \
    return OFFSET_OF(Thread, member_name);                                     \
  }
  CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
|---|
| 556 |  | 
|---|
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
    defined(TARGET_ARCH_X64)
  // Returns the position of |reg| among the allocatable Dart CPU registers,
  // i.e. how many allocatable registers have a lower register number. Both
  // offset helpers below index the per-register wrapper tables with this.
  // (Previously each of them duplicated this counting loop.)
  static intptr_t WriteBarrierWrappersIndexForRegister(Register reg) {
    ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
    intptr_t index = 0;
    for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
      if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
      if (i == reg) break;
      ++index;
    }
    return index;
  }

  // Offset within Thread of the write-barrier wrapper entry point for |reg|
  // (one uword slot per allocatable register).
  static intptr_t write_barrier_wrappers_thread_offset(Register reg) {
    return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
           WriteBarrierWrappersIndexForRegister(reg) * sizeof(uword);
  }

  // Offset of |reg|'s wrapper within the wrapper stub code, in units of
  // kStoreBufferWrapperSize per allocatable register.
  static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg) {
    return WriteBarrierWrappersIndexForRegister(reg) * kStoreBufferWrapperSize;
  }
#endif
|---|
| 584 |  | 
|---|
// Offset-of accessors for the per-runtime-entry trampoline slots
// (name##_entry_point_) declared further down in this class.
#define DEFINE_OFFSET_METHOD(name)                                             \
  static intptr_t name##_entry_point_offset() {                                \
    return OFFSET_OF(Thread, name##_entry_point_);                             \
  }
  RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD

// Same, for the leaf runtime entries (their list macro also carries a return
// type and argument list, hence the different parameter shape).
#define DEFINE_OFFSET_METHOD(returntype, name, ...)                            \
  static intptr_t name##_entry_point_offset() {                                \
    return OFFSET_OF(Thread, name##_entry_point_);                             \
  }
  LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
|---|
| 598 |  | 
|---|
  // Object pool cached on the thread for use by generated code.
  ObjectPoolPtr global_object_pool() const { return global_object_pool_; }
  void set_global_object_pool(ObjectPoolPtr raw_value) {
    global_object_pool_ = raw_value;
  }

  // Dispatch table (array of entry points) cached on the thread; not owned.
  const uword* dispatch_table_array() const { return dispatch_table_array_; }
  void set_dispatch_table_array(const uword* array) {
    dispatch_table_array_ = array;
  }
|---|
| 608 |  | 
|---|
  // Mapping between VM objects / runtime entries and their cached location on
  // Thread, if any (definitions out of line; presumably used by the compiler
  // to emit Thread-relative loads — confirm at call sites).
  static bool CanLoadFromThread(const Object& object);
  static intptr_t OffsetFromThread(const Object& object);
  static bool ObjectAtOffset(intptr_t offset, Object* object);
  static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);

#if defined(DEBUG)
  // For asserts only. Has false positives when running with a simulator or
  // SafeStack.
  bool TopErrorHandlerIsSetJump() const;
  bool TopErrorHandlerIsExitFrame() const;
#endif
|---|
| 620 |  | 
|---|
  // VM tag identifying what the thread is currently executing (tag values are
  // defined elsewhere).
  uword vm_tag() const { return vm_tag_; }
  void set_vm_tag(uword tag) { vm_tag_ = tag; }
  static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }

  // Scratch slot for passing an unboxed int64 between generated code and the
  // runtime; see the unboxed_int64_runtime_arg_ field comment.
  int64_t unboxed_int64_runtime_arg() const {
    return unboxed_int64_runtime_arg_;
  }
  void set_unboxed_int64_runtime_arg(int64_t value) {
    unboxed_int64_runtime_arg_ = value;
  }
  static intptr_t unboxed_int64_runtime_arg_offset() {
    return OFFSET_OF(Thread, unboxed_int64_runtime_arg_);
  }
|---|
| 634 |  | 
|---|
  // Accessors for the pending_functions_ growable array (semantics at the
  // out-of-line definitions).
  GrowableObjectArrayPtr pending_functions();
  void clear_pending_functions();

  static intptr_t global_object_pool_offset() {
    return OFFSET_OF(Thread, global_object_pool_);
  }

  static intptr_t dispatch_table_array_offset() {
    return OFFSET_OF(Thread, dispatch_table_array_);
  }
|---|
| 645 |  | 
|---|
  // JumpToExceptionHandler state: the exception and stack trace currently
  // being propagated (see the field block below).
  ObjectPtr active_exception() const { return active_exception_; }
  void set_active_exception(const Object& value);
  static intptr_t active_exception_offset() {
    return OFFSET_OF(Thread, active_exception_);
  }

  ObjectPtr active_stacktrace() const { return active_stacktrace_; }
  void set_active_stacktrace(const Object& value);
  static intptr_t active_stacktrace_offset() {
    return OFFSET_OF(Thread, active_stacktrace_);
  }
|---|
| 657 |  | 
|---|
  uword resume_pc() const { return resume_pc_; }
  void set_resume_pc(uword value) { resume_pc_ = value; }
  // NOTE(review): returns uword while every other *_offset() helper here
  // returns intptr_t — confirm whether this asymmetry is intentional.
  static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }

  // Sticky error accessors (definitions out of line). StealStickyError
  // returns the error and the caller must not ignore the result.
  ErrorPtr sticky_error() const;
  void set_sticky_error(const Error& value);
  void ClearStickyError();
  DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError();

  // Async stack trace accessors; the raw variant takes a tagged pointer
  // directly instead of a handle.
  StackTracePtr async_stack_trace() const;
  void set_async_stack_trace(const StackTrace& stack_trace);
  void set_raw_async_stack_trace(StackTracePtr raw_stack_trace);
  void clear_async_stack_trace();
  static intptr_t async_stack_trace_offset() {
    return OFFSET_OF(Thread, async_stack_trace_);
  }
|---|
| 674 |  | 
|---|
#if defined(DEBUG)
  // Debug-only bookkeeping: one flag per reusable handle type, recording
  // whether that handle is currently handed out (catches overlapping use).
#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object)                                \
  void set_reusable_##object##_handle_scope_active(bool value) {               \
    reusable_##object##_handle_scope_active_ = value;                          \
  }                                                                            \
  bool reusable_##object##_handle_scope_active() const {                       \
    return reusable_##object##_handle_scope_active_;                           \
  }
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
#undef REUSABLE_HANDLE_SCOPE_ACCESSORS

  // True if any reusable handle scope is active on this thread.
  bool IsAnyReusableHandleScopeActive() const {
#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object)                                \
  if (reusable_##object##_handle_scope_active_) {                              \
    return true;                                                               \
  }
    REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
    return false;
#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
  }
#endif  // defined(DEBUG)

  void ClearReusableHandles();

  // Per-type reusable handle accessors, e.g. ArrayHandle(); each returns a
  // mutable reference to this thread's cached handle of that type.
#define REUSABLE_HANDLE(object)                                                \
  object& object##Handle() const { return *object##_handle_; }
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
#undef REUSABLE_HANDLE
|---|
| 703 |  | 
|---|
| 704 | /* | 
|---|
| 705 | * Fields used to support safepointing a thread. | 
|---|
| 706 | * | 
|---|
| 707 | * - Bit 0 of the safepoint_state_ field is used to indicate if the thread is | 
|---|
| 708 | *   already at a safepoint, | 
|---|
| 709 | * - Bit 1 of the safepoint_state_ field is used to indicate if a safepoint | 
|---|
| 710 | *   operation is requested for this thread. | 
|---|
| 711 | * - Bit 2 of the safepoint_state_ field is used to indicate that the thread | 
|---|
| 712 | *   is blocked for the safepoint operation to complete. | 
|---|
| 713 | * | 
|---|
 * The safepoint execution state (described above) for a thread is stored in
 * the execution_state_ field.
|---|
| 716 | * Potential execution states a thread could be in: | 
|---|
| 717 | *   kThreadInGenerated - The thread is running jitted dart/stub code. | 
|---|
| 718 | *   kThreadInVM - The thread is running VM code. | 
|---|
| 719 | *   kThreadInNative - The thread is running native code. | 
|---|
| 720 | *   kThreadInBlockedState - The thread is blocked waiting for a resource. | 
|---|
| 721 | */ | 
|---|
  // --- Accessors over the safepoint_state_ bits (layout in comment above). ---

  // Bit 0: thread is parked at a safepoint.
  static bool IsAtSafepoint(uword state) {
    return AtSafepointField::decode(state);
  }
  bool IsAtSafepoint() const {
    return AtSafepointField::decode(safepoint_state_);
  }
  static uword SetAtSafepoint(bool value, uword state) {
    return AtSafepointField::update(value, state);
  }
  // Mutation requires holding the thread lock; concurrent readers may race.
  void SetAtSafepoint(bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    safepoint_state_ = AtSafepointField::update(value, safepoint_state_);
  }

  // Bit 1: a safepoint operation has been requested for this thread.
  bool IsSafepointRequested() const {
    return SafepointRequestedField::decode(safepoint_state_);
  }
  static uword SetSafepointRequested(bool value, uword state) {
    return SafepointRequestedField::update(value, state);
  }
  // Atomically sets/clears the request bit; returns the previous state word.
  uword SetSafepointRequested(bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    if (value) {
      // acquire pulls from the release in TryEnterSafepoint.
      return safepoint_state_.fetch_or(SafepointRequestedField::encode(true),
                                       std::memory_order_acquire);
    } else {
      // release pushes to the acquire in TryExitSafepoint.
      return safepoint_state_.fetch_and(~SafepointRequestedField::encode(true),
                                        std::memory_order_release);
    }
  }

  // Bit 2: thread is blocked waiting for the safepoint operation to complete.
  static bool IsBlockedForSafepoint(uword state) {
    return BlockedForSafepointField::decode(state);
  }
  bool IsBlockedForSafepoint() const {
    return BlockedForSafepointField::decode(safepoint_state_);
  }
  void SetBlockedForSafepoint(bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    safepoint_state_ =
        BlockedForSafepointField::update(value, safepoint_state_);
  }

  // Bit 3: thread is exempt from safepointing.
  bool BypassSafepoints() const {
    return BypassSafepointsField::decode(safepoint_state_);
  }
  static uword SetBypassSafepoints(bool value, uword state) {
    return BypassSafepointsField::update(value, state);
  }
|---|
| 770 |  | 
|---|
  // Execution states a thread can be in; stored in execution_state_
  // (see the safepoint comment block above).
  enum ExecutionState {
    kThreadInVM = 0,       // Running VM code.
    kThreadInGenerated,    // Running jitted dart/stub code.
    kThreadInNative,       // Running native code.
    kThreadInBlockedState  // Blocked waiting for a resource.
  };
|---|
| 777 |  | 
|---|
  // Current execution state; only meaningful when read by the thread itself.
  ExecutionState execution_state() const {
    return static_cast<ExecutionState>(execution_state_);
  }
  // Normally execution state is only accessed for the current thread.
  NO_SANITIZE_THREAD
  ExecutionState execution_state_cross_thread_for_testing() const {
    return static_cast<ExecutionState>(execution_state_);
  }
  void set_execution_state(ExecutionState state) {
    execution_state_ = static_cast<uword>(state);
  }
  static intptr_t execution_state_offset() {
    return OFFSET_OF(Thread, execution_state_);
  }

  // Handle allocation is only permitted while running VM or generated code,
  // not while in native code or blocked.
  virtual bool MayAllocateHandles() {
    return (execution_state() == kThreadInVM) ||
           (execution_state() == kThreadInGenerated);
  }
|---|
| 797 |  | 
|---|
  // Canonical state words: "running, nothing pending" vs "at safepoint".
  static uword safepoint_state_unacquired() { return SetAtSafepoint(false, 0); }
  static uword safepoint_state_acquired() { return SetAtSafepoint(true, 0); }

  // Fast path: CAS from state 0 (running, nothing requested) to
  // "at safepoint". Fails if any other bit is set, e.g. a pending request.
  bool TryEnterSafepoint() {
    uword old_state = 0;
    uword new_state = SetAtSafepoint(true, 0);
    return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                    std::memory_order_release);
  }

  void EnterSafepoint() {
    ASSERT(no_safepoint_scope_depth() == 0);
    // First try a fast update of the thread state to indicate it is at a
    // safepoint.
    if (!TryEnterSafepoint()) {
      // Fast update failed which means we could potentially be in the middle
      // of a safepoint operation.
      EnterSafepointUsingLock();
    }
  }

  // Fast path: CAS from "at safepoint" back to state 0 (running).
  bool TryExitSafepoint() {
    uword old_state = SetAtSafepoint(true, 0);
    uword new_state = 0;
    return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                    std::memory_order_acquire);
  }

  void ExitSafepoint() {
    // First try a fast update of the thread state to indicate it is not at a
    // safepoint anymore.
    if (!TryExitSafepoint()) {
      // Fast update failed which means we could potentially be in the middle
      // of a safepoint operation.
      ExitSafepointUsingLock();
    }
  }

  // Polled by long-running VM code: parks this thread if a safepoint
  // operation has been requested.
  void CheckForSafepoint() {
    ASSERT(no_safepoint_scope_depth() == 0);
    if (IsSafepointRequested()) {
      BlockForSafepoint();
    }
  }
|---|
| 842 |  | 
|---|
  // Allocates a fresh ID for a native (FFI) callback.
  int32_t AllocateFfiCallbackId();

  // Store 'code' for the native callback identified by 'callback_id'.
  //
  // Expands the callback code array as necessary to accommodate the callback
  // ID.
  void SetFfiCallbackCode(int32_t callback_id, const Code& code);

  // Ensure that 'callback_id' refers to a valid callback in this isolate.
  //
  // If "entry != 0", additionally checks that entry is inside the instructions
  // of this callback.
  //
  // Aborts if any of these conditions fails.
  void VerifyCallbackIsolate(int32_t callback_id, uword entry);

  // Next thread in the containing isolate's linked list of threads.
  Thread* next() const { return next_; }

  // Visit all object pointers.
  void VisitObjectPointers(ObjectPointerVisitor* visitor,
                           ValidationPolicy validate_frames);
  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

  // API handle / zone introspection helpers (definitions out of line).
  bool IsValidHandle(Dart_Handle object) const;
  bool IsValidLocalHandle(Dart_Handle object) const;
  intptr_t CountLocalHandles() const;
  int ZoneSizeInBytes() const;
  void UnwindScopes(uword stack_marker);

  void InitVMConstants();

  // Next value from this thread's private random number generator.
  uint64_t GetRandomUInt64() { return thread_random_.NextUInt64(); }
|---|
| 875 |  | 
|---|
| 876 | uint64_t* GetFfiMarshalledArguments(intptr_t size) { | 
|---|
| 877 | if (ffi_marshalled_arguments_size_ < size) { | 
|---|
| 878 | if (ffi_marshalled_arguments_size_ > 0) { | 
|---|
| 879 | free(ffi_marshalled_arguments_); | 
|---|
| 880 | } | 
|---|
| 881 | ffi_marshalled_arguments_ = | 
|---|
| 882 | reinterpret_cast<uint64_t*>(malloc(size * sizeof(uint64_t))); | 
|---|
| 883 | } | 
|---|
| 884 | return ffi_marshalled_arguments_; | 
|---|
| 885 | } | 
|---|
| 886 |  | 
|---|
#ifndef PRODUCT
  // Dumps this thread's state to the service protocol JSON stream.
  void PrintJSON(JSONStream* stream) const;
#endif

 private:
  // Creates the reusable handle of type T for this thread (definition
  // elsewhere).
  template <class T>
  T* AllocateReusableHandle();

  // How RestoreWriteBarrierInvariant should re-register the objects it
  // visits: remembered set vs. deferred marking stack.
  enum class RestoreWriteBarrierInvariantOp {
    kAddToRememberedSet,
    kAddToDeferredMarkingStack
  };
  friend class RestoreWriteBarrierInvariantVisitor;
  void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);
|---|
| 901 |  | 
|---|
| 902 | // Set the current compiler state and return the previous compiler state. | 
|---|
| 903 | CompilerState* SetCompilerState(CompilerState* state) { | 
|---|
| 904 | CompilerState* previous = compiler_state_; | 
|---|
| 905 | compiler_state_ = state; | 
|---|
| 906 | return previous; | 
|---|
| 907 | } | 
|---|
| 908 |  | 
|---|
  // Accessed from generated code.
  // ** This block of fields must come first! **
  // For AOT cross-compilation, we rely on these members having the same offsets
  // in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
  // We use only word-sized fields to avoid differences in struct packing on the
  // different architectures. See also CheckOffsets in dart.cc.
  RelaxedAtomic<uword> stack_limit_;
  uword write_barrier_mask_;
  Isolate* isolate_;
  const uword* dispatch_table_array_;
  uword top_ = 0;
  uword end_ = 0;
  // Offsets up to this point can all fit in a byte on X64. All of the above
  // fields are very abundantly accessed from code. Thus, keeping them first
  // is important for code size (although code size on X64 is not a priority).
  uword saved_stack_limit_;
  uword stack_overflow_flags_;
  InstancePtr* field_table_values_;
  Heap* heap_;
  uword volatile top_exit_frame_info_;
  StoreBufferBlock* store_buffer_block_;
  MarkingStackBlock* marking_stack_block_;
  MarkingStackBlock* deferred_marking_stack_block_;
  uword volatile vm_tag_;
  StackTracePtr async_stack_trace_;
  // Memory location dedicated for passing unboxed int64 values from
  // generated code to runtime.
  // TODO(dartbug.com/33549): Clean this up when unboxed values
  // could be passed as arguments.
  ALIGN8 int64_t unboxed_int64_runtime_arg_;

  // State that is cached in the TLS for fast access in generated code.
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)      \
  type_name member_name;
  CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS

  // One entry-point slot per runtime entry, loaded by generated code.
#define DECLARE_MEMBERS(name) uword name##_entry_point_;
  RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS

#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
  LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS

#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
    defined(TARGET_ARCH_X64)
  // One write-barrier wrapper entry point per allocatable Dart CPU register;
  // indexed via write_barrier_wrappers_thread_offset() above.
  uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
#endif

  // JumpToExceptionHandler state:
  ObjectPtr active_exception_;
  ObjectPtr active_stacktrace_;
  ObjectPoolPtr global_object_pool_;
  uword resume_pc_;
  uword saved_shadow_call_stack_ = 0;
  uword execution_state_;
  std::atomic<uword> safepoint_state_;
  GrowableObjectArrayPtr ffi_callback_code_;
  uword exit_through_ffi_ = 0;
  ApiLocalScope* api_top_scope_;

  // ---- End accessed from generated code. ----

  // The layout of Thread object up to this point should not depend
  // on DART_PRECOMPILED_RUNTIME, as it is accessed from generated code.
  // The code is generated without DART_PRECOMPILED_RUNTIME, but used with
  // DART_PRECOMPILED_RUNTIME.
|---|
| 977 |  | 
|---|
  TaskKind task_kind_;
  TimelineStream* dart_stream_;
  IsolateGroup* isolate_group_ = nullptr;
  // Guards this thread's safepoint state transitions; see SetAtSafepoint().
  mutable Monitor thread_lock_;
  ApiLocalScope* api_reusable_scope_;
  int32_t no_callback_scope_depth_;
#if defined(DEBUG)
  // Backing counter for no_safepoint_scope_depth(); DEBUG only.
  int32_t no_safepoint_scope_depth_;
#endif
  VMHandles reusable_handles_;
  // OOB message interrupt deferral state; see DeferOOBMessageInterrupts().
  intptr_t defer_oob_messages_count_;
  uint16_t deferred_interrupts_mask_;
  uint16_t deferred_interrupts_;
  int32_t stack_overflow_count_;

  // Compiler state:
  CompilerState* compiler_state_ = nullptr;
  HierarchyInfo* hierarchy_info_;
  TypeUsageInfo* type_usage_info_;
  GrowableObjectArrayPtr pending_functions_;

  ErrorPtr sticky_error_;

  // Backing generator for GetRandomUInt64().
  Random thread_random_;

  // Lazily grown scratch buffer for FFI argument marshalling; see
  // GetFfiMarshalledArguments().
  intptr_t ffi_marshalled_arguments_size_ = 0;
  uint64_t* ffi_marshalled_arguments_;
|---|
| 1005 |  | 
|---|
  InstancePtr* field_table_values() const { return field_table_values_; }

  // Reusable handles support.
#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
#undef REUSABLE_HANDLE_FIELDS

#if defined(DEBUG)
  // Debug-only flags backing the reusable-handle-scope accessors above.
#define REUSABLE_HANDLE_SCOPE_VARIABLE(object)                                 \
  bool reusable_##object##_handle_scope_active_;
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
#undef REUSABLE_HANDLE_SCOPE_VARIABLE
#endif  // defined(DEBUG)
|---|
| 1019 |  | 
|---|
  // Bit layout of safepoint_state_ (see the big comment in the public
  // section). Generated code assumes that AtSafepointField is the LSB.
  class AtSafepointField : public BitField<uword, bool, 0, 1> {};
  class SafepointRequestedField : public BitField<uword, bool, 1, 1> {};
  class BlockedForSafepointField : public BitField<uword, bool, 2, 1> {};
  class BypassSafepointsField : public BitField<uword, bool, 3, 1> {};

#if defined(USING_SAFE_STACK)
  uword saved_safestack_limit_;
#endif

#if !defined(DART_PRECOMPILED_RUNTIME)
  Interpreter* interpreter_;
#endif

  Thread* next_;  // Used to chain the thread structures in an isolate.
  bool is_mutator_thread_ = false;
|---|
  explicit Thread(bool is_vm_isolate);

  // Release/acquire this thread's store buffer block (release optionally only
  // when past the threshold) — definitions out of line.
  void StoreBufferRelease(
      StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
  void StoreBufferAcquire();

  // Same pattern for the (deferred) marking stack blocks.
  void MarkingStackRelease();
  void MarkingStackAcquire();
  void DeferredMarkingStackRelease();
  void DeferredMarkingStackAcquire();

  void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
  // Slow paths taken when the lock-free safepoint transitions fail;
  // BlockForSafepoint parks until the safepoint operation completes.
  void EnterSafepointUsingLock();
  void ExitSafepointUsingLock();
  void BlockForSafepoint();

  void FinishEntering(TaskKind kind);
  void PrepareLeaving();

  // Publishes |current| as this OS thread's current Thread via TLS.
  static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }

  void DeferOOBMessageInterrupts();
  void RestoreOOBMessageInterrupts();

#define REUSABLE_FRIEND_DECLARATION(name)                                      \
  friend class Reusable##name##HandleScope;
  REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
#undef REUSABLE_FRIEND_DECLARATION

  friend class ApiZone;
  friend class Interpreter;
  friend class InterruptChecker;
  friend class Isolate;
  friend class IsolateGroup;
  friend class IsolateTestHelper;
  friend class NoOOBMessageScope;
  friend class Simulator;
  friend class StackZone;
  friend class ThreadRegistry;
  friend class NoActiveIsolateScope;
  friend class CompilerState;
  friend class compiler::target::Thread;
  friend class FieldTable;
  friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup*,
                                                   const char*,
                                                   char**);
  DISALLOW_COPY_AND_ASSIGN(Thread);
|---|
| 1084 | }; | 
|---|
| 1085 |  | 
|---|
#if defined(HOST_OS_WINDOWS)
// Clears the state of the current thread and frees the allocation.
// NOTE(review): presumably invoked on thread detach from the Windows-specific
// OS thread code — confirm at the definition site.
void WindowsThreadCleanUp();
#endif
|---|
| 1090 |  | 
|---|
// Disable thread interrupts.
// RAII scope: the constructor disables interrupts for |thread| and the
// destructor restores them (definitions out of line).
class DisableThreadInterruptsScope : public StackResource {
 public:
  explicit DisableThreadInterruptsScope(Thread* thread);
  ~DisableThreadInterruptsScope();
};
|---|
| 1097 |  | 
|---|
// Within a NoSafepointScope, the thread must not reach any safepoint. Used
// around code that manipulates raw object pointers directly without handles.
#if defined(DEBUG)
class NoSafepointScope : public ThreadStackResource {
 public:
  // Defaults to the current thread when |thread| is null; bumps the thread's
  // no-safepoint depth for the scope's lifetime.
  explicit NoSafepointScope(Thread* thread = nullptr)
      : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
    this->thread()->IncrementNoSafepointScopeDepth();
  }
  ~NoSafepointScope() { thread()->DecrementNoSafepointScopeDepth(); }

 private:
  DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
};
#else   // defined(DEBUG)
// Release builds do not track the depth counter, so the scope is a no-op.
class NoSafepointScope : public ValueObject {
 public:
  explicit NoSafepointScope(Thread* thread = nullptr) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
};
#endif  // defined(DEBUG)
|---|
| 1121 |  | 
|---|
| 1122 | }  // namespace dart | 
|---|
| 1123 |  | 
|---|
| 1124 | #endif  // RUNTIME_VM_THREAD_H_ | 
|---|
| 1125 |  | 
|---|