// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/thread.h"

#include "vm/dart_api_state.h"
#include "vm/growable_array.h"
#include "vm/heap/safepoint.h"
#include "vm/isolate.h"
#include "vm/json_stream.h"
#include "vm/lockers.h"
#include "vm/log.h"
#include "vm/message_handler.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/os_thread.h"
#include "vm/profiler.h"
#include "vm/runtime_entry.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/thread_interrupter.h"
#include "vm/thread_registry.h"
#include "vm/timeline.h"
#include "vm/zone.h"

#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/ffi_callback_trampolines.h"
#endif  // !defined(DART_PRECOMPILED_RUNTIME)

namespace dart {

#if !defined(PRODUCT)
DECLARE_FLAG(bool, trace_service);
DECLARE_FLAG(bool, trace_service_verbose);
#endif  // !defined(PRODUCT)
Thread::~Thread() {
  // We should cleanly exit any isolate before destruction.
  ASSERT(isolate_ == NULL);
  ASSERT(store_buffer_block_ == NULL);
  ASSERT(marking_stack_block_ == NULL);
#if !defined(DART_PRECOMPILED_RUNTIME)
  delete interpreter_;
  interpreter_ = nullptr;
#endif
  // There should be no top api scopes at this point.
  ASSERT(api_top_scope() == NULL);
  // Delete the reusable api scope if there is one.
  if (api_reusable_scope_ != nullptr) {
    delete api_reusable_scope_;
    api_reusable_scope_ = NULL;
  }
}

#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_INIT(object) \
  reusable_##object##_handle_scope_active_(false),
#else
#define REUSABLE_HANDLE_SCOPE_INIT(object)
#endif  // defined(DEBUG)

#define REUSABLE_HANDLE_INITIALIZERS(object) object##_handle_(NULL),

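// These macros expand, for each type in REUSABLE_HANDLE_LIST, into
// constructor initializer-list entries: one for the cached handle pointer
// and, in DEBUG mode, one for the flag tracking whether the corresponding
// reusable-handle scope is currently active.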
Thread::Thread(bool is_vm_isolate)
    : ThreadState(false),
      stack_limit_(0),
      write_barrier_mask_(ObjectLayout::kGenerationalBarrierMask),
      isolate_(NULL),
      dispatch_table_array_(NULL),
      saved_stack_limit_(0),
      stack_overflow_flags_(0),
      heap_(NULL),
      top_exit_frame_info_(0),
      store_buffer_block_(NULL),
      marking_stack_block_(NULL),
      vm_tag_(0),
      async_stack_trace_(StackTrace::null()),
      unboxed_int64_runtime_arg_(0),
      active_exception_(Object::null()),
      active_stacktrace_(Object::null()),
      global_object_pool_(ObjectPool::null()),
      resume_pc_(0),
      execution_state_(kThreadInNative),
      safepoint_state_(0),
      ffi_callback_code_(GrowableObjectArray::null()),
      api_top_scope_(NULL),
      task_kind_(kUnknownTask),
      dart_stream_(NULL),
      thread_lock_(),
      api_reusable_scope_(NULL),
      no_callback_scope_depth_(0),
#if defined(DEBUG)
      no_safepoint_scope_depth_(0),
#endif
      reusable_handles_(),
      defer_oob_messages_count_(0),
      deferred_interrupts_mask_(0),
      deferred_interrupts_(0),
      stack_overflow_count_(0),
      hierarchy_info_(NULL),
      type_usage_info_(NULL),
      pending_functions_(GrowableObjectArray::null()),
      sticky_error_(Error::null()),
      REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_INITIALIZERS)
      REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_INIT)
#if defined(USING_SAFE_STACK)
      saved_safestack_limit_(0),
#endif
#if !defined(DART_PRECOMPILED_RUNTIME)
      interpreter_(nullptr),
#endif
      next_(NULL) {
#if defined(SUPPORT_TIMELINE)
  dart_stream_ = Timeline::GetDartStream();
  ASSERT(dart_stream_ != NULL);
#endif
#define DEFAULT_INIT(type_name, member_name, init_expr, default_init_value) \
  member_name = default_init_value;
  CACHED_CONSTANTS_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
    defined(TARGET_ARCH_X64)
  for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
    write_barrier_wrappers_entry_points_[i] = 0;
  }
#endif

#define DEFAULT_INIT(name) name##_entry_point_ = 0;
  RUNTIME_ENTRY_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

#define DEFAULT_INIT(returntype, name, ...) name##_entry_point_ = 0;
  LEAF_RUNTIME_ENTRY_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

  // We cannot initialize the VM constants here for the vm isolate thread
  // due to bootstrapping issues.
  if (!is_vm_isolate) {
    InitVMConstants();
  }
}

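// 16-byte-aligned bit patterns exposed to generated code via the cached
// addresses on Thread: XOR-ing with the sign-bit masks negates a double or
// the lanes of a float32x4, AND-ing with the sign-cleared masks computes
// absolute values, and float_zerow clears the w lane. The alignment allows
// aligned SIMD loads of these operands.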
static const double double_nan_constant = NAN;

static const struct ALIGN16 {
  uint64_t a;
  uint64_t b;
} double_negate_constant = {0x8000000000000000ULL, 0x8000000000000000ULL};

static const struct ALIGN16 {
  uint64_t a;
  uint64_t b;
} double_abs_constant = {0x7FFFFFFFFFFFFFFFULL, 0x7FFFFFFFFFFFFFFFULL};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_not_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_zerow_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000};

void Thread::InitVMConstants() {
#define ASSERT_VM_HEAP(type_name, member_name, init_expr, default_init_value) \
  ASSERT((init_expr)->IsOldObject());
  CACHED_VM_OBJECTS_LIST(ASSERT_VM_HEAP)
#undef ASSERT_VM_HEAP

#define INIT_VALUE(type_name, member_name, init_expr, default_init_value) \
  ASSERT(member_name == default_init_value); \
  member_name = (init_expr);
  CACHED_CONSTANTS_LIST(INIT_VALUE)
#undef INIT_VALUE

#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
    defined(TARGET_ARCH_X64)
  for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
    write_barrier_wrappers_entry_points_[i] =
        StubCode::WriteBarrierWrappers().EntryPoint() +
        i * kStoreBufferWrapperSize;
  }
#endif

#define INIT_VALUE(name) \
  ASSERT(name##_entry_point_ == 0); \
  name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
  RUNTIME_ENTRY_LIST(INIT_VALUE)
#undef INIT_VALUE

#define INIT_VALUE(returntype, name, ...) \
  ASSERT(name##_entry_point_ == 0); \
  name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
  LEAF_RUNTIME_ENTRY_LIST(INIT_VALUE)
#undef INIT_VALUE

// Set up the thread-specific reusable handles.
#define REUSABLE_HANDLE_ALLOCATION(object) \
  this->object##_handle_ = this->AllocateReusableHandle<object>();
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_ALLOCATION)
#undef REUSABLE_HANDLE_ALLOCATION
}

#ifndef PRODUCT
// Collect information about each individual zone associated with this thread.
void Thread::PrintJSON(JSONStream* stream) const {
  JSONObject jsobj(stream);
  jsobj.AddProperty("type", "_Thread");
  jsobj.AddPropertyF("id", "threads/%" Pd "",
                     OSThread::ThreadIdToIntPtr(os_thread()->trace_id()));
  jsobj.AddProperty("kind", TaskKindToCString(task_kind()));
  jsobj.AddPropertyF("_zoneHighWatermark", "%" Pu "", zone_high_watermark());
  jsobj.AddPropertyF("_zoneCapacity", "%" Pu "", current_zone_capacity());
}
#endif

GrowableObjectArrayPtr Thread::pending_functions() {
  if (pending_functions_ == GrowableObjectArray::null()) {
    pending_functions_ = GrowableObjectArray::New(Heap::kOld);
  }
  return pending_functions_;
}

void Thread::clear_pending_functions() {
  pending_functions_ = GrowableObjectArray::null();
}

void Thread::set_active_exception(const Object& value) {
  active_exception_ = value.raw();
}

void Thread::set_active_stacktrace(const Object& value) {
  active_stacktrace_ = value.raw();
}

ErrorPtr Thread::sticky_error() const {
  return sticky_error_;
}

void Thread::set_sticky_error(const Error& value) {
  ASSERT(!value.IsNull());
  sticky_error_ = value.raw();
}

void Thread::ClearStickyError() {
  sticky_error_ = Error::null();
}

ErrorPtr Thread::StealStickyError() {
  NoSafepointScope no_safepoint;
  ErrorPtr return_value = sticky_error_;
  sticky_error_ = Error::null();
  return return_value;
}

const char* Thread::TaskKindToCString(TaskKind kind) {
  switch (kind) {
    case kUnknownTask:
      return "kUnknownTask";
    case kMutatorTask:
      return "kMutatorTask";
    case kCompilerTask:
      return "kCompilerTask";
    case kSweeperTask:
      return "kSweeperTask";
    case kMarkerTask:
      return "kMarkerTask";
    default:
      UNREACHABLE();
      return "";
  }
}

StackTracePtr Thread::async_stack_trace() const {
  return async_stack_trace_;
}

void Thread::set_async_stack_trace(const StackTrace& stack_trace) {
  ASSERT(!stack_trace.IsNull());
  async_stack_trace_ = stack_trace.raw();
}

void Thread::set_raw_async_stack_trace(StackTracePtr raw_stack_trace) {
  async_stack_trace_ = raw_stack_trace;
}

void Thread::clear_async_stack_trace() {
  async_stack_trace_ = StackTrace::null();
}

bool Thread::EnterIsolate(Isolate* isolate) {
  const bool kIsMutatorThread = true;
  Thread* thread = isolate->ScheduleThread(kIsMutatorThread);
  if (thread != NULL) {
    ASSERT(thread->store_buffer_block_ == NULL);
    ASSERT(thread->isolate() == isolate);
    ASSERT(thread->isolate_group() == isolate->group());
    thread->FinishEntering(kMutatorTask);
    return true;
  }
  return false;
}

void Thread::ExitIsolate() {
  Thread* thread = Thread::Current();
  ASSERT(thread != nullptr);
  ASSERT(thread->IsMutatorThread());
  ASSERT(thread->isolate() != nullptr);
  ASSERT(thread->isolate_group() != nullptr);
  DEBUG_ASSERT(!thread->IsAnyReusableHandleScopeActive());

  thread->PrepareLeaving();

  Isolate* isolate = thread->isolate();
  thread->set_vm_tag(isolate->is_runnable() ? VMTag::kIdleTagId
                                            : VMTag::kLoadWaitTagId);
  const bool kIsMutatorThread = true;
  isolate->UnscheduleThread(thread, kIsMutatorThread);
}

bool Thread::EnterIsolateAsHelper(Isolate* isolate,
                                  TaskKind kind,
                                  bool bypass_safepoint) {
  ASSERT(kind != kMutatorTask);
  const bool kIsMutatorThread = false;
  Thread* thread = isolate->ScheduleThread(kIsMutatorThread, bypass_safepoint);
  if (thread != NULL) {
    ASSERT(!thread->IsMutatorThread());
    ASSERT(thread->isolate() == isolate);
    ASSERT(thread->isolate_group() == isolate->group());
    thread->FinishEntering(kind);
    return true;
  }
  return false;
}

void Thread::ExitIsolateAsHelper(bool bypass_safepoint) {
  Thread* thread = Thread::Current();
  ASSERT(thread != nullptr);
  ASSERT(!thread->IsMutatorThread());
  ASSERT(thread->isolate() != nullptr);
  ASSERT(thread->isolate_group() != nullptr);

  thread->PrepareLeaving();

  Isolate* isolate = thread->isolate();
  ASSERT(isolate != NULL);
  const bool kIsMutatorThread = false;
  isolate->UnscheduleThread(thread, kIsMutatorThread, bypass_safepoint);
}

bool Thread::EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
                                       TaskKind kind,
                                       bool bypass_safepoint) {
  ASSERT(kind != kMutatorTask);
  Thread* thread = isolate_group->ScheduleThread(bypass_safepoint);
  if (thread != NULL) {
    ASSERT(!thread->IsMutatorThread());
    ASSERT(thread->isolate() == nullptr);
    ASSERT(thread->isolate_group() == isolate_group);
    thread->FinishEntering(kind);
    return true;
  }
  return false;
}

void Thread::ExitIsolateGroupAsHelper(bool bypass_safepoint) {
  Thread* thread = Thread::Current();
  ASSERT(thread != nullptr);
  ASSERT(!thread->IsMutatorThread());
  ASSERT(thread->isolate() == nullptr);
  ASSERT(thread->isolate_group() != nullptr);

  thread->PrepareLeaving();

  const bool kIsMutatorThread = false;
  thread->isolate_group()->UnscheduleThread(thread, kIsMutatorThread,
                                            bypass_safepoint);
}

void Thread::ReleaseStoreBuffer() {
  ASSERT(IsAtSafepoint());
  // Prevent scheduling another GC by ignoring the threshold.
  ASSERT(store_buffer_block_ != NULL);
  StoreBufferRelease(StoreBuffer::kIgnoreThreshold);
  // Make sure to get an *empty* block; the isolate needs all entries
  // at GC time.
  // TODO(koda): Replace with an epilogue (PrepareAfterGC) that acquires.
  store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
}

void Thread::SetStackLimit(uword limit) {
  // The thread setting the stack limit is not necessarily the thread whose
  // stack limit is being set.
  MonitorLocker ml(&thread_lock_);
  if (!HasScheduledInterrupts()) {
    // No interrupt pending, set stack_limit_ too.
    stack_limit_ = limit;
  }
  saved_stack_limit_ = limit;
}

void Thread::ClearStackLimit() {
  SetStackLimit(~static_cast<uword>(0));
}

void Thread::ScheduleInterrupts(uword interrupt_bits) {
  MonitorLocker ml(&thread_lock_);
  ScheduleInterruptsLocked(interrupt_bits);
}

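// Interrupts are delivered by folding interrupt bits into the low bits of
// stack_limit_: generated code compares the stack pointer against
// stack_limit_ on function entry, so an artificially high limit forces the
// next stack-overflow check to fail, and the runtime then inspects the low
// bits to distinguish a real overflow from a pending interrupt.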
void Thread::ScheduleInterruptsLocked(uword interrupt_bits) {
  ASSERT(thread_lock_.IsOwnedByCurrentThread());
  ASSERT((interrupt_bits & ~kInterruptsMask) == 0);  // Must fit in mask.

  // Check to see if any of the requested interrupts should be deferred.
  uword defer_bits = interrupt_bits & deferred_interrupts_mask_;
  if (defer_bits != 0) {
    deferred_interrupts_ |= defer_bits;
    interrupt_bits &= ~deferred_interrupts_mask_;
    if (interrupt_bits == 0) {
      return;
    }
  }

  if (stack_limit_ == saved_stack_limit_) {
    stack_limit_ = (kInterruptStackLimit & ~kInterruptsMask) | interrupt_bits;
  } else {
    stack_limit_ = stack_limit_ | interrupt_bits;
  }
}

uword Thread::GetAndClearInterrupts() {
  MonitorLocker ml(&thread_lock_);
  if (stack_limit_ == saved_stack_limit_) {
    return 0;  // No interrupt was requested.
  }
  uword interrupt_bits = stack_limit_ & kInterruptsMask;
  stack_limit_ = saved_stack_limit_;
  return interrupt_bits;
}

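// OOB-message deferral is reference-counted: nested Defer/Restore pairs must
// balance. While deferral is active, kMessageInterrupt bits are parked in
// deferred_interrupts_ instead of stack_limit_, and the outermost Restore
// re-arms any message interrupts that arrived in the meantime.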
void Thread::DeferOOBMessageInterrupts() {
  MonitorLocker ml(&thread_lock_);
  defer_oob_messages_count_++;
  if (defer_oob_messages_count_ > 1) {
    // OOB message interrupts are already deferred.
    return;
  }
  ASSERT(deferred_interrupts_mask_ == 0);
  deferred_interrupts_mask_ = kMessageInterrupt;

  if (stack_limit_ != saved_stack_limit_) {
    // Defer any interrupts which are currently pending.
    deferred_interrupts_ = stack_limit_ & deferred_interrupts_mask_;

    // Clear deferrable interrupts, if present.
    stack_limit_ = stack_limit_ & ~deferred_interrupts_mask_;

    if ((stack_limit_ & kInterruptsMask) == 0) {
      // No other pending interrupts. Restore normal stack limit.
      stack_limit_ = saved_stack_limit_;
    }
  }
#if !defined(PRODUCT)
  if (FLAG_trace_service && FLAG_trace_service_verbose) {
    OS::PrintErr("[+%" Pd64 "ms] Isolate %s deferring OOB interrupts\n",
                 Dart::UptimeMillis(), isolate()->name());
  }
#endif  // !defined(PRODUCT)
}

void Thread::RestoreOOBMessageInterrupts() {
  MonitorLocker ml(&thread_lock_);
  defer_oob_messages_count_--;
  if (defer_oob_messages_count_ > 0) {
    return;
  }
  ASSERT(defer_oob_messages_count_ == 0);
  ASSERT(deferred_interrupts_mask_ == kMessageInterrupt);
  deferred_interrupts_mask_ = 0;
  if (deferred_interrupts_ != 0) {
    if (stack_limit_ == saved_stack_limit_) {
      stack_limit_ = kInterruptStackLimit & ~kInterruptsMask;
    }
    stack_limit_ = stack_limit_ | deferred_interrupts_;
    deferred_interrupts_ = 0;
  }
#if !defined(PRODUCT)
  if (FLAG_trace_service && FLAG_trace_service_verbose) {
    OS::PrintErr("[+%" Pd64 "ms] Isolate %s restoring OOB interrupts\n",
                 Dart::UptimeMillis(), isolate()->name());
  }
#endif  // !defined(PRODUCT)
}

ErrorPtr Thread::HandleInterrupts() {
  uword interrupt_bits = GetAndClearInterrupts();
  if ((interrupt_bits & kVMInterrupt) != 0) {
    CheckForSafepoint();
    if (isolate_group()->store_buffer()->Overflowed()) {
      if (FLAG_verbose_gc) {
        OS::PrintErr("Scavenge scheduled by store buffer overflow.\n");
      }
      heap()->CollectGarbage(Heap::kNew);
    }
  }
  if ((interrupt_bits & kMessageInterrupt) != 0) {
    MessageHandler::MessageStatus status =
        isolate()->message_handler()->HandleOOBMessages();
    if (status != MessageHandler::kOK) {
      // A non-kOK status from HandleOOBMessages signals that the isolate
      // should be terminating.
      if (FLAG_trace_isolates) {
        OS::PrintErr(
            "[!] Terminating isolate due to OOB message:\n"
            "\tisolate: %s\n",
            isolate()->name());
      }
      NoSafepointScope no_safepoint;
      ErrorPtr error = Thread::Current()->StealStickyError();
      ASSERT(error->IsUnwindError());
      return error;
    }
  }
  return Error::null();
}

uword Thread::GetAndClearStackOverflowFlags() {
  uword stack_overflow_flags = stack_overflow_flags_;
  stack_overflow_flags_ = 0;
  return stack_overflow_flags;
}

void Thread::StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy) {
  StoreBufferRelease(policy);
  StoreBufferAcquire();
}

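// Slow path of the generational write barrier: records an old-generation
// object whose fields may now point into new space, so the scavenger can
// treat it as a root. The GC variant ignores the store-buffer threshold to
// avoid scheduling another collection from within GC itself.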
void Thread::StoreBufferAddObject(ObjectPtr obj) {
  ASSERT(this == Thread::Current());
  store_buffer_block_->Push(obj);
  if (store_buffer_block_->IsFull()) {
    StoreBufferBlockProcess(StoreBuffer::kCheckThreshold);
  }
}

void Thread::StoreBufferAddObjectGC(ObjectPtr obj) {
  store_buffer_block_->Push(obj);
  if (store_buffer_block_->IsFull()) {
    StoreBufferBlockProcess(StoreBuffer::kIgnoreThreshold);
  }
}

void Thread::StoreBufferRelease(StoreBuffer::ThresholdPolicy policy) {
  StoreBufferBlock* block = store_buffer_block_;
  store_buffer_block_ = NULL;
  isolate_group()->store_buffer()->PushBlock(block, policy);
}

void Thread::StoreBufferAcquire() {
  store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
}

void Thread::MarkingStackBlockProcess() {
  MarkingStackRelease();
  MarkingStackAcquire();
}

void Thread::DeferredMarkingStackBlockProcess() {
  DeferredMarkingStackRelease();
  DeferredMarkingStackAcquire();
}

void Thread::MarkingStackAddObject(ObjectPtr obj) {
  marking_stack_block_->Push(obj);
  if (marking_stack_block_->IsFull()) {
    MarkingStackBlockProcess();
  }
}

void Thread::DeferredMarkingStackAddObject(ObjectPtr obj) {
  deferred_marking_stack_block_->Push(obj);
  if (deferred_marking_stack_block_->IsFull()) {
    DeferredMarkingStackBlockProcess();
  }
}

void Thread::MarkingStackRelease() {
  MarkingStackBlock* block = marking_stack_block_;
  marking_stack_block_ = NULL;
  write_barrier_mask_ = ObjectLayout::kGenerationalBarrierMask;
  isolate_group()->marking_stack()->PushBlock(block);
}

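// Acquiring a marking stack block also adds the incremental barrier bit to
// this thread's write barrier mask, so that while concurrent marking is in
// progress the barrier forwards newly stored objects to the marker;
// releasing the block (above) drops back to the generational barrier alone.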
void Thread::MarkingStackAcquire() {
  marking_stack_block_ = isolate_group()->marking_stack()->PopEmptyBlock();
  write_barrier_mask_ = ObjectLayout::kGenerationalBarrierMask |
                        ObjectLayout::kIncrementalBarrierMask;
}

void Thread::DeferredMarkingStackRelease() {
  MarkingStackBlock* block = deferred_marking_stack_block_;
  deferred_marking_stack_block_ = NULL;
  isolate_group()->deferred_marking_stack()->PushBlock(block);
}

void Thread::DeferredMarkingStackAcquire() {
  deferred_marking_stack_block_ =
      isolate_group()->deferred_marking_stack()->PopEmptyBlock();
}

bool Thread::CanCollectGarbage() const {
  // We grow the heap instead of triggering a garbage collection when a
  // thread is at a safepoint in the following situations:
  //   - background compiler thread finalizing and installing code
  //   - disassembly of the generated code is done after compilation
  // So essentially we state that garbage collection is possible only
  // when we are not at a safepoint.
  return !IsAtSafepoint();
}

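// Note that the next two predicates are not logical complements: a thread
// that has never entered Dart code (no exit frame and a non-Dart VM tag)
// satisfies neither.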
bool Thread::IsExecutingDartCode() const {
  return (top_exit_frame_info() == 0) && VMTag::IsDartTag(vm_tag());
}

bool Thread::HasExitedDartCode() const {
  return (top_exit_frame_info() != 0) && !VMTag::IsDartTag(vm_tag());
}

template <class C>
C* Thread::AllocateReusableHandle() {
  C* handle = reinterpret_cast<C*>(reusable_handles_.AllocateScopedHandle());
  C::initializeHandle(handle, C::null());
  return handle;
}

void Thread::ClearReusableHandles() {
#define CLEAR_REUSABLE_HANDLE(object) *object##_handle_ = object::null();
  REUSABLE_HANDLE_LIST(CLEAR_REUSABLE_HANDLE)
#undef CLEAR_REUSABLE_HANDLE
}

void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
                                 ValidationPolicy validation_policy) {
  ASSERT(visitor != NULL);

  if (zone() != NULL) {
    zone()->VisitObjectPointers(visitor);
  }

  // Visit objects in thread specific handles area.
  reusable_handles_.VisitObjectPointers(visitor);

  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&pending_functions_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&global_object_pool_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_exception_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_stacktrace_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&sticky_error_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&async_stack_trace_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&ffi_callback_code_));

#if !defined(DART_PRECOMPILED_RUNTIME)
  if (interpreter() != NULL) {
    interpreter()->VisitObjectPointers(visitor);
  }
#endif

  // Visit the api local scope as it has all the api local handles.
  ApiLocalScope* scope = api_top_scope_;
  while (scope != NULL) {
    scope->local_handles()->VisitObjectPointers(visitor);
    scope = scope->previous();
  }

  // Only the mutator thread can run Dart code.
  if (IsMutatorThread()) {
    // The MarkTask, which calls this method, can run on a different thread. We
    // therefore assume the mutator is at a safepoint and we can iterate its
    // stack.
    // TODO(vm-team): It would be beneficial to be able to ask the mutator
    // thread whether it is in fact blocked at the moment (at a "safepoint") so
    // we can safely iterate its stack.
    //
    // Unfortunately we cannot use `this->IsAtSafepoint()` here because that
    // will return `false` even though the mutator thread is waiting for mark
    // tasks (which iterate its stack) to finish.
    const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
        StackFrameIterator::kAllowCrossThreadIteration;

    // Iterate over all the stack frames and visit objects on the stack.
    StackFrameIterator frames_iterator(top_exit_frame_info(), validation_policy,
                                       this, cross_thread_policy);
    StackFrame* frame = frames_iterator.NextFrame();
    while (frame != NULL) {
      frame->VisitObjectPointers(visitor);
      frame = frames_iterator.NextFrame();
    }
  } else {
    // We are not on the mutator thread.
    RELEASE_ASSERT(top_exit_frame_info() == 0);
  }
}

class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
 public:
  RestoreWriteBarrierInvariantVisitor(IsolateGroup* group,
                                      Thread* thread,
                                      Thread::RestoreWriteBarrierInvariantOp op)
      : ObjectPointerVisitor(group),
        thread_(thread),
        current_(Thread::Current()),
        op_(op) {}

  void VisitPointers(ObjectPtr* first, ObjectPtr* last) {
    for (; first != last + 1; first++) {
      ObjectPtr obj = *first;
      // Stores into new-space objects don't need a write barrier.
      if (obj->IsSmiOrNewObject()) continue;

      // To avoid adding too much work into the remembered set, skip
      // arrays. Write barrier elimination will not remove the barrier
      // if we can trigger GC between array allocation and store.
      if (obj->GetClassId() == kArrayCid) continue;

      // Dart code won't store into VM-internal objects except Contexts and
      // UnhandledExceptions. This assumption is checked by an assertion in
      // WriteBarrierElimination::UpdateVectorForBlock.
      if (!obj->IsDartInstance() && !obj->IsContext() &&
          !obj->IsUnhandledException())
        continue;

      // Dart code won't store into canonical instances.
      if (obj->ptr()->IsCanonical()) continue;

      // Objects in the VM isolate heap are immutable and won't be
      // stored into. Check this condition last because there's no bit
      // in the header for it.
      if (obj->ptr()->InVMIsolateHeap()) continue;

      switch (op_) {
        case Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet:
          if (!obj->ptr()->IsRemembered()) {
            obj->ptr()->AddToRememberedSet(current_);
          }
          if (current_->is_marking()) {
            current_->DeferredMarkingStackAddObject(obj);
          }
          break;
        case Thread::RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack:
          // Re-scan obj when finalizing marking.
          current_->DeferredMarkingStackAddObject(obj);
          break;
      }
    }
  }

 private:
  Thread* const thread_;
  Thread* const current_;
  Thread::RestoreWriteBarrierInvariantOp op_;
};

// Write barrier elimination assumes that all live temporaries will be
// in the remembered set after a scavenge triggered by a non-Dart-call
// instruction (see Instruction::CanCallDart()), and additionally they will be
// in the deferred marking stack if concurrent marking started. Specifically,
// this includes any instruction which will always create an exit frame
// below the current frame before any other Dart frames.
//
// Therefore, to support this assumption, we scan the stack after a scavenge
// or when concurrent marking begins and add all live temporaries in
// Dart frames preceding an exit frame to the store buffer or deferred
// marking stack.
void Thread::RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op) {
  ASSERT(IsAtSafepoint());
  ASSERT(IsMutatorThread());

  const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
      StackFrameIterator::kAllowCrossThreadIteration;
  StackFrameIterator frames_iterator(top_exit_frame_info(),
                                     ValidationPolicy::kDontValidateFrames,
                                     this, cross_thread_policy);
  RestoreWriteBarrierInvariantVisitor visitor(isolate_group(), this, op);
  bool scan_next_dart_frame = false;
  for (StackFrame* frame = frames_iterator.NextFrame(); frame != NULL;
       frame = frames_iterator.NextFrame()) {
    if (frame->IsExitFrame()) {
      scan_next_dart_frame = true;
    } else if (frame->IsDartFrame(/*validate=*/false)) {
      if (scan_next_dart_frame) {
        frame->VisitObjectPointers(&visitor);
      }
      scan_next_dart_frame = false;
    }
  }
}

void Thread::DeferredMarkLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack);
}

void Thread::RememberLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToRememberedSet);
}

bool Thread::CanLoadFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if the
  // [object] is in fact a [Code] object.
  if (object.IsCode()) {
#define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \
  if (object.raw() == expr) { \
    return true; \
  }
    CACHED_VM_STUBS_LIST(CHECK_OBJECT)
#undef CHECK_OBJECT
  }

  // For non-[Code] objects we check if the object equals any of the cached
  // non-stub entries.
#define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \
  if (object.raw() == expr) { \
    return true; \
  }
  CACHED_NON_VM_STUB_LIST(CHECK_OBJECT)
#undef CHECK_OBJECT
  return false;
}

intptr_t Thread::OffsetFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if the
  // [object] is in fact a [Code] object.
  if (object.IsCode()) {
#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
  ASSERT((expr)->ptr()->InVMIsolateHeap()); \
  if (object.raw() == expr) { \
    return Thread::member_name##offset(); \
  }
    CACHED_VM_STUBS_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET
  }

  // For non-[Code] objects we check if the object equals any of the cached
  // non-stub entries.
#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
  if (object.raw() == expr) { \
    return Thread::member_name##offset(); \
  }
  CACHED_NON_VM_STUB_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

  UNREACHABLE();
  return -1;
}

bool Thread::ObjectAtOffset(intptr_t offset, Object* object) {
  if (Isolate::Current() == Dart::vm_isolate()) {
    // --disassemble-stubs runs before all the references through
    // thread have targets.
    return false;
  }

#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
  if (Thread::member_name##offset() == offset) { \
    *object = expr; \
    return true; \
  }
  CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET
  return false;
}

intptr_t Thread::OffsetFromThread(const RuntimeEntry* runtime_entry) {
#define COMPUTE_OFFSET(name) \
  if (runtime_entry->function() == k##name##RuntimeEntry.function()) { \
    return Thread::name##_entry_point_offset(); \
  }
  RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

#define COMPUTE_OFFSET(returntype, name, ...) \
  if (runtime_entry->function() == k##name##RuntimeEntry.function()) { \
    return Thread::name##_entry_point_offset(); \
  }
  LEAF_RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

  UNREACHABLE();
  return -1;
}

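// The two DEBUG-only predicates below order the topmost C++ setjmp-based
// error handler against the topmost Dart exit frame on the native stack.
// Native stacks grow downwards, so the smaller address is the more recently
// pushed one; simulator and interpreter stacks live in separately allocated
// memory, which is why those configurations conservatively answer true.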
#if defined(DEBUG)
bool Thread::TopErrorHandlerIsSetJump() const {
  if (long_jump_base() == nullptr) return false;
  if (top_exit_frame_info_ == 0) return true;
#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
  // False positives: simulator stack and native stack are unordered.
  return true;
#else
#if !defined(DART_PRECOMPILED_RUNTIME)
  // False positives: interpreter stack and native stack are unordered.
  if ((interpreter_ != nullptr) && interpreter_->HasFrame(top_exit_frame_info_))
    return true;
#endif
  return reinterpret_cast<uword>(long_jump_base()) < top_exit_frame_info_;
#endif
}

bool Thread::TopErrorHandlerIsExitFrame() const {
  if (top_exit_frame_info_ == 0) return false;
  if (long_jump_base() == nullptr) return true;
#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
  // False positives: simulator stack and native stack are unordered.
  return true;
#else
#if !defined(DART_PRECOMPILED_RUNTIME)
  // False positives: interpreter stack and native stack are unordered.
  if ((interpreter_ != nullptr) && interpreter_->HasFrame(top_exit_frame_info_))
    return true;
#endif
  return top_exit_frame_info_ < reinterpret_cast<uword>(long_jump_base());
#endif
}
#endif  // defined(DEBUG)

bool Thread::IsValidHandle(Dart_Handle object) const {
  return IsValidLocalHandle(object) || IsValidZoneHandle(object) ||
         IsValidScopedHandle(object);
}

bool Thread::IsValidLocalHandle(Dart_Handle object) const {
  ApiLocalScope* scope = api_top_scope_;
  while (scope != NULL) {
    if (scope->local_handles()->IsValidHandle(object)) {
      return true;
    }
    scope = scope->previous();
  }
  return false;
}

intptr_t Thread::CountLocalHandles() const {
  intptr_t total = 0;
  ApiLocalScope* scope = api_top_scope_;
  while (scope != NULL) {
    total += scope->local_handles()->CountHandles();
    scope = scope->previous();
  }
  return total;
}

int Thread::ZoneSizeInBytes() const {
  int total = 0;
  ApiLocalScope* scope = api_top_scope_;
  while (scope != NULL) {
    total += scope->zone()->SizeInBytes();
    scope = scope->previous();
  }
  return total;
}

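// Scope entry and exit keep one ApiLocalScope cached in api_reusable_scope_,
// so the common Dart_EnterScope/Dart_ExitScope pairing does not allocate and
// free a scope on every use.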
void Thread::EnterApiScope() {
  ASSERT(MayAllocateHandles());
  ApiLocalScope* new_scope = api_reusable_scope();
  if (new_scope == NULL) {
    new_scope = new ApiLocalScope(api_top_scope(), top_exit_frame_info());
    ASSERT(new_scope != NULL);
  } else {
    new_scope->Reinit(this, api_top_scope(), top_exit_frame_info());
    set_api_reusable_scope(NULL);
  }
  set_api_top_scope(new_scope);  // New scope is now the top scope.
}

void Thread::ExitApiScope() {
  ASSERT(MayAllocateHandles());
  ApiLocalScope* scope = api_top_scope();
  ApiLocalScope* reusable_scope = api_reusable_scope();
  set_api_top_scope(scope->previous());  // Reset top scope to previous.
  if (reusable_scope == NULL) {
    scope->Reset(this);  // Reset the old scope which we just exited.
    set_api_reusable_scope(scope);
  } else {
    ASSERT(reusable_scope != scope);
    delete scope;
  }
}

void Thread::UnwindScopes(uword stack_marker) {
  // Unwind all scopes using the same stack_marker, i.e. all scopes allocated
  // under the same top_exit_frame_info.
  ApiLocalScope* scope = api_top_scope_;
  while (scope != NULL && scope->stack_marker() != 0 &&
         scope->stack_marker() == stack_marker) {
    api_top_scope_ = scope->previous();
    delete scope;
    scope = api_top_scope_;
  }
}

void Thread::EnterSafepointUsingLock() {
  isolate_group()->safepoint_handler()->EnterSafepointUsingLock(this);
}

void Thread::ExitSafepointUsingLock() {
  isolate_group()->safepoint_handler()->ExitSafepointUsingLock(this);
}

void Thread::BlockForSafepoint() {
  isolate_group()->safepoint_handler()->BlockForSafepoint(this);
}

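// FinishEntering and PrepareLeaving bracket the period during which a thread
// is scheduled on an isolate (group): they acquire and release the
// per-thread GC blocks (store buffer, marking stacks) so the shared GC state
// never observes a half-attached thread.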
void Thread::FinishEntering(TaskKind kind) {
  ASSERT(store_buffer_block_ == nullptr);

  task_kind_ = kind;
  if (isolate_group()->marking_stack() != NULL) {
    // Concurrent mark in progress. Enable barrier for this thread.
    MarkingStackAcquire();
    DeferredMarkingStackAcquire();
  }

  // TODO(koda): Use StoreBufferAcquire once we properly flush
  // before Scavenge.
  if (kind == kMutatorTask) {
    StoreBufferAcquire();
  } else {
    store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
  }
}

void Thread::PrepareLeaving() {
  ASSERT(store_buffer_block_ != nullptr);
  ASSERT(execution_state() == Thread::kThreadInVM);

  task_kind_ = kUnknownTask;
  if (is_marking()) {
    MarkingStackRelease();
    DeferredMarkingStackRelease();
  }
  StoreBufferRelease();
}

DisableThreadInterruptsScope::DisableThreadInterruptsScope(Thread* thread)
    : StackResource(thread) {
  if (thread != NULL) {
    OSThread* os_thread = thread->os_thread();
    ASSERT(os_thread != NULL);
    os_thread->DisableThreadInterrupts();
  }
}

DisableThreadInterruptsScope::~DisableThreadInterruptsScope() {
  if (thread() != NULL) {
    OSThread* os_thread = thread()->os_thread();
    ASSERT(os_thread != NULL);
    os_thread->EnableThreadInterrupts();
  }
}

const intptr_t kInitialCallbackIdsReserved = 1024;
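// A callback ID is simply the index of the callback's slot in
// ffi_callback_code_: the slot is reserved here (holding Code::null()) and
// filled in later by SetFfiCallbackCode.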
int32_t Thread::AllocateFfiCallbackId() {
  Zone* Z = isolate()->current_zone();
  if (ffi_callback_code_ == GrowableObjectArray::null()) {
    ffi_callback_code_ = GrowableObjectArray::New(kInitialCallbackIdsReserved);
  }
  const auto& array = GrowableObjectArray::Handle(Z, ffi_callback_code_);
  array.Add(Code::Handle(Z, Code::null()));
  const int32_t id = array.Length() - 1;

  // Allocate a native callback trampoline if necessary.
#if !defined(DART_PRECOMPILED_RUNTIME)
  if (NativeCallbackTrampolines::Enabled()) {
    auto* const tramps = isolate()->native_callback_trampolines();
    ASSERT(tramps->next_callback_id() == id);
    tramps->AllocateTrampoline();
  }
#endif

  return id;
}

void Thread::SetFfiCallbackCode(int32_t callback_id, const Code& code) {
  Zone* Z = isolate()->current_zone();

  /// In AOT the callback ID might have been allocated during compilation but
  /// 'ffi_callback_code_' is initialized to empty again when the program
  /// starts. Therefore we may need to initialize or expand it to accommodate
  /// the callback ID.

  if (ffi_callback_code_ == GrowableObjectArray::null()) {
    ffi_callback_code_ = GrowableObjectArray::New(kInitialCallbackIdsReserved);
  }

  const auto& array = GrowableObjectArray::Handle(Z, ffi_callback_code_);

  if (callback_id >= array.Length()) {
    if (callback_id >= array.Capacity()) {
      array.Grow(callback_id + 1);
    }
    array.SetLength(callback_id + 1);
  }

  array.SetAt(callback_id, code);
}


void Thread::VerifyCallbackIsolate(int32_t callback_id, uword entry) {
  NoSafepointScope _;

  const GrowableObjectArrayPtr array = ffi_callback_code_;
  if (array == GrowableObjectArray::null()) {
    FATAL("Cannot invoke callback on incorrect isolate.");
  }

  const SmiPtr length_smi = GrowableObjectArray::NoSafepointLength(array);
  const intptr_t length = Smi::Value(length_smi);

  if (callback_id < 0 || callback_id >= length) {
    FATAL("Cannot invoke callback on incorrect isolate.");
  }

  if (entry != 0) {
    ObjectPtr* const code_array =
        Array::DataOf(GrowableObjectArray::NoSafepointData(array));
    // RawCast allocates handles in ASSERTs.
    const CodePtr code = static_cast<CodePtr>(code_array[callback_id]);
    if (!Code::ContainsInstructionAt(code, entry)) {
      FATAL("Cannot invoke callback on incorrect isolate.");
    }
  }
}

}  // namespace dart