1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #ifndef RUNTIME_VM_ISOLATE_H_ |
6 | #define RUNTIME_VM_ISOLATE_H_ |
7 | |
8 | #if defined(SHOULD_NOT_INCLUDE_RUNTIME) |
9 | #error "Should not include runtime" |
10 | #endif |
11 | |
12 | #include <functional> |
13 | #include <memory> |
14 | #include <utility> |
15 | |
16 | #include "include/dart_api.h" |
17 | #include "platform/assert.h" |
18 | #include "platform/atomic.h" |
19 | #include "vm/base_isolate.h" |
20 | #include "vm/class_table.h" |
21 | #include "vm/constants_kbc.h" |
22 | #include "vm/dispatch_table.h" |
23 | #include "vm/exceptions.h" |
24 | #include "vm/field_table.h" |
25 | #include "vm/fixed_cache.h" |
26 | #include "vm/growable_array.h" |
27 | #include "vm/handles.h" |
28 | #include "vm/heap/verifier.h" |
29 | #include "vm/intrusive_dlist.h" |
30 | #include "vm/megamorphic_cache_table.h" |
31 | #include "vm/metrics.h" |
32 | #include "vm/os_thread.h" |
33 | #include "vm/random.h" |
34 | #include "vm/tags.h" |
35 | #include "vm/thread.h" |
36 | #include "vm/thread_pool.h" |
37 | #include "vm/thread_stack_resource.h" |
38 | #include "vm/token_position.h" |
39 | #include "vm/virtual_memory.h" |
40 | |
41 | #if !defined(DART_PRECOMPILED_RUNTIME) |
42 | #include "vm/ffi_callback_trampolines.h" |
43 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
44 | |
45 | namespace dart { |
46 | |
47 | // Forward declarations. |
48 | class ApiState; |
49 | class BackgroundCompiler; |
50 | class Capability; |
51 | class CodeIndexTable; |
52 | class Debugger; |
53 | class DeoptContext; |
54 | class ExternalTypedData; |
55 | class HandleScope; |
56 | class HandleVisitor; |
57 | class Heap; |
58 | class ICData; |
59 | #if !defined(DART_PRECOMPILED_RUNTIME) |
60 | class Interpreter; |
61 | #endif |
62 | class IsolateObjectStore; |
63 | class IsolateProfilerData; |
64 | class IsolateReloadContext; |
65 | class IsolateSpawnState; |
66 | class Log; |
67 | class Message; |
68 | class MessageHandler; |
69 | class MonitorLocker; |
70 | class Mutex; |
71 | class Object; |
72 | class ObjectIdRing; |
73 | class ObjectPointerVisitor; |
74 | class ObjectStore; |
75 | class PersistentHandle; |
76 | class RwLock; |
77 | class SafepointRwLock; |
78 | class SafepointHandler; |
79 | class SampleBuffer; |
80 | class SendPort; |
81 | class SerializedObjectBuffer; |
82 | class ServiceIdZone; |
83 | class Simulator; |
84 | class StackResource; |
85 | class StackZone; |
86 | class StoreBuffer; |
87 | class StubCode; |
88 | class ThreadRegistry; |
89 | class UserTag; |
90 | class WeakTable; |
91 | |
// Possible values of the null-safety mode flag:
//   kNullSafetyOptionUnspecified (0) - not specified
//   kNullSafetyOptionWeak        (1) - weak mode
//   kNullSafetyOptionStrong      (2) - strong mode
constexpr int kNullSafetyOptionUnspecified = 0;
constexpr int kNullSafetyOptionWeak = 1;
constexpr int kNullSafetyOptionStrong = 2;
// Command-line flag holding one of the values above; defined elsewhere.
extern int FLAG_sound_null_safety;
102 | |
103 | class PendingLazyDeopt { |
104 | public: |
105 | PendingLazyDeopt(uword fp, uword pc) : fp_(fp), pc_(pc) {} |
106 | uword fp() { return fp_; } |
107 | uword pc() { return pc_; } |
108 | void set_pc(uword pc) { pc_ = pc; } |
109 | |
110 | private: |
111 | uword fp_; |
112 | uword pc_; |
113 | }; |
114 | |
// Visitor interface used when enumerating isolates; subclasses override
// [VisitIsolate], which is invoked once per visited isolate.
class IsolateVisitor {
 public:
  IsolateVisitor() {}
  virtual ~IsolateVisitor() {}

  // Called once for each isolate being visited.
  virtual void VisitIsolate(Isolate* isolate) = 0;

 protected:
  // Returns true if |isolate| is the VM or service isolate.
  bool IsVMInternalIsolate(Isolate* isolate) const;

 private:
  DISALLOW_COPY_AND_ASSIGN(IsolateVisitor);
};
129 | |
// Abstract zero-argument callback interface (see LambdaCallable below for an
// adapter that wraps arbitrary lambdas).
class Callable : public ValueObject {
 public:
  Callable() {}
  virtual ~Callable() {}

  // Invokes the callback.
  virtual void Call() = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(Callable);
};
140 | |
141 | template <typename T> |
142 | class LambdaCallable : public Callable { |
143 | public: |
144 | explicit LambdaCallable(T& lambda) : lambda_(lambda) {} |
145 | void Call() { lambda_(); } |
146 | |
147 | private: |
148 | T& lambda_; |
149 | DISALLOW_COPY_AND_ASSIGN(LambdaCallable); |
150 | }; |
151 | |
// Disallow OOB message handling within this scope.
// RAII scope: constructor and destructor are defined out-of-line; presumably
// they toggle the thread's OOB-message-handling state — see isolate.cc.
class NoOOBMessageScope : public ThreadStackResource {
 public:
  explicit NoOOBMessageScope(Thread* thread);
  ~NoOOBMessageScope();

 private:
  DISALLOW_COPY_AND_ASSIGN(NoOOBMessageScope);
};
161 | |
// Disallow isolate reload.
// RAII scope: constructor and destructor are defined out-of-line and operate
// on [isolate_]; presumably they adjust a no-reload counter — see isolate.cc.
class NoReloadScope : public ThreadStackResource {
 public:
  NoReloadScope(Isolate* isolate, Thread* thread);
  ~NoReloadScope();

 private:
  // The isolate whose reloads are blocked for the lifetime of this scope.
  Isolate* isolate_;
  DISALLOW_COPY_AND_ASSIGN(NoReloadScope);
};
172 | |
173 | // Fixed cache for exception handler lookup. |
174 | typedef FixedCache<intptr_t, ExceptionHandlerInfo, 16> HandlerInfoCache; |
175 | // Fixed cache for catch entry state lookup. |
176 | typedef FixedCache<intptr_t, CatchEntryMovesRefPtr, 16> CatchEntryMovesCache; |
177 | |
178 | // List of Isolate flags with corresponding members of Dart_IsolateFlags and |
179 | // corresponding global command line flags. |
180 | // |
181 | // V(when, name, bit-name, Dart_IsolateFlags-name, command-line-flag-name) |
182 | // |
183 | #define ISOLATE_FLAG_LIST(V) \ |
184 | V(NONPRODUCT, asserts, EnableAsserts, enable_asserts, FLAG_enable_asserts) \ |
185 | V(NONPRODUCT, use_field_guards, UseFieldGuards, use_field_guards, \ |
186 | FLAG_use_field_guards) \ |
187 | V(NONPRODUCT, use_osr, UseOsr, use_osr, FLAG_use_osr) \ |
188 | V(PRECOMPILER, obfuscate, Obfuscate, obfuscate, false_by_default) |
189 | |
190 | // Represents the information used for spawning the first isolate within an |
191 | // isolate group. |
192 | // |
193 | // Any subsequent isolates created via `Isolate.spawn()` will be created using |
194 | // the same [IsolateGroupSource] (the object itself is shared among all isolates |
195 | // within the same group). |
196 | // |
197 | // Issue(http://dartbug.com/36097): It is still possible to run into issues if |
198 | // an isolate has spawned another one and then loads more code into the first |
199 | // one, which the latter will not get. Though it makes the status quo better |
200 | // than what we had before (where the embedder needed to maintain the |
201 | // same-source guarantee). |
202 | // |
203 | // => This is only the first step towards having multiple isolates share the |
204 | // same heap (and therefore the same program structure). |
205 | // |
206 | class IsolateGroupSource { |
207 | public: |
208 | IsolateGroupSource(const char* script_uri, |
209 | const char* name, |
210 | const uint8_t* snapshot_data, |
211 | const uint8_t* snapshot_instructions, |
212 | const uint8_t* kernel_buffer, |
213 | intptr_t kernel_buffer_size, |
214 | Dart_IsolateFlags flags) |
215 | : script_uri(script_uri), |
216 | name(Utils::StrDup(name)), |
217 | snapshot_data(snapshot_data), |
218 | snapshot_instructions(snapshot_instructions), |
219 | kernel_buffer(kernel_buffer), |
220 | kernel_buffer_size(kernel_buffer_size), |
221 | flags(flags), |
222 | script_kernel_buffer(nullptr), |
223 | script_kernel_size(-1), |
224 | loaded_blobs_(nullptr), |
225 | num_blob_loads_(0) {} |
226 | ~IsolateGroupSource() { free(name); } |
227 | |
228 | void add_loaded_blob(Zone* zone_, |
229 | const ExternalTypedData& external_typed_data); |
230 | |
231 | // The arguments used for spawning in |
232 | // `Dart_CreateIsolateGroupFromKernel` / `Dart_CreateIsolate`. |
233 | const char* script_uri; |
234 | char* name; |
235 | const uint8_t* snapshot_data; |
236 | const uint8_t* snapshot_instructions; |
237 | const uint8_t* kernel_buffer; |
238 | const intptr_t kernel_buffer_size; |
239 | Dart_IsolateFlags flags; |
240 | |
241 | // The kernel buffer used in `Dart_LoadScriptFromKernel`. |
242 | const uint8_t* script_kernel_buffer; |
243 | intptr_t script_kernel_size; |
244 | |
245 | // During AppJit training we perform a permutation of the class ids before |
246 | // invoking the "main" script. |
247 | // Any newly spawned isolates need to use this permutation map. |
248 | std::unique_ptr<intptr_t[]> cid_permutation_map; |
249 | |
250 | // List of weak pointers to external typed data for loaded blobs. |
251 | ArrayPtr loaded_blobs_; |
252 | intptr_t num_blob_loads_; |
253 | }; |
254 | |
// Tracks idle time and notifies heap when idle time expired.
class IdleTimeHandler : public ValueObject {
 public:
  IdleTimeHandler() {}

  // Initializes the idle time handler with the given [heap], to which
  // idle notifications will be sent.
  void InitializeWithHeap(Heap* heap);

  // Returns whether the caller should check for idle timeouts.
  bool ShouldCheckForIdle();

  // Declares that the idle time should be reset to now.
  void UpdateStartIdleTime();

  // Returns whether idle time expired and [NotifyIdle] should be called.
  bool ShouldNotifyIdle(int64_t* expiry);

  // Notifies the heap that now is a good time to do compactions and indicates
  // we have time for the GC until [deadline].
  void NotifyIdle(int64_t deadline);

  // Calls [NotifyIdle] with the default deadline.
  void NotifyIdleUsingDefaultDeadline();

 private:
  // DisableIdleTimerScope manipulates [disabled_counter_] directly.
  friend class DisableIdleTimerScope;

  // Presumably guards the fields below (methods are defined out-of-line) —
  // confirm against isolate.cc.
  Mutex mutex_;
  // Heap to notify; set by [InitializeWithHeap].
  Heap* heap_ = nullptr;
  // Non-zero while idle notifications are disabled (see DisableIdleTimerScope).
  intptr_t disabled_counter_ = 0;
  // Timestamp set by [UpdateStartIdleTime].
  int64_t idle_start_time_ = 0;
};
288 | |
289 | // Disables firing of the idle timer while this object is alive. |
290 | class DisableIdleTimerScope : public ValueObject { |
291 | public: |
292 | explicit DisableIdleTimerScope(IdleTimeHandler* handler); |
293 | ~DisableIdleTimerScope(); |
294 | |
295 | private: |
296 | IdleTimeHandler* handler_; |
297 | }; |
298 | |
// Thread pool used for the mutator threads of an isolate group. When the pool
// goes idle it forwards an idle notification (see NotifyIdle) on behalf of
// [isolate_group_].
class MutatorThreadPool : public ThreadPool {
 public:
  MutatorThreadPool(IsolateGroup* isolate_group, intptr_t max_pool_size)
      : ThreadPool(max_pool_size), isolate_group_(isolate_group) {}
  virtual ~MutatorThreadPool() {}

 protected:
  // Presumably overrides a ThreadPool hook invoked when the pool becomes idle
  // (base class not visible here — confirm and mark `override` if so).
  virtual void OnEnterIdleLocked(MonitorLocker* ml);

 private:
  // Defined out-of-line; see isolate.cc.
  void NotifyIdle();

  // Back-pointer to the owning group; not owned.
  IsolateGroup* isolate_group_ = nullptr;
};
313 | |
// Represents an isolate group and is shared among all isolates within a group.
class IsolateGroup : public IntrusiveDListEntry<IsolateGroup> {
 public:
  // The second constructor creates the group without a pre-existing
  // [ObjectStore].
  IsolateGroup(std::shared_ptr<IsolateGroupSource> source,
               void* embedder_data,
               ObjectStore* object_store);
  IsolateGroup(std::shared_ptr<IsolateGroupSource> source, void* embedder_data);
  ~IsolateGroup();

  // The spawn information this group (and all its isolates) was created from.
  IsolateGroupSource* source() const { return source_.get(); }
  std::shared_ptr<IsolateGroupSource> shareable_source() const {
    return source_;
  }
  // Opaque embedder-supplied pointer associated with this group.
  void* embedder_data() const { return embedder_data_; }

  bool initial_spawn_successful() { return initial_spawn_successful_; }
  void set_initial_spawn_successful() { initial_spawn_successful_ = true; }

  // Heap shared by all isolates of this group.
  Heap* heap() const { return heap_.get(); }

  IdleTimeHandler* idle_time_handler() { return &idle_time_handler_; }

  // Register/unregister an isolate with this group.
  // NOTE(review): an earlier comment here claimed a boolean result, but
  // [RegisterIsolate] returns void — confirm the intended contract.
  void RegisterIsolate(Isolate* isolate);
  void RegisterIsolateLocked(Isolate* isolate);
  void UnregisterIsolate(Isolate* isolate);
  // Returns `true` if this was the last isolate and the caller is responsible
  // for deleting the isolate group.
  bool UnregisterIsolateDecrementCount(Isolate* isolate);

  bool ContainsOnlyOneIsolate();

  // Runs [fun] with the group locked.
  void RunWithLockedGroup(std::function<void()> fun);

  // Thread management shared by all isolates of the group.
  Monitor* threads_lock() const;
  ThreadRegistry* thread_registry() const { return thread_registry_.get(); }
  SafepointHandler* safepoint_handler() { return safepoint_handler_.get(); }

  void CreateHeap(bool is_vm_isolate, bool is_service_or_kernel_isolate);
  void Shutdown();

// Generated accessors for the group-level metrics declared below.
#define ISOLATE_METRIC_ACCESSOR(type, variable, name, unit)                    \
  type* Get##variable##Metric() { return &metric_##variable##_; }
  ISOLATE_GROUP_METRIC_LIST(ISOLATE_METRIC_ACCESSOR);
#undef ISOLATE_METRIC_ACCESSOR

#if !defined(PRODUCT)
  // Timestamps (ms) of the last allocation-profile operations via the service.
  void UpdateLastAllocationProfileAccumulatorResetTimestamp() {
    last_allocationprofile_accumulator_reset_timestamp_ =
        OS::GetCurrentTimeMillis();
  }

  int64_t last_allocationprofile_accumulator_reset_timestamp() const {
    return last_allocationprofile_accumulator_reset_timestamp_;
  }

  void UpdateLastAllocationProfileGCTimestamp() {
    last_allocationprofile_gc_timestamp_ = OS::GetCurrentTimeMillis();
  }

  int64_t last_allocationprofile_gc_timestamp() const {
    return last_allocationprofile_gc_timestamp_;
  }
#endif  // !defined(PRODUCT)

  DispatchTable* dispatch_table() const { return dispatch_table_.get(); }
  // Takes ownership of [table].
  void set_dispatch_table(DispatchTable* table) {
    dispatch_table_.reset(table);
  }

  SharedClassTable* shared_class_table() const {
    return shared_class_table_.get();
  }
  StoreBuffer* store_buffer() const { return store_buffer_.get(); }
  ClassTable* class_table() const { return class_table_.get(); }
  ObjectStore* object_store() const { return object_store_.get(); }
  // Locks guarding group-wide VM data structures.
  SafepointRwLock* symbols_lock() { return symbols_lock_.get(); }
  Mutex* type_canonicalization_mutex() { return &type_canonicalization_mutex_; }
  Mutex* type_arguments_canonicalization_mutex() {
    return &type_arguments_canonicalization_mutex_;
  }
  Mutex* subtype_test_cache_mutex() { return &subtype_test_cache_mutex_; }

#if defined(DART_PRECOMPILED_RUNTIME)
  Mutex* unlinked_call_map_mutex() { return &unlinked_call_map_mutex_; }
#endif

#if !defined(DART_PRECOMPILED_RUNTIME)
  Mutex* initializer_functions_mutex() { return &initializer_functions_mutex_; }
#endif  // !defined(DART_PRECOMPILED_RUNTIME)

  // Group of the current thread, or nullptr if no thread is scheduled.
  static inline IsolateGroup* Current() {
    Thread* thread = Thread::Current();
    return thread == nullptr ? nullptr : thread->isolate_group();
  }

  // Scheduling/unscheduling of threads on behalf of this group's isolates.
  Thread* ScheduleThreadLocked(MonitorLocker* ml,
                               Thread* existing_mutator_thread,
                               bool is_vm_isolate,
                               bool is_mutator,
                               bool bypass_safepoint = false);
  void UnscheduleThreadLocked(MonitorLocker* ml,
                              Thread* thread,
                              bool is_mutator,
                              bool bypass_safepoint = false);

  Thread* ScheduleThread(bool bypass_safepoint = false);
  void UnscheduleThread(Thread* thread,
                        bool is_mutator,
                        bool bypass_safepoint = false);

  // Bookkeeping for limiting the number of concurrently active mutators
  // (see [active_mutators_] / [max_active_mutators_] below).
  void IncreaseMutatorCount(Isolate* mutator);
  void DecreaseMutatorCount(Isolate* mutator);

  // Embedder callbacks shared by all isolates of the group.
  Dart_LibraryTagHandler library_tag_handler() const {
    return library_tag_handler_;
  }
  void set_library_tag_handler(Dart_LibraryTagHandler handler) {
    library_tag_handler_ = handler;
  }
  Dart_DeferredLoadHandler deferred_load_handler() const {
    return deferred_load_handler_;
  }
  void set_deferred_load_handler(Dart_DeferredLoadHandler handler) {
    deferred_load_handler_ = handler;
  }

  intptr_t GetClassSizeForHeapWalkAt(intptr_t cid);

  // Prepares all threads in an isolate for Garbage Collection.
  void ReleaseStoreBuffers();
  void EnableIncrementalBarrier(MarkingStack* marking_stack,
                                MarkingStack* deferred_marking_stack);
  void DisableIncrementalBarrier();

  // Set via [EnableIncrementalBarrier], cleared via [DisableIncrementalBarrier].
  MarkingStack* marking_stack() const { return marking_stack_; }
  MarkingStack* deferred_marking_stack() const {
    return deferred_marking_stack_;
  }

  // Runs the given [function] on every isolate in the isolate group.
  //
  // During the duration of this function, no new isolates can be added or
  // removed.
  //
  // If [at_safepoint] is `true`, then the entire isolate group must be in a
  // safepoint. There is therefore no reason to guard against other threads
  // adding/removing isolates, so no locks will be held.
  void ForEachIsolate(std::function<void(Isolate* isolate)> function,
                      bool at_safepoint = false);
  Isolate* FirstIsolate() const;
  Isolate* FirstIsolateLocked() const;

  // Ensures mutators are stopped during execution of the provided function.
  //
  // If the current thread is the only mutator in the isolate group,
  // [single_current_mutator] will be called. Otherwise [otherwise] will be
  // called inside a [SafepointOperationsScope] (or
  // [ForceGrowthSafepointOperationScope] if [use_force_growth_in_otherwise]
  // is set).
  //
  // During the duration of this function, no new isolates can be added to the
  // isolate group.
  void RunWithStoppedMutatorsCallable(
      Callable* single_current_mutator,
      Callable* otherwise,
      bool use_force_growth_in_otherwise = false);

  // Convenience wrapper accepting lambdas instead of [Callable]s.
  template <typename T, typename S>
  void RunWithStoppedMutators(T single_current_mutator,
                              S otherwise,
                              bool use_force_growth_in_otherwise = false) {
    LambdaCallable<T> single_callable(single_current_mutator);
    LambdaCallable<S> otherwise_callable(otherwise);
    RunWithStoppedMutatorsCallable(&single_callable, &otherwise_callable,
                                   use_force_growth_in_otherwise);
  }

  // Same as above but runs [function] in both cases.
  template <typename T>
  void RunWithStoppedMutators(T function, bool use_force_growth = false) {
    LambdaCallable<T> callable(function);
    RunWithStoppedMutatorsCallable(&callable, &callable, use_force_growth);
  }

#ifndef PRODUCT
  // Service-protocol (JSON) output for this group.
  void PrintJSON(JSONStream* stream, bool ref = true);
  void PrintToJSONObject(JSONObject* jsobj, bool ref);

  // Creates an object with the total heap memory usage statistics for this
  // isolate group.
  void PrintMemoryUsageJSON(JSONStream* stream);
#endif

#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
  // By default the reload context is deleted. This parameter allows
  // the caller to delete it separately if it is still needed.
  bool ReloadSources(JSONStream* js,
                     bool force_reload,
                     const char* root_script_url = nullptr,
                     const char* packages_url = nullptr,
                     bool dont_delete_reload_context = false);

  // If provided, the VM takes ownership of kernel_buffer.
  bool ReloadKernel(JSONStream* js,
                    bool force_reload,
                    const uint8_t* kernel_buffer = nullptr,
                    intptr_t kernel_buffer_size = 0,
                    bool dont_delete_reload_context = false);

  void set_last_reload_timestamp(int64_t value) {
    last_reload_timestamp_ = value;
  }
  int64_t last_reload_timestamp() const { return last_reload_timestamp_; }

  IsolateGroupReloadContext* reload_context() {
    return group_reload_context_.get();
  }

  void DeleteReloadContext();

  // A reload is in progress exactly while a reload context exists.
  bool IsReloading() const { return group_reload_context_ != nullptr; }
#endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)

  // Id of this group.
  uint64_t id() { return id_; }

  // Global initialization/teardown of the isolate-group infrastructure.
  static void Init();
  static void Cleanup();

  // Runs [action] on every existing isolate group.
  static void ForEach(std::function<void(IsolateGroup*)> action);
  // Runs [action] on the group with the given [id]; runs [not_found] if no
  // such group exists.
  static void RunWithIsolateGroup(uint64_t id,
                                  std::function<void(IsolateGroup*)> action,
                                  std::function<void()> not_found);

  // Manage list of existing isolate groups.
  static void RegisterIsolateGroup(IsolateGroup* isolate_group);
  static void UnregisterIsolateGroup(IsolateGroup* isolate_group);

  static bool HasApplicationIsolateGroups();
  static bool HasOnlyVMIsolateGroup();
  static bool IsVMInternalIsolateGroup(const IsolateGroup* group);

  int64_t UptimeMicros() const;

  ApiState* api_state() const { return api_state_.get(); }

  // Visit all object pointers. Caller must ensure concurrent sweeper is not
  // running, and the visitor must not allocate.
  void VisitObjectPointers(ObjectPointerVisitor* visitor,
                           ValidationPolicy validate_frames);
  void VisitStackPointers(ObjectPointerVisitor* visitor,
                          ValidationPolicy validate_frames);
  void VisitObjectIdRingPointers(ObjectPointerVisitor* visitor);
  void VisitWeakPersistentHandles(HandleVisitor* visitor);

  // Whether a heap compaction is in progress; stored as a bit in
  // [isolate_group_flags_].
  bool compaction_in_progress() const {
    return CompactionInProgressBit::decode(isolate_group_flags_);
  }
  void set_compaction_in_progress(bool value) {
    isolate_group_flags_ =
        CompactionInProgressBit::update(value, isolate_group_flags_);
  }

  uword FindPendingDeoptAtSafepoint(uword fp);

  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

  ArrayPtr saved_unlinked_calls() const { return saved_unlinked_calls_; }
  void set_saved_unlinked_calls(const Array& saved_unlinked_calls);

  // Saved initial static field table; presumably used when spawning further
  // isolates into this group — confirm against callers.
  FieldTable* saved_initial_field_table() const {
    return saved_initial_field_table_.get();
  }
  std::shared_ptr<FieldTable> saved_initial_field_table_shareable() {
    return saved_initial_field_table_;
  }
  void set_saved_initial_field_table(std::shared_ptr<FieldTable> field_table) {
    saved_initial_field_table_ = field_table;
  }

  MutatorThreadPool* thread_pool() { return thread_pool_.get(); }

 private:
  friend class Dart;  // For `object_store_ = ` in Dart::Init
  friend class Heap;
  friend class StackFrame;  // For `[isolates_].First()`.
  // For `object_store_shared_ptr()`, `class_table_shared_ptr()`
  friend class Isolate;

#define ISOLATE_GROUP_FLAG_BITS(V) V(CompactionInProgress)

  // Isolate specific flags.
  enum FlagBits {
#define DECLARE_BIT(Name) k##Name##Bit,
    ISOLATE_GROUP_FLAG_BITS(DECLARE_BIT)
#undef DECLARE_BIT
  };

#define DECLARE_BITFIELD(Name)                                                 \
  class Name##Bit : public BitField<uint32_t, bool, k##Name##Bit, 1> {};
  ISOLATE_GROUP_FLAG_BITS(DECLARE_BITFIELD)
#undef DECLARE_BITFIELD

  void set_heap(std::unique_ptr<Heap> value);

  // Shared-pointer accessors handed out only to the friend classes above.
  const std::shared_ptr<ClassTable>& class_table_shared_ptr() const {
    return class_table_;
  }
  const std::shared_ptr<ObjectStore>& object_store_shared_ptr() const {
    return object_store_;
  }

  bool is_vm_isolate_heap_ = false;
  void* embedder_data_ = nullptr;

  IdleTimeHandler idle_time_handler_;
  std::unique_ptr<MutatorThreadPool> thread_pool_;
  // Presumably guards [isolates_]; see [ForEachIsolate] — confirm.
  std::unique_ptr<SafepointRwLock> isolates_lock_;
  IntrusiveDList<Isolate> isolates_;
  intptr_t isolate_count_ = 0;
  bool initial_spawn_successful_ = false;
  Dart_LibraryTagHandler library_tag_handler_ = nullptr;
  Dart_DeferredLoadHandler deferred_load_handler_ = nullptr;
  int64_t start_time_micros_;

#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
  int64_t last_reload_timestamp_;
  std::shared_ptr<IsolateGroupReloadContext> group_reload_context_;
#endif

// Storage for the group-level metrics (accessors generated above).
#define ISOLATE_METRIC_VARIABLE(type, variable, name, unit)                    \
  type metric_##variable##_;
  ISOLATE_GROUP_METRIC_LIST(ISOLATE_METRIC_VARIABLE);
#undef ISOLATE_METRIC_VARIABLE

#if !defined(PRODUCT)
  // Timestamps of last operation via service.
  int64_t last_allocationprofile_accumulator_reset_timestamp_ = 0;
  int64_t last_allocationprofile_gc_timestamp_ = 0;

#endif  // !defined(PRODUCT)

  MarkingStack* marking_stack_ = nullptr;
  MarkingStack* deferred_marking_stack_ = nullptr;
  std::shared_ptr<IsolateGroupSource> source_;
  std::unique_ptr<ApiState> api_state_;
  std::unique_ptr<ThreadRegistry> thread_registry_;
  std::unique_ptr<SafepointHandler> safepoint_handler_;

  // Global registry of all isolate groups.
  static RwLock* isolate_groups_rwlock_;
  static IntrusiveDList<IsolateGroup>* isolate_groups_;
  static Random* isolate_group_random_;

  uint64_t id_ = 0;

  std::unique_ptr<SharedClassTable> shared_class_table_;
  std::shared_ptr<ObjectStore> object_store_;  // nullptr in JIT mode
  std::shared_ptr<ClassTable> class_table_;    // nullptr in JIT mode
  std::unique_ptr<StoreBuffer> store_buffer_;
  std::unique_ptr<Heap> heap_;
  std::unique_ptr<DispatchTable> dispatch_table_;
  ArrayPtr saved_unlinked_calls_;
  std::shared_ptr<FieldTable> saved_initial_field_table_;
  uint32_t isolate_group_flags_ = 0;

  std::unique_ptr<SafepointRwLock> symbols_lock_;
  Mutex type_canonicalization_mutex_;
  Mutex type_arguments_canonicalization_mutex_;
  Mutex subtype_test_cache_mutex_;

#if defined(DART_PRECOMPILED_RUNTIME)
  Mutex unlinked_call_map_mutex_;
#endif

#if !defined(DART_PRECOMPILED_RUNTIME)
  Mutex initializer_functions_mutex_;
#endif  // !defined(DART_PRECOMPILED_RUNTIME)

  // Allow us to ensure the number of active mutators is limited by a maximum.
  std::unique_ptr<Monitor> active_mutators_monitor_;
  intptr_t active_mutators_ = 0;
  intptr_t waiting_mutators_ = 0;
  intptr_t max_active_mutators_ = 0;
};
698 | |
699 | // When an isolate sends-and-exits this class represent things that it passed |
700 | // to the beneficiary. |
701 | class Bequest { |
702 | public: |
703 | Bequest(PersistentHandle* handle, Dart_Port beneficiary) |
704 | : handle_(handle), beneficiary_(beneficiary) {} |
705 | ~Bequest(); |
706 | |
707 | PersistentHandle* handle() { return handle_; } |
708 | Dart_Port beneficiary() { return beneficiary_; } |
709 | |
710 | private: |
711 | PersistentHandle* handle_; |
712 | Dart_Port beneficiary_; |
713 | }; |
714 | |
715 | class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> { |
716 | public: |
717 | // Keep both these enums in sync with isolate_patch.dart. |
718 | // The different Isolate API message types. |
719 | enum LibMsgId { |
720 | kPauseMsg = 1, |
721 | kResumeMsg = 2, |
722 | kPingMsg = 3, |
723 | kKillMsg = 4, |
724 | kAddExitMsg = 5, |
725 | kDelExitMsg = 6, |
726 | kAddErrorMsg = 7, |
727 | kDelErrorMsg = 8, |
728 | kErrorFatalMsg = 9, |
729 | |
730 | // Internal message ids. |
731 | kInterruptMsg = 10, // Break in the debugger. |
732 | kInternalKillMsg = 11, // Like kill, but does not run exit listeners, etc. |
733 | kLowMemoryMsg = 12, // Run compactor, etc. |
734 | kDrainServiceExtensionsMsg = 13, // Invoke pending service extensions |
735 | }; |
736 | // The different Isolate API message priorities for ping and kill messages. |
737 | enum LibMsgPriority { |
738 | kImmediateAction = 0, |
739 | kBeforeNextEventAction = 1, |
740 | kAsEventAction = 2 |
741 | }; |
742 | |
743 | ~Isolate(); |
744 | |
745 | static inline Isolate* Current() { |
746 | Thread* thread = Thread::Current(); |
747 | return thread == nullptr ? nullptr : thread->isolate(); |
748 | } |
749 | |
750 | // Register a newly introduced class. |
751 | void RegisterClass(const Class& cls); |
752 | #if defined(DEBUG) |
753 | void ValidateClassTable(); |
754 | #endif |
755 | // Register a newly introduced static field. |
756 | void RegisterStaticField(const Field& field); |
757 | |
758 | void RehashConstants(); |
759 | #if defined(DEBUG) |
760 | void ValidateConstants(); |
761 | #endif |
762 | |
763 | ThreadRegistry* thread_registry() const { return group()->thread_registry(); } |
764 | |
765 | SafepointHandler* safepoint_handler() const { |
766 | return group()->safepoint_handler(); |
767 | } |
768 | |
769 | ClassTable* class_table() { return class_table_.get(); } |
770 | |
771 | ClassPtr* cached_class_table_table() { return cached_class_table_table_; } |
772 | void set_cached_class_table_table(ClassPtr* cached_class_table_table) { |
773 | cached_class_table_table_ = cached_class_table_table; |
774 | } |
775 | static intptr_t cached_class_table_table_offset() { |
776 | return OFFSET_OF(Isolate, cached_class_table_table_); |
777 | } |
778 | |
779 | SharedClassTable* shared_class_table() const { return shared_class_table_; } |
780 | // Used during isolate creation to re-register isolate with right group. |
781 | void set_shared_class_table(SharedClassTable* table) { |
782 | shared_class_table_ = table; |
783 | } |
784 | // Used by the generated code. |
785 | static intptr_t shared_class_table_offset() { |
786 | return OFFSET_OF(Isolate, shared_class_table_); |
787 | } |
788 | |
789 | ObjectStore* object_store() const { return object_store_shared_ptr_.get(); } |
790 | void set_object_store(ObjectStore* object_store); |
  // Offset of the cached ObjectStore pointer, for generated-code access.
  static intptr_t cached_object_store_offset() {
    return OFFSET_OF(Isolate, cached_object_store_);
  }

  FieldTable* field_table() const { return field_table_; }
  // Replaces this isolate's field table (deleting the old one) and refreshes
  // the cached table pointer on thread [T].
  // NOTE(review): assumes [field_table] is non-null and distinct from the
  // current field_table_ (self-assignment would read freed memory) — confirm
  // all callers satisfy this.
  void set_field_table(Thread* T, FieldTable* field_table) {
    delete field_table_;
    field_table_ = field_table;
    T->field_table_values_ = field_table->table();
  }

  IsolateObjectStore* isolate_object_store() const {
    return isolate_object_store_.get();
  }

  // Prefers old classes when we are in the middle of a reload.
  ClassPtr GetClassForHeapWalkAt(intptr_t cid);

  // Offset of the IC-miss stub pointer, for generated-code access.
  static intptr_t ic_miss_code_offset() {
    return OFFSET_OF(Isolate, ic_miss_code_);
  }

  Dart_MessageNotifyCallback message_notify_callback() const {
    return message_notify_callback_;
  }

  void set_message_notify_callback(Dart_MessageNotifyCallback value) {
    message_notify_callback_ = value;
  }

  // Takes ownership of [bequest], to be handed off when this isolate exits.
  void bequeath(std::unique_ptr<Bequest> bequest) {
    bequest_ = std::move(bequest);
  }

  IsolateGroupSource* source() const { return isolate_group_->source(); }
  IsolateGroup* group() const { return isolate_group_; }

  bool HasPendingMessages();

  Thread* mutator_thread() const;

  const char* name() const { return name_; }
  void set_name(const char* name);

  int64_t UptimeMicros() const;

  Dart_Port main_port() const { return main_port_; }
  void set_main_port(Dart_Port port) {
    ASSERT(main_port_ == 0);  // Only set main port once.
    main_port_ = port;
  }
  Dart_Port origin_id();
  void set_origin_id(Dart_Port id);
  // Capabilities used to authorize pause/terminate requests; see
  // VerifyPauseCapability / VerifyTerminateCapability below.
  void set_pause_capability(uint64_t value) { pause_capability_ = value; }
  uint64_t pause_capability() const { return pause_capability_; }
  void set_terminate_capability(uint64_t value) {
    terminate_capability_ = value;
  }
  uint64_t terminate_capability() const { return terminate_capability_; }

  void SendInternalLibMessage(LibMsgId msg_id, uint64_t capability);

  // The heap is owned by the isolate group, not by this isolate.
  Heap* heap() const { return isolate_group_->heap(); }

  void set_init_callback_data(void* value) { init_callback_data_ = value; }
  void* init_callback_data() const { return init_callback_data_; }
857 | |
#if !defined(DART_PRECOMPILED_RUNTIME)
  // Trampolines for native (FFI) callbacks; only present in the JIT runtime.
  NativeCallbackTrampolines* native_callback_trampolines() {
    return &native_callback_trampolines_;
  }
#endif

  Dart_EnvironmentCallback environment_callback() const {
    return environment_callback_;
  }
  void set_environment_callback(Dart_EnvironmentCallback value) {
    environment_callback_ = value;
  }

  // Library tag / deferred-load handlers live on the isolate group.
  bool HasTagHandler() const {
    return group()->library_tag_handler() != nullptr;
  }
  ObjectPtr CallTagHandler(Dart_LibraryTag tag,
                           const Object& arg1,
                           const Object& arg2);
  bool HasDeferredLoadHandler() const {
    return group()->deferred_load_handler() != nullptr;
  }
  ObjectPtr CallDeferredLoadHandler(intptr_t id);

  void SetupImagePage(const uint8_t* snapshot_buffer, bool is_executable);

  void ScheduleInterrupts(uword interrupt_bits);

  // NOTE(review): presumably returns nullptr on success and an error message
  // otherwise — confirm against the implementation in isolate.cc.
  const char* MakeRunnable();
  void Run();

  MessageHandler* message_handler() const { return message_handler_; }
  void set_message_handler(MessageHandler* value) { message_handler_ = value; }

  bool is_runnable() const { return IsRunnableBit::decode(isolate_flags_); }
  void set_is_runnable(bool value) {
    isolate_flags_ = IsRunnableBit::update(value, isolate_flags_);
#if !defined(PRODUCT)
    // Becoming runnable counts as a resume for pause bookkeeping.
    if (is_runnable()) {
      set_last_resume_timestamp();
    }
#endif
  }

  IsolateSpawnState* spawn_state() const { return spawn_state_.get(); }
  void set_spawn_state(std::unique_ptr<IsolateSpawnState> value) {
    spawn_state_ = std::move(value);
  }

  Mutex* mutex() { return &mutex_; }
  Mutex* constant_canonicalization_mutex() {
    return &constant_canonicalization_mutex_;
  }
  Mutex* megamorphic_mutex() { return &megamorphic_mutex_; }

  Mutex* kernel_data_lib_cache_mutex() { return &kernel_data_lib_cache_mutex_; }
  Mutex* kernel_data_class_cache_mutex() {
    return &kernel_data_class_cache_mutex_;
  }

  // Any access to constants arrays must be locked since mutator and
  // background compiler can access the arrays at the same time.
  Mutex* kernel_constants_mutex() { return &kernel_constants_mutex_; }
921 | |
#if !defined(PRODUCT)
  Debugger* debugger() const {
    return debugger_;
  }

  // Single-stepping flag; read directly by generated code via
  // single_step_offset().
  void set_single_step(bool value) { single_step_ = value; }
  bool single_step() const { return single_step_; }
  static intptr_t single_step_offset() {
    return OFFSET_OF(Isolate, single_step_);
  }

  bool ResumeRequest() const {
    return ResumeRequestBit::decode(isolate_flags_);
  }
  // Lets the embedder know that a service message resulted in a resume request.
  void SetResumeRequest() {
    isolate_flags_ = ResumeRequestBit::update(true, isolate_flags_);
    set_last_resume_timestamp();
  }

  // Records "now" (wall-clock millis) as the most recent resume time.
  void set_last_resume_timestamp() {
    last_resume_timestamp_ = OS::GetCurrentTimeMillis();
  }

  int64_t last_resume_timestamp() const { return last_resume_timestamp_; }

  // Returns whether the vm service has requested that the debugger
  // resume execution. Clears the request bit as a side effect.
  bool GetAndClearResumeRequest() {
    bool resume_request = ResumeRequestBit::decode(isolate_flags_);
    isolate_flags_ = ResumeRequestBit::update(false, isolate_flags_);
    return resume_request;
  }
#endif
956 | |
  // Verify that the sender has the capability to pause or terminate the
  // isolate.
  bool VerifyPauseCapability(const Object& capability) const;
  bool VerifyTerminateCapability(const Object& capability) const;

  // Returns true if the capability was added or removed from this isolate's
  // list of pause events.
  bool AddResumeCapability(const Capability& capability);
  bool RemoveResumeCapability(const Capability& capability);

  void AddExitListener(const SendPort& listener, const Instance& response);
  void RemoveExitListener(const SendPort& listener);
  void NotifyExitListeners();

  void AddErrorListener(const SendPort& listener);
  void RemoveErrorListener(const SendPort& listener);
  bool NotifyErrorListeners(const String& msg, const String& stacktrace);

  // ErrorsFatal flag, stored as a bit in isolate_flags_.
  bool ErrorsFatal() const { return ErrorsFatalBit::decode(isolate_flags_); }
  void SetErrorsFatal(bool val) {
    isolate_flags_ = ErrorsFatalBit::update(val, isolate_flags_);
  }

  Random* random() { return &random_; }

  Simulator* simulator() const { return simulator_; }
  void set_simulator(Simulator* value) { simulator_ = value; }

  // In-flight child-isolate spawn tracking; shutdown waits on these (see
  // spawn_count_monitor_).
  void IncrementSpawnCount();
  void DecrementSpawnCount();
  void WaitForOutstandingSpawns();

  // Process-wide embedder callbacks invoked during the isolate lifecycle.
  static void SetCreateGroupCallback(Dart_IsolateGroupCreateCallback cb) {
    create_group_callback_ = cb;
  }
  static Dart_IsolateGroupCreateCallback CreateGroupCallback() {
    return create_group_callback_;
  }

  // NOTE(review): trailing underscore is inconsistent with the sibling
  // setters (SetCreateGroupCallback etc.); renaming would break external
  // callers, so it is only flagged here.
  static void SetInitializeCallback_(Dart_InitializeIsolateCallback cb) {
    initialize_callback_ = cb;
  }
  static Dart_InitializeIsolateCallback InitializeCallback() {
    return initialize_callback_;
  }

  static void SetShutdownCallback(Dart_IsolateShutdownCallback cb) {
    shutdown_callback_ = cb;
  }
  static Dart_IsolateShutdownCallback ShutdownCallback() {
    return shutdown_callback_;
  }

  static void SetCleanupCallback(Dart_IsolateCleanupCallback cb) {
    cleanup_callback_ = cb;
  }
  static Dart_IsolateCleanupCallback CleanupCallback() {
    return cleanup_callback_;
  }

  static void SetGroupCleanupCallback(Dart_IsolateGroupCleanupCallback cb) {
    cleanup_group_callback_ = cb;
  }
  static Dart_IsolateGroupCleanupCallback GroupCleanupCallback() {
    return cleanup_group_callback_;
  }
1023 | |
#if !defined(PRODUCT)
  // Ring buffer of objects assigned an id by the service protocol; created
  // lazily via EnsureObjectIdRing().
  ObjectIdRing* object_id_ring() const { return object_id_ring_; }
  ObjectIdRing* EnsureObjectIdRing();
#endif  // !defined(PRODUCT)

  void AddPendingDeopt(uword fp, uword pc);
  uword FindPendingDeopt(uword fp) const;
  void ClearPendingDeoptsAtOrBelow(uword fp) const;
  MallocGrowableArray<PendingLazyDeopt>* pending_deopts() const {
    return pending_deopts_;
  }
  // At most one deoptimization may be in progress at a time (see the
  // ASSERT in set_deopt_context).
  bool IsDeoptimizing() const { return deopt_context_ != nullptr; }
  DeoptContext* deopt_context() const { return deopt_context_; }
  void set_deopt_context(DeoptContext* value) {
    ASSERT(value == nullptr || deopt_context_ == nullptr);
    deopt_context_ = value;
  }

  BackgroundCompiler* background_compiler() const {
    return background_compiler_;
  }

  BackgroundCompiler* optimizing_background_compiler() const {
    return optimizing_background_compiler_;
  }

  // Nestable block/unblock of class finalization; finalization is allowed
  // only while the counter is zero. Both mutators return the pre-update
  // counter value (post-increment/decrement).
  intptr_t BlockClassFinalization() {
    ASSERT(defer_finalization_count_ >= 0);
    return defer_finalization_count_++;
  }

  intptr_t UnblockClassFinalization() {
    ASSERT(defer_finalization_count_ > 0);
    return defer_finalization_count_--;
  }

  bool AllowClassFinalization() {
    ASSERT(defer_finalization_count_ >= 0);
    return defer_finalization_count_ == 0;
  }
1064 | |
#ifndef PRODUCT
  void PrintJSON(JSONStream* stream, bool ref = true);

  // Creates an object with the total heap memory usage statistics for this
  // isolate.
  void PrintMemoryUsageJSON(JSONStream* stream);
#endif

#if !defined(PRODUCT)
  VMTagCounters* vm_tag_counters() { return &vm_tag_counters_; }

#if !defined(DART_PRECOMPILED_RUNTIME)
  IsolateReloadContext* reload_context() { return reload_context_; }

  void DeleteReloadContext();

  bool HasAttemptedReload() const {
    return HasAttemptedReloadBit::decode(isolate_flags_);
  }
  void SetHasAttemptedReload(bool value) {
    isolate_flags_ = HasAttemptedReloadBit::update(value, isolate_flags_);
  }

  bool CanReload() const;
#else
  // Hot reload is unsupported in the precompiled runtime.
  bool IsReloading() const { return false; }
  bool HasAttemptedReload() const { return false; }
  bool CanReload() const { return false; }
#endif  // !defined(DART_PRECOMPILED_RUNTIME)
#endif  // !defined(PRODUCT)

  bool IsPaused() const;

#if !defined(PRODUCT)
  bool should_pause_post_service_request() const {
    return ShouldPausePostServiceRequestBit::decode(isolate_flags_);
  }
  void set_should_pause_post_service_request(bool value) {
    isolate_flags_ =
        ShouldPausePostServiceRequestBit::update(value, isolate_flags_);
  }
#endif  // !defined(PRODUCT)

  ErrorPtr PausePostRequest();

  // Tag accessors and field offsets, for generated-code access.
  uword user_tag() const { return user_tag_; }
  static intptr_t user_tag_offset() { return OFFSET_OF(Isolate, user_tag_); }
  static intptr_t current_tag_offset() {
    return OFFSET_OF(Isolate, current_tag_);
  }
  static intptr_t default_tag_offset() {
    return OFFSET_OF(Isolate, default_tag_);
  }

#if !defined(PRODUCT)
  // Per-isolate metric accessors, one per entry of ISOLATE_METRIC_LIST.
#define ISOLATE_METRIC_ACCESSOR(type, variable, name, unit)                    \
  type* Get##variable##Metric() { return &metric_##variable##_; }
  ISOLATE_METRIC_LIST(ISOLATE_METRIC_ACCESSOR);
#undef ISOLATE_METRIC_ACCESSOR
#endif  // !defined(PRODUCT)
1125 | |
  static intptr_t IsolateListLength();

  GrowableObjectArrayPtr tag_table() const { return tag_table_; }
  void set_tag_table(const GrowableObjectArray& value);

  UserTagPtr current_tag() const { return current_tag_; }
  void set_current_tag(const UserTag& tag);

  UserTagPtr default_tag() const { return default_tag_; }
  void set_default_tag(const UserTag& tag);

  void set_ic_miss_code(const Code& code);

  GrowableObjectArrayPtr deoptimized_code_array() const {
    return deoptimized_code_array_;
  }
  void set_deoptimized_code_array(const GrowableObjectArray& value);
  void TrackDeoptimizedCode(const Code& code);

  // Also sends a paused at exit event over the service protocol.
  void SetStickyError(ErrorPtr sticky_error);

  ErrorPtr sticky_error() const { return sticky_error_; }
  // NOTE(review): presumably returns the sticky error and clears it on the
  // isolate ("steal") — confirm in isolate.cc.
  DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError();

  // In precompilation we finalize all regular classes before compiling.
  bool all_classes_finalized() const {
    return AllClassesFinalizedBit::decode(isolate_flags_);
  }
  void set_all_classes_finalized(bool value) {
    isolate_flags_ = AllClassesFinalizedBit::update(value, isolate_flags_);
  }

  bool remapping_cids() const {
    return RemappingCidsBit::decode(isolate_flags_);
  }
  void set_remapping_cids(bool value) {
    isolate_flags_ = RemappingCidsBit::update(value, isolate_flags_);
  }

  // Used by background compiler which field became boxed and must trigger
  // deoptimization in the mutator thread.
  void AddDeoptimizingBoxedField(const Field& field);
  // Returns Field::null() if none available in the list.
  FieldPtr GetDeoptimizingBoxedField();
1171 | |
#ifndef PRODUCT
  // Service-extension plumbing: calls are queued (see the kPending* entry
  // layout below) and invoked via InvokePendingServiceExtensionCalls().
  ErrorPtr InvokePendingServiceExtensionCalls();
  void AppendServiceExtensionCall(const Instance& closure,
                                  const String& method_name,
                                  const Array& parameter_keys,
                                  const Array& parameter_values,
                                  const Instance& reply_port,
                                  const Instance& id);
  void RegisterServiceExtensionHandler(const String& name,
                                       const Instance& closure);
  InstancePtr LookupServiceExtensionHandler(const String& name);
#endif

  static void VisitIsolates(IsolateVisitor* visitor);

#if !defined(PRODUCT)
  // Handle service messages until we are told to resume execution.
  void PauseEventHandler();
#endif

  void AddClosureFunction(const Function& function) const;
  FunctionPtr LookupClosureFunction(const Function& parent,
                                    TokenPosition token_pos) const;
  intptr_t FindClosureIndex(const Function& needle) const;
  FunctionPtr ClosureFunctionFromIndex(intptr_t idx) const;

  bool is_service_isolate() const {
    return IsServiceIsolateBit::decode(isolate_flags_);
  }
  void set_is_service_isolate(bool value) {
    isolate_flags_ = IsServiceIsolateBit::update(value, isolate_flags_);
  }

  bool is_kernel_isolate() const {
    return IsKernelIsolateBit::decode(isolate_flags_);
  }
  void set_is_kernel_isolate(bool value) {
    isolate_flags_ = IsKernelIsolateBit::update(value, isolate_flags_);
  }

  // Whether it's possible for unoptimized code to optimize immediately on entry
  // (can happen with random or very low optimization counter thresholds)
  bool CanOptimizeImmediately() const {
    return FLAG_optimization_counter_threshold < 2 ||
           FLAG_randomize_optimization_counter;
  }

  bool should_load_vmservice() const {
    return ShouldLoadVmServiceBit::decode(isolate_flags_);
  }
  void set_should_load_vmservice(bool value) {
    isolate_flags_ = ShouldLoadVmServiceBit::update(value, isolate_flags_);
  }

  Dart_QualifiedFunctionName* embedder_entry_points() const {
    return embedder_entry_points_;
  }

  void set_obfuscation_map(const char** map) { obfuscation_map_ = map; }
  const char** obfuscation_map() const { return obfuscation_map_; }

  // The dispatch table is owned by the isolate group.
  const DispatchTable* dispatch_table() const {
    return group()->dispatch_table();
  }
1236 | |
  // Isolate-specific flag handling.
  static void FlagsInitialize(Dart_IsolateFlags* api_flags);
  void FlagsCopyTo(Dart_IsolateFlags* api_flags) const;
  void FlagsCopyFrom(const Dart_IsolateFlags& api_flags);

  // The FLAG_FOR_* helpers select, per build mode, whether a flag getter
  // reads the per-isolate bit (from_field) or the global flag (from_flag).
#if defined(DART_PRECOMPILER)
#define FLAG_FOR_PRECOMPILER(from_field, from_flag) (from_field)
#else
#define FLAG_FOR_PRECOMPILER(from_field, from_flag) (from_flag)
#endif

#if !defined(PRODUCT)
#define FLAG_FOR_NONPRODUCT(from_field, from_flag) (from_field)
#else
#define FLAG_FOR_NONPRODUCT(from_field, from_flag) (from_flag)
#endif

#define FLAG_FOR_PRODUCT(from_field, from_flag) (from_field)

  // Expands ISOLATE_FLAG_LIST into one boolean getter per flag.
#define DECLARE_GETTER(when, name, bitname, isolate_flag_name, flag_name)      \
  bool name() const {                                                          \
    const bool false_by_default = false;                                       \
    USE(false_by_default);                                                     \
    return FLAG_FOR_##when(bitname##Bit::decode(isolate_flags_), flag_name);   \
  }
  ISOLATE_FLAG_LIST(DECLARE_GETTER)
#undef FLAG_FOR_NONPRODUCT
#undef FLAG_FOR_PRECOMPILER
#undef FLAG_FOR_PRODUCT
#undef DECLARE_GETTER

#if defined(PRODUCT)
  // In PRODUCT builds the flag is immutable; only a no-op set to false is
  // tolerated (enforced by the ASSERT).
  void set_use_osr(bool use_osr) { ASSERT(!use_osr); }
#else  // defined(PRODUCT)
  void set_use_osr(bool use_osr) {
    isolate_flags_ = UseOsrBit::update(use_osr, isolate_flags_);
  }
#endif  // defined(PRODUCT)

  bool null_safety_not_set() const {
    return !NullSafetySetBit::decode(isolate_flags_);
  }

  // Reading null_safety() before it has been set is a programming error.
  bool null_safety() const {
    ASSERT(!null_safety_not_set());
    return NullSafetyBit::decode(isolate_flags_);
  }
  void set_null_safety(bool null_safety) {
    isolate_flags_ = NullSafetySetBit::update(true, isolate_flags_);
    isolate_flags_ = NullSafetyBit::update(null_safety, isolate_flags_);
  }

  bool has_attempted_stepping() const {
    return HasAttemptedSteppingBit::decode(isolate_flags_);
  }
  void set_has_attempted_stepping(bool value) {
    isolate_flags_ = HasAttemptedSteppingBit::update(value, isolate_flags_);
  }
1295 | |
  // Request termination of isolates via the isolate library message protocol.
  static void KillAllIsolates(LibMsgId msg_id);
  static void KillIfExists(Isolate* isolate, LibMsgId msg_id);

  // Lookup an isolate by its main port. Returns nullptr if no matching isolate
  // is found.
  static Isolate* LookupIsolateByPort(Dart_Port port);

  // Lookup an isolate by its main port and return a copy of its name. Returns
  // nullptr if no matching isolate is found.
  static std::unique_ptr<char[]> LookupIsolateNameByPort(Dart_Port port);

  static void DisableIsolateCreation();
  static void EnableIsolateCreation();
  static bool IsolateCreationEnabled();
  static bool IsVMInternalIsolate(const Isolate* isolate) {
    return IsolateGroup::IsVMInternalIsolateGroup(isolate->group());
  }

#if !defined(PRODUCT)
  intptr_t reload_every_n_stack_overflow_checks() const {
    return reload_every_n_stack_overflow_checks_;
  }
#endif  // !defined(PRODUCT)

  HandlerInfoCache* handler_info_cache() { return &handler_info_cache_; }

  CatchEntryMovesCache* catch_entry_moves_cache() {
    return &catch_entry_moves_cache_;
  }

  void MaybeIncreaseReloadEveryNStackOverflowChecks();

  // The weak table used in the snapshot writer for the purpose of fast message
  // sending.
  WeakTable* forward_table_new() { return forward_table_new_.get(); }
  void set_forward_table_new(WeakTable* table);

  WeakTable* forward_table_old() { return forward_table_old_.get(); }
  void set_forward_table_old(WeakTable* table);

  static void NotifyLowMemory();

  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

  // Transfers ownership of the cached regexp backtracking stack (possibly
  // nullptr) to the caller.
  std::unique_ptr<VirtualMemory> TakeRegexpBacktrackStack() {
    return std::move(regexp_backtracking_stack_cache_);
  }

  void CacheRegexpBacktrackStack(std::unique_ptr<VirtualMemory> stack) {
    regexp_backtracking_stack_cache_ = std::move(stack);
  }
1348 | |
 private:
  friend class Dart;                  // Init, InitOnce, Shutdown.
  friend class IsolateKillerVisitor;  // Kill().
  friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* g,
                                                   const char* n,
                                                   char** e);

  Isolate(IsolateGroup* group, const Dart_IsolateFlags& api_flags);

  static void InitVM();
  static Isolate* InitIsolate(const char* name_prefix,
                              IsolateGroup* isolate_group,
                              const Dart_IsolateFlags& api_flags,
                              bool is_vm_isolate = false);

  // The isolate_creation_monitor_ should be held when calling Kill().
  void KillLocked(LibMsgId msg_id);

  void Shutdown();
  void LowLevelShutdown();

  // Unregister the [isolate] from the thread, remove it from the isolate group,
  // invoke the cleanup function (if any), delete the isolate and possibly
  // delete the isolate group (if it's the last isolate in the group).
  static void LowLevelCleanup(Isolate* isolate);

  void BuildName(const char* name_prefix);

  void ProfileIdle();

  // Visit all object pointers. Caller must ensure concurrent sweeper is not
  // running, and the visitor must not allocate.
  void VisitObjectPointers(ObjectPointerVisitor* visitor,
                           ValidationPolicy validate_frames);
  void VisitStackPointers(ObjectPointerVisitor* visitor,
                          ValidationPolicy validate_frames);

  void set_user_tag(uword tag) { user_tag_ = tag; }

#if !defined(PRODUCT)
  GrowableObjectArrayPtr GetAndClearPendingServiceExtensionCalls();
  GrowableObjectArrayPtr pending_service_extension_calls() const {
    return pending_service_extension_calls_;
  }
  void set_pending_service_extension_calls(const GrowableObjectArray& value);
  GrowableObjectArrayPtr registered_service_extension_handlers() const {
    return registered_service_extension_handlers_;
  }
  void set_registered_service_extension_handlers(
      const GrowableObjectArray& value);
#endif  // !defined(PRODUCT)

  Thread* ScheduleThread(bool is_mutator, bool bypass_safepoint = false);
  void UnscheduleThread(Thread* thread,
                        bool is_mutator,
                        bool bypass_safepoint = false);

  // DEPRECATED: Use Thread's methods instead. During migration, these default
  // to using the mutator thread (which must also be the current thread).
  Zone* current_zone() const {
    ASSERT(Thread::Current() == mutator_thread());
    return mutator_thread()->zone();
  }
1412 | |
  // Accessed from generated code.
  // ** This block of fields must come first! **
  // For AOT cross-compilation, we rely on these members having the same offsets
  // in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
  // We use only word-sized fields to avoid differences in struct packing on the
  // different architectures. See also CheckOffsets in dart.cc.
  uword user_tag_ = 0;
  UserTagPtr current_tag_;
  UserTagPtr default_tag_;
  CodePtr ic_miss_code_;
  // Cached value of object_store_shared_ptr_, here for generated code access
  ObjectStore* cached_object_store_ = nullptr;
  SharedClassTable* shared_class_table_ = nullptr;
  // Cached value of class_table_->table_, here for generated code access
  ClassPtr* cached_class_table_table_ = nullptr;
  FieldTable* field_table_ = nullptr;
  bool single_step_ = false;
  // End accessed from generated code.

  IsolateGroup* isolate_group_;
  IdleTimeHandler idle_time_handler_;
  std::unique_ptr<IsolateObjectStore> isolate_object_store_;
  // shared in AOT(same pointer as on IsolateGroup), not shared in JIT
  std::shared_ptr<ObjectStore> object_store_shared_ptr_;
  // shared in AOT(same pointer as on IsolateGroup), not shared in JIT
  std::shared_ptr<ClassTable> class_table_;

#if !defined(DART_PRECOMPILED_RUNTIME)
  NativeCallbackTrampolines native_callback_trampolines_;
#endif

  // One bit of isolate_flags_ per entry; decoded/updated through the
  // generated *Bit BitField classes below.
#define ISOLATE_FLAG_BITS(V)                                                   \
  V(ErrorsFatal)                                                               \
  V(IsRunnable)                                                                \
  V(IsServiceIsolate)                                                          \
  V(IsKernelIsolate)                                                           \
  V(AllClassesFinalized)                                                       \
  V(RemappingCids)                                                             \
  V(ResumeRequest)                                                             \
  V(HasAttemptedReload)                                                        \
  V(HasAttemptedStepping)                                                      \
  V(ShouldPausePostServiceRequest)                                             \
  V(EnableAsserts)                                                             \
  V(UseFieldGuards)                                                            \
  V(UseOsr)                                                                    \
  V(Obfuscate)                                                                 \
  V(ShouldLoadVmService)                                                       \
  V(NullSafety)                                                                \
  V(NullSafetySet)

  // Isolate specific flags.
  enum FlagBits {
#define DECLARE_BIT(Name) k##Name##Bit,
    ISOLATE_FLAG_BITS(DECLARE_BIT)
#undef DECLARE_BIT
  };

#define DECLARE_BITFIELD(Name)                                                 \
  class Name##Bit : public BitField<uint32_t, bool, k##Name##Bit, 1> {};
  ISOLATE_FLAG_BITS(DECLARE_BITFIELD)
#undef DECLARE_BITFIELD

  uint32_t isolate_flags_ = 0;
1476 | |
  // Unoptimized background compilation.
  BackgroundCompiler* background_compiler_ = nullptr;

  // Optimized background compilation.
  BackgroundCompiler* optimizing_background_compiler_ = nullptr;

  // Fields that aren't needed in a product build go here with boolean flags at
  // the top.
#if !defined(PRODUCT)
  Debugger* debugger_ = nullptr;
  int64_t last_resume_timestamp_;

  VMTagCounters vm_tag_counters_;

  // We use 6 list entries for each pending service extension calls.
  enum {kPendingHandlerIndex = 0, kPendingMethodNameIndex, kPendingKeysIndex,
        kPendingValuesIndex, kPendingReplyPortIndex, kPendingIdIndex,
        kPendingEntrySize};
  GrowableObjectArrayPtr pending_service_extension_calls_;

  // We use 2 list entries for each registered extension handler.
  enum {kRegisteredNameIndex = 0, kRegisteredHandlerIndex,
        kRegisteredEntrySize};
  GrowableObjectArrayPtr registered_service_extension_handlers_;

  // Used to wake the isolate when it is in the pause event loop.
  Monitor* pause_loop_monitor_ = nullptr;

  // One metric field per entry of ISOLATE_METRIC_LIST.
#define ISOLATE_METRIC_VARIABLE(type, variable, name, unit)                    \
  type metric_##variable##_;
  ISOLATE_METRIC_LIST(ISOLATE_METRIC_VARIABLE);
#undef ISOLATE_METRIC_VARIABLE

  RelaxedAtomic<intptr_t> no_reload_scope_depth_ =
      0;  // we can only reload when this is 0.
  // Per-isolate copy of FLAG_reload_every.
  intptr_t reload_every_n_stack_overflow_checks_;
  IsolateReloadContext* reload_context_ = nullptr;
  // Ring buffer of objects assigned an id.
  ObjectIdRing* object_id_ring_ = nullptr;
#endif  // !defined(PRODUCT)

  // All other fields go here.
  int64_t start_time_micros_;
  Dart_MessageNotifyCallback message_notify_callback_ = nullptr;
  char* name_ = nullptr;
  Dart_Port main_port_ = 0;
  // Isolates created by Isolate.spawn have the same origin id.
  Dart_Port origin_id_ = 0;
  Mutex origin_id_mutex_;
  uint64_t pause_capability_ = 0;
  uint64_t terminate_capability_ = 0;
  void* init_callback_data_ = nullptr;
  Dart_EnvironmentCallback environment_callback_ = nullptr;
  Random random_;
  Simulator* simulator_ = nullptr;
  Mutex mutex_;  // Protects compiler stats.
  Mutex constant_canonicalization_mutex_;  // Protects const canonicalization.
  Mutex megamorphic_mutex_;  // Protects the table of megamorphic caches and
                             // their entries.
  Mutex kernel_data_lib_cache_mutex_;
  Mutex kernel_data_class_cache_mutex_;
  Mutex kernel_constants_mutex_;
  MessageHandler* message_handler_ = nullptr;
  std::unique_ptr<IsolateSpawnState> spawn_state_;
  intptr_t defer_finalization_count_ = 0;
  MallocGrowableArray<PendingLazyDeopt>* pending_deopts_;
  DeoptContext* deopt_context_ = nullptr;

  GrowableObjectArrayPtr tag_table_;

  GrowableObjectArrayPtr deoptimized_code_array_;

  ErrorPtr sticky_error_;

  // NOTE(review): bequest_/beneficiary_ appear to carry a parting object to
  // another isolate when this isolate exits (see bequeath()) — confirm usage.
  std::unique_ptr<Bequest> bequest_;
  Dart_Port beneficiary_ = 0;

  // Protect access to boxed_field_list_.
  Mutex field_list_mutex_;
  // List of fields that became boxed and that trigger deoptimization.
  GrowableObjectArrayPtr boxed_field_list_;

  // This guards spawn_count_. An isolate cannot complete shutdown and be
  // destroyed while there are child isolates in the midst of a spawn.
  Monitor spawn_count_monitor_;
  intptr_t spawn_count_ = 0;

  HandlerInfoCache handler_info_cache_;
  CatchEntryMovesCache catch_entry_moves_cache_;

  Dart_QualifiedFunctionName* embedder_entry_points_ = nullptr;
  const char** obfuscation_map_ = nullptr;

  DispatchTable* dispatch_table_ = nullptr;

  // Used during message sending of messages between isolates.
  std::unique_ptr<WeakTable> forward_table_new_;
  std::unique_ptr<WeakTable> forward_table_old_;

  // Signals whether the isolate can receive messages (e.g. KillAllIsolates can
  // send a kill message).
  // This is protected by [isolate_creation_monitor_].
  bool accepts_messages_ = false;

  std::unique_ptr<VirtualMemory> regexp_backtracking_stack_cache_ = nullptr;

  // Process-wide embedder lifecycle callbacks (see the static setters above).
  static Dart_IsolateGroupCreateCallback create_group_callback_;
  static Dart_InitializeIsolateCallback initialize_callback_;
  static Dart_IsolateShutdownCallback shutdown_callback_;
  static Dart_IsolateCleanupCallback cleanup_callback_;
  static Dart_IsolateGroupCleanupCallback cleanup_group_callback_;

#if !defined(PRODUCT)
  static void WakePauseEventHandler(Dart_Isolate isolate);
#endif

  // Manage list of existing isolates.
  static bool TryMarkIsolateReady(Isolate* isolate);
  static void UnMarkIsolateReady(Isolate* isolate);
  static void MaybeNotifyVMShutdown();
  bool AcceptsMessagesLocked() {
    ASSERT(isolate_creation_monitor_->IsOwnedByCurrentThread());
    return accepts_messages_;
  }

  // This monitor protects [creation_enabled_].
  static Monitor* isolate_creation_monitor_;
  static bool creation_enabled_;

#define REUSABLE_FRIEND_DECLARATION(name)                                      \
  friend class Reusable##name##HandleScope;
  REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
#undef REUSABLE_FRIEND_DECLARATION

  friend class Become;              // VisitObjectPointers
  friend class GCCompactor;         // VisitObjectPointers
  friend class GCMarker;            // VisitObjectPointers
  friend class SafepointHandler;
  friend class ObjectGraph;         // VisitObjectPointers
  friend class HeapSnapshotWriter;  // VisitObjectPointers
  friend class Scavenger;           // VisitObjectPointers
  friend class HeapIterationScope;  // VisitObjectPointers
  friend class ServiceIsolate;
  friend class Thread;
  friend class Timeline;
  friend class NoReloadScope;  // reload_block
  friend class IsolateGroup;   // reload_context_

  DISALLOW_COPY_AND_ASSIGN(Isolate);
1627 | }; |
1628 | |
1629 | // When we need to execute code in an isolate, we use the |
1630 | // StartIsolateScope. |
1631 | class StartIsolateScope { |
1632 | public: |
1633 | explicit StartIsolateScope(Isolate* new_isolate) |
1634 | : new_isolate_(new_isolate), saved_isolate_(Isolate::Current()) { |
1635 | if (new_isolate_ == nullptr) { |
1636 | ASSERT(Isolate::Current() == nullptr); |
1637 | // Do nothing. |
1638 | return; |
1639 | } |
1640 | if (saved_isolate_ != new_isolate_) { |
1641 | ASSERT(Isolate::Current() == nullptr); |
1642 | Thread::EnterIsolate(new_isolate_); |
1643 | // Ensure this is not a nested 'isolate enter' with prior state. |
1644 | ASSERT(Thread::Current()->saved_stack_limit() == 0); |
1645 | } |
1646 | } |
1647 | |
1648 | ~StartIsolateScope() { |
1649 | if (new_isolate_ == nullptr) { |
1650 | ASSERT(Isolate::Current() == nullptr); |
1651 | // Do nothing. |
1652 | return; |
1653 | } |
1654 | if (saved_isolate_ != new_isolate_) { |
1655 | ASSERT(saved_isolate_ == nullptr); |
1656 | // ASSERT that we have bottomed out of all Dart invocations. |
1657 | ASSERT(Thread::Current()->saved_stack_limit() == 0); |
1658 | Thread::ExitIsolate(); |
1659 | } |
1660 | } |
1661 | |
1662 | private: |
1663 | Isolate* new_isolate_; |
1664 | Isolate* saved_isolate_; |
1665 | |
1666 | DISALLOW_COPY_AND_ASSIGN(StartIsolateScope); |
1667 | }; |
1668 | |
1669 | class EnterIsolateGroupScope { |
1670 | public: |
1671 | explicit EnterIsolateGroupScope(IsolateGroup* isolate_group) |
1672 | : isolate_group_(isolate_group) { |
1673 | ASSERT(IsolateGroup::Current() == nullptr); |
1674 | const bool result = Thread::EnterIsolateGroupAsHelper( |
1675 | isolate_group_, Thread::kUnknownTask, /*bypass_safepoint=*/false); |
1676 | ASSERT(result); |
1677 | } |
1678 | |
1679 | ~EnterIsolateGroupScope() { |
1680 | Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/false); |
1681 | } |
1682 | |
1683 | private: |
1684 | IsolateGroup* isolate_group_; |
1685 | |
1686 | DISALLOW_COPY_AND_ASSIGN(EnterIsolateGroupScope); |
1687 | }; |
1688 | |
// Bundles everything needed to spawn a new isolate: the parent's port, the
// entry point (either a resolved Function or just a script URL), serialized
// argument/message buffers, exit/error ports, and the isolate flags the new
// isolate should be created with.
class IsolateSpawnState {
 public:
  // Constructor used when spawning with an already-resolved entry-point
  // [func] into an existing isolate [group].
  IsolateSpawnState(Dart_Port parent_port,
                    Dart_Port origin_id,
                    const char* script_url,
                    const Function& func,
                    SerializedObjectBuffer* message_buffer,
                    const char* package_config,
                    bool paused,
                    bool errorsAreFatal,
                    Dart_Port onExit,
                    Dart_Port onError,
                    const char* debug_name,
                    IsolateGroup* group);
  // Constructor used when spawning from a script URL with serialized
  // arguments instead of a resolved Function (the spawn-URI path; see
  // is_spawn_uri()).
  IsolateSpawnState(Dart_Port parent_port,
                    const char* script_url,
                    const char* package_config,
                    SerializedObjectBuffer* args_buffer,
                    SerializedObjectBuffer* message_buffer,
                    bool paused,
                    bool errorsAreFatal,
                    Dart_Port onExit,
                    Dart_Port onError,
                    const char* debug_name,
                    IsolateGroup* group);
  ~IsolateSpawnState();

  // The isolate produced from this spawn request; set via set_isolate().
  Isolate* isolate() const { return isolate_; }
  void set_isolate(Isolate* value) { isolate_ = value; }

  Dart_Port parent_port() const { return parent_port_; }
  Dart_Port origin_id() const { return origin_id_; }
  Dart_Port on_exit_port() const { return on_exit_port_; }
  Dart_Port on_error_port() const { return on_error_port_; }
  const char* script_url() const { return script_url_; }
  const char* package_config() const { return package_config_; }
  const char* library_url() const { return library_url_; }
  const char* class_name() const { return class_name_; }
  const char* function_name() const { return function_name_; }
  const char* debug_name() const { return debug_name_; }
  // True when no library was recorded, i.e. the URL-based constructor above
  // was used rather than the resolved-Function one.
  bool is_spawn_uri() const { return library_url_ == nullptr; }
  bool paused() const { return paused_; }
  bool errors_are_fatal() const { return errors_are_fatal_; }
  Dart_IsolateFlags* isolate_flags() { return &isolate_flags_; }

  // Resolves the function to run in the spawned isolate (ObjectPtr rather
  // than FunctionPtr — presumably so an error can be returned; confirm in
  // the implementation).
  ObjectPtr ResolveFunction();
  // Rebuild Dart instances from [serialized_args_] / [serialized_message_].
  InstancePtr BuildArgs(Thread* thread);
  InstancePtr BuildMessage(Thread* thread);

  IsolateGroup* isolate_group() const { return isolate_group_; }

 private:
  Isolate* isolate_;
  Dart_Port parent_port_;
  Dart_Port origin_id_;
  Dart_Port on_exit_port_;
  Dart_Port on_error_port_;
  const char* script_url_;
  const char* package_config_;
  const char* library_url_;
  const char* class_name_;
  const char* function_name_;
  const char* debug_name_;
  IsolateGroup* isolate_group_;
  std::unique_ptr<Message> serialized_args_;
  std::unique_ptr<Message> serialized_message_;

  Dart_IsolateFlags isolate_flags_;
  bool paused_;
  bool errors_are_fatal_;
};
1760 | |
1761 | } // namespace dart |
1762 | |
1763 | #endif // RUNTIME_VM_ISOLATE_H_ |
1764 | |