// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_CLUSTERED_SNAPSHOT_H_
#define RUNTIME_VM_CLUSTERED_SNAPSHOT_H_

#include "platform/assert.h"
#include "vm/allocation.h"
#include "vm/bitfield.h"
#include "vm/datastream.h"
#include "vm/globals.h"
#include "vm/growable_array.h"
#include "vm/hash_map.h"
#include "vm/heap/heap.h"
#include "vm/image_snapshot.h"
#include "vm/object.h"
#include "vm/raw_object_fields.h"
#include "vm/snapshot.h"
#include "vm/v8_snapshot_writer.h"
#include "vm/version.h"

#if defined(DEBUG)
#define SNAPSHOT_BACKTRACE
#endif

namespace dart {

// For full snapshots, we use a clustered snapshot format that trades longer
// serialization time for faster deserialization time and smaller snapshots.
// Objects are clustered by class to allow writing type information once per
// class instead of once per object, and to allow filling the objects in a
// tight loop. The snapshot has two major sections: the first describes how to
// allocate the objects and the second describes how to initialize them.
// Deserialization starts by allocating a reference array large enough to hold
// the base objects (objects already available to both the serializer and
// deserializer) and the objects written in the snapshot. The allocation
// section is then read for each cluster, filling the reference array. Then
// the initialization/fill section is read for each cluster, using the indices
// into the reference array to fill pointers. At this point, every object has
// been touched exactly once and in order, making this approach very cache
// friendly. Finally, each cluster is given an opportunity to perform some
// fix-ups that require the graph to be fully loaded, such as rehashing,
// though most clusters do not require fixups.
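//
// A simplified sketch of that loading flow (illustrative only; the real
// implementation lives in Deserializer::Deserialize and the snapshot-reading
// entry points in clustered_snapshot.cc, which also handle verification and
// instruction loading):
//
//   // Phase 1: allocate all objects, filling the ref array.
//   for (intptr_t i = 0; i < num_clusters_; i++) {
//     clusters_[i] = ReadCluster();
//     clusters_[i]->ReadAlloc(this);
//   }
//   // Phase 2: initialize the objects, using ref-array indices to fill
//   // pointer fields.
//   for (intptr_t i = 0; i < num_clusters_; i++) {
//     clusters_[i]->ReadFill(this);
//   }
//   // Phase 3: post-load fix-ups that need the complete graph, e.g.
//   // rehashing.
//   for (intptr_t i = 0; i < num_clusters_; i++) {
//     clusters_[i]->PostLoad(this, refs);
//   }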

// Forward declarations.
class Serializer;
class Deserializer;
class ObjectStore;
class ImageWriter;
class ImageReader;

class LoadingUnitSerializationData : public ZoneAllocated {
 public:
  LoadingUnitSerializationData(intptr_t id,
                               LoadingUnitSerializationData* parent)
      : id_(id), parent_(parent), deferred_objects_(), num_objects_(0) {}

  intptr_t id() const { return id_; }
  LoadingUnitSerializationData* parent() const { return parent_; }
  intptr_t num_objects() const { return num_objects_; }
  void set_num_objects(intptr_t value) { num_objects_ = value; }
  void AddDeferredObject(CodePtr obj) {
    deferred_objects_.Add(&Code::ZoneHandle(obj));
  }
  GrowableArray<Code*>* deferred_objects() { return &deferred_objects_; }

 private:
  intptr_t id_;
  LoadingUnitSerializationData* parent_;
  GrowableArray<Code*> deferred_objects_;
  intptr_t num_objects_;
};

class SerializationCluster : public ZoneAllocated {
 public:
  explicit SerializationCluster(const char* name)
      : name_(name), size_(0), num_objects_(0) {}
  virtual ~SerializationCluster() {}

  // Add [object] to the cluster and push its outgoing references.
  virtual void Trace(Serializer* serializer, ObjectPtr object) = 0;

  // Write the cluster type and information needed to allocate the cluster's
  // objects. For fixed-size objects, this is just the object count. For
  // variable-size objects, this is the object count and the length of each
  // object.
  virtual void WriteAlloc(Serializer* serializer) = 0;

  // Write the byte and reference data of the cluster's objects.
  virtual void WriteFill(Serializer* serializer) = 0;

  void WriteAndMeasureAlloc(Serializer* serializer);
  void WriteAndMeasureFill(Serializer* serializer);

  const char* name() const { return name_; }
  intptr_t size() const { return size_; }
  intptr_t num_objects() const { return num_objects_; }

 protected:
  const char* name_;
  intptr_t size_;
  intptr_t num_objects_;
};
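
// A minimal sketch of a concrete cluster for a hypothetical fixed-size class
// Foo, illustrating the Trace/WriteAlloc/WriteFill contract (the real
// clusters live in clustered_snapshot.cc; Foo, FooPtr, and kFooCid are
// invented for illustration):
//
//   class FooSerializationCluster : public SerializationCluster {
//    public:
//     FooSerializationCluster() : SerializationCluster("Foo") {}
//
//     void Trace(Serializer* s, ObjectPtr object) {
//       FooPtr foo = Foo::RawCast(object);
//       objects_.Add(foo);
//       PushFromTo(foo);  // Enqueue Foo's outgoing references.
//     }
//
//     void WriteAlloc(Serializer* s) {
//       s->WriteCid(kFooCid);
//       s->WriteUnsigned(objects_.length());  // Fixed size: count suffices.
//       for (intptr_t i = 0; i < objects_.length(); i++) {
//         s->AssignRef(objects_[i]);
//       }
//     }
//
//     void WriteFill(Serializer* s) {
//       for (intptr_t i = 0; i < objects_.length(); i++) {
//         FooPtr foo = objects_[i];
//         AutoTraceObject(foo);
//         WriteFromTo(foo);  // Write Foo's reference fields as ref ids.
//       }
//     }
//
//    private:
//     GrowableArray<FooPtr> objects_;
//   };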

class DeserializationCluster : public ZoneAllocated {
 public:
  DeserializationCluster() : start_index_(-1), stop_index_(-1) {}
  virtual ~DeserializationCluster() {}

  // Allocate memory for all objects in the cluster and write their addresses
  // into the ref array. Do not touch this memory.
  virtual void ReadAlloc(Deserializer* deserializer) = 0;

  // Initialize the cluster's objects. Do not touch the memory of other
  // objects.
  virtual void ReadFill(Deserializer* deserializer) = 0;

  // Complete any action that requires the full graph to be deserialized, such
  // as rehashing.
  virtual void PostLoad(Deserializer* deserializer, const Array& refs) {}

 protected:
  // The range of the ref array that belongs to this cluster.
  intptr_t start_index_;
  intptr_t stop_index_;
};
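
// The matching deserialization cluster for the hypothetical Foo above (again
// illustrative only; AllocateUninitialized stands in for the raw-allocation
// helper the real clusters in clustered_snapshot.cc use):
//
//   class FooDeserializationCluster : public DeserializationCluster {
//    public:
//     void ReadAlloc(Deserializer* d) {
//       start_index_ = d->next_index();
//       PageSpace* old_space = d->heap()->old_space();
//       const intptr_t count = d->ReadUnsigned();
//       for (intptr_t i = 0; i < count; i++) {
//         d->AssignRef(AllocateUninitialized(old_space, Foo::InstanceSize()));
//       }
//       stop_index_ = d->next_index();
//     }
//
//     void ReadFill(Deserializer* d) {
//       for (intptr_t id = start_index_; id < stop_index_; id++) {
//         FooPtr foo = static_cast<FooPtr>(d->Ref(id));
//         Deserializer::InitializeHeader(foo, kFooCid, Foo::InstanceSize());
//         ReadFromTo(foo);  // Fill Foo's reference fields from the ref array.
//       }
//     }
//   };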

class SmiObjectIdPair {
 public:
  SmiObjectIdPair() : smi_(nullptr), id_(0) {}
  SmiPtr smi_;
  intptr_t id_;

  bool operator==(const SmiObjectIdPair& other) const {
    return (smi_ == other.smi_) && (id_ == other.id_);
  }
};

class SmiObjectIdPairTrait {
 public:
  typedef SmiPtr Key;
  typedef intptr_t Value;
  typedef SmiObjectIdPair Pair;

  static Key KeyOf(Pair kv) { return kv.smi_; }
  static Value ValueOf(Pair kv) { return kv.id_; }
  static inline intptr_t Hashcode(Key key) { return Smi::Value(key); }
  static inline bool IsKeyEqual(Pair kv, Key key) { return kv.smi_ == key; }
};

typedef DirectChainedHashMap<SmiObjectIdPairTrait> SmiObjectIdMap;

class Serializer : public ThreadStackResource {
 public:
  Serializer(Thread* thread,
             Snapshot::Kind kind,
             uint8_t** buffer,
             ReAlloc alloc,
             intptr_t initial_size,
             ImageWriter* image_writer_,
             bool vm_,
             V8SnapshotProfileWriter* profile_writer = nullptr);
  ~Serializer();

  // Reference value for objects that are either not reachable from the roots
  // or should never have a reference in the snapshot (because they are
  // dropped, for example). Should be the default value for Heap::GetObjectId.
  static constexpr intptr_t kUnreachableReference = 0;
  COMPILE_ASSERT(kUnreachableReference == WeakTable::kNoValue);

  static constexpr bool IsReachableReference(intptr_t ref) {
    return ref == kUnallocatedReference || IsAllocatedReference(ref);
  }

  // Reference value for traced objects that have not yet been allocated their
  // final reference ID.
  static const intptr_t kUnallocatedReference = -1;

  static constexpr bool IsAllocatedReference(intptr_t ref) {
    return ref > kUnreachableReference;
  }

  static constexpr bool IsArtificialReference(intptr_t ref) {
    return ref < kUnallocatedReference;
  }
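
  // The resulting reference id space, summarizing the predicates above:
  //
  //    ... -3 -2        -1            0          1 2 3 ...
  //   [artificial] [unallocated] [unreachable] [allocated]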

  intptr_t WriteVMSnapshot(const Array& symbols);
  void WriteProgramSnapshot(intptr_t num_base_objects,
                            ObjectStore* object_store);
  void WriteUnitSnapshot(LoadingUnitSerializationData* unit,
                         uint32_t program_hash);

  void AddVMIsolateBaseObjects();

  void AddBaseObject(ObjectPtr base_object,
                     const char* type = nullptr,
                     const char* name = nullptr) {
    intptr_t ref = AssignRef(base_object);
    num_base_objects_++;

    if (profile_writer_ != nullptr) {
      if (type == nullptr) {
        type = "Unknown";
      }
      if (name == nullptr) {
        name = "<base object>";
      }
      profile_writer_->SetObjectTypeAndName(
          {V8SnapshotProfileWriter::kSnapshot, ref}, type, name);
      profile_writer_->AddRoot({V8SnapshotProfileWriter::kSnapshot, ref});
    }
  }

  intptr_t AssignRef(ObjectPtr object) {
    ASSERT(IsAllocatedReference(next_ref_index_));
    if (object->IsHeapObject()) {
      // The object id weak table holds image offsets for Instructions instead
      // of ref indices.
      ASSERT(!object->IsInstructions());
      heap_->SetObjectId(object, next_ref_index_);
      ASSERT(heap_->GetObjectId(object) == next_ref_index_);
    } else {
      SmiPtr smi = Smi::RawCast(object);
      SmiObjectIdPair* existing_pair = smi_ids_.Lookup(smi);
      if (existing_pair != nullptr) {
        ASSERT(existing_pair->id_ == kUnallocatedReference);
        existing_pair->id_ = next_ref_index_;
      } else {
        SmiObjectIdPair new_pair;
        new_pair.smi_ = smi;
        new_pair.id_ = next_ref_index_;
        smi_ids_.Insert(new_pair);
      }
    }
    return next_ref_index_++;
  }

  intptr_t AssignArtificialRef(ObjectPtr object) {
    ASSERT(object.IsHeapObject());
    const intptr_t ref = -(next_ref_index_++);
    ASSERT(IsArtificialReference(ref));
    heap_->SetObjectId(object, ref);
    ASSERT(heap_->GetObjectId(object) == ref);
    return ref;
  }

  void Push(ObjectPtr object);

  void AddUntracedRef() { num_written_objects_++; }

  void Trace(ObjectPtr object);

  void UnexpectedObject(ObjectPtr object, const char* message);
#if defined(SNAPSHOT_BACKTRACE)
  ObjectPtr ParentOf(const Object& object);
#endif

  SerializationCluster* NewClusterForClass(intptr_t cid);

  void ReserveHeader() {
    // Make room for recording snapshot buffer size.
    stream_.SetPosition(Snapshot::kHeaderSize);
  }

  void FillHeader(Snapshot::Kind kind) {
    Snapshot* header = reinterpret_cast<Snapshot*>(stream_.buffer());
    header->set_magic();
    header->set_length(stream_.bytes_written());
    header->set_kind(kind);
  }
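
  // Sketched write sequence (simplified from FullSnapshotWriter; cluster
  // writing and stats collection omitted):
  //
  //   serializer.ReserveHeader();          // Skip past the header fields.
  //   serializer.WriteVersionAndFeatures(is_vm_snapshot);
  //   ...                                  // Write the clusters.
  //   serializer.FillHeader(serializer.kind());  // Backpatch magic/length.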

  void WriteVersionAndFeatures(bool is_vm_snapshot);

  void Serialize();
  void PrintSnapshotSizes();

  FieldTable* field_table() { return field_table_; }

  WriteStream* stream() { return &stream_; }
  intptr_t bytes_written() { return stream_.bytes_written(); }

  void FlushBytesWrittenToRoot();
  void TraceStartWritingObject(const char* type, ObjectPtr obj, StringPtr name);
  void TraceStartWritingObject(const char* type,
                               ObjectPtr obj,
                               const char* name);
  void TraceEndWritingObject();

  // Writes raw data to the stream (basic type).
  // sizeof(T) must be in {1,2,4,8}.
  template <typename T>
  void Write(T value) {
    WriteStream::Raw<sizeof(T), T>::Write(&stream_, value);
  }
  void WriteUnsigned(intptr_t value) { stream_.WriteUnsigned(value); }
  void WriteUnsigned64(uint64_t value) { stream_.WriteUnsigned(value); }

  void WriteWordWith32BitWrites(uword value) {
    stream_.WriteWordWith32BitWrites(value);
  }

  void WriteBytes(const uint8_t* addr, intptr_t len) {
    stream_.WriteBytes(addr, len);
  }
  void Align(intptr_t alignment) { stream_.Align(alignment); }

  void WriteRootRef(ObjectPtr object, const char* name = nullptr) {
    intptr_t id = RefId(object);
    WriteUnsigned(id);
    if (profile_writer_ != nullptr) {
      profile_writer_->AddRoot({V8SnapshotProfileWriter::kSnapshot, id}, name);
    }
  }

  void WriteElementRef(ObjectPtr object, intptr_t index) {
    WriteUnsigned(AttributeElementRef(object, index));
  }

  // Records a reference from the currently written object to the given object
  // and returns the reference id for the given object.
  intptr_t AttributeElementRef(ObjectPtr object,
                               intptr_t index,
                               bool permit_artificial_ref = false) {
    intptr_t id = RefId(object, permit_artificial_ref);
    if (profile_writer_ != nullptr) {
      profile_writer_->AttributeReferenceTo(
          {V8SnapshotProfileWriter::kSnapshot, object_currently_writing_.id_},
          {{V8SnapshotProfileWriter::kSnapshot, id},
           V8SnapshotProfileWriter::Reference::kElement,
           index});
    }
    return id;
  }

  void WritePropertyRef(ObjectPtr object, const char* property) {
    WriteUnsigned(AttributePropertyRef(object, property));
  }

  // Records a reference from the currently written object to the given object
  // and returns the reference id for the given object.
  intptr_t AttributePropertyRef(ObjectPtr object,
                                const char* property,
                                bool permit_artificial_ref = false) {
    intptr_t id = RefId(object, permit_artificial_ref);
    if (profile_writer_ != nullptr) {
      profile_writer_->AttributeReferenceTo(
          {V8SnapshotProfileWriter::kSnapshot, object_currently_writing_.id_},
          {{V8SnapshotProfileWriter::kSnapshot, id},
           V8SnapshotProfileWriter::Reference::kProperty,
           profile_writer_->EnsureString(property)});
    }
    return id;
  }

  void WriteOffsetRef(ObjectPtr object, intptr_t offset) {
    intptr_t id = RefId(object);
    WriteUnsigned(id);
    if (profile_writer_ != nullptr) {
      const char* property = offsets_table_->FieldNameForOffset(
          object_currently_writing_.cid_, offset);
      if (property != nullptr) {
        profile_writer_->AttributeReferenceTo(
            {V8SnapshotProfileWriter::kSnapshot, object_currently_writing_.id_},
            {{V8SnapshotProfileWriter::kSnapshot, id},
             V8SnapshotProfileWriter::Reference::kProperty,
             profile_writer_->EnsureString(property)});
      } else {
        profile_writer_->AttributeReferenceTo(
            {V8SnapshotProfileWriter::kSnapshot, object_currently_writing_.id_},
            {{V8SnapshotProfileWriter::kSnapshot, id},
             V8SnapshotProfileWriter::Reference::kElement,
             offset});
      }
    }
  }

  template <typename T, typename... P>
  void WriteFromTo(T obj, P&&... args) {
    ObjectPtr* from = obj->ptr()->from();
    ObjectPtr* to = obj->ptr()->to_snapshot(kind(), args...);
    for (ObjectPtr* p = from; p <= to; p++) {
      WriteOffsetRef(*p, (p - reinterpret_cast<ObjectPtr*>(obj->ptr())) *
                             sizeof(ObjectPtr));
    }
  }
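
  // WriteFromTo relies on the object layouts declaring from()/to_snapshot()
  // markers that bracket their contiguous ObjectPtr fields. A hypothetical
  // layout, for illustration only (the real layouts are generated by macros
  // in raw_object.h):
  //
  //   struct FooLayout {
  //     ObjectPtr* from() { return reinterpret_cast<ObjectPtr*>(&name_); }
  //     StringPtr name_;   // First reference field.
  //     ArrayPtr fields_;  // Last reference field written to snapshots.
  //     ObjectPtr* to_snapshot(Snapshot::Kind kind) {
  //       return reinterpret_cast<ObjectPtr*>(&fields_);
  //     }
  //   };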

  template <typename T, typename... P>
  void PushFromTo(T obj, P&&... args) {
    ObjectPtr* from = obj->ptr()->from();
    ObjectPtr* to = obj->ptr()->to_snapshot(kind(), args...);
    for (ObjectPtr* p = from; p <= to; p++) {
      Push(*p);
    }
  }

  void WriteTokenPosition(TokenPosition pos) {
    Write<int32_t>(pos.SnapshotEncode());
  }

  void WriteCid(intptr_t cid) {
    COMPILE_ASSERT(ObjectLayout::kClassIdTagSize <= 32);
    Write<int32_t>(cid);
  }

  void PrepareInstructions(GrowableArray<CodePtr>* codes);
  void WriteInstructions(InstructionsPtr instr,
                         uint32_t unchecked_offset,
                         CodePtr code,
                         bool deferred);
  uint32_t GetDataOffset(ObjectPtr object) const;
  void TraceDataOffset(uint32_t offset);
  intptr_t GetDataSize() const;

  void WriteDispatchTable(const Array& entries);

  Heap* heap() const { return heap_; }
  Zone* zone() const { return zone_; }
  Snapshot::Kind kind() const { return kind_; }
  intptr_t next_ref_index() const { return next_ref_index_; }

  void DumpCombinedCodeStatistics();

  V8SnapshotProfileWriter* profile_writer() const { return profile_writer_; }

  // If the given [obj] was not included in the snapshot and does not yet have
  // an artificial node created for it, creates an artificial node in the
  // profile representing this object.
  // Returns true if [obj] has an artificial profile node associated with it.
  bool CreateArtificalNodeIfNeeded(ObjectPtr obj);

  bool InCurrentLoadingUnit(ObjectPtr obj, bool record = false);
  GrowableArray<LoadingUnitSerializationData*>* loading_units() {
    return loading_units_;
  }
  void set_loading_units(GrowableArray<LoadingUnitSerializationData*>* units) {
    loading_units_ = units;
  }
  void set_current_loading_unit_id(intptr_t id) {
    current_loading_unit_id_ = id;
  }

 private:
  static const char* ReadOnlyObjectType(intptr_t cid);

  // Returns the reference ID for the object. Fails for objects that have not
  // been allocated a reference ID yet, so should be used only after all
  // WriteAlloc calls.
  intptr_t RefId(ObjectPtr object, bool permit_artificial_ref = false) {
    if (!object->IsHeapObject()) {
      SmiPtr smi = Smi::RawCast(object);
      auto const id = smi_ids_.Lookup(smi)->id_;
      if (IsAllocatedReference(id)) return id;
      FATAL("Missing ref");
    }
    // The object id weak table holds image offsets for Instructions instead
    // of ref indices.
    ASSERT(!object->IsInstructions());
    auto const id = heap_->GetObjectId(object);
    if (permit_artificial_ref && IsArtificialReference(id)) {
      return -id;
    }
    ASSERT(!IsArtificialReference(id));
    if (IsAllocatedReference(id)) return id;
    if (object->IsWeakSerializationReference()) {
      // If a reachable WSR has an object ID of 0, then its target was marked
      // for serialization due to reachable strong references and the WSR will
      // be dropped instead. Thus, we change the reference to the WSR to a
      // direct reference to the serialized target.
      auto const ref = WeakSerializationReference::RawCast(object);
      auto const target = WeakSerializationReference::TargetOf(ref);
      auto const target_id = heap_->GetObjectId(target);
      ASSERT(IsAllocatedReference(target_id));
      return target_id;
    }
    if (object->IsCode() && !Snapshot::IncludesCode(kind_)) {
      return RefId(Object::null());
    }
#if !defined(DART_PRECOMPILED_RUNTIME)
    if (object->IsBytecode() && !Snapshot::IncludesBytecode(kind_)) {
      return RefId(Object::null());
    }
#endif  // !DART_PRECOMPILED_RUNTIME
    FATAL("Missing ref");
  }

  Heap* heap_;
  Zone* zone_;
  Snapshot::Kind kind_;
  WriteStream stream_;
  ImageWriter* image_writer_;
  SerializationCluster** clusters_by_cid_;
  GrowableArray<ObjectPtr> stack_;
  intptr_t num_cids_;
  intptr_t num_tlc_cids_;
  intptr_t num_base_objects_;
  intptr_t num_written_objects_;
  intptr_t next_ref_index_;
  intptr_t previous_text_offset_;
  SmiObjectIdMap smi_ids_;
  FieldTable* field_table_;

  intptr_t dispatch_table_size_ = 0;

  // True if writing a VM snapshot, false for an isolate snapshot.
  bool vm_;

  V8SnapshotProfileWriter* profile_writer_ = nullptr;
  struct ProfilingObject {
    ObjectPtr object_ = nullptr;
    intptr_t id_ = 0;
    intptr_t stream_start_ = 0;
    intptr_t cid_ = -1;
  } object_currently_writing_;
  OffsetsTable* offsets_table_ = nullptr;

#if defined(SNAPSHOT_BACKTRACE)
  ObjectPtr current_parent_;
  GrowableArray<Object*> parent_pairs_;
#endif

#if defined(DART_PRECOMPILER)
  IntMap<intptr_t> deduped_instructions_sources_;
#endif

  intptr_t current_loading_unit_id_ = 0;
  GrowableArray<LoadingUnitSerializationData*>* loading_units_ = nullptr;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Serializer);
};

#define AutoTraceObject(obj)                                                   \
  SerializerWritingObjectScope scope_##__COUNTER__(s, name(), obj, nullptr)

#define AutoTraceObjectName(obj, str)                                          \
  SerializerWritingObjectScope scope_##__COUNTER__(s, name(), obj, str)

#define WriteFieldValue(field, value) s->WritePropertyRef(value, #field);

#define WriteFromTo(obj, ...) s->WriteFromTo(obj, ##__VA_ARGS__);

#define PushFromTo(obj, ...) s->PushFromTo(obj, ##__VA_ARGS__);

#define WriteField(obj, field) s->WritePropertyRef(obj->ptr()->field, #field)
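
// These macros assume a local Serializer* named [s] and, for the tracing
// macros, a cluster method name(). Sketched usage inside a cluster's
// WriteFill (the Foo names and fields are again hypothetical):
//
//   void WriteFill(Serializer* s) {
//     for (intptr_t i = 0; i < objects_.length(); i++) {
//       FooPtr foo = objects_[i];
//       AutoTraceObjectName(foo, foo->ptr()->name_);  // Profile bookkeeping.
//       WriteFromTo(foo);                             // Reference fields.
//       s->Write<int32_t>(foo->ptr()->flags_);        // Raw byte data.
//     }
//   }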

class SerializerWritingObjectScope {
 public:
  SerializerWritingObjectScope(Serializer* serializer,
                               const char* type,
                               ObjectPtr object,
                               StringPtr name)
      : serializer_(serializer) {
    serializer_->TraceStartWritingObject(type, object, name);
  }

  SerializerWritingObjectScope(Serializer* serializer,
                               const char* type,
                               ObjectPtr object,
                               const char* name)
      : serializer_(serializer) {
    serializer_->TraceStartWritingObject(type, object, name);
  }

  ~SerializerWritingObjectScope() { serializer_->TraceEndWritingObject(); }

 private:
  Serializer* serializer_;
};

// This class can be used to read the version and features from a snapshot
// before the VM has been initialized.
class SnapshotHeaderReader {
 public:
  static char* InitializeGlobalVMFlagsFromSnapshot(const Snapshot* snapshot);
  static bool NullSafetyFromSnapshot(const Snapshot* snapshot);

  explicit SnapshotHeaderReader(const Snapshot* snapshot)
      : SnapshotHeaderReader(snapshot->kind(),
                             snapshot->Addr(),
                             snapshot->length()) {}

  SnapshotHeaderReader(Snapshot::Kind kind,
                       const uint8_t* buffer,
                       intptr_t size)
      : kind_(kind), stream_(buffer, size) {
    stream_.SetPosition(Snapshot::kHeaderSize);
  }

  // Verifies that the version and features in the snapshot are compatible
  // with the current VM. If [isolate] is non-null, also validates
  // isolate-specific features.
  //
  // Returns null on success and a malloc()ed error on failure.
  // The [offset] will be the next position in the snapshot stream after the
  // features.
  char* VerifyVersionAndFeatures(Isolate* isolate, intptr_t* offset);

 private:
  char* VerifyVersion();
  char* ReadFeatures(const char** features, intptr_t* features_length);
  char* VerifyFeatures(Isolate* isolate);
  char* BuildError(const char* message);

  Snapshot::Kind kind_;
  ReadStream stream_;
};
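
// Sketched usage (simplified from the snapshot-loading path; error reporting
// omitted):
//
//   SnapshotHeaderReader header_reader(snapshot);
//   intptr_t offset = 0;
//   char* error = header_reader.VerifyVersionAndFeatures(isolate, &offset);
//   if (error != nullptr) {
//     ...  // Report the malloc()ed error and free it.
//   }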

class Deserializer : public ThreadStackResource {
 public:
  Deserializer(Thread* thread,
               Snapshot::Kind kind,
               const uint8_t* buffer,
               intptr_t size,
               const uint8_t* data_buffer,
               const uint8_t* instructions_buffer,
               intptr_t offset = 0);
  ~Deserializer();

  // Verifies the image alignment.
  //
  // Returns ApiError::null() on success and an ApiError with an appropriate
  // message otherwise.
  ApiErrorPtr VerifyImageAlignment();

  void ReadProgramSnapshot(ObjectStore* object_store);
  ApiErrorPtr ReadUnitSnapshot(const LoadingUnit& unit);
  void ReadVMSnapshot();

  void AddVMIsolateBaseObjects();

  static void InitializeHeader(ObjectPtr raw,
                               intptr_t cid,
                               intptr_t size,
                               bool is_canonical = false);

  // Reads raw data (for basic types).
  // sizeof(T) must be in {1,2,4,8}.
  template <typename T>
  T Read() {
    return ReadStream::Raw<sizeof(T), T>::Read(&stream_);
  }
  intptr_t ReadUnsigned() { return stream_.ReadUnsigned(); }
  uint64_t ReadUnsigned64() { return stream_.ReadUnsigned<uint64_t>(); }
  void ReadBytes(uint8_t* addr, intptr_t len) { stream_.ReadBytes(addr, len); }

  uword ReadWordWith32BitReads() { return stream_.ReadWordWith32BitReads(); }

  const uint8_t* CurrentBufferAddress() const {
    return stream_.AddressOfCurrentPosition();
  }

  void Advance(intptr_t value) { stream_.Advance(value); }
  void Align(intptr_t alignment) { stream_.Align(alignment); }

  void AddBaseObject(ObjectPtr base_object) { AssignRef(base_object); }

  void AssignRef(ObjectPtr object) {
    ASSERT(next_ref_index_ <= num_objects_);
    refs_->ptr()->data()[next_ref_index_] = object;
    next_ref_index_++;
  }

  ObjectPtr Ref(intptr_t index) const {
    ASSERT(index > 0);
    ASSERT(index <= num_objects_);
    return refs_->ptr()->data()[index];
  }

  ObjectPtr ReadRef() { return Ref(ReadUnsigned()); }

  template <typename T, typename... P>
  void ReadFromTo(T obj, P&&... params) {
    ObjectPtr* from = obj->ptr()->from();
    ObjectPtr* to_snapshot = obj->ptr()->to_snapshot(kind(), params...);
    ObjectPtr* to = obj->ptr()->to(params...);
    for (ObjectPtr* p = from; p <= to_snapshot; p++) {
      *p = ReadRef();
    }
    // This is necessary because, unlike Object::Allocate, the clustered
    // deserializer allocates objects without null-initializing them. Instead,
    // each deserialization cluster is responsible for initializing every
    // field, ensuring that every field is written to exactly once.
    for (ObjectPtr* p = to_snapshot + 1; p <= to; p++) {
      *p = Object::null();
    }
  }

  TokenPosition ReadTokenPosition() {
    return TokenPosition::SnapshotDecode(Read<int32_t>());
  }

  intptr_t ReadCid() {
    COMPILE_ASSERT(ObjectLayout::kClassIdTagSize <= 32);
    return Read<int32_t>();
  }

  void ReadInstructions(CodePtr code, bool deferred);
  void EndInstructions(const Array& refs,
                       intptr_t start_index,
                       intptr_t stop_index);
  ObjectPtr GetObjectAt(uint32_t offset) const;

  void SkipHeader() { stream_.SetPosition(Snapshot::kHeaderSize); }

  void Prepare();
  void Deserialize();

  DeserializationCluster* ReadCluster();

  void ReadDispatchTable();

  intptr_t next_index() const { return next_ref_index_; }
  Heap* heap() const { return heap_; }
  Zone* zone() const { return zone_; }
  Snapshot::Kind kind() const { return kind_; }
  FieldTable* field_table() const { return field_table_; }

 private:
  Heap* heap_;
  Zone* zone_;
  Snapshot::Kind kind_;
  ReadStream stream_;
  ImageReader* image_reader_;
  intptr_t num_base_objects_;
  intptr_t num_objects_;
  intptr_t num_clusters_;
  ArrayPtr refs_;
  intptr_t next_ref_index_;
  intptr_t previous_text_offset_;
  DeserializationCluster** clusters_;
  FieldTable* field_table_;
};

#define ReadFromTo(obj, ...) d->ReadFromTo(obj, ##__VA_ARGS__);

class FullSnapshotWriter {
 public:
  static const intptr_t kInitialSize = 64 * KB;
  FullSnapshotWriter(Snapshot::Kind kind,
                     uint8_t** vm_snapshot_data_buffer,
                     uint8_t** isolate_snapshot_data_buffer,
                     ReAlloc alloc,
                     ImageWriter* vm_image_writer,
                     ImageWriter* iso_image_writer);
  ~FullSnapshotWriter();

  uint8_t** vm_snapshot_data_buffer() const { return vm_snapshot_data_buffer_; }

  uint8_t** isolate_snapshot_data_buffer() const {
    return isolate_snapshot_data_buffer_;
  }

  Thread* thread() const { return thread_; }
  Zone* zone() const { return thread_->zone(); }
  Isolate* isolate() const { return thread_->isolate(); }
  Heap* heap() const { return isolate()->heap(); }

  // Writes a full snapshot of the program (VM isolate and regular isolate
  // group).
  void WriteFullSnapshot(
      GrowableArray<LoadingUnitSerializationData*>* data = nullptr);
  void WriteUnitSnapshot(GrowableArray<LoadingUnitSerializationData*>* units,
                         LoadingUnitSerializationData* unit,
                         uint32_t program_hash);

  intptr_t VmIsolateSnapshotSize() const { return vm_isolate_snapshot_size_; }
  intptr_t IsolateSnapshotSize() const { return isolate_snapshot_size_; }

 private:
  // Writes a snapshot of the VM isolate.
  intptr_t WriteVMSnapshot();

  // Writes a full snapshot of the regular Dart isolate group.
  void WriteProgramSnapshot(intptr_t num_base_objects,
                            GrowableArray<LoadingUnitSerializationData*>* data);

  Thread* thread_;
  Snapshot::Kind kind_;
  uint8_t** vm_snapshot_data_buffer_;
  uint8_t** isolate_snapshot_data_buffer_;
  ReAlloc alloc_;
  intptr_t vm_isolate_snapshot_size_;
  intptr_t isolate_snapshot_size_;
  ImageWriter* vm_image_writer_;
  ImageWriter* isolate_image_writer_;

  // Stats for benchmarking.
  intptr_t clustered_vm_size_;
  intptr_t clustered_isolate_size_;
  intptr_t mapped_data_size_;
  intptr_t mapped_text_size_;

  V8SnapshotProfileWriter* profile_writer_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(FullSnapshotWriter);
};
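
// Sketched usage for producing the paired VM and isolate snapshots (buffer,
// allocator, and image-writer setup elided; my_realloc and the image writers
// are placeholders, and the snapshot kind depends on the embedder):
//
//   uint8_t* vm_data = nullptr;
//   uint8_t* isolate_data = nullptr;
//   FullSnapshotWriter writer(Snapshot::kFullAOT, &vm_data, &isolate_data,
//                             &my_realloc, vm_image_writer,
//                             isolate_image_writer);
//   writer.WriteFullSnapshot();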

class FullSnapshotReader {
 public:
  FullSnapshotReader(const Snapshot* snapshot,
                     const uint8_t* instructions_buffer,
                     Thread* thread);
  ~FullSnapshotReader() {}

  ApiErrorPtr ReadVMSnapshot();
  ApiErrorPtr ReadProgramSnapshot();
  ApiErrorPtr ReadUnitSnapshot(const LoadingUnit& unit);

 private:
  ApiErrorPtr ConvertToApiError(char* message);
  void PatchGlobalObjectPool();
  void InitializeBSS();

  Snapshot::Kind kind_;
  Thread* thread_;
  const uint8_t* buffer_;
  intptr_t size_;
  const uint8_t* data_image_;
  const uint8_t* instructions_image_;

  DISALLOW_COPY_AND_ASSIGN(FullSnapshotReader);
};
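
// Sketched usage on the loading side (error handling elided; the VM snapshot
// is read before the program snapshot):
//
//   FullSnapshotReader reader(snapshot, instructions_buffer, thread);
//   ApiErrorPtr error = reader.ReadVMSnapshot();
//   if (error == ApiError::null()) {
//     error = reader.ReadProgramSnapshot();
//   }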

}  // namespace dart

#endif  // RUNTIME_VM_CLUSTERED_SNAPSHOT_H_