1 | // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
#include "vm/clustered_snapshot.h"

#include <memory>
8 | |
9 | #include "platform/assert.h" |
10 | #include "vm/bootstrap.h" |
11 | #include "vm/bss_relocs.h" |
12 | #include "vm/class_id.h" |
13 | #include "vm/code_observers.h" |
14 | #include "vm/compiler/api/print_filter.h" |
15 | #include "vm/compiler/assembler/disassembler.h" |
16 | #include "vm/dart.h" |
17 | #include "vm/dispatch_table.h" |
18 | #include "vm/flag_list.h" |
19 | #include "vm/growable_array.h" |
20 | #include "vm/heap/heap.h" |
21 | #include "vm/image_snapshot.h" |
22 | #include "vm/native_entry.h" |
23 | #include "vm/object.h" |
24 | #include "vm/object_store.h" |
25 | #include "vm/program_visitor.h" |
26 | #include "vm/stub_code.h" |
27 | #include "vm/symbols.h" |
28 | #include "vm/timeline.h" |
29 | #include "vm/version.h" |
30 | #include "vm/zone_text_buffer.h" |
31 | |
32 | #if !defined(DART_PRECOMPILED_RUNTIME) |
33 | #include "vm/compiler/backend/code_statistics.h" |
34 | #include "vm/compiler/backend/il_printer.h" |
35 | #include "vm/compiler/relocation.h" |
36 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
37 | |
38 | namespace dart { |
39 | |
40 | #if !defined(DART_PRECOMPILED_RUNTIME) |
41 | DEFINE_FLAG(bool, |
42 | print_cluster_information, |
43 | false, |
44 | "Print information about clusters written to snapshot" ); |
45 | #endif |
46 | |
47 | #if defined(DART_PRECOMPILER) |
48 | DEFINE_FLAG(charp, |
49 | write_v8_snapshot_profile_to, |
50 | NULL, |
51 | "Write a snapshot profile in V8 format to a file." ); |
52 | #endif // defined(DART_PRECOMPILER) |
53 | |
54 | #if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32) |
55 | |
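// Relocation may patch the instruction stream of the collected Code objects,
// so the code pages are made temporarily writable for the duration of the
// call via the WritableCodePages scope.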
56 | static void RelocateCodeObjects( |
57 | bool is_vm, |
58 | GrowableArray<CodePtr>* code_objects, |
59 | GrowableArray<ImageWriterCommand>* image_writer_commands) { |
60 | auto thread = Thread::Current(); |
61 | auto isolate = is_vm ? Dart::vm_isolate() : thread->isolate(); |
62 | |
63 | WritableCodePages writable_code_pages(thread, isolate); |
64 | CodeRelocator::Relocate(thread, code_objects, image_writer_commands, is_vm); |
65 | } |
66 | |
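// Hash-map trait that treats a Code pointer as its own key, value, and hash,
// i.e. a simple identity set of CodePtrs.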
67 | class CodePtrKeyValueTrait { |
68 | public: |
69 | // Typedefs needed for the DirectChainedHashMap template. |
70 | typedef const CodePtr Key; |
71 | typedef const CodePtr Value; |
72 | typedef CodePtr Pair; |
73 | |
74 | static Key KeyOf(Pair kv) { return kv; } |
75 | static Value ValueOf(Pair kv) { return kv; } |
76 | static inline intptr_t Hashcode(Key key) { |
77 | return static_cast<intptr_t>(key); |
78 | } |
79 | |
80 | static inline bool IsKeyEqual(Pair pair, Key key) { return pair == key; } |
81 | }; |
82 | |
83 | typedef DirectChainedHashMap<CodePtrKeyValueTrait> RawCodeSet; |
84 | |
85 | #endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32) |
86 | |
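// Bump-allocates |size| bytes of uninitialized old-space memory. The caller
// must initialize the object header afterwards (see
// Deserializer::InitializeHeader); failure to allocate is fatal, since a
// snapshot cannot be partially deserialized.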
87 | static ObjectPtr AllocateUninitialized(PageSpace* old_space, intptr_t size) { |
88 | ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
89 | uword address = old_space->TryAllocateDataBumpLocked(size); |
90 | if (address == 0) { |
91 | OUT_OF_MEMORY(); |
92 | } |
93 | return ObjectLayout::FromAddr(address); |
94 | } |
95 | |
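// Initializes the tag word of a freshly allocated object: class id, size,
// canonical bit, and the GC bits for an old-space object that is neither
// marked nor remembered. All snapshot objects are deserialized into old
// space, so the new bit is always cleared.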
void Deserializer::InitializeHeader(ObjectPtr raw,
                                    intptr_t class_id,
                                    intptr_t size,
                                    bool is_canonical) {
100 | ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
101 | uint32_t tags = 0; |
102 | tags = ObjectLayout::ClassIdTag::update(class_id, tags); |
103 | tags = ObjectLayout::SizeTag::update(size, tags); |
104 | tags = ObjectLayout::CanonicalBit::update(is_canonical, tags); |
105 | tags = ObjectLayout::OldBit::update(true, tags); |
106 | tags = ObjectLayout::OldAndNotMarkedBit::update(true, tags); |
107 | tags = ObjectLayout::OldAndNotRememberedBit::update(true, tags); |
108 | tags = ObjectLayout::NewBit::update(false, tags); |
109 | raw->ptr()->tags_ = tags; |
110 | #if defined(HASH_IN_OBJECT_HEADER) |
111 | raw->ptr()->hash_ = 0; |
112 | #endif |
113 | } |
114 | |
115 | #if !defined(DART_PRECOMPILED_RUNTIME) |
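// Each cluster is written in two passes: WriteAlloc emits whatever the
// deserializer needs to allocate the cluster's objects (and assigns each
// object a ref id), and WriteFill emits the objects' contents, referring to
// other objects by ref id. These wrappers additionally record per-cluster
// byte and object counts for --print_cluster_information.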
116 | void SerializationCluster::WriteAndMeasureAlloc(Serializer* serializer) { |
117 | intptr_t start_size = serializer->bytes_written(); |
118 | intptr_t start_data = serializer->GetDataSize(); |
119 | intptr_t start_objects = serializer->next_ref_index(); |
120 | WriteAlloc(serializer); |
121 | intptr_t stop_size = serializer->bytes_written(); |
122 | intptr_t stop_data = serializer->GetDataSize(); |
123 | intptr_t stop_objects = serializer->next_ref_index(); |
124 | if (FLAG_print_cluster_information) { |
125 | OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "), " , start_size, |
126 | stop_size - start_size); |
127 | OS::PrintErr("Data 0x%" Pp " (%" Pd "): " , start_data, |
128 | stop_data - start_data); |
129 | OS::PrintErr("Alloc %s (%" Pd ")\n" , name(), stop_objects - start_objects); |
130 | } |
131 | size_ += (stop_size - start_size) + (stop_data - start_data); |
132 | num_objects_ += (stop_objects - start_objects); |
133 | } |
134 | |
135 | void SerializationCluster::WriteAndMeasureFill(Serializer* serializer) { |
136 | intptr_t start = serializer->bytes_written(); |
137 | WriteFill(serializer); |
138 | intptr_t stop = serializer->bytes_written(); |
139 | if (FLAG_print_cluster_information) { |
140 | OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "): Fill %s\n" , start, stop - start, |
141 | name()); |
142 | } |
143 | size_ += (stop - start); |
144 | } |
145 | |
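// Classes fall into two groups: predefined classes already exist in the
// deserializer's class table and are only looked up by cid, while all other
// classes must be allocated and registered on the read side.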
146 | class ClassSerializationCluster : public SerializationCluster { |
147 | public: |
148 | explicit ClassSerializationCluster(intptr_t num_cids) |
149 | : SerializationCluster("Class" ), |
150 | predefined_(kNumPredefinedCids), |
151 | objects_(num_cids) {} |
152 | ~ClassSerializationCluster() {} |
153 | |
154 | void Trace(Serializer* s, ObjectPtr object) { |
155 | ClassPtr cls = Class::RawCast(object); |
156 | intptr_t class_id = cls->ptr()->id_; |
157 | |
158 | if (class_id == kIllegalCid) { |
159 | // Classes expected to be dropped by the precompiler should not be traced. |
160 | s->UnexpectedObject(cls, "Class with illegal cid" ); |
161 | } |
162 | if (class_id < kNumPredefinedCids) { |
163 | // These classes are allocated by Object::Init or Object::InitOnce, so the |
164 | // deserializer must find them in the class table instead of allocating |
165 | // them. |
166 | predefined_.Add(cls); |
167 | } else { |
168 | objects_.Add(cls); |
169 | } |
170 | |
171 | PushFromTo(cls); |
172 | } |
173 | |
174 | void WriteAlloc(Serializer* s) { |
175 | s->WriteCid(kClassCid); |
176 | intptr_t count = predefined_.length(); |
177 | s->WriteUnsigned(count); |
178 | for (intptr_t i = 0; i < count; i++) { |
179 | ClassPtr cls = predefined_[i]; |
180 | s->AssignRef(cls); |
181 | AutoTraceObject(cls); |
182 | intptr_t class_id = cls->ptr()->id_; |
183 | s->WriteCid(class_id); |
184 | } |
185 | count = objects_.length(); |
186 | s->WriteUnsigned(count); |
187 | for (intptr_t i = 0; i < count; i++) { |
188 | ClassPtr cls = objects_[i]; |
189 | s->AssignRef(cls); |
190 | } |
191 | } |
192 | |
193 | void WriteFill(Serializer* s) { |
194 | intptr_t count = predefined_.length(); |
195 | for (intptr_t i = 0; i < count; i++) { |
196 | WriteClass(s, predefined_[i]); |
197 | } |
198 | count = objects_.length(); |
199 | for (intptr_t i = 0; i < count; i++) { |
200 | WriteClass(s, objects_[i]); |
201 | } |
202 | } |
203 | |
204 | private: |
205 | void WriteClass(Serializer* s, ClassPtr cls) { |
206 | AutoTraceObjectName(cls, cls->ptr()->name_); |
207 | WriteFromTo(cls); |
208 | intptr_t class_id = cls->ptr()->id_; |
209 | if (class_id == kIllegalCid) { |
210 | s->UnexpectedObject(cls, "Class with illegal cid" ); |
211 | } |
212 | s->WriteCid(class_id); |
213 | if (s->kind() == Snapshot::kFull && RequireLegacyErasureOfConstants(cls)) { |
214 | s->UnexpectedObject(cls, "Class with non mode agnostic constants" ); |
215 | } |
216 | if (s->kind() != Snapshot::kFullAOT) { |
217 | s->Write<uint32_t>(cls->ptr()->binary_declaration_); |
218 | } |
219 | s->Write<int32_t>(Class::target_instance_size_in_words(cls)); |
220 | s->Write<int32_t>(Class::target_next_field_offset_in_words(cls)); |
221 | s->Write<int32_t>(Class::target_type_arguments_field_offset_in_words(cls)); |
222 | s->Write<int16_t>(cls->ptr()->num_type_arguments_); |
223 | s->Write<uint16_t>(cls->ptr()->num_native_fields_); |
224 | s->WriteTokenPosition(cls->ptr()->token_pos_); |
225 | s->WriteTokenPosition(cls->ptr()->end_token_pos_); |
226 | s->Write<uint32_t>(cls->ptr()->state_bits_); |
227 | |
228 | // In AOT, the bitmap of unboxed fields should also be serialized |
229 | if (FLAG_precompiled_mode && !ClassTable::IsTopLevelCid(class_id)) { |
230 | s->WriteUnsigned64( |
231 | CalculateTargetUnboxedFieldsBitmap(s, class_id).Value()); |
232 | } |
233 | } |
234 | |
235 | GrowableArray<ClassPtr> predefined_; |
236 | GrowableArray<ClassPtr> objects_; |
237 | |
238 | UnboxedFieldBitmap CalculateTargetUnboxedFieldsBitmap(Serializer* s, |
239 | intptr_t class_id) { |
240 | const auto unboxed_fields_bitmap_host = |
241 | s->isolate()->group()->shared_class_table()->GetUnboxedFieldsMapAt( |
242 | class_id); |
243 | |
244 | UnboxedFieldBitmap unboxed_fields_bitmap; |
245 | if (unboxed_fields_bitmap_host.IsEmpty() || |
246 | kWordSize == compiler::target::kWordSize) { |
247 | unboxed_fields_bitmap = unboxed_fields_bitmap_host; |
248 | } else { |
249 | ASSERT(kWordSize == 8 && compiler::target::kWordSize == 4); |
250 | // A new bitmap is built if the word sizes in the target and |
251 | // host are different |
252 | unboxed_fields_bitmap.Reset(); |
253 | intptr_t target_i = 0, host_i = 0; |
254 | |
255 | while (host_i < UnboxedFieldBitmap::Length()) { |
256 | // Each unboxed field has constant length, therefore the number of |
257 | // words used by it should double when compiling from 64-bit to 32-bit. |
258 | if (unboxed_fields_bitmap_host.Get(host_i++)) { |
259 | unboxed_fields_bitmap.Set(target_i++); |
260 | unboxed_fields_bitmap.Set(target_i++); |
261 | } else { |
          // For object pointers, the field always occupies a single word.
263 | target_i++; |
264 | } |
265 | } |
266 | } |
267 | |
268 | return unboxed_fields_bitmap; |
269 | } |
270 | |
271 | bool RequireLegacyErasureOfConstants(ClassPtr cls) { |
272 | // Do not generate a core snapshot containing constants that would require |
273 | // a legacy erasure of their types if loaded in an isolate running in weak |
274 | // mode. |
275 | if (cls->ptr()->host_type_arguments_field_offset_in_words_ == |
276 | Class::kNoTypeArguments || |
277 | cls->ptr()->constants_ == Object::empty_array().raw()) { |
278 | return false; |
279 | } |
280 | Zone* zone = Thread::Current()->zone(); |
281 | const Class& clazz = Class::Handle(zone, cls); |
282 | return clazz.RequireLegacyErasureOfConstants(zone); |
283 | } |
284 | }; |
285 | #endif // !DART_PRECOMPILED_RUNTIME |
286 | |
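// Mirrors ClassSerializationCluster: predefined classes are resolved against
// the existing class table in ReadAlloc, while the remaining classes are
// allocated there and registered in the table during ReadFill.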
287 | class ClassDeserializationCluster : public DeserializationCluster { |
288 | public: |
289 | ClassDeserializationCluster() {} |
290 | ~ClassDeserializationCluster() {} |
291 | |
292 | void ReadAlloc(Deserializer* d) { |
293 | predefined_start_index_ = d->next_index(); |
294 | PageSpace* old_space = d->heap()->old_space(); |
295 | intptr_t count = d->ReadUnsigned(); |
296 | ClassTable* table = d->isolate()->class_table(); |
297 | for (intptr_t i = 0; i < count; i++) { |
298 | intptr_t class_id = d->ReadCid(); |
299 | ASSERT(table->HasValidClassAt(class_id)); |
300 | ClassPtr cls = table->At(class_id); |
301 | ASSERT(cls != nullptr); |
302 | d->AssignRef(cls); |
303 | } |
304 | predefined_stop_index_ = d->next_index(); |
305 | |
306 | start_index_ = d->next_index(); |
307 | count = d->ReadUnsigned(); |
308 | for (intptr_t i = 0; i < count; i++) { |
309 | d->AssignRef(AllocateUninitialized(old_space, Class::InstanceSize())); |
310 | } |
311 | stop_index_ = d->next_index(); |
312 | } |
313 | |
314 | void ReadFill(Deserializer* d) { |
315 | ClassTable* table = d->isolate()->class_table(); |
316 | |
317 | for (intptr_t id = predefined_start_index_; id < predefined_stop_index_; |
318 | id++) { |
319 | ClassPtr cls = static_cast<ClassPtr>(d->Ref(id)); |
320 | ReadFromTo(cls); |
321 | intptr_t class_id = d->ReadCid(); |
322 | cls->ptr()->id_ = class_id; |
323 | #if !defined(DART_PRECOMPILED_RUNTIME) |
324 | if (d->kind() != Snapshot::kFullAOT) { |
325 | cls->ptr()->binary_declaration_ = d->Read<uint32_t>(); |
326 | } |
327 | #endif |
328 | if (!IsInternalVMdefinedClassId(class_id)) { |
329 | cls->ptr()->host_instance_size_in_words_ = d->Read<int32_t>(); |
330 | cls->ptr()->host_next_field_offset_in_words_ = d->Read<int32_t>(); |
331 | #if !defined(DART_PRECOMPILED_RUNTIME) |
332 | // Only one pair is serialized. The target field only exists when |
333 | // DART_PRECOMPILED_RUNTIME is not defined |
334 | cls->ptr()->target_instance_size_in_words_ = |
335 | cls->ptr()->host_instance_size_in_words_; |
336 | cls->ptr()->target_next_field_offset_in_words_ = |
337 | cls->ptr()->host_next_field_offset_in_words_; |
338 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
339 | } else { |
340 | d->Read<int32_t>(); // Skip. |
341 | d->Read<int32_t>(); // Skip. |
342 | } |
343 | cls->ptr()->host_type_arguments_field_offset_in_words_ = |
344 | d->Read<int32_t>(); |
345 | #if !defined(DART_PRECOMPILED_RUNTIME) |
346 | cls->ptr()->target_type_arguments_field_offset_in_words_ = |
347 | cls->ptr()->host_type_arguments_field_offset_in_words_; |
348 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
349 | cls->ptr()->num_type_arguments_ = d->Read<int16_t>(); |
350 | cls->ptr()->num_native_fields_ = d->Read<uint16_t>(); |
351 | cls->ptr()->token_pos_ = d->ReadTokenPosition(); |
352 | cls->ptr()->end_token_pos_ = d->ReadTokenPosition(); |
353 | cls->ptr()->state_bits_ = d->Read<uint32_t>(); |
354 | |
355 | if (FLAG_precompiled_mode) { |
356 | d->ReadUnsigned64(); // Skip unboxed fields bitmap. |
357 | } |
358 | } |
359 | |
360 | auto shared_class_table = d->isolate()->group()->shared_class_table(); |
361 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
362 | ClassPtr cls = static_cast<ClassPtr>(d->Ref(id)); |
363 | Deserializer::InitializeHeader(cls, kClassCid, Class::InstanceSize()); |
364 | ReadFromTo(cls); |
365 | |
366 | intptr_t class_id = d->ReadCid(); |
367 | ASSERT(class_id >= kNumPredefinedCids); |
368 | cls->ptr()->id_ = class_id; |
369 | |
370 | #if !defined(DART_PRECOMPILED_RUNTIME) |
371 | if (d->kind() != Snapshot::kFullAOT) { |
372 | cls->ptr()->binary_declaration_ = d->Read<uint32_t>(); |
373 | } |
374 | #endif |
375 | cls->ptr()->host_instance_size_in_words_ = d->Read<int32_t>(); |
376 | cls->ptr()->host_next_field_offset_in_words_ = d->Read<int32_t>(); |
377 | cls->ptr()->host_type_arguments_field_offset_in_words_ = |
378 | d->Read<int32_t>(); |
379 | #if !defined(DART_PRECOMPILED_RUNTIME) |
380 | cls->ptr()->target_instance_size_in_words_ = |
381 | cls->ptr()->host_instance_size_in_words_; |
382 | cls->ptr()->target_next_field_offset_in_words_ = |
383 | cls->ptr()->host_next_field_offset_in_words_; |
384 | cls->ptr()->target_type_arguments_field_offset_in_words_ = |
385 | cls->ptr()->host_type_arguments_field_offset_in_words_; |
386 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
387 | cls->ptr()->num_type_arguments_ = d->Read<int16_t>(); |
388 | cls->ptr()->num_native_fields_ = d->Read<uint16_t>(); |
389 | cls->ptr()->token_pos_ = d->ReadTokenPosition(); |
390 | cls->ptr()->end_token_pos_ = d->ReadTokenPosition(); |
391 | cls->ptr()->state_bits_ = d->Read<uint32_t>(); |
392 | |
393 | table->AllocateIndex(class_id); |
394 | table->SetAt(class_id, cls); |
395 | |
396 | if (FLAG_precompiled_mode && !ClassTable::IsTopLevelCid(class_id)) { |
397 | const UnboxedFieldBitmap unboxed_fields_map(d->ReadUnsigned64()); |
398 | shared_class_table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map); |
399 | } |
400 | } |
401 | } |
402 | |
403 | private: |
404 | intptr_t predefined_start_index_; |
405 | intptr_t predefined_stop_index_; |
406 | }; |
407 | |
408 | #if !defined(DART_PRECOMPILED_RUNTIME) |
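// TypeArguments is a variable-length object, so each instance's length is
// written during the alloc phase; the deserializer needs it to compute the
// instance size before any contents are read.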
409 | class TypeArgumentsSerializationCluster : public SerializationCluster { |
410 | public: |
  TypeArgumentsSerializationCluster()
      : SerializationCluster("TypeArguments") {}
412 | ~TypeArgumentsSerializationCluster() {} |
413 | |
414 | void Trace(Serializer* s, ObjectPtr object) { |
415 | TypeArgumentsPtr type_args = TypeArguments::RawCast(object); |
416 | objects_.Add(type_args); |
417 | |
418 | s->Push(type_args->ptr()->instantiations_); |
419 | const intptr_t length = Smi::Value(type_args->ptr()->length_); |
420 | for (intptr_t i = 0; i < length; i++) { |
421 | s->Push(type_args->ptr()->types()[i]); |
422 | } |
423 | } |
424 | |
425 | void WriteAlloc(Serializer* s) { |
426 | s->WriteCid(kTypeArgumentsCid); |
427 | const intptr_t count = objects_.length(); |
428 | s->WriteUnsigned(count); |
429 | for (intptr_t i = 0; i < count; i++) { |
430 | TypeArgumentsPtr type_args = objects_[i]; |
431 | s->AssignRef(type_args); |
432 | AutoTraceObject(type_args); |
433 | const intptr_t length = Smi::Value(type_args->ptr()->length_); |
434 | s->WriteUnsigned(length); |
435 | } |
436 | } |
437 | |
438 | void WriteFill(Serializer* s) { |
439 | const intptr_t count = objects_.length(); |
440 | for (intptr_t i = 0; i < count; i++) { |
441 | TypeArgumentsPtr type_args = objects_[i]; |
442 | AutoTraceObject(type_args); |
443 | const intptr_t length = Smi::Value(type_args->ptr()->length_); |
444 | s->WriteUnsigned(length); |
445 | s->Write<bool>(type_args->ptr()->IsCanonical()); |
446 | intptr_t hash = Smi::Value(type_args->ptr()->hash_); |
447 | s->Write<int32_t>(hash); |
448 | const intptr_t nullability = Smi::Value(type_args->ptr()->nullability_); |
449 | s->WriteUnsigned(nullability); |
450 | WriteField(type_args, instantiations_); |
451 | for (intptr_t j = 0; j < length; j++) { |
452 | s->WriteElementRef(type_args->ptr()->types()[j], j); |
453 | } |
454 | } |
455 | } |
456 | |
457 | private: |
458 | GrowableArray<TypeArgumentsPtr> objects_; |
459 | }; |
460 | #endif // !DART_PRECOMPILED_RUNTIME |
461 | |
462 | class TypeArgumentsDeserializationCluster : public DeserializationCluster { |
463 | public: |
464 | TypeArgumentsDeserializationCluster() {} |
465 | ~TypeArgumentsDeserializationCluster() {} |
466 | |
467 | void ReadAlloc(Deserializer* d) { |
468 | start_index_ = d->next_index(); |
469 | PageSpace* old_space = d->heap()->old_space(); |
470 | const intptr_t count = d->ReadUnsigned(); |
471 | for (intptr_t i = 0; i < count; i++) { |
472 | const intptr_t length = d->ReadUnsigned(); |
473 | d->AssignRef(AllocateUninitialized(old_space, |
474 | TypeArguments::InstanceSize(length))); |
475 | } |
476 | stop_index_ = d->next_index(); |
477 | } |
478 | |
479 | void ReadFill(Deserializer* d) { |
480 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
481 | TypeArgumentsPtr type_args = static_cast<TypeArgumentsPtr>(d->Ref(id)); |
482 | const intptr_t length = d->ReadUnsigned(); |
483 | bool is_canonical = d->Read<bool>(); |
484 | Deserializer::InitializeHeader(type_args, kTypeArgumentsCid, |
485 | TypeArguments::InstanceSize(length), |
486 | is_canonical); |
487 | type_args->ptr()->length_ = Smi::New(length); |
488 | type_args->ptr()->hash_ = Smi::New(d->Read<int32_t>()); |
489 | type_args->ptr()->nullability_ = Smi::New(d->ReadUnsigned()); |
490 | type_args->ptr()->instantiations_ = static_cast<ArrayPtr>(d->ReadRef()); |
491 | for (intptr_t j = 0; j < length; j++) { |
492 | type_args->ptr()->types()[j] = |
493 | static_cast<AbstractTypePtr>(d->ReadRef()); |
494 | } |
495 | } |
496 | } |
497 | }; |
498 | |
499 | #if !defined(DART_PRECOMPILED_RUNTIME) |
500 | class PatchClassSerializationCluster : public SerializationCluster { |
501 | public: |
  PatchClassSerializationCluster() : SerializationCluster("PatchClass") {}
503 | ~PatchClassSerializationCluster() {} |
504 | |
505 | void Trace(Serializer* s, ObjectPtr object) { |
506 | PatchClassPtr cls = PatchClass::RawCast(object); |
507 | objects_.Add(cls); |
508 | PushFromTo(cls); |
509 | } |
510 | |
511 | void WriteAlloc(Serializer* s) { |
512 | s->WriteCid(kPatchClassCid); |
513 | const intptr_t count = objects_.length(); |
514 | s->WriteUnsigned(count); |
515 | for (intptr_t i = 0; i < count; i++) { |
516 | PatchClassPtr cls = objects_[i]; |
517 | s->AssignRef(cls); |
518 | } |
519 | } |
520 | |
521 | void WriteFill(Serializer* s) { |
522 | const intptr_t count = objects_.length(); |
523 | for (intptr_t i = 0; i < count; i++) { |
524 | PatchClassPtr cls = objects_[i]; |
525 | AutoTraceObject(cls); |
526 | WriteFromTo(cls); |
527 | if (s->kind() != Snapshot::kFullAOT) { |
528 | s->Write<int32_t>(cls->ptr()->library_kernel_offset_); |
529 | } |
530 | } |
531 | } |
532 | |
533 | private: |
534 | GrowableArray<PatchClassPtr> objects_; |
535 | }; |
536 | #endif // !DART_PRECOMPILED_RUNTIME |
537 | |
538 | class PatchClassDeserializationCluster : public DeserializationCluster { |
539 | public: |
540 | PatchClassDeserializationCluster() {} |
541 | ~PatchClassDeserializationCluster() {} |
542 | |
543 | void ReadAlloc(Deserializer* d) { |
544 | start_index_ = d->next_index(); |
545 | PageSpace* old_space = d->heap()->old_space(); |
546 | const intptr_t count = d->ReadUnsigned(); |
547 | for (intptr_t i = 0; i < count; i++) { |
548 | d->AssignRef( |
549 | AllocateUninitialized(old_space, PatchClass::InstanceSize())); |
550 | } |
551 | stop_index_ = d->next_index(); |
552 | } |
553 | |
554 | void ReadFill(Deserializer* d) { |
555 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
556 | PatchClassPtr cls = static_cast<PatchClassPtr>(d->Ref(id)); |
557 | Deserializer::InitializeHeader(cls, kPatchClassCid, |
558 | PatchClass::InstanceSize()); |
559 | ReadFromTo(cls); |
560 | #if !defined(DART_PRECOMPILED_RUNTIME) |
561 | if (d->kind() != Snapshot::kFullAOT) { |
562 | cls->ptr()->library_kernel_offset_ = d->Read<int32_t>(); |
563 | } |
564 | #endif |
565 | } |
566 | } |
567 | }; |
568 | |
569 | #if !defined(DART_PRECOMPILED_RUNTIME) |
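// Which code-related fields of a Function are serialized depends on the
// snapshot kind: core snapshots (kFull) reference bytecode, AOT snapshots
// reference only the compiled code, and JIT snapshots reference unoptimized
// code, bytecode, code, and the IC data array.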
570 | class FunctionSerializationCluster : public SerializationCluster { |
571 | public: |
  FunctionSerializationCluster() : SerializationCluster("Function") {}
573 | ~FunctionSerializationCluster() {} |
574 | |
575 | void Trace(Serializer* s, ObjectPtr object) { |
576 | Snapshot::Kind kind = s->kind(); |
577 | FunctionPtr func = Function::RawCast(object); |
578 | objects_.Add(func); |
579 | |
580 | PushFromTo(func); |
581 | if (kind == Snapshot::kFull) { |
582 | NOT_IN_PRECOMPILED(s->Push(func->ptr()->bytecode_)); |
583 | } else if (kind == Snapshot::kFullAOT) { |
584 | s->Push(func->ptr()->code_); |
585 | } else if (kind == Snapshot::kFullJIT) { |
586 | NOT_IN_PRECOMPILED(s->Push(func->ptr()->unoptimized_code_)); |
587 | NOT_IN_PRECOMPILED(s->Push(func->ptr()->bytecode_)); |
588 | s->Push(func->ptr()->code_); |
589 | s->Push(func->ptr()->ic_data_array_); |
590 | } |
591 | } |
592 | |
593 | void WriteAlloc(Serializer* s) { |
594 | s->WriteCid(kFunctionCid); |
595 | const intptr_t count = objects_.length(); |
596 | s->WriteUnsigned(count); |
597 | for (intptr_t i = 0; i < count; i++) { |
598 | FunctionPtr func = objects_[i]; |
599 | s->AssignRef(func); |
600 | } |
601 | } |
602 | |
603 | void WriteFill(Serializer* s) { |
604 | Snapshot::Kind kind = s->kind(); |
605 | const intptr_t count = objects_.length(); |
606 | for (intptr_t i = 0; i < count; i++) { |
607 | FunctionPtr func = objects_[i]; |
608 | AutoTraceObjectName(func, MakeDisambiguatedFunctionName(s, func)); |
609 | WriteFromTo(func); |
610 | if (kind == Snapshot::kFull) { |
611 | NOT_IN_PRECOMPILED(WriteField(func, bytecode_)); |
612 | } else if (kind == Snapshot::kFullAOT) { |
613 | WriteField(func, code_); |
614 | } else if (s->kind() == Snapshot::kFullJIT) { |
615 | NOT_IN_PRECOMPILED(WriteField(func, unoptimized_code_)); |
616 | NOT_IN_PRECOMPILED(WriteField(func, bytecode_)); |
617 | WriteField(func, code_); |
618 | WriteField(func, ic_data_array_); |
619 | } |
620 | |
621 | if (kind != Snapshot::kFullAOT) { |
622 | s->WriteTokenPosition(func->ptr()->token_pos_); |
623 | s->WriteTokenPosition(func->ptr()->end_token_pos_); |
624 | s->Write<uint32_t>(func->ptr()->binary_declaration_); |
625 | } |
626 | |
627 | s->Write<uint32_t>(func->ptr()->packed_fields_); |
628 | s->Write<uint32_t>(func->ptr()->kind_tag_); |
629 | } |
630 | } |
631 | |
632 | static const char* MakeDisambiguatedFunctionName(Serializer* s, |
633 | FunctionPtr f) { |
634 | if (s->profile_writer() == nullptr) { |
635 | return nullptr; |
636 | } |
637 | |
638 | REUSABLE_FUNCTION_HANDLESCOPE(s->thread()); |
639 | Function& fun = reused_function_handle.Handle(); |
640 | fun = f; |
641 | ZoneTextBuffer printer(s->thread()->zone()); |
642 | fun.PrintName(NameFormattingParams::DisambiguatedUnqualified( |
643 | Object::NameVisibility::kInternalName), |
644 | &printer); |
645 | return printer.buffer(); |
646 | } |
647 | |
648 | private: |
649 | GrowableArray<FunctionPtr> objects_; |
650 | }; |
651 | #endif // !DART_PRECOMPILED_RUNTIME |
652 | |
653 | class FunctionDeserializationCluster : public DeserializationCluster { |
654 | public: |
655 | FunctionDeserializationCluster() {} |
656 | ~FunctionDeserializationCluster() {} |
657 | |
658 | void ReadAlloc(Deserializer* d) { |
659 | start_index_ = d->next_index(); |
660 | PageSpace* old_space = d->heap()->old_space(); |
661 | const intptr_t count = d->ReadUnsigned(); |
662 | for (intptr_t i = 0; i < count; i++) { |
663 | d->AssignRef(AllocateUninitialized(old_space, Function::InstanceSize())); |
664 | } |
665 | stop_index_ = d->next_index(); |
666 | } |
667 | |
668 | void ReadFill(Deserializer* d) { |
669 | Snapshot::Kind kind = d->kind(); |
670 | |
671 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
672 | FunctionPtr func = static_cast<FunctionPtr>(d->Ref(id)); |
673 | Deserializer::InitializeHeader(func, kFunctionCid, |
674 | Function::InstanceSize()); |
675 | ReadFromTo(func); |
676 | |
677 | if (kind == Snapshot::kFull) { |
678 | NOT_IN_PRECOMPILED(func->ptr()->bytecode_ = |
679 | static_cast<BytecodePtr>(d->ReadRef())); |
680 | } else if (kind == Snapshot::kFullAOT) { |
681 | func->ptr()->code_ = static_cast<CodePtr>(d->ReadRef()); |
682 | } else if (kind == Snapshot::kFullJIT) { |
683 | NOT_IN_PRECOMPILED(func->ptr()->unoptimized_code_ = |
684 | static_cast<CodePtr>(d->ReadRef())); |
685 | NOT_IN_PRECOMPILED(func->ptr()->bytecode_ = |
686 | static_cast<BytecodePtr>(d->ReadRef())); |
687 | func->ptr()->code_ = static_cast<CodePtr>(d->ReadRef()); |
688 | func->ptr()->ic_data_array_ = static_cast<ArrayPtr>(d->ReadRef()); |
689 | } |
690 | |
691 | #if defined(DEBUG) |
692 | func->ptr()->entry_point_ = 0; |
693 | func->ptr()->unchecked_entry_point_ = 0; |
694 | #endif |
695 | |
696 | #if !defined(DART_PRECOMPILED_RUNTIME) |
697 | if (kind != Snapshot::kFullAOT) { |
698 | func->ptr()->token_pos_ = d->ReadTokenPosition(); |
699 | func->ptr()->end_token_pos_ = d->ReadTokenPosition(); |
700 | func->ptr()->binary_declaration_ = d->Read<uint32_t>(); |
701 | } |
702 | func->ptr()->unboxed_parameters_info_.Reset(); |
703 | #endif |
704 | func->ptr()->packed_fields_ = d->Read<uint32_t>(); |
705 | func->ptr()->kind_tag_ = d->Read<uint32_t>(); |
706 | if (kind == Snapshot::kFullAOT) { |
707 | // Omit fields used to support de/reoptimization. |
708 | } else { |
709 | #if !defined(DART_PRECOMPILED_RUNTIME) |
710 | func->ptr()->usage_counter_ = 0; |
711 | func->ptr()->optimized_instruction_count_ = 0; |
712 | func->ptr()->optimized_call_site_count_ = 0; |
713 | func->ptr()->deoptimization_counter_ = 0; |
714 | func->ptr()->state_bits_ = 0; |
715 | func->ptr()->inlining_depth_ = 0; |
716 | #endif |
717 | } |
718 | } |
719 | } |
720 | |
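  // Once all references are materialized, entry points can be wired up: in
  // AOT, each function's entry points are cached from its Code; in JIT,
  // functions with usable code get their instructions installed, and
  // everything else is pointed at the lazy-compile stub.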
721 | void PostLoad(Deserializer* d, const Array& refs) { |
722 | if (d->kind() == Snapshot::kFullAOT) { |
723 | Function& func = Function::Handle(d->zone()); |
724 | for (intptr_t i = start_index_; i < stop_index_; i++) { |
725 | func ^= refs.At(i); |
726 | ASSERT(func.raw()->ptr()->code_->IsCode()); |
727 | uword entry_point = func.raw()->ptr()->code_->ptr()->entry_point_; |
728 | ASSERT(entry_point != 0); |
729 | func.raw()->ptr()->entry_point_ = entry_point; |
730 | uword unchecked_entry_point = |
731 | func.raw()->ptr()->code_->ptr()->unchecked_entry_point_; |
732 | ASSERT(unchecked_entry_point != 0); |
733 | func.raw()->ptr()->unchecked_entry_point_ = unchecked_entry_point; |
734 | } |
735 | } else if (d->kind() == Snapshot::kFullJIT) { |
736 | Function& func = Function::Handle(d->zone()); |
737 | Code& code = Code::Handle(d->zone()); |
738 | for (intptr_t i = start_index_; i < stop_index_; i++) { |
739 | func ^= refs.At(i); |
740 | code = func.CurrentCode(); |
741 | if (func.HasCode() && !code.IsDisabled()) { |
742 | func.SetInstructions(code); // Set entrypoint. |
743 | func.SetWasCompiled(true); |
744 | } else { |
745 | func.ClearCode(); // Set code and entrypoint to lazy compile stub. |
746 | } |
747 | } |
748 | } else { |
749 | Function& func = Function::Handle(d->zone()); |
750 | for (intptr_t i = start_index_; i < stop_index_; i++) { |
751 | func ^= refs.At(i); |
752 | func.ClearCode(); // Set code and entrypoint to lazy compile stub. |
753 | } |
754 | } |
755 | } |
756 | }; |
757 | |
758 | #if !defined(DART_PRECOMPILED_RUNTIME) |
759 | class ClosureDataSerializationCluster : public SerializationCluster { |
760 | public: |
  ClosureDataSerializationCluster() : SerializationCluster("ClosureData") {}
762 | ~ClosureDataSerializationCluster() {} |
763 | |
764 | void Trace(Serializer* s, ObjectPtr object) { |
765 | ClosureDataPtr data = ClosureData::RawCast(object); |
766 | objects_.Add(data); |
767 | |
768 | if (s->kind() != Snapshot::kFullAOT) { |
769 | s->Push(data->ptr()->context_scope_); |
770 | } |
771 | s->Push(data->ptr()->parent_function_); |
772 | s->Push(data->ptr()->signature_type_); |
773 | s->Push(data->ptr()->closure_); |
774 | } |
775 | |
776 | void WriteAlloc(Serializer* s) { |
777 | s->WriteCid(kClosureDataCid); |
778 | const intptr_t count = objects_.length(); |
779 | s->WriteUnsigned(count); |
780 | for (intptr_t i = 0; i < count; i++) { |
781 | ClosureDataPtr data = objects_[i]; |
782 | s->AssignRef(data); |
783 | } |
784 | } |
785 | |
786 | void WriteFill(Serializer* s) { |
787 | const intptr_t count = objects_.length(); |
788 | for (intptr_t i = 0; i < count; i++) { |
789 | ClosureDataPtr data = objects_[i]; |
790 | AutoTraceObject(data); |
791 | if (s->kind() != Snapshot::kFullAOT) { |
792 | WriteField(data, context_scope_); |
793 | } |
794 | WriteField(data, parent_function_); |
795 | WriteField(data, signature_type_); |
796 | WriteField(data, closure_); |
797 | } |
798 | } |
799 | |
800 | private: |
801 | GrowableArray<ClosureDataPtr> objects_; |
802 | }; |
803 | #endif // !DART_PRECOMPILED_RUNTIME |
804 | |
805 | class ClosureDataDeserializationCluster : public DeserializationCluster { |
806 | public: |
807 | ClosureDataDeserializationCluster() {} |
808 | ~ClosureDataDeserializationCluster() {} |
809 | |
810 | void ReadAlloc(Deserializer* d) { |
811 | start_index_ = d->next_index(); |
812 | PageSpace* old_space = d->heap()->old_space(); |
813 | const intptr_t count = d->ReadUnsigned(); |
814 | for (intptr_t i = 0; i < count; i++) { |
815 | d->AssignRef( |
816 | AllocateUninitialized(old_space, ClosureData::InstanceSize())); |
817 | } |
818 | stop_index_ = d->next_index(); |
819 | } |
820 | |
821 | void ReadFill(Deserializer* d) { |
822 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
823 | ClosureDataPtr data = static_cast<ClosureDataPtr>(d->Ref(id)); |
824 | Deserializer::InitializeHeader(data, kClosureDataCid, |
825 | ClosureData::InstanceSize()); |
826 | if (d->kind() == Snapshot::kFullAOT) { |
827 | data->ptr()->context_scope_ = ContextScope::null(); |
828 | } else { |
829 | data->ptr()->context_scope_ = |
830 | static_cast<ContextScopePtr>(d->ReadRef()); |
831 | } |
832 | data->ptr()->parent_function_ = static_cast<FunctionPtr>(d->ReadRef()); |
833 | data->ptr()->signature_type_ = static_cast<TypePtr>(d->ReadRef()); |
834 | data->ptr()->closure_ = static_cast<InstancePtr>(d->ReadRef()); |
835 | } |
836 | } |
837 | }; |
838 | |
839 | #if !defined(DART_PRECOMPILED_RUNTIME) |
840 | class SignatureDataSerializationCluster : public SerializationCluster { |
841 | public: |
  SignatureDataSerializationCluster()
      : SerializationCluster("SignatureData") {}
843 | ~SignatureDataSerializationCluster() {} |
844 | |
845 | void Trace(Serializer* s, ObjectPtr object) { |
846 | SignatureDataPtr data = SignatureData::RawCast(object); |
847 | objects_.Add(data); |
848 | PushFromTo(data); |
849 | } |
850 | |
851 | void WriteAlloc(Serializer* s) { |
852 | s->WriteCid(kSignatureDataCid); |
853 | const intptr_t count = objects_.length(); |
854 | s->WriteUnsigned(count); |
855 | for (intptr_t i = 0; i < count; i++) { |
856 | SignatureDataPtr data = objects_[i]; |
857 | s->AssignRef(data); |
858 | } |
859 | } |
860 | |
861 | void WriteFill(Serializer* s) { |
862 | const intptr_t count = objects_.length(); |
863 | for (intptr_t i = 0; i < count; i++) { |
864 | SignatureDataPtr data = objects_[i]; |
865 | AutoTraceObject(data); |
866 | WriteFromTo(data); |
867 | } |
868 | } |
869 | |
870 | private: |
871 | GrowableArray<SignatureDataPtr> objects_; |
872 | }; |
873 | #endif // !DART_PRECOMPILED_RUNTIME |
874 | |
875 | class SignatureDataDeserializationCluster : public DeserializationCluster { |
876 | public: |
877 | SignatureDataDeserializationCluster() {} |
878 | ~SignatureDataDeserializationCluster() {} |
879 | |
880 | void ReadAlloc(Deserializer* d) { |
881 | start_index_ = d->next_index(); |
882 | PageSpace* old_space = d->heap()->old_space(); |
883 | const intptr_t count = d->ReadUnsigned(); |
884 | for (intptr_t i = 0; i < count; i++) { |
885 | d->AssignRef( |
886 | AllocateUninitialized(old_space, SignatureData::InstanceSize())); |
887 | } |
888 | stop_index_ = d->next_index(); |
889 | } |
890 | |
891 | void ReadFill(Deserializer* d) { |
892 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
893 | SignatureDataPtr data = static_cast<SignatureDataPtr>(d->Ref(id)); |
894 | Deserializer::InitializeHeader(data, kSignatureDataCid, |
895 | SignatureData::InstanceSize()); |
896 | ReadFromTo(data); |
897 | } |
898 | } |
899 | }; |
900 | |
901 | #if !defined(DART_PRECOMPILED_RUNTIME) |
902 | class FfiTrampolineDataSerializationCluster : public SerializationCluster { |
903 | public: |
904 | FfiTrampolineDataSerializationCluster() |
905 | : SerializationCluster("FfiTrampolineData" ) {} |
906 | ~FfiTrampolineDataSerializationCluster() {} |
907 | |
908 | void Trace(Serializer* s, ObjectPtr object) { |
909 | FfiTrampolineDataPtr data = FfiTrampolineData::RawCast(object); |
910 | objects_.Add(data); |
911 | PushFromTo(data); |
912 | } |
913 | |
914 | void WriteAlloc(Serializer* s) { |
915 | s->WriteCid(kFfiTrampolineDataCid); |
916 | const intptr_t count = objects_.length(); |
917 | s->WriteUnsigned(count); |
918 | for (intptr_t i = 0; i < count; i++) { |
919 | s->AssignRef(objects_[i]); |
920 | } |
921 | } |
922 | |
923 | void WriteFill(Serializer* s) { |
924 | const intptr_t count = objects_.length(); |
925 | for (intptr_t i = 0; i < count; i++) { |
926 | FfiTrampolineDataPtr const data = objects_[i]; |
927 | AutoTraceObject(data); |
928 | WriteFromTo(data); |
929 | |
930 | if (s->kind() == Snapshot::kFullAOT) { |
931 | s->WriteUnsigned(data->ptr()->callback_id_); |
932 | } else { |
933 | // FFI callbacks can only be written to AOT snapshots. |
934 | ASSERT(data->ptr()->callback_target_ == Object::null()); |
935 | } |
936 | } |
937 | } |
938 | |
939 | private: |
940 | GrowableArray<FfiTrampolineDataPtr> objects_; |
941 | }; |
942 | #endif // !DART_PRECOMPILED_RUNTIME |
943 | |
944 | class FfiTrampolineDataDeserializationCluster : public DeserializationCluster { |
945 | public: |
946 | FfiTrampolineDataDeserializationCluster() {} |
947 | ~FfiTrampolineDataDeserializationCluster() {} |
948 | |
949 | void ReadAlloc(Deserializer* d) { |
950 | start_index_ = d->next_index(); |
951 | PageSpace* old_space = d->heap()->old_space(); |
952 | const intptr_t count = d->ReadUnsigned(); |
953 | for (intptr_t i = 0; i < count; i++) { |
954 | d->AssignRef( |
955 | AllocateUninitialized(old_space, FfiTrampolineData::InstanceSize())); |
956 | } |
957 | stop_index_ = d->next_index(); |
958 | } |
959 | |
960 | void ReadFill(Deserializer* d) { |
961 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
962 | FfiTrampolineDataPtr data = static_cast<FfiTrampolineDataPtr>(d->Ref(id)); |
963 | Deserializer::InitializeHeader(data, kFfiTrampolineDataCid, |
964 | FfiTrampolineData::InstanceSize()); |
965 | ReadFromTo(data); |
966 | data->ptr()->callback_id_ = |
967 | d->kind() == Snapshot::kFullAOT ? d->ReadUnsigned() : 0; |
968 | } |
969 | } |
970 | }; |
971 | |
972 | #if !defined(DART_PRECOMPILED_RUNTIME) |
973 | class RedirectionDataSerializationCluster : public SerializationCluster { |
974 | public: |
975 | RedirectionDataSerializationCluster() |
976 | : SerializationCluster("RedirectionData" ) {} |
977 | ~RedirectionDataSerializationCluster() {} |
978 | |
979 | void Trace(Serializer* s, ObjectPtr object) { |
980 | RedirectionDataPtr data = RedirectionData::RawCast(object); |
981 | objects_.Add(data); |
982 | PushFromTo(data); |
983 | } |
984 | |
985 | void WriteAlloc(Serializer* s) { |
986 | s->WriteCid(kRedirectionDataCid); |
987 | const intptr_t count = objects_.length(); |
988 | s->WriteUnsigned(count); |
989 | for (intptr_t i = 0; i < count; i++) { |
990 | RedirectionDataPtr data = objects_[i]; |
991 | s->AssignRef(data); |
992 | } |
993 | } |
994 | |
995 | void WriteFill(Serializer* s) { |
996 | const intptr_t count = objects_.length(); |
997 | for (intptr_t i = 0; i < count; i++) { |
998 | RedirectionDataPtr data = objects_[i]; |
999 | AutoTraceObject(data); |
1000 | WriteFromTo(data); |
1001 | } |
1002 | } |
1003 | |
1004 | private: |
1005 | GrowableArray<RedirectionDataPtr> objects_; |
1006 | }; |
1007 | #endif // !DART_PRECOMPILED_RUNTIME |
1008 | |
1009 | class RedirectionDataDeserializationCluster : public DeserializationCluster { |
1010 | public: |
1011 | RedirectionDataDeserializationCluster() {} |
1012 | ~RedirectionDataDeserializationCluster() {} |
1013 | |
1014 | void ReadAlloc(Deserializer* d) { |
1015 | start_index_ = d->next_index(); |
1016 | PageSpace* old_space = d->heap()->old_space(); |
1017 | const intptr_t count = d->ReadUnsigned(); |
1018 | for (intptr_t i = 0; i < count; i++) { |
1019 | d->AssignRef( |
1020 | AllocateUninitialized(old_space, RedirectionData::InstanceSize())); |
1021 | } |
1022 | stop_index_ = d->next_index(); |
1023 | } |
1024 | |
1025 | void ReadFill(Deserializer* d) { |
1026 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1027 | RedirectionDataPtr data = static_cast<RedirectionDataPtr>(d->Ref(id)); |
1028 | Deserializer::InitializeHeader(data, kRedirectionDataCid, |
1029 | RedirectionData::InstanceSize()); |
1030 | ReadFromTo(data); |
1031 | } |
1032 | } |
1033 | }; |
1034 | |
1035 | #if !defined(DART_PRECOMPILED_RUNTIME) |
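// The last reference written for a Field is polymorphic: static fields write
// either the value slot from the field table or their saved initial value,
// while instance fields write their target offset as a Smi.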
1036 | class FieldSerializationCluster : public SerializationCluster { |
1037 | public: |
  FieldSerializationCluster() : SerializationCluster("Field") {}
1039 | ~FieldSerializationCluster() {} |
1040 | |
1041 | void Trace(Serializer* s, ObjectPtr object) { |
1042 | FieldPtr field = Field::RawCast(object); |
1043 | objects_.Add(field); |
1044 | |
1045 | Snapshot::Kind kind = s->kind(); |
1046 | |
1047 | s->Push(field->ptr()->name_); |
1048 | s->Push(field->ptr()->owner_); |
1049 | s->Push(field->ptr()->type_); |
    // Push the initializer function.
1051 | s->Push(field->ptr()->initializer_function_); |
1052 | |
1053 | if (kind != Snapshot::kFullAOT) { |
1054 | s->Push(field->ptr()->saved_initial_value_); |
1055 | s->Push(field->ptr()->guarded_list_length_); |
1056 | } |
1057 | if (kind == Snapshot::kFullJIT) { |
1058 | s->Push(field->ptr()->dependent_code_); |
1059 | } |
    // Push either the static value, the initial value, or the field offset.
1061 | if (Field::StaticBit::decode(field->ptr()->kind_bits_)) { |
1062 | if ( |
1063 | // For precompiled static fields, the value was already reset and |
1064 | // initializer_ now contains a Function. |
1065 | kind == Snapshot::kFullAOT || |
1066 | // Do not reset const fields. |
1067 | Field::ConstBit::decode(field->ptr()->kind_bits_)) { |
1068 | s->Push(s->field_table()->At( |
1069 | Smi::Value(field->ptr()->host_offset_or_field_id_))); |
1070 | } else { |
        // Otherwise, for static fields we push the initial static value.
1072 | s->Push(field->ptr()->saved_initial_value_); |
1073 | } |
1074 | } else { |
1075 | s->Push(Smi::New(Field::TargetOffsetOf(field))); |
1076 | } |
1077 | } |
1078 | |
1079 | void WriteAlloc(Serializer* s) { |
1080 | s->WriteCid(kFieldCid); |
1081 | const intptr_t count = objects_.length(); |
1082 | s->WriteUnsigned(count); |
1083 | for (intptr_t i = 0; i < count; i++) { |
1084 | FieldPtr field = objects_[i]; |
1085 | s->AssignRef(field); |
1086 | } |
1087 | } |
1088 | |
1089 | void WriteFill(Serializer* s) { |
1090 | Snapshot::Kind kind = s->kind(); |
1091 | const intptr_t count = objects_.length(); |
1092 | for (intptr_t i = 0; i < count; i++) { |
1093 | FieldPtr field = objects_[i]; |
1094 | AutoTraceObjectName(field, field->ptr()->name_); |
1095 | |
1096 | WriteField(field, name_); |
1097 | WriteField(field, owner_); |
1098 | WriteField(field, type_); |
1099 | // Write out the initializer function and initial value if not in AOT. |
1100 | WriteField(field, initializer_function_); |
1101 | if (kind != Snapshot::kFullAOT) { |
1102 | WriteField(field, saved_initial_value_); |
1103 | WriteField(field, guarded_list_length_); |
1104 | } |
1105 | if (kind == Snapshot::kFullJIT) { |
1106 | WriteField(field, dependent_code_); |
1107 | } |
1108 | |
1109 | if (kind != Snapshot::kFullAOT) { |
1110 | s->WriteTokenPosition(field->ptr()->token_pos_); |
1111 | s->WriteTokenPosition(field->ptr()->end_token_pos_); |
1112 | s->WriteCid(field->ptr()->guarded_cid_); |
1113 | s->WriteCid(field->ptr()->is_nullable_); |
1114 | s->Write<int8_t>(field->ptr()->static_type_exactness_state_); |
1115 | s->Write<uint32_t>(field->ptr()->binary_declaration_); |
1116 | } |
1117 | s->Write<uint16_t>(field->ptr()->kind_bits_); |
1118 | |
1119 | // Write out the initial static value or field offset. |
1120 | if (Field::StaticBit::decode(field->ptr()->kind_bits_)) { |
1121 | if ( |
1122 | // For precompiled static fields, the value was already reset and |
1123 | // initializer_ now contains a Function. |
1124 | kind == Snapshot::kFullAOT || |
1125 | // Do not reset const fields. |
1126 | Field::ConstBit::decode(field->ptr()->kind_bits_)) { |
1127 | WriteFieldValue("static value" , |
1128 | s->field_table()->At(Smi::Value( |
1129 | field->ptr()->host_offset_or_field_id_))); |
1130 | } else { |
1131 | // Otherwise, for static fields we write out the initial static value. |
1132 | WriteFieldValue("static value" , field->ptr()->saved_initial_value_); |
1133 | } |
1134 | s->WriteUnsigned(Smi::Value(field->ptr()->host_offset_or_field_id_)); |
1135 | } else { |
1136 | WriteFieldValue("offset" , Smi::New(Field::TargetOffsetOf(field))); |
1137 | } |
1138 | } |
1139 | } |
1140 | |
1141 | private: |
1142 | GrowableArray<FieldPtr> objects_; |
1143 | }; |
1144 | #endif // !DART_PRECOMPILED_RUNTIME |
1145 | |
1146 | class FieldDeserializationCluster : public DeserializationCluster { |
1147 | public: |
1148 | FieldDeserializationCluster() {} |
1149 | ~FieldDeserializationCluster() {} |
1150 | |
1151 | void ReadAlloc(Deserializer* d) { |
1152 | start_index_ = d->next_index(); |
1153 | PageSpace* old_space = d->heap()->old_space(); |
1154 | const intptr_t count = d->ReadUnsigned(); |
1155 | for (intptr_t i = 0; i < count; i++) { |
1156 | d->AssignRef(AllocateUninitialized(old_space, Field::InstanceSize())); |
1157 | } |
1158 | stop_index_ = d->next_index(); |
1159 | } |
1160 | |
1161 | void ReadFill(Deserializer* d) { |
1162 | Snapshot::Kind kind = d->kind(); |
1163 | |
1164 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1165 | FieldPtr field = static_cast<FieldPtr>(d->Ref(id)); |
1166 | Deserializer::InitializeHeader(field, kFieldCid, Field::InstanceSize()); |
1167 | ReadFromTo(field); |
1168 | if (kind != Snapshot::kFullAOT) { |
1169 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1170 | field->ptr()->saved_initial_value_ = |
1171 | static_cast<InstancePtr>(d->ReadRef()); |
1172 | #endif |
1173 | field->ptr()->guarded_list_length_ = static_cast<SmiPtr>(d->ReadRef()); |
1174 | } |
1175 | if (kind == Snapshot::kFullJIT) { |
1176 | field->ptr()->dependent_code_ = static_cast<ArrayPtr>(d->ReadRef()); |
1177 | } |
1178 | if (kind != Snapshot::kFullAOT) { |
1179 | field->ptr()->token_pos_ = d->ReadTokenPosition(); |
1180 | field->ptr()->end_token_pos_ = d->ReadTokenPosition(); |
1181 | field->ptr()->guarded_cid_ = d->ReadCid(); |
1182 | field->ptr()->is_nullable_ = d->ReadCid(); |
1183 | field->ptr()->static_type_exactness_state_ = d->Read<int8_t>(); |
1184 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1185 | field->ptr()->binary_declaration_ = d->Read<uint32_t>(); |
1186 | #endif |
1187 | } |
1188 | field->ptr()->kind_bits_ = d->Read<uint16_t>(); |
1189 | |
1190 | ObjectPtr value_or_offset = d->ReadRef(); |
1191 | if (Field::StaticBit::decode(field->ptr()->kind_bits_)) { |
1192 | intptr_t field_id = d->ReadUnsigned(); |
1193 | d->field_table()->SetAt(field_id, |
1194 | static_cast<InstancePtr>(value_or_offset)); |
1195 | field->ptr()->host_offset_or_field_id_ = Smi::New(field_id); |
1196 | } else { |
1197 | field->ptr()->host_offset_or_field_id_ = Smi::RawCast(value_or_offset); |
1198 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1199 | field->ptr()->target_offset_ = |
1200 | Smi::Value(field->ptr()->host_offset_or_field_id_); |
1201 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
1202 | } |
1203 | } |
1204 | } |
1205 | |
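  // When the isolate does not use field guards, the deserialized guard state
  // is widened to accept any value, effectively disabling the guards;
  // otherwise only the in-object list length offset needs recomputing.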
1206 | void PostLoad(Deserializer* d, const Array& refs) { |
1207 | Field& field = Field::Handle(d->zone()); |
1208 | if (!Isolate::Current()->use_field_guards()) { |
1209 | for (intptr_t i = start_index_; i < stop_index_; i++) { |
1210 | field ^= refs.At(i); |
1211 | field.set_guarded_cid(kDynamicCid); |
1212 | field.set_is_nullable(true); |
1213 | field.set_guarded_list_length(Field::kNoFixedLength); |
1214 | field.set_guarded_list_length_in_object_offset( |
1215 | Field::kUnknownLengthOffset); |
1216 | field.set_static_type_exactness_state( |
1217 | StaticTypeExactnessState::NotTracking()); |
1218 | } |
1219 | } else { |
1220 | for (intptr_t i = start_index_; i < stop_index_; i++) { |
1221 | field ^= refs.At(i); |
1222 | field.InitializeGuardedListLengthInObjectOffset(); |
1223 | } |
1224 | } |
1225 | } |
1226 | }; |
1227 | |
1228 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1229 | class ScriptSerializationCluster : public SerializationCluster { |
1230 | public: |
  ScriptSerializationCluster() : SerializationCluster("Script") {}
1232 | ~ScriptSerializationCluster() {} |
1233 | |
1234 | void Trace(Serializer* s, ObjectPtr object) { |
1235 | ScriptPtr script = Script::RawCast(object); |
1236 | objects_.Add(script); |
1237 | PushFromTo(script); |
1238 | } |
1239 | |
1240 | void WriteAlloc(Serializer* s) { |
1241 | s->WriteCid(kScriptCid); |
1242 | const intptr_t count = objects_.length(); |
1243 | s->WriteUnsigned(count); |
1244 | for (intptr_t i = 0; i < count; i++) { |
1245 | ScriptPtr script = objects_[i]; |
1246 | s->AssignRef(script); |
1247 | } |
1248 | } |
1249 | |
1250 | void WriteFill(Serializer* s) { |
1251 | const intptr_t count = objects_.length(); |
1252 | for (intptr_t i = 0; i < count; i++) { |
1253 | ScriptPtr script = objects_[i]; |
1254 | AutoTraceObjectName(script, script->ptr()->url_); |
1255 | WriteFromTo(script); |
1256 | s->Write<int32_t>(script->ptr()->line_offset_); |
1257 | s->Write<int32_t>(script->ptr()->col_offset_); |
1258 | s->Write<uint8_t>(script->ptr()->flags_); |
1259 | s->Write<int32_t>(script->ptr()->kernel_script_index_); |
1260 | } |
1261 | } |
1262 | |
1263 | private: |
1264 | GrowableArray<ScriptPtr> objects_; |
1265 | }; |
1266 | #endif // !DART_PRECOMPILED_RUNTIME |
1267 | |
1268 | class ScriptDeserializationCluster : public DeserializationCluster { |
1269 | public: |
1270 | ScriptDeserializationCluster() {} |
1271 | ~ScriptDeserializationCluster() {} |
1272 | |
1273 | void ReadAlloc(Deserializer* d) { |
1274 | start_index_ = d->next_index(); |
1275 | PageSpace* old_space = d->heap()->old_space(); |
1276 | const intptr_t count = d->ReadUnsigned(); |
1277 | for (intptr_t i = 0; i < count; i++) { |
1278 | d->AssignRef(AllocateUninitialized(old_space, Script::InstanceSize())); |
1279 | } |
1280 | stop_index_ = d->next_index(); |
1281 | } |
1282 | |
1283 | void ReadFill(Deserializer* d) { |
1284 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1285 | ScriptPtr script = static_cast<ScriptPtr>(d->Ref(id)); |
1286 | Deserializer::InitializeHeader(script, kScriptCid, |
1287 | Script::InstanceSize()); |
1288 | ReadFromTo(script); |
1289 | script->ptr()->line_offset_ = d->Read<int32_t>(); |
1290 | script->ptr()->col_offset_ = d->Read<int32_t>(); |
1291 | script->ptr()->flags_ = d->Read<uint8_t>(); |
1292 | script->ptr()->kernel_script_index_ = d->Read<int32_t>(); |
1293 | script->ptr()->load_timestamp_ = 0; |
1294 | } |
1295 | } |
1296 | }; |
1297 | |
1298 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1299 | class LibrarySerializationCluster : public SerializationCluster { |
1300 | public: |
  LibrarySerializationCluster() : SerializationCluster("Library") {}
1302 | ~LibrarySerializationCluster() {} |
1303 | |
1304 | void Trace(Serializer* s, ObjectPtr object) { |
1305 | LibraryPtr lib = Library::RawCast(object); |
1306 | objects_.Add(lib); |
1307 | PushFromTo(lib); |
1308 | } |
1309 | |
1310 | void WriteAlloc(Serializer* s) { |
1311 | s->WriteCid(kLibraryCid); |
1312 | const intptr_t count = objects_.length(); |
1313 | s->WriteUnsigned(count); |
1314 | for (intptr_t i = 0; i < count; i++) { |
1315 | LibraryPtr lib = objects_[i]; |
1316 | s->AssignRef(lib); |
1317 | } |
1318 | } |
1319 | |
1320 | void WriteFill(Serializer* s) { |
1321 | const intptr_t count = objects_.length(); |
1322 | for (intptr_t i = 0; i < count; i++) { |
1323 | LibraryPtr lib = objects_[i]; |
1324 | AutoTraceObjectName(lib, lib->ptr()->url_); |
1325 | WriteFromTo(lib); |
1326 | s->Write<int32_t>(lib->ptr()->index_); |
1327 | s->Write<uint16_t>(lib->ptr()->num_imports_); |
1328 | s->Write<int8_t>(lib->ptr()->load_state_); |
1329 | s->Write<uint8_t>(lib->ptr()->flags_); |
1330 | if (s->kind() != Snapshot::kFullAOT) { |
1331 | s->Write<uint32_t>(lib->ptr()->binary_declaration_); |
1332 | } |
1333 | } |
1334 | } |
1335 | |
1336 | private: |
1337 | GrowableArray<LibraryPtr> objects_; |
1338 | }; |
1339 | #endif // !DART_PRECOMPILED_RUNTIME |
1340 | |
1341 | class LibraryDeserializationCluster : public DeserializationCluster { |
1342 | public: |
1343 | LibraryDeserializationCluster() {} |
1344 | ~LibraryDeserializationCluster() {} |
1345 | |
1346 | void ReadAlloc(Deserializer* d) { |
1347 | start_index_ = d->next_index(); |
1348 | PageSpace* old_space = d->heap()->old_space(); |
1349 | const intptr_t count = d->ReadUnsigned(); |
1350 | for (intptr_t i = 0; i < count; i++) { |
1351 | d->AssignRef(AllocateUninitialized(old_space, Library::InstanceSize())); |
1352 | } |
1353 | stop_index_ = d->next_index(); |
1354 | } |
1355 | |
1356 | void ReadFill(Deserializer* d) { |
1357 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1358 | LibraryPtr lib = static_cast<LibraryPtr>(d->Ref(id)); |
1359 | Deserializer::InitializeHeader(lib, kLibraryCid, Library::InstanceSize()); |
1360 | ReadFromTo(lib); |
1361 | lib->ptr()->native_entry_resolver_ = NULL; |
1362 | lib->ptr()->native_entry_symbol_resolver_ = NULL; |
1363 | lib->ptr()->index_ = d->Read<int32_t>(); |
1364 | lib->ptr()->num_imports_ = d->Read<uint16_t>(); |
1365 | lib->ptr()->load_state_ = d->Read<int8_t>(); |
1366 | lib->ptr()->flags_ = |
1367 | LibraryLayout::InFullSnapshotBit::update(true, d->Read<uint8_t>()); |
1368 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1369 | if (d->kind() != Snapshot::kFullAOT) { |
1370 | lib->ptr()->binary_declaration_ = d->Read<uint32_t>(); |
1371 | } |
1372 | #endif |
1373 | } |
1374 | } |
1375 | }; |
1376 | |
1377 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1378 | class NamespaceSerializationCluster : public SerializationCluster { |
1379 | public: |
  NamespaceSerializationCluster() : SerializationCluster("Namespace") {}
1381 | ~NamespaceSerializationCluster() {} |
1382 | |
1383 | void Trace(Serializer* s, ObjectPtr object) { |
1384 | NamespacePtr ns = Namespace::RawCast(object); |
1385 | objects_.Add(ns); |
1386 | PushFromTo(ns); |
1387 | } |
1388 | |
1389 | void WriteAlloc(Serializer* s) { |
1390 | s->WriteCid(kNamespaceCid); |
1391 | const intptr_t count = objects_.length(); |
1392 | s->WriteUnsigned(count); |
1393 | for (intptr_t i = 0; i < count; i++) { |
1394 | NamespacePtr ns = objects_[i]; |
1395 | s->AssignRef(ns); |
1396 | } |
1397 | } |
1398 | |
1399 | void WriteFill(Serializer* s) { |
1400 | const intptr_t count = objects_.length(); |
1401 | for (intptr_t i = 0; i < count; i++) { |
1402 | NamespacePtr ns = objects_[i]; |
1403 | AutoTraceObject(ns); |
1404 | WriteFromTo(ns); |
1405 | } |
1406 | } |
1407 | |
1408 | private: |
1409 | GrowableArray<NamespacePtr> objects_; |
1410 | }; |
1411 | #endif // !DART_PRECOMPILED_RUNTIME |
1412 | |
1413 | class NamespaceDeserializationCluster : public DeserializationCluster { |
1414 | public: |
1415 | NamespaceDeserializationCluster() {} |
1416 | ~NamespaceDeserializationCluster() {} |
1417 | |
1418 | void ReadAlloc(Deserializer* d) { |
1419 | start_index_ = d->next_index(); |
1420 | PageSpace* old_space = d->heap()->old_space(); |
1421 | const intptr_t count = d->ReadUnsigned(); |
1422 | for (intptr_t i = 0; i < count; i++) { |
1423 | d->AssignRef(AllocateUninitialized(old_space, Namespace::InstanceSize())); |
1424 | } |
1425 | stop_index_ = d->next_index(); |
1426 | } |
1427 | |
1428 | void ReadFill(Deserializer* d) { |
1429 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1430 | NamespacePtr ns = static_cast<NamespacePtr>(d->Ref(id)); |
1431 | Deserializer::InitializeHeader(ns, kNamespaceCid, |
1432 | Namespace::InstanceSize()); |
1433 | ReadFromTo(ns); |
1434 | } |
1435 | } |
1436 | }; |
1437 | |
1438 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1439 | // KernelProgramInfo objects are not written into a full AOT snapshot. |
1440 | class KernelProgramInfoSerializationCluster : public SerializationCluster { |
1441 | public: |
1442 | KernelProgramInfoSerializationCluster() |
1443 | : SerializationCluster("KernelProgramInfo" ) {} |
1444 | ~KernelProgramInfoSerializationCluster() {} |
1445 | |
1446 | void Trace(Serializer* s, ObjectPtr object) { |
1447 | KernelProgramInfoPtr info = KernelProgramInfo::RawCast(object); |
1448 | objects_.Add(info); |
1449 | PushFromTo(info); |
1450 | } |
1451 | |
1452 | void WriteAlloc(Serializer* s) { |
1453 | s->WriteCid(kKernelProgramInfoCid); |
1454 | const intptr_t count = objects_.length(); |
1455 | s->WriteUnsigned(count); |
1456 | for (intptr_t i = 0; i < count; i++) { |
1457 | KernelProgramInfoPtr info = objects_[i]; |
1458 | s->AssignRef(info); |
1459 | } |
1460 | } |
1461 | |
1462 | void WriteFill(Serializer* s) { |
1463 | const intptr_t count = objects_.length(); |
1464 | for (intptr_t i = 0; i < count; i++) { |
1465 | KernelProgramInfoPtr info = objects_[i]; |
1466 | AutoTraceObject(info); |
1467 | WriteFromTo(info); |
1468 | s->Write<uint32_t>(info->ptr()->kernel_binary_version_); |
1469 | } |
1470 | } |
1471 | |
1472 | private: |
1473 | GrowableArray<KernelProgramInfoPtr> objects_; |
1474 | }; |
1475 | |
1476 | // Since KernelProgramInfo objects are not written into full AOT snapshots, |
1477 | // one will never need to read them from a full AOT snapshot. |
1478 | class KernelProgramInfoDeserializationCluster : public DeserializationCluster { |
1479 | public: |
1480 | KernelProgramInfoDeserializationCluster() {} |
1481 | ~KernelProgramInfoDeserializationCluster() {} |
1482 | |
1483 | void ReadAlloc(Deserializer* d) { |
1484 | start_index_ = d->next_index(); |
1485 | PageSpace* old_space = d->heap()->old_space(); |
1486 | const intptr_t count = d->ReadUnsigned(); |
1487 | for (intptr_t i = 0; i < count; i++) { |
1488 | d->AssignRef( |
1489 | AllocateUninitialized(old_space, KernelProgramInfo::InstanceSize())); |
1490 | } |
1491 | stop_index_ = d->next_index(); |
1492 | } |
1493 | |
1494 | void ReadFill(Deserializer* d) { |
1495 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1496 | KernelProgramInfoPtr info = static_cast<KernelProgramInfoPtr>(d->Ref(id)); |
1497 | Deserializer::InitializeHeader(info, kKernelProgramInfoCid, |
1498 | KernelProgramInfo::InstanceSize()); |
1499 | ReadFromTo(info); |
1500 | info->ptr()->kernel_binary_version_ = d->Read<uint32_t>(); |
1501 | } |
1502 | } |
1503 | |
1504 | void PostLoad(Deserializer* d, const Array& refs) { |
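    // The libraries and classes lookup caches are not serialized; recreate
    // them as fresh, empty hash maps after loading.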
1505 | Array& array = Array::Handle(d->zone()); |
1506 | KernelProgramInfo& info = KernelProgramInfo::Handle(d->zone()); |
1507 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1508 | info ^= refs.At(id); |
1509 | array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld); |
1510 | info.set_libraries_cache(array); |
1511 | array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld); |
1512 | info.set_classes_cache(array); |
1513 | } |
1514 | } |
1515 | }; |
1516 | |
1517 | class CodeSerializationCluster : public SerializationCluster { |
1518 | public: |
1519 | explicit CodeSerializationCluster(Heap* heap) |
1520 | : SerializationCluster("Code" ) {} |
1521 | ~CodeSerializationCluster() {} |
1522 | |
1523 | void Trace(Serializer* s, ObjectPtr object) { |
1524 | CodePtr code = Code::RawCast(object); |
1525 | |
1526 | if (s->InCurrentLoadingUnit(code, /*record*/ true)) { |
1527 | objects_.Add(code); |
1528 | } |
1529 | |
1530 | if (!(s->kind() == Snapshot::kFullAOT && FLAG_use_bare_instructions)) { |
1531 | s->Push(code->ptr()->object_pool_); |
1532 | } |
1533 | s->Push(code->ptr()->owner_); |
1534 | s->Push(code->ptr()->exception_handlers_); |
1535 | s->Push(code->ptr()->pc_descriptors_); |
1536 | s->Push(code->ptr()->catch_entry_); |
1537 | if (s->InCurrentLoadingUnit(code->ptr()->compressed_stackmaps_)) { |
1538 | s->Push(code->ptr()->compressed_stackmaps_); |
1539 | } |
1540 | if (!FLAG_precompiled_mode || !FLAG_dwarf_stack_traces_mode) { |
1541 | s->Push(code->ptr()->inlined_id_to_function_); |
1542 | if (s->InCurrentLoadingUnit(code->ptr()->code_source_map_)) { |
1543 | s->Push(code->ptr()->code_source_map_); |
1544 | } |
1545 | } |
1546 | if (s->kind() == Snapshot::kFullJIT) { |
1547 | s->Push(code->ptr()->deopt_info_array_); |
1548 | s->Push(code->ptr()->static_calls_target_table_); |
1549 | } else if (s->kind() == Snapshot::kFullAOT) { |
1550 | #if defined(DART_PRECOMPILER) |
1551 | auto const calls_array = code->ptr()->static_calls_target_table_; |
1552 | if (calls_array != Array::null()) { |
        // Some Code entries in the static calls target table may only be
        // reachable through this table, so push the Code objects here.
1555 | auto const length = Smi::Value(calls_array->ptr()->length_); |
1556 | for (intptr_t i = 0; i < length; i++) { |
1557 | auto const object = calls_array->ptr()->data()[i]; |
1558 | if (object->IsHeapObject() && object->IsCode()) { |
1559 | s->Push(object); |
1560 | } |
1561 | } |
1562 | } |
1563 | #else |
1564 | UNREACHABLE(); |
1565 | #endif |
1566 | } |
1567 | #if !defined(PRODUCT) |
1568 | s->Push(code->ptr()->return_address_metadata_); |
1569 | if (FLAG_code_comments) { |
1570 | s->Push(code->ptr()->comments_); |
1571 | } |
1572 | #endif |
1573 | } |
1574 | |
1575 | struct CodeOrderInfo { |
1576 | CodePtr code; |
1577 | intptr_t order; |
1578 | }; |
1579 | |
1580 | static int CompareCodeOrderInfo(CodeOrderInfo const* a, |
1581 | CodeOrderInfo const* b) { |
1582 | if (a->order < b->order) return -1; |
1583 | if (a->order > b->order) return 1; |
1584 | return 0; |
1585 | } |
1586 | |
1587 | static void Insert(GrowableArray<CodeOrderInfo>* order_list, |
1588 | IntMap<intptr_t>* order_map, |
1589 | CodePtr code) { |
1590 | InstructionsPtr instr = code->ptr()->instructions_; |
1591 | intptr_t key = static_cast<intptr_t>(instr); |
1592 | intptr_t order; |
1593 | if (order_map->HasKey(key)) { |
1594 | order = order_map->Lookup(key); |
1595 | } else { |
1596 | order = order_list->length() + 1; |
1597 | order_map->Insert(key, order); |
1598 | } |
1599 | CodeOrderInfo info; |
1600 | info.code = code; |
1601 | info.order = order; |
1602 | order_list->Add(info); |
1603 | } |
1604 | |
1605 | static void Sort(GrowableArray<CodePtr>* codes) { |
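    // Code objects that share the same Instructions (e.g. after
    // deduplication) receive the same order key, assigned in order of first
    // appearance, so sorting groups them together.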
1606 | GrowableArray<CodeOrderInfo> order_list; |
1607 | IntMap<intptr_t> order_map; |
1608 | for (intptr_t i = 0; i < codes->length(); i++) { |
1609 | Insert(&order_list, &order_map, (*codes)[i]); |
1610 | } |
1611 | order_list.Sort(CompareCodeOrderInfo); |
1612 | ASSERT(order_list.length() == codes->length()); |
1613 | for (intptr_t i = 0; i < order_list.length(); i++) { |
1614 | (*codes)[i] = order_list[i].code; |
1615 | } |
1616 | } |
1617 | |
1618 | static void Sort(GrowableArray<Code*>* codes) { |
1619 | GrowableArray<CodeOrderInfo> order_list; |
1620 | IntMap<intptr_t> order_map; |
1621 | for (intptr_t i = 0; i < codes->length(); i++) { |
1622 | Insert(&order_list, &order_map, (*codes)[i]->raw()); |
1623 | } |
1624 | order_list.Sort(CompareCodeOrderInfo); |
1625 | ASSERT(order_list.length() == codes->length()); |
1626 | for (intptr_t i = 0; i < order_list.length(); i++) { |
1627 | *(*codes)[i] = order_list[i].code; |
1628 | } |
1629 | } |
1630 | |
1631 | void WriteAlloc(Serializer* s) { |
1632 | Sort(&objects_); |
1633 | auto loading_units = s->loading_units(); |
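    // Code belonging to deferred (non-root) loading units is gathered after
    // the root unit's code so that the reference assignment below matches
    // the allocation order in CodeDeserializationCluster::ReadAlloc.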
1634 | if (loading_units != nullptr) { |
1635 | for (intptr_t i = LoadingUnit::kRootId + 1; i < loading_units->length(); |
1636 | i++) { |
1637 | auto unit_objects = loading_units->At(i)->deferred_objects(); |
1638 | Sort(unit_objects); |
1639 | for (intptr_t j = 0; j < unit_objects->length(); j++) { |
1640 | deferred_objects_.Add(unit_objects->At(j)->raw()); |
1641 | } |
1642 | } |
1643 | } |
1644 | s->PrepareInstructions(&objects_); |
1645 | |
1646 | s->WriteCid(kCodeCid); |
1647 | const intptr_t count = objects_.length(); |
1648 | s->WriteUnsigned(count); |
1649 | for (intptr_t i = 0; i < count; i++) { |
1650 | CodePtr code = objects_[i]; |
1651 | s->AssignRef(code); |
1652 | } |
1653 | const intptr_t deferred_count = deferred_objects_.length(); |
1654 | s->WriteUnsigned(deferred_count); |
1655 | for (intptr_t i = 0; i < deferred_count; i++) { |
1656 | CodePtr code = deferred_objects_[i]; |
1657 | s->AssignRef(code); |
1658 | } |
1659 | } |
1660 | |
1661 | void WriteFill(Serializer* s) { |
1662 | Snapshot::Kind kind = s->kind(); |
1663 | const intptr_t count = objects_.length(); |
1664 | for (intptr_t i = 0; i < count; i++) { |
1665 | CodePtr code = objects_[i]; |
1666 | WriteFill(s, kind, code, false); |
1667 | } |
1668 | const intptr_t deferred_count = deferred_objects_.length(); |
1669 | for (intptr_t i = 0; i < deferred_count; i++) { |
1670 | CodePtr code = deferred_objects_[i]; |
1671 | WriteFill(s, kind, code, true); |
1672 | } |
1673 | } |
1674 | |
1675 | void WriteFill(Serializer* s, |
1676 | Snapshot::Kind kind, |
1677 | CodePtr code, |
1678 | bool deferred) { |
1679 | AutoTraceObjectName(code, MakeDisambiguatedCodeName(s, code)); |
1680 | |
1681 | intptr_t pointer_offsets_length = |
1682 | Code::PtrOffBits::decode(code->ptr()->state_bits_); |
1683 | if (pointer_offsets_length != 0) { |
1684 | FATAL("Cannot serialize code with embedded pointers" ); |
1685 | } |
1686 | if (kind == Snapshot::kFullAOT && Code::IsDisabled(code)) { |
1687 | // Disabled code is fatal in AOT since we cannot recompile. |
1688 | s->UnexpectedObject(code, "Disabled code" ); |
1689 | } |
1690 | |
1691 | s->WriteInstructions(code->ptr()->instructions_, |
1692 | code->ptr()->unchecked_offset_, code, deferred); |
1693 | if (kind == Snapshot::kFullJIT) { |
1694 | // TODO(rmacnak): Fix references to disabled code before serializing. |
1695 | // For now, we may write the FixCallersTarget or equivalent stub. This |
1696 | // will cause a fixup if this code is called. |
1697 | const uint32_t active_unchecked_offset = |
1698 | code->ptr()->unchecked_entry_point_ - code->ptr()->entry_point_; |
1699 | s->WriteInstructions(code->ptr()->active_instructions_, |
1700 | active_unchecked_offset, code, deferred); |
1701 | } |
1702 | |
    // No need to write the object pool out if we are producing a full AOT
    // snapshot with bare instructions.
1705 | if (!(kind == Snapshot::kFullAOT && FLAG_use_bare_instructions)) { |
1706 | WriteField(code, object_pool_); |
1707 | #if defined(DART_PRECOMPILER) |
1708 | } else if (FLAG_write_v8_snapshot_profile_to != nullptr && |
1709 | code->ptr()->object_pool_ != ObjectPool::null()) { |
      // If we are writing a V8 snapshot profile, attribute references that
      // go through the object pool to the code object itself.
1712 | ObjectPoolPtr pool = code->ptr()->object_pool_; |
1713 | |
1714 | for (intptr_t i = 0; i < pool->ptr()->length_; i++) { |
1715 | uint8_t bits = pool->ptr()->entry_bits()[i]; |
1716 | if (ObjectPool::TypeBits::decode(bits) == |
1717 | ObjectPool::EntryType::kTaggedObject) { |
1718 | s->AttributeElementRef(pool->ptr()->data()[i].raw_obj_, i); |
1719 | } |
1720 | } |
1721 | #endif // defined(DART_PRECOMPILER) |
1722 | } |
1723 | WriteField(code, owner_); |
1724 | WriteField(code, exception_handlers_); |
1725 | WriteField(code, pc_descriptors_); |
1726 | WriteField(code, catch_entry_); |
1727 | if (s->InCurrentLoadingUnit(code->ptr()->compressed_stackmaps_)) { |
1728 | WriteField(code, compressed_stackmaps_); |
1729 | } else { |
1730 | WriteFieldValue(compressed_stackmaps_, CompressedStackMaps::null()); |
1731 | } |
1732 | if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) { |
1733 | WriteFieldValue(inlined_id_to_function_, Array::null()); |
1734 | WriteFieldValue(code_source_map_, CodeSourceMap::null()); |
1735 | } else { |
1736 | WriteField(code, inlined_id_to_function_); |
1737 | if (s->InCurrentLoadingUnit(code->ptr()->code_source_map_)) { |
1738 | WriteField(code, code_source_map_); |
1739 | } else { |
1740 | WriteFieldValue(code_source_map_, CodeSourceMap::null()); |
1741 | } |
1742 | } |
1743 | if (kind == Snapshot::kFullJIT) { |
1744 | WriteField(code, deopt_info_array_); |
1745 | WriteField(code, static_calls_target_table_); |
1746 | } |
1747 | #if !defined(PRODUCT) |
1748 | WriteField(code, return_address_metadata_); |
1749 | if (FLAG_code_comments) { |
1750 | WriteField(code, comments_); |
1751 | } |
1752 | #endif |
1753 | s->Write<int32_t>(code->ptr()->state_bits_); |
1754 | } |
1755 | |
1756 | GrowableArray<CodePtr>* discovered_objects() { return &objects_; } |
1757 | |
  // Some Code objects may have their owners dropped from the snapshot,
  // which makes it impossible to recover the program structure when
  // analysing a snapshot profile. To facilitate analysis of snapshot
  // profiles, we include artificial nodes representing such dropped owners
  // in the profile.
1763 | void WriteDroppedOwnersIntoProfile(Serializer* s) { |
1764 | ASSERT(s->profile_writer() != nullptr); |
1765 | |
1766 | for (auto code : objects_) { |
1767 | ObjectPtr owner = WeakSerializationReference::Unwrap(code->ptr()->owner_); |
1768 | if (s->CreateArtificalNodeIfNeeded(owner)) { |
1769 | AutoTraceObject(code); |
1770 | s->AttributePropertyRef(owner, ":owner_" , |
1771 | /*permit_artificial_ref=*/true); |
1772 | } |
1773 | } |
1774 | } |
1775 | |
1776 | private: |
1777 | static const char* MakeDisambiguatedCodeName(Serializer* s, CodePtr c) { |
1778 | if (s->profile_writer() == nullptr) { |
1779 | return nullptr; |
1780 | } |
1781 | |
1782 | REUSABLE_CODE_HANDLESCOPE(s->thread()); |
1783 | Code& code = reused_code_handle.Handle(); |
1784 | code = c; |
1785 | return code.QualifiedName( |
1786 | NameFormattingParams::DisambiguatedWithoutClassName( |
1787 | Object::NameVisibility::kInternalName)); |
1788 | } |
1789 | |
1790 | GrowableArray<CodePtr> objects_; |
1791 | GrowableArray<CodePtr> deferred_objects_; |
1792 | }; |
1793 | #endif // !DART_PRECOMPILED_RUNTIME |
1794 | |
1795 | class CodeDeserializationCluster : public DeserializationCluster { |
1796 | public: |
1797 | CodeDeserializationCluster() {} |
1798 | ~CodeDeserializationCluster() {} |
1799 | |
1800 | void ReadAlloc(Deserializer* d) { |
1801 | PageSpace* old_space = d->heap()->old_space(); |
1802 | start_index_ = d->next_index(); |
1803 | const intptr_t count = d->ReadUnsigned(); |
1804 | for (intptr_t i = 0; i < count; i++) { |
1805 | auto code = AllocateUninitialized(old_space, Code::InstanceSize(0)); |
1806 | d->AssignRef(code); |
1807 | } |
1808 | stop_index_ = d->next_index(); |
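    // Code objects from deferred loading units are also allocated now so
    // that references to them can be resolved; their instructions are
    // filled in separately as part of deferred loading.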
1809 | deferred_start_index_ = d->next_index(); |
1810 | const intptr_t deferred_count = d->ReadUnsigned(); |
1811 | for (intptr_t i = 0; i < deferred_count; i++) { |
1812 | auto code = AllocateUninitialized(old_space, Code::InstanceSize(0)); |
1813 | d->AssignRef(code); |
1814 | } |
1815 | deferred_stop_index_ = d->next_index(); |
1816 | } |
1817 | |
1818 | void ReadFill(Deserializer* d) { |
1819 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1820 | ReadFill(d, id, false); |
1821 | } |
1822 | for (intptr_t id = deferred_start_index_; id < deferred_stop_index_; id++) { |
1823 | ReadFill(d, id, true); |
1824 | } |
1825 | } |
1826 | |
1827 | void ReadFill(Deserializer* d, intptr_t id, bool deferred) { |
1828 | auto const code = static_cast<CodePtr>(d->Ref(id)); |
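    // Code::InstanceSize(0): code with embedded pointer offsets is never
    // serialized (the serializer raises a fatal error for it), so the
    // variable-length tail of the Code object is always empty here.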
1829 | Deserializer::InitializeHeader(code, kCodeCid, Code::InstanceSize(0)); |
1830 | |
1831 | d->ReadInstructions(code, deferred); |
1832 | |
    // There is a single global pool if this is a full AOT snapshot with
    // bare instructions, so no per-Code pool is read.
1835 | if (!(d->kind() == Snapshot::kFullAOT && FLAG_use_bare_instructions)) { |
1836 | code->ptr()->object_pool_ = static_cast<ObjectPoolPtr>(d->ReadRef()); |
1837 | } else { |
1838 | code->ptr()->object_pool_ = ObjectPool::null(); |
1839 | } |
1840 | code->ptr()->owner_ = d->ReadRef(); |
1841 | code->ptr()->exception_handlers_ = |
1842 | static_cast<ExceptionHandlersPtr>(d->ReadRef()); |
1843 | code->ptr()->pc_descriptors_ = static_cast<PcDescriptorsPtr>(d->ReadRef()); |
1844 | code->ptr()->catch_entry_ = d->ReadRef(); |
1845 | code->ptr()->compressed_stackmaps_ = |
1846 | static_cast<CompressedStackMapsPtr>(d->ReadRef()); |
1847 | code->ptr()->inlined_id_to_function_ = static_cast<ArrayPtr>(d->ReadRef()); |
1848 | code->ptr()->code_source_map_ = static_cast<CodeSourceMapPtr>(d->ReadRef()); |
1849 | |
1850 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1851 | if (d->kind() == Snapshot::kFullJIT) { |
1852 | code->ptr()->deopt_info_array_ = static_cast<ArrayPtr>(d->ReadRef()); |
1853 | code->ptr()->static_calls_target_table_ = |
1854 | static_cast<ArrayPtr>(d->ReadRef()); |
1855 | } |
1856 | #endif // !DART_PRECOMPILED_RUNTIME |
1857 | |
1858 | #if !defined(PRODUCT) |
1859 | code->ptr()->return_address_metadata_ = d->ReadRef(); |
1860 | code->ptr()->var_descriptors_ = LocalVarDescriptors::null(); |
1861 | code->ptr()->comments_ = FLAG_code_comments |
1862 | ? static_cast<ArrayPtr>(d->ReadRef()) |
1863 | : Array::null(); |
1864 | code->ptr()->compile_timestamp_ = 0; |
1865 | #endif |
1866 | |
1867 | code->ptr()->state_bits_ = d->Read<int32_t>(); |
1868 | } |
1869 | |
1870 | void PostLoad(Deserializer* d, const Array& refs) { |
1871 | d->EndInstructions(refs, start_index_, stop_index_); |
1872 | |
1873 | #if !defined(PRODUCT) |
1874 | if (!CodeObservers::AreActive() && !FLAG_support_disassembler) return; |
1875 | #endif |
1876 | Code& code = Code::Handle(d->zone()); |
1877 | #if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) |
1878 | Object& owner = Object::Handle(d->zone()); |
1879 | #endif |
1880 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1881 | code ^= refs.At(id); |
1882 | #if !defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT) |
1883 | if (CodeObservers::AreActive()) { |
1884 | Code::NotifyCodeObservers(code, code.is_optimized()); |
1885 | } |
1886 | #endif |
1887 | #if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) |
1888 | owner = code.owner(); |
1889 | if (owner.IsFunction()) { |
1890 | if ((FLAG_disassemble || |
1891 | (code.is_optimized() && FLAG_disassemble_optimized)) && |
1892 | compiler::PrintFilter::ShouldPrint(Function::Cast(owner))) { |
1893 | Disassembler::DisassembleCode(Function::Cast(owner), code, |
1894 | code.is_optimized()); |
1895 | } |
1896 | } else if (FLAG_disassemble_stubs) { |
1897 | Disassembler::DisassembleStub(code.Name(), code); |
1898 | } |
1899 | #endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) |
1900 | } |
1901 | } |
1902 | |
1903 | private: |
1904 | intptr_t deferred_start_index_; |
1905 | intptr_t deferred_stop_index_; |
1906 | }; |
1907 | |
1908 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1909 | class BytecodeSerializationCluster : public SerializationCluster { |
1910 | public: |
1911 | BytecodeSerializationCluster() : SerializationCluster("Bytecode" ) {} |
1912 | virtual ~BytecodeSerializationCluster() {} |
1913 | |
1914 | void Trace(Serializer* s, ObjectPtr object) { |
1915 | BytecodePtr bytecode = Bytecode::RawCast(object); |
1916 | objects_.Add(bytecode); |
1917 | PushFromTo(bytecode); |
1918 | } |
1919 | |
1920 | void WriteAlloc(Serializer* s) { |
1921 | s->WriteCid(kBytecodeCid); |
1922 | const intptr_t count = objects_.length(); |
1923 | s->WriteUnsigned(count); |
1924 | for (intptr_t i = 0; i < count; i++) { |
1925 | BytecodePtr bytecode = objects_[i]; |
1926 | s->AssignRef(bytecode); |
1927 | } |
1928 | } |
1929 | |
1930 | void WriteFill(Serializer* s) { |
1931 | ASSERT(s->kind() != Snapshot::kFullAOT); |
1932 | const intptr_t count = objects_.length(); |
1933 | for (intptr_t i = 0; i < count; i++) { |
1934 | BytecodePtr bytecode = objects_[i]; |
1935 | s->Write<int32_t>(bytecode->ptr()->instructions_size_); |
1936 | WriteFromTo(bytecode); |
1937 | s->Write<int32_t>(bytecode->ptr()->instructions_binary_offset_); |
1938 | s->Write<int32_t>(bytecode->ptr()->source_positions_binary_offset_); |
1939 | s->Write<int32_t>(bytecode->ptr()->local_variables_binary_offset_); |
1940 | } |
1941 | } |
1942 | |
1943 | private: |
1944 | GrowableArray<BytecodePtr> objects_; |
1945 | }; |
1946 | |
1947 | class BytecodeDeserializationCluster : public DeserializationCluster { |
1948 | public: |
1949 | BytecodeDeserializationCluster() {} |
1950 | virtual ~BytecodeDeserializationCluster() {} |
1951 | |
1952 | void ReadAlloc(Deserializer* d) { |
1953 | start_index_ = d->next_index(); |
1954 | PageSpace* old_space = d->heap()->old_space(); |
1955 | const intptr_t count = d->ReadUnsigned(); |
1956 | for (intptr_t i = 0; i < count; i++) { |
1957 | d->AssignRef(AllocateUninitialized(old_space, Bytecode::InstanceSize())); |
1958 | } |
1959 | stop_index_ = d->next_index(); |
1960 | } |
1961 | |
1962 | void ReadFill(Deserializer* d) { |
1963 | ASSERT(d->kind() != Snapshot::kFullAOT); |
1964 | |
1965 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
1966 | BytecodePtr bytecode = static_cast<BytecodePtr>(d->Ref(id)); |
1967 | Deserializer::InitializeHeader(bytecode, kBytecodeCid, |
1968 | Bytecode::InstanceSize()); |
1969 | bytecode->ptr()->instructions_ = 0; |
1970 | bytecode->ptr()->instructions_size_ = d->Read<int32_t>(); |
1971 | ReadFromTo(bytecode); |
1972 | bytecode->ptr()->instructions_binary_offset_ = d->Read<int32_t>(); |
1973 | bytecode->ptr()->source_positions_binary_offset_ = d->Read<int32_t>(); |
1974 | bytecode->ptr()->local_variables_binary_offset_ = d->Read<int32_t>(); |
1975 | } |
1976 | } |
1977 | |
1978 | void PostLoad(Deserializer* d, const Array& refs) { |
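    // Bytecode references its instructions by offset into the kernel
    // binary. Now that the binary is reachable again, compute and cache the
    // absolute instruction address.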
1979 | Bytecode& bytecode = Bytecode::Handle(d->zone()); |
1980 | ExternalTypedData& binary = ExternalTypedData::Handle(d->zone()); |
1981 | |
1982 | for (intptr_t i = start_index_; i < stop_index_; i++) { |
1983 | bytecode ^= refs.At(i); |
1984 | binary = bytecode.GetBinary(d->zone()); |
1985 | bytecode.set_instructions(reinterpret_cast<uword>( |
1986 | binary.DataAddr(bytecode.instructions_binary_offset()))); |
1987 | } |
1988 | } |
1989 | }; |
1990 | |
1991 | class ObjectPoolSerializationCluster : public SerializationCluster { |
1992 | public: |
1993 | ObjectPoolSerializationCluster() : SerializationCluster("ObjectPool" ) {} |
1994 | ~ObjectPoolSerializationCluster() {} |
1995 | |
1996 | void Trace(Serializer* s, ObjectPtr object) { |
1997 | ObjectPoolPtr pool = ObjectPool::RawCast(object); |
1998 | objects_.Add(pool); |
1999 | |
2000 | const intptr_t length = pool->ptr()->length_; |
2001 | uint8_t* entry_bits = pool->ptr()->entry_bits(); |
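    // Only tagged-object and native-entry-data entries reference other heap
    // objects; immediate and native-function entries carry nothing to
    // trace.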
2002 | for (intptr_t i = 0; i < length; i++) { |
2003 | auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]); |
2004 | if ((entry_type == ObjectPool::EntryType::kTaggedObject) || |
2005 | (entry_type == ObjectPool::EntryType::kNativeEntryData)) { |
2006 | s->Push(pool->ptr()->data()[i].raw_obj_); |
2007 | } |
2008 | } |
2009 | } |
2010 | |
2011 | void WriteAlloc(Serializer* s) { |
2012 | s->WriteCid(kObjectPoolCid); |
2013 | const intptr_t count = objects_.length(); |
2014 | s->WriteUnsigned(count); |
2015 | for (intptr_t i = 0; i < count; i++) { |
2016 | ObjectPoolPtr pool = objects_[i]; |
2017 | s->AssignRef(pool); |
2018 | AutoTraceObject(pool); |
2019 | const intptr_t length = pool->ptr()->length_; |
2020 | s->WriteUnsigned(length); |
2021 | } |
2022 | } |
2023 | |
2024 | void WriteFill(Serializer* s) { |
2025 | const intptr_t count = objects_.length(); |
2026 | for (intptr_t i = 0; i < count; i++) { |
2027 | ObjectPoolPtr pool = objects_[i]; |
2028 | AutoTraceObject(pool); |
2029 | const intptr_t length = pool->ptr()->length_; |
2030 | s->WriteUnsigned(length); |
2031 | uint8_t* entry_bits = pool->ptr()->entry_bits(); |
2032 | for (intptr_t j = 0; j < length; j++) { |
2033 | s->Write<uint8_t>(entry_bits[j]); |
2034 | ObjectPoolLayout::Entry& entry = pool->ptr()->data()[j]; |
2035 | switch (ObjectPool::TypeBits::decode(entry_bits[j])) { |
2036 | case ObjectPool::EntryType::kTaggedObject: { |
2037 | if ((entry.raw_obj_ == StubCode::CallNoScopeNative().raw()) || |
2038 | (entry.raw_obj_ == StubCode::CallAutoScopeNative().raw())) { |
2039 | // Natives can run while precompiling, becoming linked and |
2040 | // switching their stub. Reset to the initial stub used for |
2041 | // lazy-linking. |
2042 | s->WriteElementRef(StubCode::CallBootstrapNative().raw(), j); |
2043 | break; |
2044 | } |
2045 | s->WriteElementRef(entry.raw_obj_, j); |
2046 | break; |
2047 | } |
2048 | case ObjectPool::EntryType::kImmediate: { |
2049 | s->Write<intptr_t>(entry.raw_value_); |
2050 | break; |
2051 | } |
2052 | case ObjectPool::EntryType::kNativeEntryData: { |
2053 | ObjectPtr raw = entry.raw_obj_; |
2054 | TypedDataPtr raw_data = static_cast<TypedDataPtr>(raw); |
2055 | // kNativeEntryData object pool entries are for linking natives for |
2056 | // the interpreter. Before writing these entries into the snapshot, |
2057 | // we need to unlink them by nulling out the 'trampoline' and |
2058 | // 'native_function' fields. |
2059 | NativeEntryData::Payload* payload = |
2060 | NativeEntryData::FromTypedArray(raw_data); |
2061 | if (payload->kind == MethodRecognizer::kUnknown) { |
2062 | payload->trampoline = NULL; |
2063 | payload->native_function = NULL; |
2064 | } |
2065 | s->WriteElementRef(raw, j); |
2066 | break; |
2067 | } |
2068 | case ObjectPool::EntryType::kNativeFunction: |
2069 | case ObjectPool::EntryType::kNativeFunctionWrapper: { |
2070 | // Write nothing. Will initialize with the lazy link entry. |
2071 | break; |
2072 | } |
2073 | default: |
2074 | UNREACHABLE(); |
2075 | } |
2076 | } |
2077 | } |
2078 | } |
2079 | |
2080 | private: |
2081 | GrowableArray<ObjectPoolPtr> objects_; |
2082 | }; |
2083 | #endif // !DART_PRECOMPILED_RUNTIME |
2084 | |
2085 | class ObjectPoolDeserializationCluster : public DeserializationCluster { |
2086 | public: |
2087 | ObjectPoolDeserializationCluster() {} |
2088 | ~ObjectPoolDeserializationCluster() {} |
2089 | |
2090 | void ReadAlloc(Deserializer* d) { |
2091 | start_index_ = d->next_index(); |
2092 | PageSpace* old_space = d->heap()->old_space(); |
2093 | const intptr_t count = d->ReadUnsigned(); |
2094 | for (intptr_t i = 0; i < count; i++) { |
2095 | const intptr_t length = d->ReadUnsigned(); |
2096 | d->AssignRef( |
2097 | AllocateUninitialized(old_space, ObjectPool::InstanceSize(length))); |
2098 | } |
2099 | stop_index_ = d->next_index(); |
2100 | } |
2101 | |
2102 | void ReadFill(Deserializer* d) { |
    for (intptr_t id = start_index_; id < stop_index_; id++) {
      const intptr_t length = d->ReadUnsigned();
      ObjectPoolPtr pool = static_cast<ObjectPoolPtr>(d->Ref(id));
2106 | Deserializer::InitializeHeader(pool, kObjectPoolCid, |
2107 | ObjectPool::InstanceSize(length)); |
2108 | pool->ptr()->length_ = length; |
2109 | for (intptr_t j = 0; j < length; j++) { |
2110 | const uint8_t entry_bits = d->Read<uint8_t>(); |
2111 | pool->ptr()->entry_bits()[j] = entry_bits; |
2112 | ObjectPoolLayout::Entry& entry = pool->ptr()->data()[j]; |
2113 | switch (ObjectPool::TypeBits::decode(entry_bits)) { |
2114 | case ObjectPool::EntryType::kNativeEntryData: |
2115 | case ObjectPool::EntryType::kTaggedObject: |
2116 | entry.raw_obj_ = d->ReadRef(); |
2117 | break; |
2118 | case ObjectPool::EntryType::kImmediate: |
2119 | entry.raw_value_ = d->Read<intptr_t>(); |
2120 | break; |
2121 | case ObjectPool::EntryType::kNativeFunction: { |
2122 | // Read nothing. Initialize with the lazy link entry. |
2123 | uword new_entry = NativeEntry::LinkNativeCallEntry(); |
2124 | entry.raw_value_ = static_cast<intptr_t>(new_entry); |
2125 | break; |
2126 | } |
2127 | default: |
2128 | UNREACHABLE(); |
2129 | } |
2130 | } |
2131 | } |
2132 | } |
2133 | }; |
2134 | |
2135 | #if defined(DART_PRECOMPILER) |
2136 | class WeakSerializationReferenceSerializationCluster |
2137 | : public SerializationCluster { |
2138 | public: |
2139 | WeakSerializationReferenceSerializationCluster(Zone* zone, Heap* heap) |
2140 | : SerializationCluster("WeakSerializationReference" ), |
2141 | heap_(ASSERT_NOTNULL(heap)), |
2142 | objects_(zone, 0), |
2143 | canonical_wsrs_(zone, 0), |
2144 | canonical_wsr_map_(zone) {} |
2145 | ~WeakSerializationReferenceSerializationCluster() {} |
2146 | |
2147 | void Trace(Serializer* s, ObjectPtr object) { |
2148 | ASSERT(s->kind() == Snapshot::kFullAOT); |
2149 | // Make sure we don't trace again after choosing canonical WSRs. |
2150 | ASSERT(!have_canonicalized_wsrs_); |
2151 | |
2152 | auto const ref = WeakSerializationReference::RawCast(object); |
2153 | objects_.Add(ref); |
2154 | // We do _not_ push the target, since this is not a strong reference. |
2155 | } |
2156 | |
2157 | void WriteAlloc(Serializer* s) { |
2158 | ASSERT(s->kind() == Snapshot::kFullAOT); |
2159 | ASSERT(have_canonicalized_wsrs_); |
2160 | |
2161 | s->WriteCid(kWeakSerializationReferenceCid); |
2162 | s->WriteUnsigned(WrittenCount()); |
2163 | |
2164 | // Set up references for those objects that will be written. |
2165 | for (auto const ref : canonical_wsrs_) { |
2166 | s->AssignRef(ref); |
2167 | } |
2168 | |
2169 | // In precompiled mode, set the object ID of each non-canonical WSR to |
2170 | // its canonical counterpart's object ID. This ensures that any reference to |
2171 | // it is serialized as a reference to the canonicalized one. |
2172 | for (auto const ref : objects_) { |
2173 | ASSERT(Serializer::IsReachableReference(heap_->GetObjectId(ref))); |
2174 | if (ShouldDrop(ref)) { |
2175 | // For dropped references, reset their ID to be the unreachable |
2176 | // reference value, so RefId retrieves the target ID instead. |
2177 | heap_->SetObjectId(ref, Serializer::kUnreachableReference); |
2178 | continue; |
2179 | } |
2180 | // Skip if we've already allocated a reference (this is a canonical WSR). |
2181 | if (Serializer::IsAllocatedReference(heap_->GetObjectId(ref))) continue; |
2182 | auto const target_cid = WeakSerializationReference::TargetClassIdOf(ref); |
2183 | ASSERT(canonical_wsr_map_.HasKey(target_cid)); |
2184 | auto const canonical_index = canonical_wsr_map_.Lookup(target_cid) - 1; |
2185 | auto const canonical_wsr = objects_[canonical_index]; |
2186 | // Set the object ID of this non-canonical WSR to the same as its |
2187 | // canonical WSR entry, so we'll reference the canonical WSR when |
2188 | // serializing references to this object. |
2189 | auto const canonical_heap_id = heap_->GetObjectId(canonical_wsr); |
2190 | ASSERT(Serializer::IsAllocatedReference(canonical_heap_id)); |
2191 | heap_->SetObjectId(ref, canonical_heap_id); |
2192 | } |
2193 | } |
2194 | |
2195 | void WriteFill(Serializer* s) { |
2196 | ASSERT(s->kind() == Snapshot::kFullAOT); |
2197 | for (auto const ref : canonical_wsrs_) { |
2198 | AutoTraceObject(ref); |
2199 | |
2200 | // In precompiled mode, we drop the reference to the target and only |
2201 | // keep the class ID. |
2202 | s->WriteCid(WeakSerializationReference::TargetClassIdOf(ref)); |
2203 | } |
2204 | } |
2205 | |
2206 | // Picks a WSR for each target class ID to be canonical. Should only be run |
2207 | // after all objects have been traced. |
2208 | void CanonicalizeReferences() { |
2209 | ASSERT(!have_canonicalized_wsrs_); |
2210 | for (intptr_t i = 0; i < objects_.length(); i++) { |
2211 | auto const ref = objects_[i]; |
2212 | if (ShouldDrop(ref)) continue; |
2213 | auto const target_cid = WeakSerializationReference::TargetClassIdOf(ref); |
2214 | if (canonical_wsr_map_.HasKey(target_cid)) continue; |
2215 | canonical_wsr_map_.Insert(target_cid, i + 1); |
2216 | canonical_wsrs_.Add(ref); |
2217 | } |
2218 | have_canonicalized_wsrs_ = true; |
2219 | } |
2220 | |
2221 | intptr_t WrittenCount() const { |
2222 | ASSERT(have_canonicalized_wsrs_); |
2223 | return canonical_wsrs_.length(); |
2224 | } |
2225 | |
2226 | intptr_t DroppedCount() const { return TotalCount() - WrittenCount(); } |
2227 | |
2228 | intptr_t TotalCount() const { return objects_.length(); } |
2229 | |
2230 | private: |
2231 | // Returns whether a WSR should be dropped due to its target being reachable |
2232 | // via strong references. WSRs only wrap heap objects, so we can just retrieve |
2233 | // the object ID from the heap directly. |
2234 | bool ShouldDrop(WeakSerializationReferencePtr ref) const { |
2235 | auto const target = WeakSerializationReference::TargetOf(ref); |
2236 | return Serializer::IsReachableReference(heap_->GetObjectId(target)); |
2237 | } |
2238 | |
2239 | Heap* const heap_; |
2240 | GrowableArray<WeakSerializationReferencePtr> objects_; |
2241 | GrowableArray<WeakSerializationReferencePtr> canonical_wsrs_; |
2242 | IntMap<intptr_t> canonical_wsr_map_; |
2243 | bool have_canonicalized_wsrs_ = false; |
2244 | }; |
2245 | #endif |
2246 | |
2247 | #if defined(DART_PRECOMPILED_RUNTIME) |
2248 | class WeakSerializationReferenceDeserializationCluster |
2249 | : public DeserializationCluster { |
2250 | public: |
2251 | WeakSerializationReferenceDeserializationCluster() {} |
2252 | ~WeakSerializationReferenceDeserializationCluster() {} |
2253 | |
2254 | void ReadAlloc(Deserializer* d) { |
2255 | start_index_ = d->next_index(); |
2256 | PageSpace* old_space = d->heap()->old_space(); |
2257 | const intptr_t count = d->ReadUnsigned(); |
2258 | |
2259 | for (intptr_t i = 0; i < count; i++) { |
2260 | auto ref = AllocateUninitialized( |
2261 | old_space, WeakSerializationReference::InstanceSize()); |
2262 | d->AssignRef(ref); |
2263 | } |
2264 | |
2265 | stop_index_ = d->next_index(); |
2266 | } |
2267 | |
2268 | void ReadFill(Deserializer* d) { |
2269 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2270 | auto const ref = static_cast<WeakSerializationReferencePtr>(d->Ref(id)); |
2271 | Deserializer::InitializeHeader( |
2272 | ref, kWeakSerializationReferenceCid, |
2273 | WeakSerializationReference::InstanceSize()); |
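      // Only the target's class id was serialized; the target object itself
      // was dropped when the snapshot was written.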
2274 | ref->ptr()->cid_ = d->ReadCid(); |
2275 | } |
2276 | } |
2277 | }; |
2278 | #endif |
2279 | |
2280 | #if !defined(DART_PRECOMPILED_RUNTIME) |
class PcDescriptorsSerializationCluster : public SerializationCluster {
 public:
  PcDescriptorsSerializationCluster()
      : SerializationCluster("PcDescriptors") {}
  ~PcDescriptorsSerializationCluster() {}

  void Trace(Serializer* s, ObjectPtr object) {
    PcDescriptorsPtr desc = PcDescriptors::RawCast(object);
    objects_.Add(desc);
  }

  void WriteAlloc(Serializer* s) {
    s->WriteCid(kPcDescriptorsCid);
    const intptr_t count = objects_.length();
    s->WriteUnsigned(count);
    for (intptr_t i = 0; i < count; i++) {
      PcDescriptorsPtr desc = objects_[i];
      s->AssignRef(desc);
      AutoTraceObject(desc);
      const intptr_t length = desc->ptr()->length_;
      s->WriteUnsigned(length);
    }
  }

  void WriteFill(Serializer* s) {
    const intptr_t count = objects_.length();
    for (intptr_t i = 0; i < count; i++) {
      PcDescriptorsPtr desc = objects_[i];
      AutoTraceObject(desc);
      const intptr_t length = desc->ptr()->length_;
      s->WriteUnsigned(length);
      uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->ptr()->data());
      s->WriteBytes(cdata, length);
    }
  }

 private:
  GrowableArray<PcDescriptorsPtr> objects_;
};
2319 | #endif // !DART_PRECOMPILED_RUNTIME |
2320 | |
2321 | class PcDescriptorsDeserializationCluster : public DeserializationCluster { |
2322 | public: |
2323 | PcDescriptorsDeserializationCluster() {} |
2324 | ~PcDescriptorsDeserializationCluster() {} |
2325 | |
2326 | void ReadAlloc(Deserializer* d) { |
2327 | start_index_ = d->next_index(); |
2328 | PageSpace* old_space = d->heap()->old_space(); |
2329 | const intptr_t count = d->ReadUnsigned(); |
2330 | for (intptr_t i = 0; i < count; i++) { |
2331 | const intptr_t length = d->ReadUnsigned(); |
2332 | d->AssignRef(AllocateUninitialized(old_space, |
2333 | PcDescriptors::InstanceSize(length))); |
2334 | } |
2335 | stop_index_ = d->next_index(); |
2336 | } |
2337 | |
2338 | void ReadFill(Deserializer* d) { |
    for (intptr_t id = start_index_; id < stop_index_; id++) {
2340 | const intptr_t length = d->ReadUnsigned(); |
2341 | PcDescriptorsPtr desc = static_cast<PcDescriptorsPtr>(d->Ref(id)); |
2342 | Deserializer::InitializeHeader(desc, kPcDescriptorsCid, |
2343 | PcDescriptors::InstanceSize(length)); |
2344 | desc->ptr()->length_ = length; |
2345 | uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->ptr()->data()); |
2346 | d->ReadBytes(cdata, length); |
2347 | } |
2348 | } |
2349 | }; |
2350 | |
2351 | #if !defined(DART_PRECOMPILED_RUNTIME) |
// PcDescriptors, CompressedStackMaps, OneByteString, TwoByteString
2353 | class RODataSerializationCluster : public SerializationCluster { |
2354 | public: |
2355 | RODataSerializationCluster(Zone* zone, const char* type, intptr_t cid) |
2356 | : SerializationCluster(ImageWriter::TagObjectTypeAsReadOnly(zone, type)), |
2357 | cid_(cid), |
2358 | objects_(), |
2359 | type_(type) {} |
2360 | ~RODataSerializationCluster() {} |
2361 | |
2362 | void Trace(Serializer* s, ObjectPtr object) { |
2363 | // A string's hash must already be computed when we write it because it |
2364 | // will be loaded into read-only memory. Extra bytes due to allocation |
2365 | // rounding need to be deterministically set for reliable deduplication in |
2366 | // shared images. |
2367 | if (object->ptr()->InVMIsolateHeap() || |
2368 | s->heap()->old_space()->IsObjectFromImagePages(object)) { |
2369 | // This object is already read-only. |
2370 | } else { |
2371 | Object::FinalizeReadOnlyObject(object); |
2372 | } |
2373 | |
2374 | objects_.Add(object); |
2375 | } |
2376 | |
2377 | void WriteAlloc(Serializer* s) { |
2378 | s->WriteCid(cid_); |
2379 | |
2380 | intptr_t count = objects_.length(); |
2381 | s->WriteUnsigned(count); |
2382 | uint32_t running_offset = 0; |
2383 | for (intptr_t i = 0; i < count; i++) { |
2384 | ObjectPtr object = objects_[i]; |
2385 | s->AssignRef(object); |
2386 | if (cid_ == kOneByteStringCid || cid_ == kTwoByteStringCid) { |
2387 | s->TraceStartWritingObject(type_, object, String::RawCast(object)); |
2388 | } else { |
2389 | s->TraceStartWritingObject(type_, object, nullptr); |
2390 | } |
2391 | uint32_t offset = s->GetDataOffset(object); |
2392 | s->TraceDataOffset(offset); |
2393 | ASSERT(Utils::IsAligned( |
2394 | offset, compiler::target::ObjectAlignment::kObjectAlignment)); |
2395 | ASSERT(offset > running_offset); |
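      // Offsets into the read-only image are strictly increasing, so they
      // are delta-encoded in object-alignment units to keep the unsigned
      // values small.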
2396 | s->WriteUnsigned((offset - running_offset) >> |
2397 | compiler::target::ObjectAlignment::kObjectAlignmentLog2); |
2398 | running_offset = offset; |
2399 | s->TraceEndWritingObject(); |
2400 | } |
2401 | } |
2402 | |
2403 | void WriteFill(Serializer* s) { |
2404 | // No-op. |
2405 | } |
2406 | |
2407 | private: |
2408 | const intptr_t cid_; |
2409 | GrowableArray<ObjectPtr> objects_; |
2410 | const char* const type_; |
2411 | }; |
2412 | #endif // !DART_PRECOMPILED_RUNTIME |
2413 | |
2414 | class RODataDeserializationCluster : public DeserializationCluster { |
2415 | public: |
2416 | RODataDeserializationCluster() {} |
2417 | ~RODataDeserializationCluster() {} |
2418 | |
2419 | void ReadAlloc(Deserializer* d) { |
2420 | intptr_t count = d->ReadUnsigned(); |
2421 | uint32_t running_offset = 0; |
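    // Mirrors the serializer's delta encoding: each value read is the
    // distance from the previously read object, in object-alignment units.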
2422 | for (intptr_t i = 0; i < count; i++) { |
2423 | running_offset += d->ReadUnsigned() << kObjectAlignmentLog2; |
2424 | d->AssignRef(d->GetObjectAt(running_offset)); |
2425 | } |
2426 | } |
2427 | |
2428 | void ReadFill(Deserializer* d) { |
2429 | // No-op. |
2430 | } |
2431 | }; |
2432 | |
2433 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2434 | class ExceptionHandlersSerializationCluster : public SerializationCluster { |
2435 | public: |
2436 | ExceptionHandlersSerializationCluster() |
2437 | : SerializationCluster("ExceptionHandlers" ) {} |
2438 | ~ExceptionHandlersSerializationCluster() {} |
2439 | |
2440 | void Trace(Serializer* s, ObjectPtr object) { |
2441 | ExceptionHandlersPtr handlers = ExceptionHandlers::RawCast(object); |
2442 | objects_.Add(handlers); |
2443 | |
2444 | s->Push(handlers->ptr()->handled_types_data_); |
2445 | } |
2446 | |
2447 | void WriteAlloc(Serializer* s) { |
2448 | s->WriteCid(kExceptionHandlersCid); |
2449 | const intptr_t count = objects_.length(); |
2450 | s->WriteUnsigned(count); |
2451 | for (intptr_t i = 0; i < count; i++) { |
2452 | ExceptionHandlersPtr handlers = objects_[i]; |
2453 | s->AssignRef(handlers); |
2454 | AutoTraceObject(handlers); |
2455 | const intptr_t length = handlers->ptr()->num_entries_; |
2456 | s->WriteUnsigned(length); |
2457 | } |
2458 | } |
2459 | |
2460 | void WriteFill(Serializer* s) { |
2461 | const intptr_t count = objects_.length(); |
2462 | for (intptr_t i = 0; i < count; i++) { |
2463 | ExceptionHandlersPtr handlers = objects_[i]; |
2464 | AutoTraceObject(handlers); |
2465 | const intptr_t length = handlers->ptr()->num_entries_; |
2466 | s->WriteUnsigned(length); |
2467 | WriteField(handlers, handled_types_data_); |
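      // Write each entry field by field with explicit widths so the
      // snapshot format does not depend on the in-memory layout or padding
      // of ExceptionHandlerInfo.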
2468 | for (intptr_t j = 0; j < length; j++) { |
2469 | const ExceptionHandlerInfo& info = handlers->ptr()->data()[j]; |
2470 | s->Write<uint32_t>(info.handler_pc_offset); |
2471 | s->Write<int16_t>(info.outer_try_index); |
2472 | s->Write<int8_t>(info.needs_stacktrace); |
2473 | s->Write<int8_t>(info.has_catch_all); |
2474 | s->Write<int8_t>(info.is_generated); |
2475 | } |
2476 | } |
2477 | } |
2478 | |
2479 | private: |
2480 | GrowableArray<ExceptionHandlersPtr> objects_; |
2481 | }; |
2482 | #endif // !DART_PRECOMPILED_RUNTIME |
2483 | |
2484 | class ExceptionHandlersDeserializationCluster : public DeserializationCluster { |
2485 | public: |
2486 | ExceptionHandlersDeserializationCluster() {} |
2487 | ~ExceptionHandlersDeserializationCluster() {} |
2488 | |
2489 | void ReadAlloc(Deserializer* d) { |
2490 | start_index_ = d->next_index(); |
2491 | PageSpace* old_space = d->heap()->old_space(); |
2492 | const intptr_t count = d->ReadUnsigned(); |
2493 | for (intptr_t i = 0; i < count; i++) { |
2494 | const intptr_t length = d->ReadUnsigned(); |
2495 | d->AssignRef(AllocateUninitialized( |
2496 | old_space, ExceptionHandlers::InstanceSize(length))); |
2497 | } |
2498 | stop_index_ = d->next_index(); |
2499 | } |
2500 | |
2501 | void ReadFill(Deserializer* d) { |
2502 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2503 | ExceptionHandlersPtr handlers = |
2504 | static_cast<ExceptionHandlersPtr>(d->Ref(id)); |
2505 | const intptr_t length = d->ReadUnsigned(); |
2506 | Deserializer::InitializeHeader(handlers, kExceptionHandlersCid, |
2507 | ExceptionHandlers::InstanceSize(length)); |
2508 | handlers->ptr()->num_entries_ = length; |
2509 | handlers->ptr()->handled_types_data_ = |
2510 | static_cast<ArrayPtr>(d->ReadRef()); |
2511 | for (intptr_t j = 0; j < length; j++) { |
2512 | ExceptionHandlerInfo& info = handlers->ptr()->data()[j]; |
2513 | info.handler_pc_offset = d->Read<uint32_t>(); |
2514 | info.outer_try_index = d->Read<int16_t>(); |
2515 | info.needs_stacktrace = d->Read<int8_t>(); |
2516 | info.has_catch_all = d->Read<int8_t>(); |
2517 | info.is_generated = d->Read<int8_t>(); |
2518 | } |
2519 | } |
2520 | } |
2521 | }; |
2522 | |
2523 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2524 | class ContextSerializationCluster : public SerializationCluster { |
2525 | public: |
2526 | ContextSerializationCluster() : SerializationCluster("Context" ) {} |
2527 | ~ContextSerializationCluster() {} |
2528 | |
2529 | void Trace(Serializer* s, ObjectPtr object) { |
2530 | ContextPtr context = Context::RawCast(object); |
2531 | objects_.Add(context); |
2532 | |
2533 | s->Push(context->ptr()->parent_); |
2534 | const intptr_t length = context->ptr()->num_variables_; |
2535 | for (intptr_t i = 0; i < length; i++) { |
2536 | s->Push(context->ptr()->data()[i]); |
2537 | } |
2538 | } |
2539 | |
2540 | void WriteAlloc(Serializer* s) { |
2541 | s->WriteCid(kContextCid); |
2542 | const intptr_t count = objects_.length(); |
2543 | s->WriteUnsigned(count); |
2544 | for (intptr_t i = 0; i < count; i++) { |
2545 | ContextPtr context = objects_[i]; |
2546 | s->AssignRef(context); |
2547 | AutoTraceObject(context); |
2548 | const intptr_t length = context->ptr()->num_variables_; |
2549 | s->WriteUnsigned(length); |
2550 | } |
2551 | } |
2552 | |
2553 | void WriteFill(Serializer* s) { |
2554 | const intptr_t count = objects_.length(); |
2555 | for (intptr_t i = 0; i < count; i++) { |
2556 | ContextPtr context = objects_[i]; |
2557 | AutoTraceObject(context); |
2558 | const intptr_t length = context->ptr()->num_variables_; |
2559 | s->WriteUnsigned(length); |
2560 | WriteField(context, parent_); |
2561 | for (intptr_t j = 0; j < length; j++) { |
2562 | s->WriteElementRef(context->ptr()->data()[j], j); |
2563 | } |
2564 | } |
2565 | } |
2566 | |
2567 | private: |
2568 | GrowableArray<ContextPtr> objects_; |
2569 | }; |
2570 | #endif // !DART_PRECOMPILED_RUNTIME |
2571 | |
2572 | class ContextDeserializationCluster : public DeserializationCluster { |
2573 | public: |
2574 | ContextDeserializationCluster() {} |
2575 | ~ContextDeserializationCluster() {} |
2576 | |
2577 | void ReadAlloc(Deserializer* d) { |
2578 | start_index_ = d->next_index(); |
2579 | PageSpace* old_space = d->heap()->old_space(); |
2580 | const intptr_t count = d->ReadUnsigned(); |
2581 | for (intptr_t i = 0; i < count; i++) { |
2582 | const intptr_t length = d->ReadUnsigned(); |
2583 | d->AssignRef( |
2584 | AllocateUninitialized(old_space, Context::InstanceSize(length))); |
2585 | } |
2586 | stop_index_ = d->next_index(); |
2587 | } |
2588 | |
2589 | void ReadFill(Deserializer* d) { |
2590 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2591 | ContextPtr context = static_cast<ContextPtr>(d->Ref(id)); |
2592 | const intptr_t length = d->ReadUnsigned(); |
2593 | Deserializer::InitializeHeader(context, kContextCid, |
2594 | Context::InstanceSize(length)); |
2595 | context->ptr()->num_variables_ = length; |
2596 | context->ptr()->parent_ = static_cast<ContextPtr>(d->ReadRef()); |
2597 | for (intptr_t j = 0; j < length; j++) { |
2598 | context->ptr()->data()[j] = d->ReadRef(); |
2599 | } |
2600 | } |
2601 | } |
2602 | }; |
2603 | |
2604 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2605 | class ContextScopeSerializationCluster : public SerializationCluster { |
2606 | public: |
2607 | ContextScopeSerializationCluster() : SerializationCluster("ContextScope" ) {} |
2608 | ~ContextScopeSerializationCluster() {} |
2609 | |
2610 | void Trace(Serializer* s, ObjectPtr object) { |
2611 | ContextScopePtr scope = ContextScope::RawCast(object); |
2612 | objects_.Add(scope); |
2613 | |
2614 | const intptr_t length = scope->ptr()->num_variables_; |
2615 | PushFromTo(scope, length); |
2616 | } |
2617 | |
2618 | void WriteAlloc(Serializer* s) { |
2619 | s->WriteCid(kContextScopeCid); |
2620 | const intptr_t count = objects_.length(); |
2621 | s->WriteUnsigned(count); |
2622 | for (intptr_t i = 0; i < count; i++) { |
2623 | ContextScopePtr scope = objects_[i]; |
2624 | s->AssignRef(scope); |
2625 | AutoTraceObject(scope); |
2626 | const intptr_t length = scope->ptr()->num_variables_; |
2627 | s->WriteUnsigned(length); |
2628 | } |
2629 | } |
2630 | |
2631 | void WriteFill(Serializer* s) { |
2632 | const intptr_t count = objects_.length(); |
2633 | for (intptr_t i = 0; i < count; i++) { |
2634 | ContextScopePtr scope = objects_[i]; |
2635 | AutoTraceObject(scope); |
2636 | const intptr_t length = scope->ptr()->num_variables_; |
2637 | s->WriteUnsigned(length); |
2638 | s->Write<bool>(scope->ptr()->is_implicit_); |
2639 | WriteFromTo(scope, length); |
2640 | } |
2641 | } |
2642 | |
2643 | private: |
2644 | GrowableArray<ContextScopePtr> objects_; |
2645 | }; |
2646 | #endif // !DART_PRECOMPILED_RUNTIME |
2647 | |
2648 | class ContextScopeDeserializationCluster : public DeserializationCluster { |
2649 | public: |
2650 | ContextScopeDeserializationCluster() {} |
2651 | ~ContextScopeDeserializationCluster() {} |
2652 | |
2653 | void ReadAlloc(Deserializer* d) { |
2654 | start_index_ = d->next_index(); |
2655 | PageSpace* old_space = d->heap()->old_space(); |
2656 | const intptr_t count = d->ReadUnsigned(); |
2657 | for (intptr_t i = 0; i < count; i++) { |
2658 | const intptr_t length = d->ReadUnsigned(); |
2659 | d->AssignRef( |
2660 | AllocateUninitialized(old_space, ContextScope::InstanceSize(length))); |
2661 | } |
2662 | stop_index_ = d->next_index(); |
2663 | } |
2664 | |
2665 | void ReadFill(Deserializer* d) { |
2666 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2667 | ContextScopePtr scope = static_cast<ContextScopePtr>(d->Ref(id)); |
2668 | const intptr_t length = d->ReadUnsigned(); |
2669 | Deserializer::InitializeHeader(scope, kContextScopeCid, |
2670 | ContextScope::InstanceSize(length)); |
2671 | scope->ptr()->num_variables_ = length; |
2672 | scope->ptr()->is_implicit_ = d->Read<bool>(); |
2673 | ReadFromTo(scope, length); |
2674 | } |
2675 | } |
2676 | }; |
2677 | |
2678 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2679 | class ParameterTypeCheckSerializationCluster : public SerializationCluster { |
2680 | public: |
2681 | ParameterTypeCheckSerializationCluster() |
2682 | : SerializationCluster("ParameterTypeCheck" ) {} |
2683 | ~ParameterTypeCheckSerializationCluster() {} |
2684 | |
2685 | void Trace(Serializer* s, ObjectPtr object) { |
2686 | ParameterTypeCheckPtr unlinked = ParameterTypeCheck::RawCast(object); |
2687 | objects_.Add(unlinked); |
2688 | PushFromTo(unlinked); |
2689 | } |
2690 | |
2691 | void WriteAlloc(Serializer* s) { |
2692 | s->WriteCid(kParameterTypeCheckCid); |
2693 | const intptr_t count = objects_.length(); |
2694 | s->WriteUnsigned(count); |
2695 | for (intptr_t i = 0; i < count; i++) { |
2696 | ParameterTypeCheckPtr check = objects_[i]; |
2697 | s->AssignRef(check); |
2698 | } |
2699 | } |
2700 | |
2701 | void WriteFill(Serializer* s) { |
2702 | const intptr_t count = objects_.length(); |
2703 | for (intptr_t i = 0; i < count; i++) { |
2704 | ParameterTypeCheckPtr check = objects_[i]; |
2705 | s->Write<intptr_t>(check->ptr()->index_); |
2706 | WriteFromTo(check); |
2707 | } |
2708 | } |
2709 | |
2710 | private: |
2711 | GrowableArray<ParameterTypeCheckPtr> objects_; |
2712 | }; |
2713 | #endif // !DART_PRECOMPILED_RUNTIME |
2714 | |
2715 | class ParameterTypeCheckDeserializationCluster : public DeserializationCluster { |
2716 | public: |
2717 | ParameterTypeCheckDeserializationCluster() {} |
2718 | ~ParameterTypeCheckDeserializationCluster() {} |
2719 | |
2720 | void ReadAlloc(Deserializer* d) { |
2721 | start_index_ = d->next_index(); |
2722 | PageSpace* old_space = d->heap()->old_space(); |
2723 | const intptr_t count = d->ReadUnsigned(); |
2724 | for (intptr_t i = 0; i < count; i++) { |
2725 | d->AssignRef( |
2726 | AllocateUninitialized(old_space, ParameterTypeCheck::InstanceSize())); |
2727 | } |
2728 | stop_index_ = d->next_index(); |
2729 | } |
2730 | |
2731 | void ReadFill(Deserializer* d) { |
2732 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2733 | ParameterTypeCheckPtr check = |
2734 | static_cast<ParameterTypeCheckPtr>(d->Ref(id)); |
2735 | Deserializer::InitializeHeader(check, kParameterTypeCheckCid, |
2736 | ParameterTypeCheck::InstanceSize()); |
2737 | check->ptr()->index_ = d->Read<intptr_t>(); |
2738 | ReadFromTo(check); |
2739 | } |
2740 | } |
2741 | }; |
2742 | |
2743 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2744 | class UnlinkedCallSerializationCluster : public SerializationCluster { |
2745 | public: |
2746 | UnlinkedCallSerializationCluster() : SerializationCluster("UnlinkedCall" ) {} |
2747 | ~UnlinkedCallSerializationCluster() {} |
2748 | |
2749 | void Trace(Serializer* s, ObjectPtr object) { |
2750 | UnlinkedCallPtr unlinked = UnlinkedCall::RawCast(object); |
2751 | objects_.Add(unlinked); |
2752 | PushFromTo(unlinked); |
2753 | } |
2754 | |
2755 | void WriteAlloc(Serializer* s) { |
2756 | s->WriteCid(kUnlinkedCallCid); |
2757 | const intptr_t count = objects_.length(); |
2758 | s->WriteUnsigned(count); |
2759 | for (intptr_t i = 0; i < count; i++) { |
2760 | UnlinkedCallPtr unlinked = objects_[i]; |
2761 | s->AssignRef(unlinked); |
2762 | } |
2763 | } |
2764 | |
2765 | void WriteFill(Serializer* s) { |
2766 | const intptr_t count = objects_.length(); |
2767 | for (intptr_t i = 0; i < count; i++) { |
2768 | UnlinkedCallPtr unlinked = objects_[i]; |
2769 | AutoTraceObjectName(unlinked, unlinked->ptr()->target_name_); |
2770 | WriteFromTo(unlinked); |
2771 | s->Write<bool>(unlinked->ptr()->can_patch_to_monomorphic_); |
2772 | } |
2773 | } |
2774 | |
2775 | private: |
2776 | GrowableArray<UnlinkedCallPtr> objects_; |
2777 | }; |
2778 | #endif // !DART_PRECOMPILED_RUNTIME |
2779 | |
2780 | class UnlinkedCallDeserializationCluster : public DeserializationCluster { |
2781 | public: |
2782 | UnlinkedCallDeserializationCluster() {} |
2783 | ~UnlinkedCallDeserializationCluster() {} |
2784 | |
2785 | void ReadAlloc(Deserializer* d) { |
2786 | start_index_ = d->next_index(); |
2787 | PageSpace* old_space = d->heap()->old_space(); |
2788 | const intptr_t count = d->ReadUnsigned(); |
2789 | for (intptr_t i = 0; i < count; i++) { |
2790 | d->AssignRef( |
2791 | AllocateUninitialized(old_space, UnlinkedCall::InstanceSize())); |
2792 | } |
2793 | stop_index_ = d->next_index(); |
2794 | } |
2795 | |
2796 | void ReadFill(Deserializer* d) { |
2797 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2798 | UnlinkedCallPtr unlinked = static_cast<UnlinkedCallPtr>(d->Ref(id)); |
2799 | Deserializer::InitializeHeader(unlinked, kUnlinkedCallCid, |
2800 | UnlinkedCall::InstanceSize()); |
2801 | ReadFromTo(unlinked); |
2802 | unlinked->ptr()->can_patch_to_monomorphic_ = d->Read<bool>(); |
2803 | } |
2804 | } |
2805 | }; |
2806 | |
2807 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2808 | class ICDataSerializationCluster : public SerializationCluster { |
2809 | public: |
2810 | ICDataSerializationCluster() : SerializationCluster("ICData" ) {} |
2811 | ~ICDataSerializationCluster() {} |
2812 | |
2813 | void Trace(Serializer* s, ObjectPtr object) { |
2814 | ICDataPtr ic = ICData::RawCast(object); |
2815 | objects_.Add(ic); |
2816 | PushFromTo(ic); |
2817 | } |
2818 | |
2819 | void WriteAlloc(Serializer* s) { |
2820 | s->WriteCid(kICDataCid); |
2821 | const intptr_t count = objects_.length(); |
2822 | s->WriteUnsigned(count); |
2823 | for (intptr_t i = 0; i < count; i++) { |
2824 | ICDataPtr ic = objects_[i]; |
2825 | s->AssignRef(ic); |
2826 | } |
2827 | } |
2828 | |
2829 | void WriteFill(Serializer* s) { |
2830 | Snapshot::Kind kind = s->kind(); |
2831 | const intptr_t count = objects_.length(); |
2832 | for (intptr_t i = 0; i < count; i++) { |
2833 | ICDataPtr ic = objects_[i]; |
2834 | AutoTraceObjectName(ic, ic->ptr()->target_name_); |
2835 | WriteFromTo(ic); |
2836 | if (kind != Snapshot::kFullAOT) { |
2837 | NOT_IN_PRECOMPILED(s->Write<int32_t>(ic->ptr()->deopt_id_)); |
2838 | } |
2839 | s->Write<uint32_t>(ic->ptr()->state_bits_); |
2840 | } |
2841 | } |
2842 | |
2843 | private: |
2844 | GrowableArray<ICDataPtr> objects_; |
2845 | }; |
2846 | #endif // !DART_PRECOMPILED_RUNTIME |
2847 | |
2848 | class ICDataDeserializationCluster : public DeserializationCluster { |
2849 | public: |
2850 | ICDataDeserializationCluster() {} |
2851 | ~ICDataDeserializationCluster() {} |
2852 | |
2853 | void ReadAlloc(Deserializer* d) { |
2854 | start_index_ = d->next_index(); |
2855 | PageSpace* old_space = d->heap()->old_space(); |
2856 | const intptr_t count = d->ReadUnsigned(); |
2857 | for (intptr_t i = 0; i < count; i++) { |
2858 | d->AssignRef(AllocateUninitialized(old_space, ICData::InstanceSize())); |
2859 | } |
2860 | stop_index_ = d->next_index(); |
2861 | } |
2862 | |
2863 | void ReadFill(Deserializer* d) { |
2864 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2865 | ICDataPtr ic = static_cast<ICDataPtr>(d->Ref(id)); |
2866 | Deserializer::InitializeHeader(ic, kICDataCid, ICData::InstanceSize()); |
2867 | ReadFromTo(ic); |
2868 | NOT_IN_PRECOMPILED(ic->ptr()->deopt_id_ = d->Read<int32_t>()); |
      ic->ptr()->state_bits_ = d->Read<uint32_t>();
2870 | } |
2871 | } |
2872 | }; |
2873 | |
2874 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2875 | class MegamorphicCacheSerializationCluster : public SerializationCluster { |
2876 | public: |
2877 | MegamorphicCacheSerializationCluster() |
2878 | : SerializationCluster("MegamorphicCache" ) {} |
2879 | ~MegamorphicCacheSerializationCluster() {} |
2880 | |
2881 | void Trace(Serializer* s, ObjectPtr object) { |
2882 | MegamorphicCachePtr cache = MegamorphicCache::RawCast(object); |
2883 | objects_.Add(cache); |
2884 | PushFromTo(cache); |
2885 | } |
2886 | |
2887 | void WriteAlloc(Serializer* s) { |
2888 | s->WriteCid(kMegamorphicCacheCid); |
2889 | const intptr_t count = objects_.length(); |
2890 | s->WriteUnsigned(count); |
2891 | for (intptr_t i = 0; i < count; i++) { |
2892 | MegamorphicCachePtr cache = objects_[i]; |
2893 | s->AssignRef(cache); |
2894 | } |
2895 | } |
2896 | |
2897 | void WriteFill(Serializer* s) { |
2898 | const intptr_t count = objects_.length(); |
2899 | for (intptr_t i = 0; i < count; i++) { |
2900 | MegamorphicCachePtr cache = objects_[i]; |
2901 | AutoTraceObjectName(cache, cache->ptr()->target_name_); |
2902 | WriteFromTo(cache); |
2903 | s->Write<int32_t>(cache->ptr()->filled_entry_count_); |
2904 | } |
2905 | } |
2906 | |
2907 | private: |
2908 | GrowableArray<MegamorphicCachePtr> objects_; |
2909 | }; |
2910 | #endif // !DART_PRECOMPILED_RUNTIME |
2911 | |
2912 | class MegamorphicCacheDeserializationCluster : public DeserializationCluster { |
2913 | public: |
2914 | MegamorphicCacheDeserializationCluster() {} |
2915 | ~MegamorphicCacheDeserializationCluster() {} |
2916 | |
2917 | void ReadAlloc(Deserializer* d) { |
2918 | start_index_ = d->next_index(); |
2919 | PageSpace* old_space = d->heap()->old_space(); |
2920 | const intptr_t count = d->ReadUnsigned(); |
2921 | for (intptr_t i = 0; i < count; i++) { |
2922 | d->AssignRef( |
2923 | AllocateUninitialized(old_space, MegamorphicCache::InstanceSize())); |
2924 | } |
2925 | stop_index_ = d->next_index(); |
2926 | } |
2927 | |
2928 | void ReadFill(Deserializer* d) { |
2929 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
2930 | MegamorphicCachePtr cache = static_cast<MegamorphicCachePtr>(d->Ref(id)); |
2931 | Deserializer::InitializeHeader(cache, kMegamorphicCacheCid, |
2932 | MegamorphicCache::InstanceSize()); |
2933 | ReadFromTo(cache); |
2934 | cache->ptr()->filled_entry_count_ = d->Read<int32_t>(); |
2935 | } |
2936 | } |
2937 | |
2938 | #if defined(DART_PRECOMPILED_RUNTIME) |
2939 | void PostLoad(Deserializer* d, const Array& refs) { |
2940 | if (FLAG_use_bare_instructions) { |
2941 | // By default, every megamorphic call site will load the target |
2942 | // [Function] from the hash table and call indirectly via loading the |
2943 | // entrypoint from the function. |
2944 | // |
      // In --use-bare-instructions mode we reduce the extra indirection via
      // the [Function] object by storing the entry point directly into the
      // hashmap.
2947 | // |
2948 | // Currently our AOT compiler will emit megamorphic calls in certain |
2949 | // situations (namely in slow-path code of CheckedSmi* instructions). |
2950 | // |
2951 | // TODO(compiler-team): Change the CheckedSmi* slow path code to use |
2952 | // normal switchable calls instead of megamorphic calls. (This is also a |
      // memory tradeoff, because [MegamorphicCache]s are per-selector while
2954 | // [ICData] are per-callsite.) |
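      //
      // Illustrative sketch of the effect (the real work happens in
      // MegamorphicCache::SwitchToBareInstructions): a dispatch that
      // previously did, conceptually,
      //   target = table[hash(cid)];   // a [Function]
      //   call target->entry_point_
      // becomes
      //   entry = table[hash(cid)];    // a raw entry point
      //   call entry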
2955 | auto& cache = MegamorphicCache::Handle(d->zone()); |
2956 | for (intptr_t i = start_index_; i < stop_index_; ++i) { |
2957 | cache ^= refs.At(i); |
2958 | cache.SwitchToBareInstructions(); |
2959 | } |
2960 | } |
2961 | } |
2962 | #endif // defined(DART_PRECOMPILED_RUNTIME) |
2963 | }; |
2964 | |
2965 | #if !defined(DART_PRECOMPILED_RUNTIME) |
2966 | class SubtypeTestCacheSerializationCluster : public SerializationCluster { |
2967 | public: |
2968 | SubtypeTestCacheSerializationCluster() |
2969 | : SerializationCluster("SubtypeTestCache" ) {} |
2970 | ~SubtypeTestCacheSerializationCluster() {} |
2971 | |
2972 | void Trace(Serializer* s, ObjectPtr object) { |
2973 | SubtypeTestCachePtr cache = SubtypeTestCache::RawCast(object); |
2974 | objects_.Add(cache); |
2975 | s->Push(cache->ptr()->cache_); |
2976 | } |
2977 | |
2978 | void WriteAlloc(Serializer* s) { |
2979 | s->WriteCid(kSubtypeTestCacheCid); |
2980 | const intptr_t count = objects_.length(); |
2981 | s->WriteUnsigned(count); |
2982 | for (intptr_t i = 0; i < count; i++) { |
2983 | SubtypeTestCachePtr cache = objects_[i]; |
2984 | s->AssignRef(cache); |
2985 | } |
2986 | } |
2987 | |
2988 | void WriteFill(Serializer* s) { |
2989 | const intptr_t count = objects_.length(); |
2990 | for (intptr_t i = 0; i < count; i++) { |
2991 | SubtypeTestCachePtr cache = objects_[i]; |
2992 | AutoTraceObject(cache); |
2993 | WriteField(cache, cache_); |
2994 | } |
2995 | } |
2996 | |
2997 | private: |
2998 | GrowableArray<SubtypeTestCachePtr> objects_; |
2999 | }; |
3000 | #endif // !DART_PRECOMPILED_RUNTIME |
3001 | |
3002 | class SubtypeTestCacheDeserializationCluster : public DeserializationCluster { |
3003 | public: |
3004 | SubtypeTestCacheDeserializationCluster() {} |
3005 | ~SubtypeTestCacheDeserializationCluster() {} |
3006 | |
3007 | void ReadAlloc(Deserializer* d) { |
3008 | start_index_ = d->next_index(); |
3009 | PageSpace* old_space = d->heap()->old_space(); |
3010 | const intptr_t count = d->ReadUnsigned(); |
3011 | for (intptr_t i = 0; i < count; i++) { |
3012 | d->AssignRef( |
3013 | AllocateUninitialized(old_space, SubtypeTestCache::InstanceSize())); |
3014 | } |
3015 | stop_index_ = d->next_index(); |
3016 | } |
3017 | |
3018 | void ReadFill(Deserializer* d) { |
3019 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3020 | SubtypeTestCachePtr cache = static_cast<SubtypeTestCachePtr>(d->Ref(id)); |
3021 | Deserializer::InitializeHeader(cache, kSubtypeTestCacheCid, |
3022 | SubtypeTestCache::InstanceSize()); |
3023 | cache->ptr()->cache_ = static_cast<ArrayPtr>(d->ReadRef()); |
3024 | } |
3025 | } |
3026 | }; |
3027 | |
3028 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3029 | class LoadingUnitSerializationCluster : public SerializationCluster { |
3030 | public: |
  LoadingUnitSerializationCluster() : SerializationCluster("LoadingUnit") {}
3032 | ~LoadingUnitSerializationCluster() {} |
3033 | |
3034 | void Trace(Serializer* s, ObjectPtr object) { |
3035 | LoadingUnitPtr unit = LoadingUnit::RawCast(object); |
3036 | objects_.Add(unit); |
3037 | s->Push(unit->ptr()->parent_); |
3038 | } |
3039 | |
3040 | void WriteAlloc(Serializer* s) { |
3041 | s->WriteCid(kLoadingUnitCid); |
3042 | const intptr_t count = objects_.length(); |
3043 | s->WriteUnsigned(count); |
3044 | for (intptr_t i = 0; i < count; i++) { |
3045 | LoadingUnitPtr unit = objects_[i]; |
3046 | s->AssignRef(unit); |
3047 | } |
3048 | } |
3049 | |
3050 | void WriteFill(Serializer* s) { |
3051 | const intptr_t count = objects_.length(); |
3052 | for (intptr_t i = 0; i < count; i++) { |
3053 | LoadingUnitPtr unit = objects_[i]; |
3054 | AutoTraceObject(unit); |
3055 | WriteField(unit, parent_); |
3056 | s->Write<int32_t>(unit->ptr()->id_); |
3057 | } |
3058 | } |
3059 | |
3060 | private: |
3061 | GrowableArray<LoadingUnitPtr> objects_; |
3062 | }; |
3063 | #endif // !DART_PRECOMPILED_RUNTIME |
3064 | |
3065 | class LoadingUnitDeserializationCluster : public DeserializationCluster { |
3066 | public: |
3067 | LoadingUnitDeserializationCluster() {} |
3068 | ~LoadingUnitDeserializationCluster() {} |
3069 | |
3070 | void ReadAlloc(Deserializer* d) { |
3071 | start_index_ = d->next_index(); |
3072 | PageSpace* old_space = d->heap()->old_space(); |
3073 | const intptr_t count = d->ReadUnsigned(); |
3074 | for (intptr_t i = 0; i < count; i++) { |
3075 | d->AssignRef( |
3076 | AllocateUninitialized(old_space, LoadingUnit::InstanceSize())); |
3077 | } |
3078 | stop_index_ = d->next_index(); |
3079 | } |
3080 | |
3081 | void ReadFill(Deserializer* d) { |
3082 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3083 | LoadingUnitPtr unit = static_cast<LoadingUnitPtr>(d->Ref(id)); |
3084 | Deserializer::InitializeHeader(unit, kLoadingUnitCid, |
3085 | LoadingUnit::InstanceSize()); |
3086 | unit->ptr()->parent_ = static_cast<LoadingUnitPtr>(d->ReadRef()); |
3087 | unit->ptr()->base_objects_ = Array::null(); |
3088 | unit->ptr()->id_ = d->Read<int32_t>(); |
3089 | unit->ptr()->loaded_ = false; |
3090 | unit->ptr()->load_outstanding_ = false; |
3091 | } |
3092 | } |
3093 | }; |
3094 | |
3095 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3096 | class LanguageErrorSerializationCluster : public SerializationCluster { |
3097 | public: |
  LanguageErrorSerializationCluster() : SerializationCluster("LanguageError") {}
3099 | ~LanguageErrorSerializationCluster() {} |
3100 | |
3101 | void Trace(Serializer* s, ObjectPtr object) { |
3102 | LanguageErrorPtr error = LanguageError::RawCast(object); |
3103 | objects_.Add(error); |
3104 | PushFromTo(error); |
3105 | } |
3106 | |
3107 | void WriteAlloc(Serializer* s) { |
3108 | s->WriteCid(kLanguageErrorCid); |
3109 | const intptr_t count = objects_.length(); |
3110 | s->WriteUnsigned(count); |
3111 | for (intptr_t i = 0; i < count; i++) { |
3112 | LanguageErrorPtr error = objects_[i]; |
3113 | s->AssignRef(error); |
3114 | } |
3115 | } |
3116 | |
3117 | void WriteFill(Serializer* s) { |
3118 | const intptr_t count = objects_.length(); |
3119 | for (intptr_t i = 0; i < count; i++) { |
3120 | LanguageErrorPtr error = objects_[i]; |
3121 | AutoTraceObject(error); |
3122 | WriteFromTo(error); |
3123 | s->WriteTokenPosition(error->ptr()->token_pos_); |
3124 | s->Write<bool>(error->ptr()->report_after_token_); |
3125 | s->Write<int8_t>(error->ptr()->kind_); |
3126 | } |
3127 | } |
3128 | |
3129 | private: |
3130 | GrowableArray<LanguageErrorPtr> objects_; |
3131 | }; |
3132 | #endif // !DART_PRECOMPILED_RUNTIME |
3133 | |
3134 | class LanguageErrorDeserializationCluster : public DeserializationCluster { |
3135 | public: |
3136 | LanguageErrorDeserializationCluster() {} |
3137 | ~LanguageErrorDeserializationCluster() {} |
3138 | |
3139 | void ReadAlloc(Deserializer* d) { |
3140 | start_index_ = d->next_index(); |
3141 | PageSpace* old_space = d->heap()->old_space(); |
3142 | const intptr_t count = d->ReadUnsigned(); |
3143 | for (intptr_t i = 0; i < count; i++) { |
3144 | d->AssignRef( |
3145 | AllocateUninitialized(old_space, LanguageError::InstanceSize())); |
3146 | } |
3147 | stop_index_ = d->next_index(); |
3148 | } |
3149 | |
3150 | void ReadFill(Deserializer* d) { |
3151 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3152 | LanguageErrorPtr error = static_cast<LanguageErrorPtr>(d->Ref(id)); |
3153 | Deserializer::InitializeHeader(error, kLanguageErrorCid, |
3154 | LanguageError::InstanceSize()); |
3155 | ReadFromTo(error); |
3156 | error->ptr()->token_pos_ = d->ReadTokenPosition(); |
3157 | error->ptr()->report_after_token_ = d->Read<bool>(); |
3158 | error->ptr()->kind_ = d->Read<int8_t>(); |
3159 | } |
3160 | } |
3161 | }; |
3162 | |
3163 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3164 | class UnhandledExceptionSerializationCluster : public SerializationCluster { |
3165 | public: |
3166 | UnhandledExceptionSerializationCluster() |
3167 | : SerializationCluster("UnhandledException" ) {} |
3168 | ~UnhandledExceptionSerializationCluster() {} |
3169 | |
3170 | void Trace(Serializer* s, ObjectPtr object) { |
3171 | UnhandledExceptionPtr exception = UnhandledException::RawCast(object); |
3172 | objects_.Add(exception); |
3173 | PushFromTo(exception); |
3174 | } |
3175 | |
3176 | void WriteAlloc(Serializer* s) { |
3177 | s->WriteCid(kUnhandledExceptionCid); |
3178 | const intptr_t count = objects_.length(); |
3179 | s->WriteUnsigned(count); |
3180 | for (intptr_t i = 0; i < count; i++) { |
3181 | UnhandledExceptionPtr exception = objects_[i]; |
3182 | s->AssignRef(exception); |
3183 | } |
3184 | } |
3185 | |
3186 | void WriteFill(Serializer* s) { |
3187 | const intptr_t count = objects_.length(); |
3188 | for (intptr_t i = 0; i < count; i++) { |
3189 | UnhandledExceptionPtr exception = objects_[i]; |
3190 | AutoTraceObject(exception); |
3191 | WriteFromTo(exception); |
3192 | } |
3193 | } |
3194 | |
3195 | private: |
3196 | GrowableArray<UnhandledExceptionPtr> objects_; |
3197 | }; |
3198 | #endif // !DART_PRECOMPILED_RUNTIME |
3199 | |
3200 | class UnhandledExceptionDeserializationCluster : public DeserializationCluster { |
3201 | public: |
3202 | UnhandledExceptionDeserializationCluster() {} |
3203 | ~UnhandledExceptionDeserializationCluster() {} |
3204 | |
3205 | void ReadAlloc(Deserializer* d) { |
3206 | start_index_ = d->next_index(); |
3207 | PageSpace* old_space = d->heap()->old_space(); |
3208 | const intptr_t count = d->ReadUnsigned(); |
3209 | for (intptr_t i = 0; i < count; i++) { |
3210 | d->AssignRef( |
3211 | AllocateUninitialized(old_space, UnhandledException::InstanceSize())); |
3212 | } |
3213 | stop_index_ = d->next_index(); |
3214 | } |
3215 | |
3216 | void ReadFill(Deserializer* d) { |
3217 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3218 | UnhandledExceptionPtr exception = |
3219 | static_cast<UnhandledExceptionPtr>(d->Ref(id)); |
3220 | Deserializer::InitializeHeader(exception, kUnhandledExceptionCid, |
3221 | UnhandledException::InstanceSize()); |
3222 | ReadFromTo(exception); |
3223 | } |
3224 | } |
3225 | }; |
3226 | |
3227 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3228 | class InstanceSerializationCluster : public SerializationCluster { |
3229 | public: |
3230 | explicit InstanceSerializationCluster(intptr_t cid) |
3231 | : SerializationCluster("Instance" ), cid_(cid) { |
3232 | ClassPtr cls = Isolate::Current()->class_table()->At(cid); |
3233 | host_next_field_offset_in_words_ = |
3234 | cls->ptr()->host_next_field_offset_in_words_; |
3235 | ASSERT(host_next_field_offset_in_words_ > 0); |
3236 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3237 | target_next_field_offset_in_words_ = |
3238 | cls->ptr()->target_next_field_offset_in_words_; |
3239 | target_instance_size_in_words_ = cls->ptr()->target_instance_size_in_words_; |
3240 | ASSERT(target_next_field_offset_in_words_ > 0); |
3241 | ASSERT(target_instance_size_in_words_ > 0); |
3242 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
3243 | } |
3244 | ~InstanceSerializationCluster() {} |
3245 | |
3246 | void Trace(Serializer* s, ObjectPtr object) { |
3247 | InstancePtr instance = Instance::RawCast(object); |
3248 | objects_.Add(instance); |
3249 | const intptr_t next_field_offset = host_next_field_offset_in_words_ |
3250 | << kWordSizeLog2; |
3251 | const auto unboxed_fields_bitmap = |
3252 | s->isolate()->group()->shared_class_table()->GetUnboxedFieldsMapAt( |
3253 | cid_); |
3254 | intptr_t offset = Instance::NextFieldOffset(); |
3255 | while (offset < next_field_offset) { |
3256 | // Skips unboxed fields |
3257 | if (!unboxed_fields_bitmap.Get(offset / kWordSize)) { |
3258 | ObjectPtr raw_obj = *reinterpret_cast<ObjectPtr*>( |
3259 | reinterpret_cast<uword>(instance->ptr()) + offset); |
3260 | s->Push(raw_obj); |
3261 | } |
3262 | offset += kWordSize; |
3263 | } |
3264 | } |
3265 | |
3266 | void WriteAlloc(Serializer* s) { |
3267 | s->WriteCid(cid_); |
3268 | const intptr_t count = objects_.length(); |
3269 | s->WriteUnsigned(count); |
3270 | |
3271 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3272 | s->Write<int32_t>(target_next_field_offset_in_words_); |
3273 | s->Write<int32_t>(target_instance_size_in_words_); |
3274 | #else |
3275 | s->Write<int32_t>(host_next_field_offset_in_words_); |
3276 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
3277 | |
3278 | for (intptr_t i = 0; i < count; i++) { |
3279 | InstancePtr instance = objects_[i]; |
3280 | s->AssignRef(instance); |
3281 | } |
3282 | } |
3283 | |
3284 | void WriteFill(Serializer* s) { |
3285 | intptr_t next_field_offset = host_next_field_offset_in_words_ |
3286 | << kWordSizeLog2; |
3287 | const intptr_t count = objects_.length(); |
3288 | const auto unboxed_fields_bitmap = |
3289 | s->isolate()->group()->shared_class_table()->GetUnboxedFieldsMapAt( |
3290 | cid_); |
3291 | for (intptr_t i = 0; i < count; i++) { |
3292 | InstancePtr instance = objects_[i]; |
3293 | AutoTraceObject(instance); |
3294 | s->Write<bool>(instance->ptr()->IsCanonical()); |
3295 | intptr_t offset = Instance::NextFieldOffset(); |
3296 | while (offset < next_field_offset) { |
3297 | if (unboxed_fields_bitmap.Get(offset / kWordSize)) { |
3298 | // Writes 32 bits of the unboxed value at a time |
3299 | const uword value = *reinterpret_cast<uword*>( |
3300 | reinterpret_cast<uword>(instance->ptr()) + offset); |
3301 | s->WriteWordWith32BitWrites(value); |
3302 | } else { |
3303 | ObjectPtr raw_obj = *reinterpret_cast<ObjectPtr*>( |
3304 | reinterpret_cast<uword>(instance->ptr()) + offset); |
3305 | s->WriteElementRef(raw_obj, offset); |
3306 | } |
3307 | offset += kWordSize; |
3308 | } |
3309 | } |
3310 | } |
3311 | |
3312 | private: |
3313 | const intptr_t cid_; |
3314 | intptr_t host_next_field_offset_in_words_; |
3315 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3316 | intptr_t target_next_field_offset_in_words_; |
3317 | intptr_t target_instance_size_in_words_; |
3318 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
3319 | GrowableArray<InstancePtr> objects_; |
3320 | }; |
3321 | #endif // !DART_PRECOMPILED_RUNTIME |
3322 | |
3323 | class InstanceDeserializationCluster : public DeserializationCluster { |
3324 | public: |
3325 | explicit InstanceDeserializationCluster(intptr_t cid) : cid_(cid) {} |
3326 | ~InstanceDeserializationCluster() {} |
3327 | |
3328 | void ReadAlloc(Deserializer* d) { |
3329 | start_index_ = d->next_index(); |
3330 | PageSpace* old_space = d->heap()->old_space(); |
3331 | const intptr_t count = d->ReadUnsigned(); |
3332 | next_field_offset_in_words_ = d->Read<int32_t>(); |
3333 | instance_size_in_words_ = d->Read<int32_t>(); |
3334 | intptr_t instance_size = |
3335 | Object::RoundedAllocationSize(instance_size_in_words_ * kWordSize); |
3336 | for (intptr_t i = 0; i < count; i++) { |
3337 | d->AssignRef(AllocateUninitialized(old_space, instance_size)); |
3338 | } |
3339 | stop_index_ = d->next_index(); |
3340 | } |
3341 | |
3342 | void ReadFill(Deserializer* d) { |
3343 | intptr_t next_field_offset = next_field_offset_in_words_ << kWordSizeLog2; |
3344 | intptr_t instance_size = |
3345 | Object::RoundedAllocationSize(instance_size_in_words_ * kWordSize); |
3346 | |
3347 | const auto unboxed_fields_bitmap = |
3348 | d->isolate()->group()->shared_class_table()->GetUnboxedFieldsMapAt( |
3349 | cid_); |
3350 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3351 | InstancePtr instance = static_cast<InstancePtr>(d->Ref(id)); |
3352 | bool is_canonical = d->Read<bool>(); |
3353 | Deserializer::InitializeHeader(instance, cid_, instance_size, |
3354 | is_canonical); |
3355 | intptr_t offset = Instance::NextFieldOffset(); |
3356 | while (offset < next_field_offset) { |
3357 | if (unboxed_fields_bitmap.Get(offset / kWordSize)) { |
3358 | uword* p = reinterpret_cast<uword*>( |
3359 | reinterpret_cast<uword>(instance->ptr()) + offset); |
3360 | // Reads 32 bits of the unboxed value at a time |
3361 | *p = d->ReadWordWith32BitReads(); |
3362 | } else { |
3363 | ObjectPtr* p = reinterpret_cast<ObjectPtr*>( |
3364 | reinterpret_cast<uword>(instance->ptr()) + offset); |
3365 | *p = d->ReadRef(); |
3366 | } |
3367 | offset += kWordSize; |
3368 | } |
      while (offset < instance_size) {
3370 | ObjectPtr* p = reinterpret_cast<ObjectPtr*>( |
3371 | reinterpret_cast<uword>(instance->ptr()) + offset); |
3372 | *p = Object::null(); |
3373 | offset += kWordSize; |
3374 | } |
3375 | ASSERT(offset == instance_size); |
3376 | } |
3377 | } |
3378 | |
3379 | private: |
3380 | const intptr_t cid_; |
3381 | intptr_t next_field_offset_in_words_; |
3382 | intptr_t instance_size_in_words_; |
3383 | }; |
3384 | |
3385 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3386 | class LibraryPrefixSerializationCluster : public SerializationCluster { |
3387 | public: |
  LibraryPrefixSerializationCluster() : SerializationCluster("LibraryPrefix") {}
3389 | ~LibraryPrefixSerializationCluster() {} |
3390 | |
3391 | void Trace(Serializer* s, ObjectPtr object) { |
3392 | LibraryPrefixPtr prefix = LibraryPrefix::RawCast(object); |
3393 | objects_.Add(prefix); |
3394 | PushFromTo(prefix); |
3395 | } |
3396 | |
3397 | void WriteAlloc(Serializer* s) { |
3398 | s->WriteCid(kLibraryPrefixCid); |
3399 | const intptr_t count = objects_.length(); |
3400 | s->WriteUnsigned(count); |
3401 | for (intptr_t i = 0; i < count; i++) { |
3402 | LibraryPrefixPtr prefix = objects_[i]; |
3403 | s->AssignRef(prefix); |
3404 | } |
3405 | } |
3406 | |
3407 | void WriteFill(Serializer* s) { |
3408 | const intptr_t count = objects_.length(); |
3409 | for (intptr_t i = 0; i < count; i++) { |
3410 | LibraryPrefixPtr prefix = objects_[i]; |
3411 | AutoTraceObject(prefix); |
3412 | WriteFromTo(prefix); |
3413 | s->Write<uint16_t>(prefix->ptr()->num_imports_); |
3414 | s->Write<bool>(prefix->ptr()->is_deferred_load_); |
3415 | } |
3416 | } |
3417 | |
3418 | private: |
3419 | GrowableArray<LibraryPrefixPtr> objects_; |
3420 | }; |
3421 | #endif // !DART_PRECOMPILED_RUNTIME |
3422 | |
3423 | class LibraryPrefixDeserializationCluster : public DeserializationCluster { |
3424 | public: |
3425 | LibraryPrefixDeserializationCluster() {} |
3426 | ~LibraryPrefixDeserializationCluster() {} |
3427 | |
3428 | void ReadAlloc(Deserializer* d) { |
3429 | start_index_ = d->next_index(); |
3430 | PageSpace* old_space = d->heap()->old_space(); |
3431 | const intptr_t count = d->ReadUnsigned(); |
3432 | for (intptr_t i = 0; i < count; i++) { |
3433 | d->AssignRef( |
3434 | AllocateUninitialized(old_space, LibraryPrefix::InstanceSize())); |
3435 | } |
3436 | stop_index_ = d->next_index(); |
3437 | } |
3438 | |
3439 | void ReadFill(Deserializer* d) { |
3440 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3441 | LibraryPrefixPtr prefix = static_cast<LibraryPrefixPtr>(d->Ref(id)); |
3442 | Deserializer::InitializeHeader(prefix, kLibraryPrefixCid, |
3443 | LibraryPrefix::InstanceSize()); |
3444 | ReadFromTo(prefix); |
3445 | prefix->ptr()->num_imports_ = d->Read<uint16_t>(); |
3446 | prefix->ptr()->is_deferred_load_ = d->Read<bool>(); |
3447 | prefix->ptr()->is_loaded_ = !prefix->ptr()->is_deferred_load_; |
3448 | } |
3449 | } |
3450 | }; |
3451 | |
3452 | // Used to pack nullability into other serialized values. |
3453 | static constexpr intptr_t kNullabilityBitSize = 2; |
3454 | static constexpr intptr_t kNullabilityBitMask = (1 << kNullabilityBitSize) - 1; |
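
// For example (illustrative values): a type with type_state_ == 2 and
// nullability_ == 1 is packed below as
// combined == (2 << kNullabilityBitSize) | 1 == 9, and unpacked as
// type_state_ = combined >> kNullabilityBitSize and
// nullability_ = combined & kNullabilityBitMask.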
3455 | |
3456 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3457 | class TypeSerializationCluster : public SerializationCluster { |
3458 | public: |
  TypeSerializationCluster() : SerializationCluster("Type") {}
3460 | ~TypeSerializationCluster() {} |
3461 | |
3462 | void Trace(Serializer* s, ObjectPtr object) { |
3463 | TypePtr type = Type::RawCast(object); |
3464 | if (type->ptr()->IsCanonical()) { |
3465 | canonical_objects_.Add(type); |
3466 | } else { |
3467 | objects_.Add(type); |
3468 | } |
3469 | |
3470 | PushFromTo(type); |
3471 | |
3472 | if (type->ptr()->type_class_id_->IsHeapObject()) { |
3473 | // Type class is still an unresolved class. |
3474 | UNREACHABLE(); |
3475 | } |
3476 | |
3477 | SmiPtr raw_type_class_id = Smi::RawCast(type->ptr()->type_class_id_); |
3478 | ClassPtr type_class = |
3479 | s->isolate()->class_table()->At(Smi::Value(raw_type_class_id)); |
3480 | s->Push(type_class); |
3481 | } |
3482 | |
3483 | void WriteAlloc(Serializer* s) { |
3484 | s->WriteCid(kTypeCid); |
3485 | intptr_t count = canonical_objects_.length(); |
3486 | s->WriteUnsigned(count); |
3487 | for (intptr_t i = 0; i < count; i++) { |
3488 | TypePtr type = canonical_objects_[i]; |
3489 | s->AssignRef(type); |
3490 | } |
3491 | count = objects_.length(); |
3492 | s->WriteUnsigned(count); |
3493 | for (intptr_t i = 0; i < count; i++) { |
3494 | TypePtr type = objects_[i]; |
3495 | s->AssignRef(type); |
3496 | } |
3497 | } |
3498 | |
3499 | void WriteFill(Serializer* s) { |
3500 | intptr_t count = canonical_objects_.length(); |
3501 | for (intptr_t i = 0; i < count; i++) { |
3502 | WriteType(s, canonical_objects_[i]); |
3503 | } |
3504 | count = objects_.length(); |
3505 | for (intptr_t i = 0; i < count; i++) { |
3506 | WriteType(s, objects_[i]); |
3507 | } |
3508 | } |
3509 | |
3510 | private: |
3511 | void WriteType(Serializer* s, TypePtr type) { |
3512 | AutoTraceObject(type); |
3513 | WriteFromTo(type); |
3514 | s->WriteTokenPosition(type->ptr()->token_pos_); |
3515 | ASSERT(type->ptr()->type_state_ < (1 << TypeLayout::kTypeStateBitSize)); |
3516 | ASSERT(type->ptr()->nullability_ < (1 << kNullabilityBitSize)); |
3517 | static_assert(TypeLayout::kTypeStateBitSize + kNullabilityBitSize <= |
3518 | kBitsPerByte * sizeof(uint8_t), |
3519 | "Cannot pack type_state_ and nullability_ into a uint8_t" ); |
3520 | const uint8_t combined = (type->ptr()->type_state_ << kNullabilityBitSize) | |
3521 | type->ptr()->nullability_; |
3522 | ASSERT_EQUAL(type->ptr()->type_state_, combined >> kNullabilityBitSize); |
3523 | ASSERT_EQUAL(type->ptr()->nullability_, combined & kNullabilityBitMask); |
3524 | s->Write<uint8_t>(combined); |
3525 | } |
3526 | |
3527 | GrowableArray<TypePtr> canonical_objects_; |
3528 | GrowableArray<TypePtr> objects_; |
3529 | }; |
3530 | #endif // !DART_PRECOMPILED_RUNTIME |
3531 | |
3532 | class TypeDeserializationCluster : public DeserializationCluster { |
3533 | public: |
3534 | TypeDeserializationCluster() {} |
3535 | ~TypeDeserializationCluster() {} |
3536 | |
3537 | void ReadAlloc(Deserializer* d) { |
3538 | canonical_start_index_ = d->next_index(); |
3539 | PageSpace* old_space = d->heap()->old_space(); |
3540 | intptr_t count = d->ReadUnsigned(); |
3541 | for (intptr_t i = 0; i < count; i++) { |
3542 | d->AssignRef(AllocateUninitialized(old_space, Type::InstanceSize())); |
3543 | } |
3544 | canonical_stop_index_ = d->next_index(); |
3545 | |
3546 | start_index_ = d->next_index(); |
3547 | count = d->ReadUnsigned(); |
3548 | for (intptr_t i = 0; i < count; i++) { |
3549 | d->AssignRef(AllocateUninitialized(old_space, Type::InstanceSize())); |
3550 | } |
3551 | stop_index_ = d->next_index(); |
3552 | } |
3553 | |
3554 | void ReadFill(Deserializer* d) { |
3555 | for (intptr_t id = canonical_start_index_; id < canonical_stop_index_; |
3556 | id++) { |
3557 | TypePtr type = static_cast<TypePtr>(d->Ref(id)); |
3558 | ReadType(d, type, /*is_canonical=*/true); |
3559 | } |
3560 | |
3561 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3562 | TypePtr type = static_cast<TypePtr>(d->Ref(id)); |
3563 | ReadType(d, type, /*is_canonical=*/false); |
3564 | } |
3565 | } |
3566 | |
3567 | void PostLoad(Deserializer* d, const Array& refs) { |
3568 | Type& type = Type::Handle(d->zone()); |
3569 | Code& stub = Code::Handle(d->zone()); |
3570 | |
3571 | if (Snapshot::IncludesCode(d->kind())) { |
3572 | for (intptr_t id = canonical_start_index_; id < canonical_stop_index_; |
3573 | id++) { |
3574 | type ^= refs.At(id); |
3575 | stub = type.type_test_stub(); |
3576 | type.SetTypeTestingStub(stub); // Update type_test_stub_entry_point_ |
3577 | } |
3578 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3579 | type ^= refs.At(id); |
3580 | stub = type.type_test_stub(); |
3581 | type.SetTypeTestingStub(stub); // Update type_test_stub_entry_point_ |
3582 | } |
3583 | } else { |
3584 | for (intptr_t id = canonical_start_index_; id < canonical_stop_index_; |
3585 | id++) { |
3586 | type ^= refs.At(id); |
3587 | stub = TypeTestingStubGenerator::DefaultCodeForType(type); |
3588 | type.SetTypeTestingStub(stub); |
3589 | } |
3590 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3591 | type ^= refs.At(id); |
3592 | stub = TypeTestingStubGenerator::DefaultCodeForType(type); |
3593 | type.SetTypeTestingStub(stub); |
3594 | } |
3595 | } |
3596 | } |
3597 | |
3598 | private: |
3599 | void ReadType(Deserializer* d, TypePtr type, bool is_canonical) { |
3600 | Deserializer::InitializeHeader(type, kTypeCid, Type::InstanceSize(), |
3601 | is_canonical); |
3602 | ReadFromTo(type); |
3603 | type->ptr()->token_pos_ = d->ReadTokenPosition(); |
3604 | const uint8_t combined = d->Read<uint8_t>(); |
3605 | type->ptr()->type_state_ = combined >> kNullabilityBitSize; |
3606 | type->ptr()->nullability_ = combined & kNullabilityBitMask; |
3607 | } |
3608 | |
3609 | intptr_t canonical_start_index_; |
3610 | intptr_t canonical_stop_index_; |
3611 | }; |
3612 | |
3613 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3614 | class TypeRefSerializationCluster : public SerializationCluster { |
3615 | public: |
  TypeRefSerializationCluster() : SerializationCluster("TypeRef") {}
3617 | ~TypeRefSerializationCluster() {} |
3618 | |
3619 | void Trace(Serializer* s, ObjectPtr object) { |
3620 | TypeRefPtr type = TypeRef::RawCast(object); |
3621 | objects_.Add(type); |
3622 | PushFromTo(type); |
3623 | } |
3624 | |
3625 | void WriteAlloc(Serializer* s) { |
3626 | s->WriteCid(kTypeRefCid); |
3627 | const intptr_t count = objects_.length(); |
3628 | s->WriteUnsigned(count); |
3629 | for (intptr_t i = 0; i < count; i++) { |
3630 | TypeRefPtr type = objects_[i]; |
3631 | s->AssignRef(type); |
3632 | } |
3633 | } |
3634 | |
3635 | void WriteFill(Serializer* s) { |
3636 | const intptr_t count = objects_.length(); |
3637 | for (intptr_t i = 0; i < count; i++) { |
3638 | TypeRefPtr type = objects_[i]; |
3639 | AutoTraceObject(type); |
3640 | WriteFromTo(type); |
3641 | } |
3642 | } |
3643 | |
3644 | private: |
3645 | GrowableArray<TypeRefPtr> objects_; |
3646 | }; |
3647 | #endif // !DART_PRECOMPILED_RUNTIME |
3648 | |
3649 | class TypeRefDeserializationCluster : public DeserializationCluster { |
3650 | public: |
3651 | TypeRefDeserializationCluster() {} |
3652 | ~TypeRefDeserializationCluster() {} |
3653 | |
3654 | void ReadAlloc(Deserializer* d) { |
3655 | start_index_ = d->next_index(); |
3656 | PageSpace* old_space = d->heap()->old_space(); |
3657 | const intptr_t count = d->ReadUnsigned(); |
3658 | for (intptr_t i = 0; i < count; i++) { |
3659 | d->AssignRef(AllocateUninitialized(old_space, TypeRef::InstanceSize())); |
3660 | } |
3661 | stop_index_ = d->next_index(); |
3662 | } |
3663 | |
3664 | void ReadFill(Deserializer* d) { |
3665 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3666 | TypeRefPtr type = static_cast<TypeRefPtr>(d->Ref(id)); |
3667 | Deserializer::InitializeHeader(type, kTypeRefCid, |
3668 | TypeRef::InstanceSize()); |
3669 | ReadFromTo(type); |
3670 | } |
3671 | } |
3672 | |
3673 | void PostLoad(Deserializer* d, const Array& refs) { |
3674 | TypeRef& type_ref = TypeRef::Handle(d->zone()); |
3675 | Code& stub = Code::Handle(d->zone()); |
3676 | |
3677 | if (Snapshot::IncludesCode(d->kind())) { |
3678 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3679 | type_ref ^= refs.At(id); |
3680 | stub = type_ref.type_test_stub(); |
3681 | type_ref.SetTypeTestingStub( |
3682 | stub); // Update type_test_stub_entry_point_ |
3683 | } |
3684 | } else { |
3685 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3686 | type_ref ^= refs.At(id); |
3687 | stub = TypeTestingStubGenerator::DefaultCodeForType(type_ref); |
3688 | type_ref.SetTypeTestingStub(stub); |
3689 | } |
3690 | } |
3691 | } |
3692 | }; |
3693 | |
3694 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3695 | class TypeParameterSerializationCluster : public SerializationCluster { |
3696 | public: |
  TypeParameterSerializationCluster() : SerializationCluster("TypeParameter") {}
3698 | ~TypeParameterSerializationCluster() {} |
3699 | |
3700 | void Trace(Serializer* s, ObjectPtr object) { |
3701 | TypeParameterPtr type = TypeParameter::RawCast(object); |
3702 | if (type->ptr()->IsCanonical()) { |
3703 | canonical_objects_.Add(type); |
3704 | } else { |
3705 | objects_.Add(type); |
3706 | } |
3707 | |
3708 | PushFromTo(type); |
3709 | } |
3710 | |
3711 | void WriteAlloc(Serializer* s) { |
3712 | s->WriteCid(kTypeParameterCid); |
3713 | intptr_t count = canonical_objects_.length(); |
3714 | s->WriteUnsigned(count); |
3715 | for (intptr_t i = 0; i < count; i++) { |
3716 | TypeParameterPtr type = canonical_objects_[i]; |
3717 | s->AssignRef(type); |
3718 | } |
3719 | count = objects_.length(); |
3720 | s->WriteUnsigned(count); |
3721 | for (intptr_t i = 0; i < count; i++) { |
3722 | TypeParameterPtr type = objects_[i]; |
3723 | s->AssignRef(type); |
3724 | } |
3725 | } |
3726 | |
3727 | void WriteFill(Serializer* s) { |
3728 | intptr_t count = canonical_objects_.length(); |
3729 | for (intptr_t i = 0; i < count; i++) { |
3730 | WriteTypeParameter(s, canonical_objects_[i]); |
3731 | } |
3732 | count = objects_.length(); |
3733 | for (intptr_t i = 0; i < count; i++) { |
3734 | WriteTypeParameter(s, objects_[i]); |
3735 | } |
3736 | } |
3737 | |
3738 | private: |
3739 | void WriteTypeParameter(Serializer* s, TypeParameterPtr type) { |
3740 | AutoTraceObject(type); |
3741 | WriteFromTo(type); |
3742 | s->Write<int32_t>(type->ptr()->parameterized_class_id_); |
3743 | s->WriteTokenPosition(type->ptr()->token_pos_); |
3744 | s->Write<int16_t>(type->ptr()->index_); |
3745 | ASSERT(type->ptr()->flags_ < (1 << TypeParameterLayout::kFlagsBitSize)); |
3746 | ASSERT(type->ptr()->nullability_ < (1 << kNullabilityBitSize)); |
3747 | static_assert(TypeParameterLayout::kFlagsBitSize + kNullabilityBitSize <= |
3748 | kBitsPerByte * sizeof(uint8_t), |
3749 | "Cannot pack flags_ and nullability_ into a uint8_t" ); |
3750 | const uint8_t combined = (type->ptr()->flags_ << kNullabilityBitSize) | |
3751 | type->ptr()->nullability_; |
3752 | ASSERT_EQUAL(type->ptr()->flags_, combined >> kNullabilityBitSize); |
3753 | ASSERT_EQUAL(type->ptr()->nullability_, combined & kNullabilityBitMask); |
3754 | s->Write<uint8_t>(combined); |
3755 | } |
3756 | |
3757 | GrowableArray<TypeParameterPtr> canonical_objects_; |
3758 | GrowableArray<TypeParameterPtr> objects_; |
3759 | }; |
3760 | #endif // !DART_PRECOMPILED_RUNTIME |
3761 | |
3762 | class TypeParameterDeserializationCluster : public DeserializationCluster { |
3763 | public: |
3764 | TypeParameterDeserializationCluster() {} |
3765 | ~TypeParameterDeserializationCluster() {} |
3766 | |
3767 | void ReadAlloc(Deserializer* d) { |
3768 | canonical_start_index_ = d->next_index(); |
3769 | PageSpace* old_space = d->heap()->old_space(); |
3770 | intptr_t count = d->ReadUnsigned(); |
3771 | for (intptr_t i = 0; i < count; i++) { |
3772 | d->AssignRef( |
3773 | AllocateUninitialized(old_space, TypeParameter::InstanceSize())); |
3774 | } |
3775 | canonical_stop_index_ = d->next_index(); |
3776 | |
3777 | start_index_ = d->next_index(); |
3778 | count = d->ReadUnsigned(); |
3779 | for (intptr_t i = 0; i < count; i++) { |
3780 | d->AssignRef( |
3781 | AllocateUninitialized(old_space, TypeParameter::InstanceSize())); |
3782 | } |
3783 | stop_index_ = d->next_index(); |
3784 | } |
3785 | |
3786 | void ReadFill(Deserializer* d) { |
3787 | for (intptr_t id = canonical_start_index_; id < canonical_stop_index_; |
3788 | id++) { |
3789 | TypeParameterPtr type = static_cast<TypeParameterPtr>(d->Ref(id)); |
      ReadTypeParameter(d, type, /*is_canonical=*/true);
3791 | } |
3792 | |
3793 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3794 | TypeParameterPtr type = static_cast<TypeParameterPtr>(d->Ref(id)); |
      ReadTypeParameter(d, type, /*is_canonical=*/false);
3796 | } |
3797 | } |
3798 | |
3799 | void PostLoad(Deserializer* d, const Array& refs) { |
3800 | TypeParameter& type_param = TypeParameter::Handle(d->zone()); |
3801 | Code& stub = Code::Handle(d->zone()); |
3802 | |
3803 | if (Snapshot::IncludesCode(d->kind())) { |
3804 | for (intptr_t id = canonical_start_index_; id < canonical_stop_index_; |
3805 | id++) { |
3806 | type_param ^= refs.At(id); |
3807 | stub = type_param.type_test_stub(); |
3808 | type_param.SetTypeTestingStub( |
3809 | stub); // Update type_test_stub_entry_point_ |
3810 | } |
3811 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3812 | type_param ^= refs.At(id); |
3813 | stub = type_param.type_test_stub(); |
3814 | type_param.SetTypeTestingStub( |
3815 | stub); // Update type_test_stub_entry_point_ |
3816 | } |
3817 | } else { |
3818 | for (intptr_t id = canonical_start_index_; id < canonical_stop_index_; |
3819 | id++) { |
3820 | type_param ^= refs.At(id); |
3821 | stub = TypeTestingStubGenerator::DefaultCodeForType(type_param); |
3822 | type_param.SetTypeTestingStub(stub); |
3823 | } |
3824 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3825 | type_param ^= refs.At(id); |
3826 | stub = TypeTestingStubGenerator::DefaultCodeForType(type_param); |
3827 | type_param.SetTypeTestingStub(stub); |
3828 | } |
3829 | } |
3830 | } |
3831 | |
3832 | private: |
3833 | void ReadTypeParameter(Deserializer* d, |
3834 | TypeParameterPtr type, |
3835 | bool is_canonical) { |
3836 | Deserializer::InitializeHeader(type, kTypeParameterCid, |
3837 | TypeParameter::InstanceSize(), is_canonical); |
3838 | ReadFromTo(type); |
3839 | type->ptr()->parameterized_class_id_ = d->Read<int32_t>(); |
3840 | type->ptr()->token_pos_ = d->ReadTokenPosition(); |
3841 | type->ptr()->index_ = d->Read<int16_t>(); |
3842 | const uint8_t combined = d->Read<uint8_t>(); |
3843 | type->ptr()->flags_ = combined >> kNullabilityBitSize; |
3844 | type->ptr()->nullability_ = combined & kNullabilityBitMask; |
3845 | } |
3846 | |
3847 | intptr_t canonical_start_index_; |
3848 | intptr_t canonical_stop_index_; |
3849 | }; |
3850 | |
3851 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3852 | class ClosureSerializationCluster : public SerializationCluster { |
3853 | public: |
  ClosureSerializationCluster() : SerializationCluster("Closure") {}
3855 | ~ClosureSerializationCluster() {} |
3856 | |
3857 | void Trace(Serializer* s, ObjectPtr object) { |
3858 | ClosurePtr closure = Closure::RawCast(object); |
3859 | objects_.Add(closure); |
3860 | PushFromTo(closure); |
3861 | } |
3862 | |
3863 | void WriteAlloc(Serializer* s) { |
3864 | s->WriteCid(kClosureCid); |
3865 | const intptr_t count = objects_.length(); |
3866 | s->WriteUnsigned(count); |
3867 | for (intptr_t i = 0; i < count; i++) { |
3868 | ClosurePtr closure = objects_[i]; |
3869 | s->AssignRef(closure); |
3870 | } |
3871 | } |
3872 | |
3873 | void WriteFill(Serializer* s) { |
3874 | const intptr_t count = objects_.length(); |
3875 | for (intptr_t i = 0; i < count; i++) { |
3876 | ClosurePtr closure = objects_[i]; |
3877 | AutoTraceObject(closure); |
3878 | s->Write<bool>(closure->ptr()->IsCanonical()); |
3879 | WriteFromTo(closure); |
3880 | } |
3881 | } |
3882 | |
3883 | private: |
3884 | GrowableArray<ClosurePtr> objects_; |
3885 | }; |
3886 | #endif // !DART_PRECOMPILED_RUNTIME |
3887 | |
3888 | class ClosureDeserializationCluster : public DeserializationCluster { |
3889 | public: |
3890 | ClosureDeserializationCluster() {} |
3891 | ~ClosureDeserializationCluster() {} |
3892 | |
3893 | void ReadAlloc(Deserializer* d) { |
3894 | start_index_ = d->next_index(); |
3895 | PageSpace* old_space = d->heap()->old_space(); |
3896 | const intptr_t count = d->ReadUnsigned(); |
3897 | for (intptr_t i = 0; i < count; i++) { |
3898 | d->AssignRef(AllocateUninitialized(old_space, Closure::InstanceSize())); |
3899 | } |
3900 | stop_index_ = d->next_index(); |
3901 | } |
3902 | |
3903 | void ReadFill(Deserializer* d) { |
3904 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
3905 | ClosurePtr closure = static_cast<ClosurePtr>(d->Ref(id)); |
3906 | bool is_canonical = d->Read<bool>(); |
3907 | Deserializer::InitializeHeader(closure, kClosureCid, |
3908 | Closure::InstanceSize(), is_canonical); |
3909 | ReadFromTo(closure); |
3910 | } |
3911 | } |
3912 | }; |
3913 | |
3914 | #if !defined(DART_PRECOMPILED_RUNTIME) |
3915 | class MintSerializationCluster : public SerializationCluster { |
3916 | public: |
  MintSerializationCluster() : SerializationCluster("int") {}
3918 | ~MintSerializationCluster() {} |
3919 | |
3920 | void Trace(Serializer* s, ObjectPtr object) { |
3921 | if (!object->IsHeapObject()) { |
3922 | SmiPtr smi = Smi::RawCast(object); |
3923 | smis_.Add(smi); |
3924 | } else { |
3925 | MintPtr mint = Mint::RawCast(object); |
3926 | mints_.Add(mint); |
3927 | } |
3928 | } |
3929 | |
3930 | void WriteAlloc(Serializer* s) { |
3931 | s->WriteCid(kMintCid); |
3932 | |
3933 | s->WriteUnsigned(smis_.length() + mints_.length()); |
3934 | for (intptr_t i = 0; i < smis_.length(); i++) { |
3935 | SmiPtr smi = smis_[i]; |
3936 | s->AssignRef(smi); |
3937 | AutoTraceObject(smi); |
3938 | s->Write<bool>(true); |
3939 | s->Write<int64_t>(Smi::Value(smi)); |
3940 | } |
3941 | for (intptr_t i = 0; i < mints_.length(); i++) { |
3942 | MintPtr mint = mints_[i]; |
3943 | s->AssignRef(mint); |
3944 | AutoTraceObject(mint); |
3945 | s->Write<bool>(mint->ptr()->IsCanonical()); |
3946 | s->Write<int64_t>(mint->ptr()->value_); |
3947 | } |
3948 | } |
3949 | |
3950 | void WriteFill(Serializer* s) {} |
3951 | |
3952 | private: |
3953 | GrowableArray<SmiPtr> smis_; |
3954 | GrowableArray<MintPtr> mints_; |
3955 | }; |
3956 | #endif // !DART_PRECOMPILED_RUNTIME |
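
// Smis and Mints share this cluster: each integer is written as a canonical
// bit plus an int64 payload during the alloc phase (WriteFill is empty),
// because on the reading side a Smi requires no heap allocation at all.
// Illustrative example: the value 5 is rebuilt as Smi::New(5), while a value
// outside the target's Smi range (e.g. 1 << 62 on a 64-bit target) is
// materialized as a heap-allocated Mint.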
3957 | |
3958 | class MintDeserializationCluster : public DeserializationCluster { |
3959 | public: |
3960 | MintDeserializationCluster() {} |
3961 | ~MintDeserializationCluster() {} |
3962 | |
3963 | void ReadAlloc(Deserializer* d) { |
3964 | PageSpace* old_space = d->heap()->old_space(); |
3965 | |
3966 | start_index_ = d->next_index(); |
3967 | const intptr_t count = d->ReadUnsigned(); |
3968 | for (intptr_t i = 0; i < count; i++) { |
3969 | bool is_canonical = d->Read<bool>(); |
3970 | int64_t value = d->Read<int64_t>(); |
3971 | if (Smi::IsValid(value)) { |
3972 | d->AssignRef(Smi::New(value)); |
3973 | } else { |
3974 | MintPtr mint = static_cast<MintPtr>( |
3975 | AllocateUninitialized(old_space, Mint::InstanceSize())); |
3976 | Deserializer::InitializeHeader(mint, kMintCid, Mint::InstanceSize(), |
3977 | is_canonical); |
3978 | mint->ptr()->value_ = value; |
3979 | d->AssignRef(mint); |
3980 | } |
3981 | } |
3982 | stop_index_ = d->next_index(); |
3983 | } |
3984 | |
3985 | void ReadFill(Deserializer* d) {} |
3986 | |
3987 | void PostLoad(Deserializer* d, const Array& refs) { |
3988 | const Class& mint_cls = Class::Handle( |
3989 | d->zone(), Isolate::Current()->object_store()->mint_class()); |
3990 | mint_cls.set_constants(Object::empty_array()); |
3991 | Object& number = Object::Handle(d->zone()); |
3992 | for (intptr_t i = start_index_; i < stop_index_; i++) { |
3993 | number = refs.At(i); |
3994 | if (number.IsMint() && number.IsCanonical()) { |
3995 | mint_cls.InsertCanonicalMint(d->zone(), Mint::Cast(number)); |
3996 | } |
3997 | } |
3998 | } |
3999 | }; |
4000 | |
4001 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4002 | class DoubleSerializationCluster : public SerializationCluster { |
4003 | public: |
  DoubleSerializationCluster() : SerializationCluster("double") {}
4005 | ~DoubleSerializationCluster() {} |
4006 | |
4007 | void Trace(Serializer* s, ObjectPtr object) { |
4008 | DoublePtr dbl = Double::RawCast(object); |
4009 | objects_.Add(dbl); |
4010 | } |
4011 | |
4012 | void WriteAlloc(Serializer* s) { |
4013 | s->WriteCid(kDoubleCid); |
4014 | const intptr_t count = objects_.length(); |
4015 | s->WriteUnsigned(count); |
4016 | for (intptr_t i = 0; i < count; i++) { |
4017 | DoublePtr dbl = objects_[i]; |
4018 | s->AssignRef(dbl); |
4019 | } |
4020 | } |
4021 | |
4022 | void WriteFill(Serializer* s) { |
4023 | const intptr_t count = objects_.length(); |
4024 | for (intptr_t i = 0; i < count; i++) { |
4025 | DoublePtr dbl = objects_[i]; |
4026 | AutoTraceObject(dbl); |
4027 | s->Write<bool>(dbl->ptr()->IsCanonical()); |
4028 | s->Write<double>(dbl->ptr()->value_); |
4029 | } |
4030 | } |
4031 | |
4032 | private: |
4033 | GrowableArray<DoublePtr> objects_; |
4034 | }; |
4035 | #endif // !DART_PRECOMPILED_RUNTIME |
4036 | |
4037 | class DoubleDeserializationCluster : public DeserializationCluster { |
4038 | public: |
4039 | DoubleDeserializationCluster() {} |
4040 | ~DoubleDeserializationCluster() {} |
4041 | |
4042 | void ReadAlloc(Deserializer* d) { |
4043 | start_index_ = d->next_index(); |
4044 | PageSpace* old_space = d->heap()->old_space(); |
4045 | const intptr_t count = d->ReadUnsigned(); |
4046 | for (intptr_t i = 0; i < count; i++) { |
4047 | d->AssignRef(AllocateUninitialized(old_space, Double::InstanceSize())); |
4048 | } |
4049 | stop_index_ = d->next_index(); |
4050 | } |
4051 | |
4052 | void ReadFill(Deserializer* d) { |
4053 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4054 | DoublePtr dbl = static_cast<DoublePtr>(d->Ref(id)); |
4055 | bool is_canonical = d->Read<bool>(); |
4056 | Deserializer::InitializeHeader(dbl, kDoubleCid, Double::InstanceSize(), |
4057 | is_canonical); |
4058 | dbl->ptr()->value_ = d->Read<double>(); |
4059 | } |
4060 | } |
4061 | }; |
4062 | |
4063 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4064 | class GrowableObjectArraySerializationCluster : public SerializationCluster { |
4065 | public: |
4066 | GrowableObjectArraySerializationCluster() |
4067 | : SerializationCluster("GrowableObjectArray" ) {} |
4068 | ~GrowableObjectArraySerializationCluster() {} |
4069 | |
4070 | void Trace(Serializer* s, ObjectPtr object) { |
4071 | GrowableObjectArrayPtr array = GrowableObjectArray::RawCast(object); |
4072 | objects_.Add(array); |
4073 | PushFromTo(array); |
4074 | } |
4075 | |
4076 | void WriteAlloc(Serializer* s) { |
4077 | s->WriteCid(kGrowableObjectArrayCid); |
4078 | const intptr_t count = objects_.length(); |
4079 | s->WriteUnsigned(count); |
4080 | for (intptr_t i = 0; i < count; i++) { |
4081 | GrowableObjectArrayPtr array = objects_[i]; |
4082 | s->AssignRef(array); |
4083 | } |
4084 | } |
4085 | |
4086 | void WriteFill(Serializer* s) { |
4087 | const intptr_t count = objects_.length(); |
4088 | for (intptr_t i = 0; i < count; i++) { |
4089 | GrowableObjectArrayPtr array = objects_[i]; |
4090 | AutoTraceObject(array); |
4091 | s->Write<bool>(array->ptr()->IsCanonical()); |
4092 | WriteFromTo(array); |
4093 | } |
4094 | } |
4095 | |
4096 | private: |
4097 | GrowableArray<GrowableObjectArrayPtr> objects_; |
4098 | }; |
4099 | #endif // !DART_PRECOMPILED_RUNTIME |
4100 | |
4101 | class GrowableObjectArrayDeserializationCluster |
4102 | : public DeserializationCluster { |
4103 | public: |
4104 | GrowableObjectArrayDeserializationCluster() {} |
4105 | ~GrowableObjectArrayDeserializationCluster() {} |
4106 | |
4107 | void ReadAlloc(Deserializer* d) { |
4108 | start_index_ = d->next_index(); |
4109 | PageSpace* old_space = d->heap()->old_space(); |
4110 | const intptr_t count = d->ReadUnsigned(); |
4111 | for (intptr_t i = 0; i < count; i++) { |
4112 | d->AssignRef(AllocateUninitialized(old_space, |
4113 | GrowableObjectArray::InstanceSize())); |
4114 | } |
4115 | stop_index_ = d->next_index(); |
4116 | } |
4117 | |
4118 | void ReadFill(Deserializer* d) { |
4119 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4120 | GrowableObjectArrayPtr list = |
4121 | static_cast<GrowableObjectArrayPtr>(d->Ref(id)); |
4122 | bool is_canonical = d->Read<bool>(); |
4123 | Deserializer::InitializeHeader(list, kGrowableObjectArrayCid, |
4124 | GrowableObjectArray::InstanceSize(), |
4125 | is_canonical); |
4126 | ReadFromTo(list); |
4127 | } |
4128 | } |
4129 | }; |
4130 | |
4131 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4132 | class TypedDataSerializationCluster : public SerializationCluster { |
4133 | public: |
4134 | explicit TypedDataSerializationCluster(intptr_t cid) |
4135 | : SerializationCluster("TypedData" ), cid_(cid) {} |
4136 | ~TypedDataSerializationCluster() {} |
4137 | |
4138 | void Trace(Serializer* s, ObjectPtr object) { |
4139 | TypedDataPtr data = TypedData::RawCast(object); |
4140 | objects_.Add(data); |
4141 | } |
4142 | |
4143 | void WriteAlloc(Serializer* s) { |
4144 | s->WriteCid(cid_); |
4145 | const intptr_t count = objects_.length(); |
4146 | s->WriteUnsigned(count); |
4147 | for (intptr_t i = 0; i < count; i++) { |
4148 | TypedDataPtr data = objects_[i]; |
4149 | s->AssignRef(data); |
4150 | AutoTraceObject(data); |
4151 | const intptr_t length = Smi::Value(data->ptr()->length_); |
4152 | s->WriteUnsigned(length); |
4153 | } |
4154 | } |
4155 | |
4156 | void WriteFill(Serializer* s) { |
4157 | const intptr_t count = objects_.length(); |
4158 | intptr_t element_size = TypedData::ElementSizeInBytes(cid_); |
4159 | for (intptr_t i = 0; i < count; i++) { |
4160 | TypedDataPtr data = objects_[i]; |
4161 | AutoTraceObject(data); |
4162 | const intptr_t length = Smi::Value(data->ptr()->length_); |
4163 | s->WriteUnsigned(length); |
4164 | s->Write<bool>(data->ptr()->IsCanonical()); |
4165 | uint8_t* cdata = reinterpret_cast<uint8_t*>(data->ptr()->data()); |
4166 | s->WriteBytes(cdata, length * element_size); |
4167 | } |
4168 | } |
4169 | |
4170 | private: |
4171 | const intptr_t cid_; |
4172 | GrowableArray<TypedDataPtr> objects_; |
4173 | }; |
4174 | #endif // !DART_PRECOMPILED_RUNTIME |
4175 | |
4176 | class TypedDataDeserializationCluster : public DeserializationCluster { |
4177 | public: |
4178 | explicit TypedDataDeserializationCluster(intptr_t cid) : cid_(cid) {} |
4179 | ~TypedDataDeserializationCluster() {} |
4180 | |
4181 | void ReadAlloc(Deserializer* d) { |
4182 | start_index_ = d->next_index(); |
4183 | PageSpace* old_space = d->heap()->old_space(); |
4184 | const intptr_t count = d->ReadUnsigned(); |
4185 | intptr_t element_size = TypedData::ElementSizeInBytes(cid_); |
4186 | for (intptr_t i = 0; i < count; i++) { |
4187 | const intptr_t length = d->ReadUnsigned(); |
4188 | d->AssignRef(AllocateUninitialized( |
4189 | old_space, TypedData::InstanceSize(length * element_size))); |
4190 | } |
4191 | stop_index_ = d->next_index(); |
4192 | } |
4193 | |
4194 | void ReadFill(Deserializer* d) { |
4195 | intptr_t element_size = TypedData::ElementSizeInBytes(cid_); |
4196 | |
4197 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4198 | TypedDataPtr data = static_cast<TypedDataPtr>(d->Ref(id)); |
4199 | const intptr_t length = d->ReadUnsigned(); |
4200 | bool is_canonical = d->Read<bool>(); |
4201 | const intptr_t length_in_bytes = length * element_size; |
4202 | Deserializer::InitializeHeader( |
4203 | data, cid_, TypedData::InstanceSize(length_in_bytes), is_canonical); |
4204 | data->ptr()->length_ = Smi::New(length); |
4205 | data->ptr()->RecomputeDataField(); |
4206 | uint8_t* cdata = reinterpret_cast<uint8_t*>(data->ptr()->data()); |
4207 | d->ReadBytes(cdata, length_in_bytes); |
4208 | } |
4209 | } |
4210 | |
4211 | private: |
4212 | const intptr_t cid_; |
4213 | }; |
4214 | |
4215 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4216 | class TypedDataViewSerializationCluster : public SerializationCluster { |
4217 | public: |
4218 | explicit TypedDataViewSerializationCluster(intptr_t cid) |
4219 | : SerializationCluster("TypedDataView" ), cid_(cid) {} |
4220 | ~TypedDataViewSerializationCluster() {} |
4221 | |
4222 | void Trace(Serializer* s, ObjectPtr object) { |
4223 | TypedDataViewPtr view = TypedDataView::RawCast(object); |
4224 | objects_.Add(view); |
4225 | |
4226 | PushFromTo(view); |
4227 | } |
4228 | |
4229 | void WriteAlloc(Serializer* s) { |
4230 | const intptr_t count = objects_.length(); |
4231 | s->WriteCid(cid_); |
4232 | s->WriteUnsigned(count); |
4233 | for (intptr_t i = 0; i < count; i++) { |
4234 | TypedDataViewPtr view = objects_[i]; |
4235 | s->AssignRef(view); |
4236 | } |
4237 | } |
4238 | |
4239 | void WriteFill(Serializer* s) { |
4240 | const intptr_t count = objects_.length(); |
4241 | for (intptr_t i = 0; i < count; i++) { |
4242 | TypedDataViewPtr view = objects_[i]; |
4243 | AutoTraceObject(view); |
4244 | s->Write<bool>(view->ptr()->IsCanonical()); |
4245 | WriteFromTo(view); |
4246 | } |
4247 | } |
4248 | |
4249 | private: |
4250 | const intptr_t cid_; |
4251 | GrowableArray<TypedDataViewPtr> objects_; |
4252 | }; |
4253 | #endif // !DART_PRECOMPILED_RUNTIME |
4254 | |
4255 | class TypedDataViewDeserializationCluster : public DeserializationCluster { |
4256 | public: |
4257 | explicit TypedDataViewDeserializationCluster(intptr_t cid) : cid_(cid) {} |
4258 | ~TypedDataViewDeserializationCluster() {} |
4259 | |
4260 | void ReadAlloc(Deserializer* d) { |
4261 | start_index_ = d->next_index(); |
4262 | PageSpace* old_space = d->heap()->old_space(); |
4263 | const intptr_t count = d->ReadUnsigned(); |
4264 | for (intptr_t i = 0; i < count; i++) { |
4265 | d->AssignRef( |
4266 | AllocateUninitialized(old_space, TypedDataView::InstanceSize())); |
4267 | } |
4268 | stop_index_ = d->next_index(); |
4269 | } |
4270 | |
4271 | void ReadFill(Deserializer* d) { |
4272 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4273 | TypedDataViewPtr view = static_cast<TypedDataViewPtr>(d->Ref(id)); |
4274 | const bool is_canonical = d->Read<bool>(); |
4275 | Deserializer::InitializeHeader(view, cid_, TypedDataView::InstanceSize(), |
4276 | is_canonical); |
4277 | ReadFromTo(view); |
4278 | } |
4279 | } |
4280 | |
4281 | void PostLoad(Deserializer* d, const Array& refs) { |
4282 | auto& view = TypedDataView::Handle(d->zone()); |
4283 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4284 | view ^= refs.At(id); |
4285 | view.RecomputeDataField(); |
4286 | } |
4287 | } |
4288 | |
4289 | private: |
4290 | const intptr_t cid_; |
4291 | }; |
4292 | |
4293 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4294 | class ExternalTypedDataSerializationCluster : public SerializationCluster { |
4295 | public: |
4296 | explicit ExternalTypedDataSerializationCluster(intptr_t cid) |
4297 | : SerializationCluster("ExternalTypedData" ), cid_(cid) {} |
4298 | ~ExternalTypedDataSerializationCluster() {} |
4299 | |
4300 | void Trace(Serializer* s, ObjectPtr object) { |
4301 | ExternalTypedDataPtr data = ExternalTypedData::RawCast(object); |
4302 | objects_.Add(data); |
4303 | ASSERT(!data->ptr()->IsCanonical()); |
4304 | } |
4305 | |
4306 | void WriteAlloc(Serializer* s) { |
4307 | s->WriteCid(cid_); |
4308 | const intptr_t count = objects_.length(); |
4309 | s->WriteUnsigned(count); |
4310 | for (intptr_t i = 0; i < count; i++) { |
4311 | ExternalTypedDataPtr data = objects_[i]; |
4312 | s->AssignRef(data); |
4313 | } |
4314 | } |
4315 | |
4316 | void WriteFill(Serializer* s) { |
4317 | const intptr_t count = objects_.length(); |
4318 | intptr_t element_size = ExternalTypedData::ElementSizeInBytes(cid_); |
4319 | for (intptr_t i = 0; i < count; i++) { |
4320 | ExternalTypedDataPtr data = objects_[i]; |
4321 | AutoTraceObject(data); |
4322 | const intptr_t length = Smi::Value(data->ptr()->length_); |
4323 | s->WriteUnsigned(length); |
4324 | uint8_t* cdata = reinterpret_cast<uint8_t*>(data->ptr()->data_); |
4325 | s->Align(ExternalTypedData::kDataSerializationAlignment); |
4326 | s->WriteBytes(cdata, length * element_size); |
4327 | } |
4328 | } |
4329 | |
4330 | private: |
4331 | const intptr_t cid_; |
4332 | GrowableArray<ExternalTypedDataPtr> objects_; |
4333 | }; |
4334 | #endif // !DART_PRECOMPILED_RUNTIME |
4335 | |
4336 | class ExternalTypedDataDeserializationCluster : public DeserializationCluster { |
4337 | public: |
4338 | explicit ExternalTypedDataDeserializationCluster(intptr_t cid) : cid_(cid) {} |
4339 | ~ExternalTypedDataDeserializationCluster() {} |
4340 | |
4341 | void ReadAlloc(Deserializer* d) { |
4342 | start_index_ = d->next_index(); |
4343 | PageSpace* old_space = d->heap()->old_space(); |
4344 | const intptr_t count = d->ReadUnsigned(); |
4345 | for (intptr_t i = 0; i < count; i++) { |
4346 | d->AssignRef( |
4347 | AllocateUninitialized(old_space, ExternalTypedData::InstanceSize())); |
4348 | } |
4349 | stop_index_ = d->next_index(); |
4350 | } |
4351 | |
4352 | void ReadFill(Deserializer* d) { |
4353 | intptr_t element_size = ExternalTypedData::ElementSizeInBytes(cid_); |
4354 | |
4355 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4356 | ExternalTypedDataPtr data = static_cast<ExternalTypedDataPtr>(d->Ref(id)); |
4357 | const intptr_t length = d->ReadUnsigned(); |
4358 | Deserializer::InitializeHeader(data, cid_, |
4359 | ExternalTypedData::InstanceSize()); |
4360 | data->ptr()->length_ = Smi::New(length); |
4361 | d->Align(ExternalTypedData::kDataSerializationAlignment); |
4362 | data->ptr()->data_ = const_cast<uint8_t*>(d->CurrentBufferAddress()); |
4363 | d->Advance(length * element_size); |
      // The data pointer set above refers directly into the snapshot buffer,
      // so there is no finalizer to attach and no external size to attribute.
4365 | } |
4366 | } |
4367 | |
4368 | private: |
4369 | const intptr_t cid_; |
4370 | }; |
4371 | |
4372 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4373 | class StackTraceSerializationCluster : public SerializationCluster { |
4374 | public: |
  StackTraceSerializationCluster() : SerializationCluster("StackTrace") {}
4376 | ~StackTraceSerializationCluster() {} |
4377 | |
4378 | void Trace(Serializer* s, ObjectPtr object) { |
4379 | StackTracePtr trace = StackTrace::RawCast(object); |
4380 | objects_.Add(trace); |
4381 | PushFromTo(trace); |
4382 | } |
4383 | |
4384 | void WriteAlloc(Serializer* s) { |
4385 | s->WriteCid(kStackTraceCid); |
4386 | const intptr_t count = objects_.length(); |
4387 | s->WriteUnsigned(count); |
4388 | for (intptr_t i = 0; i < count; i++) { |
4389 | StackTracePtr trace = objects_[i]; |
4390 | s->AssignRef(trace); |
4391 | } |
4392 | } |
4393 | |
4394 | void WriteFill(Serializer* s) { |
4395 | const intptr_t count = objects_.length(); |
4396 | for (intptr_t i = 0; i < count; i++) { |
4397 | StackTracePtr trace = objects_[i]; |
4398 | AutoTraceObject(trace); |
4399 | WriteFromTo(trace); |
4400 | } |
4401 | } |
4402 | |
4403 | private: |
4404 | GrowableArray<StackTracePtr> objects_; |
4405 | }; |
4406 | #endif // !DART_PRECOMPILED_RUNTIME |
4407 | |
4408 | class StackTraceDeserializationCluster : public DeserializationCluster { |
4409 | public: |
4410 | StackTraceDeserializationCluster() {} |
4411 | ~StackTraceDeserializationCluster() {} |
4412 | |
4413 | void ReadAlloc(Deserializer* d) { |
4414 | start_index_ = d->next_index(); |
4415 | PageSpace* old_space = d->heap()->old_space(); |
4416 | const intptr_t count = d->ReadUnsigned(); |
4417 | for (intptr_t i = 0; i < count; i++) { |
4418 | d->AssignRef( |
4419 | AllocateUninitialized(old_space, StackTrace::InstanceSize())); |
4420 | } |
4421 | stop_index_ = d->next_index(); |
4422 | } |
4423 | |
4424 | void ReadFill(Deserializer* d) { |
4425 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4426 | StackTracePtr trace = static_cast<StackTracePtr>(d->Ref(id)); |
4427 | Deserializer::InitializeHeader(trace, kStackTraceCid, |
4428 | StackTrace::InstanceSize()); |
4429 | ReadFromTo(trace); |
4430 | } |
4431 | } |
4432 | }; |
4433 | |
4434 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4435 | class RegExpSerializationCluster : public SerializationCluster { |
4436 | public: |
  RegExpSerializationCluster() : SerializationCluster("RegExp") {}
4438 | ~RegExpSerializationCluster() {} |
4439 | |
4440 | void Trace(Serializer* s, ObjectPtr object) { |
4441 | RegExpPtr regexp = RegExp::RawCast(object); |
4442 | objects_.Add(regexp); |
4443 | PushFromTo(regexp); |
4444 | } |
4445 | |
4446 | void WriteAlloc(Serializer* s) { |
4447 | s->WriteCid(kRegExpCid); |
4448 | const intptr_t count = objects_.length(); |
4449 | s->WriteUnsigned(count); |
4450 | for (intptr_t i = 0; i < count; i++) { |
4451 | RegExpPtr regexp = objects_[i]; |
4452 | s->AssignRef(regexp); |
4453 | } |
4454 | } |
4455 | |
4456 | void WriteFill(Serializer* s) { |
4457 | const intptr_t count = objects_.length(); |
4458 | for (intptr_t i = 0; i < count; i++) { |
4459 | RegExpPtr regexp = objects_[i]; |
4460 | AutoTraceObject(regexp); |
4461 | WriteFromTo(regexp); |
4462 | s->Write<int32_t>(regexp->ptr()->num_one_byte_registers_); |
4463 | s->Write<int32_t>(regexp->ptr()->num_two_byte_registers_); |
4464 | s->Write<int8_t>(regexp->ptr()->type_flags_); |
4465 | } |
4466 | } |
4467 | |
4468 | private: |
4469 | GrowableArray<RegExpPtr> objects_; |
4470 | }; |
4471 | #endif // !DART_PRECOMPILED_RUNTIME |
4472 | |
4473 | class RegExpDeserializationCluster : public DeserializationCluster { |
4474 | public: |
4475 | RegExpDeserializationCluster() {} |
4476 | ~RegExpDeserializationCluster() {} |
4477 | |
4478 | void ReadAlloc(Deserializer* d) { |
4479 | start_index_ = d->next_index(); |
4480 | PageSpace* old_space = d->heap()->old_space(); |
4481 | const intptr_t count = d->ReadUnsigned(); |
4482 | for (intptr_t i = 0; i < count; i++) { |
4483 | d->AssignRef(AllocateUninitialized(old_space, RegExp::InstanceSize())); |
4484 | } |
4485 | stop_index_ = d->next_index(); |
4486 | } |
4487 | |
4488 | void ReadFill(Deserializer* d) { |
4489 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4490 | RegExpPtr regexp = static_cast<RegExpPtr>(d->Ref(id)); |
4491 | Deserializer::InitializeHeader(regexp, kRegExpCid, |
4492 | RegExp::InstanceSize()); |
4493 | ReadFromTo(regexp); |
4494 | regexp->ptr()->num_one_byte_registers_ = d->Read<int32_t>(); |
4495 | regexp->ptr()->num_two_byte_registers_ = d->Read<int32_t>(); |
4496 | regexp->ptr()->type_flags_ = d->Read<int8_t>(); |
4497 | } |
4498 | } |
4499 | }; |
4500 | |
4501 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4502 | class WeakPropertySerializationCluster : public SerializationCluster { |
4503 | public: |
4504 | WeakPropertySerializationCluster() : SerializationCluster("WeakProperty" ) {} |
4505 | ~WeakPropertySerializationCluster() {} |
4506 | |
4507 | void Trace(Serializer* s, ObjectPtr object) { |
4508 | WeakPropertyPtr property = WeakProperty::RawCast(object); |
4509 | objects_.Add(property); |
4510 | PushFromTo(property); |
4511 | } |
4512 | |
4513 | void WriteAlloc(Serializer* s) { |
4514 | s->WriteCid(kWeakPropertyCid); |
4515 | const intptr_t count = objects_.length(); |
4516 | s->WriteUnsigned(count); |
4517 | for (intptr_t i = 0; i < count; i++) { |
4518 | WeakPropertyPtr property = objects_[i]; |
4519 | s->AssignRef(property); |
4520 | } |
4521 | } |
4522 | |
4523 | void WriteFill(Serializer* s) { |
4524 | const intptr_t count = objects_.length(); |
4525 | for (intptr_t i = 0; i < count; i++) { |
4526 | WeakPropertyPtr property = objects_[i]; |
4527 | AutoTraceObject(property); |
4528 | WriteFromTo(property); |
4529 | } |
4530 | } |
4531 | |
4532 | private: |
4533 | GrowableArray<WeakPropertyPtr> objects_; |
4534 | }; |
4535 | #endif // !DART_PRECOMPILED_RUNTIME |
4536 | |
4537 | class WeakPropertyDeserializationCluster : public DeserializationCluster { |
4538 | public: |
4539 | WeakPropertyDeserializationCluster() {} |
4540 | ~WeakPropertyDeserializationCluster() {} |
4541 | |
4542 | void ReadAlloc(Deserializer* d) { |
4543 | start_index_ = d->next_index(); |
4544 | PageSpace* old_space = d->heap()->old_space(); |
4545 | const intptr_t count = d->ReadUnsigned(); |
4546 | for (intptr_t i = 0; i < count; i++) { |
4547 | d->AssignRef( |
4548 | AllocateUninitialized(old_space, WeakProperty::InstanceSize())); |
4549 | } |
4550 | stop_index_ = d->next_index(); |
4551 | } |
4552 | |
4553 | void ReadFill(Deserializer* d) { |
4554 | for (intptr_t id = start_index_; id < stop_index_; id++) { |
4555 | WeakPropertyPtr property = static_cast<WeakPropertyPtr>(d->Ref(id)); |
4556 | Deserializer::InitializeHeader(property, kWeakPropertyCid, |
4557 | WeakProperty::InstanceSize()); |
4558 | ReadFromTo(property); |
4559 | } |
4560 | } |
4561 | }; |
4562 | |
4563 | #if !defined(DART_PRECOMPILED_RUNTIME) |
4564 | class LinkedHashMapSerializationCluster : public SerializationCluster { |
4565 | public: |
4566 | LinkedHashMapSerializationCluster() : SerializationCluster("LinkedHashMap" ) {} |
4567 | ~LinkedHashMapSerializationCluster() {} |
4568 | |
4569 | void Trace(Serializer* s, ObjectPtr object) { |
4570 | LinkedHashMapPtr map = LinkedHashMap::RawCast(object); |
4571 | objects_.Add(map); |
4572 | |
4573 | s->Push(map->ptr()->type_arguments_); |
4574 | |
4575 | intptr_t used_data = Smi::Value(map->ptr()->used_data_); |
4576 | ArrayPtr data_array = map->ptr()->data_; |
4577 | ObjectPtr* data_elements = data_array->ptr()->data(); |
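    // Deleted entries are marked by storing the backing array itself in the
    // key slot, so skip them here.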
    for (intptr_t i = 0; i < used_data; i += 2) {
      ObjectPtr key = data_elements[i];
      if (key != data_array) {
        ObjectPtr value = data_elements[i + 1];
        s->Push(key);
        s->Push(value);
      }
    }
  }

  void WriteAlloc(Serializer* s) {
    s->WriteCid(kLinkedHashMapCid);
    const intptr_t count = objects_.length();
    s->WriteUnsigned(count);
    for (intptr_t i = 0; i < count; i++) {
      LinkedHashMapPtr map = objects_[i];
      s->AssignRef(map);
    }
  }

  void WriteFill(Serializer* s) {
    const intptr_t count = objects_.length();
    for (intptr_t i = 0; i < count; i++) {
      LinkedHashMapPtr map = objects_[i];
      AutoTraceObject(map);
      s->Write<bool>(map->ptr()->IsCanonical());

      WriteField(map, type_arguments_);

      const intptr_t used_data = Smi::Value(map->ptr()->used_data_);
      ASSERT((used_data & 1) == 0);  // Keys + values, so must be even.
      const intptr_t deleted_keys = Smi::Value(map->ptr()->deleted_keys_);

      // Write out the number of (non-deleted) key/value pairs that follow.
      s->Write<int32_t>((used_data >> 1) - deleted_keys);

      ArrayPtr data_array = map->ptr()->data_;
      ObjectPtr* data_elements = data_array->ptr()->data();
      for (intptr_t i = 0; i < used_data; i += 2) {
        ObjectPtr key = data_elements[i];
        if (key != data_array) {
          ObjectPtr value = data_elements[i + 1];
          s->WriteElementRef(key, i);
          s->WriteElementRef(value, i + 1);
        }
      }
    }
  }

 private:
  GrowableArray<LinkedHashMapPtr> objects_;
};
#endif  // !DART_PRECOMPILED_RUNTIME

class LinkedHashMapDeserializationCluster : public DeserializationCluster {
 public:
  LinkedHashMapDeserializationCluster() {}
  ~LinkedHashMapDeserializationCluster() {}

  void ReadAlloc(Deserializer* d) {
    start_index_ = d->next_index();
    PageSpace* old_space = d->heap()->old_space();
    const intptr_t count = d->ReadUnsigned();
    for (intptr_t i = 0; i < count; i++) {
      d->AssignRef(
          AllocateUninitialized(old_space, LinkedHashMap::InstanceSize()));
    }
    stop_index_ = d->next_index();
  }

  void ReadFill(Deserializer* d) {
    PageSpace* old_space = d->heap()->old_space();

    for (intptr_t id = start_index_; id < stop_index_; id++) {
      LinkedHashMapPtr map = static_cast<LinkedHashMapPtr>(d->Ref(id));
      bool is_canonical = d->Read<bool>();
      Deserializer::InitializeHeader(
          map, kLinkedHashMapCid, LinkedHashMap::InstanceSize(), is_canonical);

      map->ptr()->type_arguments_ = static_cast<TypeArgumentsPtr>(d->ReadRef());

      // TODO(rmacnak): Reserve ref ids and co-allocate in ReadAlloc.
      intptr_t pairs = d->Read<int32_t>();
      intptr_t used_data = pairs << 1;
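      // Size the backing array to the next power of two that holds all
      // key/value slots, clamped below by the initial index size.
      // Illustrative example: pairs == 3 gives used_data == 6, so data_size
      // is Utils::Maximum(RoundUpToPowerOfTwo(6) == 8, kInitialIndexSize).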
      intptr_t data_size = Utils::Maximum(
          Utils::RoundUpToPowerOfTwo(used_data),
          static_cast<uintptr_t>(LinkedHashMap::kInitialIndexSize));

      ArrayPtr data = static_cast<ArrayPtr>(
          AllocateUninitialized(old_space, Array::InstanceSize(data_size)));
      data->ptr()->type_arguments_ = TypeArguments::null();
      data->ptr()->length_ = Smi::New(data_size);
      intptr_t i;
      for (i = 0; i < used_data; i++) {
        data->ptr()->data()[i] = d->ReadRef();
      }
      for (; i < data_size; i++) {
        data->ptr()->data()[i] = Object::null();
      }

      map->ptr()->index_ = TypedData::null();
      map->ptr()->hash_mask_ = Smi::New(0);
      map->ptr()->data_ = data;
      map->ptr()->used_data_ = Smi::New(used_data);
      map->ptr()->deleted_keys_ = Smi::New(0);
    }
  }
};

#if !defined(DART_PRECOMPILED_RUNTIME)
class ArraySerializationCluster : public SerializationCluster {
 public:
  explicit ArraySerializationCluster(intptr_t cid)
      : SerializationCluster("Array"), cid_(cid) {}
  ~ArraySerializationCluster() {}

  void Trace(Serializer* s, ObjectPtr object) {
    ArrayPtr array = Array::RawCast(object);
    objects_.Add(array);

    s->Push(array->ptr()->type_arguments_);
    const intptr_t length = Smi::Value(array->ptr()->length_);
    for (intptr_t i = 0; i < length; i++) {
      s->Push(array->ptr()->data()[i]);
    }
  }

  void WriteAlloc(Serializer* s) {
    s->WriteCid(cid_);
    const intptr_t count = objects_.length();
    s->WriteUnsigned(count);
    for (intptr_t i = 0; i < count; i++) {
      ArrayPtr array = objects_[i];
      s->AssignRef(array);
      AutoTraceObject(array);
      const intptr_t length = Smi::Value(array->ptr()->length_);
      s->WriteUnsigned(length);
    }
  }

  void WriteFill(Serializer* s) {
    const intptr_t count = objects_.length();
    for (intptr_t i = 0; i < count; i++) {
      ArrayPtr array = objects_[i];
      AutoTraceObject(array);
      const intptr_t length = Smi::Value(array->ptr()->length_);
      s->WriteUnsigned(length);
      s->Write<bool>(array->ptr()->IsCanonical());
      WriteField(array, type_arguments_);
      for (intptr_t j = 0; j < length; j++) {
        s->WriteElementRef(array->ptr()->data()[j], j);
      }
    }
  }

 private:
  intptr_t cid_;
  GrowableArray<ArrayPtr> objects_;
};
#endif  // !DART_PRECOMPILED_RUNTIME

class ArrayDeserializationCluster : public DeserializationCluster {
 public:
  explicit ArrayDeserializationCluster(intptr_t cid) : cid_(cid) {}
  ~ArrayDeserializationCluster() {}

  void ReadAlloc(Deserializer* d) {
    start_index_ = d->next_index();
    PageSpace* old_space = d->heap()->old_space();
    const intptr_t count = d->ReadUnsigned();
    for (intptr_t i = 0; i < count; i++) {
      const intptr_t length = d->ReadUnsigned();
      d->AssignRef(
          AllocateUninitialized(old_space, Array::InstanceSize(length)));
    }
    stop_index_ = d->next_index();
  }

  void ReadFill(Deserializer* d) {
    for (intptr_t id = start_index_; id < stop_index_; id++) {
      ArrayPtr array = static_cast<ArrayPtr>(d->Ref(id));
      const intptr_t length = d->ReadUnsigned();
      bool is_canonical = d->Read<bool>();
      Deserializer::InitializeHeader(array, cid_, Array::InstanceSize(length),
                                     is_canonical);
      array->ptr()->type_arguments_ =
          static_cast<TypeArgumentsPtr>(d->ReadRef());
      array->ptr()->length_ = Smi::New(length);
      for (intptr_t j = 0; j < length; j++) {
        array->ptr()->data()[j] = d->ReadRef();
      }
    }
  }

 private:
  const intptr_t cid_;
};

#if !defined(DART_PRECOMPILED_RUNTIME)
class OneByteStringSerializationCluster : public SerializationCluster {
 public:
  OneByteStringSerializationCluster() : SerializationCluster("OneByteString") {}
  ~OneByteStringSerializationCluster() {}

  void Trace(Serializer* s, ObjectPtr object) {
    OneByteStringPtr str = static_cast<OneByteStringPtr>(object);
    objects_.Add(str);
  }

  void WriteAlloc(Serializer* s) {
    s->WriteCid(kOneByteStringCid);
    const intptr_t count = objects_.length();
    s->WriteUnsigned(count);
    for (intptr_t i = 0; i < count; i++) {
      OneByteStringPtr str = objects_[i];
      s->AssignRef(str);
      AutoTraceObject(str);
      const intptr_t length = Smi::Value(str->ptr()->length_);
      s->WriteUnsigned(length);
    }
  }

  void WriteFill(Serializer* s) {
    const intptr_t count = objects_.length();
    for (intptr_t i = 0; i < count; i++) {
      OneByteStringPtr str = objects_[i];
      AutoTraceObject(str);
      const intptr_t length = Smi::Value(str->ptr()->length_);
      s->WriteUnsigned(length);
      s->Write<bool>(str->ptr()->IsCanonical());
      intptr_t hash = String::GetCachedHash(str);
      s->Write<int32_t>(hash);
      s->WriteBytes(str->ptr()->data(), length);
    }
  }

 private:
  GrowableArray<OneByteStringPtr> objects_;
};
#endif  // !DART_PRECOMPILED_RUNTIME

class OneByteStringDeserializationCluster : public DeserializationCluster {
 public:
  OneByteStringDeserializationCluster() {}
  ~OneByteStringDeserializationCluster() {}

  void ReadAlloc(Deserializer* d) {
    start_index_ = d->next_index();
    PageSpace* old_space = d->heap()->old_space();
    const intptr_t count = d->ReadUnsigned();
    for (intptr_t i = 0; i < count; i++) {
      const intptr_t length = d->ReadUnsigned();
      d->AssignRef(AllocateUninitialized(old_space,
                                         OneByteString::InstanceSize(length)));
    }
    stop_index_ = d->next_index();
  }

  void ReadFill(Deserializer* d) {
    for (intptr_t id = start_index_; id < stop_index_; id++) {
      OneByteStringPtr str = static_cast<OneByteStringPtr>(d->Ref(id));
      const intptr_t length = d->ReadUnsigned();
      bool is_canonical = d->Read<bool>();
      Deserializer::InitializeHeader(str, kOneByteStringCid,
                                     OneByteString::InstanceSize(length),
                                     is_canonical);
      str->ptr()->length_ = Smi::New(length);
      String::SetCachedHash(str, d->Read<int32_t>());
      for (intptr_t j = 0; j < length; j++) {
        str->ptr()->data()[j] = d->Read<uint8_t>();
      }
    }
  }
};

#if !defined(DART_PRECOMPILED_RUNTIME)
class TwoByteStringSerializationCluster : public SerializationCluster {
 public:
  TwoByteStringSerializationCluster() : SerializationCluster("TwoByteString") {}
  ~TwoByteStringSerializationCluster() {}

  void Trace(Serializer* s, ObjectPtr object) {
    TwoByteStringPtr str = static_cast<TwoByteStringPtr>(object);
    objects_.Add(str);
  }

  void WriteAlloc(Serializer* s) {
    s->WriteCid(kTwoByteStringCid);
    const intptr_t count = objects_.length();
    s->WriteUnsigned(count);
    for (intptr_t i = 0; i < count; i++) {
      TwoByteStringPtr str = objects_[i];
      s->AssignRef(str);
      AutoTraceObject(str);
      const intptr_t length = Smi::Value(str->ptr()->length_);
      s->WriteUnsigned(length);
    }
  }

  void WriteFill(Serializer* s) {
    const intptr_t count = objects_.length();
    for (intptr_t i = 0; i < count; i++) {
      TwoByteStringPtr str = objects_[i];
      AutoTraceObject(str);
      const intptr_t length = Smi::Value(str->ptr()->length_);
      s->WriteUnsigned(length);
      s->Write<bool>(str->ptr()->IsCanonical());
      intptr_t hash = String::GetCachedHash(str);
      s->Write<int32_t>(hash);
      s->WriteBytes(reinterpret_cast<uint8_t*>(str->ptr()->data()), length * 2);
    }
  }

 private:
  GrowableArray<TwoByteStringPtr> objects_;
};
#endif  // !DART_PRECOMPILED_RUNTIME

class TwoByteStringDeserializationCluster : public DeserializationCluster {
 public:
  TwoByteStringDeserializationCluster() {}
  ~TwoByteStringDeserializationCluster() {}

  void ReadAlloc(Deserializer* d) {
    start_index_ = d->next_index();
    PageSpace* old_space = d->heap()->old_space();
    const intptr_t count = d->ReadUnsigned();
    for (intptr_t i = 0; i < count; i++) {
      const intptr_t length = d->ReadUnsigned();
      d->AssignRef(AllocateUninitialized(old_space,
                                         TwoByteString::InstanceSize(length)));
    }
    stop_index_ = d->next_index();
  }

  void ReadFill(Deserializer* d) {
    for (intptr_t id = start_index_; id < stop_index_; id++) {
      TwoByteStringPtr str = static_cast<TwoByteStringPtr>(d->Ref(id));
      const intptr_t length = d->ReadUnsigned();
      bool is_canonical = d->Read<bool>();
      Deserializer::InitializeHeader(str, kTwoByteStringCid,
                                     TwoByteString::InstanceSize(length),
                                     is_canonical);
      str->ptr()->length_ = Smi::New(length);
      String::SetCachedHash(str, d->Read<int32_t>());
      uint8_t* cdata = reinterpret_cast<uint8_t*>(str->ptr()->data());
      d->ReadBytes(cdata, length * 2);
    }
  }
};

#if !defined(DART_PRECOMPILED_RUNTIME)
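// A pseudo-cluster used only by PrintSnapshotSizes (below) to report
// non-cluster data (instructions, trampolines, the dispatch table) in the
// same table as real clusters; it never takes part in serialization.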
class FakeSerializationCluster : public SerializationCluster {
 public:
  FakeSerializationCluster(const char* name,
                           intptr_t num_objects,
                           intptr_t size)
      : SerializationCluster(name) {
    num_objects_ = num_objects;
    size_ = size;
  }
  ~FakeSerializationCluster() {}

  void Trace(Serializer* s, ObjectPtr object) { UNREACHABLE(); }
  void WriteAlloc(Serializer* s) { UNREACHABLE(); }
  void WriteFill(Serializer* s) { UNREACHABLE(); }
};
#endif  // !DART_PRECOMPILED_RUNTIME

#if defined(DEBUG)
static const int32_t kSectionMarker = 0xABAB;
#endif

Serializer::Serializer(Thread* thread,
                       Snapshot::Kind kind,
                       uint8_t** buffer,
                       ReAlloc alloc,
                       intptr_t initial_size,
                       ImageWriter* image_writer,
                       bool vm,
                       V8SnapshotProfileWriter* profile_writer)
    : ThreadStackResource(thread),
      heap_(thread->isolate()->heap()),
      zone_(thread->zone()),
      kind_(kind),
      stream_(buffer, alloc, initial_size),
      image_writer_(image_writer),
      clusters_by_cid_(NULL),
      stack_(),
      num_cids_(0),
      num_tlc_cids_(0),
      num_base_objects_(0),
      num_written_objects_(0),
      next_ref_index_(1),
      previous_text_offset_(0),
      field_table_(thread->isolate()->field_table()),
      vm_(vm),
      profile_writer_(profile_writer)
#if defined(SNAPSHOT_BACKTRACE)
      ,
      current_parent_(Object::null()),
      parent_pairs_()
#endif
#if defined(DART_PRECOMPILER)
      ,
      deduped_instructions_sources_(zone_)
#endif
{
  num_cids_ = thread->isolate()->class_table()->NumCids();
  num_tlc_cids_ = thread->isolate()->class_table()->NumTopLevelCids();
  clusters_by_cid_ = new SerializationCluster*[num_cids_];
  for (intptr_t i = 0; i < num_cids_; i++) {
    clusters_by_cid_[i] = NULL;
  }
  if (profile_writer_ != nullptr) {
    offsets_table_ = new (zone_) OffsetsTable(zone_);
  }
}

Serializer::~Serializer() {
  delete[] clusters_by_cid_;
}

void Serializer::FlushBytesWrittenToRoot() {
#if defined(DART_PRECOMPILER)
  if (profile_writer_ != nullptr) {
    ASSERT(object_currently_writing_.id_ == 0);
    // All bytes between objects are attributed to the root node.
    profile_writer_->AttributeBytesTo(
        V8SnapshotProfileWriter::ArtificialRootId(),
        stream_.Position() - object_currently_writing_.stream_start_);
    object_currently_writing_.stream_start_ = stream_.Position();
  }
#endif
}

void Serializer::TraceStartWritingObject(const char* type,
                                         ObjectPtr obj,
                                         StringPtr name) {
  if (profile_writer_ == nullptr) return;

  const char* name_str = nullptr;
  if (name != nullptr) {
    REUSABLE_STRING_HANDLESCOPE(thread());
    String& str = reused_string_handle.Handle();
    str = name;
    name_str = str.ToCString();
  }

  TraceStartWritingObject(type, obj, name_str);
}

void Serializer::TraceStartWritingObject(const char* type,
                                         ObjectPtr obj,
                                         const char* name) {
  if (profile_writer_ == nullptr) return;

  intptr_t cid = -1;
  intptr_t id = 0;
  if (obj->IsHeapObject()) {
    id = heap_->GetObjectId(obj);
    cid = obj->GetClassId();
  } else {
    id = smi_ids_.Lookup(Smi::RawCast(obj))->id_;
    cid = Smi::kClassId;
  }
  if (IsArtificialReference(id)) {
    id = -id;
  }
  ASSERT(IsAllocatedReference(id));

  FlushBytesWrittenToRoot();
  object_currently_writing_.object_ = obj;
  object_currently_writing_.id_ = id;
  object_currently_writing_.stream_start_ = stream_.Position();
  object_currently_writing_.cid_ = cid;
  profile_writer_->SetObjectTypeAndName(
      {V8SnapshotProfileWriter::kSnapshot, id}, type, name);
}

void Serializer::TraceEndWritingObject() {
  if (profile_writer_ != nullptr) {
    ASSERT(IsAllocatedReference(object_currently_writing_.id_));
    profile_writer_->AttributeBytesTo(
        {V8SnapshotProfileWriter::kSnapshot, object_currently_writing_.id_},
        stream_.Position() - object_currently_writing_.stream_start_);
    object_currently_writing_ = ProfilingObject();
    object_currently_writing_.stream_start_ = stream_.Position();
  }
}

#if !defined(DART_PRECOMPILED_RUNTIME)
bool Serializer::CreateArtificalNodeIfNeeded(ObjectPtr obj) {
  ASSERT(profile_writer() != nullptr);

  intptr_t id = heap_->GetObjectId(obj);
  if (Serializer::IsAllocatedReference(id)) {
    return false;
  }
  if (Serializer::IsArtificialReference(id)) {
    return true;
  }
  ASSERT(id == Serializer::kUnreachableReference);
  id = AssignArtificialRef(obj);

  const char* type = nullptr;
  StringPtr name_string = nullptr;
  const char* name = nullptr;
  ObjectPtr owner = nullptr;
  const char* owner_ref_name = nullptr;
  switch (obj->GetClassId()) {
    case kFunctionCid: {
      FunctionPtr func = static_cast<FunctionPtr>(obj);
      type = "Function";
      name = FunctionSerializationCluster::MakeDisambiguatedFunctionName(this,
                                                                         func);
      owner_ref_name = "owner_";
      owner = func->ptr()->owner_;
      break;
    }
    case kClassCid: {
      ClassPtr cls = static_cast<ClassPtr>(obj);
      type = "Class";
      name_string = cls->ptr()->name_;
      owner_ref_name = "library_";
      owner = cls->ptr()->library_;
      break;
    }
    case kPatchClassCid: {
      PatchClassPtr patch_cls = static_cast<PatchClassPtr>(obj);
      type = "PatchClass";
      owner_ref_name = "patched_class_";
      owner = patch_cls->ptr()->patched_class_;
      break;
    }
    case kLibraryCid: {
      LibraryPtr lib = static_cast<LibraryPtr>(obj);
      type = "Library";
      name_string = lib->ptr()->url_;
      break;
    }
    default:
      UNREACHABLE();
  }

  if (name_string != nullptr) {
    REUSABLE_STRING_HANDLESCOPE(thread());
    String& str = reused_string_handle.Handle();
    str = name_string;
    name = str.ToCString();
  }

  TraceStartWritingObject(type, obj, name);
  if (owner != nullptr) {
    CreateArtificalNodeIfNeeded(owner);
    AttributePropertyRef(owner, owner_ref_name,
                         /*permit_artificial_ref=*/true);
  }
  TraceEndWritingObject();
  return true;
}
#endif  // !defined(DART_PRECOMPILED_RUNTIME)

const char* Serializer::ReadOnlyObjectType(intptr_t cid) {
  switch (cid) {
    case kPcDescriptorsCid:
      return "PcDescriptors";
    case kCodeSourceMapCid:
      return "CodeSourceMap";
    case kCompressedStackMapsCid:
      return "CompressedStackMaps";
    case kOneByteStringCid:
      return "OneByteString";
    case kTwoByteStringCid:
      return "TwoByteString";
    default:
      return nullptr;
  }
}

SerializationCluster* Serializer::NewClusterForClass(intptr_t cid) {
#if defined(DART_PRECOMPILED_RUNTIME)
  UNREACHABLE();
  return NULL;
#else
  Zone* Z = zone_;
  if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
    Push(isolate()->class_table()->At(cid));
    return new (Z) InstanceSerializationCluster(cid);
  }
  if (IsTypedDataViewClassId(cid)) {
    return new (Z) TypedDataViewSerializationCluster(cid);
  }
  if (IsExternalTypedDataClassId(cid)) {
    return new (Z) ExternalTypedDataSerializationCluster(cid);
  }
  if (IsTypedDataClassId(cid)) {
    return new (Z) TypedDataSerializationCluster(cid);
  }

  if (Snapshot::IncludesCode(kind_)) {
    if (auto const type = ReadOnlyObjectType(cid)) {
      return new (Z) RODataSerializationCluster(Z, type, cid);
    }
  }

  switch (cid) {
    case kClassCid:
      return new (Z) ClassSerializationCluster(num_cids_ + num_tlc_cids_);
    case kTypeArgumentsCid:
      return new (Z) TypeArgumentsSerializationCluster();
    case kPatchClassCid:
      return new (Z) PatchClassSerializationCluster();
    case kFunctionCid:
      return new (Z) FunctionSerializationCluster();
    case kClosureDataCid:
      return new (Z) ClosureDataSerializationCluster();
    case kSignatureDataCid:
      return new (Z) SignatureDataSerializationCluster();
    case kRedirectionDataCid:
      return new (Z) RedirectionDataSerializationCluster();
    case kFfiTrampolineDataCid:
      return new (Z) FfiTrampolineDataSerializationCluster();
    case kFieldCid:
      return new (Z) FieldSerializationCluster();
    case kScriptCid:
      return new (Z) ScriptSerializationCluster();
    case kLibraryCid:
      return new (Z) LibrarySerializationCluster();
    case kNamespaceCid:
      return new (Z) NamespaceSerializationCluster();
    case kKernelProgramInfoCid:
      return new (Z) KernelProgramInfoSerializationCluster();
    case kCodeCid:
      return new (Z) CodeSerializationCluster(heap_);
    case kBytecodeCid:
      return new (Z) BytecodeSerializationCluster();
    case kObjectPoolCid:
      return new (Z) ObjectPoolSerializationCluster();
    case kPcDescriptorsCid:
      return new (Z) PcDescriptorsSerializationCluster();
    case kExceptionHandlersCid:
      return new (Z) ExceptionHandlersSerializationCluster();
    case kContextCid:
      return new (Z) ContextSerializationCluster();
    case kContextScopeCid:
      return new (Z) ContextScopeSerializationCluster();
    case kParameterTypeCheckCid:
      return new (Z) ParameterTypeCheckSerializationCluster();
    case kUnlinkedCallCid:
      return new (Z) UnlinkedCallSerializationCluster();
    case kICDataCid:
      return new (Z) ICDataSerializationCluster();
    case kMegamorphicCacheCid:
      return new (Z) MegamorphicCacheSerializationCluster();
    case kSubtypeTestCacheCid:
      return new (Z) SubtypeTestCacheSerializationCluster();
    case kLoadingUnitCid:
      return new (Z) LoadingUnitSerializationCluster();
    case kLanguageErrorCid:
      return new (Z) LanguageErrorSerializationCluster();
    case kUnhandledExceptionCid:
      return new (Z) UnhandledExceptionSerializationCluster();
    case kLibraryPrefixCid:
      return new (Z) LibraryPrefixSerializationCluster();
    case kTypeCid:
      return new (Z) TypeSerializationCluster();
    case kTypeRefCid:
      return new (Z) TypeRefSerializationCluster();
    case kTypeParameterCid:
      return new (Z) TypeParameterSerializationCluster();
    case kClosureCid:
      return new (Z) ClosureSerializationCluster();
    case kMintCid:
      return new (Z) MintSerializationCluster();
    case kDoubleCid:
      return new (Z) DoubleSerializationCluster();
    case kGrowableObjectArrayCid:
      return new (Z) GrowableObjectArraySerializationCluster();
    case kStackTraceCid:
      return new (Z) StackTraceSerializationCluster();
    case kRegExpCid:
      return new (Z) RegExpSerializationCluster();
    case kWeakPropertyCid:
      return new (Z) WeakPropertySerializationCluster();
    case kLinkedHashMapCid:
      return new (Z) LinkedHashMapSerializationCluster();
    case kArrayCid:
      return new (Z) ArraySerializationCluster(kArrayCid);
    case kImmutableArrayCid:
      return new (Z) ArraySerializationCluster(kImmutableArrayCid);
    case kOneByteStringCid:
      return new (Z) OneByteStringSerializationCluster();
    case kTwoByteStringCid:
      return new (Z) TwoByteStringSerializationCluster();
    case kWeakSerializationReferenceCid:
#if defined(DART_PRECOMPILER)
      ASSERT(kind_ == Snapshot::kFullAOT);
      return new (Z)
          WeakSerializationReferenceSerializationCluster(zone_, heap_);
#endif
    default:
      break;
  }

  // The caller will check for NULL and provide an error with more context than
  // is available here.
  return NULL;
#endif  // !DART_PRECOMPILED_RUNTIME
}

bool Serializer::InCurrentLoadingUnit(ObjectPtr obj, bool record) {
  if (loading_units_ == nullptr) return true;

  intptr_t unit_id = heap_->GetLoadingUnit(obj);
  if (unit_id == WeakTable::kNoValue) {
    // Not found in early assignment. Conservatively choose the root.
    // TODO(41974): Are these always type testing stubs?
    unit_id = LoadingUnit::kRootId;
  }
  if (unit_id != current_loading_unit_id_) {
    if (record) {
      (*loading_units_)[unit_id]->AddDeferredObject(static_cast<CodePtr>(obj));
    }
    return false;
  }
  return true;
}

#if !defined(DART_PRECOMPILED_RUNTIME)
void Serializer::PrepareInstructions(GrowableArray<CodePtr>* code_objects) {
#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
  if ((kind() == Snapshot::kFullAOT) && FLAG_use_bare_instructions) {
    GrowableArray<ImageWriterCommand> writer_commands;
    RelocateCodeObjects(vm_, code_objects, &writer_commands);
    image_writer_->PrepareForSerialization(&writer_commands);
  }
#endif  // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
}

void Serializer::WriteInstructions(InstructionsPtr instr,
                                   uint32_t unchecked_offset,
                                   CodePtr code,
                                   bool deferred) {
  ASSERT(code != Code::null());

  ASSERT(InCurrentLoadingUnit(code) != deferred);
  if (deferred) {
    return;
  }

  const intptr_t offset = image_writer_->GetTextOffsetFor(instr, code);
#if defined(DART_PRECOMPILER)
  if (profile_writer_ != nullptr) {
    ASSERT(IsAllocatedReference(object_currently_writing_.id_));
    const auto offset_space = vm_ ? V8SnapshotProfileWriter::kVmText
                                  : V8SnapshotProfileWriter::kIsolateText;
    const V8SnapshotProfileWriter::ObjectId to_object(offset_space, offset);
    const V8SnapshotProfileWriter::ObjectId from_object(
        V8SnapshotProfileWriter::kSnapshot, object_currently_writing_.id_);
    profile_writer_->AttributeReferenceTo(
        from_object, {to_object, V8SnapshotProfileWriter::Reference::kProperty,
                      profile_writer_->EnsureString("<instructions>")});
  }

  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
    ASSERT(offset != 0);
    RELEASE_ASSERT(offset >= previous_text_offset_);
    const uint32_t delta = offset - previous_text_offset_;
    WriteUnsigned(delta);
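    // Pack the unchecked entry-point offset together with the
    // has-monomorphic-entry bit: the low bit is the flag, the remaining
    // bits are the offset.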
    const uint32_t payload_info =
        (unchecked_offset << 1) | (Code::HasMonomorphicEntry(code) ? 0x1 : 0x0);
    WriteUnsigned(payload_info);
    previous_text_offset_ = offset;
    return;
  }
#endif
  Write<uint32_t>(offset);
  WriteUnsigned(unchecked_offset);
}

void Serializer::TraceDataOffset(uint32_t offset) {
  if (profile_writer_ != nullptr) {
    // ROData cannot be roots.
    ASSERT(IsAllocatedReference(object_currently_writing_.id_));
    auto offset_space = vm_ ? V8SnapshotProfileWriter::kVmData
                            : V8SnapshotProfileWriter::kIsolateData;
    V8SnapshotProfileWriter::ObjectId from_object = {
        V8SnapshotProfileWriter::kSnapshot, object_currently_writing_.id_};
    V8SnapshotProfileWriter::ObjectId to_object = {offset_space, offset};
    // TODO(sjindel): Give this edge a more appropriate type than element
    // (internal, maybe?).
    profile_writer_->AttributeReferenceTo(
        from_object,
        {to_object, V8SnapshotProfileWriter::Reference::kElement, 0});
  }
}

uint32_t Serializer::GetDataOffset(ObjectPtr object) const {
  return image_writer_->GetDataOffsetFor(object);
}

intptr_t Serializer::GetDataSize() const {
  if (image_writer_ == NULL) {
    return 0;
  }
  return image_writer_->data_size();
}

void Serializer::Push(ObjectPtr object) {
  if (!object->IsHeapObject()) {
    SmiPtr smi = Smi::RawCast(object);
    if (smi_ids_.Lookup(smi) == NULL) {
      SmiObjectIdPair pair;
      pair.smi_ = smi;
      pair.id_ = kUnallocatedReference;
      smi_ids_.Insert(pair);
      stack_.Add(object);
      num_written_objects_++;
    }
    return;
  }

  if (object->IsCode() && !Snapshot::IncludesCode(kind_)) {
    return;  // Do not trace, will write null.
  }
#if !defined(DART_PRECOMPILED_RUNTIME)
  if (object->IsBytecode() && !Snapshot::IncludesBytecode(kind_)) {
    return;  // Do not trace, will write null.
  }
#endif  // !DART_PRECOMPILED_RUNTIME

  intptr_t id = heap_->GetObjectId(object);
  if (id == kUnreachableReference) {
    // When discovering the transitive closure of objects reachable from the
    // roots, we do not trace references to [RawInstructions] (e.g. from
    // inside [RawCode]), since [RawInstructions] contains no references and
    // the serialization code uses an [ImageWriter] for instructions.
    if (object->IsInstructions()) {
      UnexpectedObject(object,
                       "Instructions should only be reachable from Code");
    }

    heap_->SetObjectId(object, kUnallocatedReference);
    ASSERT(IsReachableReference(heap_->GetObjectId(object)));
    stack_.Add(object);
    num_written_objects_++;
#if defined(SNAPSHOT_BACKTRACE)
    parent_pairs_.Add(&Object::Handle(zone_, object));
    parent_pairs_.Add(&Object::Handle(zone_, current_parent_));
#endif
  }
}

void Serializer::Trace(ObjectPtr object) {
  intptr_t cid;
  if (!object->IsHeapObject()) {
    // Smis are merged into the Mint cluster because Smis for the writer might
    // become Mints for the reader and vice versa.
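    // For example, 1 << 40 fits in a Smi on a 64-bit host but must be read
    // back as a Mint on a 32-bit target.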
    cid = kMintCid;
  } else {
    cid = object->GetClassId();
  }

  SerializationCluster* cluster = clusters_by_cid_[cid];
  if (cluster == NULL) {
    cluster = NewClusterForClass(cid);
    if (cluster == NULL) {
      UnexpectedObject(object, "No serialization cluster defined");
    }
    clusters_by_cid_[cid] = cluster;
  }
  ASSERT(cluster != NULL);

#if defined(SNAPSHOT_BACKTRACE)
  current_parent_ = object;
#endif

  cluster->Trace(this, object);

#if defined(SNAPSHOT_BACKTRACE)
  current_parent_ = Object::null();
#endif
}

void Serializer::UnexpectedObject(ObjectPtr raw_object, const char* message) {
  // Exit the no safepoint scope so we can allocate while printing.
  while (thread()->no_safepoint_scope_depth() > 0) {
    thread()->DecrementNoSafepointScopeDepth();
  }
  Object& object = Object::Handle(raw_object);
  OS::PrintErr("Unexpected object (%s, %s): 0x%" Px " %s\n", message,
               Snapshot::KindToCString(kind_), static_cast<uword>(object.raw()),
               object.ToCString());
#if defined(SNAPSHOT_BACKTRACE)
  while (!object.IsNull()) {
    object = ParentOf(object);
    OS::PrintErr("referenced by 0x%" Px " %s\n",
                 static_cast<uword>(object.raw()), object.ToCString());
  }
#endif
  OS::Abort();
}

#if defined(SNAPSHOT_BACKTRACE)
ObjectPtr Serializer::ParentOf(const Object& object) {
  for (intptr_t i = 0; i < parent_pairs_.length(); i += 2) {
    if (parent_pairs_[i]->raw() == object.raw()) {
      return parent_pairs_[i + 1]->raw();
    }
  }
  return Object::null();
}
#endif  // SNAPSHOT_BACKTRACE

void Serializer::WriteVersionAndFeatures(bool is_vm_snapshot) {
  const char* expected_version = Version::SnapshotString();
  ASSERT(expected_version != NULL);
  const intptr_t version_len = strlen(expected_version);
  WriteBytes(reinterpret_cast<const uint8_t*>(expected_version), version_len);

  const char* expected_features =
      Dart::FeaturesString(Isolate::Current(), is_vm_snapshot, kind_);
  ASSERT(expected_features != NULL);
  const intptr_t features_len = strlen(expected_features);
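  // The features string is written including its terminating NUL, which
  // serves as the delimiter when the header is read back.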
  WriteBytes(reinterpret_cast<const uint8_t*>(expected_features),
             features_len + 1);
  free(const_cast<char*>(expected_features));
}

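// Comparator used to sort clusters by serialized size, largest first, for
// the verbose size report in PrintSnapshotSizes.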
static int CompareClusters(SerializationCluster* const* a,
                           SerializationCluster* const* b) {
  if ((*a)->size() > (*b)->size()) {
    return -1;
  } else if ((*a)->size() < (*b)->size()) {
    return 1;
  } else {
    return 0;
  }
}

void Serializer::Serialize() {
  while (stack_.length() > 0) {
    Trace(stack_.RemoveLast());
  }

  intptr_t num_clusters = 0;
  for (intptr_t cid = 1; cid < num_cids_; cid++) {
    SerializationCluster* cluster = clusters_by_cid_[cid];
    if (cluster != NULL) {
      num_clusters++;
    }
  }

#if defined(DART_PRECOMPILER)
  // Before we finalize the count of written objects, pick canonical versions
  // of WSR objects that will be serialized and then remove any non-serialized
  // or non-canonical WSR objects from that count.
  if (auto const cluster =
          reinterpret_cast<WeakSerializationReferenceSerializationCluster*>(
              clusters_by_cid_[kWeakSerializationReferenceCid])) {
    cluster->CanonicalizeReferences();
    auto const dropped_count = cluster->DroppedCount();
    ASSERT(dropped_count == 0 || kind() == Snapshot::kFullAOT);
    num_written_objects_ -= dropped_count;
  }
#endif

  intptr_t num_objects = num_base_objects_ + num_written_objects_;
#if defined(ARCH_IS_64_BIT)
  if (!Utils::IsInt(32, num_objects)) {
    FATAL("Ref overflow");
  }
#endif

  WriteUnsigned(num_base_objects_);
  WriteUnsigned(num_objects);
  WriteUnsigned(num_clusters);
  WriteUnsigned(field_table_->NumFieldIds());

  for (intptr_t cid = 1; cid < num_cids_; cid++) {
    SerializationCluster* cluster = clusters_by_cid_[cid];
    if (cluster != NULL) {
      cluster->WriteAndMeasureAlloc(this);
#if defined(DEBUG)
      Write<int32_t>(next_ref_index_);
#endif
    }
  }

  // We should have assigned a ref to every object we pushed.
  ASSERT((next_ref_index_ - 1) == num_objects);

  if (loading_units_ != nullptr) {
    LoadingUnitSerializationData* unit =
        (*loading_units_)[current_loading_unit_id_];
    unit->set_num_objects(num_objects);
  }

#if defined(DART_PRECOMPILER)
  // When writing the snapshot profile, we want to retain some of the program
  // structure information (e.g. about libraries, classes, and functions),
  // even if it was dropped when writing the snapshot itself.
  if (FLAG_write_v8_snapshot_profile_to != nullptr) {
    static_cast<CodeSerializationCluster*>(clusters_by_cid_[kCodeCid])
        ->WriteDroppedOwnersIntoProfile(this);
  }
#endif

  for (intptr_t cid = 1; cid < num_cids_; cid++) {
    SerializationCluster* cluster = clusters_by_cid_[cid];
    if (cluster != NULL) {
      cluster->WriteAndMeasureFill(this);
#if defined(DEBUG)
      Write<int32_t>(kSectionMarker);
#endif
    }
  }
}
#endif  // !defined(DART_PRECOMPILED_RUNTIME)

#if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
// The serialized format of the dispatch table is a sequence of variable-length
// integers (the built-in variable-length integer encoding/decoding of
// the stream). Each encoded integer e is interpreted thus:
//  -kRecentCount .. -1  Pick value from the recent values buffer at index -1-e.
//  0                    Empty (unused) entry.
//  1 .. kMaxRepeat      Repeat previous entry e times.
//  kIndexBase or higher Pick entry point from the object at index e-kIndexBase
//                       in the snapshot code cluster. Also put it in the
//                       recent values buffer at the next round-robin index.
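//
// Illustrative example (not from a real snapshot): with
// kDispatchTableMaxRepeat == 63 and kDispatchTableIndexBase == 64, the table
// [A, A, A, empty, B, A], where A and B are the code objects at cluster
// indices 0 and 3, encodes as 64 (A), 2 (repeat A twice more), 0 (empty),
// 67 (B), -1 (A again, via index 0 of the recent values buffer).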

// Constants for serialization format. Chosen such that repeats and recent
// values are encoded as single bytes in SLEB128 encoding.
static constexpr intptr_t kDispatchTableSpecialEncodingBits = 6;
static constexpr intptr_t kDispatchTableRecentCount =
    1 << kDispatchTableSpecialEncodingBits;
static constexpr intptr_t kDispatchTableRecentMask =
    (1 << kDispatchTableSpecialEncodingBits) - 1;
static constexpr intptr_t kDispatchTableMaxRepeat =
    (1 << kDispatchTableSpecialEncodingBits) - 1;
static constexpr intptr_t kDispatchTableIndexBase = kDispatchTableMaxRepeat + 1;
#endif  // defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)

void Serializer::WriteDispatchTable(const Array& entries) {
#if defined(DART_PRECOMPILER)
  if (kind() != Snapshot::kFullAOT) return;

  const intptr_t bytes_before = bytes_written();
  const intptr_t table_length = entries.IsNull() ? 0 : entries.Length();

  ASSERT(table_length <= compiler::target::kWordMax);
  WriteUnsigned(table_length);
  if (table_length == 0) {
    dispatch_table_size_ = bytes_written() - bytes_before;
    return;
  }

  auto const code_cluster =
      reinterpret_cast<CodeSerializationCluster*>(clusters_by_cid_[kCodeCid]);
  ASSERT(code_cluster != nullptr);
  // Reference IDs in a cluster are allocated sequentially, so we can use the
  // first code object's reference ID to calculate the cluster index.
  const intptr_t first_code_id =
      RefId(code_cluster->discovered_objects()->At(0));
  // The first object in the code cluster must have its reference ID allocated.
  ASSERT(IsAllocatedReference(first_code_id));

  // If instructions can be deduped, the code order table in the deserializer
  // may not contain all Code objects in the snapshot. Thus, we write the ID
  // for the first code object here so we can retrieve it during
  // deserialization and calculate the snapshot ID for Code objects from the
  // cluster index.
  //
  // We could just use the snapshot reference ID of the Code object itself
  // instead of the cluster index and avoid this. However, since entries are
  // SLEB128 encoded, the size delta for serializing the first ID once is less
  // than the size delta of serializing the ID plus kIndexBase for each entry,
  // even when Code objects are allocated before all other non-base objects.
  //
  // We could also map Code objects to the first Code object in the cluster
  // with the same entry point and serialize that ID instead, but that loses
  // information about which Code object was originally referenced.
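  //
  // Illustrative example: if the first code object in the cluster was
  // assigned reference ID 100, a table entry referring to the code object
  // with reference ID 105 is written as kDispatchTableIndexBase + 5.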
  ASSERT(first_code_id <= compiler::target::kWordMax);
  WriteUnsigned(first_code_id);

  CodePtr previous_code = nullptr;
  CodePtr recent[kDispatchTableRecentCount] = {nullptr};
  intptr_t recent_index = 0;
  intptr_t repeat_count = 0;
  for (intptr_t i = 0; i < table_length; i++) {
    auto const code = Code::RawCast(entries.At(i));
    // First, see if we're repeating the previous entry (invalid, recent, or
    // encoded).
    if (code == previous_code) {
      if (++repeat_count == kDispatchTableMaxRepeat) {
        Write(kDispatchTableMaxRepeat);
        repeat_count = 0;
      }
      continue;
    }
    // Emit any outstanding repeat count before handling the new code value.
    if (repeat_count > 0) {
      Write(repeat_count);
      repeat_count = 0;
    }
    previous_code = code;
    // The invalid entry can be repeated, but it is never added to the recent
    // list since it already encodes to a single byte.
    if (code == Code::null()) {
      Write(0);
      continue;
    }
    // Check against the recent entries, and write an encoded reference to
    // the recent entry if found.
    intptr_t found_index = 0;
    for (; found_index < kDispatchTableRecentCount; found_index++) {
      if (recent[found_index] == code) break;
    }
    if (found_index < kDispatchTableRecentCount) {
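      // ~found_index == -1 - found_index, matching the decoder's
      // "recent values buffer at index -1-e" rule described above.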
      Write(~found_index);
      continue;
    }
    // We have a non-repeated, non-recent entry, so encode the reference ID of
    // the code object and emit that.
    auto const object_id = RefId(code);
    // Make sure that this code object has an allocated reference ID.
    ASSERT(IsAllocatedReference(object_id));
    // Use the index within the code cluster, not the snapshot-wide ID.
    auto const encoded = kDispatchTableIndexBase + (object_id - first_code_id);
    ASSERT(encoded <= compiler::target::kWordMax);
    Write(encoded);
    recent[recent_index] = code;
    recent_index = (recent_index + 1) & kDispatchTableRecentMask;
  }
  if (repeat_count > 0) {
    Write(repeat_count);
  }
  dispatch_table_size_ = bytes_written() - bytes_before;
#endif  // defined(DART_PRECOMPILER)
}

void Serializer::PrintSnapshotSizes() {
#if !defined(DART_PRECOMPILED_RUNTIME)
  if (FLAG_print_snapshot_sizes_verbose) {
    OS::PrintErr(
        "                  Cluster   Objs     Size Fraction Cumulative\n");
    GrowableArray<SerializationCluster*> clusters_by_size;
    for (intptr_t cid = 1; cid < num_cids_; cid++) {
      SerializationCluster* cluster = clusters_by_cid_[cid];
      if (cluster != NULL) {
        clusters_by_size.Add(cluster);
      }
    }
    intptr_t text_size = 0;
    if (image_writer_ != nullptr) {
      auto const text_object_count = image_writer_->GetTextObjectCount();
      text_size = image_writer_->text_size();
      intptr_t trampoline_count, trampoline_size;
      image_writer_->GetTrampolineInfo(&trampoline_count, &trampoline_size);
      auto const instructions_count = text_object_count - trampoline_count;
      auto const instructions_size = text_size - trampoline_size;
      clusters_by_size.Add(new (zone_) FakeSerializationCluster(
          ImageWriter::TagObjectTypeAsReadOnly(zone_, "Instructions"),
          instructions_count, instructions_size));
      if (trampoline_size > 0) {
        clusters_by_size.Add(new (zone_) FakeSerializationCluster(
            ImageWriter::TagObjectTypeAsReadOnly(zone_, "Trampoline"),
            trampoline_count, trampoline_size));
      }
    }
    // The dispatch_table_size_ will be 0 if the snapshot did not include a
    // dispatch table (i.e., the VM snapshot). For a precompiled isolate
    // snapshot, we always serialize at least _one_ byte for the DispatchTable.
    if (dispatch_table_size_ > 0) {
      const auto& dispatch_table_entries = Array::Handle(
          zone_, isolate()->object_store()->dispatch_table_code_entries());
      auto const entry_count =
          dispatch_table_entries.IsNull() ? 0 : dispatch_table_entries.Length();
      clusters_by_size.Add(new (zone_) FakeSerializationCluster(
          "DispatchTable", entry_count, dispatch_table_size_));
    }
    clusters_by_size.Sort(CompareClusters);
    double total_size =
        static_cast<double>(bytes_written() + GetDataSize() + text_size);
    double cumulative_fraction = 0.0;
    for (intptr_t i = 0; i < clusters_by_size.length(); i++) {
      SerializationCluster* cluster = clusters_by_size[i];
      double fraction = static_cast<double>(cluster->size()) / total_size;
      cumulative_fraction += fraction;
      OS::PrintErr("%25s %6" Pd " %8" Pd " %lf %lf\n", cluster->name(),
                   cluster->num_objects(), cluster->size(), fraction,
                   cumulative_fraction);
    }
  }
#endif  // !defined(DART_PRECOMPILED_RUNTIME)
}

#if !defined(DART_PRECOMPILED_RUNTIME)
void Serializer::AddVMIsolateBaseObjects() {
  // These objects are always allocated by Object::InitOnce, so they are not
  // written into the snapshot.

  AddBaseObject(Object::null(), "Null", "null");
  AddBaseObject(Object::sentinel().raw(), "Null", "sentinel");
  AddBaseObject(Object::transition_sentinel().raw(), "Null",
                "transition_sentinel");
  AddBaseObject(Object::empty_array().raw(), "Array", "<empty_array>");
  AddBaseObject(Object::zero_array().raw(), "Array", "<zero_array>");
  AddBaseObject(Object::dynamic_type().raw(), "Type", "<dynamic type>");
  AddBaseObject(Object::void_type().raw(), "Type", "<void type>");
  AddBaseObject(Object::empty_type_arguments().raw(), "TypeArguments", "[]");
  AddBaseObject(Bool::True().raw(), "bool", "true");
  AddBaseObject(Bool::False().raw(), "bool", "false");
  ASSERT(Object::extractor_parameter_types().raw() != Object::null());
  AddBaseObject(Object::extractor_parameter_types().raw(), "Array",
                "<extractor parameter types>");
  ASSERT(Object::extractor_parameter_names().raw() != Object::null());
  AddBaseObject(Object::extractor_parameter_names().raw(), "Array",
                "<extractor parameter names>");
  AddBaseObject(Object::empty_context_scope().raw(), "ContextScope", "<empty>");
  AddBaseObject(Object::empty_descriptors().raw(), "PcDescriptors", "<empty>");
  AddBaseObject(Object::empty_var_descriptors().raw(), "LocalVarDescriptors",
                "<empty>");
  AddBaseObject(Object::empty_exception_handlers().raw(), "ExceptionHandlers",
                "<empty>");
  AddBaseObject(Object::implicit_getter_bytecode().raw(), "Bytecode",
                "<implicit getter>");
  AddBaseObject(Object::implicit_setter_bytecode().raw(), "Bytecode",
                "<implicit setter>");
  AddBaseObject(Object::implicit_static_getter_bytecode().raw(), "Bytecode",
                "<implicit static getter>");
  AddBaseObject(Object::method_extractor_bytecode().raw(), "Bytecode",
                "<method extractor>");
  AddBaseObject(Object::invoke_closure_bytecode().raw(), "Bytecode",
                "<invoke closure>");
  AddBaseObject(Object::invoke_field_bytecode().raw(), "Bytecode",
                "<invoke field>");
  AddBaseObject(Object::nsm_dispatcher_bytecode().raw(), "Bytecode",
                "<nsm dispatcher>");
  AddBaseObject(Object::dynamic_invocation_forwarder_bytecode().raw(),
                "Bytecode", "<dyn forwarder>");

  for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
    AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i],
                  "ArgumentsDescriptor", "<cached arguments descriptor>");
  }
  for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) {
    AddBaseObject(ICData::cached_icdata_arrays_[i], "Array",
                  "<empty icdata entries>");
  }
  AddBaseObject(SubtypeTestCache::cached_array_, "Array",
                "<empty subtype entries>");

  ClassTable* table = isolate()->class_table();
  for (intptr_t cid = kClassCid; cid < kInstanceCid; cid++) {
    // Error and CallSiteData have no class objects.
5834 | if (cid != kErrorCid && cid != kCallSiteDataCid) { |
5835 | ASSERT(table->HasValidClassAt(cid)); |
5836 | AddBaseObject(table->At(cid), "Class" ); |
5837 | } |
5838 | } |
5839 | AddBaseObject(table->At(kDynamicCid), "Class" ); |
5840 | AddBaseObject(table->At(kVoidCid), "Class" ); |
5841 | |
5842 | if (!Snapshot::IncludesCode(kind_)) { |
5843 | for (intptr_t i = 0; i < StubCode::NumEntries(); i++) { |
5844 | AddBaseObject(StubCode::EntryAt(i).raw(), "Code" , "<stub code>" ); |
5845 | } |
5846 | } |
5847 | } |
5848 | |
5849 | intptr_t Serializer::WriteVMSnapshot(const Array& symbols) { |
5850 | NoSafepointScope no_safepoint; |
5851 | |
5852 | AddVMIsolateBaseObjects(); |
5853 | |
5854 | // Push roots. |
5855 | Push(symbols.raw()); |
5856 | if (Snapshot::IncludesCode(kind_)) { |
5857 | for (intptr_t i = 0; i < StubCode::NumEntries(); i++) { |
5858 | Push(StubCode::EntryAt(i).raw()); |
5859 | } |
5860 | } |
5861 | |
5862 | Serialize(); |
5863 | |
5864 | // Write roots. |
5865 | WriteRootRef(symbols.raw(), "symbol-table" ); |
5866 | if (Snapshot::IncludesCode(kind_)) { |
5867 | for (intptr_t i = 0; i < StubCode::NumEntries(); i++) { |
5868 | WriteRootRef(StubCode::EntryAt(i).raw(), |
                   zone_->PrintToString("Stub:%s", StubCode::NameAt(i)));
5870 | } |
5871 | } |
5872 | |
5873 | #if defined(DEBUG) |
5874 | Write<int32_t>(kSectionMarker); |
5875 | #endif |
5876 | |
5877 | FlushBytesWrittenToRoot(); |
5878 | |
5879 | PrintSnapshotSizes(); |
5880 | |
5881 | // Note we are not clearing the object id table. The full ref table |
5882 | // of the vm isolate snapshot serves as the base objects for the |
5883 | // regular isolate snapshot. |
5884 | |
  // Return the number of objects; the -1 accounts for the unused ref 0.
5886 | return next_ref_index_ - 1; |
5887 | } |
5888 | |
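// Parallel array of names for the object store fields, used to label the
// root references written in WriteProgramSnapshot.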
5889 | static const char* kObjectStoreFieldNames[] = { |
5890 | #define DECLARE_OBJECT_STORE_FIELD(Type, Name) #Name, |
5891 | OBJECT_STORE_FIELD_LIST(DECLARE_OBJECT_STORE_FIELD, |
5892 | DECLARE_OBJECT_STORE_FIELD, |
5893 | DECLARE_OBJECT_STORE_FIELD, |
5894 | DECLARE_OBJECT_STORE_FIELD) |
5895 | #undef DECLARE_OBJECT_STORE_FIELD |
5896 | }; |
5897 | |
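// Writes an isolate (program) snapshot whose roots are the object store
// fields. The base objects are either the objects just written by
// WriteVMSnapshot or, when no new VM snapshot is being written, the object
// table of the VM isolate snapshot this VM was loaded from.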
5898 | void Serializer::WriteProgramSnapshot(intptr_t num_base_objects, |
5899 | ObjectStore* object_store) { |
5900 | NoSafepointScope no_safepoint; |
5901 | |
5902 | if (num_base_objects == 0) { |
5903 | // Not writing a new vm isolate: use the one this VM was loaded from. |
5904 | const Array& base_objects = Object::vm_isolate_snapshot_object_table(); |
5905 | for (intptr_t i = 1; i < base_objects.Length(); i++) { |
5906 | AddBaseObject(base_objects.At(i)); |
5907 | } |
5908 | } else { |
5909 | // Base objects carried over from WriteVMSnapshot. |
5910 | num_base_objects_ = num_base_objects; |
5911 | next_ref_index_ = num_base_objects + 1; |
5912 | } |
5913 | |
5914 | // Push roots. |
5915 | ObjectPtr* from = object_store->from(); |
5916 | ObjectPtr* to = object_store->to_snapshot(kind_); |
5917 | for (ObjectPtr* p = from; p <= to; p++) { |
5918 | Push(*p); |
5919 | } |
5920 | |
5921 | const auto& dispatch_table_entries = |
5922 | Array::Handle(zone_, object_store->dispatch_table_code_entries()); |
5923 | // We should only have a dispatch table in precompiled mode. |
5924 | ASSERT(dispatch_table_entries.IsNull() || kind() == Snapshot::kFullAOT); |
5925 | |
5926 | #if defined(DART_PRECOMPILER) |
5927 | // We treat the dispatch table as a root object and trace the Code objects it |
5928 | // references. Otherwise, a non-empty entry could be invalid on |
5929 | // deserialization if the corresponding Code object was not reachable from the |
5930 | // existing snapshot roots. |
5931 | if (!dispatch_table_entries.IsNull()) { |
5932 | for (intptr_t i = 0; i < dispatch_table_entries.Length(); i++) { |
5933 | Push(dispatch_table_entries.At(i)); |
5934 | } |
5935 | } |
5936 | #endif |
5937 | |
5938 | Serialize(); |
5939 | |
5940 | // Write roots. |
5941 | for (ObjectPtr* p = from; p <= to; p++) { |
5942 | WriteRootRef(*p, kObjectStoreFieldNames[p - from]); |
5943 | } |
5944 | |
5945 | FlushBytesWrittenToRoot(); |
5946 | // The dispatch table is serialized only for precompiled snapshots. |
5947 | WriteDispatchTable(dispatch_table_entries); |
5948 | object_currently_writing_.stream_start_ = stream_.Position(); |
5949 | #if defined(DART_PRECOMPILER) |
5950 | // If any bytes were written for the dispatch table, add it to the profile. |
5951 | if (dispatch_table_size_ > 0 && profile_writer_ != nullptr) { |
5952 | // Grab an unused ref index for a unique object id for the dispatch table. |
5953 | const auto dispatch_table_id = next_ref_index_++; |
5954 | const V8SnapshotProfileWriter::ObjectId dispatch_table_snapshot_id( |
5955 | V8SnapshotProfileWriter::kSnapshot, dispatch_table_id); |
    profile_writer_->AddRoot(dispatch_table_snapshot_id, "dispatch_table");
    profile_writer_->SetObjectTypeAndName(dispatch_table_snapshot_id,
                                          "DispatchTable", nullptr);
5959 | profile_writer_->AttributeBytesTo(dispatch_table_snapshot_id, |
5960 | dispatch_table_size_); |
5961 | |
5962 | if (!dispatch_table_entries.IsNull()) { |
5963 | for (intptr_t i = 0; i < dispatch_table_entries.Length(); i++) { |
5964 | auto const code = Code::RawCast(dispatch_table_entries.At(i)); |
5965 | if (code == Code::null()) continue; |
5966 | const V8SnapshotProfileWriter::ObjectId code_id( |
5967 | V8SnapshotProfileWriter::kSnapshot, RefId(code)); |
5968 | profile_writer_->AttributeReferenceTo( |
5969 | dispatch_table_snapshot_id, |
5970 | {code_id, V8SnapshotProfileWriter::Reference::kElement, i}); |
5971 | } |
5972 | } |
5973 | } |
5974 | #endif |
5975 | |
5976 | #if defined(DEBUG) |
5977 | Write<int32_t>(kSectionMarker); |
5978 | #endif |
5979 | |
5980 | PrintSnapshotSizes(); |
5981 | |
  // TODO(rmacnak): This also carries over object ids from loading units that
  // aren't dominators. It would be more robust to remember the written objects
  // in each loading unit and re-assign object ids when setting up the base
  // objects. Then a reference to a non-dominating object would produce an
  // error instead of corruption.
5987 | if (kind() != Snapshot::kFullAOT) { |
5988 | heap_->ResetObjectIdTable(); |
5989 | } |
5990 | } |
5991 | |
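// Writes a snapshot for a deferred loading unit. The program hash is written
// first so the runtime can reject a unit built from a different program (see
// Deserializer::ReadUnitSnapshot). The unit's deferred Code objects are
// written starting at a recorded ref index, followed by root references to
// their stack maps and source maps.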
5992 | void Serializer::WriteUnitSnapshot(LoadingUnitSerializationData* unit, |
5993 | uint32_t program_hash) { |
5994 | Write(program_hash); |
5995 | |
5996 | NoSafepointScope no_safepoint; |
5997 | |
5998 | intptr_t num_base_objects = unit->parent()->num_objects(); |
5999 | ASSERT(num_base_objects != 0); |
6000 | num_base_objects_ = num_base_objects; |
6001 | next_ref_index_ = num_base_objects + 1; |
6002 | |
6003 | intptr_t num_deferred_objects = unit->deferred_objects()->length(); |
6004 | for (intptr_t i = 0; i < num_deferred_objects; i++) { |
6005 | const Object* deferred_object = (*unit->deferred_objects())[i]; |
6006 | ASSERT(deferred_object->IsCode()); |
6007 | CodePtr code = static_cast<CodePtr>(deferred_object->raw()); |
6008 | Push(code->ptr()->compressed_stackmaps_); |
6009 | Push(code->ptr()->code_source_map_); |
6010 | } |
6011 | { |
6012 | GrowableArray<CodePtr> raw_codes(num_deferred_objects); |
6013 | for (intptr_t i = 0; i < num_deferred_objects; i++) { |
6014 | raw_codes.Add((*unit->deferred_objects())[i]->raw()); |
6015 | } |
6016 | PrepareInstructions(&raw_codes); |
6017 | } |
6018 | |
6019 | Serialize(); |
6020 | |
6021 | intptr_t start_index = 0; |
6022 | if (num_deferred_objects != 0) { |
6023 | start_index = RefId(unit->deferred_objects()->At(0)->raw()); |
6024 | ASSERT(start_index > 0); |
6025 | } |
6026 | WriteUnsigned(start_index); |
6027 | WriteUnsigned(num_deferred_objects); |
6028 | for (intptr_t i = 0; i < num_deferred_objects; i++) { |
6029 | const Object* deferred_object = (*unit->deferred_objects())[i]; |
6030 | ASSERT(deferred_object->IsCode()); |
6031 | CodePtr code = static_cast<CodePtr>(deferred_object->raw()); |
6032 | ASSERT(RefId(code) == (start_index + i)); |
6033 | WriteInstructions(code->ptr()->instructions_, |
6034 | code->ptr()->unchecked_offset_, code, false); |
    WriteRootRef(code->ptr()->compressed_stackmaps_, "deferred-code");
    WriteRootRef(code->ptr()->code_source_map_, "deferred-code");
6037 | } |
6038 | |
6039 | FlushBytesWrittenToRoot(); |
6040 | object_currently_writing_.stream_start_ = stream_.Position(); |
6041 | |
6042 | #if defined(DEBUG) |
6043 | Write<int32_t>(kSectionMarker); |
6044 | #endif |
6045 | } |
6046 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
6047 | |
6048 | Deserializer::Deserializer(Thread* thread, |
6049 | Snapshot::Kind kind, |
6050 | const uint8_t* buffer, |
6051 | intptr_t size, |
6052 | const uint8_t* data_buffer, |
6053 | const uint8_t* instructions_buffer, |
6054 | intptr_t offset) |
6055 | : ThreadStackResource(thread), |
6056 | heap_(thread->isolate()->heap()), |
6057 | zone_(thread->zone()), |
6058 | kind_(kind), |
6059 | stream_(buffer, size), |
6060 | image_reader_(NULL), |
6061 | refs_(nullptr), |
6062 | next_ref_index_(1), |
6063 | previous_text_offset_(0), |
6064 | clusters_(NULL), |
6065 | field_table_(thread->isolate()->field_table()) { |
6066 | if (Snapshot::IncludesCode(kind)) { |
6067 | ASSERT(instructions_buffer != NULL); |
6068 | ASSERT(data_buffer != NULL); |
6069 | image_reader_ = new (zone_) ImageReader(data_buffer, instructions_buffer); |
6070 | } |
6071 | stream_.SetPosition(offset); |
6072 | } |
6073 | |
6074 | Deserializer::~Deserializer() { |
6075 | delete[] clusters_; |
6076 | } |
6077 | |
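// Reads a class id and constructs the matching deserialization cluster.
// Instance-like cids and the typed data families are handled before the
// switch; read-only data clusters are only used when the snapshot includes
// code.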
6078 | DeserializationCluster* Deserializer::ReadCluster() { |
6079 | intptr_t cid = ReadCid(); |
6080 | Zone* Z = zone_; |
6081 | if (cid >= kNumPredefinedCids || cid == kInstanceCid) { |
6082 | return new (Z) InstanceDeserializationCluster(cid); |
6083 | } |
6084 | if (IsTypedDataViewClassId(cid)) { |
6085 | return new (Z) TypedDataViewDeserializationCluster(cid); |
6086 | } |
6087 | if (IsExternalTypedDataClassId(cid)) { |
6088 | return new (Z) ExternalTypedDataDeserializationCluster(cid); |
6089 | } |
6090 | if (IsTypedDataClassId(cid)) { |
6091 | return new (Z) TypedDataDeserializationCluster(cid); |
6092 | } |
6093 | |
6094 | if (Snapshot::IncludesCode(kind_)) { |
6095 | switch (cid) { |
6096 | case kPcDescriptorsCid: |
6097 | case kCodeSourceMapCid: |
6098 | case kCompressedStackMapsCid: |
6099 | case kOneByteStringCid: |
6100 | case kTwoByteStringCid: |
6101 | return new (Z) RODataDeserializationCluster(); |
6102 | } |
6103 | } |
6104 | |
6105 | switch (cid) { |
6106 | case kClassCid: |
6107 | return new (Z) ClassDeserializationCluster(); |
6108 | case kTypeArgumentsCid: |
6109 | return new (Z) TypeArgumentsDeserializationCluster(); |
6110 | case kPatchClassCid: |
6111 | return new (Z) PatchClassDeserializationCluster(); |
6112 | case kFunctionCid: |
6113 | return new (Z) FunctionDeserializationCluster(); |
6114 | case kClosureDataCid: |
6115 | return new (Z) ClosureDataDeserializationCluster(); |
6116 | case kSignatureDataCid: |
6117 | return new (Z) SignatureDataDeserializationCluster(); |
6118 | case kRedirectionDataCid: |
6119 | return new (Z) RedirectionDataDeserializationCluster(); |
6120 | case kFfiTrampolineDataCid: |
6121 | return new (Z) FfiTrampolineDataDeserializationCluster(); |
6122 | case kFieldCid: |
6123 | return new (Z) FieldDeserializationCluster(); |
6124 | case kScriptCid: |
6125 | return new (Z) ScriptDeserializationCluster(); |
6126 | case kLibraryCid: |
6127 | return new (Z) LibraryDeserializationCluster(); |
6128 | case kNamespaceCid: |
6129 | return new (Z) NamespaceDeserializationCluster(); |
6130 | #if !defined(DART_PRECOMPILED_RUNTIME) |
6131 | case kKernelProgramInfoCid: |
6132 | return new (Z) KernelProgramInfoDeserializationCluster(); |
6133 | #endif // !DART_PRECOMPILED_RUNTIME |
6134 | case kCodeCid: |
6135 | return new (Z) CodeDeserializationCluster(); |
6136 | #if !defined(DART_PRECOMPILED_RUNTIME) |
6137 | case kBytecodeCid: |
6138 | return new (Z) BytecodeDeserializationCluster(); |
6139 | #endif // !DART_PRECOMPILED_RUNTIME |
6140 | case kObjectPoolCid: |
6141 | return new (Z) ObjectPoolDeserializationCluster(); |
6142 | case kPcDescriptorsCid: |
6143 | return new (Z) PcDescriptorsDeserializationCluster(); |
6144 | case kExceptionHandlersCid: |
6145 | return new (Z) ExceptionHandlersDeserializationCluster(); |
6146 | case kContextCid: |
6147 | return new (Z) ContextDeserializationCluster(); |
6148 | case kContextScopeCid: |
6149 | return new (Z) ContextScopeDeserializationCluster(); |
6150 | case kParameterTypeCheckCid: |
6151 | return new (Z) ParameterTypeCheckDeserializationCluster(); |
6152 | case kUnlinkedCallCid: |
6153 | return new (Z) UnlinkedCallDeserializationCluster(); |
6154 | case kICDataCid: |
6155 | return new (Z) ICDataDeserializationCluster(); |
6156 | case kMegamorphicCacheCid: |
6157 | return new (Z) MegamorphicCacheDeserializationCluster(); |
6158 | case kSubtypeTestCacheCid: |
6159 | return new (Z) SubtypeTestCacheDeserializationCluster(); |
6160 | case kLoadingUnitCid: |
6161 | return new (Z) LoadingUnitDeserializationCluster(); |
6162 | case kLanguageErrorCid: |
6163 | return new (Z) LanguageErrorDeserializationCluster(); |
6164 | case kUnhandledExceptionCid: |
6165 | return new (Z) UnhandledExceptionDeserializationCluster(); |
6166 | case kLibraryPrefixCid: |
6167 | return new (Z) LibraryPrefixDeserializationCluster(); |
6168 | case kTypeCid: |
6169 | return new (Z) TypeDeserializationCluster(); |
6170 | case kTypeRefCid: |
6171 | return new (Z) TypeRefDeserializationCluster(); |
6172 | case kTypeParameterCid: |
6173 | return new (Z) TypeParameterDeserializationCluster(); |
6174 | case kClosureCid: |
6175 | return new (Z) ClosureDeserializationCluster(); |
6176 | case kMintCid: |
6177 | return new (Z) MintDeserializationCluster(); |
6178 | case kDoubleCid: |
6179 | return new (Z) DoubleDeserializationCluster(); |
6180 | case kGrowableObjectArrayCid: |
6181 | return new (Z) GrowableObjectArrayDeserializationCluster(); |
6182 | case kStackTraceCid: |
6183 | return new (Z) StackTraceDeserializationCluster(); |
6184 | case kRegExpCid: |
6185 | return new (Z) RegExpDeserializationCluster(); |
6186 | case kWeakPropertyCid: |
6187 | return new (Z) WeakPropertyDeserializationCluster(); |
6188 | case kLinkedHashMapCid: |
6189 | return new (Z) LinkedHashMapDeserializationCluster(); |
6190 | case kArrayCid: |
6191 | return new (Z) ArrayDeserializationCluster(kArrayCid); |
6192 | case kImmutableArrayCid: |
6193 | return new (Z) ArrayDeserializationCluster(kImmutableArrayCid); |
6194 | case kOneByteStringCid: |
6195 | return new (Z) OneByteStringDeserializationCluster(); |
6196 | case kTwoByteStringCid: |
6197 | return new (Z) TwoByteStringDeserializationCluster(); |
6198 | case kWeakSerializationReferenceCid: |
6199 | #if defined(DART_PRECOMPILED_RUNTIME) |
6200 | return new (Z) WeakSerializationReferenceDeserializationCluster(); |
6201 | #endif |
6202 | default: |
6203 | break; |
6204 | } |
6205 | FATAL1("No cluster defined for cid %" Pd, cid); |
6206 | return NULL; |
6207 | } |
6208 | |
6209 | void Deserializer::ReadDispatchTable() { |
6210 | #if defined(DART_PRECOMPILED_RUNTIME) |
6211 | const intptr_t length = ReadUnsigned(); |
6212 | if (length == 0) return; |
6213 | |
6214 | // Not all Code objects may be in the code_order_table when instructions can |
6215 | // be deduplicated. Thus, we serialize the reference ID of the first code |
6216 | // object, from which we can get the reference ID for any code object. |
6217 | const intptr_t first_code_id = ReadUnsigned(); |
6218 | |
6219 | auto const I = isolate(); |
6220 | auto code = I->object_store()->dispatch_table_null_error_stub(); |
6221 | ASSERT(code != Code::null()); |
6222 | uword null_entry = Code::EntryPointOf(code); |
6223 | |
6224 | auto const table = new DispatchTable(length); |
6225 | auto const array = table->array(); |
6226 | uword value = 0; |
6227 | uword recent[kDispatchTableRecentCount] = {0}; |
6228 | intptr_t recent_index = 0; |
6229 | intptr_t repeat_count = 0; |
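  // Each encoded entry is one of:
  //   0: use the null-error stub's entry point.
  //   negative n: reuse a recently decoded entry point, recent[~n].
  //   1..kDispatchTableMaxRepeat: repeat the previous entry's value that
  //     many times.
  //   otherwise: (encoded - kDispatchTableIndexBase) is the offset of the
  //     entry's Code object from the first code object's ref id.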
6230 | for (intptr_t i = 0; i < length; i++) { |
6231 | if (repeat_count > 0) { |
6232 | array[i] = value; |
6233 | repeat_count--; |
6234 | continue; |
6235 | } |
6236 | auto const encoded = Read<intptr_t>(); |
6237 | if (encoded == 0) { |
6238 | value = null_entry; |
6239 | } else if (encoded < 0) { |
6240 | intptr_t r = ~encoded; |
6241 | ASSERT(r < kDispatchTableRecentCount); |
6242 | value = recent[r]; |
6243 | } else if (encoded <= kDispatchTableMaxRepeat) { |
6244 | repeat_count = encoded - 1; |
6245 | } else { |
6246 | intptr_t cluster_index = encoded - kDispatchTableIndexBase; |
6247 | code = Code::RawCast(Ref(first_code_id + cluster_index)); |
6248 | value = Code::EntryPointOf(code); |
6249 | recent[recent_index] = value; |
6250 | recent_index = (recent_index + 1) & kDispatchTableRecentMask; |
6251 | } |
6252 | array[i] = value; |
6253 | } |
6254 | ASSERT(repeat_count == 0); |
6255 | |
6256 | I->group()->set_dispatch_table(table); |
6257 | #endif |
6258 | } |
6259 | |
6260 | ApiErrorPtr Deserializer::VerifyImageAlignment() { |
6261 | if (image_reader_ != nullptr) { |
6262 | return image_reader_->VerifyAlignment(); |
6263 | } |
6264 | return ApiError::null(); |
6265 | } |
6266 | |
6267 | char* SnapshotHeaderReader::VerifyVersionAndFeatures(Isolate* isolate, |
6268 | intptr_t* offset) { |
6269 | char* error = VerifyVersion(); |
6270 | if (error == nullptr) { |
6271 | error = VerifyFeatures(isolate); |
6272 | } |
6273 | if (error == nullptr) { |
6274 | *offset = stream_.Position(); |
6275 | } |
6276 | return error; |
6277 | } |
6278 | |
char* SnapshotHeaderReader::VerifyVersion() {
6280 | // If the version string doesn't match, return an error. |
6281 | // Note: New things are allocated only if we're going to return an error. |
6282 | |
6283 | const char* expected_version = Version::SnapshotString(); |
6284 | ASSERT(expected_version != NULL); |
6285 | const intptr_t version_len = strlen(expected_version); |
6286 | if (stream_.PendingBytes() < version_len) { |
6287 | const intptr_t kMessageBufferSize = 128; |
6288 | char message_buffer[kMessageBufferSize]; |
6289 | Utils::SNPrint(message_buffer, kMessageBufferSize, |
6290 | "No full snapshot version found, expected '%s'" , |
6291 | expected_version); |
6292 | return BuildError(message_buffer); |
6293 | } |
6294 | |
6295 | const char* version = |
6296 | reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition()); |
6297 | ASSERT(version != NULL); |
6298 | if (strncmp(version, expected_version, version_len) != 0) { |
6299 | const intptr_t kMessageBufferSize = 256; |
6300 | char message_buffer[kMessageBufferSize]; |
6301 | char* actual_version = Utils::StrNDup(version, version_len); |
6302 | Utils::SNPrint(message_buffer, kMessageBufferSize, |
6303 | "Wrong %s snapshot version, expected '%s' found '%s'" , |
6304 | (Snapshot::IsFull(kind_)) ? "full" : "script" , |
6305 | expected_version, actual_version); |
6306 | free(actual_version); |
6307 | return BuildError(message_buffer); |
6308 | } |
6309 | stream_.Advance(version_len); |
6310 | |
6311 | return nullptr; |
6312 | } |
6313 | |
char* SnapshotHeaderReader::VerifyFeatures(Isolate* isolate) {
6315 | const char* expected_features = |
6316 | Dart::FeaturesString(isolate, (isolate == NULL), kind_); |
6317 | ASSERT(expected_features != NULL); |
6318 | const intptr_t expected_len = strlen(expected_features); |
6319 | |
6320 | const char* features = nullptr; |
6321 | intptr_t features_length = 0; |
6322 | |
6323 | auto error = ReadFeatures(&features, &features_length); |
6324 | if (error != nullptr) { |
6325 | return error; |
6326 | } |
6327 | |
6328 | if (features_length != expected_len || |
6329 | (strncmp(features, expected_features, expected_len) != 0)) { |
6330 | const intptr_t kMessageBufferSize = 1024; |
6331 | char message_buffer[kMessageBufferSize]; |
6332 | char* actual_features = Utils::StrNDup( |
6333 | features, features_length < 1024 ? features_length : 1024); |
6334 | Utils::SNPrint(message_buffer, kMessageBufferSize, |
6335 | "Snapshot not compatible with the current VM configuration: " |
6336 | "the snapshot requires '%s' but the VM has '%s'" , |
6337 | actual_features, expected_features); |
6338 | free(const_cast<char*>(expected_features)); |
6339 | free(actual_features); |
6340 | return BuildError(message_buffer); |
6341 | } |
6342 | free(const_cast<char*>(expected_features)); |
6343 | return nullptr; |
6344 | } |
6345 | |
char* SnapshotHeaderReader::ReadFeatures(const char** features,
                                         intptr_t* features_length) {
6348 | const char* cursor = |
6349 | reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition()); |
6350 | const intptr_t length = Utils::StrNLen(cursor, stream_.PendingBytes()); |
6351 | if (length == stream_.PendingBytes()) { |
6352 | return BuildError( |
6353 | "The features string in the snapshot was not '\\0'-terminated." ); |
6354 | } |
6355 | *features = cursor; |
6356 | *features_length = length; |
6357 | stream_.Advance(length + 1); |
6358 | return nullptr; |
6359 | } |
6360 | |
char* SnapshotHeaderReader::BuildError(const char* message) {
6362 | return Utils::StrDup(message); |
6363 | } |
6364 | |
6365 | ApiErrorPtr FullSnapshotReader::ConvertToApiError(char* message) { |
6366 | // This can also fail while bringing up the VM isolate, so make sure to |
6367 | // allocate the error message in old space. |
6368 | const String& msg = String::Handle(String::New(message, Heap::kOld)); |
6369 | |
6370 | // The [message] was constructed with [BuildError] and needs to be freed. |
6371 | free(message); |
6372 | |
6373 | return ApiError::New(msg, Heap::kOld); |
6374 | } |
6375 | |
6376 | void Deserializer::ReadInstructions(CodePtr code, bool deferred) { |
6377 | if (deferred) { |
6378 | #if defined(DART_PRECOMPILED_RUNTIME) |
6379 | if (FLAG_use_bare_instructions) { |
6380 | uword entry_point = StubCode::NotLoaded().EntryPoint(); |
6381 | code->ptr()->entry_point_ = entry_point; |
6382 | code->ptr()->unchecked_entry_point_ = entry_point; |
6383 | code->ptr()->monomorphic_entry_point_ = entry_point; |
6384 | code->ptr()->monomorphic_unchecked_entry_point_ = entry_point; |
6385 | code->ptr()->instructions_length_ = 0; |
6386 | return; |
6387 | } |
6388 | #endif |
6389 | InstructionsPtr instr = StubCode::NotLoaded().instructions(); |
6390 | uint32_t unchecked_offset = 0; |
6391 | code->ptr()->instructions_ = instr; |
6392 | #if defined(DART_PRECOMPILED_RUNTIME) |
6393 | code->ptr()->instructions_length_ = Instructions::Size(instr); |
6394 | #else |
6395 | code->ptr()->unchecked_offset_ = unchecked_offset; |
6396 | #endif |
6397 | Code::InitializeCachedEntryPointsFrom(code, instr, unchecked_offset); |
6398 | return; |
6399 | } |
6400 | |
6401 | #if defined(DART_PRECOMPILED_RUNTIME) |
6402 | if (FLAG_use_bare_instructions) { |
6403 | // There are no serialized RawInstructions objects in this mode. |
6404 | code->ptr()->instructions_ = Instructions::null(); |
6405 | previous_text_offset_ += ReadUnsigned(); |
6406 | const uword payload_start = |
6407 | image_reader_->GetBareInstructionsAt(previous_text_offset_); |
6408 | const uint32_t payload_info = ReadUnsigned(); |
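    // The low bit of payload_info records whether the code has a monomorphic
    // entrypoint; the remaining bits are the offset of the unchecked
    // entrypoint within the payload.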
6409 | const uint32_t unchecked_offset = payload_info >> 1; |
6410 | const bool has_monomorphic_entrypoint = (payload_info & 0x1) == 0x1; |
6411 | |
6412 | const uword entry_offset = has_monomorphic_entrypoint |
6413 | ? Instructions::kPolymorphicEntryOffsetAOT |
6414 | : 0; |
6415 | const uword monomorphic_entry_offset = |
6416 | has_monomorphic_entrypoint ? Instructions::kMonomorphicEntryOffsetAOT |
6417 | : 0; |
6418 | |
6419 | const uword entry_point = payload_start + entry_offset; |
6420 | const uword monomorphic_entry_point = |
6421 | payload_start + monomorphic_entry_offset; |
6422 | |
6423 | code->ptr()->entry_point_ = entry_point; |
6424 | code->ptr()->unchecked_entry_point_ = entry_point + unchecked_offset; |
6425 | code->ptr()->monomorphic_entry_point_ = monomorphic_entry_point; |
6426 | code->ptr()->monomorphic_unchecked_entry_point_ = |
6427 | monomorphic_entry_point + unchecked_offset; |
6428 | return; |
6429 | } |
6430 | #endif |
6431 | |
6432 | InstructionsPtr instr = image_reader_->GetInstructionsAt(Read<uint32_t>()); |
6433 | uint32_t unchecked_offset = ReadUnsigned(); |
6434 | code->ptr()->instructions_ = instr; |
6435 | #if defined(DART_PRECOMPILED_RUNTIME) |
6436 | code->ptr()->instructions_length_ = Instructions::Size(instr); |
6437 | #else |
6438 | code->ptr()->unchecked_offset_ = unchecked_offset; |
6439 | if (kind() == Snapshot::kFullJIT) { |
6440 | const uint32_t active_offset = Read<uint32_t>(); |
6441 | instr = image_reader_->GetInstructionsAt(active_offset); |
6442 | unchecked_offset = ReadUnsigned(); |
6443 | } |
6444 | code->ptr()->active_instructions_ = instr; |
6445 | #endif |
6446 | Code::InitializeCachedEntryPointsFrom(code, instr, unchecked_offset); |
6447 | } |
6448 | |
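// In bare instructions mode, instruction payloads are laid out contiguously,
// so each Code's instructions length is recovered here as the distance to the
// next payload, walking backwards from the end of the image. The resulting
// memory-ordered code table is recorded in the object store.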
6449 | void Deserializer::EndInstructions(const Array& refs, |
6450 | intptr_t start_index, |
6451 | intptr_t stop_index) { |
6452 | #if defined(DART_PRECOMPILED_RUNTIME) |
6453 | if (FLAG_use_bare_instructions) { |
6454 | uword previous_end = image_reader_->GetBareInstructionsEnd(); |
6455 | for (intptr_t id = stop_index - 1; id >= start_index; id--) { |
6456 | CodePtr code = static_cast<CodePtr>(refs.At(id)); |
6457 | uword start = Code::PayloadStartOf(code); |
6458 | ASSERT(start <= previous_end); |
6459 | code->ptr()->instructions_length_ = previous_end - start; |
6460 | previous_end = start; |
6461 | } |
6462 | |
6463 | // Build an array of code objects representing the order in which the |
6464 | // [Code]'s instructions will be located in memory. |
6465 | const intptr_t count = stop_index - start_index; |
6466 | const Array& order_table = |
6467 | Array::Handle(zone_, Array::New(count, Heap::kOld)); |
6468 | Object& code = Object::Handle(zone_); |
6469 | for (intptr_t i = 0; i < count; i++) { |
6470 | code = refs.At(start_index + i); |
6471 | order_table.SetAt(i, code); |
6472 | } |
6473 | ObjectStore* object_store = Isolate::Current()->object_store(); |
6474 | GrowableObjectArray& order_tables = |
6475 | GrowableObjectArray::Handle(zone_, object_store->code_order_tables()); |
6476 | if (order_tables.IsNull()) { |
6477 | order_tables = GrowableObjectArray::New(Heap::kOld); |
6478 | object_store->set_code_order_tables(order_tables); |
6479 | } |
6480 | order_tables.Add(order_table, Heap::kOld); |
6481 | } |
6482 | #endif |
6483 | } |
6484 | |
6485 | ObjectPtr Deserializer::GetObjectAt(uint32_t offset) const { |
6486 | return image_reader_->GetObjectAt(offset); |
6487 | } |
6488 | |
6489 | void Deserializer::Prepare() { |
6490 | num_base_objects_ = ReadUnsigned(); |
6491 | num_objects_ = ReadUnsigned(); |
6492 | num_clusters_ = ReadUnsigned(); |
6493 | const intptr_t field_table_len = ReadUnsigned(); |
6494 | |
6495 | clusters_ = new DeserializationCluster*[num_clusters_]; |
6496 | refs_ = Array::New(num_objects_ + 1, Heap::kOld); |
6497 | if (field_table_len > 0) { |
6498 | field_table_->AllocateIndex(field_table_len - 1); |
6499 | } |
6500 | ASSERT(field_table_->NumFieldIds() == field_table_len); |
6501 | } |
6502 | |
6503 | void Deserializer::Deserialize() { |
6504 | if (num_base_objects_ != (next_ref_index_ - 1)) { |
6505 | FATAL2("Snapshot expects %" Pd |
6506 | " base objects, but deserializer provided %" Pd, |
6507 | num_base_objects_, next_ref_index_ - 1); |
6508 | } |
6509 | |
6510 | for (intptr_t i = 0; i < num_clusters_; i++) { |
6511 | clusters_[i] = ReadCluster(); |
6512 | clusters_[i]->ReadAlloc(this); |
6513 | #if defined(DEBUG) |
6514 | intptr_t serializers_next_ref_index_ = Read<int32_t>(); |
6515 | ASSERT(serializers_next_ref_index_ == next_ref_index_); |
6516 | #endif |
6517 | } |
6518 | |
6519 | // We should have completely filled the ref array. |
6520 | ASSERT((next_ref_index_ - 1) == num_objects_); |
6521 | |
6522 | for (intptr_t i = 0; i < num_clusters_; i++) { |
6523 | clusters_[i]->ReadFill(this); |
6524 | #if defined(DEBUG) |
6525 | int32_t section_marker = Read<int32_t>(); |
6526 | ASSERT(section_marker == kSectionMarker); |
6527 | #endif |
6528 | } |
6529 | } |
6530 | |
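// Holds the old-space data freelist lock for the duration of a scope, so that
// allocation during deserialization can use the locked bump-allocation fast
// path without per-allocation locking.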
6531 | class HeapLocker : public StackResource { |
6532 | public: |
6533 | HeapLocker(Thread* thread, PageSpace* page_space) |
6534 | : StackResource(thread), |
6535 | page_space_(page_space), |
6536 | freelist_(page_space->DataFreeList()) { |
6537 | page_space_->AcquireLock(freelist_); |
6538 | } |
6539 | ~HeapLocker() { page_space_->ReleaseLock(freelist_); } |
6540 | |
6541 | private: |
6542 | PageSpace* page_space_; |
6543 | FreeList* freelist_; |
6544 | }; |
6545 | |
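// Must add the same objects in the same order as
// Serializer::AddVMIsolateBaseObjects, since base objects are matched up by
// their ref indices.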
6546 | void Deserializer::AddVMIsolateBaseObjects() { |
6547 | // These objects are always allocated by Object::InitOnce, so they are not |
6548 | // written into the snapshot. |
6549 | |
6550 | AddBaseObject(Object::null()); |
6551 | AddBaseObject(Object::sentinel().raw()); |
6552 | AddBaseObject(Object::transition_sentinel().raw()); |
6553 | AddBaseObject(Object::empty_array().raw()); |
6554 | AddBaseObject(Object::zero_array().raw()); |
6555 | AddBaseObject(Object::dynamic_type().raw()); |
6556 | AddBaseObject(Object::void_type().raw()); |
6557 | AddBaseObject(Object::empty_type_arguments().raw()); |
6558 | AddBaseObject(Bool::True().raw()); |
6559 | AddBaseObject(Bool::False().raw()); |
6560 | ASSERT(Object::extractor_parameter_types().raw() != Object::null()); |
6561 | AddBaseObject(Object::extractor_parameter_types().raw()); |
6562 | ASSERT(Object::extractor_parameter_names().raw() != Object::null()); |
6563 | AddBaseObject(Object::extractor_parameter_names().raw()); |
6564 | AddBaseObject(Object::empty_context_scope().raw()); |
6565 | AddBaseObject(Object::empty_descriptors().raw()); |
6566 | AddBaseObject(Object::empty_var_descriptors().raw()); |
6567 | AddBaseObject(Object::empty_exception_handlers().raw()); |
6568 | AddBaseObject(Object::implicit_getter_bytecode().raw()); |
6569 | AddBaseObject(Object::implicit_setter_bytecode().raw()); |
6570 | AddBaseObject(Object::implicit_static_getter_bytecode().raw()); |
6571 | AddBaseObject(Object::method_extractor_bytecode().raw()); |
6572 | AddBaseObject(Object::invoke_closure_bytecode().raw()); |
6573 | AddBaseObject(Object::invoke_field_bytecode().raw()); |
6574 | AddBaseObject(Object::nsm_dispatcher_bytecode().raw()); |
6575 | AddBaseObject(Object::dynamic_invocation_forwarder_bytecode().raw()); |
6576 | |
6577 | for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) { |
6578 | AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i]); |
6579 | } |
6580 | for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) { |
6581 | AddBaseObject(ICData::cached_icdata_arrays_[i]); |
6582 | } |
6583 | AddBaseObject(SubtypeTestCache::cached_array_); |
6584 | |
6585 | ClassTable* table = isolate()->class_table(); |
6586 | for (intptr_t cid = kClassCid; cid <= kUnwindErrorCid; cid++) { |
    // Error and CallSiteData have no class objects.
6588 | if (cid != kErrorCid && cid != kCallSiteDataCid) { |
6589 | ASSERT(table->HasValidClassAt(cid)); |
6590 | AddBaseObject(table->At(cid)); |
6591 | } |
6592 | } |
6593 | AddBaseObject(table->At(kDynamicCid)); |
6594 | AddBaseObject(table->At(kVoidCid)); |
6595 | |
6596 | if (!Snapshot::IncludesCode(kind_)) { |
6597 | for (intptr_t i = 0; i < StubCode::NumEntries(); i++) { |
6598 | AddBaseObject(StubCode::EntryAt(i).raw()); |
6599 | } |
6600 | } |
6601 | } |
6602 | |
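// Reads the VM isolate snapshot, mirroring Serializer::WriteVMSnapshot: the
// symbol table root first, then the stub code entries when the snapshot
// includes code.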
6603 | void Deserializer::ReadVMSnapshot() { |
6604 | Array& symbol_table = Array::Handle(zone_); |
6605 | Array& refs = Array::Handle(zone_); |
6606 | Prepare(); |
6607 | |
6608 | { |
6609 | NoSafepointScope no_safepoint; |
6610 | HeapLocker hl(thread(), heap_->old_space()); |
6611 | |
6612 | AddVMIsolateBaseObjects(); |
6613 | |
6614 | Deserialize(); |
6615 | |
6616 | // Read roots. |
6617 | symbol_table ^= ReadRef(); |
6618 | isolate()->object_store()->set_symbol_table(symbol_table); |
6619 | if (Snapshot::IncludesCode(kind_)) { |
6620 | for (intptr_t i = 0; i < StubCode::NumEntries(); i++) { |
6621 | Code* code = Code::ReadOnlyHandle(); |
6622 | *code ^= ReadRef(); |
6623 | StubCode::EntryAtPut(i, code); |
6624 | } |
6625 | } |
6626 | |
6627 | #if defined(DEBUG) |
6628 | int32_t section_marker = Read<int32_t>(); |
6629 | ASSERT(section_marker == kSectionMarker); |
6630 | #endif |
6631 | |
6632 | refs = refs_; |
6633 | refs_ = NULL; |
6634 | } |
6635 | |
  // Move remaining bump allocation space to the freelist so it is used by C++
  // allocations (e.g., FinalizeVMIsolate) before allocating new pages.
6638 | heap_->old_space()->AbandonBumpAllocation(); |
6639 | |
6640 | Symbols::InitFromSnapshot(isolate()); |
6641 | |
6642 | Object::set_vm_isolate_snapshot_object_table(refs); |
6643 | |
6644 | #if defined(DEBUG) |
6645 | isolate()->ValidateClassTable(); |
6646 | #endif |
6647 | |
6648 | for (intptr_t i = 0; i < num_clusters_; i++) { |
6649 | clusters_[i]->PostLoad(this, refs); |
6650 | } |
6651 | } |
6652 | |
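// Reads an isolate (program) snapshot, mirroring
// Serializer::WriteProgramSnapshot: the base objects come from the VM isolate
// snapshot object table, and the roots are the object store fields.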
6653 | void Deserializer::ReadProgramSnapshot(ObjectStore* object_store) { |
6654 | Array& refs = Array::Handle(zone_); |
6655 | Prepare(); |
6656 | |
6657 | { |
6658 | NoSafepointScope no_safepoint; |
6659 | HeapLocker hl(thread(), heap_->old_space()); |
6660 | |
6661 | // N.B.: Skipping index 0 because ref 0 is illegal. |
6662 | const Array& base_objects = Object::vm_isolate_snapshot_object_table(); |
6663 | for (intptr_t i = 1; i < base_objects.Length(); i++) { |
6664 | AddBaseObject(base_objects.At(i)); |
6665 | } |
6666 | |
6667 | Deserialize(); |
6668 | |
6669 | // Read roots. |
6670 | ObjectPtr* from = object_store->from(); |
6671 | ObjectPtr* to = object_store->to_snapshot(kind_); |
6672 | for (ObjectPtr* p = from; p <= to; p++) { |
6673 | *p = ReadRef(); |
6674 | } |
6675 | |
6676 | // Deserialize dispatch table (when applicable) |
6677 | ReadDispatchTable(); |
6678 | |
6679 | #if defined(DEBUG) |
6680 | int32_t section_marker = Read<int32_t>(); |
6681 | ASSERT(section_marker == kSectionMarker); |
6682 | #endif |
6683 | |
6684 | refs = refs_; |
6685 | refs_ = NULL; |
6686 | } |
6687 | |
6688 | thread()->isolate()->class_table()->CopySizesFromClassObjects(); |
6689 | heap_->old_space()->EvaluateAfterLoading(); |
6690 | |
6691 | Isolate* isolate = thread()->isolate(); |
6692 | #if defined(DEBUG) |
6693 | isolate->ValidateClassTable(); |
6694 | isolate->heap()->Verify(); |
6695 | #endif |
6696 | |
6697 | for (intptr_t i = 0; i < num_clusters_; i++) { |
6698 | clusters_[i]->PostLoad(this, refs); |
6699 | } |
6700 | const Array& units = |
6701 | Array::Handle(zone_, isolate->object_store()->loading_units()); |
6702 | if (!units.IsNull()) { |
6703 | LoadingUnit& unit = LoadingUnit::Handle(zone_); |
6704 | unit ^= units.At(LoadingUnit::kRootId); |
6705 | unit.set_base_objects(refs); |
6706 | } |
6707 | isolate->isolate_object_store()->PreallocateObjects(); |
6708 | |
6709 | // Setup native resolver for bootstrap impl. |
6710 | Bootstrap::SetupNativeResolver(); |
6711 | } |
6712 | |
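// Reads a deferred loading unit snapshot, mirroring
// Serializer::WriteUnitSnapshot. The unit's base objects come from its parent
// unit, and its deferred Code objects are given their real instructions in
// place of the not-loaded placeholders installed when the program snapshot
// was read.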
6713 | ApiErrorPtr Deserializer::ReadUnitSnapshot(const LoadingUnit& unit) { |
6714 | Array& units = Array::Handle( |
6715 | zone_, thread()->isolate()->object_store()->loading_units()); |
6716 | uint32_t main_program_hash = Smi::Value(Smi::RawCast(units.At(0))); |
6717 | uint32_t unit_program_hash = Read<uint32_t>(); |
6718 | if (main_program_hash != unit_program_hash) { |
6719 | return ApiError::New( |
6720 | String::Handle(String::New("Deferred loading unit is from a different " |
6721 | "program than the main loading unit" ))); |
6722 | } |
6723 | |
6724 | Array& refs = Array::Handle(zone_); |
6725 | Prepare(); |
6726 | |
6727 | intptr_t deferred_start_index; |
6728 | intptr_t deferred_stop_index; |
6729 | { |
6730 | NoSafepointScope no_safepoint; |
6731 | HeapLocker hl(thread(), heap_->old_space()); |
6732 | |
6733 | // N.B.: Skipping index 0 because ref 0 is illegal. |
6734 | const Array& base_objects = Array::Handle( |
6735 | zone_, LoadingUnit::Handle(zone_, unit.parent()).base_objects()); |
6736 | for (intptr_t i = 1; i < base_objects.Length(); i++) { |
6737 | AddBaseObject(base_objects.At(i)); |
6738 | } |
6739 | |
6740 | Deserialize(); |
6741 | |
6742 | deferred_start_index = ReadUnsigned(); |
6743 | deferred_stop_index = deferred_start_index + ReadUnsigned(); |
6744 | for (intptr_t id = deferred_start_index; id < deferred_stop_index; id++) { |
6745 | CodePtr code = static_cast<CodePtr>(Ref(id)); |
6746 | ReadInstructions(code, false); |
6747 | if (code->ptr()->owner_->IsFunction()) { |
6748 | FunctionPtr func = static_cast<FunctionPtr>(code->ptr()->owner_); |
6749 | uword entry_point = code->ptr()->entry_point_; |
6750 | ASSERT(entry_point != 0); |
6751 | func->ptr()->entry_point_ = entry_point; |
6752 | uword unchecked_entry_point = code->ptr()->unchecked_entry_point_; |
6753 | ASSERT(unchecked_entry_point != 0); |
6754 | func->ptr()->unchecked_entry_point_ = unchecked_entry_point; |
6755 | } |
6756 | code->ptr()->compressed_stackmaps_ = |
6757 | static_cast<CompressedStackMapsPtr>(ReadRef()); |
6758 | code->ptr()->code_source_map_ = static_cast<CodeSourceMapPtr>(ReadRef()); |
6759 | } |
6760 | |
6761 | #if defined(DEBUG) |
6762 | int32_t section_marker = Read<int32_t>(); |
6763 | ASSERT(section_marker == kSectionMarker); |
6764 | #endif |
6765 | |
6766 | refs = refs_; |
6767 | refs_ = NULL; |
6768 | } |
6769 | |
6770 | #if defined(DEBUG) |
6771 | Isolate* isolate = thread()->isolate(); |
6772 | isolate->ValidateClassTable(); |
6773 | isolate->heap()->Verify(); |
6774 | #endif |
6775 | |
6776 | EndInstructions(refs, deferred_start_index, deferred_stop_index); |
6777 | for (intptr_t i = 0; i < num_clusters_; i++) { |
6778 | clusters_[i]->PostLoad(this, refs); |
6779 | } |
6780 | unit.set_base_objects(refs); |
6781 | |
6782 | return ApiError::null(); |
6783 | } |
6784 | |
6785 | #if !defined(DART_PRECOMPILED_RUNTIME) |
6786 | FullSnapshotWriter::FullSnapshotWriter(Snapshot::Kind kind, |
6787 | uint8_t** vm_snapshot_data_buffer, |
6788 | uint8_t** isolate_snapshot_data_buffer, |
6789 | ReAlloc alloc, |
6790 | ImageWriter* vm_image_writer, |
6791 | ImageWriter* isolate_image_writer) |
6792 | : thread_(Thread::Current()), |
6793 | kind_(kind), |
6794 | vm_snapshot_data_buffer_(vm_snapshot_data_buffer), |
6795 | isolate_snapshot_data_buffer_(isolate_snapshot_data_buffer), |
6796 | alloc_(alloc), |
6797 | vm_isolate_snapshot_size_(0), |
6798 | isolate_snapshot_size_(0), |
6799 | vm_image_writer_(vm_image_writer), |
6800 | isolate_image_writer_(isolate_image_writer), |
6801 | clustered_vm_size_(0), |
6802 | clustered_isolate_size_(0), |
6803 | mapped_data_size_(0), |
6804 | mapped_text_size_(0) { |
6805 | ASSERT(alloc_ != NULL); |
6806 | ASSERT(isolate() != NULL); |
6807 | ASSERT(heap() != NULL); |
6808 | ObjectStore* object_store = isolate()->object_store(); |
6809 | ASSERT(object_store != NULL); |
6810 | |
6811 | #if defined(DEBUG) |
6812 | isolate()->ValidateClassTable(); |
6813 | isolate()->ValidateConstants(); |
6814 | #endif // DEBUG |
6815 | |
6816 | #if defined(DART_PRECOMPILER) |
6817 | if (FLAG_write_v8_snapshot_profile_to != nullptr) { |
6818 | profile_writer_ = new (zone()) V8SnapshotProfileWriter(zone()); |
6819 | } |
6820 | #endif |
6821 | } |
6822 | |
6823 | FullSnapshotWriter::~FullSnapshotWriter() {} |
6824 | |
6825 | intptr_t FullSnapshotWriter::WriteVMSnapshot() { |
  TIMELINE_DURATION(thread(), Isolate, "WriteVMSnapshot");
6827 | |
6828 | ASSERT(vm_snapshot_data_buffer_ != NULL); |
6829 | Serializer serializer(thread(), kind_, vm_snapshot_data_buffer_, alloc_, |
6830 | kInitialSize, vm_image_writer_, /*vm=*/true, |
6831 | profile_writer_); |
6832 | |
6833 | serializer.ReserveHeader(); |
6834 | serializer.WriteVersionAndFeatures(true); |
6835 | // VM snapshot roots are: |
6836 | // - the symbol table |
6837 | // - the stub code (App-AOT, App-JIT or Core-JIT) |
6838 | |
6839 | const Array& symbols = |
6840 | Array::Handle(Dart::vm_isolate()->object_store()->symbol_table()); |
6841 | intptr_t num_objects = serializer.WriteVMSnapshot(symbols); |
6842 | serializer.FillHeader(serializer.kind()); |
6843 | clustered_vm_size_ = serializer.bytes_written(); |
6844 | |
6845 | if (Snapshot::IncludesCode(kind_)) { |
6846 | vm_image_writer_->SetProfileWriter(profile_writer_); |
6847 | vm_image_writer_->Write(serializer.stream(), true); |
6848 | mapped_data_size_ += vm_image_writer_->data_size(); |
6849 | mapped_text_size_ += vm_image_writer_->text_size(); |
6850 | vm_image_writer_->ResetOffsets(); |
6851 | vm_image_writer_->ClearProfileWriter(); |
6852 | } |
6853 | |
6854 | // The clustered part + the direct mapped data part. |
6855 | vm_isolate_snapshot_size_ = serializer.bytes_written(); |
6856 | return num_objects; |
6857 | } |
6858 | |
6859 | void FullSnapshotWriter::WriteProgramSnapshot( |
6860 | intptr_t num_base_objects, |
6861 | GrowableArray<LoadingUnitSerializationData*>* units) { |
  TIMELINE_DURATION(thread(), Isolate, "WriteProgramSnapshot");
6863 | |
6864 | Serializer serializer(thread(), kind_, isolate_snapshot_data_buffer_, alloc_, |
6865 | kInitialSize, isolate_image_writer_, /*vm=*/false, |
6866 | profile_writer_); |
6867 | serializer.set_loading_units(units); |
6868 | serializer.set_current_loading_unit_id(LoadingUnit::kRootId); |
6869 | ObjectStore* object_store = isolate()->object_store(); |
6870 | ASSERT(object_store != NULL); |
6871 | |
6872 | // These type arguments must always be retained. |
6873 | ASSERT(object_store->type_argument_int()->ptr()->IsCanonical()); |
6874 | ASSERT(object_store->type_argument_double()->ptr()->IsCanonical()); |
6875 | ASSERT(object_store->type_argument_string()->ptr()->IsCanonical()); |
6876 | ASSERT(object_store->type_argument_string_dynamic()->ptr()->IsCanonical()); |
6877 | ASSERT(object_store->type_argument_string_string()->ptr()->IsCanonical()); |
6878 | |
6879 | serializer.ReserveHeader(); |
6880 | serializer.WriteVersionAndFeatures(false); |
6881 | // Isolate snapshot roots are: |
6882 | // - the object store |
6883 | serializer.WriteProgramSnapshot(num_base_objects, object_store); |
6884 | serializer.FillHeader(serializer.kind()); |
6885 | clustered_isolate_size_ = serializer.bytes_written(); |
6886 | |
6887 | if (Snapshot::IncludesCode(kind_)) { |
6888 | isolate_image_writer_->SetProfileWriter(profile_writer_); |
6889 | isolate_image_writer_->Write(serializer.stream(), false); |
6890 | #if defined(DART_PRECOMPILER) |
6891 | isolate_image_writer_->DumpStatistics(); |
6892 | #endif |
6893 | |
6894 | mapped_data_size_ += isolate_image_writer_->data_size(); |
6895 | mapped_text_size_ += isolate_image_writer_->text_size(); |
6896 | isolate_image_writer_->ResetOffsets(); |
6897 | isolate_image_writer_->ClearProfileWriter(); |
6898 | } |
6899 | |
6900 | // The clustered part + the direct mapped data part. |
6901 | isolate_snapshot_size_ = serializer.bytes_written(); |
6902 | } |
6903 | |
6904 | void FullSnapshotWriter::WriteUnitSnapshot( |
6905 | GrowableArray<LoadingUnitSerializationData*>* units, |
6906 | LoadingUnitSerializationData* unit, |
6907 | uint32_t program_hash) { |
  TIMELINE_DURATION(thread(), Isolate, "WriteUnitSnapshot");
6909 | |
6910 | Serializer serializer(thread(), kind_, isolate_snapshot_data_buffer_, alloc_, |
6911 | kInitialSize, isolate_image_writer_, /*vm=*/false, |
6912 | profile_writer_); |
6913 | serializer.set_loading_units(units); |
6914 | serializer.set_current_loading_unit_id(unit->id()); |
6915 | |
6916 | serializer.ReserveHeader(); |
6917 | serializer.WriteVersionAndFeatures(false); |
6918 | serializer.WriteUnitSnapshot(unit, program_hash); |
6919 | serializer.FillHeader(serializer.kind()); |
6920 | clustered_isolate_size_ = serializer.bytes_written(); |
6921 | |
6922 | if (Snapshot::IncludesCode(kind_)) { |
6923 | isolate_image_writer_->SetProfileWriter(profile_writer_); |
6924 | isolate_image_writer_->Write(serializer.stream(), false); |
6925 | #if defined(DART_PRECOMPILER) |
6926 | isolate_image_writer_->DumpStatistics(); |
6927 | #endif |
6928 | |
6929 | mapped_data_size_ += isolate_image_writer_->data_size(); |
6930 | mapped_text_size_ += isolate_image_writer_->text_size(); |
6931 | isolate_image_writer_->ResetOffsets(); |
6932 | isolate_image_writer_->ClearProfileWriter(); |
6933 | } |
6934 | |
6935 | // The clustered part + the direct mapped data part. |
6936 | isolate_snapshot_size_ = serializer.bytes_written(); |
6937 | } |
6938 | |
6939 | void FullSnapshotWriter::WriteFullSnapshot( |
6940 | GrowableArray<LoadingUnitSerializationData*>* data) { |
6941 | intptr_t num_base_objects; |
6942 | if (vm_snapshot_data_buffer() != NULL) { |
6943 | num_base_objects = WriteVMSnapshot(); |
6944 | ASSERT(num_base_objects != 0); |
6945 | } else { |
6946 | num_base_objects = 0; |
6947 | } |
6948 | |
6949 | if (isolate_snapshot_data_buffer() != NULL) { |
6950 | WriteProgramSnapshot(num_base_objects, data); |
6951 | } |
6952 | |
6953 | if (FLAG_print_snapshot_sizes) { |
6954 | OS::Print("VMIsolate(CodeSize): %" Pd "\n" , clustered_vm_size_); |
6955 | OS::Print("Isolate(CodeSize): %" Pd "\n" , clustered_isolate_size_); |
6956 | OS::Print("ReadOnlyData(CodeSize): %" Pd "\n" , mapped_data_size_); |
6957 | OS::Print("Instructions(CodeSize): %" Pd "\n" , mapped_text_size_); |
6958 | OS::Print("Total(CodeSize): %" Pd "\n" , |
6959 | clustered_vm_size_ + clustered_isolate_size_ + mapped_data_size_ + |
6960 | mapped_text_size_); |
6961 | } |
6962 | |
6963 | #if defined(DART_PRECOMPILER) |
6964 | if (FLAG_write_v8_snapshot_profile_to != nullptr) { |
6965 | profile_writer_->Write(FLAG_write_v8_snapshot_profile_to); |
6966 | } |
6967 | #endif |
6968 | } |
#endif  // !defined(DART_PRECOMPILED_RUNTIME)
6970 | |
6971 | FullSnapshotReader::FullSnapshotReader(const Snapshot* snapshot, |
6972 | const uint8_t* instructions_buffer, |
6973 | Thread* thread) |
6974 | : kind_(snapshot->kind()), |
6975 | thread_(thread), |
6976 | buffer_(snapshot->Addr()), |
6977 | size_(snapshot->length()), |
6978 | data_image_(snapshot->DataImage()), |
6979 | instructions_image_(instructions_buffer) { |
6980 | } |
6981 | |
char* SnapshotHeaderReader::InitializeGlobalVMFlagsFromSnapshot(
    const Snapshot* snapshot) {
  SnapshotHeaderReader header_reader(snapshot);
6985 | |
6986 | char* error = header_reader.VerifyVersion(); |
6987 | if (error != nullptr) { |
6988 | return error; |
6989 | } |
6990 | |
6991 | const char* features = nullptr; |
6992 | intptr_t features_length = 0; |
6993 | error = header_reader.ReadFeatures(&features, &features_length); |
6994 | if (error != nullptr) { |
6995 | return error; |
6996 | } |
6997 | |
6998 | ASSERT(features[features_length] == '\0'); |
6999 | const char* cursor = features; |
7000 | while (*cursor != '\0') { |
7001 | while (*cursor == ' ') { |
7002 | cursor++; |
7003 | } |
7004 | |
    const char* end = strstr(cursor, " ");
7006 | if (end == nullptr) { |
7007 | end = features + features_length; |
7008 | } |
7009 | |
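// The features string is a space-separated list of flag names, each
// optionally prefixed with "no-". SET_FLAG adopts the snapshot's value for a
// flag that is settable in this build; CHECK_FLAG rejects the snapshot if it
// requires a value that is fixed the other way in this build.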
7010 | #define SET_FLAG(name) \ |
7011 | if (strncmp(cursor, #name, end - cursor) == 0) { \ |
7012 | FLAG_##name = true; \ |
7013 | cursor = end; \ |
7014 | continue; \ |
7015 | } \ |
7016 | if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \ |
7017 | FLAG_##name = false; \ |
7018 | cursor = end; \ |
7019 | continue; \ |
7020 | } |
7021 | |
7022 | #define CHECK_FLAG(name, mode) \ |
7023 | if (strncmp(cursor, #name, end - cursor) == 0) { \ |
7024 | if (!FLAG_##name) { \ |
7025 | return header_reader.BuildError("Flag " #name \ |
7026 | " is true in snapshot, " \ |
7027 | "but " #name \ |
7028 | " is always false in " mode); \ |
7029 | } \ |
7030 | cursor = end; \ |
7031 | continue; \ |
7032 | } \ |
7033 | if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \ |
7034 | if (FLAG_##name) { \ |
7035 | return header_reader.BuildError("Flag " #name \ |
7036 | " is false in snapshot, " \ |
7037 | "but " #name \ |
7038 | " is always true in " mode); \ |
7039 | } \ |
7040 | cursor = end; \ |
7041 | continue; \ |
7042 | } |
7043 | |
7044 | #define SET_P(name, T, DV, C) SET_FLAG(name) |
7045 | |
7046 | #if defined(PRODUCT) |
7047 | #define SET_OR_CHECK_R(name, PV, T, DV, C) CHECK_FLAG(name, "product mode") |
7048 | #else |
7049 | #define SET_OR_CHECK_R(name, PV, T, DV, C) SET_FLAG(name) |
7050 | #endif |
7051 | |
7052 | #if defined(PRODUCT) |
7053 | #define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) CHECK_FLAG(name, "product mode") |
7054 | #elif defined(DART_PRECOMPILED_RUNTIME) |
7055 | #define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) \ |
7056 | CHECK_FLAG(name, "the precompiled runtime") |
7057 | #else |
#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) SET_FLAG(name)
7059 | #endif |
7060 | |
7061 | #if !defined(DEBUG) |
7062 | #define SET_OR_CHECK_D(name, T, DV, C) CHECK_FLAG(name, "non-debug mode") |
7063 | #else |
7064 | #define SET_OR_CHECK_D(name, T, DV, C) SET_FLAG(name) |
7065 | #endif |
7066 | |
7067 | VM_GLOBAL_FLAG_LIST(SET_P, SET_OR_CHECK_R, SET_OR_CHECK_C, SET_OR_CHECK_D) |
7068 | |
7069 | #undef SET_OR_CHECK_D |
7070 | #undef SET_OR_CHECK_C |
7071 | #undef SET_OR_CHECK_R |
7072 | #undef SET_P |
7073 | #undef CHECK_FLAG |
7074 | #undef SET_FLAG |
7075 | |
7076 | #if defined(DART_PRECOMPILED_RUNTIME) |
7077 | if (FLAG_sound_null_safety == kNullSafetyOptionUnspecified) { |
7078 | if (strncmp(cursor, "null-safety" , end - cursor) == 0) { |
7079 | FLAG_sound_null_safety = kNullSafetyOptionStrong; |
7080 | cursor = end; |
7081 | continue; |
7082 | } |
7083 | if (strncmp(cursor, "no-null-safety" , end - cursor) == 0) { |
7084 | FLAG_sound_null_safety = kNullSafetyOptionWeak; |
7085 | cursor = end; |
7086 | continue; |
7087 | } |
7088 | } |
7089 | #endif // defined(DART_PRECOMPILED_RUNTIME) |
7090 | |
7091 | cursor = end; |
7092 | } |
7093 | |
7094 | return nullptr; |
7095 | } |
7096 | |
bool SnapshotHeaderReader::NullSafetyFromSnapshot(const Snapshot* snapshot) {
  bool null_safety = false;
  SnapshotHeaderReader header_reader(snapshot);
7100 | const char* features = nullptr; |
7101 | intptr_t features_length = 0; |
7102 | |
7103 | char* error = header_reader.ReadFeatures(&features, &features_length); |
7104 | if (error != nullptr) { |
7105 | return false; |
7106 | } |
7107 | |
7108 | ASSERT(features[features_length] == '\0'); |
7109 | const char* cursor = features; |
7110 | while (*cursor != '\0') { |
7111 | while (*cursor == ' ') { |
7112 | cursor++; |
7113 | } |
7114 | |
    const char* end = strstr(cursor, " ");
7116 | if (end == nullptr) { |
7117 | end = features + features_length; |
7118 | } |
7119 | |
7120 | if (strncmp(cursor, "null-safety" , end - cursor) == 0) { |
7121 | cursor = end; |
7122 | null_safety = true; |
7123 | continue; |
7124 | } |
7125 | if (strncmp(cursor, "no-null-safety" , end - cursor) == 0) { |
7126 | cursor = end; |
7127 | null_safety = false; |
7128 | continue; |
7129 | } |
7130 | |
7131 | cursor = end; |
7132 | } |
7133 | |
7134 | return null_safety; |
7135 | } |
7136 | |
7137 | ApiErrorPtr FullSnapshotReader::ReadVMSnapshot() { |
  SnapshotHeaderReader header_reader(kind_, buffer_, size_);
7139 | |
7140 | intptr_t offset = 0; |
7141 | char* error = |
7142 | header_reader.VerifyVersionAndFeatures(/*isolate=*/NULL, &offset); |
7143 | if (error != nullptr) { |
7144 | return ConvertToApiError(error); |
7145 | } |
7146 | |
7147 | Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_, |
7148 | instructions_image_, offset); |
7149 | ApiErrorPtr api_error = deserializer.VerifyImageAlignment(); |
7150 | if (api_error != ApiError::null()) { |
7151 | return api_error; |
7152 | } |
7153 | |
7154 | if (Snapshot::IncludesCode(kind_)) { |
7155 | ASSERT(data_image_ != NULL); |
7156 | thread_->isolate()->SetupImagePage(data_image_, |
7157 | /* is_executable */ false); |
7158 | ASSERT(instructions_image_ != NULL); |
7159 | thread_->isolate()->SetupImagePage(instructions_image_, |
7160 | /* is_executable */ true); |
7161 | } |
7162 | |
7163 | deserializer.ReadVMSnapshot(); |
7164 | |
7165 | #if defined(DART_PRECOMPILED_RUNTIME) |
7166 | // Initialize entries in the VM portion of the BSS segment. |
7167 | ASSERT(Snapshot::IncludesCode(kind_)); |
7168 | Image image(instructions_image_); |
7169 | if (image.bss_offset() != 0) { |
7170 | // The const cast is safe because we're translating from the start of the |
7171 | // instructions (read-only) to the start of the BSS (read-write). |
7172 | uword* const bss_start = const_cast<uword*>(reinterpret_cast<const uword*>( |
7173 | instructions_image_ + image.bss_offset())); |
7174 | BSS::Initialize(thread_, bss_start, /*vm=*/true); |
7175 | } |
7176 | #endif // defined(DART_PRECOMPILED_RUNTIME) |
7177 | |
7178 | return ApiError::null(); |
7179 | } |
7180 | |
7181 | ApiErrorPtr FullSnapshotReader::ReadProgramSnapshot() { |
  SnapshotHeaderReader header_reader(kind_, buffer_, size_);
7183 | intptr_t offset = 0; |
7184 | char* error = |
7185 | header_reader.VerifyVersionAndFeatures(thread_->isolate(), &offset); |
7186 | if (error != nullptr) { |
7187 | return ConvertToApiError(error); |
7188 | } |
7189 | |
7190 | Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_, |
7191 | instructions_image_, offset); |
7192 | ApiErrorPtr api_error = deserializer.VerifyImageAlignment(); |
7193 | if (api_error != ApiError::null()) { |
7194 | return api_error; |
7195 | } |
7196 | |
7197 | if (Snapshot::IncludesCode(kind_)) { |
7198 | ASSERT(data_image_ != NULL); |
7199 | thread_->isolate()->SetupImagePage(data_image_, |
7200 | /* is_executable */ false); |
7201 | ASSERT(instructions_image_ != NULL); |
7202 | thread_->isolate()->SetupImagePage(instructions_image_, |
7203 | /* is_executable */ true); |
7204 | } |
7205 | |
7206 | auto object_store = thread_->isolate()->object_store(); |
7207 | deserializer.ReadProgramSnapshot(object_store); |
7208 | |
7209 | PatchGlobalObjectPool(); |
7210 | InitializeBSS(); |
7211 | |
7212 | return ApiError::null(); |
7213 | } |
7214 | |
7215 | ApiErrorPtr FullSnapshotReader::ReadUnitSnapshot(const LoadingUnit& unit) { |
  SnapshotHeaderReader header_reader(kind_, buffer_, size_);
7217 | intptr_t offset = 0; |
7218 | char* error = |
7219 | header_reader.VerifyVersionAndFeatures(thread_->isolate(), &offset); |
7220 | if (error != nullptr) { |
7221 | return ConvertToApiError(error); |
7222 | } |
7223 | |
7224 | Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_, |
7225 | instructions_image_, offset); |
7226 | ApiErrorPtr api_error = deserializer.VerifyImageAlignment(); |
7227 | if (api_error != ApiError::null()) { |
7228 | return api_error; |
7229 | } |
7230 | |
7231 | if (Snapshot::IncludesCode(kind_)) { |
7232 | ASSERT(data_image_ != NULL); |
7233 | thread_->isolate()->SetupImagePage(data_image_, |
7234 | /* is_executable */ false); |
7235 | ASSERT(instructions_image_ != NULL); |
7236 | thread_->isolate()->SetupImagePage(instructions_image_, |
7237 | /* is_executable */ true); |
7238 | } |
7239 | |
7240 | api_error = deserializer.ReadUnitSnapshot(unit); |
7241 | if (api_error != ApiError::null()) { |
7242 | return api_error; |
7243 | } |
7244 | |
7245 | PatchGlobalObjectPool(); |
7246 | InitializeBSS(); |
7247 | |
7248 | return ApiError::null(); |
7249 | } |
7250 | |
7251 | void FullSnapshotReader::PatchGlobalObjectPool() { |
7252 | #if defined(DART_PRECOMPILED_RUNTIME) |
7253 | if (FLAG_use_bare_instructions) { |
7254 | // By default, every switchable call site will put (ic_data, code) into the |
7255 | // object pool. The [code] is initialized (at AOT compile-time) to be a |
7256 | // [StubCode::SwitchableCallMiss]. |
7257 | // |
    // With --use-bare-instructions we reduce the extra indirection via the
    // [code] object and instead store (ic_data, entrypoint) in the object
    // pool.
    //
    // Since the actual [entrypoint] is only known at AOT runtime, we switch
    // all existing switchable call stub entries in the object pool to their
    // monomorphic entrypoints here.
7263 | auto zone = thread_->zone(); |
7264 | const auto& pool = ObjectPool::Handle( |
7265 | zone, ObjectPool::RawCast( |
7266 | thread_->isolate()->object_store()->global_object_pool())); |
7267 | auto& entry = Object::Handle(zone); |
7268 | auto& smi = Smi::Handle(zone); |
7269 | for (intptr_t i = 0; i < pool.Length(); i++) { |
7270 | if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) { |
7271 | entry = pool.ObjectAt(i); |
7272 | if (entry.raw() == StubCode::SwitchableCallMiss().raw()) { |
7273 | smi = Smi::FromAlignedAddress( |
7274 | StubCode::SwitchableCallMiss().MonomorphicEntryPoint()); |
7275 | pool.SetTypeAt(i, ObjectPool::EntryType::kImmediate, |
7276 | ObjectPool::Patchability::kPatchable); |
7277 | pool.SetObjectAt(i, smi); |
7278 | } else if (entry.raw() == StubCode::MegamorphicCall().raw()) { |
7279 | smi = Smi::FromAlignedAddress( |
7280 | StubCode::MegamorphicCall().MonomorphicEntryPoint()); |
7281 | pool.SetTypeAt(i, ObjectPool::EntryType::kImmediate, |
7282 | ObjectPool::Patchability::kPatchable); |
7283 | pool.SetObjectAt(i, smi); |
7284 | } |
7285 | } |
7286 | } |
7287 | } |
7288 | #endif // defined(DART_PRECOMPILED_RUNTIME) |
7289 | } |
7290 | |
7291 | void FullSnapshotReader::InitializeBSS() { |
7292 | #if defined(DART_PRECOMPILED_RUNTIME) |
7293 | // Initialize entries in the isolate portion of the BSS segment. |
7294 | ASSERT(Snapshot::IncludesCode(kind_)); |
7295 | Image image(instructions_image_); |
7296 | if (image.bss_offset() != 0) { |
7297 | // The const cast is safe because we're translating from the start of the |
7298 | // instructions (read-only) to the start of the BSS (read-write). |
7299 | uword* const bss_start = const_cast<uword*>(reinterpret_cast<const uword*>( |
7300 | instructions_image_ + image.bss_offset())); |
7301 | BSS::Initialize(thread_, bss_start, /*vm=*/false); |
7302 | } |
7303 | #endif // defined(DART_PRECOMPILED_RUNTIME) |
7304 | } |
7305 | |
7306 | } // namespace dart |
7307 | |