1 | // Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/program_visitor.h" |
6 | |
7 | #include "vm/code_patcher.h" |
8 | #include "vm/deopt_instructions.h" |
9 | #include "vm/hash_map.h" |
10 | #include "vm/object.h" |
11 | #include "vm/object_store.h" |
12 | #include "vm/symbols.h" |
13 | |
14 | namespace dart { |
15 | |
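// A linked-list element holding a zone handle to one worklist object.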
16 | class WorklistElement : public ZoneAllocated { |
17 | public: |
18 | WorklistElement(Zone* zone, const Object& object) |
19 | : object_(Object::Handle(zone, object.raw())), next_(nullptr) {} |
20 | |
21 | ObjectPtr value() const { return object_.raw(); } |
22 | |
23 | void set_next(WorklistElement* elem) { next_ = elem; } |
24 | WorklistElement* next() const { return next_; } |
25 | |
26 | private: |
27 | const Object& object_; |
28 | WorklistElement* next_; |
29 | |
30 | DISALLOW_COPY_AND_ASSIGN(WorklistElement); |
31 | }; |
32 | |
// Implements a FIFO queue with IsEmpty, Add, and Remove operations.
34 | class Worklist : public ValueObject { |
35 | public: |
36 | explicit Worklist(Zone* zone) |
37 | : zone_(zone), first_(nullptr), last_(nullptr) {} |
38 | |
39 | bool IsEmpty() const { return first_ == nullptr; } |
40 | |
41 | void Add(const Object& value) { |
42 | auto element = new (zone_) WorklistElement(zone_, value); |
43 | if (first_ == nullptr) { |
44 | first_ = element; |
45 | ASSERT(last_ == nullptr); |
46 | } else { |
47 | ASSERT(last_ != nullptr); |
48 | last_->set_next(element); |
49 | } |
50 | last_ = element; |
51 | ASSERT(first_ != nullptr && last_ != nullptr); |
52 | } |
53 | |
54 | ObjectPtr Remove() { |
55 | ASSERT(first_ != nullptr); |
56 | WorklistElement* result = first_; |
57 | first_ = first_->next(); |
58 | if (first_ == nullptr) { |
59 | last_ = nullptr; |
60 | } |
61 | return result->value(); |
62 | } |
63 | |
64 | private: |
65 | Zone* const zone_; |
66 | WorklistElement* first_; |
67 | WorklistElement* last_; |
68 | |
69 | DISALLOW_COPY_AND_ASSIGN(Worklist); |
70 | }; |
71 | |
72 | // Walks through the classes, functions, and code for the current program. |
73 | // |
74 | // Uses the heap object ID table to determine whether or not a given object |
75 | // has been visited already. |
76 | class ProgramWalker : public ValueObject { |
77 | public: |
78 | ProgramWalker(Zone* zone, Heap* heap, ClassVisitor* visitor) |
79 | : heap_(heap), |
80 | visitor_(visitor), |
81 | worklist_(zone), |
82 | class_object_(Object::Handle(zone)), |
83 | class_fields_(Array::Handle(zone)), |
84 | class_field_(Field::Handle(zone)), |
85 | class_functions_(Array::Handle(zone)), |
86 | class_function_(Function::Handle(zone)), |
87 | class_code_(Code::Handle(zone)), |
88 | function_code_(Code::Handle(zone)), |
89 | static_calls_array_(Array::Handle(zone)), |
90 | static_calls_table_entry_(Object::Handle(zone)), |
91 | worklist_entry_(Object::Handle(zone)) {} |
92 | |
93 | ~ProgramWalker() { heap_->ResetObjectIdTable(); } |
94 | |
95 | // Adds the given object to the worklist if it's an object type that the |
96 | // visitor can visit. |
97 | void AddToWorklist(const Object& object) { |
98 | // We don't visit null, non-heap objects, or objects in the VM heap. |
99 | if (object.IsNull() || object.IsSmi() || object.InVMIsolateHeap()) return; |
100 | // Check and set visited, even if we don't end up adding this to the list. |
101 | if (heap_->GetObjectId(object.raw()) != 0) return; |
102 | heap_->SetObjectId(object.raw(), 1); |
103 | if (object.IsClass() || |
104 | (object.IsFunction() && visitor_->IsFunctionVisitor()) || |
105 | (object.IsCode() && visitor_->IsCodeVisitor())) { |
106 | worklist_.Add(object); |
107 | } |
108 | } |
109 | |
110 | void VisitWorklist() { |
111 | while (!worklist_.IsEmpty()) { |
112 | worklist_entry_ = worklist_.Remove(); |
113 | if (worklist_entry_.IsClass()) { |
114 | VisitClass(Class::Cast(worklist_entry_)); |
115 | } else if (worklist_entry_.IsFunction()) { |
116 | VisitFunction(Function::Cast(worklist_entry_)); |
117 | } else if (worklist_entry_.IsCode()) { |
118 | VisitCode(Code::Cast(worklist_entry_)); |
119 | } else { |
        FATAL1("Got unexpected object %s", worklist_entry_.ToCString());
121 | } |
122 | } |
123 | } |
124 | |
125 | private: |
126 | void VisitClass(const Class& cls) { |
127 | visitor_->VisitClass(cls); |
128 | |
129 | if (!visitor_->IsFunctionVisitor()) return; |
130 | |
131 | class_functions_ = cls.functions(); |
132 | for (intptr_t j = 0; j < class_functions_.Length(); j++) { |
133 | class_function_ ^= class_functions_.At(j); |
134 | AddToWorklist(class_function_); |
135 | if (class_function_.HasImplicitClosureFunction()) { |
136 | class_function_ = class_function_.ImplicitClosureFunction(); |
137 | AddToWorklist(class_function_); |
138 | } |
139 | } |
140 | |
141 | class_functions_ = cls.invocation_dispatcher_cache(); |
142 | for (intptr_t j = 0; j < class_functions_.Length(); j++) { |
143 | class_object_ = class_functions_.At(j); |
144 | if (class_object_.IsFunction()) { |
145 | class_function_ ^= class_functions_.At(j); |
146 | AddToWorklist(class_function_); |
147 | } |
148 | } |
149 | |
150 | class_fields_ = cls.fields(); |
151 | for (intptr_t j = 0; j < class_fields_.Length(); j++) { |
152 | class_field_ ^= class_fields_.At(j); |
153 | if (class_field_.HasInitializerFunction()) { |
154 | class_function_ = class_field_.InitializerFunction(); |
155 | AddToWorklist(class_function_); |
156 | } |
157 | } |
158 | |
159 | if (!visitor_->IsCodeVisitor()) return; |
160 | |
161 | class_code_ = cls.allocation_stub(); |
162 | if (!class_code_.IsNull()) AddToWorklist(class_code_); |
163 | } |
164 | |
165 | void VisitFunction(const Function& function) { |
166 | ASSERT(visitor_->IsFunctionVisitor()); |
167 | visitor_->AsFunctionVisitor()->VisitFunction(function); |
168 | if (!visitor_->IsCodeVisitor() || !function.HasCode()) return; |
169 | function_code_ = function.CurrentCode(); |
170 | AddToWorklist(function_code_); |
171 | } |
172 | |
173 | void VisitCode(const Code& code) { |
174 | ASSERT(visitor_->IsCodeVisitor()); |
175 | visitor_->AsCodeVisitor()->VisitCode(code); |
176 | |
177 | // In the precompiler, some entries in the static calls table may need |
178 | // to be visited as they may not be reachable from other sources. |
179 | // |
180 | // TODO(dartbug.com/41636): Figure out why walking the static calls table |
181 | // in JIT mode with the DedupInstructions visitor fails, so we can remove |
182 | // the check for AOT mode. |
183 | static_calls_array_ = code.static_calls_target_table(); |
184 | if (FLAG_precompiled_mode && !static_calls_array_.IsNull()) { |
185 | StaticCallsTable static_calls(static_calls_array_); |
186 | for (auto& view : static_calls) { |
187 | static_calls_table_entry_ = |
188 | view.Get<Code::kSCallTableCodeOrTypeTarget>(); |
189 | if (static_calls_table_entry_.IsCode()) { |
190 | AddToWorklist(Code::Cast(static_calls_table_entry_)); |
191 | } |
192 | } |
193 | } |
194 | } |
195 | |
196 | Heap* const heap_; |
197 | ClassVisitor* const visitor_; |
198 | Worklist worklist_; |
199 | Object& class_object_; |
200 | Array& class_fields_; |
201 | Field& class_field_; |
202 | Array& class_functions_; |
203 | Function& class_function_; |
204 | Code& class_code_; |
205 | Code& function_code_; |
206 | Array& static_calls_array_; |
207 | Object& static_calls_table_entry_; |
208 | Object& worklist_entry_; |
209 | }; |
210 | |
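// Seeds the worklist with the classes and used scripts of every library, the
// tagged entries of the global object pool (if any), the closure functions,
// and the dispatch table code entries, then drains the worklist.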
211 | void ProgramVisitor::WalkProgram(Zone* zone, |
212 | Isolate* isolate, |
213 | ClassVisitor* visitor) { |
214 | auto const object_store = isolate->object_store(); |
215 | auto const heap = isolate->heap(); |
216 | ProgramWalker walker(zone, heap, visitor); |
217 | |
218 | // Walk through the libraries and patches, looking for visitable objects. |
219 | const auto& libraries = |
220 | GrowableObjectArray::Handle(zone, object_store->libraries()); |
221 | auto& lib = Library::Handle(zone); |
222 | auto& cls = Class::Handle(zone); |
223 | auto& entry = Object::Handle(zone); |
224 | auto& patches = GrowableObjectArray::Handle(zone); |
225 | |
226 | for (intptr_t i = 0; i < libraries.Length(); i++) { |
227 | lib ^= libraries.At(i); |
228 | ClassDictionaryIterator it(lib, ClassDictionaryIterator::kIteratePrivate); |
229 | while (it.HasNext()) { |
230 | cls = it.GetNextClass(); |
231 | walker.AddToWorklist(cls); |
232 | } |
233 | patches = lib.used_scripts(); |
234 | for (intptr_t j = 0; j < patches.Length(); j++) { |
235 | entry = patches.At(j); |
236 | walker.AddToWorklist(entry); |
237 | } |
238 | } |
239 | |
240 | // If there's a global object pool, add any visitable objects. |
241 | const auto& global_object_pool = |
242 | ObjectPool::Handle(zone, object_store->global_object_pool()); |
243 | if (!global_object_pool.IsNull()) { |
244 | auto& object = Object::Handle(zone); |
245 | for (intptr_t i = 0; i < global_object_pool.Length(); i++) { |
246 | auto const type = global_object_pool.TypeAt(i); |
247 | if (type != ObjectPool::EntryType::kTaggedObject) continue; |
248 | object = global_object_pool.ObjectAt(i); |
249 | walker.AddToWorklist(object); |
250 | } |
251 | } |
252 | |
253 | if (visitor->IsFunctionVisitor()) { |
254 | // Function objects not necessarily reachable from classes. |
255 | auto& function = Function::Handle(zone); |
256 | const auto& closures = |
257 | GrowableObjectArray::Handle(zone, object_store->closure_functions()); |
258 | ASSERT(!closures.IsNull()); |
259 | for (intptr_t i = 0; i < closures.Length(); i++) { |
260 | function ^= closures.At(i); |
261 | walker.AddToWorklist(function); |
262 | ASSERT(!function.HasImplicitClosureFunction()); |
263 | } |
264 | } |
265 | |
266 | if (visitor->IsCodeVisitor()) { |
267 | // Code objects not necessarily reachable from functions. |
268 | auto& code = Code::Handle(zone); |
269 | const auto& dispatch_table_entries = |
270 | Array::Handle(zone, object_store->dispatch_table_code_entries()); |
271 | if (!dispatch_table_entries.IsNull()) { |
272 | for (intptr_t i = 0; i < dispatch_table_entries.Length(); i++) { |
273 | code ^= dispatch_table_entries.At(i); |
274 | walker.AddToWorklist(code); |
275 | } |
276 | } |
277 | } |
278 | |
279 | // Walk the program starting from any roots we added to the worklist. |
280 | walker.VisitWorklist(); |
281 | } |
282 | |
283 | #if !defined(DART_PRECOMPILED_RUNTIME) |
// A base class for deduplication of objects. T is the type of canonical
// objects being stored, whereas S is a trait appropriate for a
// DirectChainedHashMap-based set containing those canonical objects.
287 | template <typename T, typename S> |
288 | class Dedupper : public ValueObject { |
289 | public: |
290 | explicit Dedupper(Zone* zone) : zone_(zone), canonical_objects_(zone) {} |
291 | virtual ~Dedupper() {} |
292 | |
293 | protected: |
294 | // Predicate for objects of type T. Must be overridden for class hierarchies |
295 | // like Instance and AbstractType, as it defaults to class ID comparison. |
296 | virtual bool IsCorrectType(const Object& obj) const { |
297 | return obj.GetClassId() == T::kClassId; |
298 | } |
299 | |
300 | // Predicate for choosing Ts to canonicalize. |
301 | virtual bool CanCanonicalize(const T& t) const { return true; } |
302 | |
303 | // Predicate for objects that are okay to add to the canonical hash set. |
304 | // Override IsCorrectType and/or CanCanonicalize to change the behavior. |
305 | bool ShouldAdd(const Object& obj) const { |
306 | return !obj.IsNull() && IsCorrectType(obj) && CanCanonicalize(T::Cast(obj)); |
307 | } |
308 | |
309 | void AddCanonical(const T& obj) { |
310 | if (!ShouldAdd(obj)) return; |
311 | ASSERT(!canonical_objects_.HasKey(&obj)); |
312 | canonical_objects_.Insert(&T::ZoneHandle(zone_, obj.raw())); |
313 | } |
314 | |
315 | void AddVMBaseObjects() { |
316 | const auto& object_table = Object::vm_isolate_snapshot_object_table(); |
317 | auto& obj = Object::Handle(zone_); |
318 | for (intptr_t i = 0; i < object_table.Length(); i++) { |
319 | obj = object_table.At(i); |
320 | if (!ShouldAdd(obj)) continue; |
321 | AddCanonical(T::Cast(obj)); |
322 | } |
323 | } |
324 | |
325 | typename T::ObjectPtrType Dedup(const T& obj) { |
326 | if (ShouldAdd(obj)) { |
327 | if (auto const canonical = canonical_objects_.LookupValue(&obj)) { |
328 | return canonical->raw(); |
329 | } |
330 | AddCanonical(obj); |
331 | } |
332 | return obj.raw(); |
333 | } |
334 | |
335 | Zone* const zone_; |
336 | DirectChainedHashMap<S> canonical_objects_; |
337 | }; |
338 | |
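// Patches the static calls in each Code object to call their targets directly
// when those targets have already been compiled.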
339 | void ProgramVisitor::BindStaticCalls(Zone* zone, Isolate* isolate) { |
340 | class BindStaticCallsVisitor : public CodeVisitor { |
341 | public: |
342 | explicit BindStaticCallsVisitor(Zone* zone) |
343 | : table_(Array::Handle(zone)), |
344 | kind_and_offset_(Smi::Handle(zone)), |
345 | target_(Object::Handle(zone)), |
346 | target_code_(Code::Handle(zone)) {} |
347 | |
348 | void VisitCode(const Code& code) { |
349 | table_ = code.static_calls_target_table(); |
350 | if (table_.IsNull()) return; |
351 | |
352 | StaticCallsTable static_calls(table_); |
353 | // We can only remove the target table in precompiled mode, since more |
354 | // calls may be added later otherwise. |
355 | bool only_call_via_code = FLAG_precompiled_mode; |
356 | for (const auto& view : static_calls) { |
357 | kind_and_offset_ = view.Get<Code::kSCallTableKindAndOffset>(); |
358 | auto const kind = Code::KindField::decode(kind_and_offset_.Value()); |
359 | if (kind != Code::kCallViaCode) { |
360 | ASSERT(kind == Code::kPcRelativeCall || |
361 | kind == Code::kPcRelativeTailCall || |
362 | kind == Code::kPcRelativeTTSCall); |
363 | only_call_via_code = false; |
364 | continue; |
365 | } |
366 | |
367 | target_ = view.Get<Code::kSCallTableFunctionTarget>(); |
368 | if (target_.IsNull()) { |
369 | target_ = |
370 | Code::RawCast(view.Get<Code::kSCallTableCodeOrTypeTarget>()); |
371 | ASSERT(!Code::Cast(target_).IsFunctionCode()); |
372 | // Allocation stub or AllocateContext or AllocateArray or ... |
373 | continue; |
374 | } |
375 | |
376 | auto const pc_offset = |
377 | Code::OffsetField::decode(kind_and_offset_.Value()); |
378 | const uword pc = pc_offset + code.PayloadStart(); |
379 | |
380 | // In JIT mode, static calls initially call the CallStaticFunction stub |
381 | // because their target might not be compiled yet. If the target has |
382 | // been compiled by this point, we patch the call to call the target |
383 | // directly. |
384 | // |
        // In precompiled mode, the binder runs after tree shaking, during
        // which all targets have been compiled, and so the binder replaces
        // all static calls with direct calls to the target.
388 | // |
389 | // Cf. runtime entry PatchStaticCall called from CallStaticFunction |
390 | // stub. |
391 | const auto& fun = Function::Cast(target_); |
392 | ASSERT(!FLAG_precompiled_mode || fun.HasCode()); |
393 | target_code_ = fun.HasCode() ? fun.CurrentCode() |
394 | : StubCode::CallStaticFunction().raw(); |
395 | CodePatcher::PatchStaticCallAt(pc, code, target_code_); |
396 | } |
397 | |
398 | if (only_call_via_code) { |
399 | ASSERT(FLAG_precompiled_mode); |
400 | // In precompiled mode, the Dart runtime won't patch static calls |
401 | // anymore, so drop the static call table to save space. |
402 | code.set_static_calls_target_table(Object::empty_array()); |
403 | } |
404 | } |
405 | |
406 | private: |
407 | Array& table_; |
408 | Smi& kind_and_offset_; |
409 | Object& target_; |
410 | Code& target_code_; |
411 | }; |
412 | |
413 | BindStaticCallsVisitor visitor(zone); |
414 | WalkProgram(zone, isolate, &visitor); |
415 | } |
416 | |
417 | DECLARE_FLAG(charp, trace_precompiler_to); |
418 | DECLARE_FLAG(charp, write_v8_snapshot_profile_to); |
419 | |
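// Resets every megamorphic cache in the program to share a single empty
// bucket array.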
420 | void ProgramVisitor::ShareMegamorphicBuckets(Zone* zone, Isolate* isolate) { |
421 | const GrowableObjectArray& table = GrowableObjectArray::Handle( |
422 | zone, isolate->object_store()->megamorphic_cache_table()); |
423 | if (table.IsNull()) return; |
424 | MegamorphicCache& cache = MegamorphicCache::Handle(zone); |
425 | |
426 | const intptr_t capacity = 1; |
427 | const Array& buckets = Array::Handle( |
428 | zone, Array::New(MegamorphicCache::kEntryLength * capacity, Heap::kOld)); |
429 | const Function& handler = Function::Handle(zone); |
430 | MegamorphicCache::SetEntry(buckets, 0, Object::smi_illegal_cid(), handler); |
431 | |
432 | for (intptr_t i = 0; i < table.Length(); i++) { |
433 | cache ^= table.At(i); |
434 | cache.set_buckets(buckets); |
435 | cache.set_mask(capacity - 1); |
436 | cache.set_filled_entry_count(0); |
437 | } |
438 | } |
439 | |
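// A zone-allocated copy of a single stack map entry (spill and non-spill slot
// bit counts plus the payload bits), used to collect and deduplicate entries
// across CompressedStackMaps.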
440 | class StackMapEntry : public ZoneAllocated { |
441 | public: |
442 | StackMapEntry(Zone* zone, const CompressedStackMapsIterator& it) |
443 | : maps_(CompressedStackMaps::Handle(zone, it.maps_.raw())), |
444 | bits_container_( |
445 | CompressedStackMaps::Handle(zone, it.bits_container_.raw())), |
446 | spill_slot_bit_count_(it.current_spill_slot_bit_count_), |
447 | non_spill_slot_bit_count_(it.current_non_spill_slot_bit_count_), |
448 | bits_offset_(it.current_bits_offset_) { |
449 | ASSERT(!maps_.IsNull() && !maps_.IsGlobalTable()); |
450 | ASSERT(!bits_container_.IsNull()); |
451 | ASSERT(!maps_.UsesGlobalTable() || bits_container_.IsGlobalTable()); |
452 | // Check that the iterator was fully loaded when we ran the initializing |
    // expressions above. By the time we enter the body of the constructor,
    // it is too late to run EnsureFullyLoadedEntry().
455 | ASSERT(it.HasLoadedEntry()); |
456 | ASSERT(it.current_spill_slot_bit_count_ >= 0); |
457 | } |
458 | |
459 | static const intptr_t kHashBits = 30; |
460 | |
461 | intptr_t Hashcode() { |
462 | if (hash_ != 0) return hash_; |
463 | uint32_t hash = 0; |
464 | hash = CombineHashes(hash, spill_slot_bit_count_); |
465 | hash = CombineHashes(hash, non_spill_slot_bit_count_); |
466 | for (intptr_t i = 0; i < PayloadLength(); i++) { |
467 | hash = CombineHashes(hash, PayloadByte(i)); |
468 | } |
469 | hash_ = FinalizeHash(hash, kHashBits); |
470 | return hash_; |
471 | } |
472 | |
473 | bool Equals(const StackMapEntry* other) const { |
474 | if (spill_slot_bit_count_ != other->spill_slot_bit_count_ || |
475 | non_spill_slot_bit_count_ != other->non_spill_slot_bit_count_) { |
476 | return false; |
477 | } |
478 | // Since we ensure that bits in the payload that are not part of the |
479 | // actual stackmap data are cleared, we can just compare payloads by byte |
480 | // instead of calling IsObject for each bit. |
481 | for (intptr_t i = 0; i < PayloadLength(); i++) { |
482 | if (PayloadByte(i) != other->PayloadByte(i)) return false; |
483 | } |
484 | return true; |
485 | } |
486 | |
487 | // Encodes this StackMapEntry to the given array of bytes and returns the |
488 | // initial offset of the entry in the array. |
489 | intptr_t EncodeTo(GrowableArray<uint8_t>* array) { |
490 | auto const current_offset = array->length(); |
491 | CompressedStackMapsBuilder::EncodeLEB128(array, spill_slot_bit_count_); |
492 | CompressedStackMapsBuilder::EncodeLEB128(array, non_spill_slot_bit_count_); |
493 | for (intptr_t i = 0; i < PayloadLength(); i++) { |
494 | array->Add(PayloadByte(i)); |
495 | } |
496 | return current_offset; |
497 | } |
498 | |
499 | intptr_t UsageCount() const { return uses_; } |
500 | void IncrementUsageCount() { uses_ += 1; } |
501 | |
502 | private: |
503 | intptr_t Length() const { |
504 | return spill_slot_bit_count_ + non_spill_slot_bit_count_; |
505 | } |
506 | intptr_t PayloadLength() const { |
507 | return Utils::RoundUp(Length(), kBitsPerByte) >> kBitsPerByteLog2; |
508 | } |
509 | intptr_t PayloadByte(intptr_t offset) const { |
510 | return bits_container_.PayloadByte(bits_offset_ + offset); |
511 | } |
512 | |
513 | const CompressedStackMaps& maps_; |
514 | const CompressedStackMaps& bits_container_; |
515 | const intptr_t spill_slot_bit_count_; |
516 | const intptr_t non_spill_slot_bit_count_; |
517 | const intptr_t bits_offset_; |
518 | |
519 | intptr_t uses_ = 1; |
520 | intptr_t hash_ = 0; |
521 | }; |
522 | |
// Used for maps of indices and offsets. These are non-negative, and so the
// value for entries may be 0. Since 0 is the kNoValue of
// RawPointerKeyValueTrait<const StackMapEntry, intptr_t>, we can't use that
// trait directly.
526 | class StackMapEntryKeyIntValueTrait { |
527 | public: |
528 | typedef StackMapEntry* Key; |
529 | typedef intptr_t Value; |
530 | |
531 | struct Pair { |
532 | Key key; |
533 | Value value; |
534 | Pair() : key(nullptr), value(-1) {} |
535 | Pair(const Key key, const Value& value) |
536 | : key(ASSERT_NOTNULL(key)), value(value) {} |
537 | Pair(const Pair& other) : key(other.key), value(other.value) {} |
538 | Pair& operator=(const Pair&) = default; |
539 | }; |
540 | |
541 | static Key KeyOf(Pair kv) { return kv.key; } |
542 | static Value ValueOf(Pair kv) { return kv.value; } |
543 | static intptr_t Hashcode(Key key) { return key->Hashcode(); } |
544 | static bool IsKeyEqual(Pair kv, Key key) { return key->Equals(kv.key); } |
545 | }; |
546 | |
547 | typedef DirectChainedHashMap<StackMapEntryKeyIntValueTrait> StackMapEntryIntMap; |
548 | |
549 | void ProgramVisitor::NormalizeAndDedupCompressedStackMaps(Zone* zone, |
550 | Isolate* isolate) { |
551 | // Walks all the CSMs in Code objects and collects their entry information |
552 | // for consolidation. |
553 | class CollectStackMapEntriesVisitor : public CodeVisitor { |
554 | public: |
555 | CollectStackMapEntriesVisitor(Zone* zone, |
556 | const CompressedStackMaps& global_table) |
557 | : zone_(zone), |
558 | old_global_table_(global_table), |
559 | compressed_stackmaps_(CompressedStackMaps::Handle(zone)), |
560 | collected_entries_(zone, 2), |
561 | entry_indices_(zone), |
562 | entry_offset_(zone) { |
563 | ASSERT(old_global_table_.IsNull() || old_global_table_.IsGlobalTable()); |
564 | } |
565 | |
566 | void VisitCode(const Code& code) { |
567 | compressed_stackmaps_ = code.compressed_stackmaps(); |
568 | CompressedStackMapsIterator it(compressed_stackmaps_, old_global_table_); |
569 | while (it.MoveNext()) { |
570 | it.EnsureFullyLoadedEntry(); |
571 | auto const entry = new (zone_) StackMapEntry(zone_, it); |
572 | auto const index = entry_indices_.LookupValue(entry); |
573 | if (index < 0) { |
574 | auto new_index = collected_entries_.length(); |
575 | collected_entries_.Add(entry); |
576 | entry_indices_.Insert({entry, new_index}); |
577 | } else { |
578 | collected_entries_.At(index)->IncrementUsageCount(); |
579 | } |
580 | } |
581 | } |
582 | |
583 | // Creates a new global table of stack map information. Also adds the |
584 | // offsets of encoded StackMapEntry objects to entry_offsets for use |
585 | // when normalizing CompressedStackMaps. |
586 | CompressedStackMapsPtr CreateGlobalTable( |
587 | StackMapEntryIntMap* entry_offsets) { |
588 | ASSERT(entry_offsets->IsEmpty()); |
589 | if (collected_entries_.length() == 0) return CompressedStackMaps::null(); |
      // First, sort the entries from most used to least used. This way, the
      // most frequently used entries will have the lowest offsets, which
      // means they will be smaller when LEB128 encoded.
593 | collected_entries_.Sort( |
594 | [](StackMapEntry* const* e1, StackMapEntry* const* e2) { |
595 | return static_cast<int>((*e2)->UsageCount() - (*e1)->UsageCount()); |
596 | }); |
597 | GrowableArray<uint8_t> bytes; |
598 | // Encode the entries and record their offset in the payload. Sorting the |
599 | // entries may have changed their indices, so update those as well. |
600 | for (intptr_t i = 0, n = collected_entries_.length(); i < n; i++) { |
601 | auto const entry = collected_entries_.At(i); |
602 | entry_indices_.Update({entry, i}); |
603 | entry_offsets->Insert({entry, entry->EncodeTo(&bytes)}); |
604 | } |
605 | const auto& data = CompressedStackMaps::Handle( |
606 | zone_, CompressedStackMaps::NewGlobalTable(bytes)); |
607 | return data.raw(); |
608 | } |
609 | |
610 | private: |
611 | Zone* const zone_; |
612 | const CompressedStackMaps& old_global_table_; |
613 | |
614 | CompressedStackMaps& compressed_stackmaps_; |
615 | GrowableArray<StackMapEntry*> collected_entries_; |
616 | StackMapEntryIntMap entry_indices_; |
617 | StackMapEntryIntMap entry_offset_; |
618 | }; |
619 | |
620 | // Walks all the CSMs in Code objects, normalizes them, and then dedups them. |
621 | // |
622 | // We use normalized to refer to CSMs whose entries are references to the |
623 | // new global table created during stack map collection, and non-normalized |
624 | // for CSMs that either have inlined entry information or whose entries are |
625 | // references to the _old_ global table in the object store, if any. |
626 | class NormalizeAndDedupCompressedStackMapsVisitor |
627 | : public CodeVisitor, |
628 | public Dedupper<CompressedStackMaps, |
629 | PointerKeyValueTrait<const CompressedStackMaps>> { |
630 | public: |
631 | NormalizeAndDedupCompressedStackMapsVisitor(Zone* zone, Isolate* isolate) |
632 | : Dedupper(zone), |
633 | old_global_table_(CompressedStackMaps::Handle( |
634 | zone, |
635 | isolate->object_store()->canonicalized_stack_map_entries())), |
636 | entry_offsets_(zone), |
637 | maps_(CompressedStackMaps::Handle(zone)) { |
638 | ASSERT(old_global_table_.IsNull() || old_global_table_.IsGlobalTable()); |
      // The stack map normalization and deduplication happen in two phases:
      //
      // 1) Visit all CompressedStackMaps (CSM) objects and collect individual
      // entry info as canonicalized StackMapEntries (SMEs). Also record in
      // each SME how often the same entry info was seen across all CSMs.
644 | |
645 | CollectStackMapEntriesVisitor collect_visitor(zone, old_global_table_); |
646 | WalkProgram(zone, isolate, &collect_visitor); |
647 | |
648 | // The results of phase 1 are used to create a new global table with |
649 | // entries sorted by decreasing frequency, so that entries that appear |
      // more often in CSMs have smaller payload offsets (fewer bytes used in
651 | // the LEB128 encoding). The new global table is put into place |
652 | // immediately, as we already have a handle on the old table. |
653 | |
654 | const auto& new_global_table = CompressedStackMaps::Handle( |
655 | zone, collect_visitor.CreateGlobalTable(&entry_offsets_)); |
656 | isolate->object_store()->set_canonicalized_stack_map_entries( |
657 | new_global_table); |
658 | |
659 | // 2) Visit all CSMs and replace each with a canonicalized normalized |
660 | // version that uses the new global table for non-PC offset entry |
661 | // information. This part is done in VisitCode. |
662 | } |
663 | |
664 | void VisitCode(const Code& code) { |
665 | maps_ = code.compressed_stackmaps(); |
666 | if (maps_.IsNull()) return; |
      // The first check makes sure [maps] hasn't already been normalized,
668 | // since any normalized map already has a canonical entry in the set. |
669 | if (auto const canonical = canonical_objects_.LookupValue(&maps_)) { |
670 | maps_ = canonical->raw(); |
671 | } else { |
672 | maps_ = NormalizeEntries(maps_); |
673 | maps_ = Dedup(maps_); |
674 | } |
675 | code.set_compressed_stackmaps(maps_); |
676 | } |
677 | |
678 | private: |
679 | // Creates a normalized CSM from the given non-normalized CSM. |
680 | CompressedStackMapsPtr NormalizeEntries(const CompressedStackMaps& maps) { |
681 | GrowableArray<uint8_t> new_payload; |
682 | CompressedStackMapsIterator it(maps, old_global_table_); |
683 | intptr_t last_offset = 0; |
684 | while (it.MoveNext()) { |
685 | it.EnsureFullyLoadedEntry(); |
686 | StackMapEntry entry(zone_, it); |
687 | auto const entry_offset = entry_offsets_.LookupValue(&entry); |
688 | auto const pc_delta = it.pc_offset() - last_offset; |
689 | CompressedStackMapsBuilder::EncodeLEB128(&new_payload, pc_delta); |
690 | CompressedStackMapsBuilder::EncodeLEB128(&new_payload, entry_offset); |
691 | last_offset = it.pc_offset(); |
692 | } |
693 | return CompressedStackMaps::NewUsingTable(new_payload); |
694 | } |
695 | |
696 | const CompressedStackMaps& old_global_table_; |
697 | StackMapEntryIntMap entry_offsets_; |
698 | CompressedStackMaps& maps_; |
699 | }; |
700 | |
701 | NormalizeAndDedupCompressedStackMapsVisitor dedup_visitor(zone, isolate); |
702 | WalkProgram(zone, isolate, &dedup_visitor); |
703 | } |
704 | |
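// Traits for a DirectChainedHashMap set of PcDescriptors, hashing by length
// and comparing keys with PcDescriptors::Equals.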
705 | class PcDescriptorsKeyValueTrait { |
706 | public: |
707 | // Typedefs needed for the DirectChainedHashMap template. |
708 | typedef const PcDescriptors* Key; |
709 | typedef const PcDescriptors* Value; |
710 | typedef const PcDescriptors* Pair; |
711 | |
712 | static Key KeyOf(Pair kv) { return kv; } |
713 | |
714 | static Value ValueOf(Pair kv) { return kv; } |
715 | |
716 | static inline intptr_t Hashcode(Key key) { return key->Length(); } |
717 | |
718 | static inline bool IsKeyEqual(Pair pair, Key key) { |
719 | return pair->Equals(*key); |
720 | } |
721 | }; |
722 | |
723 | void ProgramVisitor::DedupPcDescriptors(Zone* zone, Isolate* isolate) { |
724 | class DedupPcDescriptorsVisitor |
725 | : public CodeVisitor, |
726 | public Dedupper<PcDescriptors, PcDescriptorsKeyValueTrait> { |
727 | public: |
728 | explicit DedupPcDescriptorsVisitor(Zone* zone) |
729 | : Dedupper(zone), |
730 | bytecode_(Bytecode::Handle(zone)), |
731 | pc_descriptor_(PcDescriptors::Handle(zone)) { |
732 | if (Snapshot::IncludesCode(Dart::vm_snapshot_kind())) { |
733 | // Prefer existing objects in the VM isolate. |
734 | AddVMBaseObjects(); |
735 | } |
736 | } |
737 | |
738 | void VisitCode(const Code& code) { |
739 | pc_descriptor_ = code.pc_descriptors(); |
740 | pc_descriptor_ = Dedup(pc_descriptor_); |
741 | code.set_pc_descriptors(pc_descriptor_); |
742 | } |
743 | |
744 | void VisitFunction(const Function& function) { |
745 | bytecode_ = function.bytecode(); |
746 | if (bytecode_.IsNull()) return; |
747 | if (bytecode_.InVMIsolateHeap()) return; |
748 | pc_descriptor_ = bytecode_.pc_descriptors(); |
749 | pc_descriptor_ = Dedup(pc_descriptor_); |
750 | bytecode_.set_pc_descriptors(pc_descriptor_); |
751 | } |
752 | |
753 | private: |
754 | Bytecode& bytecode_; |
755 | PcDescriptors& pc_descriptor_; |
756 | }; |
757 | |
758 | DedupPcDescriptorsVisitor visitor(zone); |
759 | WalkProgram(zone, isolate, &visitor); |
760 | } |
761 | |
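// Traits for a DirectChainedHashMap set of TypedData, hashing and comparing
// by canonical value.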
762 | class TypedDataKeyValueTrait { |
763 | public: |
764 | // Typedefs needed for the DirectChainedHashMap template. |
765 | typedef const TypedData* Key; |
766 | typedef const TypedData* Value; |
767 | typedef const TypedData* Pair; |
768 | |
769 | static Key KeyOf(Pair kv) { return kv; } |
770 | |
771 | static Value ValueOf(Pair kv) { return kv; } |
772 | |
773 | static inline intptr_t Hashcode(Key key) { return key->CanonicalizeHash(); } |
774 | |
775 | static inline bool IsKeyEqual(Pair pair, Key key) { |
776 | return pair->CanonicalizeEquals(*key); |
777 | } |
778 | }; |
779 | |
780 | class TypedDataDedupper : public Dedupper<TypedData, TypedDataKeyValueTrait> { |
781 | public: |
782 | explicit TypedDataDedupper(Zone* zone) : Dedupper(zone) {} |
783 | |
784 | private: |
785 | bool IsCorrectType(const Object& obj) const { return obj.IsTypedData(); } |
786 | }; |
787 | |
788 | void ProgramVisitor::DedupDeoptEntries(Zone* zone, Isolate* isolate) { |
789 | class DedupDeoptEntriesVisitor : public CodeVisitor, |
790 | public TypedDataDedupper { |
791 | public: |
792 | explicit DedupDeoptEntriesVisitor(Zone* zone) |
793 | : TypedDataDedupper(zone), |
794 | deopt_table_(Array::Handle(zone)), |
795 | deopt_entry_(TypedData::Handle(zone)), |
796 | offset_(Smi::Handle(zone)), |
797 | reason_and_flags_(Smi::Handle(zone)) {} |
798 | |
799 | void VisitCode(const Code& code) { |
800 | deopt_table_ = code.deopt_info_array(); |
801 | if (deopt_table_.IsNull()) return; |
802 | intptr_t length = DeoptTable::GetLength(deopt_table_); |
803 | for (intptr_t i = 0; i < length; i++) { |
804 | DeoptTable::GetEntry(deopt_table_, i, &offset_, &deopt_entry_, |
805 | &reason_and_flags_); |
806 | ASSERT(!deopt_entry_.IsNull()); |
807 | deopt_entry_ = Dedup(deopt_entry_); |
808 | ASSERT(!deopt_entry_.IsNull()); |
809 | DeoptTable::SetEntry(deopt_table_, i, offset_, deopt_entry_, |
810 | reason_and_flags_); |
811 | } |
812 | } |
813 | |
814 | private: |
815 | Array& deopt_table_; |
816 | TypedData& deopt_entry_; |
817 | Smi& offset_; |
818 | Smi& reason_and_flags_; |
819 | }; |
820 | |
821 | if (FLAG_precompiled_mode) return; |
822 | DedupDeoptEntriesVisitor visitor(zone); |
823 | WalkProgram(zone, isolate, &visitor); |
824 | } |
825 | |
826 | #if defined(DART_PRECOMPILER) |
827 | void ProgramVisitor::DedupCatchEntryMovesMaps(Zone* zone, Isolate* isolate) { |
828 | class DedupCatchEntryMovesMapsVisitor : public CodeVisitor, |
829 | public TypedDataDedupper { |
830 | public: |
831 | explicit DedupCatchEntryMovesMapsVisitor(Zone* zone) |
832 | : TypedDataDedupper(zone), |
833 | catch_entry_moves_maps_(TypedData::Handle(zone)) {} |
834 | |
835 | void VisitCode(const Code& code) { |
836 | catch_entry_moves_maps_ = code.catch_entry_moves_maps(); |
837 | catch_entry_moves_maps_ = Dedup(catch_entry_moves_maps_); |
838 | code.set_catch_entry_moves_maps(catch_entry_moves_maps_); |
839 | } |
840 | |
841 | private: |
842 | TypedData& catch_entry_moves_maps_; |
843 | }; |
844 | |
845 | if (!FLAG_precompiled_mode) return; |
846 | DedupCatchEntryMovesMapsVisitor visitor(zone); |
847 | WalkProgram(zone, isolate, &visitor); |
848 | } |
849 | |
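// Traits for a DirectChainedHashMap set of UnlinkedCall objects, hashing and
// comparing with the UnlinkedCall's own Hashcode and Equals.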
850 | class UnlinkedCallKeyValueTrait { |
851 | public: |
852 | // Typedefs needed for the DirectChainedHashMap template. |
853 | typedef const UnlinkedCall* Key; |
854 | typedef const UnlinkedCall* Value; |
855 | typedef const UnlinkedCall* Pair; |
856 | |
857 | static Key KeyOf(Pair kv) { return kv; } |
858 | |
859 | static Value ValueOf(Pair kv) { return kv; } |
860 | |
861 | static inline intptr_t Hashcode(Key key) { return key->Hashcode(); } |
862 | |
863 | static inline bool IsKeyEqual(Pair pair, Key key) { |
864 | return pair->Equals(*key); |
865 | } |
866 | }; |
867 | |
868 | void ProgramVisitor::DedupUnlinkedCalls(Zone* zone, Isolate* isolate) { |
869 | class DedupUnlinkedCallsVisitor |
870 | : public CodeVisitor, |
871 | public Dedupper<UnlinkedCall, UnlinkedCallKeyValueTrait> { |
872 | public: |
873 | explicit DedupUnlinkedCallsVisitor(Zone* zone, Isolate* isolate) |
874 | : Dedupper(zone), |
875 | entry_(Object::Handle(zone)), |
876 | pool_(ObjectPool::Handle(zone)) { |
877 | auto& gop = ObjectPool::Handle( |
878 | zone, isolate->object_store()->global_object_pool()); |
879 | ASSERT_EQUAL(!gop.IsNull(), FLAG_use_bare_instructions); |
880 | DedupPool(gop); |
881 | } |
882 | |
883 | void DedupPool(const ObjectPool& pool) { |
884 | if (pool.IsNull()) return; |
885 | for (intptr_t i = 0; i < pool.Length(); i++) { |
886 | if (pool.TypeAt(i) != ObjectPool::EntryType::kTaggedObject) { |
887 | continue; |
888 | } |
889 | entry_ = pool.ObjectAt(i); |
890 | if (!entry_.IsUnlinkedCall()) continue; |
891 | entry_ = Dedup(UnlinkedCall::Cast(entry_)); |
892 | pool.SetObjectAt(i, entry_); |
893 | } |
894 | } |
895 | |
896 | void VisitCode(const Code& code) { |
897 | pool_ = code.object_pool(); |
898 | DedupPool(pool_); |
899 | } |
900 | |
901 | private: |
902 | Object& entry_; |
903 | ObjectPool& pool_; |
904 | }; |
905 | |
906 | if (!FLAG_precompiled_mode) return; |
907 | |
908 | DedupUnlinkedCallsVisitor deduper(zone, isolate); |
909 | |
910 | // Note: in bare instructions mode we can still have object pools attached |
911 | // to code objects and these pools need to be deduplicated. |
912 | // We use these pools to carry information about references between code |
  // objects and other objects in the snapshots (these references are
  // otherwise implicit and go through the global object pool). This
  // information is needed to produce a more informative snapshot profile.
916 | if (!FLAG_use_bare_instructions || |
917 | FLAG_write_v8_snapshot_profile_to != nullptr || |
918 | FLAG_trace_precompiler_to != nullptr) { |
919 | WalkProgram(zone, isolate, &deduper); |
920 | } |
921 | } |
922 | #endif // defined(DART_PRECOMPILER) |
923 | |
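// Traits for a DirectChainedHashMap set of CodeSourceMaps, hashing by length
// and comparing keys with CodeSourceMap::Equals.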
924 | class CodeSourceMapKeyValueTrait { |
925 | public: |
926 | // Typedefs needed for the DirectChainedHashMap template. |
927 | typedef const CodeSourceMap* Key; |
928 | typedef const CodeSourceMap* Value; |
929 | typedef const CodeSourceMap* Pair; |
930 | |
931 | static Key KeyOf(Pair kv) { return kv; } |
932 | |
933 | static Value ValueOf(Pair kv) { return kv; } |
934 | |
935 | static inline intptr_t Hashcode(Key key) { |
936 | ASSERT(!key->IsNull()); |
937 | return key->Length(); |
938 | } |
939 | |
940 | static inline bool IsKeyEqual(Pair pair, Key key) { |
941 | ASSERT(!pair->IsNull() && !key->IsNull()); |
942 | return pair->Equals(*key); |
943 | } |
944 | }; |
945 | |
946 | void ProgramVisitor::DedupCodeSourceMaps(Zone* zone, Isolate* isolate) { |
947 | class DedupCodeSourceMapsVisitor |
948 | : public CodeVisitor, |
949 | public Dedupper<CodeSourceMap, CodeSourceMapKeyValueTrait> { |
950 | public: |
951 | explicit DedupCodeSourceMapsVisitor(Zone* zone) |
952 | : Dedupper(zone), code_source_map_(CodeSourceMap::Handle(zone)) { |
953 | if (Snapshot::IncludesCode(Dart::vm_snapshot_kind())) { |
954 | // Prefer existing objects in the VM isolate. |
955 | AddVMBaseObjects(); |
956 | } |
957 | } |
958 | |
959 | void VisitCode(const Code& code) { |
960 | code_source_map_ = code.code_source_map(); |
961 | code_source_map_ = Dedup(code_source_map_); |
962 | code.set_code_source_map(code_source_map_); |
963 | } |
964 | |
965 | private: |
966 | CodeSourceMap& code_source_map_; |
967 | }; |
968 | |
969 | DedupCodeSourceMapsVisitor visitor(zone); |
970 | WalkProgram(zone, isolate, &visitor); |
971 | } |
972 | |
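// Traits for a DirectChainedHashMap set of Arrays, hashing by length and
// comparing keys element by element with pointer equality.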
973 | class ArrayKeyValueTrait { |
974 | public: |
975 | // Typedefs needed for the DirectChainedHashMap template. |
976 | typedef const Array* Key; |
977 | typedef const Array* Value; |
978 | typedef const Array* Pair; |
979 | |
980 | static Key KeyOf(Pair kv) { return kv; } |
981 | |
982 | static Value ValueOf(Pair kv) { return kv; } |
983 | |
984 | static inline intptr_t Hashcode(Key key) { |
985 | ASSERT(!key->IsNull()); |
986 | return key->Length(); |
987 | } |
988 | |
989 | static inline bool IsKeyEqual(Pair pair, Key key) { |
990 | ASSERT(!pair->IsNull() && !key->IsNull()); |
991 | if (pair->Length() != key->Length()) return false; |
992 | for (intptr_t i = 0; i < pair->Length(); i++) { |
993 | if (pair->At(i) != key->At(i)) return false; |
994 | } |
995 | return true; |
996 | } |
997 | }; |
998 | |
999 | void ProgramVisitor::DedupLists(Zone* zone, Isolate* isolate) { |
1000 | class DedupListsVisitor : public CodeVisitor, |
1001 | public Dedupper<Array, ArrayKeyValueTrait> { |
1002 | public: |
1003 | explicit DedupListsVisitor(Zone* zone) |
1004 | : Dedupper(zone), |
1005 | list_(Array::Handle(zone)), |
1006 | function_(Function::Handle(zone)) {} |
1007 | |
1008 | void VisitCode(const Code& code) { |
1009 | if (!code.IsFunctionCode()) return; |
1010 | |
1011 | list_ = code.inlined_id_to_function(); |
1012 | list_ = Dedup(list_); |
1013 | code.set_inlined_id_to_function(list_); |
1014 | |
1015 | list_ = code.deopt_info_array(); |
1016 | list_ = Dedup(list_); |
1017 | code.set_deopt_info_array(list_); |
1018 | |
1019 | list_ = code.static_calls_target_table(); |
1020 | list_ = Dedup(list_); |
1021 | code.set_static_calls_target_table(list_); |
1022 | } |
1023 | |
1024 | void VisitFunction(const Function& function) { |
1025 | list_ = PrepareParameterTypes(function); |
1026 | list_ = Dedup(list_); |
1027 | function.set_parameter_types(list_); |
1028 | |
1029 | list_ = PrepareParameterNames(function); |
1030 | list_ = Dedup(list_); |
1031 | function.set_parameter_names(list_); |
1032 | } |
1033 | |
1034 | private: |
1035 | bool IsCorrectType(const Object& obj) const { return obj.IsArray(); } |
1036 | |
1037 | ArrayPtr PrepareParameterTypes(const Function& function) { |
1038 | list_ = function.parameter_types(); |
1039 | // Preserve parameter types in the JIT. Needed in case of recompilation |
1040 | // in checked mode, or if available to mirrors, or for copied types to |
1041 | // lazily generated tear offs. Also avoid attempting to change read-only |
1042 | // VM objects for de-duplication. |
1043 | if (FLAG_precompiled_mode && !list_.IsNull() && |
1044 | !list_.InVMIsolateHeap() && !function.IsSignatureFunction() && |
1045 | !function.IsClosureFunction() && !function.IsFfiTrampoline() && |
1046 | function.name() != Symbols::Call().raw()) { |
1047 | // Parameter types not needed for function type tests. |
1048 | for (intptr_t i = 0; i < list_.Length(); i++) { |
1049 | list_.SetAt(i, Object::dynamic_type()); |
1050 | } |
1051 | } |
1052 | return list_.raw(); |
1053 | } |
1054 | |
1055 | ArrayPtr PrepareParameterNames(const Function& function) { |
1056 | list_ = function.parameter_names(); |
1057 | // Preserve parameter names in case of recompilation for the JIT. Also |
1058 | // avoid attempting to change read-only VM objects for de-duplication. |
1059 | if (FLAG_precompiled_mode && !list_.IsNull() && |
1060 | !list_.InVMIsolateHeap() && !function.HasOptionalNamedParameters()) { |
1061 | // Parameter names not needed for resolution. |
1062 | ASSERT(list_.Length() == function.NumParameters()); |
1063 | for (intptr_t i = 0; i < list_.Length(); i++) { |
1064 | list_.SetAt(i, Symbols::OptimizedOut()); |
1065 | } |
1066 | } |
1067 | return list_.raw(); |
1068 | } |
1069 | |
1070 | Array& list_; |
1071 | Function& function_; |
1072 | }; |
1073 | |
1074 | DedupListsVisitor visitor(zone); |
1075 | WalkProgram(zone, isolate, &visitor); |
1076 | } |
1077 | |
1078 | // Traits for comparing two [Instructions] objects for equality, which is |
1079 | // implemented as bit-wise equality. |
1080 | // |
1081 | // This considers two instruction objects to be equal even if they have |
1082 | // different static call targets. Since the static call targets are called via |
1083 | // the object pool this is ok. |
1084 | class InstructionsKeyValueTrait { |
1085 | public: |
1086 | // Typedefs needed for the DirectChainedHashMap template. |
1087 | typedef const Instructions* Key; |
1088 | typedef const Instructions* Value; |
1089 | typedef const Instructions* Pair; |
1090 | |
1091 | static Key KeyOf(Pair kv) { return kv; } |
1092 | |
1093 | static Value ValueOf(Pair kv) { return kv; } |
1094 | |
1095 | static inline intptr_t Hashcode(Key key) { return key->Hash(); } |
1096 | |
1097 | static inline bool IsKeyEqual(Pair pair, Key key) { |
1098 | return pair->Equals(*key); |
1099 | } |
1100 | }; |
1101 | |
1102 | // Traits for comparing two [Code] objects for equality. |
1103 | // |
1104 | // The instruction deduplication naturally causes us to have a one-to-many |
1105 | // relationship between Instructions and Code objects. |
1106 | // |
1107 | // In AOT bare instructions mode frames only have PCs. However, the runtime |
1108 | // needs e.g. stack maps from the [Code] to scan such a frame. So we ensure that |
1109 | // instructions of code objects are only deduplicated if the metadata in the |
1110 | // code is the same. The runtime can then pick any code object corresponding to |
1111 | // the PC in the frame and use the metadata. |
1112 | // |
1113 | // In AOT non-bare instructions mode frames are expanded, like in JIT, and |
1114 | // contain the unique code object. |
1115 | #if defined(DART_PRECOMPILER) |
1116 | class CodeKeyValueTrait { |
1117 | public: |
1118 | // Typedefs needed for the DirectChainedHashMap template. |
1119 | typedef const Code* Key; |
1120 | typedef const Code* Value; |
1121 | typedef const Code* Pair; |
1122 | |
1123 | static Key KeyOf(Pair kv) { return kv; } |
1124 | |
1125 | static Value ValueOf(Pair kv) { return kv; } |
1126 | |
1127 | static inline intptr_t Hashcode(Key key) { return key->Size(); } |
1128 | |
1129 | static inline bool IsKeyEqual(Pair pair, Key key) { |
1130 | // In AOT, disabled code objects should not be considered for deduplication. |
1131 | ASSERT(!pair->IsDisabled() && !key->IsDisabled()); |
1132 | |
1133 | if (pair->raw() == key->raw()) return true; |
1134 | |
1135 | // Notice we assume that these entries have already been de-duped, so we |
1136 | // can use pointer equality. |
1137 | if (pair->static_calls_target_table() != key->static_calls_target_table()) { |
1138 | return false; |
1139 | } |
1140 | if (pair->pc_descriptors() != key->pc_descriptors()) { |
1141 | return false; |
1142 | } |
1143 | if (pair->compressed_stackmaps() != key->compressed_stackmaps()) { |
1144 | return false; |
1145 | } |
1146 | if (pair->catch_entry_moves_maps() != key->catch_entry_moves_maps()) { |
1147 | return false; |
1148 | } |
1149 | if (pair->exception_handlers() != key->exception_handlers()) { |
1150 | return false; |
1151 | } |
1152 | if (pair->UncheckedEntryPointOffset() != key->UncheckedEntryPointOffset()) { |
1153 | return false; |
1154 | } |
1155 | return Instructions::Equals(pair->instructions(), key->instructions()); |
1156 | } |
1157 | }; |
1158 | #endif |
1159 | |
1160 | void ProgramVisitor::DedupInstructions(Zone* zone, Isolate* isolate) { |
1161 | class DedupInstructionsVisitor |
1162 | : public CodeVisitor, |
1163 | public Dedupper<Instructions, InstructionsKeyValueTrait>, |
1164 | public ObjectVisitor { |
1165 | public: |
1166 | explicit DedupInstructionsVisitor(Zone* zone) |
1167 | : Dedupper(zone), |
1168 | code_(Code::Handle(zone)), |
1169 | instructions_(Instructions::Handle(zone)) { |
1170 | if (Snapshot::IncludesCode(Dart::vm_snapshot_kind())) { |
1171 | // Prefer existing objects in the VM isolate. |
1172 | Dart::vm_isolate()->heap()->VisitObjectsImagePages(this); |
1173 | } |
1174 | } |
1175 | |
1176 | void VisitObject(ObjectPtr obj) { |
1177 | if (!obj->IsInstructions()) return; |
1178 | instructions_ = Instructions::RawCast(obj); |
1179 | AddCanonical(instructions_); |
1180 | } |
1181 | |
1182 | void VisitFunction(const Function& function) { |
1183 | if (!function.HasCode()) return; |
1184 | code_ = function.CurrentCode(); |
1185 | // This causes the code to be visited once here and once directly in the |
1186 | // ProgramWalker, but as long as the deduplication process is idempotent, |
1187 | // the cached entry points won't change during the second visit. |
1188 | VisitCode(code_); |
1189 | function.SetInstructions(code_); // Update cached entry point. |
1190 | } |
1191 | |
1192 | void VisitCode(const Code& code) { |
1193 | instructions_ = code.instructions(); |
1194 | instructions_ = Dedup(instructions_); |
1195 | code.set_instructions(instructions_); |
1196 | if (code.IsDisabled()) { |
1197 | instructions_ = code.active_instructions(); |
1198 | instructions_ = Dedup(instructions_); |
1199 | } |
1200 | code.SetActiveInstructions(instructions_, |
1201 | code.UncheckedEntryPointOffset()); |
1202 | } |
1203 | |
1204 | private: |
1205 | Code& code_; |
1206 | Instructions& instructions_; |
1207 | }; |
1208 | |
1209 | #if defined(DART_PRECOMPILER) |
1210 | class DedupInstructionsWithSameMetadataVisitor |
1211 | : public CodeVisitor, |
1212 | public Dedupper<Code, CodeKeyValueTrait> { |
1213 | public: |
1214 | explicit DedupInstructionsWithSameMetadataVisitor(Zone* zone) |
1215 | : Dedupper(zone), |
1216 | canonical_(Code::Handle(zone)), |
1217 | code_(Code::Handle(zone)), |
1218 | instructions_(Instructions::Handle(zone)) {} |
1219 | |
1220 | void VisitFunction(const Function& function) { |
1221 | if (!function.HasCode()) return; |
1222 | code_ = function.CurrentCode(); |
1223 | // This causes the code to be visited once here and once directly in the |
1224 | // ProgramWalker, but as long as the deduplication process is idempotent, |
1225 | // the cached entry points won't change during the second visit. |
1226 | VisitCode(code_); |
1227 | function.SetInstructions(code_); // Update cached entry point. |
1228 | } |
1229 | |
1230 | void VisitCode(const Code& code) { |
1231 | if (code.IsDisabled()) return; |
1232 | canonical_ = Dedup(code); |
1233 | instructions_ = canonical_.instructions(); |
1234 | code.SetActiveInstructions(instructions_, |
1235 | code.UncheckedEntryPointOffset()); |
1236 | code.set_instructions(instructions_); |
1237 | } |
1238 | |
1239 | private: |
1240 | bool CanCanonicalize(const Code& code) const { return !code.IsDisabled(); } |
1241 | |
1242 | Code& canonical_; |
1243 | Code& code_; |
1244 | Instructions& instructions_; |
1245 | }; |
1246 | |
1247 | if (FLAG_precompiled_mode && FLAG_use_bare_instructions) { |
1248 | DedupInstructionsWithSameMetadataVisitor visitor(zone); |
1249 | return WalkProgram(zone, isolate, &visitor); |
1250 | } |
1251 | #endif // defined(DART_PRECOMPILER) |
1252 | |
1253 | DedupInstructionsVisitor visitor(zone); |
1254 | WalkProgram(zone, isolate, &visitor); |
1255 | } |
1256 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
1257 | |
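// Runs the binding, sharing, and deduplication passes above over the current
// program.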
1258 | void ProgramVisitor::Dedup(Thread* thread) { |
1259 | #if !defined(DART_PRECOMPILED_RUNTIME) |
1260 | auto const isolate = thread->isolate(); |
1261 | StackZone stack_zone(thread); |
1262 | HANDLESCOPE(thread); |
1263 | auto const zone = thread->zone(); |
1264 | |
1265 | BindStaticCalls(zone, isolate); |
1266 | ShareMegamorphicBuckets(zone, isolate); |
1267 | NormalizeAndDedupCompressedStackMaps(zone, isolate); |
1268 | DedupPcDescriptors(zone, isolate); |
1269 | DedupDeoptEntries(zone, isolate); |
1270 | #if defined(DART_PRECOMPILER) |
1271 | DedupCatchEntryMovesMaps(zone, isolate); |
1272 | DedupUnlinkedCalls(zone, isolate); |
1273 | #endif |
1274 | DedupCodeSourceMaps(zone, isolate); |
1275 | DedupLists(zone, isolate); |
1276 | |
1277 | // Reduces binary size but obfuscates profiler results. |
1278 | if (FLAG_dedup_instructions) { |
    // In non-bare mode (unused atm) deduping instructions would cause us to
    // lose the ability to uniquely map a PC to a given UnlinkedCall object,
1281 | // since two code objects might point to the same deduped instructions |
1282 | // object but might have two different UnlinkedCall objects in their pool. |
1283 | // |
1284 | // In bare mode this cannot happen because different UnlinkedCall objects |
1285 | // would get different indices into the (global) object pool, therefore |
1286 | // making the instructions different. |
1287 | // |
    // (When transitioning the switchable call site we lose track of the args
1289 | // descriptor. Since we need it for further transitions we currently save it |
1290 | // via a PC -> UnlinkedCall mapping). |
1291 | // |
    // We therefore disable the instruction deduplication in product-non-bare
1293 | // mode (which is unused atm). |
1294 | #if defined(PRODUCT) |
1295 | if (FLAG_precompiled_mode && !FLAG_use_bare_instructions) return; |
1296 | #endif |
1297 | |
1298 | DedupInstructions(zone, isolate); |
1299 | } |
1300 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
1301 | } |
1302 | |
1303 | #if defined(DART_PRECOMPILER) |
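// Assigns a loading unit id to each visited Code object based on the library
// of its owner (or the root unit for stubs), and merges that assignment into
// the code's code source map and compressed stack maps.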
1304 | class AssignLoadingUnitsCodeVisitor : public CodeVisitor { |
1305 | public: |
1306 | explicit AssignLoadingUnitsCodeVisitor(Zone* zone) |
1307 | : heap_(Thread::Current()->heap()), |
1308 | func_(Function::Handle(zone)), |
1309 | cls_(Class::Handle(zone)), |
1310 | lib_(Library::Handle(zone)), |
1311 | unit_(LoadingUnit::Handle(zone)), |
1312 | obj_(Object::Handle(zone)) {} |
1313 | |
1314 | void VisitCode(const Code& code) { |
1315 | intptr_t id; |
1316 | if (code.IsFunctionCode()) { |
1317 | func_ ^= code.owner(); |
1318 | cls_ = func_.Owner(); |
1319 | lib_ = cls_.library(); |
1320 | unit_ = lib_.loading_unit(); |
1321 | id = unit_.id(); |
1322 | } else if (code.IsAllocationStubCode()) { |
1323 | cls_ ^= code.owner(); |
1324 | lib_ = cls_.library(); |
1325 | unit_ = lib_.loading_unit(); |
1326 | id = unit_.id(); |
1327 | } else if (code.IsStubCode()) { |
1328 | id = LoadingUnit::kRootId; |
1329 | } else { |
1330 | UNREACHABLE(); |
1331 | } |
1332 | |
1333 | ASSERT(heap_->GetLoadingUnit(code.raw()) == WeakTable::kNoValue); |
1334 | heap_->SetLoadingUnit(code.raw(), id); |
1335 | |
1336 | obj_ = code.code_source_map(); |
1337 | MergeAssignment(obj_, id); |
1338 | obj_ = code.compressed_stackmaps(); |
1339 | MergeAssignment(obj_, id); |
1340 | } |
1341 | |
1342 | void MergeAssignment(const Object& obj, intptr_t id) { |
1343 | intptr_t old_id = heap_->GetLoadingUnit(obj_.raw()); |
1344 | if (old_id == WeakTable::kNoValue) { |
1345 | heap_->SetLoadingUnit(obj_.raw(), id); |
1346 | } else if (old_id == id) { |
1347 | // Shared with another code in the same loading unit. |
1348 | } else { |
1349 | // Shared with another code in a different loading unit. |
1350 | // Could assign to dominating loading unit. |
1351 | heap_->SetLoadingUnit(obj_.raw(), LoadingUnit::kRootId); |
1352 | } |
1353 | } |
1354 | |
1355 | private: |
1356 | Heap* heap_; |
1357 | Function& func_; |
1358 | Class& cls_; |
1359 | Library& lib_; |
1360 | LoadingUnit& unit_; |
1361 | Object& obj_; |
1362 | }; |
1363 | |
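// Records loading unit ids for the VM and isolate stubs and for all code
// visited by the program walk.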
1364 | void ProgramVisitor::AssignUnits(Thread* thread) { |
1365 | StackZone stack_zone(thread); |
1366 | HANDLESCOPE(thread); |
1367 | Zone* zone = thread->zone(); |
1368 | |
1369 | // VM stubs. |
1370 | Instructions& inst = Instructions::Handle(zone); |
1371 | Code& code = Code::Handle(zone); |
1372 | for (intptr_t i = 0; i < StubCode::NumEntries(); i++) { |
1373 | inst = StubCode::EntryAt(i).instructions(); |
1374 | thread->heap()->SetLoadingUnit(inst.raw(), LoadingUnit::kRootId); |
1375 | } |
1376 | |
1377 | // Isolate stubs. |
1378 | ObjectStore* object_store = thread->isolate()->object_store(); |
1379 | ObjectPtr* from = object_store->from(); |
1380 | ObjectPtr* to = object_store->to_snapshot(Snapshot::kFullAOT); |
1381 | for (ObjectPtr* p = from; p <= to; p++) { |
1382 | if ((*p)->IsCode()) { |
1383 | code ^= *p; |
1384 | inst = code.instructions(); |
1385 | thread->heap()->SetLoadingUnit(inst.raw(), LoadingUnit::kRootId); |
1386 | } |
1387 | } |
1388 | |
1389 | // Function code / allocation stubs. |
1390 | AssignLoadingUnitsCodeVisitor visitor(zone); |
1391 | WalkProgram(zone, thread->isolate(), &visitor); |
1392 | } |
1393 | |
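// Computes a hash of the program from the names of classes and functions, the
// instances in object pools, and the instruction payloads of code objects.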
1394 | class ProgramHashVisitor : public CodeVisitor { |
1395 | public: |
1396 | explicit ProgramHashVisitor(Zone* zone) |
1397 | : str_(String::Handle(zone)), |
1398 | pool_(ObjectPool::Handle(zone)), |
1399 | obj_(Object::Handle(zone)), |
1400 | instr_(Instructions::Handle(zone)), |
1401 | hash_(0) {} |
1402 | |
1403 | void VisitClass(const Class& cls) { |
1404 | str_ = cls.Name(); |
1405 | VisitInstance(str_); |
1406 | } |
1407 | |
1408 | void VisitFunction(const Function& function) { |
1409 | str_ = function.name(); |
1410 | VisitInstance(str_); |
1411 | } |
1412 | |
1413 | void VisitCode(const Code& code) { |
1414 | pool_ = code.object_pool(); |
1415 | VisitPool(pool_); |
1416 | |
1417 | instr_ = code.instructions(); |
1418 | hash_ = CombineHashes(hash_, instr_.Hash()); |
1419 | } |
1420 | |
1421 | void VisitPool(const ObjectPool& pool) { |
1422 | if (pool.IsNull()) return; |
1423 | |
1424 | for (intptr_t i = 0; i < pool.Length(); i++) { |
1425 | if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) { |
1426 | obj_ = pool.ObjectAt(i); |
1427 | if (obj_.IsInstance()) { |
1428 | VisitInstance(Instance::Cast(obj_)); |
1429 | } |
1430 | } |
1431 | } |
1432 | } |
1433 | |
1434 | void VisitInstance(const Instance& instance) { |
1435 | hash_ = CombineHashes(hash_, instance.CanonicalizeHash()); |
1436 | } |
1437 | |
1438 | uint32_t hash() const { return FinalizeHash(hash_, String::kHashBits); } |
1439 | |
1440 | private: |
1441 | String& str_; |
1442 | ObjectPool& pool_; |
1443 | Object& obj_; |
1444 | Instructions& instr_; |
1445 | uint32_t hash_; |
1446 | }; |
1447 | |
1448 | uint32_t ProgramVisitor::Hash(Thread* thread) { |
1449 | StackZone stack_zone(thread); |
1450 | HANDLESCOPE(thread); |
1451 | Zone* zone = thread->zone(); |
1452 | |
1453 | ProgramHashVisitor visitor(zone); |
1454 | WalkProgram(zone, thread->isolate(), &visitor); |
1455 | visitor.VisitPool(ObjectPool::Handle( |
1456 | zone, thread->isolate()->object_store()->global_object_pool())); |
1457 | return visitor.hash(); |
1458 | } |
1459 | |
#endif  // defined(DART_PRECOMPILER)
1461 | |
1462 | } // namespace dart |
1463 | |