1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #ifndef RUNTIME_VM_DEOPT_INSTRUCTIONS_H_ |
6 | #define RUNTIME_VM_DEOPT_INSTRUCTIONS_H_ |
7 | #if !defined(DART_PRECOMPILED_RUNTIME) |
8 | |
9 | #include "vm/allocation.h" |
10 | #include "vm/code_descriptors.h" |
11 | #include "vm/compiler/backend/flow_graph_compiler.h" |
12 | #include "vm/compiler/backend/locations.h" |
13 | #include "vm/deferred_objects.h" |
14 | #include "vm/growable_array.h" |
15 | #include "vm/object.h" |
16 | #include "vm/runtime_entry.h" |
17 | #include "vm/stack_frame.h" |
18 | #include "vm/thread.h" |
19 | |
20 | namespace dart { |
21 | |
22 | class Location; |
23 | class Value; |
24 | class MaterializeObjectInstr; |
25 | class StackFrame; |
26 | class TimelineEvent; |
27 | |
// Holds all data relevant for execution of deoptimization instructions.
// Structure is allocated in C-heap.
class DeoptContext {
 public:
  // Controls where the deoptimized (unoptimized) frame is written.
  enum DestFrameOptions {
    kDestIsOriginalFrame,  // Replace the original frame with deopt frame.
    kDestIsAllocated       // Write deopt frame to a buffer.
  };

  // If 'deoptimizing_code' is false, only frame is being deoptimized.
  DeoptContext(const StackFrame* frame,
               const Code& code,
               DestFrameOptions dest_options,
               fpu_register_t* fpu_registers,
               intptr_t* cpu_registers,
               bool is_lazy_deopt,
               bool deoptimizing_code);
  virtual ~DeoptContext();

  // Returns the offset of the dest fp from the dest sp. Used in
  // runtime code to adjust the stack size before deoptimization.
  intptr_t DestStackAdjustment() const;

  // Returns the address of the source-frame slot at 'index', where 'index'
  // is FP-relative and converted here to an SP-relative offset.
  intptr_t* GetSourceFrameAddressAt(intptr_t index) const {
    ASSERT(source_frame_ != NULL);
    ASSERT((0 <= index) && (index < source_frame_size_));
    // Convert FP relative index to SP relative one.
    index = source_frame_size_ - 1 - index;
    return &source_frame_[index];
  }

  // Returns index in stack slot notation where -1 is the first argument
  intptr_t GetStackSlot(intptr_t index) const {
    ASSERT((0 <= index) && (index < source_frame_size_));
    index -= num_args_;
    return index < 0 ? index : index - kDartFrameFixedSize;
  }

  // Raw FP, PP and PC values captured from the source (optimized) frame.
  intptr_t GetSourceFp() const;
  intptr_t GetSourcePp() const;
  intptr_t GetSourcePc() const;

  // Caller's FP, filled in by the kCallerFp deopt instruction.
  intptr_t GetCallerFp() const;
  void SetCallerFp(intptr_t callers_fp);

  // Returns the object at 'index' in the object pool of the code being
  // deoptimized.
  ObjectPtr ObjectAt(intptr_t index) const {
    const ObjectPool& object_pool = ObjectPool::Handle(object_pool_);
    return object_pool.ObjectAt(index);
  }

  // Value of the given CPU register captured at the deoptimization point.
  intptr_t RegisterValue(Register reg) const {
    ASSERT(reg >= 0);
    ASSERT(reg < kNumberOfCpuRegisters);
    ASSERT(cpu_registers_ != NULL);
    return cpu_registers_[reg];
  }

  // Value of the given FPU register reinterpreted as an unboxed double.
  double FpuRegisterValue(FpuRegister reg) const {
    ASSERT(FlowGraphCompiler::SupportsUnboxedDoubles());
    ASSERT(fpu_registers_ != NULL);
    ASSERT(reg >= 0);
    ASSERT(reg < kNumberOfFpuRegisters);
    return *reinterpret_cast<double*>(&fpu_registers_[reg]);
  }

  // Value of the given FPU register read as a full 128-bit SIMD value.
  simd128_value_t FpuRegisterValueAsSimd128(FpuRegister reg) const {
    ASSERT(FlowGraphCompiler::SupportsUnboxedSimd128());
    ASSERT(fpu_registers_ != NULL);
    ASSERT(reg >= 0);
    ASSERT(reg < kNumberOfFpuRegisters);
    const float* address = reinterpret_cast<float*>(&fpu_registers_[reg]);
    return simd128_value_t().readFrom(address);
  }

  // Return base pointer for the given frame (either source or destination).
  // Base pointer points to the slot with the lowest address in the frame
  // including incoming arguments and artificial deoptimization frame
  // on top of it.
  // Note: artificial frame created by the deoptimization stub is considered
  // part of the frame because it contains saved caller PC and FP that
  // deoptimization will fill in.
  intptr_t* FrameBase(const StackFrame* frame) {
    // SP of the deoptimization frame is the lowest slot because
    // stack is growing downwards.
    return reinterpret_cast<intptr_t*>(frame->sp() -
                                       (kDartFrameFixedSize * kWordSize));
  }

  // Sets the destination frame; may only be done once per context.
  void set_dest_frame(const StackFrame* frame) {
    ASSERT(frame != NULL && dest_frame_ == NULL);
    dest_frame_ = FrameBase(frame);
  }

  Thread* thread() const { return thread_; }
  Zone* zone() const { return thread_->zone(); }

  // Frame sizes in words.
  intptr_t source_frame_size() const { return source_frame_size_; }
  intptr_t dest_frame_size() const { return dest_frame_size_; }

  // The optimized code being deoptimized.
  CodePtr code() const { return code_; }

  bool is_lazy_deopt() const { return is_lazy_deopt_; }

  bool deoptimizing_code() const { return deoptimizing_code_; }

  // Reason and flags decoded from the deopt table entry for this point.
  ICData::DeoptReasonId deopt_reason() const { return deopt_reason_; }
  bool HasDeoptFlag(ICData::DeoptFlags flag) {
    return (deopt_flags_ & flag) != 0;
  }

  // Packed deopt instructions for this deoptimization point.
  TypedDataPtr deopt_info() const { return deopt_info_; }

  // Fills the destination frame but defers materialization of
  // objects.
  void FillDestFrame();

  // Convert deoptimization instructions to a list of moves that need
  // to be executed when entering catch entry block from this deoptimization
  // point.
  const CatchEntryMoves* ToCatchEntryMoves(intptr_t num_vars);

  // Materializes all deferred objects. Returns the total number of
  // artificial arguments used during deoptimization.
  intptr_t MaterializeDeferredObjects();

  ArrayPtr DestFrameAsArray();

  void VisitObjectPointers(ObjectPointerVisitor* visitor);

  // The Defer* methods below record a slot in the destination frame whose
  // value cannot be computed yet; each prepends a node to the singly-linked
  // deferred_slots_ list, which is processed after the frame is filled.
  void DeferMaterializedObjectRef(intptr_t idx, intptr_t* slot) {
    deferred_slots_ = new DeferredObjectRef(
        idx, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferMaterialization(double value, DoublePtr* slot) {
    deferred_slots_ = new DeferredDouble(
        value, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferMintMaterialization(int64_t value, MintPtr* slot) {
    deferred_slots_ = new DeferredMint(
        value, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferMaterialization(simd128_value_t value, Float32x4Ptr* slot) {
    deferred_slots_ = new DeferredFloat32x4(
        value, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferMaterialization(simd128_value_t value, Float64x2Ptr* slot) {
    deferred_slots_ = new DeferredFloat64x2(
        value, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferMaterialization(simd128_value_t value, Int32x4Ptr* slot) {
    deferred_slots_ = new DeferredInt32x4(
        value, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferRetAddrMaterialization(intptr_t index,
                                   intptr_t deopt_id,
                                   intptr_t* slot) {
    deferred_slots_ = new DeferredRetAddr(
        index, deopt_id, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferPcMarkerMaterialization(intptr_t index, intptr_t* slot) {
    deferred_slots_ = new DeferredPcMarker(
        index, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
  }

  void DeferPpMaterialization(intptr_t index, ObjectPtr* slot) {
    deferred_slots_ = new DeferredPp(index, slot, deferred_slots_);
  }

  DeferredObject* GetDeferredObject(intptr_t idx) const {
    return deferred_objects_[idx];
  }

  intptr_t num_args() const { return num_args_; }

 private:
  // Returns the address of the destination-frame slot at 'index'
  // (SP-relative, unlike the source-frame accessor above).
  intptr_t* GetDestFrameAddressAt(intptr_t index) const {
    ASSERT(dest_frame_ != NULL);
    ASSERT((0 <= index) && (index < dest_frame_size_));
    return &dest_frame_[index];
  }

  // Allocates storage for 'count' deferred objects (no-op if count == 0).
  void PrepareForDeferredMaterialization(intptr_t count) {
    if (count > 0) {
      deferred_objects_ = new DeferredObject*[count];
      deferred_objects_count_ = count;
    }
  }

  // Sets the materialized value for some deferred object.
  //
  // Claims ownership of the memory for 'object'.
  void SetDeferredObjectAt(intptr_t idx, DeferredObject* object) {
    deferred_objects_[idx] = object;
  }

  intptr_t DeferredObjectsCount() const { return deferred_objects_count_; }

  CodePtr code_;               // Optimized code being deoptimized.
  ObjectPoolPtr object_pool_;  // Object pool of 'code_'.
  TypedDataPtr deopt_info_;    // Packed deopt instructions.
  bool dest_frame_is_allocated_;
  intptr_t* dest_frame_;       // Base of destination (unoptimized) frame.
  intptr_t dest_frame_size_;   // In words.
  bool source_frame_is_allocated_;
  intptr_t* source_frame_;     // Base of source (optimized) frame.
  intptr_t source_frame_size_; // In words.
  intptr_t* cpu_registers_;    // CPU register values at the deopt point.
  fpu_register_t* fpu_registers_;  // FPU register values at the deopt point.
  intptr_t num_args_;
  ICData::DeoptReasonId deopt_reason_;
  uint32_t deopt_flags_;
  intptr_t caller_fp_;
  Thread* thread_;
  int64_t deopt_start_micros_;  // Presumably for timing/tracing — set in .cc.

  // Head of the singly-linked list built by the Defer* methods above.
  DeferredSlot* deferred_slots_;

  intptr_t deferred_objects_count_;
  DeferredObject** deferred_objects_;

  const bool is_lazy_deopt_;
  const bool deoptimizing_code_;

  DISALLOW_COPY_AND_ASSIGN(DeoptContext);
};
260 | |
// Represents one deopt instruction, e.g, setup return address, store object,
// store register, etc. The target is defined by instruction's position in
// the deopt-info array.
class DeoptInstr : public ZoneAllocated {
 public:
  enum Kind {
    kRetAddress,
    kConstant,
    kWord,
    kDouble,
    kFloat32x4,
    kFloat64x2,
    kInt32x4,
    // Mints are split into low and high words on 32-bit architectures. Each
    // word can be in a register or stack slot. Note Mint pairs are only
    // used on 32-bit architectures.
    kMintPair,
    // Mints are held in one word on 64-bit architectures.
    kMint,
    kInt32,
    kUint32,
    kPcMarker,
    kPp,
    kCallerFp,
    kCallerPp,
    kCallerPc,
    kMaterializedObjectRef,
    kMaterializeObject
  };

  // Decodes an instruction from its packed (kind, source index) form.
  static DeoptInstr* Create(intptr_t kind_as_int, intptr_t source_index);

  DeoptInstr() {}
  virtual ~DeoptInstr() {}

  // Returns "Kind(args)" or just "Kind" when there are no arguments;
  // the string is allocated in the current thread's zone.
  virtual const char* ToCString() const {
    const char* args = ArgumentsToCString();
    if (args != NULL) {
      return Thread::Current()->zone()->PrintToString(
          "%s(%s)" , KindToCString(kind()), args);
    } else {
      return KindToCString(kind());
    }
  }

  // Writes this instruction's value into the destination-frame slot at
  // 'dest_addr'.
  virtual void Execute(DeoptContext* deopt_context, intptr_t* dest_addr) = 0;

  // Translates this instruction into a catch-entry move; only overridden by
  // instruction kinds that can appear in catch-entry state.
  virtual CatchEntryMove ToCatchEntryMove(DeoptContext* deopt_context,
                                          intptr_t dest_slot) {
    UNREACHABLE();
    return CatchEntryMove();
  }

  virtual DeoptInstr::Kind kind() const = 0;

  // Structural equality: same kind and same packed source index.
  bool Equals(const DeoptInstr& other) const {
    return (kind() == other.kind()) && (source_index() == other.source_index());
  }

  // Get the code and return address which is encoded in this
  // kRetAddress deopt instruction.
  static uword GetRetAddress(DeoptInstr* instr,
                             const ObjectPool& object_pool,
                             Code* code);

  // Return number of initialized fields in the object that will be
  // materialized by kMaterializeObject instruction.
  static intptr_t GetFieldCount(DeoptInstr* instr) {
    ASSERT(instr->kind() == DeoptInstr::kMaterializeObject);
    return instr->source_index();
  }

 protected:
  friend class DeoptInfoBuilder;

  // Packed, kind-specific operand (register/slot index, pool index, etc.).
  virtual intptr_t source_index() const = 0;

  // Kind-specific argument string for ToCString(); NULL when none.
  virtual const char* ArgumentsToCString() const { return NULL; }

 private:
  static const char* KindToCString(Kind kind);

  DISALLOW_COPY_AND_ASSIGN(DeoptInstr);
};
345 | |
// Helper class that allows to read a value of the given register from
// the DeoptContext as the specified type.
// It calls different method depending on which kind of register (cpu/fpu) and
// destination types are specified.
template <typename RegisterType, typename DestinationType>
struct RegisterReader;

// CPU registers are always read as a word-sized integer, regardless of the
// requested destination type T (the caller casts as needed).
template <typename T>
struct RegisterReader<Register, T> {
  static intptr_t Read(DeoptContext* context, Register reg) {
    return context->RegisterValue(reg);
  }
};

// FPU register read as an unboxed double.
template <>
struct RegisterReader<FpuRegister, double> {
  static double Read(DeoptContext* context, FpuRegister reg) {
    return context->FpuRegisterValue(reg);
  }
};

// FPU register read as a full 128-bit SIMD value.
template <>
struct RegisterReader<FpuRegister, simd128_value_t> {
  static simd128_value_t Read(DeoptContext* context, FpuRegister reg) {
    return context->FpuRegisterValueAsSimd128(reg);
  }
};
373 | |
// Class that encapsulates reading and writing of values that were either in
// the registers in the optimized code or were spilled from those registers
// to the stack.
template <typename RegisterType>
class RegisterSource {
 public:
  enum Kind {
    // Spilled register source represented as its spill slot.
    kStackSlot = 0,
    // Register source represented as its register index.
    kRegister = 1
  };

  // Reconstructs a source from its packed encoding (kind bit + index).
  explicit RegisterSource(intptr_t source_index)
      : source_index_(source_index) {}

  // Packs 'kind' and 'index' into the single-word encoding.
  RegisterSource(Kind kind, intptr_t index)
      : source_index_(KindField::encode(kind) |
                      IndexFieldLayout::encode(index)) {}

  // Reads the value as type T, either from the captured register state or
  // from the source-frame spill slot.
  template <typename T>
  T Value(DeoptContext* context) const {
    if (is_register()) {
      return static_cast<T>(
          RegisterReader<RegisterType, T>::Read(context, reg()));
    } else {
      return *reinterpret_cast<T*>(
          context->GetSourceFrameAddressAt(raw_index()));
    }
  }

  // Stack-slot index of a spilled source; must not be called for registers.
  intptr_t StackSlot(DeoptContext* context) const {
    ASSERT(!is_register());
    return context->GetStackSlot(raw_index());
  }

  // Packed encoding, suitable for the RegisterSource(intptr_t) constructor.
  intptr_t source_index() const { return source_index_; }

  // Register name, or "s<index>" for a stack slot (zone-allocated).
  const char* ToCString() const {
    if (is_register()) {
      return Name(reg());
    } else {
      return Thread::Current()->zone()->PrintToString("s%" Pd "" , raw_index());
    }
  }

 private:
  // Bit 0: Kind; remaining bits: register or stack-slot index.
  class KindField : public BitField<intptr_t, intptr_t, 0, 1> {};
  class IndexFieldLayout
      : public BitField<intptr_t, intptr_t, 1, kBitsPerWord - 1> {};

  bool is_register() const {
    return KindField::decode(source_index_) == kRegister;
  }
  intptr_t raw_index() const { return IndexFieldLayout::decode(source_index_); }

  RegisterType reg() const { return static_cast<RegisterType>(raw_index()); }

  static const char* Name(Register reg) {
    return RegisterNames::RegisterName(reg);
  }

  static const char* Name(FpuRegister fpu_reg) {
    return RegisterNames::FpuRegisterName(fpu_reg);
  }

  const intptr_t source_index_;
};
442 | |
// Shorthands for the two register-source flavors used by deopt instructions.
typedef RegisterSource<Register> CpuRegisterSource;
typedef RegisterSource<FpuRegister> FpuRegisterSource;
445 | |
// Builds a deoptimization info table, one DeoptInfo at a time. Call AddXXX
// methods in the order of their target, starting with deoptimized code
// continuation pc and ending with the first argument of the deoptimized
// code. Call CreateDeoptInfo to write the accumulated instructions into
// the heap and reset the builder's internal state for the next DeoptInfo.
class DeoptInfoBuilder : public ValueObject {
 public:
  DeoptInfoBuilder(Zone* zone,
                   const intptr_t num_args,
                   compiler::Assembler* assembler);

  // Return address before instruction.
  void AddReturnAddress(const Function& function,
                        intptr_t deopt_id,
                        intptr_t dest_index);

  // Copy from optimized frame to unoptimized.
  void AddCopy(Value* value, const Location& source_loc, intptr_t dest_index);
  void AddPcMarker(const Function& function, intptr_t dest_index);
  void AddPp(const Function& function, intptr_t dest_index);
  void AddCallerFp(intptr_t dest_index);
  void AddCallerPp(intptr_t dest_index);
  void AddCallerPc(intptr_t dest_index);

  // Add object to be materialized. Emit kMaterializeObject instruction.
  void AddMaterialization(MaterializeObjectInstr* mat);

  // For every materialized object emit instructions describing data required
  // for materialization: class of the instance to allocate and field-value
  // pairs for initialization.
  // Emitted instructions are expected to follow fixed size section of frame
  // emitted first. This way they become a part of the bottom-most deoptimized
  // frame and are discoverable by GC.
  // At deoptimization they will be removed by the stub at the very end:
  // after they were used to materialize objects.
  // Returns the index of the next stack slot. Used for verification.
  intptr_t EmitMaterializationArguments(intptr_t dest_index);

  // Packs the accumulated instructions (compressed against the table) and
  // resets the builder for the next deopt point.
  TypedDataPtr CreateDeoptInfo(const Array& deopt_table);

  // Mark the actual start of the frame description after all materialization
  // instructions were emitted. Used for verification purposes.
  void MarkFrameStart() {
    ASSERT(frame_start_ == -1);
    frame_start_ = instructions_.length();
  }

 private:
  friend class CompilerDeoptInfo;  // For current_info_number_.

  class TrieNode;

  // Translate a Location into the packed register/stack-slot encodings.
  CpuRegisterSource ToCpuRegisterSource(const Location& loc);
  FpuRegisterSource ToFpuRegisterSource(
      const Location& loc,
      Location::Kind expected_stack_slot_kind);

  intptr_t FindOrAddObjectInTable(const Object& obj) const;
  intptr_t FindMaterialization(MaterializeObjectInstr* mat) const;
  intptr_t CalculateStackIndex(const Location& source_loc) const;

  // Number of instructions emitted since MarkFrameStart().
  intptr_t FrameSize() const {
    ASSERT(frame_start_ != -1);
    const intptr_t frame_size = instructions_.length() - frame_start_;
    ASSERT(frame_size >= 0);
    return frame_size;
  }

  void AddConstant(const Object& obj, intptr_t dest_index);

  Zone* zone() const { return zone_; }

  Zone* zone_;

  GrowableArray<DeoptInstr*> instructions_;
  const intptr_t num_args_;
  compiler::Assembler* assembler_;

  // Used to compress entries by sharing suffixes.
  TrieNode* trie_root_;
  intptr_t current_info_number_;

  intptr_t frame_start_;  // -1 until MarkFrameStart() is called.
  GrowableArray<MaterializeObjectInstr*> materializations_;

  DISALLOW_COPY_AND_ASSIGN(DeoptInfoBuilder);
};
533 | |
// Utilities for managing the deopt table and its entries. The table is
// stored in an Array in the heap. It consists of triples of (PC offset,
// info, reason). Elements of each entry are stored consecutively in the
// array.
// TODO(vegorov): consider compressing the whole table into a single TypedData
// object.
class DeoptTable : public AllStatic {
 public:
  // Return the array size in elements for a given number of table entries.
  static intptr_t SizeFor(intptr_t length);

  // Set the entry at the given index into the table (not an array index).
  static void SetEntry(const Array& table,
                       intptr_t index,
                       const Smi& offset,
                       const TypedData& info,
                       const Smi& reason_and_flags);

  // Return the length of the table in entries.
  static intptr_t GetLength(const Array& table);

  // Set the output parameters (offset, info, reason) to the entry values at
  // the index into the table (not an array index).
  static void GetEntry(const Array& table,
                       intptr_t index,
                       Smi* offset,
                       TypedData* info,
                       Smi* reason_and_flags);

  // Packs a deopt reason and its flags into a single Smi using the bit
  // fields declared below.
  static SmiPtr EncodeReasonAndFlags(ICData::DeoptReasonId reason,
                                     uint32_t flags) {
    return Smi::New(ReasonField::encode(reason) | FlagsField::encode(flags));
  }

  // Bits 0-7: reason; bits 8-15: flags.
  class ReasonField : public BitField<intptr_t, ICData::DeoptReasonId, 0, 8> {};
  class FlagsField : public BitField<intptr_t, uint32_t, 8, 8> {};

 private:
  // Number of array elements per table entry: (offset, info, reason).
  static const intptr_t kEntrySize = 3;
};
574 | |
// Holds deopt information at one deoptimization point. The information consists
// of two parts:
//   - first a prefix consisting of kMaterializeObject instructions describing
//     objects which had their allocation removed as part of AllocationSinking
//     pass and have to be materialized;
//   - followed by a list of DeoptInstr objects, specifying transformation
//     information for each slot in unoptimized frame(s).
// Arguments for object materialization (class of instance to be allocated and
// field-value pairs) are added as artificial slots to the expression stack
// of the bottom-most frame. They are removed from the stack at the very end
// of deoptimization by the deoptimization stub.
class DeoptInfo : public AllStatic {
 public:
  // Size of the frame part of the translation not counting kMaterializeObject
  // instructions in the prefix.
  static intptr_t FrameSize(const TypedData& packed);

  // Returns the number of kMaterializeObject instructions in the prefix.
  static intptr_t NumMaterializations(const GrowableArray<DeoptInstr*>&);

  // Unpack the entire translation into an array of deoptimization
  // instructions. This copies any shared suffixes into the array.
  static void Unpack(const Array& table,
                     const TypedData& packed,
                     GrowableArray<DeoptInstr*>* instructions);

  // Returns a textual representation of the packed translation, presumably
  // for debugging/disassembly output — see the .cc for the exact format.
  static const char* ToCString(const Array& table, const TypedData& packed);

  // Returns true iff decompression yields the same instructions as the
  // original.
  static bool VerifyDecompression(const GrowableArray<DeoptInstr*>& original,
                                  const Array& deopt_table,
                                  const TypedData& packed);

 private:
  // Shared implementation of Unpack; unpacks at most 'length' instructions.
  static void UnpackInto(const Array& table,
                         const TypedData& packed,
                         GrowableArray<DeoptInstr*>* instructions,
                         intptr_t length);
};
617 | |
618 | } // namespace dart |
619 | |
620 | #endif // !defined(DART_PRECOMPILED_RUNTIME) |
621 | #endif // RUNTIME_VM_DEOPT_INSTRUCTIONS_H_ |
622 | |