// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_H_
#define RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_H_

#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif  // defined(DART_PRECOMPILED_RUNTIME)

#include "vm/bit_vector.h"
#include "vm/compiler/backend/il.h"
#include "vm/growable_array.h"
#include "vm/hash_map.h"
#include "vm/parser.h"
#include "vm/thread.h"

namespace dart {

class LoopHierarchy;
class VariableLivenessAnalysis;

namespace compiler {
class GraphIntrinsifier;
}  // namespace compiler

class BlockIterator : public ValueObject {
 public:
  explicit BlockIterator(const GrowableArray<BlockEntryInstr*>& block_order)
      : block_order_(block_order), current_(0) {}

  BlockIterator(const BlockIterator& other)
      : ValueObject(),
        block_order_(other.block_order_),
        current_(other.current_) {}

  void Advance() {
    ASSERT(!Done());
    current_++;
  }

  bool Done() const { return current_ >= block_order_.length(); }

  BlockEntryInstr* Current() const { return block_order_[current_]; }

 private:
  const GrowableArray<BlockEntryInstr*>& block_order_;
  intptr_t current_;
};

struct ConstantPoolTrait {
  typedef ConstantInstr* Value;
  typedef const Object& Key;
  typedef ConstantInstr* Pair;

  static Key KeyOf(Pair kv) { return kv->value(); }

  static Value ValueOf(Pair kv) { return kv; }

  static inline intptr_t Hashcode(Key key) {
    if (key.IsSmi()) {
      return Smi::Cast(key).Value();
    }
    if (key.IsDouble()) {
      return static_cast<intptr_t>(bit_cast<int32_t, float>(
          static_cast<float>(Double::Cast(key).value())));
    }
    if (key.IsMint()) {
      return static_cast<intptr_t>(Mint::Cast(key).value());
    }
    if (key.IsString()) {
      return String::Cast(key).Hash();
    }
    return key.GetClassId();
  }

  static inline bool IsKeyEqual(Pair kv, Key key) {
    return kv->value().raw() == key.raw();
  }
};

struct PrologueInfo {
  // The first block id used for prologue building. This information can be
  // used by the inliner for budget calculations: the prologue code falls away
  // when inlining, so we should not include it in the budget.
  intptr_t min_block_id;

  // The last block id used for prologue building. This information can be
  // used by the inliner for budget calculations: the prologue code falls away
  // when inlining, so we should not include it in the budget.
  intptr_t max_block_id;

  PrologueInfo(intptr_t min, intptr_t max)
      : min_block_id(min), max_block_id(max) {}

  bool Contains(intptr_t block_id) const {
    return min_block_id <= block_id && block_id <= max_block_id;
  }
};
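
// For example (sketch), an inliner computing a size budget could skip any
// block for which prologue_info.Contains(block->block_id()) is true, since
// prologue code falls away when inlining.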

// Class to encapsulate the construction and manipulation of the flow graph.
class FlowGraph : public ZoneAllocated {
 public:
  FlowGraph(const ParsedFunction& parsed_function,
            GraphEntryInstr* graph_entry,
            intptr_t max_block_id,
            PrologueInfo prologue_info);

  // Function properties.
  const ParsedFunction& parsed_function() const { return parsed_function_; }
  const Function& function() const { return parsed_function_.function(); }

  // The number of directly accessible parameters (above the frame pointer).
  // All other parameters can only be loaded indirectly, via metadata found in
  // the arguments descriptor.
  intptr_t num_direct_parameters() const { return num_direct_parameters_; }

  // The number of words on the stack used by the direct parameters.
  intptr_t direct_parameters_size() const { return direct_parameters_size_; }

  // The number of variables (or boxes) which code can load from / store to.
  // The SSA renaming will insert phis for them (and only them - i.e. there
  // will be no phi insertion for [LocalVariable]s pointing to the expression
  // stack!).
  intptr_t variable_count() const {
    return num_direct_parameters_ + parsed_function_.num_stack_locals();
  }

  // The number of variables during OSR, which may include stack slots
  // that pass in initial contents for the expression stack.
  intptr_t osr_variable_count() const {
    ASSERT(IsCompiledForOsr());
    return variable_count() + graph_entry()->osr_entry()->stack_depth();
  }

  // Returns the offset (in words) of the [index]th parameter, relative to the
  // first parameter. If [last_slot] is true, it gives the offset of the last
  // slot of that location; otherwise it gives the offset of the first slot.
  static intptr_t ParameterOffsetAt(const Function& function,
                                    intptr_t index,
                                    bool last_slot = true);

  static Representation ParameterRepresentationAt(const Function& function,
                                                  intptr_t index);

  static Representation ReturnRepresentationOf(const Function& function);

  // The number of variables (or boxes) inside the function's frame - meaning
  // below the frame pointer. This does not include the expression stack.
  intptr_t num_stack_locals() const {
    return parsed_function_.num_stack_locals();
  }

  bool IsIrregexpFunction() const { return function().IsIrregexpFunction(); }

  LocalVariable* CurrentContextVar() const {
    return parsed_function().current_context_var();
  }

  intptr_t CurrentContextEnvIndex() const {
    if (function().HasBytecode()) {
      return -1;
    }

    return EnvIndex(parsed_function().current_context_var());
  }

  intptr_t RawTypeArgumentEnvIndex() const {
    return EnvIndex(parsed_function().RawTypeArgumentsVariable());
  }

  intptr_t ArgumentDescriptorEnvIndex() const {
    return EnvIndex(parsed_function().arg_desc_var());
  }

  intptr_t EnvIndex(const LocalVariable* variable) const {
    ASSERT(!variable->is_captured());
    return num_direct_parameters_ - variable->index().value();
  }
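
  // Environment layout assumed here (a sketch; the SSA renaming pass is
  // authoritative): slots [0, num_direct_parameters()) hold the direct
  // parameters, and the remaining slots up to variable_count() hold the
  // stack locals.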

  static bool NeedsPairLocation(Representation representation) {
    return representation == kUnboxedInt64 &&
           compiler::target::kIntSpillFactor == 2;
  }
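
  // E.g. on 32-bit targets compiler::target::kIntSpillFactor is 2, so an
  // unboxed int64 value occupies a pair of words when spilled.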

  // Flow graph orders.
  const GrowableArray<BlockEntryInstr*>& preorder() const { return preorder_; }
  const GrowableArray<BlockEntryInstr*>& postorder() const {
    return postorder_;
  }
  const GrowableArray<BlockEntryInstr*>& reverse_postorder() const {
    return reverse_postorder_;
  }
  static bool ShouldReorderBlocks(const Function& function, bool is_optimized);
  GrowableArray<BlockEntryInstr*>* CodegenBlockOrder(bool is_optimized);

  // Iterators.
  BlockIterator reverse_postorder_iterator() const {
    return BlockIterator(reverse_postorder());
  }
  BlockIterator postorder_iterator() const {
    return BlockIterator(postorder());
  }
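
  // A typical traversal (sketch): visit every block and each instruction in
  // reverse postorder.
  //
  //   for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
  //        !block_it.Done(); block_it.Advance()) {
  //     BlockEntryInstr* block = block_it.Current();
  //     for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
  //       Instruction* instr = it.Current();
  //       // ... inspect or rewrite instr ...
  //     }
  //   }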

  void EnsureSSATempIndex(Definition* defn, Definition* replacement);

  void ReplaceCurrentInstruction(ForwardInstructionIterator* iterator,
                                 Instruction* current,
                                 Instruction* replacement);

  Instruction* CreateCheckClass(Definition* to_check,
                                const Cids& cids,
                                intptr_t deopt_id,
                                TokenPosition token_pos);

  Definition* CreateCheckBound(Definition* length,
                               Definition* index,
                               intptr_t deopt_id);

  void AddExactnessGuard(InstanceCallInstr* call, intptr_t receiver_cid);

  intptr_t current_ssa_temp_index() const { return current_ssa_temp_index_; }
  void set_current_ssa_temp_index(intptr_t index) {
    current_ssa_temp_index_ = index;
  }

  intptr_t max_virtual_register_number() const {
    return current_ssa_temp_index();
  }

  enum class ToCheck { kNoCheck, kCheckNull, kCheckCid };

  // Uses CHA to determine if the called method can be overridden.
  // The return value indicates whether the call needs no check at all,
  // just a null check, or a full class check.
  ToCheck CheckForInstanceCall(InstanceCallInstr* call,
                               FunctionLayout::Kind kind) const;

  Thread* thread() const { return thread_; }
  Zone* zone() const { return thread()->zone(); }
  Isolate* isolate() const { return thread()->isolate(); }

  intptr_t max_block_id() const { return max_block_id_; }
  void set_max_block_id(intptr_t id) { max_block_id_ = id; }
  intptr_t allocate_block_id() { return ++max_block_id_; }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  ConstantInstr* constant_null() const { return constant_null_; }

  ConstantInstr* constant_dead() const { return constant_dead_; }

  intptr_t alloc_ssa_temp_index() { return current_ssa_temp_index_++; }

  void AllocateSSAIndexes(Definition* def) {
    ASSERT(def);
    def->set_ssa_temp_index(alloc_ssa_temp_index());
    // Always allocate a second index. This index is unused except
    // for Definitions with register pair outputs.
    alloc_ssa_temp_index();
  }

  intptr_t InstructionCount() const;

  // Returns the definition for the object from the constant pool if
  // one exists, otherwise returns nullptr.
  ConstantInstr* GetExistingConstant(const Object& object) const;

  // Always returns a definition for the object from the constant pool,
  // allocating one if it doesn't already exist.
  ConstantInstr* GetConstant(const Object& object);
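
  // Typical use (sketch; defn is illustrative): a canonicalization that folds
  // a definition to a known value can fetch a pooled constant and redirect
  // all uses to it:
  //
  //   ConstantInstr* zero =
  //       flow_graph->GetConstant(Smi::ZoneHandle(Smi::New(0)));
  //   defn->ReplaceUsesWith(zero);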

  void AddToGraphInitialDefinitions(Definition* defn);
  void AddToInitialDefinitions(BlockEntryWithInitialDefs* entry,
                               Definition* defn);

  // Tries to create a constant definition with the given value which can be
  // used to replace the given operation. Ensures that the representation of
  // the replacement matches the representation of the original definition.
  // If the given value can't be represented using a matching representation,
  // returns op itself.
  Definition* TryCreateConstantReplacementFor(Definition* op,
                                              const Object& value);

  // Returns true if the given constant value can be represented in the given
  // representation.
  static bool IsConstantRepresentable(const Object& value,
                                      Representation target_rep,
                                      bool tagged_value_must_be_smi);

  enum UseKind { kEffect, kValue };

  void InsertBefore(Instruction* next,
                    Instruction* instr,
                    Environment* env,
                    UseKind use_kind);
  void InsertAfter(Instruction* prev,
                   Instruction* instr,
                   Environment* env,
                   UseKind use_kind);
  Instruction* AppendTo(Instruction* prev,
                        Instruction* instr,
                        Environment* env,
                        UseKind use_kind);
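
  // Typical use (sketch; receiver, cids and call are illustrative): insert a
  // class check before a call, reusing the call's environment so that the
  // check can deoptimize:
  //
  //   Instruction* check = flow_graph->CreateCheckClass(
  //       receiver, cids, call->deopt_id(), call->token_pos());
  //   flow_graph->InsertBefore(call, check, call->env(), FlowGraph::kEffect);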

  // Operations on the flow graph.
  void ComputeSSA(intptr_t next_virtual_register_number,
                  ZoneGrowableArray<Definition*>* inlining_parameters);

  // Verification method for debugging.
  bool VerifyRedefinitions();

  void DiscoverBlocks();

  void MergeBlocks();

  // Insert a redefinition of an original definition after prev and rename all
  // dominated uses of the original. If an equivalent redefinition is already
  // present, nothing is inserted.
  // Returns the redefinition if one was inserted, nullptr otherwise.
  RedefinitionInstr* EnsureRedefinition(Instruction* prev,
                                        Definition* original,
                                        CompileType compile_type);

  // Remove the redefinition instructions inserted to inhibit code motion.
  void RemoveRedefinitions(bool keep_checks = false);

  // Insert PushArgument instructions and remove explicit def-use
  // relations between calls and their arguments.
  void InsertPushArguments();

  // Copy the deoptimization target from one instruction to another if we
  // still have to keep deoptimization environments at gotos for LICM
  // purposes.
  void CopyDeoptTarget(Instruction* to, Instruction* from) {
    if (is_licm_allowed()) {
      to->InheritDeoptTarget(zone(), from);
    }
  }

  // Returns true if every Goto in the graph is expected to have a
  // deoptimization environment and can be used as deoptimization target
  // for hoisted instructions.
  bool is_licm_allowed() const { return licm_allowed_; }

  // Stop preserving environments on Goto instructions. LICM is not allowed
  // after this point.
  void disallow_licm() { licm_allowed_ = false; }

  PrologueInfo prologue_info() const { return prologue_info_; }

  // Computes the loop hierarchy of the flow graph on demand.
  const LoopHierarchy& GetLoopHierarchy() {
    if (loop_hierarchy_ == nullptr) {
      loop_hierarchy_ = ComputeLoops();
    }
    return loop_hierarchy();
  }

  const LoopHierarchy& loop_hierarchy() const { return *loop_hierarchy_; }
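
  // E.g. (sketch, assuming the LoopInfo chain exposed by LoopHierarchy):
  //
  //   for (LoopInfo* loop = flow_graph->GetLoopHierarchy().top();
  //        loop != nullptr; loop = loop->next()) {
  //     ...  // Visit one natural loop.
  //   }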

  // Resets the loop hierarchy of the flow graph. Use this to
  // force a recomputation of loop detection by the next call
  // to GetLoopHierarchy() (note that this does not immediately
  // reset the loop_info fields of block entries, although
  // these will be overwritten by that next call).
  void ResetLoopHierarchy() {
    loop_hierarchy_ = nullptr;
    loop_invariant_loads_ = nullptr;
  }

  // Per-loop-header invariant loads sets. Each set contains the load ids of
  // those loads that are not affected by anything in the loop and can be
  // hoisted out. Sets are computed by LoadOptimizer.
  ZoneGrowableArray<BitVector*>* loop_invariant_loads() const {
    return loop_invariant_loads_;
  }
  void set_loop_invariant_loads(
      ZoneGrowableArray<BitVector*>* loop_invariant_loads) {
    loop_invariant_loads_ = loop_invariant_loads;
  }

  bool IsCompiledForOsr() const { return graph_entry()->IsCompiledForOsr(); }

  BitVector* captured_parameters() const { return captured_parameters_; }

  intptr_t inlining_id() const { return inlining_id_; }
  void set_inlining_id(intptr_t value) { inlining_id_ = value; }

  // Returns true if any instructions were canonicalized away.
  bool Canonicalize();

  // Attaches new ICData objects to static/instance calls that don't already
  // have them.
  void PopulateWithICData(const Function& function);

  void SelectRepresentations();

  void WidenSmiToInt32();

  // Remove environments from the instructions which do not deoptimize.
  void EliminateEnvironments();

  bool IsReceiver(Definition* def) const;

  // Optimize the (a << b) & c pattern: if c is a positive Smi or zero, then
  // the shift can be a truncating Smi shift-left and the result is always
  // Smi. Merges instructions (only per basic block).
  void TryOptimizePatterns();

  // Replaces uses of 'def' that are dominated by 'dom' with 'other'.
  // Note: uses that occur at the instruction 'dom' itself are not dominated
  // by it.
  static void RenameDominatedUses(Definition* def,
                                  Instruction* dom,
                                  Definition* other);

  // Ensures that any use of a redefined value that is dominated by a
  // redefinition is renamed to refer to that redefinition.
  void RenameUsesDominatedByRedefinitions();

  bool should_print() const { return should_print_; }

  //
  // High-level utilities.
  //

  // Logical-AND (for use in short-circuit diamond).
  struct LogicalAnd {
    LogicalAnd(ComparisonInstr* x, ComparisonInstr* y) : oper1(x), oper2(y) {}
    ComparisonInstr* oper1;
    ComparisonInstr* oper2;
  };

  // Constructs a diamond control flow at the instruction, inheriting
  // properties from inherit and using the given compare. Returns the
  // join (and true/false blocks in out parameters). Updates the dominance
  // relation, but not the succ/pred ordering on blocks.
  JoinEntryInstr* NewDiamond(Instruction* instruction,
                             Instruction* inherit,
                             ComparisonInstr* compare,
                             TargetEntryInstr** block_true,
                             TargetEntryInstr** block_false);

  // As above, but with a short-circuit on two comparisons.
  JoinEntryInstr* NewDiamond(Instruction* instruction,
                             Instruction* inherit,
                             const LogicalAnd& condition,
                             TargetEntryInstr** block_true,
                             TargetEntryInstr** block_false);

  // Adds a 2-way phi.
  PhiInstr* AddPhi(JoinEntryInstr* join, Definition* d1, Definition* d2);
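
  // Sketch of a typical diamond that selects between two values
  // (value_if_true/value_if_false are illustrative):
  //
  //   TargetEntryInstr* b_true = nullptr;
  //   TargetEntryInstr* b_false = nullptr;
  //   JoinEntryInstr* join =
  //       NewDiamond(instruction, inherit, compare, &b_true, &b_false);
  //   // Emit code into b_true/b_false, then merge with a 2-way phi:
  //   PhiInstr* phi = AddPhi(join, value_if_true, value_if_false);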

  // SSA transformation methods and fields.
  void ComputeDominators(GrowableArray<BitVector*>* dominance_frontier);

  void CreateCommonConstants();

 private:
  friend class FlowGraphCompiler;  // TODO(ajcbik): restructure
  friend class FlowGraphChecker;
  friend class IfConverter;
  friend class BranchSimplifier;
  friend class ConstantPropagator;
  friend class DeadCodeElimination;
  friend class compiler::GraphIntrinsifier;

  void CompressPath(intptr_t start_index,
                    intptr_t current_index,
                    GrowableArray<intptr_t>* parent,
                    GrowableArray<intptr_t>* label);

  void AddSyntheticPhis(BlockEntryInstr* block);

  void Rename(GrowableArray<PhiInstr*>* live_phis,
              VariableLivenessAnalysis* variable_liveness,
              ZoneGrowableArray<Definition*>* inlining_parameters);
  void RenameRecursive(BlockEntryInstr* block_entry,
                       GrowableArray<Definition*>* env,
                       GrowableArray<PhiInstr*>* live_phis,
                       VariableLivenessAnalysis* variable_liveness,
                       ZoneGrowableArray<Definition*>* inlining_parameters);

  void PopulateEnvironmentFromFunctionEntry(
      FunctionEntryInstr* function_entry,
      GrowableArray<Definition*>* env,
      GrowableArray<PhiInstr*>* live_phis,
      VariableLivenessAnalysis* variable_liveness,
      ZoneGrowableArray<Definition*>* inlining_parameters);

  void PopulateEnvironmentFromOsrEntry(OsrEntryInstr* osr_entry,
                                       GrowableArray<Definition*>* env);

  void PopulateEnvironmentFromCatchEntry(CatchBlockEntryInstr* catch_entry,
                                         GrowableArray<Definition*>* env);

  void AttachEnvironment(Instruction* instr, GrowableArray<Definition*>* env);

  void InsertPhis(const GrowableArray<BlockEntryInstr*>& preorder,
                  const GrowableArray<BitVector*>& assigned_vars,
                  const GrowableArray<BitVector*>& dom_frontier,
                  GrowableArray<PhiInstr*>* live_phis);

  void RemoveDeadPhis(GrowableArray<PhiInstr*>* live_phis);

  void ReplacePredecessor(BlockEntryInstr* old_block,
                          BlockEntryInstr* new_block);

  // Finds the blocks in the natural loop for the back edge m->n. The
  // algorithm is described in "Advanced Compiler Design & Implementation"
  // (Muchnick) p192. Returns a BitVector indexed by block pre-order
  // number where each bit indicates membership in the loop.
  BitVector* FindLoopBlocks(BlockEntryInstr* m, BlockEntryInstr* n) const;

  // Finds the natural loops in the flow graph and attaches the loop
  // information to each entry block. Returns the loop hierarchy.
  LoopHierarchy* ComputeLoops() const;

  void InsertConversionsFor(Definition* def);
  void ConvertUse(Value* use, Representation from);
  void InsertConversion(Representation from,
                        Representation to,
                        Value* use,
                        bool is_environment_use);

  void ComputeIsReceiver(PhiInstr* phi) const;
  void ComputeIsReceiverRecursive(PhiInstr* phi,
                                  GrowableArray<PhiInstr*>* unmark) const;

  void OptimizeLeftShiftBitAndSmiOp(
      ForwardInstructionIterator* current_iterator,
      Definition* bit_and_instr,
      Definition* left_instr,
      Definition* right_instr);

  void TryMergeTruncDivMod(GrowableArray<BinarySmiOpInstr*>* merge_candidates);

  void AppendExtractNthOutputForMerged(Definition* instr,
                                       intptr_t ix,
                                       Representation rep,
                                       intptr_t cid);

  Thread* thread_;

  // DiscoverBlocks computes parent_ and assigned_vars_ which are then used
  // if/when computing SSA.
  GrowableArray<intptr_t> parent_;
  GrowableArray<BitVector*> assigned_vars_;

  intptr_t current_ssa_temp_index_;
  intptr_t max_block_id_;

  // Flow graph fields.
  const ParsedFunction& parsed_function_;
  intptr_t num_direct_parameters_;
  intptr_t direct_parameters_size_;
  GraphEntryInstr* graph_entry_;
  GrowableArray<BlockEntryInstr*> preorder_;
  GrowableArray<BlockEntryInstr*> postorder_;
  GrowableArray<BlockEntryInstr*> reverse_postorder_;
  GrowableArray<BlockEntryInstr*> optimized_block_order_;
  ConstantInstr* constant_null_;
  ConstantInstr* constant_dead_;

  bool licm_allowed_;

  const PrologueInfo prologue_info_;

  // Loop related fields.
  LoopHierarchy* loop_hierarchy_;
  ZoneGrowableArray<BitVector*>* loop_invariant_loads_;

  DirectChainedHashMap<ConstantPoolTrait> constant_instr_pool_;
  BitVector* captured_parameters_;

  intptr_t inlining_id_;
  bool should_print_;
};

class LivenessAnalysis : public ValueObject {
 public:
  LivenessAnalysis(intptr_t variable_count,
                   const GrowableArray<BlockEntryInstr*>& postorder);

  void Analyze();

  virtual ~LivenessAnalysis() {}

  BitVector* GetLiveInSetAt(intptr_t postorder_number) const {
    return live_in_[postorder_number];
  }

  BitVector* GetLiveOutSetAt(intptr_t postorder_number) const {
    return live_out_[postorder_number];
  }

  BitVector* GetLiveInSet(BlockEntryInstr* block) const {
    return GetLiveInSetAt(block->postorder_number());
  }

  BitVector* GetKillSet(BlockEntryInstr* block) const {
    return kill_[block->postorder_number()];
  }

  BitVector* GetLiveOutSet(BlockEntryInstr* block) const {
    return GetLiveOutSetAt(block->postorder_number());
  }

  // Print results of liveness analysis.
  void Dump();

 protected:
  // Compute initial values for live-out, kill and live-in sets.
  virtual void ComputeInitialSets() = 0;

  // Update the live-out set for the given block: live-out should contain
  // all values that are live-in for the block's successors.
  // Returns true if the live-out set was changed.
  bool UpdateLiveOut(const BlockEntryInstr& instr);

  // Update the live-in set for the given block: live-in should contain
  // all values that are live-out from the block and are not defined
  // by the block itself.
  // Returns true if the live-in set was changed.
  bool UpdateLiveIn(const BlockEntryInstr& instr);

  // Perform fix-point iteration updating live-out and live-in sets
  // for blocks until they stop changing.
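  // The standard backward dataflow equations being solved are (sketch):
  //   live_out(B) = union of live_in(S) over all successors S of B
  //   live_in(B)  = uses(B) + (live_out(B) - kill(B))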
  void ComputeLiveInAndLiveOutSets();

  Zone* zone() const { return zone_; }

  Zone* zone_;

  const intptr_t variable_count_;

  const GrowableArray<BlockEntryInstr*>& postorder_;

  // Live-out sets for each block. They contain indices of variables
  // that are live out of this block: that is, values that were either
  // defined in this block or that flow into it, and that are used in
  // some successor block.
  GrowableArray<BitVector*> live_out_;

  // Kill sets for each block. They contain indices of variables that
  // are defined by this block.
  GrowableArray<BitVector*> kill_;

  // Live-in sets for each block. They contain indices of variables
  // that are used by this block or its successors.
  GrowableArray<BitVector*> live_in_;
};
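
// A DefinitionWorklist supports the usual worklist pattern (sketch; names
// other than the class API are illustrative):
//
//   DefinitionWorklist worklist(flow_graph, 10);
//   worklist.Add(seed);
//   while (!worklist.IsEmpty()) {
//     Definition* defn = worklist.RemoveLast();
//     // ... process defn, possibly Add()ing further definitions ...
//   }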
class DefinitionWorklist : public ValueObject {
 public:
  DefinitionWorklist(FlowGraph* flow_graph, intptr_t initial_capacity)
      : defs_(initial_capacity),
        contains_vector_(new BitVector(flow_graph->zone(),
                                       flow_graph->current_ssa_temp_index())) {}

  void Add(Definition* defn) {
    if (!Contains(defn)) {
      defs_.Add(defn);
      contains_vector_->Add(defn->ssa_temp_index());
    }
  }

  bool Contains(Definition* defn) const {
    return (defn->ssa_temp_index() >= 0) &&
           contains_vector_->Contains(defn->ssa_temp_index());
  }

  bool IsEmpty() const { return defs_.is_empty(); }

  Definition* RemoveLast() {
    Definition* defn = defs_.RemoveLast();
    contains_vector_->Remove(defn->ssa_temp_index());
    return defn;
  }

  const GrowableArray<Definition*>& definitions() const { return defs_; }
  BitVector* contains_vector() const { return contains_vector_; }

  void Clear() {
    defs_.TruncateTo(0);
    contains_vector_->Clear();
  }

 private:
  GrowableArray<Definition*> defs_;
  BitVector* contains_vector_;
};

}  // namespace dart

#endif  // RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_H_