// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_BACKEND_LINEARSCAN_H_
#define RUNTIME_VM_COMPILER_BACKEND_LINEARSCAN_H_

#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif  // defined(DART_PRECOMPILED_RUNTIME)

#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/backend/il.h"
#include "vm/growable_array.h"

namespace dart {

class AllocationFinger;
class FlowGraph;
class LiveRange;
class UseInterval;
class UsePosition;

class ReachingDefs : public ValueObject {
 public:
  explicit ReachingDefs(const FlowGraph& flow_graph)
      : flow_graph_(flow_graph), phis_(10) {}

  BitVector* Get(PhiInstr* phi);

 private:
  void AddPhi(PhiInstr* phi);
  void Compute();

  const FlowGraph& flow_graph_;
  GrowableArray<PhiInstr*> phis_;
};

class SSALivenessAnalysis : public LivenessAnalysis {
 public:
  explicit SSALivenessAnalysis(const FlowGraph& flow_graph)
      : LivenessAnalysis(flow_graph.max_virtual_register_number(),
                         flow_graph.postorder()),
        graph_entry_(flow_graph.graph_entry()) {}

 private:
  // Compute initial values for live-out, kill and live-in sets.
  virtual void ComputeInitialSets();

  GraphEntryInstr* graph_entry_;
};

// Forward.
struct ExtraLoopInfo;

class FlowGraphAllocator : public ValueObject {
 public:
  // Number of stack slots needed for an FPU register spill slot.
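  // For example, with an 8-byte double and a 4-byte target word (32-bit
  // targets) the factor is 2, while on 64-bit targets it is 1.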
  static const intptr_t kDoubleSpillFactor =
      kDoubleSize / compiler::target::kWordSize;

  explicit FlowGraphAllocator(const FlowGraph& flow_graph,
                              bool intrinsic_mode = false);

  void AllocateRegisters();

  // Map a virtual register number to its live range.
  LiveRange* GetLiveRange(intptr_t vreg);

  DART_FORCE_INLINE static void SetLifetimePosition(Instruction* instr,
                                                    intptr_t pos) {
    instr->SetPassSpecificId(CompilerPass::kAllocateRegisters, pos);
  }

  DART_FORCE_INLINE static bool HasLifetimePosition(Instruction* instr) {
    return instr->HasPassSpecificId(CompilerPass::kAllocateRegisters);
  }

  DART_FORCE_INLINE static intptr_t GetLifetimePosition(
      const Instruction* instr) {
    return instr->GetPassSpecificId(CompilerPass::kAllocateRegisters);
  }

 private:
  void CollectRepresentations();

  // Visit blocks in the code generation order (reverse post order) and
  // linearly assign consecutive lifetime positions to every instruction.
  // We assign positions as follows:
  //
  //     2 * n     - even position corresponding to the instruction's start;
  //
  //     2 * n + 1 - odd position corresponding to the instruction's end;
  //
  // Having two positions per instruction allows us to capture non-trivial
  // shapes of use intervals: e.g. by placing a use at the start or the end
  // position we can distinguish between instructions that need the value
  // in the register only at their start and those that need the value in
  // the register until the end of the instruction's body.
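  //
  // As an illustrative sketch (instruction names are hypothetical), the
  // instructions of a block numbered starting at n receive positions:
  //
  //     v0 <- DefInstr()    start: 2 * n      end: 2 * n + 1
  //     UseInstr(v0)        start: 2 * n + 2  end: 2 * n + 3
  //
  // A use of v0 at position 2 * n + 2 requires the value only when
  // UseInstr starts, while a use at 2 * n + 3 keeps it in the register
  // until the end of UseInstr's body.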
  //
  // The register allocator can split live ranges at any position. An
  // implicit ParallelMove will be inserted by ConnectSplitSiblings where
  // required to resolve data flow between split siblings when allocation
  // is finished.
  // For specific examples see the comments inside ProcessOneInstruction.
  // Additionally creates parallel moves at the predecessors of join blocks;
  // these moves will be used for phi resolution.
  void NumberInstructions();
  Instruction* InstructionAt(intptr_t pos) const;
  BlockEntryInstr* BlockEntryAt(intptr_t pos) const;
  bool IsBlockEntry(intptr_t pos) const;

  LiveRange* MakeLiveRangeForTemporary();

  // Visit instructions in postorder and build live ranges for all SSA
  // values.
  void BuildLiveRanges();

  Instruction* ConnectOutgoingPhiMoves(BlockEntryInstr* block,
                                       BitVector* interference_set);
  void ProcessEnvironmentUses(BlockEntryInstr* block, Instruction* current);
  void ProcessMaterializationUses(BlockEntryInstr* block,
                                  const intptr_t block_start_pos,
                                  const intptr_t use_pos,
                                  MaterializeObjectInstr* mat);
  void ProcessOneInput(BlockEntryInstr* block,
                       intptr_t pos,
                       Location* in_ref,
                       Value* input,
                       intptr_t vreg,
                       RegisterSet* live_registers);
  void ProcessOneOutput(BlockEntryInstr* block,
                        intptr_t pos,
                        Location* out,
                        Definition* def,
                        intptr_t vreg,
                        bool output_same_as_first_input,
                        Location* in_ref,
                        Definition* input,
                        intptr_t input_vreg,
                        BitVector* interference_set);
  void ProcessOneInstruction(BlockEntryInstr* block,
                             Instruction* instr,
                             BitVector* interference_set);

  static const intptr_t kNormalEntryPos = 2;

  void ProcessInitialDefinition(Definition* defn,
                                LiveRange* range,
                                BlockEntryInstr* block,
                                bool second_location_for_definition = false);
  void ConnectIncomingPhiMoves(JoinEntryInstr* join);
  void BlockLocation(Location loc, intptr_t from, intptr_t to);
  void BlockRegisterLocation(Location loc,
                             intptr_t from,
                             intptr_t to,
                             bool* blocked_registers,
                             LiveRange** blocking_ranges);

  intptr_t NumberOfRegisters() const { return number_of_registers_; }

  // Find all safepoints that are covered by this live range.
  void AssignSafepoints(Definition* defn, LiveRange* range);

  void PrepareForAllocation(Location::Kind register_kind,
                            intptr_t number_of_registers,
                            const GrowableArray<LiveRange*>& unallocated,
                            LiveRange** blocking_ranges,
                            bool* blocked_registers);

  // Process live ranges sorted by their start and assign registers to them.
  void AllocateUnallocatedRanges();
  void AdvanceActiveIntervals(const intptr_t start);

  // Connect split siblings over non-linear control flow edges.
  void ResolveControlFlow();
  void ConnectSplitSiblings(LiveRange* range,
                            BlockEntryInstr* source_block,
                            BlockEntryInstr* target_block);

  // Returns true if the target location is the spill slot for the given range.
  bool TargetLocationIsSpillSlot(LiveRange* range, Location target);

  // Update the location slot corresponding to the use with the location
  // allocated for the use's live range.
  void ConvertUseTo(UsePosition* use, Location loc);
  void ConvertAllUses(LiveRange* range);

  // Add the live range to the list of unallocated live ranges to be
  // processed by the allocator.
  void AddToUnallocated(LiveRange* range);
  void CompleteRange(LiveRange* range, Location::Kind kind);
#if defined(DEBUG)
  bool UnallocatedIsSorted();
#endif

  // Try to find a free register for an unallocated live range.
  bool AllocateFreeRegister(LiveRange* unallocated);

  // Try to find a register that can be used by the given live range.
  // If all registers are occupied, consider evicting interference for
  // a register that is going to be used as far from the start of the
  // unallocated live range as possible.
  void AllocateAnyRegister(LiveRange* unallocated);

  // Returns true if the given range has only unconstrained uses in
  // the given loop.
  bool RangeHasOnlyUnconstrainedUsesInLoop(LiveRange* range, intptr_t loop_id);

  // Returns true if there is a register blocked by a range that has
  // only unconstrained uses in the loop. Such a range is a good
  // eviction candidate when the allocator tries to allocate a loop phi.
  // Spilling a loop phi has a bigger negative impact on performance
  // because it introduces multiple memory operations inside the loop
  // body and on the back edge.
  bool HasCheapEvictionCandidate(LiveRange* phi_range);
  bool IsCheapToEvictRegisterInLoop(LoopInfo* loop_info, intptr_t reg);

  // Assign the selected non-free register to an unallocated live range
  // and evict any interference that can be evicted by splitting and
  // spilling parts of interfering live ranges. Place non-spilled parts
  // into the list of unallocated ranges.
  void AssignNonFreeRegister(LiveRange* unallocated, intptr_t reg);
  bool EvictIntersection(LiveRange* allocated, LiveRange* unallocated);
  void RemoveEvicted(intptr_t reg, intptr_t first_evicted);

  // Find the first intersection between the unallocated live range and
  // the live ranges currently allocated to the given register.
  intptr_t FirstIntersectionWithAllocated(intptr_t reg, LiveRange* unallocated);

  bool UpdateFreeUntil(intptr_t reg,
                       LiveRange* unallocated,
                       intptr_t* cur_free_until,
                       intptr_t* cur_blocked_at);

  // Split the given live range at an optimal position between the given
  // positions.
  LiveRange* SplitBetween(LiveRange* range, intptr_t from, intptr_t to);

  // Find a spill slot that can be used by the given live range.
  void AllocateSpillSlotFor(LiveRange* range);

  // Allocate the given live range to a spill slot.
  void Spill(LiveRange* range);

  // Spill the given live range from the given position onwards.
  void SpillAfter(LiveRange* range, intptr_t from);

  // Spill the given live range from the given position until some
  // position preceding the to position.
  void SpillBetween(LiveRange* range, intptr_t from, intptr_t to);

  // Mark the live range as a live object pointer at all safepoints
  // contained in the range.
  void MarkAsObjectAtSafepoints(LiveRange* range);

  MoveOperands* AddMoveAt(intptr_t pos, Location to, Location from);

  Location MakeRegisterLocation(intptr_t reg) {
    return Location::MachineRegisterLocation(register_kind_, reg);
  }

  void SplitInitialDefinitionAt(LiveRange* range, intptr_t pos);

  void PrintLiveRanges();

  const FlowGraph& flow_graph_;

  ReachingDefs reaching_defs_;

  // Representation for SSA values indexed by SSA temp index.
  GrowableArray<Representation> value_representations_;

  const GrowableArray<BlockEntryInstr*>& block_order_;
  const GrowableArray<BlockEntryInstr*>& postorder_;

  // Mapping between lifetime positions and instructions.
  GrowableArray<Instruction*> instructions_;

  // Mapping between lifetime positions and block entries.
  GrowableArray<BlockEntryInstr*> block_entries_;

  // Mapping between loops and additional information.
  GrowableArray<ExtraLoopInfo*> extra_loop_info_;

  SSALivenessAnalysis liveness_;

  // Number of virtual registers. Currently equal to the number of
  // SSA values.
  const intptr_t vreg_count_;

  // LiveRanges corresponding to SSA values.
  GrowableArray<LiveRange*> live_ranges_;

  GrowableArray<LiveRange*> unallocated_cpu_;
  GrowableArray<LiveRange*> unallocated_xmm_;

  LiveRange* cpu_regs_[kNumberOfCpuRegisters];
  LiveRange* fpu_regs_[kNumberOfFpuRegisters];

  bool blocked_cpu_registers_[kNumberOfCpuRegisters];
  bool blocked_fpu_registers_[kNumberOfFpuRegisters];

#if defined(DEBUG)
  GrowableArray<LiveRange*> temporaries_;
#endif

  // List of spilled live ranges.
  GrowableArray<LiveRange*> spilled_;

  // List of instructions containing calls.
  GrowableArray<Instruction*> safepoints_;

  Location::Kind register_kind_;

  intptr_t number_of_registers_;

  // Per-register lists of allocated live ranges. They contain only those
  // ranges that can be affected by future allocation decisions: live
  // ranges that end before the start of the current live range are
  // removed from the list and will not be affected.
  // The length of both arrays is 'number_of_registers_'.
  GrowableArray<ZoneGrowableArray<LiveRange*>*> registers_;

  GrowableArray<bool> blocked_registers_;

  // Worklist for the register allocator. Always maintained sorted
  // according to the ShouldBeAllocatedBefore predicate.
  GrowableArray<LiveRange*> unallocated_;

  // List of used spill slots. Contains positions after which spill slots
  // become free and can be reused for allocation.
  GrowableArray<intptr_t> spill_slots_;

  // For every used spill slot, contains a flag that determines whether it
  // is a quad spill slot; used to ensure that the indexes of quad and
  // double spill slots are disjoint.
  GrowableArray<bool> quad_spill_slots_;

  // Track whether a spill slot is expected to hold a tagged or untagged
  // value. This is used to keep tagged and untagged spill slots disjoint.
  // See bug #18955 for details.
  GrowableArray<bool> untagged_spill_slots_;

  intptr_t cpu_spill_slot_count_;

  const bool intrinsic_mode_;

  DISALLOW_COPY_AND_ASSIGN(FlowGraphAllocator);
};

// UsePosition represents a single use of an SSA value by some instruction.
// It points to a location slot, which either tells the register allocator
// where the instruction expects the value (if the slot contains a fixed
// location) or asks the register allocator to allocate storage (a register
// or a spill slot) for this use with certain properties (if the slot
// contains an unallocated location).
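// For example, a slot holding Location::RequiresRegister() asks the
// allocator to pick any register for the use, while a slot holding a
// fixed register location pins the use to that specific register.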
class UsePosition : public ZoneAllocated {
 public:
  UsePosition(intptr_t pos, UsePosition* next, Location* location_slot)
      : pos_(pos), location_slot_(location_slot), hint_(NULL), next_(next) {
    ASSERT(location_slot != NULL);
  }

  Location* location_slot() const { return location_slot_; }
  void set_location_slot(Location* location_slot) {
    location_slot_ = location_slot;
  }

  Location hint() const {
    ASSERT(HasHint());
    return *hint_;
  }

  void set_hint(Location* hint) { hint_ = hint; }

  bool HasHint() const { return (hint_ != NULL) && !hint_->IsUnallocated(); }

  void set_next(UsePosition* next) { next_ = next; }
  UsePosition* next() const { return next_; }

  intptr_t pos() const { return pos_; }

 private:
  const intptr_t pos_;
  Location* location_slot_;
  Location* hint_;
  UsePosition* next_;

  DISALLOW_COPY_AND_ASSIGN(UsePosition);
};

// UseInterval represents a holeless half-open interval of liveness for a
// given SSA value: [start, end) in terms of the lifetime positions that
// NumberInstructions assigns to instructions. The register allocator has
// to keep the value alive in a register or a spill slot from the start
// position until the end position. The interval can cover zero or more uses.
// Note: currently all uses of the same SSA value are linked together into a
// single list (and not split between UseIntervals).
class UseInterval : public ZoneAllocated {
 public:
  UseInterval(intptr_t start, intptr_t end, UseInterval* next)
      : start_(start), end_(end), next_(next) {}

  void Print();

  intptr_t start() const { return start_; }
  intptr_t end() const { return end_; }
  UseInterval* next() const { return next_; }

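  // Returns true if the half-open interval covers the given position,
  // e.g. [2, 6) contains positions 2 through 5 but not 6.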
  bool Contains(intptr_t pos) const {
    return (start() <= pos) && (pos < end());
  }

  // Returns the smallest position that is covered by both UseIntervals,
  // or kIllegalPosition if the intervals do not intersect.
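  // For example, [2, 6) and [4, 10) first intersect at position 4, while
  // [2, 4) and [4, 10) do not intersect.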
  intptr_t Intersect(UseInterval* other);

 private:
  friend class LiveRange;

  intptr_t start_;
  intptr_t end_;
  UseInterval* next_;

  DISALLOW_COPY_AND_ASSIGN(UseInterval);
};

// AllocationFinger is used to keep track of the currently active position
// for the register allocator and to cache lookup results.
class AllocationFinger : public ValueObject {
 public:
  AllocationFinger()
      : first_pending_use_interval_(NULL),
        first_register_use_(NULL),
        first_register_beneficial_use_(NULL),
        first_hinted_use_(NULL) {}

  void Initialize(LiveRange* range);
  void UpdateAfterSplit(intptr_t first_use_after_split_pos);
  bool Advance(intptr_t start);

  UseInterval* first_pending_use_interval() const {
    return first_pending_use_interval_;
  }

  Location FirstHint();
  UsePosition* FirstRegisterUse(intptr_t after_pos);
  UsePosition* FirstRegisterBeneficialUse(intptr_t after_pos);
  UsePosition* FirstInterferingUse(intptr_t after_pos);

 private:
  UseInterval* first_pending_use_interval_;
  UsePosition* first_register_use_;
  UsePosition* first_register_beneficial_use_;
  UsePosition* first_hinted_use_;

  DISALLOW_COPY_AND_ASSIGN(AllocationFinger);
};

class SafepointPosition : public ZoneAllocated {
 public:
  SafepointPosition(intptr_t pos, LocationSummary* locs)
      : pos_(pos), locs_(locs), next_(NULL) {}

  void set_next(SafepointPosition* next) { next_ = next; }
  SafepointPosition* next() const { return next_; }

  intptr_t pos() const { return pos_; }

  LocationSummary* locs() const { return locs_; }

 private:
  const intptr_t pos_;
  LocationSummary* const locs_;

  SafepointPosition* next_;
};

// LiveRange represents a sequence of UseIntervals for a given SSA value.
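// When a range is split (see SplitAt), the tail becomes a sibling range
// reachable through next_sibling(); e.g. splitting a range covering
// [2, 10) at position 6 yields [2, 6) with a sibling covering [6, 10).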
class LiveRange : public ZoneAllocated {
 public:
  explicit LiveRange(intptr_t vreg, Representation rep)
      : vreg_(vreg),
        representation_(rep),
        assigned_location_(),
        spill_slot_(),
        uses_(NULL),
        first_use_interval_(NULL),
        last_use_interval_(NULL),
        first_safepoint_(NULL),
        last_safepoint_(NULL),
        next_sibling_(NULL),
        has_only_any_uses_in_loops_(0),
        is_loop_phi_(false),
        finger_() {}

  intptr_t vreg() const { return vreg_; }
  Representation representation() const { return representation_; }
  LiveRange* next_sibling() const { return next_sibling_; }
  UsePosition* first_use() const { return uses_; }
  void set_first_use(UsePosition* use) { uses_ = use; }
  UseInterval* first_use_interval() const { return first_use_interval_; }
  UseInterval* last_use_interval() const { return last_use_interval_; }
  Location assigned_location() const { return assigned_location_; }
  Location* assigned_location_slot() { return &assigned_location_; }
  intptr_t Start() const { return first_use_interval()->start(); }
  intptr_t End() const { return last_use_interval()->end(); }

  SafepointPosition* first_safepoint() const { return first_safepoint_; }

  AllocationFinger* finger() { return &finger_; }

  void set_assigned_location(Location location) {
    assigned_location_ = location;
  }

  void set_spill_slot(Location spill_slot) { spill_slot_ = spill_slot; }

  void DefineAt(intptr_t pos);

  void AddSafepoint(intptr_t pos, LocationSummary* locs);

  UsePosition* AddUse(intptr_t pos, Location* location_slot);
  void AddHintedUse(intptr_t pos, Location* location_slot, Location* hint);

  void AddUseInterval(intptr_t start, intptr_t end);

  void Print();

  LiveRange* SplitAt(intptr_t pos);

  // A fast conservative check whether the range might contain a given
  // position. May return true even when the range does not actually
  // contain the position (e.g. when the position lies in a lifetime hole
  // between the range's start and end).
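  // For example, a range with use intervals [2, 6) and [10, 14) has
  // Start() == 2 and End() == 14, so CanCover(8) returns true even
  // though Contains(8) is false.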
  bool CanCover(intptr_t pos) const {
    return (Start() <= pos) && (pos < End());
  }

  // True if the range contains the given position.
  bool Contains(intptr_t pos) const;

  Location spill_slot() const { return spill_slot_; }

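  // Loop ids are tracked in a fixed-width bit mask (kMaxLoops bits), so
  // e.g. loop_id 3 maps to bit 1 << 3; loops with ids >= kMaxLoops are
  // conservatively treated as having constrained uses.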
  bool HasOnlyUnconstrainedUsesInLoop(intptr_t loop_id) const {
    if (loop_id < kMaxLoops) {
      const uint64_t mask = static_cast<uint64_t>(1) << loop_id;
      return (has_only_any_uses_in_loops_ & mask) != 0;
    }
    return false;
  }

  void MarkHasOnlyUnconstrainedUsesInLoop(intptr_t loop_id) {
    if (loop_id < kMaxLoops) {
      has_only_any_uses_in_loops_ |= static_cast<uint64_t>(1) << loop_id;
    }
  }

  bool is_loop_phi() const { return is_loop_phi_; }
  void mark_loop_phi() { is_loop_phi_ = true; }

 private:
  LiveRange(intptr_t vreg,
            Representation rep,
            UsePosition* uses,
            UseInterval* first_use_interval,
            UseInterval* last_use_interval,
            SafepointPosition* first_safepoint,
            LiveRange* next_sibling)
      : vreg_(vreg),
        representation_(rep),
        assigned_location_(),
        uses_(uses),
        first_use_interval_(first_use_interval),
        last_use_interval_(last_use_interval),
        first_safepoint_(first_safepoint),
        last_safepoint_(NULL),
        next_sibling_(next_sibling),
        has_only_any_uses_in_loops_(0),
        is_loop_phi_(false),
        finger_() {}

  const intptr_t vreg_;
  Representation representation_;
  Location assigned_location_;
  Location spill_slot_;

  UsePosition* uses_;
  UseInterval* first_use_interval_;
  UseInterval* last_use_interval_;

  SafepointPosition* first_safepoint_;
  SafepointPosition* last_safepoint_;

  LiveRange* next_sibling_;

  static constexpr intptr_t kMaxLoops = sizeof(uint64_t) * kBitsPerByte;
  uint64_t has_only_any_uses_in_loops_;
  bool is_loop_phi_;

  AllocationFinger finger_;

  DISALLOW_COPY_AND_ASSIGN(LiveRange);
};

}  // namespace dart

#endif  // RUNTIME_VM_COMPILER_BACKEND_LINEARSCAN_H_