// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_
#define RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_

#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif  // defined(DART_PRECOMPILED_RUNTIME)

#include <functional>

#include "vm/allocation.h"
#include "vm/code_descriptors.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/code_statistics.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/backend/locations.h"
#include "vm/runtime_entry.h"

namespace dart {

// Forward declarations.
class CatchEntryMovesMapBuilder;
class Code;
class DeoptInfoBuilder;
class FlowGraph;
class FlowGraphCompiler;
class Function;
template <typename T>
class GrowableArray;
class ParsedFunction;
class SpeculativeInliningPolicy;

namespace compiler {
struct TableSelector;
}

// Used in methods which need conditional access to a temporary register.
// May only be used to allocate a single temporary register.
class TemporaryRegisterAllocator : public ValueObject {
 public:
  virtual ~TemporaryRegisterAllocator() {}
  virtual Register AllocateTemporary() = 0;
  virtual void ReleaseTemporary() = 0;
};

class ConstantTemporaryAllocator : public TemporaryRegisterAllocator {
 public:
  explicit ConstantTemporaryAllocator(Register tmp) : tmp_(tmp) {}

  Register AllocateTemporary() override { return tmp_; }
  void ReleaseTemporary() override {}

 private:
  Register const tmp_;
};

class NoTemporaryAllocator : public TemporaryRegisterAllocator {
 public:
  Register AllocateTemporary() override { UNREACHABLE(); }
  void ReleaseTemporary() override { UNREACHABLE(); }
};
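
// A minimal usage sketch (illustrative only, not code from this file): a
// caller that has a known-free register hands it out through a
// ConstantTemporaryAllocator, while a NoTemporaryAllocator is passed when the
// caller can guarantee that no temporary will ever be requested. Availability
// of the TMP register is architecture-dependent and assumed here.
//
//   ConstantTemporaryAllocator temp_alloc(TMP);
//   Register scratch = temp_alloc.AllocateTemporary();
//   // ... use |scratch| for a single temporary value ...
//   temp_alloc.ReleaseTemporary();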

class ParallelMoveResolver : public ValueObject {
 public:
  explicit ParallelMoveResolver(FlowGraphCompiler* compiler);

  // Resolve a set of parallel moves, emitting assembler instructions.
  void EmitNativeCode(ParallelMoveInstr* parallel_move);

 private:
  class ScratchFpuRegisterScope : public ValueObject {
   public:
    ScratchFpuRegisterScope(ParallelMoveResolver* resolver,
                            FpuRegister blocked);
    ~ScratchFpuRegisterScope();

    FpuRegister reg() const { return reg_; }

   private:
    ParallelMoveResolver* resolver_;
    FpuRegister reg_;
    bool spilled_;
  };

  class TemporaryAllocator : public TemporaryRegisterAllocator {
   public:
    TemporaryAllocator(ParallelMoveResolver* resolver, Register blocked);

    Register AllocateTemporary() override;
    void ReleaseTemporary() override;
    DEBUG_ONLY(bool DidAllocateTemporary() { return allocated_; })

    virtual ~TemporaryAllocator() { ASSERT(reg_ == kNoRegister); }

   private:
    ParallelMoveResolver* const resolver_;
    const Register blocked_;
    Register reg_;
    bool spilled_;
    DEBUG_ONLY(bool allocated_ = false);
  };

  class ScratchRegisterScope : public ValueObject {
   public:
    ScratchRegisterScope(ParallelMoveResolver* resolver, Register blocked);
    ~ScratchRegisterScope();

    Register reg() const { return reg_; }

   private:
    TemporaryAllocator allocator_;
    Register reg_;
  };

  bool IsScratchLocation(Location loc);
  intptr_t AllocateScratchRegister(Location::Kind kind,
                                   uword blocked_mask,
                                   intptr_t first_free_register,
                                   intptr_t last_free_register,
                                   bool* spilled);

  void SpillScratch(Register reg);
  void RestoreScratch(Register reg);
  void SpillFpuScratch(FpuRegister reg);
  void RestoreFpuScratch(FpuRegister reg);

  // friend class ScratchXmmRegisterScope;

  // Build the initial list of moves.
  void BuildInitialMoveList(ParallelMoveInstr* parallel_move);

  // Perform the move at the moves_ index in question (possibly requiring
  // other moves to satisfy dependencies).
  void PerformMove(int index);

  // Emit a move and remove it from the move graph.
  void EmitMove(int index);

  // Execute a move by emitting a swap of two operands. The move from
  // source to destination is removed from the move graph.
  void EmitSwap(int index);

  // Verify the move list before performing moves.
  void Verify();

  // Helpers for non-trivial source-destination combinations that cannot
  // be handled by a single instruction.
  void MoveMemoryToMemory(const compiler::Address& dst,
                          const compiler::Address& src);
  void Exchange(Register reg, const compiler::Address& mem);
  void Exchange(const compiler::Address& mem1, const compiler::Address& mem2);
  void Exchange(Register reg, Register base_reg, intptr_t stack_offset);
  void Exchange(Register base_reg1,
                intptr_t stack_offset1,
                Register base_reg2,
                intptr_t stack_offset2);

  FlowGraphCompiler* compiler_;

  // List of moves not yet resolved.
  GrowableArray<MoveOperands*> moves_;
};
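
// Worked example (illustrative only): the parallel move {R0 -> R1, R1 -> R0}
// forms a cycle and cannot be emitted as two sequential moves. PerformMove()
// detects the cycle and EmitSwap() breaks it with a single exchange, removing
// both moves from the move graph. A chain such as {R0 -> R1, R1 -> R2} is
// instead resolved by recursively performing the blocking move first
// (R1 -> R2, then R0 -> R1).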

// Used for describing a deoptimization point after a call (lazy
// deoptimization). For deoptimization before an instruction, use the class
// CompilerDeoptInfoWithStub.
class CompilerDeoptInfo : public ZoneAllocated {
 public:
  CompilerDeoptInfo(intptr_t deopt_id,
                    ICData::DeoptReasonId reason,
                    uint32_t flags,
                    Environment* deopt_env)
      : pc_offset_(-1),
        deopt_id_(deopt_id),
        reason_(reason),
        flags_(flags),
        deopt_env_(deopt_env) {
    ASSERT(deopt_env != NULL);
  }
  virtual ~CompilerDeoptInfo() {}

  TypedDataPtr CreateDeoptInfo(FlowGraphCompiler* compiler,
                               DeoptInfoBuilder* builder,
                               const Array& deopt_table);

  // No code needs to be generated.
  virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix) {}

  intptr_t pc_offset() const { return pc_offset_; }
  void set_pc_offset(intptr_t offset) { pc_offset_ = offset; }

  intptr_t deopt_id() const { return deopt_id_; }
  ICData::DeoptReasonId reason() const { return reason_; }
  uint32_t flags() const { return flags_; }
  const Environment* deopt_env() const { return deopt_env_; }

 private:
  void EmitMaterializations(Environment* env, DeoptInfoBuilder* builder);

  void AllocateIncomingParametersRecursive(Environment* env,
                                           intptr_t* stack_height);

  intptr_t pc_offset_;
  const intptr_t deopt_id_;
  const ICData::DeoptReasonId reason_;
  const uint32_t flags_;
  Environment* deopt_env_;

  DISALLOW_COPY_AND_ASSIGN(CompilerDeoptInfo);
};

class CompilerDeoptInfoWithStub : public CompilerDeoptInfo {
 public:
  CompilerDeoptInfoWithStub(intptr_t deopt_id,
                            ICData::DeoptReasonId reason,
                            uint32_t flags,
                            Environment* deopt_env)
      : CompilerDeoptInfo(deopt_id, reason, flags, deopt_env), entry_label_() {
    ASSERT(reason != ICData::kDeoptAtCall);
  }

  compiler::Label* entry_label() { return &entry_label_; }

  // Implementation is in architecture specific file.
  virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix);

  const char* Name() const {
    const char* kFormat = "Deopt stub for id %d, reason: %s";
    const intptr_t len = Utils::SNPrint(NULL, 0, kFormat, deopt_id(),
                                        DeoptReasonToCString(reason())) +
                         1;
    char* chars = Thread::Current()->zone()->Alloc<char>(len);
    Utils::SNPrint(chars, len, kFormat, deopt_id(),
                   DeoptReasonToCString(reason()));
    return chars;
  }

 private:
  compiler::Label entry_label_;

  DISALLOW_COPY_AND_ASSIGN(CompilerDeoptInfoWithStub);
};

class SlowPathCode : public ZoneAllocated {
 public:
  explicit SlowPathCode(Instruction* instruction)
      : instruction_(instruction), entry_label_(), exit_label_() {}
  virtual ~SlowPathCode() {}

  Instruction* instruction() const { return instruction_; }
  compiler::Label* entry_label() { return &entry_label_; }
  compiler::Label* exit_label() { return &exit_label_; }

  void GenerateCode(FlowGraphCompiler* compiler) {
    EmitNativeCode(compiler);
    ASSERT(entry_label_.IsBound());
  }

 private:
  virtual void EmitNativeCode(FlowGraphCompiler* compiler) = 0;

  Instruction* instruction_;
  compiler::Label entry_label_;
  compiler::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};

template <typename T>
class TemplateSlowPathCode : public SlowPathCode {
 public:
  explicit TemplateSlowPathCode(T* instruction) : SlowPathCode(instruction) {}

  T* instruction() const {
    return static_cast<T*>(SlowPathCode::instruction());
  }
};
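
// Typical usage sketch (hypothetical MyCheckSlowPath subclass, not code from
// this file): an instruction's EmitNativeCode() allocates a slow path,
// registers it with the compiler, branches to entry_label() on the uncommon
// case, and continues at exit_label():
//
//   auto slow_path = new MyCheckSlowPath(this);
//   compiler->AddSlowPathCode(slow_path);
//   // ... branch to slow_path->entry_label() on the uncommon case ...
//   __ Bind(slow_path->exit_label());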

// Slow path code which calls runtime entry to throw an exception.
class ThrowErrorSlowPathCode : public TemplateSlowPathCode<Instruction> {
 public:
  ThrowErrorSlowPathCode(Instruction* instruction,
                         const RuntimeEntry& runtime_entry,
                         intptr_t num_args,
                         intptr_t try_index)
      : TemplateSlowPathCode(instruction),
        runtime_entry_(runtime_entry),
        num_args_(num_args),
        try_index_(try_index) {}

  // This name appears in disassembly.
  virtual const char* name() = 0;

  // Subclasses can override these methods to customize slow path code.
  virtual void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) {}
  virtual void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) {}

  virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
                                  bool save_fpu_registers) {
    UNREACHABLE();
  }

  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

 private:
  const RuntimeEntry& runtime_entry_;
  const intptr_t num_args_;
  const intptr_t try_index_;
};

class NullErrorSlowPath : public ThrowErrorSlowPathCode {
 public:
  static const intptr_t kNumberOfArguments = 0;

  NullErrorSlowPath(CheckNullInstr* instruction, intptr_t try_index)
      : ThrowErrorSlowPathCode(instruction,
                               GetRuntimeEntry(instruction->exception_type()),
                               kNumberOfArguments,
                               try_index) {}

  CheckNullInstr::ExceptionType exception_type() const {
    return instruction()->AsCheckNull()->exception_type();
  }

  const char* name() override;

  void EmitSharedStubCall(FlowGraphCompiler* compiler,
                          bool save_fpu_registers) override;

  void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) override {
    CheckNullInstr::AddMetadataForRuntimeCall(instruction()->AsCheckNull(),
                                              compiler);
  }

  static CodePtr GetStub(FlowGraphCompiler* compiler,
                         CheckNullInstr::ExceptionType exception_type,
                         bool save_fpu_registers);

 private:
  static const RuntimeEntry& GetRuntimeEntry(
      CheckNullInstr::ExceptionType exception_type);
};

class RangeErrorSlowPath : public ThrowErrorSlowPathCode {
 public:
  static const intptr_t kNumberOfArguments = 0;

  RangeErrorSlowPath(GenericCheckBoundInstr* instruction, intptr_t try_index)
      : ThrowErrorSlowPathCode(instruction,
                               kRangeErrorRuntimeEntry,
                               kNumberOfArguments,
                               try_index) {}
  virtual const char* name() { return "check bound"; }

  virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
                                  bool save_fpu_registers);
};

class FlowGraphCompiler : public ValueObject {
 private:
  class BlockInfo : public ZoneAllocated {
   public:
    BlockInfo()
        : block_label_(),
          jump_label_(&block_label_),
          next_nonempty_label_(NULL),
          is_marked_(false) {}

    // The label to jump to when control is transferred to this block. For
    // nonempty blocks it is the label of the block itself. For empty
    // blocks it is the label of the first nonempty successor block.
    compiler::Label* jump_label() const { return jump_label_; }
    void set_jump_label(compiler::Label* label) { jump_label_ = label; }

    // The label of the first nonempty block after this one in the block
    // order, or NULL if there is no nonempty block following this one.
    compiler::Label* next_nonempty_label() const {
      return next_nonempty_label_;
    }
    void set_next_nonempty_label(compiler::Label* label) {
      next_nonempty_label_ = label;
    }

    bool WasCompacted() const { return jump_label_ != &block_label_; }

    // Block compaction is recursive. Block info for already-compacted
    // blocks is marked so as to avoid cycles in the graph.
    bool is_marked() const { return is_marked_; }
    void mark() { is_marked_ = true; }

   private:
    compiler::Label block_label_;

    compiler::Label* jump_label_;
    compiler::Label* next_nonempty_label_;

    bool is_marked_;
  };
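
  // For example (illustrative only): if block B2 contains nothing but a goto
  // to B3, CompactBlocks() redirects B2's jump_label() to B3's label, so
  // branches targeting B2 jump straight to B3, no code is emitted for B2, and
  // WasCompacted() returns true for B2.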

 public:
  FlowGraphCompiler(compiler::Assembler* assembler,
                    FlowGraph* flow_graph,
                    const ParsedFunction& parsed_function,
                    bool is_optimizing,
                    SpeculativeInliningPolicy* speculative_policy,
                    const GrowableArray<const Function*>& inline_id_to_function,
                    const GrowableArray<TokenPosition>& inline_id_to_token_pos,
                    const GrowableArray<intptr_t>& caller_inline_id,
                    ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data,
                    CodeStatistics* stats = NULL);

  void ArchSpecificInitialization();

  ~FlowGraphCompiler();

  static bool SupportsUnboxedDoubles();
  static bool SupportsUnboxedInt64();
  static bool SupportsUnboxedSimd128();
  static bool SupportsHardwareDivision();
  static bool CanConvertInt64ToDouble();

  static bool IsUnboxedField(const Field& field);
  static bool IsPotentialUnboxedField(const Field& field);

  // Accessors.
  compiler::Assembler* assembler() const { return assembler_; }
  const ParsedFunction& parsed_function() const { return parsed_function_; }
  const Function& function() const { return parsed_function_.function(); }
  const GrowableArray<BlockEntryInstr*>& block_order() const {
    return block_order_;
  }
  const GrowableArray<const compiler::TableSelector*>&
  dispatch_table_call_targets() const {
    return dispatch_table_call_targets_;
  }

  // If 'ForcedOptimization()' returns 'true', we are compiling in optimized
  // mode for a function which cannot deoptimize. Certain optimizations, e.g.
  // speculative optimizations and call patching, are disabled.
  bool ForcedOptimization() const { return function().ForceOptimize(); }

  const FlowGraph& flow_graph() const { return flow_graph_; }

  BlockEntryInstr* current_block() const { return current_block_; }
  void set_current_block(BlockEntryInstr* value) { current_block_ = value; }
  static bool CanOptimize();
  bool CanOptimizeFunction() const;
  bool CanOSRFunction() const;
  bool is_optimizing() const { return is_optimizing_; }

  void InsertBSSRelocation(BSS::Relocation reloc);
  void LoadBSSEntry(BSS::Relocation relocation, Register dst, Register tmp);

  // The function was fully intrinsified, so the body is unreachable.
  //
  // We still need to compile the body in unoptimized mode because the
  // 'ICData's are added to the function's 'ic_data_array_' when instance
  // calls are compiled.
  bool skip_body_compilation() const {
    return fully_intrinsified_ && is_optimizing();
  }

  void EnterIntrinsicMode();
  void ExitIntrinsicMode();
  bool intrinsic_mode() const { return intrinsic_mode_; }

  void set_intrinsic_slow_path_label(compiler::Label* label) {
    ASSERT(intrinsic_slow_path_label_ == nullptr || label == nullptr);
    intrinsic_slow_path_label_ = label;
  }
  compiler::Label* intrinsic_slow_path_label() const {
    ASSERT(intrinsic_slow_path_label_ != nullptr);
    return intrinsic_slow_path_label_;
  }

  bool ForceSlowPathForStackOverflow() const;

  const GrowableArray<BlockInfo*>& block_info() const { return block_info_; }
  ParallelMoveResolver* parallel_move_resolver() {
    return &parallel_move_resolver_;
  }

  void StatsBegin(Instruction* instr) {
    if (stats_ != NULL) stats_->Begin(instr);
  }

  void StatsEnd(Instruction* instr) {
    if (stats_ != NULL) stats_->End(instr);
  }

  void SpecialStatsBegin(intptr_t tag) {
    if (stats_ != NULL) stats_->SpecialBegin(tag);
  }

  void SpecialStatsEnd(intptr_t tag) {
    if (stats_ != NULL) stats_->SpecialEnd(tag);
  }

  GrowableArray<const Field*>& used_static_fields() {
    return used_static_fields_;
  }

  // The constructor is lightweight; major initialization work should occur
  // here. This makes it easier to measure time spent in the compiler.
  void InitCompiler();

  void CompileGraph();

  void EmitPrologue();

  void VisitBlocks();

  // Bail out of the flow graph compiler. Does not return to the caller.
  void Bailout(const char* reason);

  // Returns 'true' if regular code generation should be skipped.
  bool TryIntrinsify();

  // Emits code for a generic move from a location 'src' to a location 'dst'.
  //
  // Note that Location does not include a size (that can only be deduced from
  // a Representation), so these moves might overapproximate the size needed
  // to move. The maximal overapproximation is moving 8 bytes instead of 4 on
  // 64 bit architectures. This overapproximation is not a problem, because
  // the Dart calling convention only uses word-sized stack slots.
  //
  // TODO(dartbug.com/40400): Express this in terms of EmitMove(NativeLocation
  // NativeLocation) to remove code duplication.
  void EmitMove(Location dst, Location src, TemporaryRegisterAllocator* temp);
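
  // Usage sketch for EmitMove() (hypothetical locations, not code from this
  // file): a stack-slot-to-stack-slot move has no single-instruction encoding
  // on most architectures, so a temporary register is requested from `temp`
  // to stage the value:
  //
  //   ConstantTemporaryAllocator temp_alloc(TMP);
  //   compiler->EmitMove(dst_stack_slot_loc, src_stack_slot_loc, &temp_alloc);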

  // Emits code for a move from a location `src` to a location `dst`.
  //
  // Takes into account the payload and container representations of `dst` and
  // `src` to do the smallest move possible, and sign (or zero) extend or
  // truncate if needed.
  //
  // Makes use of TMP, FpuTMP, and `temp`.
  void EmitNativeMove(const compiler::ffi::NativeLocation& dst,
                      const compiler::ffi::NativeLocation& src,
                      TemporaryRegisterAllocator* temp);

  // Helper method to move from a Location to a NativeLocation.
  void EmitMoveToNative(const compiler::ffi::NativeLocation& dst,
                        Location src_loc,
                        Representation src_type,
                        TemporaryRegisterAllocator* temp);

  // Helper method to move from a NativeLocation to a Location.
  void EmitMoveFromNative(Location dst_loc,
                          Representation dst_type,
                          const compiler::ffi::NativeLocation& src,
                          TemporaryRegisterAllocator* temp);

  // Emits a Dart const to a native location.
  void EmitMoveConst(const compiler::ffi::NativeLocation& dst,
                     Location src,
                     Representation src_type,
                     TemporaryRegisterAllocator* temp);

  bool CheckAssertAssignableTypeTestingABILocations(
      const LocationSummary& locs);

  void GenerateAssertAssignable(CompileType* receiver_type,
                                TokenPosition token_pos,
                                intptr_t deopt_id,
                                const String& dst_name,
                                LocationSummary* locs);

  // Returns true if we can use a type testing stub based assert
  // assignable code pattern for the given type.
  static bool ShouldUseTypeTestingStubFor(bool optimizing,
                                          const AbstractType& type);

  void GenerateAssertAssignableViaTypeTestingStub(CompileType* receiver_type,
                                                  TokenPosition token_pos,
                                                  intptr_t deopt_id,
                                                  const String& dst_name,
                                                  LocationSummary* locs);

  void GenerateAssertAssignableViaTypeTestingStub(
      CompileType* receiver_type,
      const AbstractType& dst_type,
      const String& dst_name,
      const Register dst_type_reg_to_call,
      const Register scratch_reg,
      compiler::Label* done);

  void GenerateRuntimeCall(TokenPosition token_pos,
                           intptr_t deopt_id,
                           const RuntimeEntry& entry,
                           intptr_t argument_count,
                           LocationSummary* locs);

  void GenerateStubCall(TokenPosition token_pos,
                        const Code& stub,
                        PcDescriptorsLayout::Kind kind,
                        LocationSummary* locs,
                        intptr_t deopt_id = DeoptId::kNone,
                        Environment* env = nullptr);

  void GeneratePatchableCall(TokenPosition token_pos,
                             const Code& stub,
                             PcDescriptorsLayout::Kind kind,
                             LocationSummary* locs);

  void GenerateDartCall(intptr_t deopt_id,
                        TokenPosition token_pos,
                        const Code& stub,
                        PcDescriptorsLayout::Kind kind,
                        LocationSummary* locs,
                        Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void GenerateStaticDartCall(
      intptr_t deopt_id,
      TokenPosition token_pos,
      PcDescriptorsLayout::Kind kind,
      LocationSummary* locs,
      const Function& target,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void GenerateInstanceOf(TokenPosition token_pos,
                          intptr_t deopt_id,
                          const AbstractType& type,
                          LocationSummary* locs);

  void GenerateInstanceCall(intptr_t deopt_id,
                            TokenPosition token_pos,
                            LocationSummary* locs,
                            const ICData& ic_data,
                            Code::EntryKind entry_kind,
                            bool receiver_can_be_smi);

  void GenerateStaticCall(
      intptr_t deopt_id,
      TokenPosition token_pos,
      const Function& function,
      ArgumentsInfo args_info,
      LocationSummary* locs,
      const ICData& ic_data_in,
      ICData::RebindRule rebind_rule,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void GenerateNumberTypeCheck(Register kClassIdReg,
                               const AbstractType& type,
                               compiler::Label* is_instance_lbl,
                               compiler::Label* is_not_instance_lbl);
  void GenerateStringTypeCheck(Register kClassIdReg,
                               compiler::Label* is_instance_lbl,
                               compiler::Label* is_not_instance_lbl);
  void GenerateListTypeCheck(Register kClassIdReg,
                             compiler::Label* is_instance_lbl);

  // Returns true if no further checks are necessary but the code coming after
  // the emitted code here is still required to do a runtime call (for the
  // negative case of throwing an exception).
  bool GenerateSubtypeRangeCheck(Register class_id_reg,
                                 const Class& type_class,
                                 compiler::Label* is_subtype_lbl);

  // We test up to 4 different cid ranges; if we would need to test more in
  // order to get a definite answer, we fall back to the old mechanism (namely
  // going into the subtyping cache).
  static const intptr_t kMaxNumberOfCidRangesToTest = 4;

  // If [fall_through_if_inside] is `true`, then [outside_range_lbl] must be
  // supplied, since it will be jumped to in the last case if the cid is
  // outside the range.
  static void GenerateCidRangesCheck(compiler::Assembler* assembler,
                                     Register class_id_reg,
                                     const CidRangeVector& cid_ranges,
                                     compiler::Label* inside_range_lbl,
                                     compiler::Label* outside_range_lbl = NULL,
                                     bool fall_through_if_inside = false);
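
  // Usage sketch (hypothetical labels and ranges, not code from this file):
  // test whether a cid falls inside the ranges covered by a type, falling
  // through on a hit and jumping away on a miss:
  //
  //   GenerateCidRangesCheck(assembler, class_id_reg, ranges,
  //                          /*inside_range_lbl=*/&is_subtype,
  //                          /*outside_range_lbl=*/&not_subtype,
  //                          /*fall_through_if_inside=*/true);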

  void EmitOptimizedInstanceCall(
      const Code& stub,
      const ICData& ic_data,
      intptr_t deopt_id,
      TokenPosition token_pos,
      LocationSummary* locs,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void EmitInstanceCallJIT(const Code& stub,
                           const ICData& ic_data,
                           intptr_t deopt_id,
                           TokenPosition token_pos,
                           LocationSummary* locs,
                           Code::EntryKind entry_kind);

  void EmitPolymorphicInstanceCall(const PolymorphicInstanceCallInstr* call,
                                   const CallTargets& targets,
                                   ArgumentsInfo args_info,
                                   intptr_t deopt_id,
                                   TokenPosition token_pos,
                                   LocationSummary* locs,
                                   bool complete,
                                   intptr_t total_call_count,
                                   bool receiver_can_be_smi = true);

  void EmitMegamorphicInstanceCall(const ICData& icdata,
                                   intptr_t deopt_id,
                                   TokenPosition token_pos,
                                   LocationSummary* locs,
                                   intptr_t try_index,
                                   intptr_t slow_path_argument_count = 0) {
    const String& name = String::Handle(icdata.target_name());
    const Array& arguments_descriptor =
        Array::Handle(icdata.arguments_descriptor());
    EmitMegamorphicInstanceCall(name, arguments_descriptor, deopt_id, token_pos,
                                locs, try_index);
  }

  // Pass a value for try-index where block is not available (e.g. slow path).
  void EmitMegamorphicInstanceCall(const String& function_name,
                                   const Array& arguments_descriptor,
                                   intptr_t deopt_id,
                                   TokenPosition token_pos,
                                   LocationSummary* locs,
                                   intptr_t try_index,
                                   intptr_t slow_path_argument_count = 0);

  void EmitInstanceCallAOT(
      const ICData& ic_data,
      intptr_t deopt_id,
      TokenPosition token_pos,
      LocationSummary* locs,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal,
      bool receiver_can_be_smi = true);

  void EmitTestAndCall(const CallTargets& targets,
                       const String& function_name,
                       ArgumentsInfo args_info,
                       compiler::Label* failed,
                       compiler::Label* match_found,
                       intptr_t deopt_id,
                       TokenPosition token_index,
                       LocationSummary* locs,
                       bool complete,
                       intptr_t total_ic_calls,
                       Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void EmitDispatchTableCall(Register cid_reg,
                             int32_t selector_offset,
                             const Array& arguments_descriptor);

  Condition EmitEqualityRegConstCompare(Register reg,
                                        const Object& obj,
                                        bool needs_number_check,
                                        TokenPosition token_pos,
                                        intptr_t deopt_id);
  Condition EmitEqualityRegRegCompare(Register left,
                                      Register right,
                                      bool needs_number_check,
                                      TokenPosition token_pos,
                                      intptr_t deopt_id);
  Condition EmitBoolTest(Register value, BranchLabels labels, bool invert);

  bool NeedsEdgeCounter(BlockEntryInstr* block);

  void EmitEdgeCounter(intptr_t edge_id);

  void RecordCatchEntryMoves(Environment* env = NULL,
                             intptr_t try_index = kInvalidTryIndex);

  void EmitCallToStub(const Code& stub);
  void EmitTailCallToStub(const Code& stub);

  // Emits the following metadata for the current PC:
  //
  //   * Attaches current try index
  //   * Attaches stackmaps
  //   * Attaches catch entry moves (in AOT)
  //   * Deoptimization information (in JIT)
  //
  // If [env] is not `nullptr` it will be used instead of the
  // `pending_deoptimization_env`.
  void EmitCallsiteMetadata(TokenPosition token_pos,
                            intptr_t deopt_id,
                            PcDescriptorsLayout::Kind kind,
                            LocationSummary* locs,
                            Environment* env = nullptr);

  void EmitYieldPositionMetadata(TokenPosition token_pos, intptr_t yield_index);

  void EmitComment(Instruction* instr);

  // Returns stack size (number of variables on stack for unoptimized
  // code, or number of spill slots for optimized code).
  intptr_t StackSize() const;

  // Returns the number of extra stack slots used during an Osr entry
  // (values for all [ParameterInstr]s, representing local variables
  // and expression stack values, are already on the stack).
  intptr_t ExtraStackSlotsOnOsrEntry() const;

  // Returns assembler label associated with the given block entry.
  compiler::Label* GetJumpLabel(BlockEntryInstr* block_entry) const;
  bool WasCompacted(BlockEntryInstr* block_entry) const;

  // Returns the label of the fall-through of the current block.
  compiler::Label* NextNonEmptyLabel() const;

  // Returns true if there is a next block after the current one in
  // the block order and if it is the given block.
  bool CanFallThroughTo(BlockEntryInstr* block_entry) const;

  // Return true-, false- and fall-through label for a branch instruction.
  BranchLabels CreateBranchLabels(BranchInstr* branch) const;

  void AddExceptionHandler(intptr_t try_index,
                           intptr_t outer_try_index,
                           intptr_t pc_offset,
                           bool is_generated,
                           const Array& handler_types,
                           bool needs_stacktrace);
  void SetNeedsStackTrace(intptr_t try_index);
  void AddCurrentDescriptor(PcDescriptorsLayout::Kind kind,
                            intptr_t deopt_id,
                            TokenPosition token_pos);
  void AddDescriptor(
      PcDescriptorsLayout::Kind kind,
      intptr_t pc_offset,
      intptr_t deopt_id,
      TokenPosition token_pos,
      intptr_t try_index,
      intptr_t yield_index = PcDescriptorsLayout::kInvalidYieldIndex);

  // Add NullCheck information for the current PC.
  void AddNullCheck(TokenPosition token_pos, const String& name);

  void RecordSafepoint(LocationSummary* locs,
                       intptr_t slow_path_argument_count = 0);

  compiler::Label* AddDeoptStub(intptr_t deopt_id,
                                ICData::DeoptReasonId reason,
                                uint32_t flags = 0);

  CompilerDeoptInfo* AddDeoptIndexAtCall(intptr_t deopt_id);
  CompilerDeoptInfo* AddSlowPathDeoptInfo(intptr_t deopt_id, Environment* env);

  void AddSlowPathCode(SlowPathCode* slow_path);

  void FinalizeExceptionHandlers(const Code& code);
  void FinalizePcDescriptors(const Code& code);
  ArrayPtr CreateDeoptInfo(compiler::Assembler* assembler);
  void FinalizeStackMaps(const Code& code);
  void FinalizeVarDescriptors(const Code& code);
  void FinalizeCatchEntryMovesMap(const Code& code);
  void FinalizeStaticCallTargetsTable(const Code& code);
  void FinalizeCodeSourceMap(const Code& code);

  const Class& double_class() const { return double_class_; }
  const Class& mint_class() const { return mint_class_; }
  const Class& float32x4_class() const { return float32x4_class_; }
  const Class& float64x2_class() const { return float64x2_class_; }
  const Class& int32x4_class() const { return int32x4_class_; }

  const Class& BoxClassFor(Representation rep);

  void SaveLiveRegisters(LocationSummary* locs);
  void RestoreLiveRegisters(LocationSummary* locs);
#if defined(DEBUG)
  void ClobberDeadTempRegisters(LocationSummary* locs);
#endif

  // Returns a new environment based on [env] which accounts for the new
  // locations of values in the slow path call.
  Environment* SlowPathEnvironmentFor(Instruction* inst,
                                      intptr_t num_slow_path_args) {
    if (inst->env() == nullptr && is_optimizing()) {
      if (pending_deoptimization_env_ == nullptr) {
        return nullptr;
      }
      return SlowPathEnvironmentFor(pending_deoptimization_env_, inst->locs(),
                                    num_slow_path_args);
    }
    return SlowPathEnvironmentFor(inst->env(), inst->locs(),
                                  num_slow_path_args);
  }

  Environment* SlowPathEnvironmentFor(Environment* env,
                                      LocationSummary* locs,
                                      intptr_t num_slow_path_args);

  intptr_t CurrentTryIndex() const {
    if (current_block_ == NULL) {
      return kInvalidTryIndex;
    }
    return current_block_->try_index();
  }

  bool may_reoptimize() const { return may_reoptimize_; }

  // Use in unoptimized compilation to preserve/reuse ICData.
  const ICData* GetOrAddInstanceCallICData(intptr_t deopt_id,
                                           const String& target_name,
                                           const Array& arguments_descriptor,
                                           intptr_t num_args_tested,
                                           const AbstractType& receiver_type);

  const ICData* GetOrAddStaticCallICData(intptr_t deopt_id,
                                         const Function& target,
                                         const Array& arguments_descriptor,
                                         intptr_t num_args_tested,
                                         ICData::RebindRule rebind_rule);

  static const CallTargets* ResolveCallTargetsForReceiverCid(
      intptr_t cid,
      const String& selector,
      const Array& args_desc_array);

  const ZoneGrowableArray<const ICData*>& deopt_id_to_ic_data() const {
    return *deopt_id_to_ic_data_;
  }

  Thread* thread() const { return thread_; }
  Isolate* isolate() const { return thread_->isolate(); }
  Zone* zone() const { return zone_; }

  void AddStubCallTarget(const Code& code);
  void AddDispatchTableCallTarget(const compiler::TableSelector* selector);

  ArrayPtr edge_counters_array() const { return edge_counters_array_.raw(); }

  ArrayPtr InliningIdToFunction() const;

  void BeginCodeSourceRange();
  void EndCodeSourceRange(TokenPosition token_pos);

  static bool LookupMethodFor(int class_id,
                              const String& name,
                              const ArgumentsDescriptor& args_desc,
                              Function* fn_return,
                              bool* class_is_abstract_return = NULL);

  // Returns new class-id bias.
  //
  // TODO(kustermann): We should move this code out of the [FlowGraphCompiler]!
  static int EmitTestAndCallCheckCid(compiler::Assembler* assembler,
                                     compiler::Label* label,
                                     Register class_id_reg,
                                     const CidRangeValue& range,
                                     int bias,
                                     bool jump_on_miss = true);

  bool IsEmptyBlock(BlockEntryInstr* block) const;

 private:
  friend class BoxInt64Instr;  // For AddPcRelativeCallStubTarget().
  friend class CheckNullInstr;  // For AddPcRelativeCallStubTarget().
  friend class NullErrorSlowPath;  // For AddPcRelativeCallStubTarget().
  friend class CheckStackOverflowInstr;  // For AddPcRelativeCallStubTarget().
  friend class StoreIndexedInstr;  // For AddPcRelativeCallStubTarget().
  friend class StoreInstanceFieldInstr;  // For AddPcRelativeCallStubTarget().
  friend class CheckStackOverflowSlowPath;  // For pending_deoptimization_env_.
  friend class CheckedSmiSlowPath;  // Same.
  friend class CheckedSmiComparisonSlowPath;  // Same.
  friend class GraphInstrinsicCodeGenScope;  // For optimizing_.

  // Architecture specific implementation of simple native moves.
  void EmitNativeMoveArchitecture(const compiler::ffi::NativeLocation& dst,
                                  const compiler::ffi::NativeLocation& src);

  void EmitFrameEntry();

  bool TryIntrinsifyHelper();
  void AddPcRelativeCallTarget(const Function& function,
                               Code::EntryKind entry_kind);
  void AddPcRelativeCallStubTarget(const Code& stub_code);
  void AddPcRelativeTailCallStubTarget(const Code& stub_code);
  void AddPcRelativeTTSCallTypeTarget(const AbstractType& type);
  void AddStaticCallTarget(const Function& function,
                           Code::EntryKind entry_kind);

  void GenerateDeferredCode();

  void EmitInstructionPrologue(Instruction* instr);
  void EmitInstructionEpilogue(Instruction* instr);

  // Emit code to load a Value into register 'dst'.
  void LoadValue(Register dst, Value* value);

  void EmitOptimizedStaticCall(
      const Function& function,
      const Array& arguments_descriptor,
      intptr_t size_with_type_args,
      intptr_t deopt_id,
      TokenPosition token_pos,
      LocationSummary* locs,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void EmitUnoptimizedStaticCall(
      intptr_t size_with_type_args,
      intptr_t deopt_id,
      TokenPosition token_pos,
      LocationSummary* locs,
      const ICData& ic_data,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  // Helper for TestAndCall that calculates a good bias that
  // allows more compact instructions to be emitted.
  intptr_t ComputeGoodBiasForCidComparison(const CallTargets& sorted,
                                           intptr_t max_immediate);
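
  // For example (illustrative numbers only): if the sorted targets cover cids
  // 0x7A10..0x7A30, comparing against the raw cids may not fit in a small
  // immediate, but after subtracting a bias of 0x7A10 once, every subsequent
  // comparison is against a value in 0x00..0x20 and encodes compactly.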

  // More helpers for EmitTestAndCall.

  static Register EmitTestCidRegister();

  void EmitTestAndCallLoadReceiver(intptr_t count_without_type_args,
                                   const Array& arguments_descriptor);

  void EmitTestAndCallSmiBranch(compiler::Label* label, bool jump_if_smi);

  void EmitTestAndCallLoadCid(Register class_id_reg);

  // Type checking helper methods.
  void CheckClassIds(Register class_id_reg,
                     const GrowableArray<intptr_t>& class_ids,
                     compiler::Label* is_instance_lbl,
                     compiler::Label* is_not_instance_lbl);

  SubtypeTestCachePtr GenerateInlineInstanceof(
      TokenPosition token_pos,
      const AbstractType& type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  SubtypeTestCachePtr GenerateInstantiatedTypeWithArgumentsTest(
      TokenPosition token_pos,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  bool GenerateInstantiatedTypeNoArgumentsTest(
      TokenPosition token_pos,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  SubtypeTestCachePtr GenerateUninstantiatedTypeTest(
      TokenPosition token_pos,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_label);

  SubtypeTestCachePtr GenerateFunctionTypeTest(
      TokenPosition token_pos,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_label);

  SubtypeTestCachePtr GenerateSubtype1TestCacheLookup(
      TokenPosition token_pos,
      const Class& type_class,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  enum TypeTestStubKind {
    kTestTypeOneArg,
    kTestTypeTwoArgs,
    kTestTypeFourArgs,
    kTestTypeSixArgs,
  };

  // Returns type test stub kind for a type test against type parameter type.
  TypeTestStubKind GetTypeTestStubKindForTypeParameter(
      const TypeParameter& type_param);

  SubtypeTestCachePtr GenerateCallSubtypeTestStub(
      TypeTestStubKind test_kind,
      Register instance_reg,
      Register instantiator_type_arguments_reg,
      Register function_type_arguments_reg,
      Register temp_reg,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  void GenerateBoolToJump(Register bool_reg,
                          compiler::Label* is_true,
                          compiler::Label* is_false);

  void GenerateMethodExtractorIntrinsic(const Function& extracted_method,
                                        intptr_t type_arguments_field_offset);

  void GenerateGetterIntrinsic(const Function& accessor, const Field& field);

  // Perform a greedy local register allocation. Consider all registers free.
  void AllocateRegistersLocally(Instruction* instr);

  // Map a block number in a forward iteration into the block number in the
  // corresponding reverse iteration. Used to obtain an index into
  // block_order for reverse iterations.
  intptr_t reverse_index(intptr_t index) const {
    return block_order_.length() - index - 1;
  }
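
  // For example, with block_order_.length() == 5, forward index 0 maps to
  // reverse index 4, index 1 to 3, and so on.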

  void set_current_instruction(Instruction* current_instruction) {
    current_instruction_ = current_instruction;
  }

  Instruction* current_instruction() { return current_instruction_; }

  void CompactBlock(BlockEntryInstr* block);
  void CompactBlocks();

  bool IsListClass(const Class& cls) const {
    return cls.raw() == list_class_.raw();
  }

  void EmitSourceLine(Instruction* instr);

  intptr_t GetOptimizationThreshold() const;

  CompressedStackMapsBuilder* compressed_stackmaps_builder() {
    if (compressed_stackmaps_builder_ == NULL) {
      compressed_stackmaps_builder_ = new CompressedStackMapsBuilder();
    }
    return compressed_stackmaps_builder_;
  }

#if defined(DEBUG)
  void FrameStateUpdateWith(Instruction* instr);
  void FrameStatePush(Definition* defn);
  void FrameStatePop(intptr_t count);
  bool FrameStateIsSafeToCall();
  void FrameStateClear();
#endif

  // Returns true if instruction lookahead (window size one)
  // is amenable to a peephole optimization.
  bool IsPeephole(Instruction* instr) const;

#if defined(DEBUG)
  bool CanCallDart() const {
    return current_instruction_ == nullptr ||
           current_instruction_->CanCallDart();
  }
#else
  bool CanCallDart() const { return true; }
#endif

  bool CanPcRelativeCall(const Function& target) const;
  bool CanPcRelativeCall(const Code& target) const;
  bool CanPcRelativeCall(const AbstractType& target) const;

  // This struct contains either a function or a code object; the other one is
  // nullptr.
  class StaticCallsStruct : public ZoneAllocated {
   public:
    Code::CallKind call_kind;
    Code::CallEntryPoint entry_point;
    const intptr_t offset;
    const Function* function;      // Can be nullptr.
    const Code* code;              // Can be nullptr.
    const AbstractType* dst_type;  // Can be nullptr.
    StaticCallsStruct(Code::CallKind call_kind,
                      Code::CallEntryPoint entry_point,
                      intptr_t offset_arg,
                      const Function* function_arg,
                      const Code* code_arg,
                      const AbstractType* dst_type)
        : call_kind(call_kind),
          entry_point(entry_point),
          offset(offset_arg),
          function(function_arg),
          code(code_arg),
          dst_type(dst_type) {
      ASSERT(function == nullptr || function->IsZoneHandle());
      ASSERT(code == nullptr || code->IsZoneHandle() ||
             code->IsReadOnlyHandle());
      ASSERT(dst_type == nullptr || dst_type->IsZoneHandle() ||
             dst_type->IsReadOnlyHandle());
      ASSERT(code == nullptr || dst_type == nullptr);
    }

   private:
    DISALLOW_COPY_AND_ASSIGN(StaticCallsStruct);
  };

  Thread* thread_;
  Zone* zone_;
  compiler::Assembler* assembler_;
  const ParsedFunction& parsed_function_;
  const FlowGraph& flow_graph_;
  const GrowableArray<BlockEntryInstr*>& block_order_;

#if defined(DEBUG)
  GrowableArray<Representation> frame_state_;
#endif

  // Compiler specific per-block state. Indexed by postorder block number
  // for convenience. This is not the block's index in the block order,
  // which is reverse postorder.
  BlockEntryInstr* current_block_;
  ExceptionHandlerList* exception_handlers_list_;
  DescriptorList* pc_descriptors_list_;
  CompressedStackMapsBuilder* compressed_stackmaps_builder_;
  CodeSourceMapBuilder* code_source_map_builder_;
  CatchEntryMovesMapBuilder* catch_entry_moves_maps_builder_;
  GrowableArray<BlockInfo*> block_info_;
  GrowableArray<CompilerDeoptInfo*> deopt_infos_;
  GrowableArray<SlowPathCode*> slow_path_code_;
  // Fields that were referenced by generated code.
  // This list is needed by precompiler to ensure they are retained.
  GrowableArray<const Field*> used_static_fields_;
  // Stores static call targets as well as stub targets.
  // TODO(srdjan): Evaluate if we should store allocation stub targets into a
  // separate table?
  GrowableArray<StaticCallsStruct*> static_calls_target_table_;
  // The table selectors of all dispatch table calls in the current function.
  GrowableArray<const compiler::TableSelector*> dispatch_table_call_targets_;
  GrowableArray<IndirectGotoInstr*> indirect_gotos_;
  bool is_optimizing_;
  SpeculativeInliningPolicy* speculative_policy_;
  // Set to true if optimized code has IC calls.
  bool may_reoptimize_;
  // True while emitting intrinsic code.
  bool intrinsic_mode_;
  compiler::Label* intrinsic_slow_path_label_ = nullptr;
  bool fully_intrinsified_ = false;
  CodeStatistics* stats_;

  // The definition whose value is supposed to be at the top of the
  // expression stack. Used by peephole optimization (window size one)
  // to eliminate redundant push/pop pairs.
  Definition* top_of_stack_ = nullptr;

  const Class& double_class_;
  const Class& mint_class_;
  const Class& float32x4_class_;
  const Class& float64x2_class_;
  const Class& int32x4_class_;
  const Class& list_class_;

  ParallelMoveResolver parallel_move_resolver_;

  // Currently instructions generate deopt stubs internally by calling
  // AddDeoptStub. To communicate the deoptimization environment that should
  // be used when deoptimizing, we store it in this variable. In the future,
  // AddDeoptStub should be moved out of the instruction template.
  Environment* pending_deoptimization_env_;

  ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data_;
  Array& edge_counters_array_;

  // Instruction currently running EmitNativeCode().
  Instruction* current_instruction_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(FlowGraphCompiler);
};

}  // namespace dart

#endif  // RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_