// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_BACKEND_IL_H_
#define RUNTIME_VM_COMPILER_BACKEND_IL_H_

#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif  // defined(DART_PRECOMPILED_RUNTIME)

#include <memory>
#include <utility>

#include "vm/allocation.h"
#include "vm/code_descriptors.h"
#include "vm/compiler/backend/compile_type.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/backend/slot.h"
#include "vm/compiler/compiler_pass.h"
#include "vm/compiler/compiler_state.h"
#include "vm/compiler/ffi/marshaller.h"
#include "vm/compiler/ffi/native_calling_convention.h"
#include "vm/compiler/ffi/native_location.h"
#include "vm/compiler/ffi/native_type.h"
#include "vm/compiler/method_recognizer.h"
#include "vm/flags.h"
#include "vm/growable_array.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/parser.h"
#include "vm/runtime_entry.h"
#include "vm/static_type_exactness_state.h"
#include "vm/token_position.h"

namespace dart {

class BaseTextBuffer;
class BinaryFeedback;
class BitVector;
class BlockEntryInstr;
class BlockEntryWithInitialDefs;
class BoxIntegerInstr;
class CallTargets;
class CatchBlockEntryInstr;
class CheckBoundBase;
class ComparisonInstr;
class Definition;
class Environment;
class FlowGraph;
class FlowGraphCompiler;
class FlowGraphSerializer;
class FlowGraphVisitor;
class Instruction;
class LocalVariable;
class LoopInfo;
class ParsedFunction;
class Range;
class RangeAnalysis;
class RangeBoundary;
class SExpList;
class SExpression;
class TypeUsageInfo;
class UnboxIntegerInstr;

namespace compiler {
class BlockBuilder;
struct TableSelector;
}  // namespace compiler

class Value : public ZoneAllocated {
 public:
  // A forward iterator that allows removing the current value from the
  // underlying use list during iteration.
  class Iterator {
   public:
    explicit Iterator(Value* head) : next_(head) { Advance(); }
    Value* Current() const { return current_; }
    bool Done() const { return current_ == NULL; }
    void Advance() {
      // Pre-fetch next on advance and cache it.
      current_ = next_;
      if (next_ != NULL) next_ = next_->next_use();
    }

   private:
    Value* current_;
    Value* next_;
  };

  explicit Value(Definition* definition)
      : definition_(definition),
        previous_use_(NULL),
        next_use_(NULL),
        instruction_(NULL),
        use_index_(-1),
        reaching_type_(NULL) {}

  Definition* definition() const { return definition_; }
  void set_definition(Definition* definition) {
    definition_ = definition;
    // Clone the reaching type if there was one and the owner no longer matches
    // this value's definition.
    SetReachingType(reaching_type_);
  }

  Value* previous_use() const { return previous_use_; }
  void set_previous_use(Value* previous) { previous_use_ = previous; }

  Value* next_use() const { return next_use_; }
  void set_next_use(Value* next) { next_use_ = next; }

  bool IsSingleUse() const {
    return (next_use_ == NULL) && (previous_use_ == NULL);
  }

  Instruction* instruction() const { return instruction_; }
  void set_instruction(Instruction* instruction) { instruction_ = instruction; }

  intptr_t use_index() const { return use_index_; }
  void set_use_index(intptr_t index) { use_index_ = index; }

  static void AddToList(Value* value, Value** list);
  void RemoveFromUseList();

  // Change the definition after use lists have been computed.
  inline void BindTo(Definition* definition);
  inline void BindToEnvironment(Definition* definition);

  Value* Copy(Zone* zone) { return new (zone) Value(definition_); }

  // CopyWithType() must only be used when the new Value is dominated by
  // the original Value.
  Value* CopyWithType(Zone* zone) {
    Value* copy = new (zone) Value(definition_);
    copy->reaching_type_ = reaching_type_;
    return copy;
  }
  Value* CopyWithType() { return CopyWithType(Thread::Current()->zone()); }

  CompileType* Type();

  CompileType* reaching_type() const { return reaching_type_; }
  void SetReachingType(CompileType* type);
  void RefineReachingType(CompileType* type);

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
  void PrintTo(BaseTextBuffer* f) const;
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)

  SExpression* ToSExpression(FlowGraphSerializer* s) const;

  const char* ToCString() const;

  bool IsSmiValue() { return Type()->ToCid() == kSmiCid; }

  // Return true if the value represents a constant.
  bool BindsToConstant() const;

  // Return true if the value represents the constant null.
  bool BindsToConstantNull() const;

  // Asserts if BindsToConstant() is false, otherwise returns the constant
  // value.
  const Object& BoundConstant() const;

  // Return true if storing the value into a heap object requires applying the
  // write barrier. Can change the reaching type of the Value or other Values
  // in the same chain of redefinitions.
  bool NeedsWriteBarrier();

  bool Equals(Value* other) const;

  // Returns true if this |Value| can evaluate to the given |value| during
  // execution.
  inline bool CanBe(const Object& value);

 private:
  friend class FlowGraphPrinter;
  friend class FlowGraphDeserializer;  // For setting reaching_type_ directly.

  Definition* definition_;
  Value* previous_use_;
  Value* next_use_;
  Instruction* instruction_;
  intptr_t use_index_;

  CompileType* reaching_type_;

  DISALLOW_COPY_AND_ASSIGN(Value);
};
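
// Example (illustrative sketch): walking the input uses of a definition while
// detaching each one. Assumes |defn| is a Definition* whose use lists have
// been computed, with input_use_list() as the usual accessor for the head of
// that list.
//
//   for (Value::Iterator it(defn->input_use_list()); !it.Done();
//        it.Advance()) {
//     Value* use = it.Current();
//     use->RemoveFromUseList();  // Safe: the iterator pre-fetches next_use().
//   }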

// Represents a range of class-ids for use in class checks and polymorphic
// dispatches. The range includes both ends, i.e. it is [cid_start, cid_end].
struct CidRange : public ZoneAllocated {
  CidRange(intptr_t cid_start_arg, intptr_t cid_end_arg)
      : cid_start(cid_start_arg), cid_end(cid_end_arg) {}
  CidRange() : cid_start(kIllegalCid), cid_end(kIllegalCid) {}

  bool IsSingleCid() const { return cid_start == cid_end; }
  bool Contains(intptr_t cid) { return cid_start <= cid && cid <= cid_end; }
  int32_t Extent() const { return cid_end - cid_start; }

  // The number of class ids this range covers.
  intptr_t size() const { return cid_end - cid_start + 1; }

  bool IsIllegalRange() const {
    return cid_start == kIllegalCid && cid_end == kIllegalCid;
  }

  intptr_t cid_start;
  intptr_t cid_end;

  DISALLOW_COPY_AND_ASSIGN(CidRange);
};
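
// Example (illustrative): both endpoints are included, so a range built from
// a single cid has size() == 1.
//
//   CidRange single(kSmiCid, kSmiCid);
//   single.IsSingleCid();      // true
//   single.Contains(kSmiCid);  // true
//   single.size();             // 1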

struct CidRangeValue {
  CidRangeValue(intptr_t cid_start_arg, intptr_t cid_end_arg)
      : cid_start(cid_start_arg), cid_end(cid_end_arg) {}
  CidRangeValue(const CidRange& other)  // NOLINT
      : cid_start(other.cid_start), cid_end(other.cid_end) {}

  bool IsSingleCid() const { return cid_start == cid_end; }
  bool Contains(intptr_t cid) { return cid_start <= cid && cid <= cid_end; }
  int32_t Extent() const { return cid_end - cid_start; }

  // The number of class ids this range covers.
  intptr_t size() const { return cid_end - cid_start + 1; }

  bool IsIllegalRange() const {
    return cid_start == kIllegalCid && cid_end == kIllegalCid;
  }

  intptr_t cid_start;
  intptr_t cid_end;
};

typedef MallocGrowableArray<CidRangeValue> CidRangeVector;

class HierarchyInfo : public ThreadStackResource {
 public:
  explicit HierarchyInfo(Thread* thread)
      : ThreadStackResource(thread),
        cid_subtype_ranges_nullable_(),
        cid_subtype_ranges_abstract_nullable_(),
        cid_subtype_ranges_nonnullable_(),
        cid_subtype_ranges_abstract_nonnullable_(),
        cid_subclass_ranges_() {
    thread->set_hierarchy_info(this);
  }

  ~HierarchyInfo() { thread()->set_hierarchy_info(NULL); }

  const CidRangeVector& SubtypeRangesForClass(const Class& klass,
                                              bool include_abstract,
                                              bool exclude_null);
  const CidRangeVector& SubclassRangesForClass(const Class& klass);

  bool InstanceOfHasClassRange(const AbstractType& type,
                               intptr_t* lower_limit,
                               intptr_t* upper_limit);

  // Returns `true` if a simple [CidRange]-based subtype-check can be used to
  // determine if a given instance's type is a subtype of [type].
  //
  // This is the case for [type]s without type arguments or where the type
  // arguments are all dynamic (known as "rare type").
  bool CanUseSubtypeRangeCheckFor(const AbstractType& type);

  // Returns `true` if a combination of [CidRange]-based checks can be used to
  // determine if a given instance's type is a subtype of [type].
  //
  // This is the case for [type]s with type arguments where we are able to do a
  // [CidRange]-based subclass-check against the class and [CidRange]-based
  // subtype-checks against the type arguments.
  //
  // This method should only be called if [CanUseSubtypeRangeCheckFor] returned
  // false.
  bool CanUseGenericSubtypeRangeCheckFor(const AbstractType& type);

 private:
  // Does not use any hierarchy information available in the system but
  // computes it via O(n) class table traversal. The boolean parameters denote:
  //   use_subtype_test : if set, IsSubtypeOf() is used to compute inclusion
  //   include_abstract : if set, include abstract types (don't care otherwise)
  //   exclude_null     : if set, exclude null types (don't care otherwise)
  void BuildRangesFor(ClassTable* table,
                      CidRangeVector* ranges,
                      const Class& klass,
                      bool use_subtype_test,
                      bool include_abstract,
                      bool exclude_null);

  // In JIT mode we use hierarchy information stored in the [RawClass]s
  // direct_subclasses_/direct_implementors_ arrays.
  void BuildRangesForJIT(ClassTable* table,
                         CidRangeVector* ranges,
                         const Class& klass,
                         bool use_subtype_test,
                         bool include_abstract,
                         bool exclude_null);

  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_nullable_;
  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nullable_;
  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_nonnullable_;
  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nonnullable_;
  std::unique_ptr<CidRangeVector[]> cid_subclass_ranges_;
};
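
// Example (illustrative sketch): while a HierarchyInfo is in scope it is
// registered with the current thread, so range queries can be made against
// it. |thread|, |type| and |klass| are assumed to be in hand.
//
//   HierarchyInfo hierarchy_info(thread);
//   if (hierarchy_info.CanUseSubtypeRangeCheckFor(type)) {
//     const CidRangeVector& ranges = hierarchy_info.SubtypeRangesForClass(
//         klass, /*include_abstract=*/false, /*exclude_null=*/true);
//     ...
//   }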

// An embedded container with N elements of type T. Used (with partial
// specialization for N=0) because embedded arrays cannot have size 0.
template <typename T, intptr_t N>
class EmbeddedArray {
 public:
  EmbeddedArray() : elements_() {}

  intptr_t length() const { return N; }

  const T& operator[](intptr_t i) const {
    ASSERT(i < length());
    return elements_[i];
  }

  T& operator[](intptr_t i) {
    ASSERT(i < length());
    return elements_[i];
  }

  const T& At(intptr_t i) const { return (*this)[i]; }

  void SetAt(intptr_t i, const T& val) { (*this)[i] = val; }

 private:
  T elements_[N];
};

template <typename T>
class EmbeddedArray<T, 0> {
 public:
  intptr_t length() const { return 0; }
  const T& operator[](intptr_t i) const {
    UNREACHABLE();
    static T sentinel = 0;
    return sentinel;
  }
  T& operator[](intptr_t i) {
    UNREACHABLE();
    static T sentinel = 0;
    return sentinel;
  }
};
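
// Example (illustrative): a fixed-size inline array of two Value pointers, as
// used for instruction inputs by TemplateInstruction below. |left| and
// |right| are hypothetical Value*s.
//
//   EmbeddedArray<Value*, 2> inputs;
//   inputs.SetAt(0, left);
//   inputs.SetAt(1, right);
//   Value* first = inputs.At(0);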

// Instructions.

// M is a two-argument macro. It is applied to each concrete instruction type
// name. The concrete instruction classes are the name with Instr concatenated.

struct InstrAttrs {
  enum Attributes {
    _ = 0,  // No special attributes.
    //
    // The instruction is guaranteed to not trigger GC on a non-exceptional
    // path. If the conditions depend on parameters of the instruction, do not
    // use this attribute but overload CanTriggerGC() instead.
    kNoGC = 1
  };
};

#define FOR_EACH_INSTRUCTION(M) \
  M(GraphEntry, kNoGC) \
  M(JoinEntry, kNoGC) \
  M(TargetEntry, kNoGC) \
  M(FunctionEntry, kNoGC) \
  M(NativeEntry, kNoGC) \
  M(OsrEntry, kNoGC) \
  M(IndirectEntry, kNoGC) \
  M(CatchBlockEntry, kNoGC) \
  M(Phi, kNoGC) \
  M(Redefinition, kNoGC) \
  M(ReachabilityFence, kNoGC) \
  M(Parameter, kNoGC) \
  M(NativeParameter, kNoGC) \
  M(LoadIndexedUnsafe, kNoGC) \
  M(StoreIndexedUnsafe, kNoGC) \
  M(MemoryCopy, kNoGC) \
  M(TailCall, kNoGC) \
  M(ParallelMove, kNoGC) \
  M(PushArgument, kNoGC) \
  M(Return, kNoGC) \
  M(NativeReturn, kNoGC) \
  M(Throw, kNoGC) \
  M(ReThrow, kNoGC) \
  M(Stop, _) \
  M(Goto, kNoGC) \
  M(IndirectGoto, kNoGC) \
  M(Branch, kNoGC) \
  M(AssertAssignable, _) \
  M(AssertSubtype, _) \
  M(AssertBoolean, _) \
  M(SpecialParameter, kNoGC) \
  M(ClosureCall, _) \
  M(FfiCall, _) \
  M(EnterHandleScope, _) \
  M(ExitHandleScope, _) \
  M(AllocateHandle, _) \
  M(RawStoreField, _) \
  M(InstanceCall, _) \
  M(PolymorphicInstanceCall, _) \
  M(DispatchTableCall, _) \
  M(StaticCall, _) \
  M(LoadLocal, kNoGC) \
  M(DropTemps, kNoGC) \
  M(MakeTemp, kNoGC) \
  M(StoreLocal, kNoGC) \
  M(StrictCompare, kNoGC) \
  M(EqualityCompare, kNoGC) \
  M(RelationalOp, kNoGC) \
  M(NativeCall, _) \
  M(DebugStepCheck, _) \
  M(LoadIndexed, kNoGC) \
  M(LoadCodeUnits, kNoGC) \
  M(StoreIndexed, kNoGC) \
  M(StoreInstanceField, _) \
  M(LoadStaticField, _) \
  M(StoreStaticField, kNoGC) \
  M(BooleanNegate, kNoGC) \
  M(InstanceOf, _) \
  M(CreateArray, _) \
  M(AllocateObject, _) \
  M(LoadField, _) \
  M(LoadUntagged, kNoGC) \
  M(StoreUntagged, kNoGC) \
  M(LoadClassId, kNoGC) \
  M(InstantiateType, _) \
  M(InstantiateTypeArguments, _) \
  M(AllocateContext, _) \
  M(AllocateUninitializedContext, _) \
  M(CloneContext, _) \
  M(BinarySmiOp, kNoGC) \
  M(CheckedSmiComparison, _) \
  M(CheckedSmiOp, _) \
  M(BinaryInt32Op, kNoGC) \
  M(UnarySmiOp, kNoGC) \
  M(UnaryDoubleOp, kNoGC) \
  M(CheckStackOverflow, _) \
  M(SmiToDouble, kNoGC) \
  M(Int32ToDouble, kNoGC) \
  M(Int64ToDouble, kNoGC) \
  M(DoubleToInteger, _) \
  M(DoubleToSmi, kNoGC) \
  M(DoubleToDouble, kNoGC) \
  M(DoubleToFloat, kNoGC) \
  M(FloatToDouble, kNoGC) \
  M(CheckClass, kNoGC) \
  M(CheckClassId, kNoGC) \
  M(CheckSmi, kNoGC) \
  M(CheckNull, kNoGC) \
  M(CheckCondition, kNoGC) \
  M(Constant, kNoGC) \
  M(UnboxedConstant, kNoGC) \
  M(CheckEitherNonSmi, kNoGC) \
  M(BinaryDoubleOp, kNoGC) \
  M(DoubleTestOp, kNoGC) \
  M(MathUnary, kNoGC) \
  M(MathMinMax, kNoGC) \
  M(Box, _) \
  M(Unbox, kNoGC) \
  M(BoxInt64, _) \
  M(UnboxInt64, kNoGC) \
  M(CaseInsensitiveCompare, _) \
  M(BinaryInt64Op, kNoGC) \
  M(ShiftInt64Op, kNoGC) \
  M(SpeculativeShiftInt64Op, kNoGC) \
  M(UnaryInt64Op, kNoGC) \
  M(CheckArrayBound, kNoGC) \
  M(GenericCheckBound, kNoGC) \
  M(Constraint, _) \
  M(StringToCharCode, kNoGC) \
  M(OneByteStringFromCharCode, kNoGC) \
  M(StringInterpolate, _) \
  M(Utf8Scan, kNoGC) \
  M(InvokeMathCFunction, _) \
  M(TruncDivMod, kNoGC) \
  /* We could be more precise about when these instructions can trigger GC. */ \
  M(GuardFieldClass, _) \
  M(GuardFieldLength, _) \
  M(GuardFieldType, _) \
  M(IfThenElse, kNoGC) \
  M(MaterializeObject, _) \
  M(TestSmi, kNoGC) \
  M(TestCids, kNoGC) \
  M(ExtractNthOutput, kNoGC) \
  M(BinaryUint32Op, kNoGC) \
  M(ShiftUint32Op, kNoGC) \
  M(SpeculativeShiftUint32Op, kNoGC) \
  M(UnaryUint32Op, kNoGC) \
  M(BoxUint32, _) \
  M(UnboxUint32, kNoGC) \
  M(BoxInt32, _) \
  M(UnboxInt32, kNoGC) \
  M(IntConverter, _) \
  M(BitCast, _) \
  M(Deoptimize, kNoGC) \
  M(SimdOp, kNoGC)

#define FOR_EACH_ABSTRACT_INSTRUCTION(M) \
  M(Allocation, _) \
  M(BinaryIntegerOp, _) \
  M(BlockEntry, _) \
  M(BoxInteger, _) \
  M(Comparison, _) \
  M(InstanceCallBase, _) \
  M(ShiftIntegerOp, _) \
  M(UnaryIntegerOp, _) \
  M(UnboxInteger, _)

#define FORWARD_DECLARATION(type, attrs) class type##Instr;
FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
FOR_EACH_ABSTRACT_INSTRUCTION(FORWARD_DECLARATION)
#undef FORWARD_DECLARATION
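
// For instance, the (GraphEntry, kNoGC) entry above expands under
// FORWARD_DECLARATION to:
//
//   class GraphEntryInstr;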

#define DEFINE_INSTRUCTION_TYPE_CHECK(type) \
  virtual type##Instr* As##type() { return this; } \
  virtual const type##Instr* As##type() const { return this; } \
  virtual const char* DebugName() const { return #type; }

// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION_NO_BACKEND(type) \
  virtual Tag tag() const { return k##type; } \
  virtual void Accept(FlowGraphVisitor* visitor); \
  DEFINE_INSTRUCTION_TYPE_CHECK(type)

#define DECLARE_INSTRUCTION_BACKEND() \
  virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing) \
      const; \
  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION(type) \
  DECLARE_INSTRUCTION_NO_BACKEND(type) \
  DECLARE_INSTRUCTION_BACKEND()

#define DECLARE_COMPARISON_METHODS \
  virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing) \
      const; \
  virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler, \
                                       BranchLabels labels);

#define DECLARE_COMPARISON_INSTRUCTION(type) \
  DECLARE_INSTRUCTION_NO_BACKEND(type) \
  DECLARE_COMPARISON_METHODS

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
#define PRINT_TO_SUPPORT virtual void PrintTo(BaseTextBuffer* f) const;
#else
#define PRINT_TO_SUPPORT
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
#define PRINT_OPERANDS_TO_SUPPORT \
  virtual void PrintOperandsTo(BaseTextBuffer* f) const;
#else
#define PRINT_OPERANDS_TO_SUPPORT
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)

#define TO_S_EXPRESSION_SUPPORT \
  virtual SExpression* ToSExpression(FlowGraphSerializer* s) const;

#define ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT \
  virtual void AddOperandsToSExpression(SExpList* sexp, \
                                        FlowGraphSerializer* s) const;

#define ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT \
  virtual void AddExtraInfoToSExpression(SExpList* sexp, \
                                         FlowGraphSerializer* s) const;

// Together with CidRange, this represents a mapping from a range of class-ids
// to a method for a given selector (method name). Also can contain an
// indication of how frequently a given method has been called at a call site.
// This information can be harvested from the inline caches (ICs).
struct TargetInfo : public CidRange {
  TargetInfo(intptr_t cid_start_arg,
             intptr_t cid_end_arg,
             const Function* target_arg,
             intptr_t count_arg,
             StaticTypeExactnessState exactness)
      : CidRange(cid_start_arg, cid_end_arg),
        target(target_arg),
        count(count_arg),
        exactness(exactness) {
    ASSERT(target->IsZoneHandle());
  }
  const Function* target;
  intptr_t count;
  StaticTypeExactnessState exactness;

  DISALLOW_COPY_AND_ASSIGN(TargetInfo);
};

// A set of class-ids, arranged in ranges. Used for the CheckClass
// and PolymorphicInstanceCall instructions.
class Cids : public ZoneAllocated {
 public:
  explicit Cids(Zone* zone) : cid_ranges_(zone, 6) {}
  // Creates the off-heap Cids object that reflects the contents
  // of the on-VM-heap IC data.
  // Ranges of Cids are merged if there is only one target function and
  // it is used for all cids in the gaps between ranges.
  static Cids* CreateForArgument(Zone* zone,
                                 const BinaryFeedback& binary_feedback,
                                 int argument_number);
  static Cids* CreateMonomorphic(Zone* zone, intptr_t cid);

  bool Equals(const Cids& other) const;

  bool HasClassId(intptr_t cid) const;

  void Add(CidRange* target) { cid_ranges_.Add(target); }

  CidRange& operator[](intptr_t index) const { return *cid_ranges_[index]; }

  CidRange* At(int index) const { return cid_ranges_[index]; }

  intptr_t length() const { return cid_ranges_.length(); }

  void SetLength(intptr_t len) { cid_ranges_.SetLength(len); }

  bool is_empty() const { return cid_ranges_.is_empty(); }

  void Sort(int compare(CidRange* const* a, CidRange* const* b)) {
    cid_ranges_.Sort(compare);
  }

  bool IsMonomorphic() const;
  intptr_t MonomorphicReceiverCid() const;
  intptr_t ComputeLowestCid() const;
  intptr_t ComputeHighestCid() const;

 protected:
  GrowableArray<CidRange*> cid_ranges_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Cids);
};
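
// Example (illustrative sketch): building a Cids set by hand; in practice the
// factory methods above are used instead. |zone| is assumed to be in hand.
//
//   Cids* cids = new (zone) Cids(zone);
//   cids->Add(new (zone) CidRange(kOneByteStringCid, kOneByteStringCid));
//   cids->HasClassId(kOneByteStringCid);  // true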

class CallTargets : public Cids {
 public:
  explicit CallTargets(Zone* zone) : Cids(zone) {}

  static const CallTargets* CreateMonomorphic(Zone* zone,
                                              intptr_t receiver_cid,
                                              const Function& target);

  // Creates the off-heap CallTargets object that reflects the contents
  // of the on-VM-heap IC data.
  static const CallTargets* Create(Zone* zone, const ICData& ic_data);

  // This variant also expands the class-ids to neighbouring classes that
  // inherit the same method.
  static const CallTargets* CreateAndExpand(Zone* zone, const ICData& ic_data);

  TargetInfo* TargetAt(int i) const { return static_cast<TargetInfo*>(At(i)); }

  intptr_t AggregateCallCount() const;

  StaticTypeExactnessState MonomorphicExactness() const;
  bool HasSingleTarget() const;
  bool HasSingleRecognizedTarget() const;
  const Function& FirstTarget() const;
  const Function& MostPopularTarget() const;

  void Print() const;

  bool ReceiverIs(intptr_t cid) const {
    return IsMonomorphic() && MonomorphicReceiverCid() == cid;
  }
  bool ReceiverIsSmiOrMint() const {
    if (cid_ranges_.is_empty()) {
      return false;
    }
    for (intptr_t i = 0, n = cid_ranges_.length(); i < n; i++) {
      for (intptr_t j = cid_ranges_[i]->cid_start; j <= cid_ranges_[i]->cid_end;
           j++) {
        if (j != kSmiCid && j != kMintCid) {
          return false;
        }
      }
    }
    return true;
  }

 private:
  void CreateHelper(Zone* zone, const ICData& ic_data);
  void MergeIntoRanges();
};
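
// Example (illustrative sketch): harvesting targets from IC data and walking
// the resulting ranges. |zone| and |ic_data| are assumed to be in hand.
//
//   const CallTargets* targets = CallTargets::Create(zone, ic_data);
//   for (intptr_t i = 0; i < targets->length(); i++) {
//     TargetInfo* info = targets->TargetAt(i);
//     // Receivers with cids in [info->cid_start, info->cid_end] all call
//     // *info->target; info->count estimates how often that happened.
//   }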

// Represents type feedback for the binary operators, and a few recognized
// static functions (see MethodRecognizer::NumArgsCheckedForStaticCall).
class BinaryFeedback : public ZoneAllocated {
 public:
  explicit BinaryFeedback(Zone* zone) : feedback_(zone, 2) {}

  static const BinaryFeedback* Create(Zone* zone, const ICData& ic_data);
  static const BinaryFeedback* CreateMonomorphic(Zone* zone,
                                                 intptr_t receiver_cid,
                                                 intptr_t argument_cid);

  bool ArgumentIs(intptr_t cid) const {
    if (feedback_.is_empty()) {
      return false;
    }
    for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
      if (feedback_[i].second != cid) {
        return false;
      }
    }
    return true;
  }

  bool OperandsAreEither(intptr_t cid_a, intptr_t cid_b) const {
    if (feedback_.is_empty()) {
      return false;
    }
    for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
      if ((feedback_[i].first != cid_a) && (feedback_[i].first != cid_b)) {
        return false;
      }
      if ((feedback_[i].second != cid_a) && (feedback_[i].second != cid_b)) {
        return false;
      }
    }
    return true;
  }
  bool OperandsAreSmiOrNull() const {
    return OperandsAreEither(kSmiCid, kNullCid);
  }
  bool OperandsAreSmiOrMint() const {
    return OperandsAreEither(kSmiCid, kMintCid);
  }
  bool OperandsAreSmiOrDouble() const {
    return OperandsAreEither(kSmiCid, kDoubleCid);
  }

  bool OperandsAre(intptr_t cid) const {
    if (feedback_.length() != 1) return false;
    return (feedback_[0].first == cid) && (feedback_[0].second == cid);
  }

  bool IncludesOperands(intptr_t cid) const {
    for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
      if ((feedback_[i].first == cid) && (feedback_[i].second == cid)) {
        return true;
      }
    }
    return false;
  }

 private:
  GrowableArray<std::pair<intptr_t, intptr_t>> feedback_;

  friend class Cids;
};
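
// Example (illustrative): a binary call site that has only ever seen a Smi on
// both sides records the single pair (kSmiCid, kSmiCid), so:
//
//   feedback->OperandsAre(kSmiCid);    // true: exactly one (Smi, Smi) pair.
//   feedback->OperandsAreSmiOrMint();  // true: every operand is Smi or Mint.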

typedef ZoneGrowableArray<Value*> InputsArray;
typedef ZoneGrowableArray<PushArgumentInstr*> PushArgumentsArray;

class Instruction : public ZoneAllocated {
 public:
#define DECLARE_TAG(type, attrs) k##type,
  enum Tag { FOR_EACH_INSTRUCTION(DECLARE_TAG) kNumInstructions };
#undef DECLARE_TAG

  static const intptr_t kInstructionAttrs[kNumInstructions];

  enum SpeculativeMode {
    // Types of inputs should be checked when unboxing for this instruction.
    kGuardInputs,
    // Each input is guaranteed to have a valid type for the input
    // representation and its type should not be checked when unboxing.
    kNotSpeculative
  };

  explicit Instruction(intptr_t deopt_id = DeoptId::kNone)
      : deopt_id_(deopt_id),
        previous_(NULL),
        next_(NULL),
        env_(NULL),
        locs_(NULL),
        inlining_id_(-1) {}

  virtual ~Instruction() {}

  virtual Tag tag() const = 0;

  virtual intptr_t statistics_tag() const { return tag(); }

  intptr_t deopt_id() const {
    ASSERT(ComputeCanDeoptimize() || CanBecomeDeoptimizationTarget() ||
           CompilerState::Current().is_aot());
    return GetDeoptId();
  }

  static const ICData* GetICData(
      const ZoneGrowableArray<const ICData*>& ic_data_array,
      intptr_t deopt_id,
      bool is_static_call);

  virtual TokenPosition token_pos() const { return TokenPosition::kNoSource; }

  virtual intptr_t InputCount() const = 0;
  virtual Value* InputAt(intptr_t i) const = 0;
  void SetInputAt(intptr_t i, Value* value) {
    ASSERT(value != NULL);
    value->set_instruction(this);
    value->set_use_index(i);
    RawSetInputAt(i, value);
  }

  // Remove all inputs (including in the environment) from their
  // definition's use lists.
  void UnuseAllInputs();

  // Call instructions override this function and return the number of
  // pushed arguments.
  virtual intptr_t ArgumentCount() const { return 0; }
  inline Value* ArgumentValueAt(intptr_t index) const;
  inline Definition* ArgumentAt(intptr_t index) const;

  // Sets array of PushArgument instructions.
  virtual void SetPushArguments(PushArgumentsArray* push_arguments) {
    UNREACHABLE();
  }
  // Returns the array of PushArgument instructions.
  virtual PushArgumentsArray* GetPushArguments() const {
    UNREACHABLE();
    return nullptr;
  }
  // Replace inputs with separate PushArgument instructions detached from call.
  virtual void ReplaceInputsWithPushArguments(
      PushArgumentsArray* push_arguments) {
    UNREACHABLE();
  }
  bool HasPushArguments() const { return GetPushArguments() != nullptr; }

  // Repairs trailing PushArgs in environment.
  void RepairPushArgsInEnvironment() const;

  // Returns true if this instruction can deoptimize with its current inputs.
  // This property can change if we add or remove redefinitions that constrain
  // the type or the range of input operands during compilation.
  virtual bool ComputeCanDeoptimize() const = 0;

  // Once we have removed the deopt environment, we assume that this
  // instruction can't deoptimize.
  bool CanDeoptimize() const { return env() != NULL && ComputeCanDeoptimize(); }

  // Visiting support.
  virtual void Accept(FlowGraphVisitor* visitor) = 0;

  Instruction* previous() const { return previous_; }
  void set_previous(Instruction* instr) {
    ASSERT(!IsBlockEntry());
    previous_ = instr;
  }

  Instruction* next() const { return next_; }
  void set_next(Instruction* instr) {
    ASSERT(!IsGraphEntry());
    ASSERT(!IsReturn());
    ASSERT(!IsBranch() || (instr == NULL));
    ASSERT(!IsPhi());
    ASSERT(instr == NULL || !instr->IsBlockEntry());
    // TODO(fschneider): Also add Throw and ReThrow to the list of instructions
    // that do not have a successor. Currently, the graph builder will continue
    // to append instructions in case of a Throw inside an expression. This
    // condition should be handled in the graph builder.
    next_ = instr;
  }

  // Links together two instructions.
  void LinkTo(Instruction* next) {
    ASSERT(this != next);
    this->set_next(next);
    next->set_previous(this);
  }

  // Removes this instruction from the graph, after use lists have been
  // computed. If the instruction is a definition with uses, those uses are
  // unaffected (so the instruction can be reinserted, e.g., hoisting).
  Instruction* RemoveFromGraph(bool return_previous = true);

  // Normal instructions can have 0 (inside a block) or 1 (last instruction in
  // a block) successors. Branch instructions with more than one successor
  // override this function.
  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  void Goto(JoinEntryInstr* entry);

  virtual const char* DebugName() const = 0;

#if defined(DEBUG)
  // Checks that the field stored in an instruction has proper form:
  // - must be a zone-handle
  // - In background compilation, must be cloned.
  // Aborts if field is not OK.
  void CheckField(const Field& field) const;
#else
  void CheckField(const Field& field) const {}
#endif  // DEBUG

  // Printing support.
  const char* ToCString() const;
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
  virtual void PrintTo(BaseTextBuffer* f) const;
  virtual void PrintOperandsTo(BaseTextBuffer* f) const;
#endif
  virtual SExpression* ToSExpression(FlowGraphSerializer* s) const;
  virtual void AddOperandsToSExpression(SExpList* sexp,
                                        FlowGraphSerializer* s) const;
  virtual void AddExtraInfoToSExpression(SExpList* sexp,
                                         FlowGraphSerializer* s) const;

#define DECLARE_INSTRUCTION_TYPE_CHECK(Name, Type) \
  bool Is##Name() const { return (As##Name() != nullptr); } \
  Type* As##Name() { \
    auto const_this = static_cast<const Instruction*>(this); \
    return const_cast<Type*>(const_this->As##Name()); \
  } \
  virtual const Type* As##Name() const { return nullptr; }
#define INSTRUCTION_TYPE_CHECK(Name, Attrs) \
  DECLARE_INSTRUCTION_TYPE_CHECK(Name, Name##Instr)

  DECLARE_INSTRUCTION_TYPE_CHECK(Definition, Definition)
  DECLARE_INSTRUCTION_TYPE_CHECK(BlockEntryWithInitialDefs,
                                 BlockEntryWithInitialDefs)
  DECLARE_INSTRUCTION_TYPE_CHECK(CheckBoundBase, CheckBoundBase)
  FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
  FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK)

#undef INSTRUCTION_TYPE_CHECK
#undef DECLARE_INSTRUCTION_TYPE_CHECK

  template <typename T>
  T* Cast() {
    return static_cast<T*>(this);
  }

  // Returns structure describing location constraints required
  // to emit native code for this instruction.
  LocationSummary* locs() {
    ASSERT(locs_ != NULL);
    return locs_;
  }

  bool HasLocs() const { return locs_ != NULL; }

  virtual LocationSummary* MakeLocationSummary(Zone* zone,
                                               bool is_optimizing) const = 0;

  void InitializeLocationSummary(Zone* zone, bool optimizing) {
    ASSERT(locs_ == NULL);
    locs_ = MakeLocationSummary(zone, optimizing);
  }

  // Makes a new call location summary (or uses `locs`) and initializes the
  // output register constraints depending on the representation of [instr].
  static LocationSummary* MakeCallSummary(Zone* zone,
                                          const Instruction* instr,
                                          LocationSummary* locs = nullptr);

  virtual void EmitNativeCode(FlowGraphCompiler* compiler) { UNIMPLEMENTED(); }

  Environment* env() const { return env_; }
  void SetEnvironment(Environment* deopt_env);
  void RemoveEnvironment();
  void ReplaceInEnvironment(Definition* current, Definition* replacement);

  // Different compiler passes can assign pass-specific ids to the instruction.
  // Only one id can be stored at a time.
  intptr_t GetPassSpecificId(CompilerPass::Id pass) const {
    return (PassSpecificId::DecodePass(pass_specific_id_) == pass)
               ? PassSpecificId::DecodeId(pass_specific_id_)
               : PassSpecificId::kNoId;
  }
  void SetPassSpecificId(CompilerPass::Id pass, intptr_t id) {
    pass_specific_id_ = PassSpecificId::Encode(pass, id);
  }
  bool HasPassSpecificId(CompilerPass::Id pass) const {
    return (PassSpecificId::DecodePass(pass_specific_id_) == pass) &&
           (PassSpecificId::DecodeId(pass_specific_id_) !=
            PassSpecificId::kNoId);
  }

  bool HasUnmatchedInputRepresentations() const;

  // Returns representation expected for the input operand at the given index.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kTagged;
  }

  SpeculativeMode SpeculativeModeOfInputs() const {
    for (intptr_t i = 0; i < InputCount(); i++) {
      if (SpeculativeModeOfInput(i) == kGuardInputs) {
        return kGuardInputs;
      }
    }
    return kNotSpeculative;
  }

  // By default, instructions should check the types of inputs when unboxing.
  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kGuardInputs;
  }

  // Representation of the value produced by this computation.
  virtual Representation representation() const { return kTagged; }

  bool WasEliminated() const { return next() == NULL; }

  // Returns the deoptimization id that corresponds to the deoptimization
  // target that input operand conversions inserted for this instruction can
  // jump to.
  virtual intptr_t DeoptimizationTarget() const {
    UNREACHABLE();
    return DeoptId::kNone;
  }

  // Returns a replacement for the instruction or NULL if the instruction can
  // be eliminated. By default returns this instruction, which means no
  // change.
  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  // Insert this instruction before 'next' after use lists are computed.
  // Instructions cannot be inserted before a block entry or any other
  // instruction without a previous instruction.
  void InsertBefore(Instruction* next) { InsertAfter(next->previous()); }

  // Insert this instruction after 'prev' after use lists are computed.
  void InsertAfter(Instruction* prev);

  // Appends an instruction to the current one and returns the tail.
  // This function updates the def-use chains of the newly appended
  // instruction.
  Instruction* AppendInstruction(Instruction* tail);

  // Returns true if CSE and LICM are allowed for this instruction.
  virtual bool AllowsCSE() const { return false; }

  // Returns true if this instruction has any side-effects besides storing.
  // See StoreInstanceFieldInstr::HasUnknownSideEffects() for rationale.
  virtual bool HasUnknownSideEffects() const = 0;

  // Whether this instruction can call Dart code without going through
  // the runtime.
  //
  // Must be true for any instruction which can call Dart code without
  // first creating an exit frame to transition into the runtime.
  //
  // See also WriteBarrierElimination and Thread::RememberLiveTemporaries().
  virtual bool CanCallDart() const { return false; }

  virtual bool CanTriggerGC() const;

  // Get the block entry for this instruction.
  virtual BlockEntryInstr* GetBlock();

  intptr_t inlining_id() const { return inlining_id_; }
  void set_inlining_id(intptr_t value) {
    ASSERT(value >= 0);
    inlining_id_ = value;
  }
  bool has_inlining_id() const { return inlining_id_ >= 0; }

  // Returns a hash code for use with hash maps.
  virtual intptr_t Hashcode() const;

  // Compares two instructions. Returns true, iff:
  // 1. They have the same tag.
  // 2. All input operands are Equals.
  // 3. They satisfy AttributesEqual.
  bool Equals(Instruction* other) const;

  // Compares the attributes of two instructions (except input operands and
  // tag). All instructions that participate in CSE have to override this
  // function. This function can assume that the argument has the same type as
  // this.
  virtual bool AttributesEqual(Instruction* other) const {
    UNREACHABLE();
    return false;
  }

  virtual void InheritDeoptTarget(Zone* zone, Instruction* other);

  bool NeedsEnvironment() const {
    return ComputeCanDeoptimize() || CanBecomeDeoptimizationTarget() ||
           MayThrow();
  }

  virtual bool CanBecomeDeoptimizationTarget() const { return false; }

  void InheritDeoptTargetAfter(FlowGraph* flow_graph,
                               Definition* call,
                               Definition* result);

  virtual bool MayThrow() const = 0;

  bool IsDominatedBy(Instruction* dom);

  void ClearEnv() { env_ = NULL; }

  void Unsupported(FlowGraphCompiler* compiler);

  static bool SlowPathSharingSupported(bool is_optimizing) {
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM) || \
    defined(TARGET_ARCH_ARM64)
    return FLAG_enable_slow_path_sharing && FLAG_precompiled_mode &&
           is_optimizing;
#else
    return false;
#endif
  }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const { return false; }

  // 'RegisterKindForResult()' returns the register kind necessary to hold the
  // result.
  //
  // This is not virtual because instructions should override representation()
  // instead.
  Location::Kind RegisterKindForResult() const {
    const Representation rep = representation();
    if ((rep == kUnboxedFloat) || (rep == kUnboxedDouble) ||
        (rep == kUnboxedFloat32x4) || (rep == kUnboxedInt32x4) ||
        (rep == kUnboxedFloat64x2)) {
      return Location::kFpuRegister;
    }
    return Location::kRegister;
  }

 protected:
  // GetDeoptId and/or CopyDeoptIdFrom.
  friend class CallSiteInliner;
  friend class LICM;
  friend class ComparisonInstr;
  friend class Scheduler;
  friend class BlockEntryInstr;
  friend class CatchBlockEntryInstr;  // deopt_id_
  friend class DebugStepCheckInstr;   // deopt_id_
  friend class StrictCompareInstr;    // deopt_id_

  // Fetch deopt id without checking if this computation can deoptimize.
  intptr_t GetDeoptId() const { return deopt_id_; }

  void CopyDeoptIdFrom(const Instruction& instr) {
    deopt_id_ = instr.deopt_id_;
  }

 private:
  friend class BranchInstr;          // For RawSetInputAt.
  friend class IfThenElseInstr;      // For RawSetInputAt.
  friend class CheckConditionInstr;  // For RawSetInputAt.

  virtual void RawSetInputAt(intptr_t i, Value* value) = 0;

  class PassSpecificId {
   public:
    static intptr_t Encode(CompilerPass::Id pass, intptr_t id) {
      return (id << kPassBits) | pass;
    }

    static CompilerPass::Id DecodePass(intptr_t value) {
      return static_cast<CompilerPass::Id>(value & Utils::NBitMask(kPassBits));
    }

    static intptr_t DecodeId(intptr_t value) { return (value >> kPassBits); }

    static constexpr intptr_t kNoId = -1;

   private:
    static constexpr intptr_t kPassBits = 8;
    static_assert(CompilerPass::kNumPasses <= (1 << kPassBits),
                  "Pass Id does not fit into the bit field");
  };

  intptr_t deopt_id_;
  intptr_t pass_specific_id_ = PassSpecificId::kNoId;
  Instruction* previous_;
  Instruction* next_;
  Environment* env_;
  LocationSummary* locs_;
  intptr_t inlining_id_;

  DISALLOW_COPY_AND_ASSIGN(Instruction);
};
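
// Example (illustrative): the Is/As helpers generated by the macros above,
// plus the unchecked Cast<T>() for use once the tag is already known.
//
//   if (instr->IsBranch()) {
//     BranchInstr* branch = instr->AsBranch();  // Checked downcast.
//     ...
//   }
//   BranchInstr* branch = instr->Cast<BranchInstr>();  // Unchecked.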

struct BranchLabels {
  compiler::Label* true_label;
  compiler::Label* false_label;
  compiler::Label* fall_through;
};

class PureInstruction : public Instruction {
 public:
  explicit PureInstruction(intptr_t deopt_id) : Instruction(deopt_id) {}

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }
};

// Types to be used as ThrowsTrait for TemplateInstruction/TemplateDefinition.
struct Throws {
  static const bool kCanThrow = true;
};

struct NoThrow {
  static const bool kCanThrow = false;
};

// Types to be used as CSETrait for TemplateInstruction/TemplateDefinition.
// Pure instructions are those that allow CSE and have no effects and
// no dependencies.
template <typename DefaultBase, typename PureBase>
struct Pure {
  typedef PureBase Base;
};

template <typename DefaultBase, typename PureBase>
struct NoCSE {
  typedef DefaultBase Base;
};

template <intptr_t N,
          typename ThrowsTrait,
          template <typename Default, typename Pure> class CSETrait = NoCSE>
class TemplateInstruction
    : public CSETrait<Instruction, PureInstruction>::Base {
 public:
  explicit TemplateInstruction(intptr_t deopt_id = DeoptId::kNone)
      : CSETrait<Instruction, PureInstruction>::Base(deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
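
// Example (illustrative sketch): a hypothetical two-input instruction that
// cannot throw and opts into CSE via the Pure trait, which swaps the base
// class to PureInstruction (AllowsCSE() == true, no unknown side effects).
//
//   class ExampleBinaryOpInstr
//       : public TemplateInstruction<2, NoThrow, Pure> {
//     ...
//   };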

class MoveOperands : public ZoneAllocated {
 public:
  MoveOperands(Location dest, Location src) : dest_(dest), src_(src) {}

  Location src() const { return src_; }
  Location dest() const { return dest_; }

  Location* src_slot() { return &src_; }
  Location* dest_slot() { return &dest_; }

  void set_src(const Location& value) { src_ = value; }
  void set_dest(const Location& value) { dest_ = value; }

  // The parallel move resolver marks moves as "in-progress" by clearing the
  // destination (but not the source).
  Location MarkPending() {
    ASSERT(!IsPending());
    Location dest = dest_;
    dest_ = Location::NoLocation();
    return dest;
  }

  void ClearPending(Location dest) {
    ASSERT(IsPending());
    dest_ = dest;
  }

  bool IsPending() const {
    ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
    return dest_.IsInvalid() && !src_.IsInvalid();
  }

  // True if this move is a move from the given location.
  bool Blocks(Location loc) const {
    return !IsEliminated() && src_.Equals(loc);
  }

  // A move is redundant if it's been eliminated, if its source and
  // destination are the same, or if its destination is unneeded.
  bool IsRedundant() const {
    return IsEliminated() || dest_.IsInvalid() || src_.Equals(dest_);
  }

  // We clear both operands to indicate a move that has been eliminated.
  void Eliminate() { src_ = dest_ = Location::NoLocation(); }
  bool IsEliminated() const {
    ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
    return src_.IsInvalid();
  }

 private:
  Location dest_;
  Location src_;

  DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
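
// Example (illustrative): how the parallel move resolver marks a move as
// in-progress while it performs the moves this one depends on.
//
//   Location saved_dest = move->MarkPending();  // dest_ becomes invalid.
//   ...                                         // emit dependent moves.
//   move->ClearPending(saved_dest);             // dest_ is restored.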

class ParallelMoveInstr : public TemplateInstruction<0, NoThrow> {
 public:
  ParallelMoveInstr() : moves_(4) {}

  DECLARE_INSTRUCTION(ParallelMove)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const {
    // This instruction is never visited by optimization passes.
    UNREACHABLE();
    return false;
  }

  MoveOperands* AddMove(Location dest, Location src) {
    MoveOperands* move = new MoveOperands(dest, src);
    moves_.Add(move);
    return move;
  }

  MoveOperands* MoveOperandsAt(intptr_t index) const { return moves_[index]; }

  intptr_t NumMoves() const { return moves_.length(); }

  bool IsRedundant() const;

  virtual TokenPosition token_pos() const {
    return TokenPosition::kParallelMove;
  }

  PRINT_TO_SUPPORT

 private:
  GrowableArray<MoveOperands*> moves_;  // Elements cannot be null.

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveInstr);
};

// Basic block entries are administrative nodes. There is a distinguished
// graph entry with no predecessor. Joins are the only nodes with multiple
// predecessors. Targets are all other basic block entries. The types
// enforce edge-split form---joins are forbidden as the successors of
// branches.
class BlockEntryInstr : public Instruction {
 public:
  virtual intptr_t PredecessorCount() const = 0;
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const = 0;

  intptr_t preorder_number() const { return preorder_number_; }
  void set_preorder_number(intptr_t number) { preorder_number_ = number; }

  intptr_t postorder_number() const { return postorder_number_; }
  void set_postorder_number(intptr_t number) { postorder_number_ = number; }

  intptr_t block_id() const { return block_id_; }

  // NOTE: These are SSA positions and not token positions. These are used by
  // the register allocator.
  void set_start_pos(intptr_t pos) { start_pos_ = pos; }
  intptr_t start_pos() const { return start_pos_; }
  void set_end_pos(intptr_t pos) { end_pos_ = pos; }
  intptr_t end_pos() const { return end_pos_; }

  BlockEntryInstr* dominator() const { return dominator_; }
  BlockEntryInstr* ImmediateDominator() const;

  const GrowableArray<BlockEntryInstr*>& dominated_blocks() {
    return dominated_blocks_;
  }

  void AddDominatedBlock(BlockEntryInstr* block) {
    ASSERT(!block->IsFunctionEntry() || this->IsGraphEntry());
    block->set_dominator(this);
    dominated_blocks_.Add(block);
  }
  void ClearDominatedBlocks() { dominated_blocks_.Clear(); }

  bool Dominates(BlockEntryInstr* other) const;

  Instruction* last_instruction() const { return last_instruction_; }
  void set_last_instruction(Instruction* instr) { last_instruction_ = instr; }

  ParallelMoveInstr* parallel_move() const { return parallel_move_; }

  bool HasParallelMove() const { return parallel_move_ != NULL; }

  bool HasNonRedundantParallelMove() const {
    return HasParallelMove() && !parallel_move()->IsRedundant();
  }

  ParallelMoveInstr* GetParallelMove() {
    if (parallel_move_ == NULL) {
      parallel_move_ = new ParallelMoveInstr();
    }
    return parallel_move_;
  }

  // Discover basic-block structure of the current block. Must be called
  // on all graph blocks in preorder to yield valid results. As a side effect,
  // the block entry instructions in the graph are assigned preorder numbers.
  // The array 'preorder' maps preorder block numbers to the block entry
  // instruction with that number. The depth first spanning tree is recorded
  // in the array 'parent', which maps preorder block numbers to the preorder
  // number of the block's spanning-tree parent. As a side effect of this
  // function, the set of basic block predecessors (e.g., block entry
  // instructions of predecessor blocks) and also the last instruction in the
  // block is recorded in each entry instruction. Returns true when called the
  // first time on this particular block within one graph traversal, and false
  // on all successive calls.
  bool DiscoverBlock(BlockEntryInstr* predecessor,
                     GrowableArray<BlockEntryInstr*>* preorder,
                     GrowableArray<intptr_t>* parent);

  virtual intptr_t InputCount() const { return 0; }
  virtual Value* InputAt(intptr_t i) const {
    UNREACHABLE();
    return NULL;
  }

  virtual bool CanBecomeDeoptimizationTarget() const {
    // BlockEntry environment is copied to Goto and Branch instructions
    // when we insert new blocks targeting this block.
    return true;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool MayThrow() const { return false; }

  intptr_t try_index() const { return try_index_; }
  void set_try_index(intptr_t index) { try_index_ = index; }

  // True for blocks inside a try { } region.
  bool InsideTryBlock() const { return try_index_ != kInvalidTryIndex; }

  // Loop related methods.
  LoopInfo* loop_info() const { return loop_info_; }
  void set_loop_info(LoopInfo* loop_info) { loop_info_ = loop_info; }
  bool IsLoopHeader() const;
  intptr_t NestingDepth() const;

  virtual BlockEntryInstr* GetBlock() { return this; }

  virtual TokenPosition token_pos() const {
    return TokenPosition::kControlFlow;
  }

  // Helper to mutate the graph during inlining. This block should be
  // replaced with new_block as a predecessor of all of this block's
  // successors.
  void ReplaceAsPredecessorWith(BlockEntryInstr* new_block);

  void set_block_id(intptr_t block_id) { block_id_ = block_id; }

  // Stack-based IR bookkeeping.
  intptr_t stack_depth() const { return stack_depth_; }
  void set_stack_depth(intptr_t s) { stack_depth_ = s; }

  // For all instructions in this block: remove all inputs (including in the
  // environment) from their definitions' use lists.
  void ClearAllInstructions();

  DEFINE_INSTRUCTION_TYPE_CHECK(BlockEntry)

  TO_S_EXPRESSION_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
  ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

 protected:
  BlockEntryInstr(intptr_t block_id,
                  intptr_t try_index,
                  intptr_t deopt_id,
                  intptr_t stack_depth)
      : Instruction(deopt_id),
        block_id_(block_id),
        try_index_(try_index),
        preorder_number_(-1),
        postorder_number_(-1),
        stack_depth_(stack_depth),
        dominator_(nullptr),
        dominated_blocks_(1),
        last_instruction_(NULL),
        parallel_move_(nullptr),
        loop_info_(nullptr) {}

  // Perform a depth first search to find OSR entry and
  // link it to the given graph entry.
  bool FindOsrEntryAndRelink(GraphEntryInstr* graph_entry,
                             Instruction* parent,
                             BitVector* block_marks);

 private:
  friend class FlowGraphDeserializer;  // Access to AddPredecessor().

  virtual void RawSetInputAt(intptr_t i, Value* value) { UNREACHABLE(); }

  virtual void ClearPredecessors() = 0;
  virtual void AddPredecessor(BlockEntryInstr* predecessor) = 0;

  void set_dominator(BlockEntryInstr* instr) { dominator_ = instr; }

  intptr_t block_id_;
  intptr_t try_index_;
  intptr_t preorder_number_;
  intptr_t postorder_number_;
  // Expected stack depth on entry (for stack-based IR only).
  intptr_t stack_depth_;
  // Starting and ending lifetime positions for this block. Used by
  // the linear scan register allocator.
  intptr_t start_pos_;
  intptr_t end_pos_;
  BlockEntryInstr* dominator_;  // Immediate dominator, NULL for graph entry.
  // TODO(fschneider): Optimize the case of one child to save space.
  GrowableArray<BlockEntryInstr*> dominated_blocks_;
  Instruction* last_instruction_;

  // Parallel move that will be used by linear scan register allocator to
  // connect live ranges at the start of the block.
  ParallelMoveInstr* parallel_move_;

  // Closest enveloping loop in loop hierarchy (nullptr at nesting depth 0).
  LoopInfo* loop_info_;

  DISALLOW_COPY_AND_ASSIGN(BlockEntryInstr);
};

class ForwardInstructionIterator : public ValueObject {
 public:
  explicit ForwardInstructionIterator(BlockEntryInstr* block_entry)
      : current_(block_entry) {
    Advance();
  }

  void Advance() {
    ASSERT(!Done());
    current_ = current_->next();
  }

  bool Done() const { return current_ == NULL; }

  // Removes 'current_' from graph and sets 'current_' to previous instruction.
  void RemoveCurrentFromGraph();

  Instruction* Current() const { return current_; }

  bool operator==(const ForwardInstructionIterator& other) const {
    return current_ == other.current_;
  }

  ForwardInstructionIterator& operator++() {
    Advance();
    return *this;
  }

 private:
  Instruction* current_;
};
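
// Example (illustrative): the canonical loop over a block's instructions,
// starting with the first instruction after the block entry.
//
//   for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
//     Instruction* current = it.Current();
//     ...
//   }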
1567
1568class BackwardInstructionIterator : public ValueObject {
1569 public:
1570 explicit BackwardInstructionIterator(BlockEntryInstr* block_entry)
1571 : block_entry_(block_entry), current_(block_entry->last_instruction()) {
1572 ASSERT(block_entry_->previous() == NULL);
1573 }
1574
1575 void Advance() {
1576 ASSERT(!Done());
1577 current_ = current_->previous();
1578 }
1579
1580 bool Done() const { return current_ == block_entry_; }
1581
1582 void RemoveCurrentFromGraph();
1583
1584 Instruction* Current() const { return current_; }
1585
1586 private:
1587 BlockEntryInstr* block_entry_;
1588 Instruction* current_;
1589};
1590
1591// Base class shared by all block entries which define initial definitions.
1592//
1593// The initial definitions define parameters, special parameters and constants.
1594class BlockEntryWithInitialDefs : public BlockEntryInstr {
1595 public:
1596 BlockEntryWithInitialDefs(intptr_t block_id,
1597 intptr_t try_index,
1598 intptr_t deopt_id,
1599 intptr_t stack_depth)
1600 : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth) {}
1601
1602 GrowableArray<Definition*>* initial_definitions() {
1603 return &initial_definitions_;
1604 }
1605 const GrowableArray<Definition*>* initial_definitions() const {
1606 return &initial_definitions_;
1607 }
1608
1609 virtual BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() {
1610 return this;
1611 }
1612 virtual const BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() const {
1613 return this;
1614 }
1615
1616 protected:
1617 void PrintInitialDefinitionsTo(BaseTextBuffer* f) const;
1618
1619 private:
1620 GrowableArray<Definition*> initial_definitions_;
1621
1622 DISALLOW_COPY_AND_ASSIGN(BlockEntryWithInitialDefs);
1623};

class GraphEntryInstr : public BlockEntryWithInitialDefs {
 public:
  GraphEntryInstr(const ParsedFunction& parsed_function, intptr_t osr_id);

  DECLARE_INSTRUCTION(GraphEntry)

  virtual intptr_t PredecessorCount() const { return 0; }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    UNREACHABLE();
    return NULL;
  }
  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  void AddCatchEntry(CatchBlockEntryInstr* entry) { catch_entries_.Add(entry); }

  CatchBlockEntryInstr* GetCatchEntry(intptr_t index);

  void AddIndirectEntry(IndirectEntryInstr* entry) {
    indirect_entries_.Add(entry);
  }

  ConstantInstr* constant_null();

  void RelinkToOsrEntry(Zone* zone, intptr_t max_block_id);
  bool IsCompiledForOsr() const;
  intptr_t osr_id() const { return osr_id_; }

  intptr_t entry_count() const { return entry_count_; }
  void set_entry_count(intptr_t count) { entry_count_ = count; }

  intptr_t spill_slot_count() const { return spill_slot_count_; }
  void set_spill_slot_count(intptr_t count) {
    ASSERT(count >= 0);
    spill_slot_count_ = count;
  }

  // Number of stack slots reserved for compiling try-catch. For functions
  // without try-catch, this is 0. Otherwise, it is the number of local
  // variables.
  intptr_t fixed_slot_count() const { return fixed_slot_count_; }
  void set_fixed_slot_count(intptr_t count) {
    ASSERT(count >= 0);
    fixed_slot_count_ = count;
  }
  FunctionEntryInstr* normal_entry() const { return normal_entry_; }
  FunctionEntryInstr* unchecked_entry() const { return unchecked_entry_; }
  void set_normal_entry(FunctionEntryInstr* entry) { normal_entry_ = entry; }
  void set_unchecked_entry(FunctionEntryInstr* target) {
    unchecked_entry_ = target;
  }
  OsrEntryInstr* osr_entry() const { return osr_entry_; }
  void set_osr_entry(OsrEntryInstr* entry) { osr_entry_ = entry; }

  const ParsedFunction& parsed_function() const { return parsed_function_; }

  const GrowableArray<CatchBlockEntryInstr*>& catch_entries() const {
    return catch_entries_;
  }

  const GrowableArray<IndirectEntryInstr*>& indirect_entries() const {
    return indirect_entries_;
  }

  bool HasSingleEntryPoint() const {
    return catch_entries().is_empty() && unchecked_entry() == nullptr;
  }

  PRINT_TO_SUPPORT

 private:
  friend class FlowGraphDeserializer;  // For the constructor with deopt_id arg.

  GraphEntryInstr(const ParsedFunction& parsed_function,
                  intptr_t osr_id,
                  intptr_t deopt_id);

  virtual void ClearPredecessors() {}
  virtual void AddPredecessor(BlockEntryInstr* predecessor) { UNREACHABLE(); }

  const ParsedFunction& parsed_function_;
  FunctionEntryInstr* normal_entry_ = nullptr;
  FunctionEntryInstr* unchecked_entry_ = nullptr;
  OsrEntryInstr* osr_entry_ = nullptr;
  GrowableArray<CatchBlockEntryInstr*> catch_entries_;
  // Indirect targets are blocks reachable only through indirect gotos.
  GrowableArray<IndirectEntryInstr*> indirect_entries_;
  const intptr_t osr_id_;
  intptr_t entry_count_;
  intptr_t spill_slot_count_;
  intptr_t fixed_slot_count_;  // For try-catch in optimized code.

  DISALLOW_COPY_AND_ASSIGN(GraphEntryInstr);
};

class JoinEntryInstr : public BlockEntryInstr {
 public:
  JoinEntryInstr(intptr_t block_id,
                 intptr_t try_index,
                 intptr_t deopt_id,
                 intptr_t stack_depth = 0)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
        predecessors_(2),  // Two is assumed to be the common case.
        phis_(NULL) {}

  DECLARE_INSTRUCTION(JoinEntry)

  virtual intptr_t PredecessorCount() const { return predecessors_.length(); }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    return predecessors_[index];
  }

  // Returns -1 if pred is not in the list.
  intptr_t IndexOfPredecessor(BlockEntryInstr* pred) const;

  ZoneGrowableArray<PhiInstr*>* phis() const { return phis_; }

  PhiInstr* InsertPhi(intptr_t var_index, intptr_t var_count);
  void RemoveDeadPhis(Definition* replacement);

  void InsertPhi(PhiInstr* phi);
  void RemovePhi(PhiInstr* phi);

  virtual bool HasUnknownSideEffects() const { return false; }

  PRINT_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

 private:
  // Classes that have access to predecessors_ when inlining.
  friend class BlockEntryInstr;
  friend class InlineExitCollector;
  friend class PolymorphicInliner;
  friend class IndirectEntryInstr;  // Access in il_printer.cc.

  // Direct access to phis_ in order to resize it due to phi elimination.
  friend class ConstantPropagator;
  friend class DeadCodeElimination;

  virtual void ClearPredecessors() { predecessors_.Clear(); }
  virtual void AddPredecessor(BlockEntryInstr* predecessor);

  GrowableArray<BlockEntryInstr*> predecessors_;
  ZoneGrowableArray<PhiInstr*>* phis_;

  DISALLOW_COPY_AND_ASSIGN(JoinEntryInstr);
};

class PhiIterator : public ValueObject {
 public:
  explicit PhiIterator(JoinEntryInstr* join) : phis_(join->phis()), index_(0) {}

  void Advance() {
    ASSERT(!Done());
    index_++;
  }

  bool Done() const { return (phis_ == NULL) || (index_ >= phis_->length()); }

  PhiInstr* Current() const { return (*phis_)[index_]; }

  // Removes current phi from graph and sets current to previous phi.
  void RemoveCurrentFromGraph();

 private:
  ZoneGrowableArray<PhiInstr*>* phis_;
  intptr_t index_;
};
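
// Example (illustrative sketch, not part of the VM sources): a typical
// dead-phi removal loop over a join block. RemoveCurrentFromGraph() steps the
// iterator back to the previous phi, so the subsequent Advance() does not
// skip an element. The liveness predicate used here is only an assumed
// example.
//
//   for (PhiIterator it(join); !it.Done(); it.Advance()) {
//     PhiInstr* phi = it.Current();
//     if (!phi->is_alive()) {
//       it.RemoveCurrentFromGraph();
//     }
//   }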

class TargetEntryInstr : public BlockEntryInstr {
 public:
  TargetEntryInstr(intptr_t block_id,
                   intptr_t try_index,
                   intptr_t deopt_id,
                   intptr_t stack_depth = 0)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
        predecessor_(NULL),
        edge_weight_(0.0) {}

  DECLARE_INSTRUCTION(TargetEntry)

  double edge_weight() const { return edge_weight_; }
  void set_edge_weight(double weight) { edge_weight_ = weight; }
  void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }

  virtual intptr_t PredecessorCount() const {
    return (predecessor_ == NULL) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT((index == 0) && (predecessor_ != NULL));
    return predecessor_;
  }

  PRINT_TO_SUPPORT

 private:
  friend class BlockEntryInstr;  // Access to predecessor_ when inlining.

  virtual void ClearPredecessors() { predecessor_ = NULL; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(predecessor_ == NULL);
    predecessor_ = predecessor;
  }

  BlockEntryInstr* predecessor_;
  double edge_weight_;

  DISALLOW_COPY_AND_ASSIGN(TargetEntryInstr);
};

// Represents an entrypoint to a function which callers can invoke (i.e. not
// used for OSR entries).
//
// The flow graph builder might decide to create multiple entrypoints
// (e.g. checked/unchecked entrypoints) and will attach those to the
// [GraphEntryInstr].
//
// Every entrypoint has its own initial definitions. The SSA renaming
// will insert phis for parameter instructions if necessary.
class FunctionEntryInstr : public BlockEntryWithInitialDefs {
 public:
  FunctionEntryInstr(GraphEntryInstr* graph_entry,
                     intptr_t block_id,
                     intptr_t try_index,
                     intptr_t deopt_id)
      : BlockEntryWithInitialDefs(block_id,
                                  try_index,
                                  deopt_id,
                                  /*stack_depth=*/0),
        graph_entry_(graph_entry) {}

  DECLARE_INSTRUCTION(FunctionEntry)

  virtual intptr_t PredecessorCount() const {
    return (graph_entry_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT(index == 0 && graph_entry_ != nullptr);
    return graph_entry_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  PRINT_TO_SUPPORT

 private:
  virtual void ClearPredecessors() { graph_entry_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry());
    graph_entry_ = predecessor->AsGraphEntry();
  }

  GraphEntryInstr* graph_entry_;

  DISALLOW_COPY_AND_ASSIGN(FunctionEntryInstr);
};

// Represents entry into a function from native code.
//
// Native entries are not allowed to have regular parameters. They should use
// NativeParameter instead (which doesn't count as an initial definition).
class NativeEntryInstr : public FunctionEntryInstr {
 public:
  NativeEntryInstr(const compiler::ffi::CallbackMarshaller& marshaller,
                   GraphEntryInstr* graph_entry,
                   intptr_t block_id,
                   intptr_t try_index,
                   intptr_t deopt_id,
                   intptr_t callback_id)
      : FunctionEntryInstr(graph_entry, block_id, try_index, deopt_id),
        callback_id_(callback_id),
        marshaller_(marshaller) {}

  DECLARE_INSTRUCTION(NativeEntry)

  PRINT_TO_SUPPORT

 private:
  void SaveArgument(FlowGraphCompiler* compiler,
                    const compiler::ffi::NativeLocation& loc) const;

  const intptr_t callback_id_;
  const compiler::ffi::CallbackMarshaller& marshaller_;
};

// Represents an OSR entrypoint to a function.
//
// The OSR entry has its own initial definitions.
class OsrEntryInstr : public BlockEntryWithInitialDefs {
 public:
  OsrEntryInstr(GraphEntryInstr* graph_entry,
                intptr_t block_id,
                intptr_t try_index,
                intptr_t deopt_id,
                intptr_t stack_depth)
      : BlockEntryWithInitialDefs(block_id, try_index, deopt_id, stack_depth),
        graph_entry_(graph_entry) {}

  DECLARE_INSTRUCTION(OsrEntry)

  virtual intptr_t PredecessorCount() const {
    return (graph_entry_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT(index == 0 && graph_entry_ != nullptr);
    return graph_entry_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  PRINT_TO_SUPPORT

 private:
  virtual void ClearPredecessors() { graph_entry_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry());
    graph_entry_ = predecessor->AsGraphEntry();
  }

  GraphEntryInstr* graph_entry_;

  DISALLOW_COPY_AND_ASSIGN(OsrEntryInstr);
};

class IndirectEntryInstr : public JoinEntryInstr {
 public:
  IndirectEntryInstr(intptr_t block_id,
                     intptr_t indirect_id,
                     intptr_t try_index,
                     intptr_t deopt_id)
      : JoinEntryInstr(block_id, try_index, deopt_id),
        indirect_id_(indirect_id) {}

  DECLARE_INSTRUCTION(IndirectEntry)

  intptr_t indirect_id() const { return indirect_id_; }

  PRINT_TO_SUPPORT

 private:
  const intptr_t indirect_id_;
};

class CatchBlockEntryInstr : public BlockEntryWithInitialDefs {
 public:
  CatchBlockEntryInstr(bool is_generated,
                       intptr_t block_id,
                       intptr_t try_index,
                       GraphEntryInstr* graph_entry,
                       const Array& handler_types,
                       intptr_t catch_try_index,
                       bool needs_stacktrace,
                       intptr_t deopt_id,
                       const LocalVariable* exception_var,
                       const LocalVariable* stacktrace_var,
                       const LocalVariable* raw_exception_var,
                       const LocalVariable* raw_stacktrace_var)
      : BlockEntryWithInitialDefs(block_id,
                                  try_index,
                                  deopt_id,
                                  /*stack_depth=*/0),
        graph_entry_(graph_entry),
        predecessor_(NULL),
        catch_handler_types_(Array::ZoneHandle(handler_types.raw())),
        catch_try_index_(catch_try_index),
        exception_var_(exception_var),
        stacktrace_var_(stacktrace_var),
        raw_exception_var_(raw_exception_var),
        raw_stacktrace_var_(raw_stacktrace_var),
        needs_stacktrace_(needs_stacktrace),
        is_generated_(is_generated) {}

  DECLARE_INSTRUCTION(CatchBlockEntry)

  virtual intptr_t PredecessorCount() const {
    return (predecessor_ == NULL) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT((index == 0) && (predecessor_ != NULL));
    return predecessor_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  const LocalVariable* exception_var() const { return exception_var_; }
  const LocalVariable* stacktrace_var() const { return stacktrace_var_; }

  const LocalVariable* raw_exception_var() const { return raw_exception_var_; }
  const LocalVariable* raw_stacktrace_var() const {
    return raw_stacktrace_var_;
  }

  bool needs_stacktrace() const { return needs_stacktrace_; }

  bool is_generated() const { return is_generated_; }

  // Returns try index for the try block to which this catch handler
  // corresponds.
  intptr_t catch_try_index() const { return catch_try_index_; }

  PRINT_TO_SUPPORT

 private:
  friend class BlockEntryInstr;  // Access to predecessor_ when inlining.

  virtual void ClearPredecessors() { predecessor_ = NULL; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(predecessor_ == NULL);
    predecessor_ = predecessor;
  }

  GraphEntryInstr* graph_entry_;
  BlockEntryInstr* predecessor_;
  const Array& catch_handler_types_;
  const intptr_t catch_try_index_;
  GrowableArray<Definition*> initial_definitions_;
  const LocalVariable* exception_var_;
  const LocalVariable* stacktrace_var_;
  const LocalVariable* raw_exception_var_;
  const LocalVariable* raw_stacktrace_var_;
  const bool needs_stacktrace_;
  bool is_generated_;

  DISALLOW_COPY_AND_ASSIGN(CatchBlockEntryInstr);
};

// If the result of the allocation is not stored into any field, passed
// as an argument, or used in a phi, then it can't alias with any other
// SSA value.
class AliasIdentity : public ValueObject {
 public:
  // It is unknown if value has aliases.
  static AliasIdentity Unknown() { return AliasIdentity(kUnknown); }

  // It is known that value can have aliases.
  static AliasIdentity Aliased() { return AliasIdentity(kAliased); }

  // It is known that value has no aliases.
  static AliasIdentity NotAliased() { return AliasIdentity(kNotAliased); }

  // It is known that value has no aliases and it was selected by
  // allocation sinking pass as a candidate.
  static AliasIdentity AllocationSinkingCandidate() {
    return AliasIdentity(kAllocationSinkingCandidate);
  }

#define FOR_EACH_ALIAS_IDENTITY_VALUE(V)                                       \
  V(Unknown, 0)                                                                \
  V(NotAliased, 1)                                                             \
  V(Aliased, 2)                                                                \
  V(AllocationSinkingCandidate, 3)

  const char* ToCString() {
    switch (value_) {
#define VALUE_CASE(name, val)                                                  \
  case k##name:                                                                \
    return #name;
      FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_CASE)
#undef VALUE_CASE
      default:
        UNREACHABLE();
        return nullptr;
    }
  }

  static bool Parse(const char* str, AliasIdentity* out) {
#define VALUE_CASE(name, val)                                                  \
  if (strcmp(str, #name) == 0) {                                               \
    out->value_ = k##name;                                                     \
    return true;                                                               \
  }
    FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_CASE)
#undef VALUE_CASE
    return false;
  }

  bool IsUnknown() const { return value_ == kUnknown; }
  bool IsAliased() const { return value_ == kAliased; }
  bool IsNotAliased() const { return (value_ & kNotAliased) != 0; }
  bool IsAllocationSinkingCandidate() const {
    return value_ == kAllocationSinkingCandidate;
  }

  AliasIdentity(const AliasIdentity& other)
      : ValueObject(), value_(other.value_) {}

  AliasIdentity& operator=(const AliasIdentity& other) {
    value_ = other.value_;
    return *this;
  }

 private:
  explicit AliasIdentity(intptr_t value) : value_(value) {}

#define VALUE_DEFN(name, val) k##name = val,
  enum { FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_DEFN) };
#undef VALUE_DEFN

// Undef the FOR_EACH helper macro, since the enum is private.
#undef FOR_EACH_ALIAS_IDENTITY_VALUE

  COMPILE_ASSERT((kUnknown & kNotAliased) == 0);
  COMPILE_ASSERT((kAliased & kNotAliased) == 0);
  COMPILE_ASSERT((kAllocationSinkingCandidate & kNotAliased) != 0);

  intptr_t value_;
};
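
// Worked example (not part of the VM sources): the enum values above are
// chosen so that IsNotAliased() is a single bit test. kNotAliased (1) and
// kAllocationSinkingCandidate (3) both have bit 0 set, therefore:
//
//   AliasIdentity::NotAliased().IsNotAliased()                  // true:  1 & 1
//   AliasIdentity::AllocationSinkingCandidate().IsNotAliased()  // true:  3 & 1
//   AliasIdentity::Aliased().IsNotAliased()                     // false: 2 & 1
//   AliasIdentity::Unknown().IsNotAliased()                     // false: 0 & 1
//
// This is precisely the property pinned down by the COMPILE_ASSERTs above.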

// Abstract super-class of all instructions that define a value (Bind, Phi).
class Definition : public Instruction {
 public:
  explicit Definition(intptr_t deopt_id = DeoptId::kNone);

  // Overridden by definitions that have call counts.
  virtual intptr_t CallCount() const { return -1; }

  intptr_t temp_index() const { return temp_index_; }
  void set_temp_index(intptr_t index) { temp_index_ = index; }
  void ClearTempIndex() { temp_index_ = -1; }
  bool HasTemp() const { return temp_index_ >= 0; }

  intptr_t ssa_temp_index() const { return ssa_temp_index_; }
  void set_ssa_temp_index(intptr_t index) {
    ASSERT(index >= 0);
    ssa_temp_index_ = index;
  }
  bool HasSSATemp() const { return ssa_temp_index_ >= 0; }
  void ClearSSATempIndex() { ssa_temp_index_ = -1; }
  bool HasPairRepresentation() const {
    if (compiler::target::kWordSize == 8) {
      return representation() == kPairOfTagged;
    } else {
      return (representation() == kPairOfTagged) ||
             (representation() == kUnboxedInt64);
    }
  }

  // Compile time type of the definition, which may be requested before type
  // propagation during graph building.
  CompileType* Type() {
    if (type_ == NULL) {
      auto type = new CompileType(ComputeType());
      type->set_owner(this);
      set_type(type);
    }
    return type_;
  }

  bool HasType() const { return (type_ != NULL); }

  inline bool IsInt64Definition();

  bool IsInt32Definition() {
    return IsBinaryInt32Op() || IsBoxInt32() || IsUnboxInt32() ||
           IsIntConverter();
  }

  // Compute compile type for this definition. It is safe to use this
  // approximation even before the type propagator has run (e.g. during graph
  // building).
  virtual CompileType ComputeType() const { return CompileType::Dynamic(); }

  // Update CompileType of the definition. Returns true if the type has
  // changed.
  virtual bool RecomputeType() { return false; }

  PRINT_OPERANDS_TO_SUPPORT
  PRINT_TO_SUPPORT
  TO_S_EXPRESSION_SUPPORT

  bool UpdateType(CompileType new_type) {
    if (type_ == nullptr) {
      auto type = new CompileType(new_type);
      type->set_owner(this);
      set_type(type);
      return true;
    }

    if (type_->IsNone() || !type_->IsEqualTo(&new_type)) {
      *type_ = new_type;
      return true;
    }

    return false;
  }

  bool HasUses() const {
    return (input_use_list_ != NULL) || (env_use_list_ != NULL);
  }
  bool HasOnlyUse(Value* use) const;
  bool HasOnlyInputUse(Value* use) const;

  Value* input_use_list() const { return input_use_list_; }
  void set_input_use_list(Value* head) { input_use_list_ = head; }

  Value* env_use_list() const { return env_use_list_; }
  void set_env_use_list(Value* head) { env_use_list_ = head; }

  void AddInputUse(Value* value) { Value::AddToList(value, &input_use_list_); }
  void AddEnvUse(Value* value) { Value::AddToList(value, &env_use_list_); }

  // Replace uses of this definition with uses of other definition or value.
  // Precondition: use lists must be properly calculated.
  // Postcondition: use lists and use values are still valid.
  void ReplaceUsesWith(Definition* other);

  // Replace this definition with another instruction. Use the provided result
  // definition to replace uses of the original definition. If replacing during
  // iteration, pass the iterator so that the instruction can be replaced
  // without affecting iteration order, otherwise pass a NULL iterator.
  void ReplaceWithResult(Instruction* replacement,
                         Definition* replacement_for_uses,
                         ForwardInstructionIterator* iterator);

  // Replace this definition and all uses with another definition. If
  // replacing during iteration, pass the iterator so that the instruction
  // can be replaced without affecting iteration order, otherwise pass a
  // NULL iterator.
  void ReplaceWith(Definition* other, ForwardInstructionIterator* iterator);

  // A value in the constant propagation lattice.
  // - non-constant sentinel
  // - a constant (any non-sentinel value)
  // - unknown sentinel
  Object& constant_value();

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  Range* range() const { return range_; }
  void set_range(const Range&);

  // Definitions can be canonicalized only into other definitions. To enforce
  // this statically we override the base Canonicalize with a version that
  // returns a Definition (the return type is covariant).
  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  static const intptr_t kReplacementMarker = -2;

  Definition* Replacement() {
    if (ssa_temp_index_ == kReplacementMarker) {
      return reinterpret_cast<Definition*>(temp_index_);
    }
    return this;
  }

  void SetReplacement(Definition* other) {
    ASSERT(ssa_temp_index_ >= 0);
    ASSERT(WasEliminated());
    ssa_temp_index_ = kReplacementMarker;
    temp_index_ = reinterpret_cast<intptr_t>(other);
  }

  virtual AliasIdentity Identity() const { return AliasIdentity::Unknown(); }

  virtual void SetIdentity(AliasIdentity identity) { UNREACHABLE(); }

  // Find the original definition of [this] by following through any
  // redefinition and check instructions.
  Definition* OriginalDefinition();

  // If this definition is a redefinition (in a broad sense, this includes
  // CheckArrayBound and CheckNull instructions) return [Value] corresponding
  // to the input which is being redefined.
  // Otherwise return [nullptr].
  virtual Value* RedefinedValue() const;

  // Find the original definition of [this].
  //
  // This is an extension of [OriginalDefinition] which also follows through
  // any boxing/unboxing and constraint instructions.
  Definition* OriginalDefinitionIgnoreBoxingAndConstraints();

  // Helper method to determine if definition denotes an array length.
  static bool IsArrayLength(Definition* def);

  virtual Definition* AsDefinition() { return this; }
  virtual const Definition* AsDefinition() const { return this; }

 protected:
  friend class RangeAnalysis;
  friend class Value;
  friend class FlowGraphSerializer;  // To access type_ directly.

  Range* range_ = nullptr;

  void set_type(CompileType* type) {
    ASSERT(type->owner() == this);
    type_ = type;
  }

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
  const char* TypeAsCString() const {
    return HasType() ? type_->ToCString() : "";
  }
#endif

 private:
  intptr_t temp_index_ = -1;
  intptr_t ssa_temp_index_ = -1;
  Value* input_use_list_ = nullptr;
  Value* env_use_list_ = nullptr;

  Object* constant_value_ = nullptr;
  CompileType* type_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(Definition);
};
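
// Example (illustrative sketch, not part of the VM sources): Replacement()
// and SetReplacement() overlay a forwarding pointer on an eliminated
// definition instead of keeping a side table: ssa_temp_index_ becomes the
// marker and temp_index_ stores the pointer bits. A pass that eliminates
// definitions in favor of others can later resolve a chain of forwardings
// (names here are hypothetical):
//
//   old_defn->SetReplacement(new_defn);
//   ...
//   Definition* d = some_defn;
//   while (d->Replacement() != d) {
//     d = d->Replacement();  // Follow forwarding until a live definition.
//   }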

// Change a value's definition after use lists have been computed.
inline void Value::BindTo(Definition* def) {
  RemoveFromUseList();
  set_definition(def);
  def->AddInputUse(this);
}

inline void Value::BindToEnvironment(Definition* def) {
  RemoveFromUseList();
  set_definition(def);
  def->AddEnvUse(this);
}

class PureDefinition : public Definition {
 public:
  explicit PureDefinition(intptr_t deopt_id) : Definition(deopt_id) {}

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }
};

template <intptr_t N,
          typename ThrowsTrait,
          template <typename Impure, typename Pure> class CSETrait = NoCSE>
class TemplateDefinition : public CSETrait<Definition, PureDefinition>::Base {
 public:
  explicit TemplateDefinition(intptr_t deopt_id = DeoptId::kNone)
      : CSETrait<Definition, PureDefinition>::Base(deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  friend class BranchInstr;
  friend class IfThenElseInstr;

  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
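
// Example (illustrative, not part of the VM sources): the CSETrait parameter
// picks the base class and with it the CSE defaults. A pure, non-throwing
// two-input definition would be declared as (hypothetical subclass):
//
//   class ExampleOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
//     // Inherits AllowsCSE() == true through PureDefinition.
//   };
//
// whereas TemplateDefinition<2, NoThrow> defaults to NoCSE and derives from
// Definition directly.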

class PhiInstr : public Definition {
 public:
  PhiInstr(JoinEntryInstr* block, intptr_t num_inputs)
      : block_(block),
        inputs_(num_inputs),
        representation_(kTagged),
        reaching_defs_(NULL),
        is_alive_(false),
        is_receiver_(kUnknownReceiver) {
    for (intptr_t i = 0; i < num_inputs; ++i) {
      inputs_.Add(NULL);
    }
  }

  // Get the block entry for that instruction.
  virtual BlockEntryInstr* GetBlock() { return block(); }
  JoinEntryInstr* block() const { return block_; }

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  intptr_t InputCount() const { return inputs_.length(); }

  Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // Phi is alive if it reaches a non-environment use.
  bool is_alive() const { return is_alive_; }
  void mark_alive() { is_alive_ = true; }
  void mark_dead() { is_alive_ = false; }

  virtual Representation RequiredInputRepresentation(intptr_t i) const {
    return representation_;
  }

  virtual Representation representation() const { return representation_; }

  virtual void set_representation(Representation r) { representation_ = r; }

  // In AOT mode Phi instructions do not check types of inputs when unboxing.
  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return CompilerState::Current().is_aot() ? kNotSpeculative : kGuardInputs;
  }

  virtual intptr_t Hashcode() const {
    UNREACHABLE();
    return 0;
  }

  DECLARE_INSTRUCTION(Phi)

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  BitVector* reaching_defs() const { return reaching_defs_; }

  void set_reaching_defs(BitVector* reaching_defs) {
    reaching_defs_ = reaching_defs;
  }

  virtual bool MayThrow() const { return false; }

  // A phi is redundant if all input operands are the same.
  bool IsRedundant() const;

  // A phi is redundant if all input operands are redefinitions of the same
  // value. Returns the replacement for this phi if it is redundant.
  // The replacement is selected among values redefined by inputs.
  Definition* GetReplacementForRedundantPhi() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  PRINT_TO_SUPPORT

  enum ReceiverType { kUnknownReceiver = -1, kNotReceiver = 0, kReceiver = 1 };

  ReceiverType is_receiver() const {
    return static_cast<ReceiverType>(is_receiver_);
  }

  void set_is_receiver(ReceiverType is_receiver) { is_receiver_ = is_receiver; }

 private:
  // Direct access to inputs_ in order to resize it due to unreachable
  // predecessors.
  friend class ConstantPropagator;

  void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }

  JoinEntryInstr* block_;
  GrowableArray<Value*> inputs_;
  Representation representation_;
  BitVector* reaching_defs_;
  bool is_alive_;
  int8_t is_receiver_;

  DISALLOW_COPY_AND_ASSIGN(PhiInstr);
};
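
// Example (illustrative, not part of the VM sources): the two redundancy
// checks above catch different shapes. IsRedundant() matches a phi whose
// inputs are literally the same definition:
//
//   v3 <- phi(v1, v1)          // Replaceable by v1.
//
// GetReplacementForRedundantPhi() additionally sees through redefinitions:
//
//   v2 <- Redefinition(v1)
//   v3 <- phi(v1, v2)          // v1 and v2 denote the same value.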

// This instruction represents an incoming parameter for a function entry,
// an incoming value for an OSR entry, or an incoming value for a catch entry.
// Value [index] always denotes the position of the parameter. When [base_reg]
// is set to FPREG, value [index] corresponds to the environment variable index
// (0 is the very first parameter, 1 is next and so on). When [base_reg] is
// set to SPREG, value [index] needs to be reversed (0 is the very last
// parameter, 1 is next and so on) to get the sp relative position.
class ParameterInstr : public Definition {
 public:
  ParameterInstr(intptr_t index,
                 intptr_t param_offset,
                 BlockEntryInstr* block,
                 Representation representation,
                 Register base_reg = FPREG)
      : index_(index),
        param_offset_(param_offset),
        base_reg_(base_reg),
        representation_(representation),
        block_(block) {}

  DECLARE_INSTRUCTION(Parameter)

  intptr_t index() const { return index_; }
  intptr_t param_offset() const { return param_offset_; }
  Register base_reg() const { return base_reg_; }

  // Get the block entry for that instruction.
  virtual BlockEntryInstr* GetBlock() { return block_; }
  void set_block(BlockEntryInstr* block) { block_ = block; }

  intptr_t InputCount() const { return 0; }
  Value* InputAt(intptr_t i) const {
    UNREACHABLE();
    return NULL;
  }

  virtual Representation representation() const { return representation_; }

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return representation();
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual intptr_t Hashcode() const {
    UNREACHABLE();
    return 0;
  }

  virtual CompileType ComputeType() const;

  virtual bool MayThrow() const { return false; }

  PRINT_OPERANDS_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { UNREACHABLE(); }

  const intptr_t index_;

  // The offset (in words) of the last slot of the parameter, relative
  // to the first parameter.
  // It is used in the FlowGraphAllocator when it sets the assigned location
  // and spill slot for the parameter definition.
  const intptr_t param_offset_;
  const Register base_reg_;
  const Representation representation_;
  BlockEntryInstr* block_;

  DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
};
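
// Worked example (not part of the VM sources): for a function with three
// parameters a, b, c, the class comment above implies these [index] values:
//
//   base_reg == FPREG:  a -> 0, b -> 1, c -> 2
//   base_reg == SPREG:  a -> 2, b -> 1, c -> 0
//
// The SP-relative numbering is reversed because the last parameter sits
// closest to the stack pointer.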

// Native parameters are not treated as initial definitions because they cannot
// be inlined and are only usable in optimized code. The location must be a
// stack location relative to the stack pointer (SPREG) after register-based
// arguments have been saved on entry to a native call. See
// NativeEntryInstr::EmitNativeCode for more details.
//
// TODO(33549): Unify with ParameterInstr.
class NativeParameterInstr : public Definition {
 public:
  NativeParameterInstr(const compiler::ffi::CallbackMarshaller& marshaller,
                       intptr_t index)
      : marshaller_(marshaller), index_(index) {
    const auto& loc = marshaller.NativeLocationOfNativeParameter(index_);
    ASSERT(loc.IsStack() && loc.AsStack().base_register() == SPREG);
  }

  DECLARE_INSTRUCTION(NativeParameter)

  virtual Representation representation() const {
    return marshaller_.RepInFfiCall(index_);
  }

  intptr_t InputCount() const { return 0; }
  Value* InputAt(intptr_t i) const {
    UNREACHABLE();
    return NULL;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // TODO(sjindel): We can make this more precise.
  virtual CompileType ComputeType() const { return CompileType::Dynamic(); }

  virtual bool MayThrow() const { return false; }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { UNREACHABLE(); }

  const compiler::ffi::CallbackMarshaller& marshaller_;
  const intptr_t index_;

  DISALLOW_COPY_AND_ASSIGN(NativeParameterInstr);
};

// Stores a tagged pointer to a slot accessible from a fixed register. It has
// the form:
//
//     base_reg[index + #constant] = value
//
//   Input 0: A tagged Smi [index]
//   Input 1: A tagged pointer [value]
//   offset:  A signed constant offset which fits into 8 bits
//
// Currently this instruction pins the base register to be FP.
//
// This low-level instruction is non-inlinable since it makes assumptions
// about the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class StoreIndexedUnsafeInstr : public TemplateInstruction<2, NoThrow> {
 public:
  StoreIndexedUnsafeInstr(Value* index, Value* value, intptr_t offset)
      : offset_(offset) {
    SetInputAt(kIndexPos, index);
    SetInputAt(kValuePos, value);
  }

  enum { kIndexPos = 0, kValuePos = 1 };

  DECLARE_INSTRUCTION(StoreIndexedUnsafe)

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == kIndexPos || index == kValuePos);
    return kTagged;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const {
    return other->AsStoreIndexedUnsafe()->offset() == offset();
  }

  Value* index() const { return inputs_[kIndexPos]; }
  Value* value() const { return inputs_[kValuePos]; }
  Register base_reg() const { return FPREG; }
  intptr_t offset() const { return offset_; }

  PRINT_OPERANDS_TO_SUPPORT
  ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

 private:
  const intptr_t offset_;

  DISALLOW_COPY_AND_ASSIGN(StoreIndexedUnsafeInstr);
};

// Loads a value from a slot accessible from a fixed register. It has
// the form:
//
//     base_reg[index + #constant]
//
//   Input 0: A tagged Smi [index]
//   offset:  A signed constant offset which fits into 8 bits
//
// Currently this instruction pins the base register to be FP.
//
// This low-level instruction is non-inlinable since it makes assumptions
// about the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
 public:
  LoadIndexedUnsafeInstr(Value* index,
                         intptr_t offset,
                         CompileType result_type,
                         Representation representation = kTagged)
      : offset_(offset), representation_(representation) {
    UpdateType(result_type);
    SetInputAt(0, index);
  }

  DECLARE_INSTRUCTION(LoadIndexedUnsafe)

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return kTagged;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const {
    return other->AsLoadIndexedUnsafe()->offset() == offset();
  }

  virtual Representation representation() const { return representation_; }

  Value* index() const { return InputAt(0); }
  Register base_reg() const { return FPREG; }
  intptr_t offset() const { return offset_; }

  PRINT_OPERANDS_TO_SUPPORT
  ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

 private:
  const intptr_t offset_;
  const Representation representation_;

  DISALLOW_COPY_AND_ASSIGN(LoadIndexedUnsafeInstr);
};
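
// Example (illustrative, not part of the VM sources): in the notation of the
// two class comments above, a matching unsafe store/load pair with
// offset == 8 denotes
//
//   fp[index + 8] = value    // StoreIndexedUnsafeInstr
//   value = fp[index + 8]    // LoadIndexedUnsafeInstr
//
// Both leave the frame layout opaque to the optimizer, which is why graphs
// containing them are rejected by the inliner's CalleeGraphValidator.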

class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
 public:
  MemoryCopyInstr(Value* src,
                  Value* dest,
                  Value* src_start,
                  Value* dest_start,
                  Value* length,
                  classid_t src_cid,
                  classid_t dest_cid)
      : src_cid_(src_cid),
        dest_cid_(dest_cid),
        element_size_(Instance::ElementSizeFor(src_cid)) {
    ASSERT(IsArrayTypeSupported(src_cid));
    ASSERT(IsArrayTypeSupported(dest_cid));
    ASSERT(Instance::ElementSizeFor(src_cid) ==
           Instance::ElementSizeFor(dest_cid));
    SetInputAt(kSrcPos, src);
    SetInputAt(kDestPos, dest);
    SetInputAt(kSrcStartPos, src_start);
    SetInputAt(kDestStartPos, dest_start);
    SetInputAt(kLengthPos, length);
  }

  enum {
    kSrcPos = 0,
    kDestPos = 1,
    kSrcStartPos = 2,
    kDestStartPos = 3,
    kLengthPos = 4
  };

  DECLARE_INSTRUCTION(MemoryCopy)

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    // All inputs are tagged (for now).
    return kTagged;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return true; }

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  Value* src() const { return inputs_[kSrcPos]; }
  Value* dest() const { return inputs_[kDestPos]; }
  Value* src_start() const { return inputs_[kSrcStartPos]; }
  Value* dest_start() const { return inputs_[kDestStartPos]; }
  Value* length() const { return inputs_[kLengthPos]; }

 private:
  // Set array_reg to point to the index indicated by start (contained in
  // start_reg) of the typed data or string in array (contained in array_reg).
  void EmitComputeStartPointer(FlowGraphCompiler* compiler,
                               classid_t array_cid,
                               Value* start,
                               Register array_reg,
                               Register start_reg);

  static bool IsArrayTypeSupported(classid_t array_cid) {
    if (IsTypedDataBaseClassId(array_cid)) {
      return true;
    }
    switch (array_cid) {
      case kOneByteStringCid:
      case kTwoByteStringCid:
      case kExternalOneByteStringCid:
      case kExternalTwoByteStringCid:
        return true;
      default:
        return false;
    }
  }

  classid_t src_cid_;
  classid_t dest_cid_;
  intptr_t element_size_;

  DISALLOW_COPY_AND_ASSIGN(MemoryCopyInstr);
};
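
// Example (illustrative, not part of the VM sources): measured in the shared
// element size asserted in the constructor, MemoryCopy implements
//
//   for (intptr_t i = 0; i < length; i++) {
//     dest[dest_start + i] = src[src_start + i];
//   }
//
// over the payloads of the supported typed-data and string class ids (see
// IsArrayTypeSupported()). Any required bounds checks are assumed to have
// been emitted separately before this instruction.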

// Unwinds the current frame and tail calls a target.
//
// The return address saved by the original caller of this frame will be in
// its usual location (stack or LR). The arguments descriptor supplied by the
// original caller will be put into ARGS_DESC_REG.
//
// This low-level instruction is non-inlinable since it makes assumptions
// about the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class TailCallInstr : public Instruction {
 public:
  TailCallInstr(const Code& code, Value* arg_desc)
      : code_(code), arg_desc_(NULL) {
    SetInputAt(0, arg_desc);
  }

  DECLARE_INSTRUCTION(TailCall)

  const Code& code() const { return code_; }

  virtual intptr_t InputCount() const { return 1; }
  virtual Value* InputAt(intptr_t i) const {
    ASSERT(i == 0);
    return arg_desc_;
  }
  virtual void RawSetInputAt(intptr_t i, Value* value) {
    ASSERT(i == 0);
    arg_desc_ = value;
  }

  // Two tailcalls can be canonicalized into one instruction if both have the
  // same destination.
  virtual bool AllowsCSE() const { return true; }
  virtual bool AttributesEqual(Instruction* other) const {
    return &other->AsTailCall()->code() == &code();
  }

  // Since no code is executed after this instruction, there are no side
  // effects visible to the code that follows it.
  virtual bool HasUnknownSideEffects() const { return false; }
  virtual bool MayThrow() const { return true; }
  virtual bool ComputeCanDeoptimize() const { return false; }

  PRINT_OPERANDS_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

 private:
  const Code& code_;
  Value* arg_desc_;
};

class PushArgumentInstr : public TemplateDefinition<1, NoThrow> {
 public:
  explicit PushArgumentInstr(Value* value, Representation representation)
      : representation_(representation) {
    SetInputAt(0, value);
  }

  DECLARE_INSTRUCTION(PushArgument)

  virtual CompileType ComputeType() const;

  Value* value() const { return InputAt(0); }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual TokenPosition token_pos() const {
    return TokenPosition::kPushArgument;
  }

  virtual Representation representation() const { return representation_; }

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return representation();
  }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const Representation representation_;

  DISALLOW_COPY_AND_ASSIGN(PushArgumentInstr);
};

inline Value* Instruction::ArgumentValueAt(intptr_t index) const {
  PushArgumentsArray* push_arguments = GetPushArguments();
  return push_arguments != nullptr ? (*push_arguments)[index]->value()
                                   : InputAt(index);
}

inline Definition* Instruction::ArgumentAt(intptr_t index) const {
  return ArgumentValueAt(index)->definition();
}
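
// Example (illustrative, not part of the VM sources): ArgumentValueAt() lets
// passes inspect call arguments uniformly in both flow graph states. Early in
// compilation a call carries its arguments as direct inputs; once push
// arguments have been computed, the same query is answered from the
// PushArgument instructions instead:
//
//   v5 <- StaticCall(f, v1, v2)   // GetPushArguments() == nullptr:
//                                 // ArgumentValueAt(0) == InputAt(0).
//
//   PushArgument(v1)
//   PushArgument(v2)
//   v5 <- StaticCall(f)           // ArgumentValueAt(0) is PushArgument(v1)'s
//                                 // value.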

class ReturnInstr : public TemplateInstruction<1, NoThrow> {
 public:
  // If [yield_index] is provided, the instruction will emit an extra
  // yield_index -> pc offset pair into the [PcDescriptors].
  ReturnInstr(TokenPosition token_pos,
              Value* value,
              intptr_t deopt_id,
              intptr_t yield_index = PcDescriptorsLayout::kInvalidYieldIndex,
              Representation representation = kTagged)
      : TemplateInstruction(deopt_id),
        token_pos_(token_pos),
        yield_index_(yield_index),
        representation_(representation) {
    SetInputAt(0, value);
  }

  DECLARE_INSTRUCTION(Return)

  virtual TokenPosition token_pos() const { return token_pos_; }
  Value* value() const { return inputs_[0]; }
  intptr_t yield_index() const { return yield_index_; }

  virtual bool CanBecomeDeoptimizationTarget() const {
    // Return instruction might turn into a Goto instruction after inlining.
    // Every Goto must have an environment.
    return true;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const {
    auto other_return = other->AsReturn();
    return token_pos() == other_return->token_pos() &&
           yield_index() == other_return->yield_index();
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    ASSERT(index == 0);
    return kNotSpeculative;
  }

  virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }

  virtual Representation representation() const { return representation_; }

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return representation_;
  }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const TokenPosition token_pos_;
  const intptr_t yield_index_;
  const Representation representation_;

  DISALLOW_COPY_AND_ASSIGN(ReturnInstr);
};

// Represents a return from a Dart function into native code.
class NativeReturnInstr : public ReturnInstr {
 public:
  NativeReturnInstr(TokenPosition token_pos,
                    Value* value,
                    const compiler::ffi::CallbackMarshaller& marshaller,
                    intptr_t deopt_id)
      : ReturnInstr(token_pos, value, deopt_id), marshaller_(marshaller) {}

  DECLARE_INSTRUCTION(NativeReturn)

  PRINT_OPERANDS_TO_SUPPORT

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return marshaller_.RepInFfiCall(compiler::ffi::kResultIndex);
  }

  virtual bool CanBecomeDeoptimizationTarget() const {
    // Unlike ReturnInstr, NativeReturnInstr cannot be inlined (because it's
    // returning into native code).
    return false;
  }

 private:
  const compiler::ffi::CallbackMarshaller& marshaller_;

  void EmitReturnMoves(FlowGraphCompiler* compiler);

  DISALLOW_COPY_AND_ASSIGN(NativeReturnInstr);
};

class ThrowInstr : public TemplateInstruction<1, Throws> {
 public:
  explicit ThrowInstr(TokenPosition token_pos,
                      intptr_t deopt_id,
                      Value* exception)
      : TemplateInstruction(deopt_id), token_pos_(token_pos) {
    SetInputAt(0, exception);
  }

  DECLARE_INSTRUCTION(Throw)

  virtual TokenPosition token_pos() const { return token_pos_; }
  Value* exception() const { return inputs_[0]; }

  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  virtual bool HasUnknownSideEffects() const { return false; }

 private:
  const TokenPosition token_pos_;

  DISALLOW_COPY_AND_ASSIGN(ThrowInstr);
};

class ReThrowInstr : public TemplateInstruction<2, Throws> {
 public:
  // 'catch_try_index' can be kInvalidTryIndex if the
  // rethrow has been artificially generated by the parser.
  ReThrowInstr(TokenPosition token_pos,
               intptr_t catch_try_index,
               intptr_t deopt_id,
               Value* exception,
               Value* stacktrace)
      : TemplateInstruction(deopt_id),
        token_pos_(token_pos),
        catch_try_index_(catch_try_index) {
    SetInputAt(0, exception);
    SetInputAt(1, stacktrace);
  }

  DECLARE_INSTRUCTION(ReThrow)

  virtual TokenPosition token_pos() const { return token_pos_; }
  intptr_t catch_try_index() const { return catch_try_index_; }
  Value* exception() const { return inputs_[0]; }
  Value* stacktrace() const { return inputs_[1]; }

  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  virtual bool HasUnknownSideEffects() const { return false; }

 private:
  const TokenPosition token_pos_;
  const intptr_t catch_try_index_;

  DISALLOW_COPY_AND_ASSIGN(ReThrowInstr);
};

class StopInstr : public TemplateInstruction<0, NoThrow> {
 public:
  explicit StopInstr(const char* message) : message_(message) {
    ASSERT(message != NULL);
  }

  const char* message() const { return message_; }

  DECLARE_INSTRUCTION(Stop);

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

 private:
  const char* message_;

  DISALLOW_COPY_AND_ASSIGN(StopInstr);
};

class GotoInstr : public TemplateInstruction<0, NoThrow> {
 public:
  explicit GotoInstr(JoinEntryInstr* entry, intptr_t deopt_id)
      : TemplateInstruction(deopt_id),
        block_(NULL),
        successor_(entry),
        edge_weight_(0.0),
        parallel_move_(NULL) {}

  DECLARE_INSTRUCTION(Goto)

  BlockEntryInstr* block() const { return block_; }
  void set_block(BlockEntryInstr* block) { block_ = block; }

  JoinEntryInstr* successor() const { return successor_; }
  void set_successor(JoinEntryInstr* successor) { successor_ = successor; }
  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  double edge_weight() const { return edge_weight_; }
  void set_edge_weight(double weight) { edge_weight_ = weight; }
  void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }

  virtual bool CanBecomeDeoptimizationTarget() const {
    // Goto instruction can be used as a deoptimization target when LICM
    // hoists instructions out of the loop.
    return true;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  ParallelMoveInstr* parallel_move() const { return parallel_move_; }

  bool HasParallelMove() const { return parallel_move_ != NULL; }

  bool HasNonRedundantParallelMove() const {
    return HasParallelMove() && !parallel_move()->IsRedundant();
  }

  ParallelMoveInstr* GetParallelMove() {
    if (parallel_move_ == NULL) {
      parallel_move_ = new ParallelMoveInstr();
    }
    return parallel_move_;
  }

  virtual TokenPosition token_pos() const {
    return TokenPosition::kControlFlow;
  }

  PRINT_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

 private:
  BlockEntryInstr* block_;
  JoinEntryInstr* successor_;
  double edge_weight_;

  // Parallel move that will be used by linear scan register allocator to
  // connect live ranges at the end of the block and resolve phis.
  ParallelMoveInstr* parallel_move_;
};

// IndirectGotoInstr represents a dynamically computed jump. Only
// IndirectEntryInstr targets are valid targets of an indirect goto. The
// concrete target to jump to is given as a parameter to the indirect goto.
//
// In order to preserve split-edge form, an indirect goto does not itself point
// to its targets. Instead, for each possible target, the successors_ field
// will contain an ordinary goto instruction that jumps to the target.
// TODO(zerny): Implement direct support instead of embedding gotos.
//
// Byte offsets of all possible targets are stored in the offsets_ array. The
// desired offset is looked up while the generated code is executing, and
// passed to IndirectGoto as an input.
class IndirectGotoInstr : public TemplateInstruction<1, NoThrow> {
 public:
  IndirectGotoInstr(const TypedData* offsets, Value* offset_from_start)
      : offsets_(*offsets) {
    SetInputAt(0, offset_from_start);
  }

  DECLARE_INSTRUCTION(IndirectGoto)

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kNoRepresentation;
  }

  void AddSuccessor(TargetEntryInstr* successor) {
    ASSERT(successor->next()->IsGoto());
    ASSERT(successor->next()->AsGoto()->successor()->IsIndirectEntry());
    successors_.Add(successor);
  }

  virtual intptr_t SuccessorCount() const { return successors_.length(); }
  virtual TargetEntryInstr* SuccessorAt(intptr_t index) const {
    ASSERT(index < SuccessorCount());
    return successors_[index];
  }

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool CanBecomeDeoptimizationTarget() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  Value* offset() const { return inputs_[0]; }
  void ComputeOffsetTable(FlowGraphCompiler* compiler);

  PRINT_TO_SUPPORT

 private:
  GrowableArray<TargetEntryInstr*> successors_;
  const TypedData& offsets_;
};
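
// Example (illustrative, not part of the VM sources): the split-edge shape
// required by the asserts in AddSuccessor() looks like this, with every
// successor being a TargetEntry whose only instruction is a Goto to the real
// IndirectEntry target (block names are hypothetical):
//
//   IndirectGoto(v0)              // v0: byte offset looked up at runtime.
//       -> TargetEntry B1: Goto E1 (an IndirectEntry)
//       -> TargetEntry B2: Goto E2 (an IndirectEntry)
//
// The entries of offsets_ are filled in by ComputeOffsetTable() during code
// generation.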

class ComparisonInstr : public Definition {
 public:
  Value* left() const { return InputAt(0); }
  Value* right() const { return InputAt(1); }

  virtual TokenPosition token_pos() const { return token_pos_; }
  Token::Kind kind() const { return kind_; }

  virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right) = 0;

  // Emits instructions to do the comparison and branch to the true or false
  // label depending on the result. This implementation calls
  // EmitComparisonCode and then generates the branch instructions.
  virtual void EmitBranchCode(FlowGraphCompiler* compiler, BranchInstr* branch);

  // Used by EmitBranchCode and EmitNativeCode depending on whether the boolean
  // is to be turned into branches or instantiated. May return a valid
  // condition, in which case the caller is expected to emit a branch to the
  // true label based on that condition (or a branch to the false label on the
  // opposite condition). May also branch directly to the labels.
  virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler,
                                       BranchLabels labels) = 0;

  // Emits code that generates 'true' or 'false', depending on the comparison.
  // This implementation will call EmitComparisonCode. If EmitComparisonCode
  // does not use the labels (merely returning a condition) then EmitNativeCode
  // may be able to use the condition to avoid a branch.
  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

  void SetDeoptId(const Instruction& instr) { CopyDeoptIdFrom(instr); }

  // Operation class id is computed from collected ICData.
  void set_operation_cid(intptr_t value) { operation_cid_ = value; }
  intptr_t operation_cid() const { return operation_cid_; }

  virtual void NegateComparison() { kind_ = Token::NegateComparison(kind_); }

  virtual bool CanBecomeDeoptimizationTarget() const { return true; }
  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual bool AttributesEqual(Instruction* other) const {
    ComparisonInstr* other_comparison = other->AsComparison();
    return kind() == other_comparison->kind() &&
           (operation_cid() == other_comparison->operation_cid());
  }

  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

  DEFINE_INSTRUCTION_TYPE_CHECK(Comparison)

 protected:
  ComparisonInstr(TokenPosition token_pos,
                  Token::Kind kind,
                  intptr_t deopt_id = DeoptId::kNone)
      : Definition(deopt_id),
        token_pos_(token_pos),
        kind_(kind),
        operation_cid_(kIllegalCid) {}

 private:
  const TokenPosition token_pos_;
  Token::Kind kind_;
  intptr_t operation_cid_;  // Set by optimizer.

  DISALLOW_COPY_AND_ASSIGN(ComparisonInstr);
};

class PureComparison : public ComparisonInstr {
 public:
  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

 protected:
  PureComparison(TokenPosition token_pos, Token::Kind kind, intptr_t deopt_id)
      : ComparisonInstr(token_pos, kind, deopt_id) {}
};

template <intptr_t N,
          typename ThrowsTrait,
          template <typename Impure, typename Pure> class CSETrait = NoCSE>
class TemplateComparison
    : public CSETrait<ComparisonInstr, PureComparison>::Base {
 public:
  TemplateComparison(TokenPosition token_pos,
                     Token::Kind kind,
                     intptr_t deopt_id = DeoptId::kNone)
      : CSETrait<ComparisonInstr, PureComparison>::Base(token_pos,
                                                        kind,
                                                        deopt_id),
        inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};

class BranchInstr : public Instruction {
 public:
  explicit BranchInstr(ComparisonInstr* comparison, intptr_t deopt_id)
      : Instruction(deopt_id), comparison_(comparison), constant_target_(NULL) {
    ASSERT(comparison->env() == NULL);
    for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
      comparison->InputAt(i)->set_instruction(this);
    }
  }

  DECLARE_INSTRUCTION(Branch)

  virtual intptr_t ArgumentCount() const {
    return comparison()->ArgumentCount();
  }
  virtual void SetPushArguments(PushArgumentsArray* push_arguments) {
    comparison()->SetPushArguments(push_arguments);
  }
  virtual PushArgumentsArray* GetPushArguments() const {
    return comparison()->GetPushArguments();
  }

  intptr_t InputCount() const { return comparison()->InputCount(); }

  Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }

  virtual TokenPosition token_pos() const { return comparison_->token_pos(); }

  virtual bool ComputeCanDeoptimize() const {
    return comparison()->ComputeCanDeoptimize();
  }

  virtual bool CanBecomeDeoptimizationTarget() const {
    return comparison()->CanBecomeDeoptimizationTarget();
  }

  virtual bool HasUnknownSideEffects() const {
    return comparison()->HasUnknownSideEffects();
  }

  virtual bool CanCallDart() const { return comparison()->CanCallDart(); }

  ComparisonInstr* comparison() const { return comparison_; }
  void SetComparison(ComparisonInstr* comp);

  virtual intptr_t DeoptimizationTarget() const {
    return comparison()->DeoptimizationTarget();
  }

  virtual Representation RequiredInputRepresentation(intptr_t i) const {
    return comparison()->RequiredInputRepresentation(i);
  }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  void set_constant_target(TargetEntryInstr* target) {
    ASSERT(target == true_successor() || target == false_successor());
    constant_target_ = target;
  }
  TargetEntryInstr* constant_target() const { return constant_target_; }

  virtual void InheritDeoptTarget(Zone* zone, Instruction* other);

  virtual bool MayThrow() const { return comparison()->MayThrow(); }

  TargetEntryInstr* true_successor() const { return true_successor_; }
  TargetEntryInstr* false_successor() const { return false_successor_; }

  TargetEntryInstr** true_successor_address() { return &true_successor_; }
  TargetEntryInstr** false_successor_address() { return &false_successor_; }

  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  PRINT_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) {
    comparison()->RawSetInputAt(i, value);
  }

  TargetEntryInstr* true_successor_;
  TargetEntryInstr* false_successor_;
  ComparisonInstr* comparison_;
  TargetEntryInstr* constant_target_;

  DISALLOW_COPY_AND_ASSIGN(BranchInstr);
};
3365
3366class DeoptimizeInstr : public TemplateInstruction<0, NoThrow, Pure> {
3367 public:
3368 DeoptimizeInstr(ICData::DeoptReasonId deopt_reason, intptr_t deopt_id)
3369 : TemplateInstruction(deopt_id), deopt_reason_(deopt_reason) {}
3370
3371 virtual bool ComputeCanDeoptimize() const { return true; }
3372
3373 virtual bool AttributesEqual(Instruction* other) const { return true; }
3374
3375 DECLARE_INSTRUCTION(Deoptimize)
3376
3377 private:
3378 const ICData::DeoptReasonId deopt_reason_;
3379
3380 DISALLOW_COPY_AND_ASSIGN(DeoptimizeInstr);
3381};
3382
3383class RedefinitionInstr : public TemplateDefinition<1, NoThrow> {
3384 public:
3385 explicit RedefinitionInstr(Value* value) : constrained_type_(NULL) {
3386 SetInputAt(0, value);
3387 }
3388
3389 DECLARE_INSTRUCTION(Redefinition)
3390
3391 Value* value() const { return inputs_[0]; }
3392
3393 virtual CompileType ComputeType() const;
3394 virtual bool RecomputeType();
3395
3396 virtual Definition* Canonicalize(FlowGraph* flow_graph);
3397
3398 void set_constrained_type(CompileType* type) { constrained_type_ = type; }
3399 CompileType* constrained_type() const { return constrained_type_; }
3400
3401 virtual bool ComputeCanDeoptimize() const { return false; }
3402 virtual bool HasUnknownSideEffects() const { return false; }
3403
3404 virtual Value* RedefinedValue() const;
3405
3406 PRINT_OPERANDS_TO_SUPPORT
3407
3408 private:
3409 CompileType* constrained_type_;
3410 DISALLOW_COPY_AND_ASSIGN(RedefinitionInstr);
3411};
3412
// Keeps the value alive until after this point.
//
// The fence cannot be moved.
3416class ReachabilityFenceInstr : public TemplateInstruction<1, NoThrow> {
3417 public:
3418 explicit ReachabilityFenceInstr(Value* value) { SetInputAt(0, value); }
3419
3420 DECLARE_INSTRUCTION(ReachabilityFence)
3421
3422 Value* value() const { return inputs_[0]; }
3423
3424 virtual bool ComputeCanDeoptimize() const { return false; }
3425 virtual bool HasUnknownSideEffects() const { return false; }
3426
3427 PRINT_OPERANDS_TO_SUPPORT
3428
3429 private:
3430 DISALLOW_COPY_AND_ASSIGN(ReachabilityFenceInstr);
3431};
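
// A typical use of ReachabilityFenceInstr (sketch): when a raw pointer into
// an object has been handed to native code, a fence on the owning object
// keeps it from being collected before the native code is done with the
// pointer.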
3432
3433class ConstraintInstr : public TemplateDefinition<1, NoThrow> {
3434 public:
3435 ConstraintInstr(Value* value, Range* constraint)
3436 : constraint_(constraint), target_(NULL) {
3437 SetInputAt(0, value);
3438 }
3439
3440 DECLARE_INSTRUCTION(Constraint)
3441
3442 virtual CompileType ComputeType() const;
3443
3444 virtual bool ComputeCanDeoptimize() const { return false; }
3445
3446 virtual bool HasUnknownSideEffects() const { return false; }
3447
3448 virtual bool AttributesEqual(Instruction* other) const {
3449 UNREACHABLE();
3450 return false;
3451 }
3452
3453 Value* value() const { return inputs_[0]; }
3454 Range* constraint() const { return constraint_; }
3455
3456 virtual void InferRange(RangeAnalysis* analysis, Range* range);
3457
3458 // Constraints for branches have their target block stored in order
3459 // to find the comparison that generated the constraint:
3460 // target->predecessor->last_instruction->comparison.
3461 void set_target(TargetEntryInstr* target) { target_ = target; }
3462 TargetEntryInstr* target() const { return target_; }
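
  // Example (sketch, names hypothetical): given a ConstraintInstr* c with a
  // branch target, the generating comparison can be recovered as
  //
  //   BlockEntryInstr* pred = c->target()->PredecessorAt(0);
  //   ComparisonInstr* comparison =
  //       pred->last_instruction()->AsBranch()->comparison();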
3463
3464 PRINT_OPERANDS_TO_SUPPORT
3465
3466 private:
3467 Range* constraint_;
3468 TargetEntryInstr* target_;
3469
3470 DISALLOW_COPY_AND_ASSIGN(ConstraintInstr);
3471};
3472
3473class ConstantInstr : public TemplateDefinition<0, NoThrow, Pure> {
3474 public:
3475 ConstantInstr(const Object& value,
3476 TokenPosition token_pos = TokenPosition::kConstant);
3477
3478 DECLARE_INSTRUCTION(Constant)
3479 virtual CompileType ComputeType() const;
3480
3481 virtual Definition* Canonicalize(FlowGraph* flow_graph);
3482
3483 const Object& value() const { return value_; }
3484
3485 bool IsSmi() const { return compiler::target::IsSmi(value()); }
3486
3487 virtual bool ComputeCanDeoptimize() const { return false; }
3488
3489 virtual void InferRange(RangeAnalysis* analysis, Range* range);
3490
3491 virtual bool AttributesEqual(Instruction* other) const;
3492
3493 virtual TokenPosition token_pos() const { return token_pos_; }
3494
3495 bool IsUnboxedSignedIntegerConstant() const {
3496 return representation() == kUnboxedInt32 ||
3497 representation() == kUnboxedInt64;
3498 }
3499
3500 int64_t GetUnboxedSignedIntegerConstantValue() const {
3501 ASSERT(IsUnboxedSignedIntegerConstant());
3502 return value_.IsSmi() ? Smi::Cast(value_).Value()
3503 : Mint::Cast(value_).value();
3504 }
3505
3506 void EmitMoveToLocation(FlowGraphCompiler* compiler,
3507 const Location& destination,
3508 Register tmp = kNoRegister);
3509
3510 PRINT_OPERANDS_TO_SUPPORT
3511 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
3512
3513 private:
3514 const Object& value_;
3515 const TokenPosition token_pos_;
3516
3517 DISALLOW_COPY_AND_ASSIGN(ConstantInstr);
3518};
3519
// Merges a ConstantInstr and its subsequent unboxing into a single
// UnboxedConstantInstr.
// TODO(srdjan): Currently implemented for doubles only; implement for the
// other unboxing instructions as well.
3523class UnboxedConstantInstr : public ConstantInstr {
3524 public:
3525 explicit UnboxedConstantInstr(const Object& value,
3526 Representation representation);
3527
3528 virtual Representation representation() const { return representation_; }
3529
  // Either 0 or the address of the unboxed constant.
3531 uword constant_address() const { return constant_address_; }
3532
3533 DECLARE_INSTRUCTION(UnboxedConstant)
3534
3535 private:
3536 const Representation representation_;
  uword constant_address_;  // Either 0 or points to the untagged constant.
3538
3539 DISALLOW_COPY_AND_ASSIGN(UnboxedConstantInstr);
3540};
3541
3542// Checks that one type is a subtype of another (e.g. for type parameter bounds
3543// checking). Throws a TypeError otherwise. Both types are instantiated at
3544// runtime as necessary.
3545class AssertSubtypeInstr : public TemplateInstruction<4, Throws, Pure> {
3546 public:
3547 AssertSubtypeInstr(TokenPosition token_pos,
3548 Value* instantiator_type_arguments,
3549 Value* function_type_arguments,
3550 Value* sub_type,
3551 Value* super_type,
3552 const String& dst_name,
3553 intptr_t deopt_id)
3554 : TemplateInstruction(deopt_id),
3555 token_pos_(token_pos),
3556 dst_name_(String::ZoneHandle(dst_name.raw())) {
3557 ASSERT(!dst_name.IsNull());
3558 SetInputAt(0, instantiator_type_arguments);
3559 SetInputAt(1, function_type_arguments);
3560 SetInputAt(2, sub_type);
3561 SetInputAt(3, super_type);
3562 }
3563
3564 DECLARE_INSTRUCTION(AssertSubtype);
3565
3566 Value* instantiator_type_arguments() const { return inputs_[0]; }
3567 Value* function_type_arguments() const { return inputs_[1]; }
3568 Value* sub_type() const { return inputs_[2]; }
3569 Value* super_type() const { return inputs_[3]; }
3570
3571 virtual TokenPosition token_pos() const { return token_pos_; }
3572 const String& dst_name() const { return dst_name_; }
3573
3574 virtual bool ComputeCanDeoptimize() const {
3575 return !CompilerState::Current().is_aot();
3576 }
3577
3578 virtual bool CanBecomeDeoptimizationTarget() const { return true; }
3579
3580 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
3581
3582 virtual bool AttributesEqual(Instruction* other) const { return true; }
3583
3584 PRINT_OPERANDS_TO_SUPPORT
3585
3586 private:
3587 const TokenPosition token_pos_;
3588 const String& dst_name_;
3589
3590 DISALLOW_COPY_AND_ASSIGN(AssertSubtypeInstr);
3591};
3592
3593class AssertAssignableInstr : public TemplateDefinition<4, Throws, Pure> {
3594 public:
3595#define FOR_EACH_ASSERT_ASSIGNABLE_KIND(V) \
3596 V(ParameterCheck) \
3597 V(InsertedByFrontend) \
3598 V(FromSource) \
3599 V(Unknown)
3600
3601#define KIND_DEFN(name) k##name,
3602 enum Kind { FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_DEFN) };
3603#undef KIND_DEFN
3604
3605 static const char* KindToCString(Kind kind);
3606 static bool ParseKind(const char* str, Kind* out);
3607
3608 AssertAssignableInstr(TokenPosition token_pos,
3609 Value* value,
3610 Value* dst_type,
3611 Value* instantiator_type_arguments,
3612 Value* function_type_arguments,
3613 const String& dst_name,
3614 intptr_t deopt_id,
3615 Kind kind = kUnknown)
3616 : TemplateDefinition(deopt_id),
3617 token_pos_(token_pos),
3618 dst_name_(dst_name),
3619 kind_(kind) {
3620 ASSERT(!dst_name.IsNull());
3621 SetInputAt(0, value);
3622 SetInputAt(1, dst_type);
3623 SetInputAt(2, instantiator_type_arguments);
3624 SetInputAt(3, function_type_arguments);
3625 }
3626
3627 virtual intptr_t statistics_tag() const;
3628
3629 DECLARE_INSTRUCTION(AssertAssignable)
3630 virtual CompileType ComputeType() const;
3631 virtual bool RecomputeType();
3632
3633 Value* value() const { return inputs_[0]; }
3634 Value* dst_type() const { return inputs_[1]; }
3635 Value* instantiator_type_arguments() const { return inputs_[2]; }
3636 Value* function_type_arguments() const { return inputs_[3]; }
3637
3638 virtual TokenPosition token_pos() const { return token_pos_; }
3639 const String& dst_name() const { return dst_name_; }
3640
3641 virtual bool ComputeCanDeoptimize() const {
3642 return !CompilerState::Current().is_aot();
3643 }
3644
3645 virtual bool CanBecomeDeoptimizationTarget() const {
    // AssertAssignable instructions that are specialized by the optimizer
    // (e.g. replaced with CheckClass) need a deoptimization descriptor
    // placed before them.
3648 return true;
3649 }
3650
3651 virtual Definition* Canonicalize(FlowGraph* flow_graph);
3652
3653 virtual bool AttributesEqual(Instruction* other) const { return true; }
3654
3655 virtual Value* RedefinedValue() const;
3656
3657 PRINT_OPERANDS_TO_SUPPORT
3658 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
3659
3660 private:
3661 const TokenPosition token_pos_;
3662 const String& dst_name_;
3663 const Kind kind_;
3664
3665 DISALLOW_COPY_AND_ASSIGN(AssertAssignableInstr);
3666};
3667
3668class AssertBooleanInstr : public TemplateDefinition<1, Throws, Pure> {
3669 public:
3670 AssertBooleanInstr(TokenPosition token_pos, Value* value, intptr_t deopt_id)
3671 : TemplateDefinition(deopt_id), token_pos_(token_pos) {
3672 SetInputAt(0, value);
3673 }
3674
3675 DECLARE_INSTRUCTION(AssertBoolean)
3676 virtual CompileType ComputeType() const;
3677
3678 virtual TokenPosition token_pos() const { return token_pos_; }
3679 Value* value() const { return inputs_[0]; }
3680
3681 virtual bool ComputeCanDeoptimize() const {
3682 return !CompilerState::Current().is_aot();
3683 }
3684
3685 virtual Definition* Canonicalize(FlowGraph* flow_graph);
3686
3687 virtual bool AttributesEqual(Instruction* other) const { return true; }
3688
3689 virtual Value* RedefinedValue() const;
3690
3691 PRINT_OPERANDS_TO_SUPPORT
3692
3693 private:
3694 const TokenPosition token_pos_;
3695
3696 DISALLOW_COPY_AND_ASSIGN(AssertBooleanInstr);
3697};
3698
// Denotes a special parameter, currently either the context of a closure,
// the type arguments of a generic function, an arguments descriptor, or the
// exception/stack trace parameters of a catch block entry.
3701class SpecialParameterInstr : public TemplateDefinition<0, NoThrow> {
3702 public:
3703#define FOR_EACH_SPECIAL_PARAMETER_KIND(M) \
3704 M(Context) \
3705 M(TypeArgs) \
3706 M(ArgDescriptor) \
3707 M(Exception) \
3708 M(StackTrace)
3709
3710#define KIND_DECL(name) k##name,
3711 enum SpecialParameterKind { FOR_EACH_SPECIAL_PARAMETER_KIND(KIND_DECL) };
3712#undef KIND_DECL
3713
  // Defined as a static intptr_t instead of inside the enum since some
  // switch statements depend on the exhaustiveness checking.
3716#define KIND_INC(name) +1
3717 static const intptr_t kNumKinds = 0 FOR_EACH_SPECIAL_PARAMETER_KIND(KIND_INC);
3718#undef KIND_INC
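  // With the five kinds above, this expands to `0 + 1 + 1 + 1 + 1 + 1`, so
  // kNumKinds stays in sync with the kind list automatically.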
3719
3720 static const char* KindToCString(SpecialParameterKind k);
3721 static bool ParseKind(const char* str, SpecialParameterKind* out);
3722
3723 SpecialParameterInstr(SpecialParameterKind kind,
3724 intptr_t deopt_id,
3725 BlockEntryInstr* block)
3726 : TemplateDefinition(deopt_id), kind_(kind), block_(block) {}
3727
3728 DECLARE_INSTRUCTION(SpecialParameter)
3729
3730 virtual BlockEntryInstr* GetBlock() { return block_; }
3731
3732 virtual CompileType ComputeType() const;
3733
3734 virtual bool ComputeCanDeoptimize() const { return false; }
3735
3736 virtual bool HasUnknownSideEffects() const { return false; }
3737
3738 virtual bool AttributesEqual(Instruction* other) const {
3739 return kind() == other->AsSpecialParameter()->kind();
3740 }
3741 SpecialParameterKind kind() const { return kind_; }
3742
3743 const char* ToCString() const;
3744
3745 PRINT_OPERANDS_TO_SUPPORT
3746 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
3747
3748 private:
3749 const SpecialParameterKind kind_;
3750 BlockEntryInstr* block_;
3751 DISALLOW_COPY_AND_ASSIGN(SpecialParameterInstr);
3752};
3753
3754struct ArgumentsInfo {
3755 ArgumentsInfo(intptr_t type_args_len,
3756 intptr_t count_with_type_args,
3757 intptr_t size_with_type_args,
3758 const Array& argument_names)
3759 : type_args_len(type_args_len),
3760 count_with_type_args(count_with_type_args),
3761 size_with_type_args(size_with_type_args),
3762 count_without_type_args(count_with_type_args -
3763 (type_args_len > 0 ? 1 : 0)),
3764 size_without_type_args(size_with_type_args -
3765 (type_args_len > 0 ? 1 : 0)),
3766 argument_names(argument_names) {}
3767
3768 ArrayPtr ToArgumentsDescriptor() const {
3769 return ArgumentsDescriptor::New(type_args_len, count_without_type_args,
3770 size_without_type_args, argument_names);
3771 }
3772
3773 const intptr_t type_args_len;
3774 const intptr_t count_with_type_args;
3775 const intptr_t size_with_type_args;
3776 const intptr_t count_without_type_args;
3777 const intptr_t size_without_type_args;
3778 const Array& argument_names;
3779};
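
// For example (illustrative only): a call `foo<int>(a, b)` passes its type
// argument vector as one extra leading argument, so
//
//   ArgumentsInfo info(/*type_args_len=*/1, /*count_with_type_args=*/3,
//                      /*size_with_type_args=*/3, Object::null_array());
//
// yields info.count_without_type_args == 2 and
// info.size_without_type_args == 2.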
3780
3781template <intptr_t kExtraInputs>
3782class TemplateDartCall : public Definition {
3783 public:
3784 TemplateDartCall(intptr_t deopt_id,
3785 intptr_t type_args_len,
3786 const Array& argument_names,
3787 InputsArray* inputs,
3788 TokenPosition token_pos)
3789 : Definition(deopt_id),
3790 type_args_len_(type_args_len),
3791 argument_names_(argument_names),
3792 inputs_(inputs),
3793 token_pos_(token_pos) {
3794 ASSERT(argument_names.IsZoneHandle() || argument_names.InVMIsolateHeap());
3795 ASSERT(inputs_->length() >= kExtraInputs);
3796 for (intptr_t i = 0, n = inputs_->length(); i < n; ++i) {
3797 SetInputAt(i, (*inputs_)[i]);
3798 }
3799 }
3800
3801 inline StringPtr Selector();
3802
3803 virtual bool MayThrow() const { return true; }
3804 virtual bool CanCallDart() const { return true; }
3805
3806 virtual intptr_t InputCount() const { return inputs_->length(); }
3807 virtual Value* InputAt(intptr_t i) const { return inputs_->At(i); }
3808
3809 intptr_t FirstArgIndex() const { return type_args_len_ > 0 ? 1 : 0; }
3810 Value* Receiver() const { return this->ArgumentValueAt(FirstArgIndex()); }
3811 intptr_t ArgumentCountWithoutTypeArgs() const {
3812 return ArgumentCount() - FirstArgIndex();
3813 }
3814 intptr_t ArgumentsSizeWithoutTypeArgs() const {
3815 return ArgumentsSize() - FirstArgIndex();
3816 }
  // ArgumentCount() includes the type argument vector, if any.
  // Caution: this must override Instruction::ArgumentCount().
3819 intptr_t ArgumentCount() const {
3820 return push_arguments_ != nullptr ? push_arguments_->length()
3821 : inputs_->length() - kExtraInputs;
3822 }
3823 virtual intptr_t ArgumentsSize() const { return ArgumentCount(); }
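
  // For example, an instance call `recv.foo<int>(a)` carries the inputs
  // [type arguments vector, recv, a], so ArgumentCount() == 3,
  // FirstArgIndex() == 1 and ArgumentCountWithoutTypeArgs() == 2.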
3824
3825 virtual void SetPushArguments(PushArgumentsArray* push_arguments) {
3826 ASSERT(push_arguments_ == nullptr);
3827 push_arguments_ = push_arguments;
3828 }
3829 virtual PushArgumentsArray* GetPushArguments() const {
3830 return push_arguments_;
3831 }
3832 virtual void ReplaceInputsWithPushArguments(
3833 PushArgumentsArray* push_arguments) {
3834 ASSERT(push_arguments_ == nullptr);
3835 ASSERT(push_arguments->length() == ArgumentCount());
3836 SetPushArguments(push_arguments);
3837 ASSERT(inputs_->length() == ArgumentCount() + kExtraInputs);
3838 const intptr_t extra_inputs_base = inputs_->length() - kExtraInputs;
3839 for (intptr_t i = 0, n = ArgumentCount(); i < n; ++i) {
3840 InputAt(i)->RemoveFromUseList();
3841 }
3842 for (intptr_t i = 0; i < kExtraInputs; ++i) {
3843 SetInputAt(i, InputAt(extra_inputs_base + i));
3844 }
3845 inputs_->TruncateTo(kExtraInputs);
3846 }
3847 intptr_t type_args_len() const { return type_args_len_; }
3848 const Array& argument_names() const { return argument_names_; }
3849 virtual TokenPosition token_pos() const { return token_pos_; }
3850 ArrayPtr GetArgumentsDescriptor() const {
3851 return ArgumentsDescriptor::New(
3852 type_args_len(), ArgumentCountWithoutTypeArgs(),
3853 ArgumentsSizeWithoutTypeArgs(), argument_names());
3854 }
3855
3856 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
3857
3858 private:
3859 virtual void RawSetInputAt(intptr_t i, Value* value) {
3860 (*inputs_)[i] = value;
3861 }
3862
3863 intptr_t type_args_len_;
3864 const Array& argument_names_;
3865 InputsArray* inputs_;
3866 PushArgumentsArray* push_arguments_ = nullptr;
3867 TokenPosition token_pos_;
3868
3869 DISALLOW_COPY_AND_ASSIGN(TemplateDartCall);
3870};
3871
3872class ClosureCallInstr : public TemplateDartCall<1> {
3873 public:
3874 ClosureCallInstr(InputsArray* inputs,
3875 intptr_t type_args_len,
3876 const Array& argument_names,
3877 TokenPosition token_pos,
3878 intptr_t deopt_id,
3879 Code::EntryKind entry_kind = Code::EntryKind::kNormal)
3880 : TemplateDartCall(deopt_id,
3881 type_args_len,
3882 argument_names,
3883 inputs,
3884 token_pos),
3885 entry_kind_(entry_kind) {}
3886
3887 DECLARE_INSTRUCTION(ClosureCall)
3888
3889 // TODO(kmillikin): implement exact call counts for closure calls.
3890 virtual intptr_t CallCount() const { return 1; }
3891
3892 virtual bool ComputeCanDeoptimize() const {
3893 return !CompilerState::Current().is_aot();
3894 }
3895
3896 virtual bool HasUnknownSideEffects() const { return true; }
3897
3898 Code::EntryKind entry_kind() const { return entry_kind_; }
3899
3900 PRINT_OPERANDS_TO_SUPPORT
3901 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
3902
3903 private:
3904 const Code::EntryKind entry_kind_;
3905
3906 DISALLOW_COPY_AND_ASSIGN(ClosureCallInstr);
3907};
3908
3909// Common base class for various kinds of instance call instructions
3910// (InstanceCallInstr, PolymorphicInstanceCallInstr).
3911class InstanceCallBaseInstr : public TemplateDartCall<0> {
3912 public:
3913 InstanceCallBaseInstr(TokenPosition token_pos,
3914 const String& function_name,
3915 Token::Kind token_kind,
3916 InputsArray* arguments,
3917 intptr_t type_args_len,
3918 const Array& argument_names,
3919 const ICData* ic_data,
3920 intptr_t deopt_id,
3921 const Function& interface_target,
3922 const Function& tearoff_interface_target)
3923 : TemplateDartCall(deopt_id,
3924 type_args_len,
3925 argument_names,
3926 arguments,
3927 token_pos),
3928 ic_data_(ic_data),
3929 function_name_(function_name),
3930 token_kind_(token_kind),
3931 interface_target_(interface_target),
3932 tearoff_interface_target_(tearoff_interface_target),
3933 result_type_(nullptr),
3934 has_unique_selector_(false) {
3935 ASSERT(function_name.IsNotTemporaryScopedHandle());
3936 ASSERT(interface_target.IsNotTemporaryScopedHandle());
3937 ASSERT(tearoff_interface_target.IsNotTemporaryScopedHandle());
3938 ASSERT(!arguments->is_empty());
3939 ASSERT(Token::IsBinaryOperator(token_kind) ||
3940 Token::IsEqualityOperator(token_kind) ||
3941 Token::IsRelationalOperator(token_kind) ||
3942 Token::IsUnaryOperator(token_kind) ||
3943 Token::IsIndexOperator(token_kind) ||
3944 Token::IsTypeTestOperator(token_kind) ||
3945 Token::IsTypeCastOperator(token_kind) || token_kind == Token::kGET ||
3946 token_kind == Token::kSET || token_kind == Token::kILLEGAL);
3947 }
3948
3949 const ICData* ic_data() const { return ic_data_; }
3950 bool HasICData() const {
3951 return (ic_data() != nullptr) && !ic_data()->IsNull();
3952 }
3953
3954 // ICData can be replaced by optimizer.
3955 void set_ic_data(const ICData* value) { ic_data_ = value; }
3956
3957 const String& function_name() const { return function_name_; }
3958 Token::Kind token_kind() const { return token_kind_; }
3959 const Function& interface_target() const { return interface_target_; }
3960 const Function& tearoff_interface_target() const {
3961 return tearoff_interface_target_;
3962 }
3963
3964 bool has_unique_selector() const { return has_unique_selector_; }
3965 void set_has_unique_selector(bool b) { has_unique_selector_ = b; }
3966
3967 virtual CompileType ComputeType() const;
3968
3969 virtual bool ComputeCanDeoptimize() const {
3970 return !CompilerState::Current().is_aot();
3971 }
3972
3973 virtual bool CanBecomeDeoptimizationTarget() const {
3974 // Instance calls that are specialized by the optimizer need a
3975 // deoptimization descriptor before the call.
3976 return true;
3977 }
3978
3979 virtual bool HasUnknownSideEffects() const { return true; }
3980
3981 void SetResultType(Zone* zone, CompileType new_type) {
3982 result_type_ = new (zone) CompileType(new_type);
3983 }
3984
3985 CompileType* result_type() const { return result_type_; }
3986
3987 intptr_t result_cid() const {
3988 if (result_type_ == nullptr) {
3989 return kDynamicCid;
3990 }
3991 return result_type_->ToCid();
3992 }
3993
3994 FunctionPtr ResolveForReceiverClass(const Class& cls, bool allow_add = true);
3995
3996 Code::EntryKind entry_kind() const { return entry_kind_; }
3997 void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }
3998
3999 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
4000 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
4001 DEFINE_INSTRUCTION_TYPE_CHECK(InstanceCallBase);
4002
4003 bool receiver_is_not_smi() const { return receiver_is_not_smi_; }
4004 void set_receiver_is_not_smi(bool value) { receiver_is_not_smi_ = value; }
4005
4006 // Tries to prove that the receiver will not be a Smi based on the
4007 // interface target, CompileType and hints from TFA.
4008 void UpdateReceiverSminess(Zone* zone);
4009
4010 bool CanReceiverBeSmiBasedOnInterfaceTarget(Zone* zone) const;
4011
4012 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
4013 if (type_args_len() > 0) {
4014 if (idx == 0) {
4015 return kGuardInputs;
4016 }
4017 idx--;
4018 }
4019 return interface_target_.is_unboxed_parameter_at(idx) ? kNotSpeculative
4020 : kGuardInputs;
4021 }
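
  // E.g. when a type argument vector is present it occupies input 0 and is
  // always guarded; the remaining indices are shifted by one before the
  // interface target's unboxed-parameter metadata is consulted.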
4022
4023 virtual intptr_t ArgumentsSize() const;
4024
4025 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
4026
4027 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
4028
4029 virtual Representation representation() const;
4030
4031 protected:
4032 friend class CallSpecializer;
4033 void set_ic_data(ICData* value) { ic_data_ = value; }
4034 void set_result_type(CompileType* result_type) { result_type_ = result_type; }
4035
4036 private:
4037 const ICData* ic_data_;
4038 const String& function_name_;
  const Token::Kind token_kind_;  // Operator kind, kGET, kSET or kILLEGAL.
4040 const Function& interface_target_;
4041 const Function& tearoff_interface_target_;
4042 CompileType* result_type_; // Inferred result type.
4043 bool has_unique_selector_;
4044 Code::EntryKind entry_kind_ = Code::EntryKind::kNormal;
4045 bool receiver_is_not_smi_ = false;
4046
4047 DISALLOW_COPY_AND_ASSIGN(InstanceCallBaseInstr);
4048};
4049
4050class InstanceCallInstr : public InstanceCallBaseInstr {
4051 public:
4052 InstanceCallInstr(
4053 TokenPosition token_pos,
4054 const String& function_name,
4055 Token::Kind token_kind,
4056 InputsArray* arguments,
4057 intptr_t type_args_len,
4058 const Array& argument_names,
4059 intptr_t checked_argument_count,
4060 const ZoneGrowableArray<const ICData*>& ic_data_array,
4061 intptr_t deopt_id,
4062 const Function& interface_target = Function::null_function(),
4063 const Function& tearoff_interface_target = Function::null_function())
4064 : InstanceCallBaseInstr(
4065 token_pos,
4066 function_name,
4067 token_kind,
4068 arguments,
4069 type_args_len,
4070 argument_names,
4071 GetICData(ic_data_array, deopt_id, /*is_static_call=*/false),
4072 deopt_id,
4073 interface_target,
4074 tearoff_interface_target),
4075 checked_argument_count_(checked_argument_count) {}
4076
4077 InstanceCallInstr(
4078 TokenPosition token_pos,
4079 const String& function_name,
4080 Token::Kind token_kind,
4081 InputsArray* arguments,
4082 intptr_t type_args_len,
4083 const Array& argument_names,
4084 intptr_t checked_argument_count,
4085 intptr_t deopt_id,
4086 const Function& interface_target = Function::null_function(),
4087 const Function& tearoff_interface_target = Function::null_function())
4088 : InstanceCallBaseInstr(token_pos,
4089 function_name,
4090 token_kind,
4091 arguments,
4092 type_args_len,
4093 argument_names,
4094 /*ic_data=*/nullptr,
4095 deopt_id,
4096 interface_target,
4097 tearoff_interface_target),
4098 checked_argument_count_(checked_argument_count) {}
4099
4100 DECLARE_INSTRUCTION(InstanceCall)
4101
4102 intptr_t checked_argument_count() const { return checked_argument_count_; }
4103
4104 virtual intptr_t CallCount() const {
4105 return ic_data() == nullptr ? 0 : ic_data()->AggregateCount();
4106 }
4107
4108 void set_receivers_static_type(const AbstractType* receiver_type) {
4109 ASSERT(receiver_type != nullptr);
4110 receivers_static_type_ = receiver_type;
4111 }
4112
4113 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4114
4115 PRINT_OPERANDS_TO_SUPPORT
4116 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
4117
4118 bool MatchesCoreName(const String& name);
4119
4120 const class BinaryFeedback& BinaryFeedback();
4121 void SetBinaryFeedback(const class BinaryFeedback* binary) {
4122 binary_ = binary;
4123 }
4124
4125 const CallTargets& Targets();
4126 void SetTargets(const CallTargets* targets) { targets_ = targets; }
4127
4128 private:
4129 const CallTargets* targets_ = nullptr;
4130 const class BinaryFeedback* binary_ = nullptr;
4131 const intptr_t checked_argument_count_;
4132 const AbstractType* receivers_static_type_ = nullptr;
4133
4134 DISALLOW_COPY_AND_ASSIGN(InstanceCallInstr);
4135};
4136
4137class PolymorphicInstanceCallInstr : public InstanceCallBaseInstr {
4138 public:
4139 // Generate a replacement polymorphic call instruction.
4140 static PolymorphicInstanceCallInstr* FromCall(Zone* zone,
4141 InstanceCallBaseInstr* call,
4142 const CallTargets& targets,
4143 bool complete) {
4144 ASSERT(!call->HasPushArguments());
4145 InputsArray* args = new (zone) InputsArray(zone, call->ArgumentCount());
4146 for (intptr_t i = 0, n = call->ArgumentCount(); i < n; ++i) {
4147 args->Add(call->ArgumentValueAt(i)->CopyWithType(zone));
4148 }
4149 auto new_call = new (zone) PolymorphicInstanceCallInstr(
4150 call->token_pos(), call->function_name(), call->token_kind(), args,
4151 call->type_args_len(), call->argument_names(), call->ic_data(),
4152 call->deopt_id(), call->interface_target(),
4153 call->tearoff_interface_target(), targets, complete);
4154 if (call->has_inlining_id()) {
4155 new_call->set_inlining_id(call->inlining_id());
4156 }
4157 new_call->set_result_type(call->result_type());
4158 new_call->set_entry_kind(call->entry_kind());
4159 new_call->set_has_unique_selector(call->has_unique_selector());
4160 return new_call;
4161 }
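
  // Example (sketch, names hypothetical): once type feedback yields a
  // CallTargets set for an instance call, the optimizer can replace the call
  // in place:
  //
  //   auto replacement = PolymorphicInstanceCallInstr::FromCall(
  //       zone, call, targets, /*complete=*/false);
  //   call->ReplaceWith(replacement, current_iterator);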
4162
4163 bool complete() const { return complete_; }
4164
4165 virtual CompileType ComputeType() const;
4166
4167 bool HasOnlyDispatcherOrImplicitAccessorTargets() const;
4168
4169 const CallTargets& targets() const { return targets_; }
4170 intptr_t NumberOfChecks() const { return targets_.length(); }
4171
4172 bool IsSureToCallSingleRecognizedTarget() const;
4173
4174 virtual intptr_t CallCount() const;
4175
  // If this polymorphic call site was created to cover the remaining cids
  // after inlining, then we need to keep track of the total number of calls,
  // including the ones that we inlined. This is different from the CallCount
  // above: e.g. if there were 100 calls originally, distributed across four
  // class-ids in the ratio 50, 40, 7, 3, and the first two were inlined, we
  // are left with only 10 calls in the CallCount above, but the heuristics
  // need to know that the last two cids cover 7% and 3% of the calls, not
  // 70% and 30%.
4183 intptr_t total_call_count() { return total_call_count_; }
4184
4185 void set_total_call_count(intptr_t count) { total_call_count_ = count; }
4186
4187 DECLARE_INSTRUCTION(PolymorphicInstanceCall)
4188
4189 virtual Definition* Canonicalize(FlowGraph* graph);
4190
4191 static TypePtr ComputeRuntimeType(const CallTargets& targets);
4192
4193 PRINT_OPERANDS_TO_SUPPORT
4194 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
4195
4196 private:
4197 PolymorphicInstanceCallInstr(TokenPosition token_pos,
4198 const String& function_name,
4199 Token::Kind token_kind,
4200 InputsArray* arguments,
4201 intptr_t type_args_len,
4202 const Array& argument_names,
4203 const ICData* ic_data,
4204 intptr_t deopt_id,
4205 const Function& interface_target,
4206 const Function& tearoff_interface_target,
4207 const CallTargets& targets,
4208 bool complete)
4209 : InstanceCallBaseInstr(token_pos,
4210 function_name,
4211 token_kind,
4212 arguments,
4213 type_args_len,
4214 argument_names,
4215 ic_data,
4216 deopt_id,
4217 interface_target,
4218 tearoff_interface_target),
4219 targets_(targets),
4220 complete_(complete) {
4221 ASSERT(targets.length() != 0);
4222 total_call_count_ = CallCount();
4223 }
4224
4225 const CallTargets& targets_;
4226 const bool complete_;
4227 intptr_t total_call_count_;
4228
4229 friend class PolymorphicInliner;
4230
4231 DISALLOW_COPY_AND_ASSIGN(PolymorphicInstanceCallInstr);
4232};
4233
4234// Instance call using the global dispatch table.
4235//
// Takes the untagged class id of the receiver as an extra (last) input.
4237class DispatchTableCallInstr : public TemplateDartCall<1> {
4238 public:
4239 DispatchTableCallInstr(TokenPosition token_pos,
4240 const Function& interface_target,
4241 const compiler::TableSelector* selector,
4242 InputsArray* arguments,
4243 intptr_t type_args_len,
4244 const Array& argument_names)
4245 : TemplateDartCall(DeoptId::kNone,
4246 type_args_len,
4247 argument_names,
4248 arguments,
4249 token_pos),
4250 interface_target_(interface_target),
4251 selector_(selector) {
4252 ASSERT(selector != nullptr);
4253 ASSERT(interface_target_.IsNotTemporaryScopedHandle());
4254 ASSERT(!arguments->is_empty());
4255 }
4256
4257 static DispatchTableCallInstr* FromCall(
4258 Zone* zone,
4259 const InstanceCallBaseInstr* call,
4260 Value* cid,
4261 const Function& interface_target,
4262 const compiler::TableSelector* selector);
4263
4264 DECLARE_INSTRUCTION(DispatchTableCall)
4265
4266 const Function& interface_target() const { return interface_target_; }
4267 const compiler::TableSelector* selector() const { return selector_; }
4268
4269 Value* class_id() const { return InputAt(InputCount() - 1); }
4270
4271 virtual CompileType ComputeType() const;
4272
4273 virtual bool ComputeCanDeoptimize() const { return false; }
4274
4275 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4276
4277 virtual bool CanBecomeDeoptimizationTarget() const { return false; }
4278
4279 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
4280
4281 virtual bool HasUnknownSideEffects() const { return true; }
4282
4283 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
4284 if (type_args_len() > 0) {
4285 if (idx == 0) {
4286 return kGuardInputs;
4287 }
4288 idx--;
4289 }
4290 return interface_target_.is_unboxed_parameter_at(idx) ? kNotSpeculative
4291 : kGuardInputs;
4292 }
4293
4294 virtual intptr_t ArgumentsSize() const;
4295
4296 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
4297
4298 virtual Representation representation() const;
4299
4300 PRINT_OPERANDS_TO_SUPPORT
4301
4302 private:
4303 const Function& interface_target_;
4304 const compiler::TableSelector* selector_;
4305
4306 DISALLOW_COPY_AND_ASSIGN(DispatchTableCallInstr);
4307};
4308
4309class StrictCompareInstr : public TemplateComparison<2, NoThrow, Pure> {
4310 public:
4311 StrictCompareInstr(TokenPosition token_pos,
4312 Token::Kind kind,
4313 Value* left,
4314 Value* right,
4315 bool needs_number_check,
4316 intptr_t deopt_id);
4317
4318 DECLARE_COMPARISON_INSTRUCTION(StrictCompare)
4319
4320 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
4321
4322 virtual CompileType ComputeType() const;
4323
4324 virtual bool ComputeCanDeoptimize() const { return false; }
4325
4326 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4327
4328 bool needs_number_check() const { return needs_number_check_; }
4329 void set_needs_number_check(bool value) { needs_number_check_ = value; }
4330
4331 bool AttributesEqual(Instruction* other) const;
4332
4333 PRINT_OPERANDS_TO_SUPPORT
4334 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT;
4335
4336 private:
4337 Condition EmitComparisonCodeRegConstant(FlowGraphCompiler* compiler,
4338 BranchLabels labels,
4339 Register reg,
4340 const Object& obj);
4341 bool TryEmitBoolTest(FlowGraphCompiler* compiler,
4342 BranchLabels labels,
4343 intptr_t input_index,
4344 const Object& obj,
4345 Condition* condition_out);
4346
4347 // True if the comparison must check for double or Mint and
4348 // use value comparison instead.
4349 bool needs_number_check_;
4350
4351 DISALLOW_COPY_AND_ASSIGN(StrictCompareInstr);
4352};
4353
// Comparison instruction that is equivalent to the (left & right) == 0
// (for Token::kEQ) or (left & right) != 0 (for Token::kNE) pattern.
4356class TestSmiInstr : public TemplateComparison<2, NoThrow, Pure> {
4357 public:
4358 TestSmiInstr(TokenPosition token_pos,
4359 Token::Kind kind,
4360 Value* left,
4361 Value* right)
4362 : TemplateComparison(token_pos, kind) {
4363 ASSERT(kind == Token::kEQ || kind == Token::kNE);
4364 SetInputAt(0, left);
4365 SetInputAt(1, right);
4366 }
4367
4368 DECLARE_COMPARISON_INSTRUCTION(TestSmi);
4369
4370 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
4371
4372 virtual CompileType ComputeType() const;
4373
4374 virtual bool ComputeCanDeoptimize() const { return false; }
4375
4376 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
4377 return kTagged;
4378 }
4379
4380 private:
4381 DISALLOW_COPY_AND_ASSIGN(TestSmiInstr);
4382};
4383
// Checks the class id of the input value against the cids stored in a table
// and either returns a result or deoptimizes. If the cid is not in the list
// and there is a deopt id, then the instruction deoptimizes. If there is no
// deopt id, all the results must be the same (all true or all false) and the
// instruction returns the opposite for cids not on the list. The first entry
// in the table must always be the result for the Smi class id and is allowed
// to differ from the other results even in the no-deopt case.
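//
// For example (illustrative; assuming the flat (cid, result) pair encoding
// used for cid_results), a test corresponding to `x is String` might carry
//
//   [kSmiCid, false, kOneByteStringCid, true, kTwoByteStringCid, true]
//
// where any cid not listed either deoptimizes or yields false.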
4391class TestCidsInstr : public TemplateComparison<1, NoThrow, Pure> {
4392 public:
4393 TestCidsInstr(TokenPosition token_pos,
4394 Token::Kind kind,
4395 Value* value,
4396 const ZoneGrowableArray<intptr_t>& cid_results,
4397 intptr_t deopt_id);
4398
4399 const ZoneGrowableArray<intptr_t>& cid_results() const {
4400 return cid_results_;
4401 }
4402
4403 DECLARE_COMPARISON_INSTRUCTION(TestCids);
4404
4405 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
4406
4407 virtual CompileType ComputeType() const;
4408
4409 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4410
4411 virtual bool ComputeCanDeoptimize() const {
4412 return GetDeoptId() != DeoptId::kNone;
4413 }
4414
4415 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
4416 return kTagged;
4417 }
4418
4419 virtual bool AttributesEqual(Instruction* other) const;
4420
4421 void set_licm_hoisted(bool value) { licm_hoisted_ = value; }
4422
4423 PRINT_OPERANDS_TO_SUPPORT
4424
4425 private:
4426 const ZoneGrowableArray<intptr_t>& cid_results_;
4427 bool licm_hoisted_;
4428 DISALLOW_COPY_AND_ASSIGN(TestCidsInstr);
4429};
4430
4431class EqualityCompareInstr : public TemplateComparison<2, NoThrow, Pure> {
4432 public:
4433 EqualityCompareInstr(TokenPosition token_pos,
4434 Token::Kind kind,
4435 Value* left,
4436 Value* right,
4437 intptr_t cid,
4438 intptr_t deopt_id,
4439 SpeculativeMode speculative_mode = kGuardInputs)
4440 : TemplateComparison(token_pos, kind, deopt_id),
4441 speculative_mode_(speculative_mode) {
4442 ASSERT(Token::IsEqualityOperator(kind));
4443 SetInputAt(0, left);
4444 SetInputAt(1, right);
4445 set_operation_cid(cid);
4446 }
4447
4448 DECLARE_COMPARISON_INSTRUCTION(EqualityCompare)
4449
4450 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
4451
4452 virtual CompileType ComputeType() const;
4453
4454 virtual bool ComputeCanDeoptimize() const { return false; }
4455
4456 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
4457 ASSERT((idx == 0) || (idx == 1));
4458 if (operation_cid() == kDoubleCid) return kUnboxedDouble;
4459 if (operation_cid() == kMintCid) return kUnboxedInt64;
4460 return kTagged;
4461 }
4462
4463 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
4464 return speculative_mode_;
4465 }
4466
4467 virtual bool AttributesEqual(Instruction* other) const {
4468 return ComparisonInstr::AttributesEqual(other) &&
4469 (speculative_mode_ == other->AsEqualityCompare()->speculative_mode_);
4470 }
4471
4472 PRINT_OPERANDS_TO_SUPPORT
4473
4474 private:
4475 const SpeculativeMode speculative_mode_;
4476 DISALLOW_COPY_AND_ASSIGN(EqualityCompareInstr);
4477};
4478
4479class RelationalOpInstr : public TemplateComparison<2, NoThrow, Pure> {
4480 public:
4481 RelationalOpInstr(TokenPosition token_pos,
4482 Token::Kind kind,
4483 Value* left,
4484 Value* right,
4485 intptr_t cid,
4486 intptr_t deopt_id,
4487 SpeculativeMode speculative_mode = kGuardInputs)
4488 : TemplateComparison(token_pos, kind, deopt_id),
4489 speculative_mode_(speculative_mode) {
4490 ASSERT(Token::IsRelationalOperator(kind));
4491 SetInputAt(0, left);
4492 SetInputAt(1, right);
4493 set_operation_cid(cid);
4494 }
4495
4496 DECLARE_COMPARISON_INSTRUCTION(RelationalOp)
4497
4498 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
4499
4500 virtual CompileType ComputeType() const;
4501
4502 virtual bool ComputeCanDeoptimize() const { return false; }
4503
4504 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
4505 ASSERT((idx == 0) || (idx == 1));
4506 if (operation_cid() == kDoubleCid) return kUnboxedDouble;
4507 if (operation_cid() == kMintCid) return kUnboxedInt64;
4508 return kTagged;
4509 }
4510
4511 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
4512 return speculative_mode_;
4513 }
4514
4515 virtual bool AttributesEqual(Instruction* other) const {
4516 return ComparisonInstr::AttributesEqual(other) &&
4517 (speculative_mode_ == other->AsRelationalOp()->speculative_mode_);
4518 }
4519
4520 PRINT_OPERANDS_TO_SUPPORT
4521
4522 private:
4523 const SpeculativeMode speculative_mode_;
4524 DISALLOW_COPY_AND_ASSIGN(RelationalOpInstr);
4525};
4526
// TODO(vegorov): ComparisonInstr should be switched to use IfThenElseInstr
// for materialization of true and false constants.
4529class IfThenElseInstr : public Definition {
4530 public:
4531 IfThenElseInstr(ComparisonInstr* comparison,
4532 Value* if_true,
4533 Value* if_false,
4534 intptr_t deopt_id)
4535 : Definition(deopt_id),
4536 comparison_(comparison),
4537 if_true_(Smi::Cast(if_true->BoundConstant()).Value()),
4538 if_false_(Smi::Cast(if_false->BoundConstant()).Value()) {
4539 // Adjust uses at the comparison.
4540 ASSERT(comparison->env() == NULL);
4541 for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
4542 comparison->InputAt(i)->set_instruction(this);
4543 }
4544 }
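
  // Example (sketch): `cond ? 1 : 0` can be materialized without control
  // flow as
  //
  //   new (zone) IfThenElseInstr(comparison, v_one, v_zero, deopt_id)
  //
  // where v_one/v_zero (hypothetical names) are Values bound to the Smi
  // constants 1 and 0, provided Supports(comparison, v_one, v_zero) holds.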
4545
4546 // Returns true if this combination of comparison and values flowing on
4547 // the true and false paths is supported on the current platform.
4548 static bool Supports(ComparisonInstr* comparison, Value* v1, Value* v2);
4549
4550 DECLARE_INSTRUCTION(IfThenElse)
4551
4552 intptr_t InputCount() const { return comparison()->InputCount(); }
4553
4554 Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
4555
4556 virtual bool ComputeCanDeoptimize() const {
4557 return comparison()->ComputeCanDeoptimize();
4558 }
4559
4560 virtual bool CanBecomeDeoptimizationTarget() const {
4561 return comparison()->CanBecomeDeoptimizationTarget();
4562 }
4563
4564 virtual intptr_t DeoptimizationTarget() const {
4565 return comparison()->DeoptimizationTarget();
4566 }
4567
4568 virtual Representation RequiredInputRepresentation(intptr_t i) const {
4569 return comparison()->RequiredInputRepresentation(i);
4570 }
4571
4572 virtual CompileType ComputeType() const;
4573
4574 virtual void InferRange(RangeAnalysis* analysis, Range* range);
4575
4576 ComparisonInstr* comparison() const { return comparison_; }
4577 intptr_t if_true() const { return if_true_; }
4578 intptr_t if_false() const { return if_false_; }
4579
4580 virtual bool AllowsCSE() const { return comparison()->AllowsCSE(); }
4581 virtual bool HasUnknownSideEffects() const {
4582 return comparison()->HasUnknownSideEffects();
4583 }
4584 virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
4585
4586 virtual bool AttributesEqual(Instruction* other) const {
4587 IfThenElseInstr* other_if_then_else = other->AsIfThenElse();
4588 return (comparison()->tag() == other_if_then_else->comparison()->tag()) &&
4589 comparison()->AttributesEqual(other_if_then_else->comparison()) &&
4590 (if_true_ == other_if_then_else->if_true_) &&
4591 (if_false_ == other_if_then_else->if_false_);
4592 }
4593
4594 virtual bool MayThrow() const { return comparison()->MayThrow(); }
4595
4596 PRINT_OPERANDS_TO_SUPPORT
4597
4598 private:
4599 virtual void RawSetInputAt(intptr_t i, Value* value) {
4600 comparison()->RawSetInputAt(i, value);
4601 }
4602
4603 ComparisonInstr* comparison_;
4604 const intptr_t if_true_;
4605 const intptr_t if_false_;
4606
4607 DISALLOW_COPY_AND_ASSIGN(IfThenElseInstr);
4608};
4609
4610class StaticCallInstr : public TemplateDartCall<0> {
4611 public:
4612 StaticCallInstr(TokenPosition token_pos,
4613 const Function& function,
4614 intptr_t type_args_len,
4615 const Array& argument_names,
4616 InputsArray* arguments,
4617 const ZoneGrowableArray<const ICData*>& ic_data_array,
4618 intptr_t deopt_id,
4619 ICData::RebindRule rebind_rule)
4620 : TemplateDartCall(deopt_id,
4621 type_args_len,
4622 argument_names,
4623 arguments,
4624 token_pos),
4625 ic_data_(NULL),
4626 call_count_(0),
4627 function_(function),
4628 rebind_rule_(rebind_rule),
4629 result_type_(NULL),
4630 is_known_list_constructor_(false),
4631 identity_(AliasIdentity::Unknown()) {
4632 ic_data_ = GetICData(ic_data_array, deopt_id, /*is_static_call=*/true);
4633 ASSERT(function.IsZoneHandle());
4634 ASSERT(!function.IsNull());
4635 }
4636
4637 StaticCallInstr(TokenPosition token_pos,
4638 const Function& function,
4639 intptr_t type_args_len,
4640 const Array& argument_names,
4641 InputsArray* arguments,
4642 intptr_t deopt_id,
4643 intptr_t call_count,
4644 ICData::RebindRule rebind_rule)
4645 : TemplateDartCall(deopt_id,
4646 type_args_len,
4647 argument_names,
4648 arguments,
4649 token_pos),
4650 ic_data_(NULL),
4651 call_count_(call_count),
4652 function_(function),
4653 rebind_rule_(rebind_rule),
4654 result_type_(NULL),
4655 is_known_list_constructor_(false),
4656 identity_(AliasIdentity::Unknown()) {
4657 ASSERT(function.IsZoneHandle());
4658 ASSERT(!function.IsNull());
4659 }
4660
4661 // Generate a replacement call instruction for an instance call which
4662 // has been found to have only one target.
4663 template <class C>
4664 static StaticCallInstr* FromCall(Zone* zone,
4665 const C* call,
4666 const Function& target,
4667 intptr_t call_count) {
4668 ASSERT(!call->HasPushArguments());
4669 InputsArray* args = new (zone) InputsArray(zone, call->ArgumentCount());
4670 for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
4671 args->Add(call->ArgumentValueAt(i)->CopyWithType());
4672 }
4673 StaticCallInstr* new_call = new (zone)
4674 StaticCallInstr(call->token_pos(), target, call->type_args_len(),
4675 call->argument_names(), args, call->deopt_id(),
4676 call_count, ICData::kNoRebind);
4677 if (call->result_type() != NULL) {
4678 new_call->result_type_ = call->result_type();
4679 }
4680 if (call->has_inlining_id()) {
4681 new_call->set_inlining_id(call->inlining_id());
4682 }
4683 new_call->set_entry_kind(call->entry_kind());
4684 return new_call;
4685 }
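
  // Example (sketch, names hypothetical): devirtualizing an instance call
  // whose single target is known:
  //
  //   StaticCallInstr* static_call = StaticCallInstr::FromCall(
  //       zone, call, unique_target, call->CallCount());
  //   call->ReplaceWith(static_call, current_iterator);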
4686
  // The ICData for static calls carries the call count.
4688 const ICData* ic_data() const { return ic_data_; }
4689 bool HasICData() const { return (ic_data() != NULL) && !ic_data()->IsNull(); }
4690
4691 void set_ic_data(const ICData* value) { ic_data_ = value; }
4692
4693 DECLARE_INSTRUCTION(StaticCall)
4694 virtual CompileType ComputeType() const;
4695 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4696
  // The static call's target function.
4698 const Function& function() const { return function_; }
4699
4700 virtual intptr_t CallCount() const {
4701 return ic_data() == NULL ? call_count_ : ic_data()->AggregateCount();
4702 }
4703
4704 virtual bool ComputeCanDeoptimize() const {
4705 return !CompilerState::Current().is_aot();
4706 }
4707
4708 virtual bool CanBecomeDeoptimizationTarget() const {
4709 // Static calls that are specialized by the optimizer (e.g. sqrt) need a
4710 // deoptimization descriptor before the call.
4711 return true;
4712 }
4713
4714 virtual bool HasUnknownSideEffects() const { return true; }
4715 virtual bool CanCallDart() const { return true; }
4716
4717 // Initialize result type of this call instruction if target is a recognized
4718 // method or has pragma annotation.
4719 // Returns true on success, false if result type is still unknown.
4720 bool InitResultType(Zone* zone);
4721
4722 void SetResultType(Zone* zone, CompileType new_type) {
4723 result_type_ = new (zone) CompileType(new_type);
4724 }
4725
4726 CompileType* result_type() const { return result_type_; }
4727
4728 intptr_t result_cid() const {
4729 if (result_type_ == NULL) {
4730 return kDynamicCid;
4731 }
4732 return result_type_->ToCid();
4733 }
4734
4735 bool is_known_list_constructor() const { return is_known_list_constructor_; }
4736 void set_is_known_list_constructor(bool value) {
4737 is_known_list_constructor_ = value;
4738 }
4739
4740 Code::EntryKind entry_kind() const { return entry_kind_; }
4741
4742 void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }
4743
4744 bool IsRecognizedFactory() const { return is_known_list_constructor(); }
4745
4746 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
4747 if (type_args_len() > 0 || function().IsFactory()) {
4748 if (idx == 0) {
4749 return kGuardInputs;
4750 }
4751 idx--;
4752 }
4753 return function_.is_unboxed_parameter_at(idx) ? kNotSpeculative
4754 : kGuardInputs;
4755 }
4756
4757 virtual intptr_t ArgumentsSize() const;
4758
4759 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
4760
4761 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
4762
4763 virtual Representation representation() const;
4764
4765 virtual AliasIdentity Identity() const { return identity_; }
4766 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
4767
4768 const CallTargets& Targets();
4769 const class BinaryFeedback& BinaryFeedback();
4770
4771 PRINT_OPERANDS_TO_SUPPORT
4772 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
4773 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
4774
4775 private:
4776 const ICData* ic_data_;
4777 const CallTargets* targets_ = nullptr;
4778 const class BinaryFeedback* binary_ = nullptr;
4779 const intptr_t call_count_;
4780 const Function& function_;
4781 const ICData::RebindRule rebind_rule_;
4782 CompileType* result_type_; // Known or inferred result type.
4783
4784 // 'True' for recognized list constructors.
4785 bool is_known_list_constructor_;
4786
4787 Code::EntryKind entry_kind_ = Code::EntryKind::kNormal;
4788
4789 AliasIdentity identity_;
4790
4791 DISALLOW_COPY_AND_ASSIGN(StaticCallInstr);
4792};
4793
4794class LoadLocalInstr : public TemplateDefinition<0, NoThrow> {
4795 public:
4796 LoadLocalInstr(const LocalVariable& local, TokenPosition token_pos)
4797 : local_(local), is_last_(false), token_pos_(token_pos) {}
4798
4799 DECLARE_INSTRUCTION(LoadLocal)
4800 virtual CompileType ComputeType() const;
4801
4802 const LocalVariable& local() const { return local_; }
4803
4804 virtual bool ComputeCanDeoptimize() const { return false; }
4805
4806 virtual bool HasUnknownSideEffects() const {
4807 UNREACHABLE(); // Eliminated by SSA construction.
4808 return false;
4809 }
4810
4811 void mark_last() { is_last_ = true; }
4812 bool is_last() const { return is_last_; }
4813
4814 virtual TokenPosition token_pos() const { return token_pos_; }
4815
4816 PRINT_OPERANDS_TO_SUPPORT
4817 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
4818
4819 private:
4820 const LocalVariable& local_;
4821 bool is_last_;
4822 const TokenPosition token_pos_;
4823
4824 DISALLOW_COPY_AND_ASSIGN(LoadLocalInstr);
4825};
4826
4827class DropTempsInstr : public Definition {
4828 public:
4829 DropTempsInstr(intptr_t num_temps, Value* value)
4830 : num_temps_(num_temps), value_(NULL) {
4831 if (value != NULL) {
4832 SetInputAt(0, value);
4833 }
4834 }
4835
4836 DECLARE_INSTRUCTION(DropTemps)
4837
4838 virtual intptr_t InputCount() const { return value_ != NULL ? 1 : 0; }
4839 virtual Value* InputAt(intptr_t i) const {
4840 ASSERT((value_ != NULL) && (i == 0));
4841 return value_;
4842 }
4843
4844 Value* value() const { return value_; }
4845
4846 intptr_t num_temps() const { return num_temps_; }
4847
4848 virtual CompileType ComputeType() const;
4849
4850 virtual bool ComputeCanDeoptimize() const { return false; }
4851
4852 virtual bool HasUnknownSideEffects() const {
4853 UNREACHABLE(); // Eliminated by SSA construction.
4854 return false;
4855 }
4856
4857 virtual bool MayThrow() const { return false; }
4858
4859 virtual TokenPosition token_pos() const { return TokenPosition::kTempMove; }
4860
4861 PRINT_OPERANDS_TO_SUPPORT
4862
4863 private:
4864 virtual void RawSetInputAt(intptr_t i, Value* value) { value_ = value; }
4865
4866 const intptr_t num_temps_;
4867 Value* value_;
4868
4869 DISALLOW_COPY_AND_ASSIGN(DropTempsInstr);
4870};
4871
// This instruction is used to reserve a slot on the expression stack that is
// later filled with a StoreLocal. The reserved slot is initially filled with
// a null value.
//
// Note: One must not use Constant(#null) to reserve expression stack space,
// because it would lead to incorrectly compiled unoptimized code. The graph
// builder would set Constant(#null) as an input definition to the
// instruction that consumes this value from the expression stack, not
// knowing that this value represents a placeholder, which might lead to
// issues if the instruction has a specialization for constant inputs (see
// https://dartbug.com/33195).
4882class MakeTempInstr : public TemplateDefinition<0, NoThrow, Pure> {
4883 public:
4884 explicit MakeTempInstr(Zone* zone)
4885 : null_(new (zone) ConstantInstr(Object::ZoneHandle())) {
    // Note: We put a ConstantInstr inside MakeTemp to simplify code
    // generation: having a ConstantInstr allows us to use
    // Location::Constant(null_) as an output location for this instruction.
4889 }
4890
4891 DECLARE_INSTRUCTION(MakeTemp)
4892
4893 virtual CompileType ComputeType() const { return CompileType::Dynamic(); }
4894
4895 virtual bool ComputeCanDeoptimize() const { return false; }
4896
4897 virtual bool HasUnknownSideEffects() const {
4898 UNREACHABLE(); // Eliminated by SSA construction.
4899 return false;
4900 }
4901
4902 virtual bool MayThrow() const { return false; }
4903
4904 virtual TokenPosition token_pos() const { return TokenPosition::kTempMove; }
4905
4906 PRINT_OPERANDS_TO_SUPPORT
4907
4908 private:
4909 ConstantInstr* null_;
4910
4911 DISALLOW_COPY_AND_ASSIGN(MakeTempInstr);
4912};
4913
4914class StoreLocalInstr : public TemplateDefinition<1, NoThrow> {
4915 public:
4916 StoreLocalInstr(const LocalVariable& local,
4917 Value* value,
4918 TokenPosition token_pos)
4919 : local_(local), is_dead_(false), is_last_(false), token_pos_(token_pos) {
4920 SetInputAt(0, value);
4921 }
4922
4923 DECLARE_INSTRUCTION(StoreLocal)
4924 virtual CompileType ComputeType() const;
4925
4926 const LocalVariable& local() const { return local_; }
4927 Value* value() const { return inputs_[0]; }
4928
4929 virtual bool ComputeCanDeoptimize() const { return false; }
4930
4931 void mark_dead() { is_dead_ = true; }
4932 bool is_dead() const { return is_dead_; }
4933
4934 void mark_last() { is_last_ = true; }
4935 bool is_last() const { return is_last_; }
4936
4937 virtual bool HasUnknownSideEffects() const {
4938 UNREACHABLE(); // Eliminated by SSA construction.
4939 return false;
4940 }
4941
4942 virtual TokenPosition token_pos() const { return token_pos_; }
4943
4944 PRINT_OPERANDS_TO_SUPPORT
4945 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
4946
4947 private:
4948 const LocalVariable& local_;
4949 bool is_dead_;
4950 bool is_last_;
4951 const TokenPosition token_pos_;
4952
4953 DISALLOW_COPY_AND_ASSIGN(StoreLocalInstr);
4954};
4955
4956class NativeCallInstr : public TemplateDartCall<0> {
4957 public:
4958 NativeCallInstr(const String* name,
4959 const Function* function,
4960 bool link_lazily,
4961 TokenPosition position,
4962 InputsArray* args)
4963 : TemplateDartCall(DeoptId::kNone,
4964 0,
4965 Array::null_array(),
4966 args,
4967 position),
4968 native_name_(name),
4969 function_(function),
4970 native_c_function_(NULL),
4971 is_bootstrap_native_(false),
4972 is_auto_scope_(true),
4973 link_lazily_(link_lazily),
4974 token_pos_(position) {
4975 ASSERT(name->IsZoneHandle());
4976 ASSERT(function->IsZoneHandle());
4977 }
4978
4979 DECLARE_INSTRUCTION(NativeCall)
4980
4981 const String& native_name() const { return *native_name_; }
4982 const Function& function() const { return *function_; }
4983 NativeFunction native_c_function() const { return native_c_function_; }
4984 bool is_bootstrap_native() const { return is_bootstrap_native_; }
4985 bool is_auto_scope() const { return is_auto_scope_; }
4986 bool link_lazily() const { return link_lazily_; }
4987 virtual TokenPosition token_pos() const { return token_pos_; }
4988
4989 virtual bool ComputeCanDeoptimize() const { return false; }
4990
4991 virtual bool HasUnknownSideEffects() const { return true; }
4992
4993 // Always creates an exit frame before more Dart code can be called.
4994 virtual bool CanCallDart() const { return false; }
4995
4996 void SetupNative();
4997
4998 PRINT_OPERANDS_TO_SUPPORT
4999 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
5000 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
5001
5002 private:
5003 void set_native_c_function(NativeFunction value) {
5004 native_c_function_ = value;
5005 }
5006
5007 void set_is_bootstrap_native(bool value) { is_bootstrap_native_ = value; }
5008 void set_is_auto_scope(bool value) { is_auto_scope_ = value; }
5009
5010 const String* native_name_;
5011 const Function* function_;
5012 NativeFunction native_c_function_;
5013 bool is_bootstrap_native_;
5014 bool is_auto_scope_;
5015 bool link_lazily_;
5016 const TokenPosition token_pos_;
5017
5018 DISALLOW_COPY_AND_ASSIGN(NativeCallInstr);
5019};
5020
// Performs a call to native C code. In contrast to NativeCall, the arguments
// are unboxed and passed through the native calling convention. However, not
// all Dart objects can be passed as arguments. Please see the FFI
// documentation for more details.
5025// TODO(35775): Add link to the documentation when it's written.
5026class FfiCallInstr : public Definition {
5027 public:
5028 FfiCallInstr(Zone* zone,
5029 intptr_t deopt_id,
5030 const compiler::ffi::CallMarshaller& marshaller)
5031 : Definition(deopt_id),
5032 zone_(zone),
5033 marshaller_(marshaller),
5034 inputs_(marshaller.num_args() + 1) {
5035 inputs_.FillWith(nullptr, 0, marshaller.num_args() + 1);
5036 }
5037
5038 DECLARE_INSTRUCTION(FfiCall)
5039
5040 // Number of arguments to the native function.
5041 intptr_t NativeArgCount() const { return InputCount() - 1; }
5042
5043 // Input index of the function pointer to invoke.
5044 intptr_t TargetAddressIndex() const { return NativeArgCount(); }
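
  // E.g. for a call with two native arguments the inputs are laid out as
  // [arg0, arg1, target address], so NativeArgCount() == 2 and
  // TargetAddressIndex() == 2.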
5045
5046 virtual intptr_t InputCount() const { return inputs_.length(); }
5047 virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
5048 virtual bool MayThrow() const {
5049 // By Dart_PropagateError.
5050 return true;
5051 }
5052
5053 // FfiCallInstr calls C code, which can call back into Dart.
5054 virtual bool ComputeCanDeoptimize() const {
5055 return !CompilerState::Current().is_aot();
5056 }
5057
5058 virtual bool HasUnknownSideEffects() const { return true; }
5059
5060 // Always creates an exit frame before more Dart code can be called.
5061 virtual bool CanCallDart() const { return false; }
5062
5063 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5064 virtual Representation representation() const;
5065
5066 // Returns true if we can assume generated code will be executable during a
5067 // safepoint.
5068 //
  // TODO(#37739): This should be true when dual-mapping is enabled as well,
  // but currently there are some bugs where it still switches code
  // protections.
5071 static bool CanExecuteGeneratedCodeInSafepoint() {
5072 return FLAG_precompiled_mode;
5073 }
5074
5075 PRINT_OPERANDS_TO_SUPPORT
5076
5077 private:
5078 virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
5079
5080 void EmitParamMoves(FlowGraphCompiler* compiler);
5081 void EmitReturnMoves(FlowGraphCompiler* compiler);
5082
5083 Zone* const zone_;
5084 const compiler::ffi::CallMarshaller& marshaller_;
5085
5086 GrowableArray<Value*> inputs_;
5087
5088 DISALLOW_COPY_AND_ASSIGN(FfiCallInstr);
5089};
5090
5091class EnterHandleScopeInstr : public TemplateDefinition<0, NoThrow> {
5092 public:
5093 enum class Kind { kEnterHandleScope = 0, kGetTopHandleScope = 1 };
5094
5095 explicit EnterHandleScopeInstr(Kind kind) : kind_(kind) {}
5096
5097 DECLARE_INSTRUCTION(EnterHandleScope)
5098
5099 virtual Representation representation() const { return kUnboxedIntPtr; }
5100 virtual bool ComputeCanDeoptimize() const { return false; }
5101 virtual bool HasUnknownSideEffects() const { return false; }
5102
5103 PRINT_OPERANDS_TO_SUPPORT
5104
5105 private:
5106 Kind kind_;
5107
5108 DISALLOW_COPY_AND_ASSIGN(EnterHandleScopeInstr);
5109};
5110
5111class ExitHandleScopeInstr : public TemplateInstruction<0, NoThrow> {
5112 public:
5113 ExitHandleScopeInstr() {}
5114
5115 DECLARE_INSTRUCTION(ExitHandleScope)
5116
5117 virtual bool ComputeCanDeoptimize() const { return false; }
5118 virtual bool HasUnknownSideEffects() const { return false; }
5119
5120 private:
5121 DISALLOW_COPY_AND_ASSIGN(ExitHandleScopeInstr);
5122};
5123
5124class AllocateHandleInstr : public TemplateDefinition<1, NoThrow> {
5125 public:
5126 explicit AllocateHandleInstr(Value* scope) { SetInputAt(kScope, scope); }
5127
5128 enum { kScope = 0 };
5129
5130 DECLARE_INSTRUCTION(AllocateHandle)
5131
5132 virtual intptr_t InputCount() const { return 1; }
5133 virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
5134 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5135 virtual Representation representation() const { return kUnboxedIntPtr; }
5136 virtual bool ComputeCanDeoptimize() const { return false; }
5137 virtual bool HasUnknownSideEffects() const { return false; }
5138
5139 private:
5140 DISALLOW_COPY_AND_ASSIGN(AllocateHandleInstr);
5141};
5142
5143class RawStoreFieldInstr : public TemplateInstruction<2, NoThrow> {
5144 public:
5145 RawStoreFieldInstr(Value* base, Value* value, int32_t offset)
5146 : offset_(offset) {
5147 SetInputAt(kBase, base);
5148 SetInputAt(kValue, value);
5149 }
5150
5151 enum { kBase = 0, kValue = 1 };
5152
5153 DECLARE_INSTRUCTION(RawStoreField)
5154
5155 virtual intptr_t InputCount() const { return 2; }
5156 virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
5157 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5158 virtual bool ComputeCanDeoptimize() const { return false; }
5159 virtual bool HasUnknownSideEffects() const { return false; }
5160
5161 private:
5162 const int32_t offset_;
5163
5164 DISALLOW_COPY_AND_ASSIGN(RawStoreFieldInstr);
5165};
5166
5167class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
5168 public:
5169 DebugStepCheckInstr(TokenPosition token_pos,
5170 PcDescriptorsLayout::Kind stub_kind,
5171 intptr_t deopt_id)
5172 : TemplateInstruction<0, NoThrow>(deopt_id),
5173 token_pos_(token_pos),
5174 stub_kind_(stub_kind) {}
5175
5176 DECLARE_INSTRUCTION(DebugStepCheck)
5177
5178 virtual TokenPosition token_pos() const { return token_pos_; }
5179 virtual bool ComputeCanDeoptimize() const { return false; }
5180 virtual bool HasUnknownSideEffects() const { return true; }
5181 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
5182
5183 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
5184
5185 private:
5186 const TokenPosition token_pos_;
5187 const PcDescriptorsLayout::Kind stub_kind_;
5188
5189 DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
5190};
5191
5192enum StoreBarrierType { kNoStoreBarrier, kEmitStoreBarrier };
5193
// StoreInstanceField instruction represents a store of the given [value] into
// the specified [slot] on the [instance] object. [emit_store_barrier] allows
// specifying whether the store should omit the write barrier. [kind] specifies
// whether this store is an initializing store, i.e. the first store into a
// field after the allocation.
//
// In JIT mode a slot might be subject to the field unboxing optimization:
// if field type profiling shows that this slot always contains a double or
// SIMD value then this field becomes "unboxed" - in this case when storing
// into such a field we update the payload of the box referenced by the field,
// rather than updating the field itself.
//
// Note: even if [emit_store_barrier] is set to [kEmitStoreBarrier] the store
// can still omit the barrier if it establishes that it is not needed.
//
// Note: stores generated from the constructor initializer list and from
// field initializers *must* be marked as initializing. Initializing stores
// into unboxed fields are responsible for allocating the mutable box which
// would be mutated by subsequent stores.
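//
// For example, in this illustrative Dart class the constructor's store into
// `x` is an initializing store (Kind::kInitializing), while the store in
// `move` is an ordinary one (Kind::kOther):
//
//   class Point {
//     double x;
//     Point(this.x);       // first store into the freshly allocated object
//     void move(double dx) {
//       x = x + dx;        // subsequent store into an existing object
//     }
//   }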
5213class StoreInstanceFieldInstr : public TemplateInstruction<2, NoThrow> {
5214 public:
5215 enum class Kind {
5216 // Store is known to be the first store into a slot of an object after
5217 // object was allocated and before it escapes (e.g. stores in constructor
5218 // initializer list).
5219 kInitializing,
5220
5221 // All other stores.
5222 kOther,
5223 };
5224
5225 StoreInstanceFieldInstr(const Slot& slot,
5226 Value* instance,
5227 Value* value,
5228 StoreBarrierType emit_store_barrier,
5229 TokenPosition token_pos,
5230 Kind kind = Kind::kOther)
5231 : slot_(slot),
5232 emit_store_barrier_(emit_store_barrier),
5233 token_pos_(token_pos),
5234 is_initialization_(kind == Kind::kInitializing) {
5235 SetInputAt(kInstancePos, instance);
5236 SetInputAt(kValuePos, value);
5237 }
5238
5239 // Convenience constructor that looks up an IL Slot for the given [field].
5240 StoreInstanceFieldInstr(const Field& field,
5241 Value* instance,
5242 Value* value,
5243 StoreBarrierType emit_store_barrier,
5244 TokenPosition token_pos,
5245 const ParsedFunction* parsed_function,
5246 Kind kind = Kind::kOther)
5247 : StoreInstanceFieldInstr(Slot::Get(field, parsed_function),
5248 instance,
5249 value,
5250 emit_store_barrier,
5251 token_pos,
5252 kind) {}
5253
5254 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    // In AOT, unboxing is done based on TFA, so it is proven to be correct
    // and can never deoptimize.
5257 return (IsUnboxedStore() && CompilerState::Current().is_aot())
5258 ? kNotSpeculative
5259 : kGuardInputs;
5260 }
5261
5262 DECLARE_INSTRUCTION(StoreInstanceField)
5263
5264 enum { kInstancePos = 0, kValuePos = 1 };
5265
5266 Value* instance() const { return inputs_[kInstancePos]; }
5267 const Slot& slot() const { return slot_; }
5268 Value* value() const { return inputs_[kValuePos]; }
5269
5270 virtual TokenPosition token_pos() const { return token_pos_; }
5271 bool is_initialization() const { return is_initialization_; }
5272
5273 bool ShouldEmitStoreBarrier() const {
5274 if (instance()->definition() == value()->definition()) {
5275 // `x.slot = x` cannot create an old->new or old&marked->old&unmarked
5276 // reference.
5277 return false;
5278 }
5279
5280 if (value()->definition()->Type()->IsBool()) {
5281 return false;
5282 }
5283 return value()->NeedsWriteBarrier() &&
5284 (emit_store_barrier_ == kEmitStoreBarrier);
5285 }
5286
5287 void set_emit_store_barrier(StoreBarrierType value) {
5288 emit_store_barrier_ = value;
5289 }
5290
5291 virtual bool CanTriggerGC() const {
5292 return IsUnboxedStore() || IsPotentialUnboxedStore();
5293 }
5294
5295 virtual bool ComputeCanDeoptimize() const { return false; }
5296
5297 // May require a deoptimization target for input conversions.
5298 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
5299
5300 // Currently CSE/LICM don't operate on any instructions that can be affected
5301 // by stores/loads. LoadOptimizer handles loads separately. Hence stores
5302 // are marked as having no side-effects.
5303 virtual bool HasUnknownSideEffects() const { return false; }
5304
5305 bool IsUnboxedStore() const;
5306 bool IsPotentialUnboxedStore() const;
5307
5308 virtual Representation RequiredInputRepresentation(intptr_t index) const;
5309
5310 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
5311
5312 PRINT_OPERANDS_TO_SUPPORT
5313 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
5314 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
5315
5316 private:
5317 friend class JitCallSpecializer; // For ASSERT(initialization_).
5318
5319 intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
5320
5321 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
5322 // Write barrier is skipped for nullable and non-nullable smis.
5323 ASSERT(value()->Type()->ToNullableCid() != kSmiCid);
5324 return value()->Type()->CanBeSmi() ? compiler::Assembler::kValueCanBeSmi
5325 : compiler::Assembler::kValueIsNotSmi;
5326 }
5327
5328 const Slot& slot_;
5329 StoreBarrierType emit_store_barrier_;
5330 const TokenPosition token_pos_;
5331 // Marks initializing stores. E.g. in the constructor.
5332 const bool is_initialization_;
5333
5334 DISALLOW_COPY_AND_ASSIGN(StoreInstanceFieldInstr);
5335};
5336
5337class GuardFieldInstr : public TemplateInstruction<1, NoThrow, Pure> {
5338 public:
5339 GuardFieldInstr(Value* value, const Field& field, intptr_t deopt_id)
5340 : TemplateInstruction(deopt_id), field_(field) {
5341 SetInputAt(0, value);
5342 CheckField(field);
5343 }
5344
5345 Value* value() const { return inputs_[0]; }
5346
5347 const Field& field() const { return field_; }
5348
5349 virtual bool ComputeCanDeoptimize() const { return true; }
5350 virtual bool CanBecomeDeoptimizationTarget() const {
5351 // Ensure that we record kDeopt PC descriptor in unoptimized code.
5352 return true;
5353 }
5354
5355 PRINT_OPERANDS_TO_SUPPORT
5356
5357 private:
5358 const Field& field_;
5359
5360 DISALLOW_COPY_AND_ASSIGN(GuardFieldInstr);
5361};
5362
5363class GuardFieldClassInstr : public GuardFieldInstr {
5364 public:
5365 GuardFieldClassInstr(Value* value, const Field& field, intptr_t deopt_id)
5366 : GuardFieldInstr(value, field, deopt_id) {
5367 CheckField(field);
5368 }
5369
5370 DECLARE_INSTRUCTION(GuardFieldClass)
5371
5372 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
5373
5374 virtual bool AttributesEqual(Instruction* other) const;
5375
5376 private:
5377 DISALLOW_COPY_AND_ASSIGN(GuardFieldClassInstr);
5378};
5379
5380class GuardFieldLengthInstr : public GuardFieldInstr {
5381 public:
5382 GuardFieldLengthInstr(Value* value, const Field& field, intptr_t deopt_id)
5383 : GuardFieldInstr(value, field, deopt_id) {
5384 CheckField(field);
5385 }
5386
5387 DECLARE_INSTRUCTION(GuardFieldLength)
5388
5389 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
5390
5391 virtual bool AttributesEqual(Instruction* other) const;
5392
5393 private:
5394 DISALLOW_COPY_AND_ASSIGN(GuardFieldLengthInstr);
5395};
5396
// For a field of static type G<T0, ..., Tn> and a stored value of runtime
// type T, checks that the type arguments of T at G exactly match
// <T0, ..., Tn> and updates the guarded state
// (FieldLayout::static_type_exactness_state_) accordingly.
//
// See StaticTypeExactnessState for more information.
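//
// For example (illustrative): for a field declared as `List<int> xs;`,
// storing a literal `<int>[]` leaves the guarded state exact, while storing
// a value whose type arguments at List do not match `<int>` (e.g. a
// `List<num>` flowing in through a dynamic or covariant assignment)
// invalidates the exactness guard and may deoptimize dependent code.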
5403class GuardFieldTypeInstr : public GuardFieldInstr {
5404 public:
5405 GuardFieldTypeInstr(Value* value, const Field& field, intptr_t deopt_id)
5406 : GuardFieldInstr(value, field, deopt_id) {
5407 CheckField(field);
5408 }
5409
5410 DECLARE_INSTRUCTION(GuardFieldType)
5411
5412 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
5413
5414 virtual bool AttributesEqual(Instruction* other) const;
5415
5416 private:
5417 DISALLOW_COPY_AND_ASSIGN(GuardFieldTypeInstr);
5418};
5419
5420class LoadStaticFieldInstr : public TemplateDefinition<0, Throws> {
5421 public:
5422 LoadStaticFieldInstr(const Field& field,
5423 TokenPosition token_pos,
5424 bool calls_initializer = false,
5425 intptr_t deopt_id = DeoptId::kNone)
5426 : TemplateDefinition(deopt_id),
5427 field_(field),
5428 token_pos_(token_pos),
5429 calls_initializer_(calls_initializer) {
5430 ASSERT(!calls_initializer || (deopt_id != DeoptId::kNone));
5431 }
5432
5433 DECLARE_INSTRUCTION(LoadStaticField)
5434
5435 virtual CompileType ComputeType() const;
5436
5437 const Field& field() const { return field_; }
5438 bool IsFieldInitialized() const;
5439
5440 bool calls_initializer() const { return calls_initializer_; }
5441 void set_calls_initializer(bool value) { calls_initializer_ = value; }
5442
5443 virtual bool AllowsCSE() const {
5444 return field().is_final() && !FLAG_fields_may_be_reset;
5445 }
5446
5447 virtual bool ComputeCanDeoptimize() const { return calls_initializer(); }
5448 virtual bool HasUnknownSideEffects() const { return calls_initializer(); }
5449 virtual bool CanTriggerGC() const { return calls_initializer(); }
5450 virtual bool MayThrow() const { return calls_initializer(); }
5451
5452 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5453
5454 virtual bool AttributesEqual(Instruction* other) const;
5455
5456 virtual TokenPosition token_pos() const { return token_pos_; }
5457
5458 PRINT_OPERANDS_TO_SUPPORT
5459
5460 private:
5461 const Field& field_;
5462 const TokenPosition token_pos_;
5463 bool calls_initializer_;
5464
5465 DISALLOW_COPY_AND_ASSIGN(LoadStaticFieldInstr);
5466};
5467
5468class StoreStaticFieldInstr : public TemplateDefinition<1, NoThrow> {
5469 public:
5470 StoreStaticFieldInstr(const Field& field,
5471 Value* value,
5472 TokenPosition token_pos)
5473 : field_(field), token_pos_(token_pos) {
5474 ASSERT(field.IsZoneHandle());
5475 SetInputAt(kValuePos, value);
5476 CheckField(field);
5477 }
5478
5479 enum { kValuePos = 0 };
5480
5481 DECLARE_INSTRUCTION(StoreStaticField)
5482
5483 const Field& field() const { return field_; }
5484 Value* value() const { return inputs_[kValuePos]; }
5485
5486 virtual bool ComputeCanDeoptimize() const { return false; }
5487
5488 // Currently CSE/LICM don't operate on any instructions that can be affected
5489 // by stores/loads. LoadOptimizer handles loads separately. Hence stores
5490 // are marked as having no side-effects.
5491 virtual bool HasUnknownSideEffects() const { return false; }
5492
5493 virtual TokenPosition token_pos() const { return token_pos_; }
5494
5495 PRINT_OPERANDS_TO_SUPPORT
5496
5497 private:
5498 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
5499 ASSERT(value()->Type()->ToNullableCid() != kSmiCid);
5500 return value()->Type()->CanBeSmi() ? compiler::Assembler::kValueCanBeSmi
5501 : compiler::Assembler::kValueIsNotSmi;
5502 }
5503
5504 const Field& field_;
5505 const TokenPosition token_pos_;
5506
5507 DISALLOW_COPY_AND_ASSIGN(StoreStaticFieldInstr);
5508};
5509
5510enum AlignmentType {
5511 kUnalignedAccess,
5512 kAlignedAccess,
5513};
5514
5515class LoadIndexedInstr : public TemplateDefinition<2, NoThrow> {
5516 public:
5517 LoadIndexedInstr(Value* array,
5518 Value* index,
5519 bool index_unboxed,
5520 intptr_t index_scale,
5521 intptr_t class_id,
5522 AlignmentType alignment,
5523 intptr_t deopt_id,
5524 TokenPosition token_pos,
5525 CompileType* result_type = nullptr);
5526
5527 TokenPosition token_pos() const { return token_pos_; }
5528
5529 DECLARE_INSTRUCTION(LoadIndexed)
5530 virtual CompileType ComputeType() const;
5531
5532 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5533 ASSERT(idx == 0 || idx == 1);
5534 // The array may be tagged or untagged (for external arrays).
5535 if (idx == 0) return kNoRepresentation;
5536
5537 if (index_unboxed_) {
5538#if defined(TARGET_ARCH_IS_64_BIT)
5539 return kUnboxedInt64;
5540#else
5541 return kUnboxedUint32;
5542#endif
5543 } else {
5544 return kTagged; // Index is a smi.
5545 }
5546 }
5547
5548 bool IsExternal() const {
5549 return array()->definition()->representation() == kUntagged;
5550 }
5551
5552 Value* array() const { return inputs_[0]; }
5553 Value* index() const { return inputs_[1]; }
5554 intptr_t index_scale() const { return index_scale_; }
5555 intptr_t class_id() const { return class_id_; }
5556 bool aligned() const { return alignment_ == kAlignedAccess; }
5557
5558 virtual bool ComputeCanDeoptimize() const {
5559 return GetDeoptId() != DeoptId::kNone;
5560 }
5561
5562 virtual Representation representation() const;
5563 virtual void InferRange(RangeAnalysis* analysis, Range* range);
5564
5565 virtual bool HasUnknownSideEffects() const { return false; }
5566
5567 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5568
5569 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
5570
5571 private:
5572 const bool index_unboxed_;
5573 const intptr_t index_scale_;
5574 const intptr_t class_id_;
5575 const AlignmentType alignment_;
5576 const TokenPosition token_pos_;
5577 CompileType* result_type_; // derived from call
5578
5579 DISALLOW_COPY_AND_ASSIGN(LoadIndexedInstr);
5580};
5581
5582// Loads the specified number of code units from the given string, packing
5583// multiple code units into a single datatype. In essence, this is a specialized
5584// version of LoadIndexedInstr which accepts only string targets and can load
5585// multiple elements at once. The result datatype differs depending on the
5586// string type, element count, and architecture; if possible, the result is
5587// packed into a Smi, falling back to a Mint otherwise.
5588// TODO(zerny): Add support for loading into UnboxedInt32x4.
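//
// For example (illustrative): scanning a one-byte string two code units at
// a time, a single LoadCodeUnits replaces two LoadIndexed operations and is
// conceptually equivalent to:
//
//   s.codeUnitAt(i) | (s.codeUnitAt(i + 1) << 8)
//
// with the 16-bit result packed into a Smi.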
5589class LoadCodeUnitsInstr : public TemplateDefinition<2, NoThrow> {
5590 public:
5591 LoadCodeUnitsInstr(Value* str,
5592 Value* index,
5593 intptr_t element_count,
5594 intptr_t class_id,
5595 TokenPosition token_pos)
5596 : class_id_(class_id),
5597 token_pos_(token_pos),
5598 element_count_(element_count),
5599 representation_(kTagged) {
5600 ASSERT(element_count == 1 || element_count == 2 || element_count == 4);
5601 ASSERT(IsStringClassId(class_id));
5602 SetInputAt(0, str);
5603 SetInputAt(1, index);
5604 }
5605
5606 TokenPosition token_pos() const { return token_pos_; }
5607
5608 DECLARE_INSTRUCTION(LoadCodeUnits)
5609 virtual CompileType ComputeType() const;
5610
5611 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5612 if (idx == 0) {
5613 // The string may be tagged or untagged (for external strings).
5614 return kNoRepresentation;
5615 }
5616 ASSERT(idx == 1);
5617 return kTagged;
5618 }
5619
5620 bool IsExternal() const {
5621 return array()->definition()->representation() == kUntagged;
5622 }
5623
5624 Value* array() const { return inputs_[0]; }
5625 Value* index() const { return inputs_[1]; }
5626
5627 intptr_t index_scale() const {
5628 return compiler::target::Instance::ElementSizeFor(class_id_);
5629 }
5630
5631 intptr_t class_id() const { return class_id_; }
5632 intptr_t element_count() const { return element_count_; }
5633
5634 bool can_pack_into_smi() const {
5635 return element_count() <= kSmiBits / (index_scale() * kBitsPerByte);
5636 }
5637
5638 virtual bool ComputeCanDeoptimize() const { return false; }
5639
5640 virtual Representation representation() const { return representation_; }
5641 void set_representation(Representation repr) { representation_ = repr; }
5642 virtual void InferRange(RangeAnalysis* analysis, Range* range);
5643
5644 virtual bool HasUnknownSideEffects() const { return false; }
5645
5646 private:
5647 const intptr_t class_id_;
5648 const TokenPosition token_pos_;
5649 const intptr_t element_count_;
5650 Representation representation_;
5651
5652 DISALLOW_COPY_AND_ASSIGN(LoadCodeUnitsInstr);
5653};
5654
5655class OneByteStringFromCharCodeInstr
5656 : public TemplateDefinition<1, NoThrow, Pure> {
5657 public:
5658 explicit OneByteStringFromCharCodeInstr(Value* char_code) {
5659 SetInputAt(0, char_code);
5660 }
5661
5662 DECLARE_INSTRUCTION(OneByteStringFromCharCode)
5663 virtual CompileType ComputeType() const;
5664
5665 Value* char_code() const { return inputs_[0]; }
5666
5667 virtual bool ComputeCanDeoptimize() const { return false; }
5668
5669 virtual bool AttributesEqual(Instruction* other) const { return true; }
5670
5671 private:
5672 DISALLOW_COPY_AND_ASSIGN(OneByteStringFromCharCodeInstr);
5673};
5674
5675class StringToCharCodeInstr : public TemplateDefinition<1, NoThrow, Pure> {
5676 public:
5677 StringToCharCodeInstr(Value* str, intptr_t cid) : cid_(cid) {
5678 ASSERT(str != NULL);
5679 SetInputAt(0, str);
5680 }
5681
5682 DECLARE_INSTRUCTION(StringToCharCode)
5683 virtual CompileType ComputeType() const;
5684
5685 Value* str() const { return inputs_[0]; }
5686
5687 virtual bool ComputeCanDeoptimize() const { return false; }
5688
5689 virtual bool AttributesEqual(Instruction* other) const {
5690 return other->AsStringToCharCode()->cid_ == cid_;
5691 }
5692
5693 private:
5694 const intptr_t cid_;
5695
5696 DISALLOW_COPY_AND_ASSIGN(StringToCharCodeInstr);
5697};
5698
5699class StringInterpolateInstr : public TemplateDefinition<1, Throws> {
5700 public:
5701 StringInterpolateInstr(Value* value,
5702 TokenPosition token_pos,
5703 intptr_t deopt_id)
5704 : TemplateDefinition(deopt_id),
5705 token_pos_(token_pos),
5706 function_(Function::ZoneHandle()) {
5707 SetInputAt(0, value);
5708 }
5709
5710 Value* value() const { return inputs_[0]; }
5711 virtual TokenPosition token_pos() const { return token_pos_; }
5712
5713 virtual CompileType ComputeType() const;
5714 // Issues a static call to Dart code which calls toString on objects.
5715 virtual bool HasUnknownSideEffects() const { return true; }
5716 virtual bool CanCallDart() const { return true; }
5717 virtual bool ComputeCanDeoptimize() const {
5718 return !CompilerState::Current().is_aot();
5719 }
5720
5721 const Function& CallFunction() const;
5722
5723 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5724
5725 DECLARE_INSTRUCTION(StringInterpolate)
5726
5727 private:
5728 const TokenPosition token_pos_;
5729 Function& function_;
5730
5731 DISALLOW_COPY_AND_ASSIGN(StringInterpolateInstr);
5732};
5733
5734// Scanning instruction to compute the result size and decoding parameters
5735// for the UTF-8 decoder. Equivalent to:
5736//
5737// int _scan(Uint8List bytes, int start, int end, _OneByteString table,
5738// _Utf8Decoder decoder) {
5739// int size = 0;
5740// int flags = 0;
5741// for (int i = start; i < end; i++) {
5742// int t = table.codeUnitAt(bytes[i]);
5743// size += t & sizeMask;
5744// flags |= t;
5745// }
5746// decoder._scanFlags |= flags & flagsMask;
5747// return size;
5748// }
5749//
5750// under these assumptions:
5751// - The start and end inputs are within the bounds of bytes and in smi range.
5752// - The decoder._scanFlags field is unboxed or contains a smi.
5753// - The first 128 entries of the table have the value 1.
5754class Utf8ScanInstr : public TemplateDefinition<5, NoThrow> {
5755 public:
5756 Utf8ScanInstr(Value* decoder,
5757 Value* bytes,
5758 Value* start,
5759 Value* end,
5760 Value* table,
5761 const Slot& decoder_scan_flags_field)
5762 : scan_flags_field_(decoder_scan_flags_field) {
5763 SetInputAt(0, decoder);
5764 SetInputAt(1, bytes);
5765 SetInputAt(2, start);
5766 SetInputAt(3, end);
5767 SetInputAt(4, table);
5768 }
5769
5770 DECLARE_INSTRUCTION(Utf8Scan)
5771
5772 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx >= 0 && idx <= 4);
5774 // The start and end inputs are unboxed, but in smi range.
5775 if (idx == 2 || idx == 3) return kUnboxedIntPtr;
5776 return kTagged;
5777 }
5778
5779 virtual Representation representation() const { return kUnboxedIntPtr; }
5780
5781 virtual CompileType ComputeType() const { return CompileType::Int(); }
5782 virtual bool HasUnknownSideEffects() const { return true; }
5783 virtual bool ComputeCanDeoptimize() const { return false; }
5784 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
5785
5786 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
5787 return kNotSpeculative;
5788 }
5789
5790 virtual bool AttributesEqual(Instruction* other) const {
5791 return scan_flags_field_.Equals(&other->AsUtf8Scan()->scan_flags_field_);
5792 }
5793
5794 bool IsScanFlagsUnboxed() const;
5795
5796 PRINT_TO_SUPPORT
5797
5798 private:
5799 const Slot& scan_flags_field_;
5800
5801 DISALLOW_COPY_AND_ASSIGN(Utf8ScanInstr);
5802};
5803
5804class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
5805 public:
5806 StoreIndexedInstr(Value* array,
5807 Value* index,
5808 Value* value,
5809 StoreBarrierType emit_store_barrier,
5810 bool index_unboxed,
5811 intptr_t index_scale,
5812 intptr_t class_id,
5813 AlignmentType alignment,
5814 intptr_t deopt_id,
5815 TokenPosition token_pos,
5816 SpeculativeMode speculative_mode = kGuardInputs);
5817 DECLARE_INSTRUCTION(StoreIndexed)
5818
5819 enum { kArrayPos = 0, kIndexPos = 1, kValuePos = 2 };
5820
5821 Value* array() const { return inputs_[kArrayPos]; }
5822 Value* index() const { return inputs_[kIndexPos]; }
5823 Value* value() const { return inputs_[kValuePos]; }
5824
5825 intptr_t index_scale() const { return index_scale_; }
5826 intptr_t class_id() const { return class_id_; }
5827 bool aligned() const { return alignment_ == kAlignedAccess; }
5828
5829 bool ShouldEmitStoreBarrier() const {
5830 if (array()->definition() == value()->definition()) {
5831 // `x[slot] = x` cannot create an old->new or old&marked->old&unmarked
5832 // reference.
5833 return false;
5834 }
5835
5836 if (value()->definition()->Type()->IsBool()) {
5837 return false;
5838 }
5839 return value()->NeedsWriteBarrier() &&
5840 (emit_store_barrier_ == kEmitStoreBarrier);
5841 }
5842
5843 void set_emit_store_barrier(StoreBarrierType value) {
5844 emit_store_barrier_ = value;
5845 }
5846
5847 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
5848 return speculative_mode_;
5849 }
5850
5851 virtual bool ComputeCanDeoptimize() const { return false; }
5852
5853 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5854
5855 bool IsExternal() const {
5856 return array()->definition()->representation() == kUntagged;
5857 }
5858
5859 virtual intptr_t DeoptimizationTarget() const {
5860 // Direct access since this instruction cannot deoptimize, and the deopt-id
5861 // was inherited from another instruction that could deoptimize.
5862 return GetDeoptId();
5863 }
5864
5865 virtual bool HasUnknownSideEffects() const { return false; }
5866
5867 void PrintOperandsTo(BaseTextBuffer* f) const;
5868
5869 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
5870
5871 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
5872
5873 private:
5874 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
5875 return compiler::Assembler::kValueCanBeSmi;
5876 }
5877
5878 StoreBarrierType emit_store_barrier_;
5879 const bool index_unboxed_;
5880 const intptr_t index_scale_;
5881 const intptr_t class_id_;
5882 const AlignmentType alignment_;
5883 const TokenPosition token_pos_;
5884 const SpeculativeMode speculative_mode_;
5885
5886 DISALLOW_COPY_AND_ASSIGN(StoreIndexedInstr);
5887};
5888
// Boolean negation, i.e. value ? false : true. Note that operator ! is
// built-in and not user-overridable in Dart.
5890class BooleanNegateInstr : public TemplateDefinition<1, NoThrow> {
5891 public:
5892 explicit BooleanNegateInstr(Value* value) { SetInputAt(0, value); }
5893
5894 DECLARE_INSTRUCTION(BooleanNegate)
5895 virtual CompileType ComputeType() const;
5896
5897 Value* value() const { return inputs_[0]; }
5898
5899 virtual bool ComputeCanDeoptimize() const { return false; }
5900
5901 virtual bool HasUnknownSideEffects() const { return false; }
5902
5903 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5904
5905 private:
5906 DISALLOW_COPY_AND_ASSIGN(BooleanNegateInstr);
5907};
5908
5909class InstanceOfInstr : public TemplateDefinition<3, Throws> {
5910 public:
5911 InstanceOfInstr(TokenPosition token_pos,
5912 Value* value,
5913 Value* instantiator_type_arguments,
5914 Value* function_type_arguments,
5915 const AbstractType& type,
5916 intptr_t deopt_id)
5917 : TemplateDefinition(deopt_id), token_pos_(token_pos), type_(type) {
5918 ASSERT(!type.IsNull());
5919 SetInputAt(0, value);
5920 SetInputAt(1, instantiator_type_arguments);
5921 SetInputAt(2, function_type_arguments);
5922 }
5923
5924 DECLARE_INSTRUCTION(InstanceOf)
5925 virtual CompileType ComputeType() const;
5926
5927 Value* value() const { return inputs_[0]; }
5928 Value* instantiator_type_arguments() const { return inputs_[1]; }
5929 Value* function_type_arguments() const { return inputs_[2]; }
5930
5931 const AbstractType& type() const { return type_; }
5932 virtual TokenPosition token_pos() const { return token_pos_; }
5933
5934 virtual bool ComputeCanDeoptimize() const {
5935 return !CompilerState::Current().is_aot();
5936 }
5937
5938 virtual bool HasUnknownSideEffects() const { return false; }
5939
5940 PRINT_OPERANDS_TO_SUPPORT
5941
5942 private:
  const TokenPosition token_pos_;
  const AbstractType& type_;
5947
5948 DISALLOW_COPY_AND_ASSIGN(InstanceOfInstr);
5949};
5950
5951// Subclasses of 'AllocationInstr' must maintain the invariant that if
5952// 'WillAllocateNewOrRemembered' is true, then the result of the allocation must
5953// either reside in new space or be in the store buffer.
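//
// This invariant is what lets initializing stores into the freshly allocated
// object skip the write barrier: objects in new space or in the store buffer
// are always revisited by the GC.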
5954class AllocationInstr : public Definition {
5955 public:
5956 explicit AllocationInstr(intptr_t deopt_id = DeoptId::kNone)
5957 : Definition(deopt_id) {}
5958
5959 // TODO(sjindel): Update these conditions when the incremental write barrier
5960 // is added.
5961 virtual bool WillAllocateNewOrRemembered() const = 0;
5962
5963 DEFINE_INSTRUCTION_TYPE_CHECK(Allocation);
5964
5965 private:
5966 DISALLOW_COPY_AND_ASSIGN(AllocationInstr);
5967};
5968
5969template <intptr_t N, typename ThrowsTrait>
5970class TemplateAllocation : public AllocationInstr {
5971 public:
5972 explicit TemplateAllocation(intptr_t deopt_id = DeoptId::kNone)
5973 : AllocationInstr(deopt_id), inputs_() {}
5974
5975 virtual intptr_t InputCount() const { return N; }
5976 virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
5977
5978 virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }
5979
5980 protected:
5981 EmbeddedArray<Value*, N> inputs_;
5982
5983 private:
5984 friend class BranchInstr;
5985 friend class IfThenElseInstr;
5986
5987 virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
5988};
5989
5990class AllocateObjectInstr : public AllocationInstr {
5991 public:
5992 AllocateObjectInstr(TokenPosition token_pos,
5993 const Class& cls,
5994 Value* type_arguments = nullptr)
5995 : token_pos_(token_pos),
5996 cls_(cls),
5997 type_arguments_(type_arguments),
5998 identity_(AliasIdentity::Unknown()),
5999 closure_function_(Function::ZoneHandle()) {
6000 ASSERT((cls.NumTypeArguments() > 0) == (type_arguments != nullptr));
6001 if (type_arguments != nullptr) {
6002 SetInputAt(0, type_arguments);
6003 }
6004 }
6005
6006 DECLARE_INSTRUCTION(AllocateObject)
6007 virtual CompileType ComputeType() const;
6008
6009 const Class& cls() const { return cls_; }
6010 virtual TokenPosition token_pos() const { return token_pos_; }
6011 Value* type_arguments() const { return type_arguments_; }
6012
6013 const Function& closure_function() const { return closure_function_; }
6014 void set_closure_function(const Function& function) {
6015 closure_function_ = function.raw();
6016 }
6017
6018 virtual intptr_t InputCount() const {
6019 return (type_arguments_ != nullptr) ? 1 : 0;
6020 }
6021 virtual Value* InputAt(intptr_t i) const {
6022 ASSERT(type_arguments_ != nullptr && i == 0);
6023 return type_arguments_;
6024 }
6025
6026 virtual bool MayThrow() const { return false; }
6027
6028 virtual bool ComputeCanDeoptimize() const { return false; }
6029
6030 virtual bool HasUnknownSideEffects() const { return false; }
6031
6032 virtual AliasIdentity Identity() const { return identity_; }
6033 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
6034
6035 virtual bool WillAllocateNewOrRemembered() const {
6036 return WillAllocateNewOrRemembered(cls());
6037 }
6038
6039 static bool WillAllocateNewOrRemembered(const Class& cls) {
6040 return Heap::IsAllocatableInNewSpace(cls.target_instance_size());
6041 }
6042
6043 PRINT_OPERANDS_TO_SUPPORT
6044 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
6045 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
6046
6047 private:
6048 virtual void RawSetInputAt(intptr_t i, Value* value) {
6049 ASSERT((type_arguments_ != nullptr) && (i == 0));
6050 ASSERT(value != nullptr);
6051 type_arguments_ = value;
6052 }
6053
6054 const TokenPosition token_pos_;
6055 const Class& cls_;
6056 Value* type_arguments_;
6057 AliasIdentity identity_;
6058 Function& closure_function_;
6059
6060 DISALLOW_COPY_AND_ASSIGN(AllocateObjectInstr);
6061};
6062
6063class AllocateUninitializedContextInstr
6064 : public TemplateAllocation<0, NoThrow> {
6065 public:
6066 AllocateUninitializedContextInstr(TokenPosition token_pos,
6067 intptr_t num_context_variables);
6068
6069 DECLARE_INSTRUCTION(AllocateUninitializedContext)
6070 virtual CompileType ComputeType() const;
6071
6072 virtual TokenPosition token_pos() const { return token_pos_; }
6073 intptr_t num_context_variables() const { return num_context_variables_; }
6074
6075 virtual bool ComputeCanDeoptimize() const { return false; }
6076
6077 virtual bool HasUnknownSideEffects() const { return false; }
6078
6079 virtual bool WillAllocateNewOrRemembered() const {
6080 return compiler::target::WillAllocateNewOrRememberedContext(
6081 num_context_variables_);
6082 }
6083
6084 virtual AliasIdentity Identity() const { return identity_; }
6085 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
6086
6087 PRINT_OPERANDS_TO_SUPPORT
6088
6089 private:
6090 const TokenPosition token_pos_;
6091 const intptr_t num_context_variables_;
6092 AliasIdentity identity_;
6093
6094 DISALLOW_COPY_AND_ASSIGN(AllocateUninitializedContextInstr);
6095};
6096
// This instruction captures the state of the object which had its allocation
// removed during the AllocationSinking pass.
// It does not produce any real code, only deoptimization information.
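//
// For example (illustrative): if allocation sinking removes the `_Point`
// allocation in
//
//   double distSquared(double x, double y) {
//     final p = _Point(x, y);          // never escapes; allocation is sunk
//     return p.x * p.x + p.y * p.y;
//   }
//
// then a MaterializeObjectInstr in the deoptimization environment records
// the class and field values needed to rebuild `p` if the optimized code
// deoptimizes.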
6100class MaterializeObjectInstr : public Definition {
6101 public:
6102 MaterializeObjectInstr(AllocateObjectInstr* allocation,
6103 const ZoneGrowableArray<const Slot*>& slots,
6104 ZoneGrowableArray<Value*>* values)
6105 : allocation_(allocation),
6106 cls_(allocation->cls()),
6107 num_variables_(-1),
6108 slots_(slots),
6109 values_(values),
6110 locations_(NULL),
6111 visited_for_liveness_(false),
6112 registers_remapped_(false) {
6113 ASSERT(slots_.length() == values_->length());
6114 for (intptr_t i = 0; i < InputCount(); i++) {
6115 InputAt(i)->set_instruction(this);
6116 InputAt(i)->set_use_index(i);
6117 }
6118 }
6119
6120 MaterializeObjectInstr(AllocateUninitializedContextInstr* allocation,
6121 const ZoneGrowableArray<const Slot*>& slots,
6122 ZoneGrowableArray<Value*>* values)
6123 : allocation_(allocation),
6124 cls_(Class::ZoneHandle(Object::context_class())),
6125 num_variables_(allocation->num_context_variables()),
6126 slots_(slots),
6127 values_(values),
6128 locations_(NULL),
6129 visited_for_liveness_(false),
6130 registers_remapped_(false) {
6131 ASSERT(slots_.length() == values_->length());
6132 for (intptr_t i = 0; i < InputCount(); i++) {
6133 InputAt(i)->set_instruction(this);
6134 InputAt(i)->set_use_index(i);
6135 }
6136 }
6137
6138 Definition* allocation() const { return allocation_; }
6139 const Class& cls() const { return cls_; }
6140
6141 intptr_t num_variables() const { return num_variables_; }
6142
6143 intptr_t FieldOffsetAt(intptr_t i) const {
6144 return slots_[i]->offset_in_bytes();
6145 }
6146
6147 const Location& LocationAt(intptr_t i) { return locations_[i]; }
6148
6149 DECLARE_INSTRUCTION(MaterializeObject)
6150
6151 virtual intptr_t InputCount() const { return values_->length(); }
6152
6153 virtual Value* InputAt(intptr_t i) const { return (*values_)[i]; }
6154
  // The SelectRepresentations pass is run once more while MaterializeObject
  // instructions are still in the graph. To avoid any redundant boxing
  // operations inserted by that pass, we indicate that this instruction can
  // cope with any representation, as it is essentially an environment use.
6160 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6161 ASSERT(0 <= idx && idx < InputCount());
6162 return kNoRepresentation;
6163 }
6164
6165 virtual bool ComputeCanDeoptimize() const { return false; }
6166 virtual bool HasUnknownSideEffects() const { return false; }
6167
6168 Location* locations() { return locations_; }
6169 void set_locations(Location* locations) { locations_ = locations; }
6170
6171 virtual bool MayThrow() const { return false; }
6172
6173 void RemapRegisters(intptr_t* cpu_reg_slots, intptr_t* fpu_reg_slots);
6174
6175 bool was_visited_for_liveness() const { return visited_for_liveness_; }
6176 void mark_visited_for_liveness() { visited_for_liveness_ = true; }
6177
6178 PRINT_OPERANDS_TO_SUPPORT
6179
6180 private:
6181 virtual void RawSetInputAt(intptr_t i, Value* value) {
6182 (*values_)[i] = value;
6183 }
6184
6185 Definition* allocation_;
6186 const Class& cls_;
6187 intptr_t num_variables_;
6188 const ZoneGrowableArray<const Slot*>& slots_;
6189 ZoneGrowableArray<Value*>* values_;
6190 Location* locations_;
6191
6192 bool visited_for_liveness_;
6193 bool registers_remapped_;
6194
6195 DISALLOW_COPY_AND_ASSIGN(MaterializeObjectInstr);
6196};
6197
6198class CreateArrayInstr : public TemplateAllocation<2, Throws> {
6199 public:
6200 CreateArrayInstr(TokenPosition token_pos,
6201 Value* element_type,
6202 Value* num_elements,
6203 intptr_t deopt_id)
6204 : TemplateAllocation(deopt_id),
6205 token_pos_(token_pos),
6206 identity_(AliasIdentity::Unknown()) {
6207 SetInputAt(kElementTypePos, element_type);
6208 SetInputAt(kLengthPos, num_elements);
6209 }
6210
6211 enum { kElementTypePos = 0, kLengthPos = 1 };
6212
6213 DECLARE_INSTRUCTION(CreateArray)
6214 virtual CompileType ComputeType() const;
6215
6216 virtual TokenPosition token_pos() const { return token_pos_; }
6217 Value* element_type() const { return inputs_[kElementTypePos]; }
6218 Value* num_elements() const { return inputs_[kLengthPos]; }
6219
  // Throw needs an environment, which is created only if the instruction can
  // deoptimize.
6222 virtual bool ComputeCanDeoptimize() const {
6223 return !CompilerState::Current().is_aot();
6224 }
6225
6226 virtual bool HasUnknownSideEffects() const { return false; }
6227
6228 virtual AliasIdentity Identity() const { return identity_; }
6229 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
6230
6231 virtual bool WillAllocateNewOrRemembered() const {
6232 // Large arrays will use cards instead; cannot skip write barrier.
6233 if (!num_elements()->BindsToConstant()) return false;
6234 const Object& length = num_elements()->BoundConstant();
6235 if (!length.IsSmi()) return false;
6236 return compiler::target::WillAllocateNewOrRememberedArray(
6237 Smi::Cast(length).Value());
6238 }
6239
6240 private:
6241 const TokenPosition token_pos_;
6242 AliasIdentity identity_;
6243
6244 DISALLOW_COPY_AND_ASSIGN(CreateArrayInstr);
6245};
6246
// Note: This instruction must not be moved without the indexed access that
// depends on it (e.g. out of loops), because the GC may collect the array
// while the external data-array is still being accessed.
// TODO(vegorov) enable LICMing this instruction by ensuring that the array
// itself is kept alive.
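//
// For example (illustrative, `Wrapper` is a hypothetical class): hoisting
// the LoadUntagged that produces the raw data pointer of `bytes` out of the
// loop below would be unsound, because the `Wrapper` allocation can trigger
// a GC that moves the array and leaves the raw pointer dangling:
//
//   for (var i = 0; i < bytes.length; i++) {
//     results.add(Wrapper(bytes[i]));
//   }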
6252class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
6253 public:
6254 LoadUntaggedInstr(Value* object, intptr_t offset) : offset_(offset) {
6255 SetInputAt(0, object);
6256 }
6257
6258 virtual Representation representation() const { return kUntagged; }
6259 DECLARE_INSTRUCTION(LoadUntagged)
6260 virtual CompileType ComputeType() const;
6261
6262 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6263 ASSERT(idx == 0);
6264 // The object may be tagged or untagged (for external objects).
6265 return kNoRepresentation;
6266 }
6267
6268 Value* object() const { return inputs_[0]; }
6269 intptr_t offset() const { return offset_; }
6270
6271 virtual bool ComputeCanDeoptimize() const { return false; }
6272
6273 virtual bool HasUnknownSideEffects() const { return false; }
6274 virtual bool AttributesEqual(Instruction* other) const {
6275 return other->AsLoadUntagged()->offset_ == offset_;
6276 }
6277
6278 PRINT_OPERANDS_TO_SUPPORT
6279
6280 private:
6281 intptr_t offset_;
6282
6283 DISALLOW_COPY_AND_ASSIGN(LoadUntaggedInstr);
6284};
6285
// Stores an untagged value into the given object.
//
// If the untagged value is a derived pointer (e.g. a pointer to the start of
// the internal typed data array backing) then this instruction cannot be
// moved across instructions which can trigger GC, to ensure that
//
//    LoadUntagged + Arithmetic + StoreUntagged
//
// are performed atomically.
//
// See kernel_to_il.cc:BuildTypedDataViewFactoryConstructor.
6297class StoreUntaggedInstr : public TemplateInstruction<2, NoThrow> {
6298 public:
6299 StoreUntaggedInstr(Value* object, Value* value, intptr_t offset)
6300 : offset_(offset) {
6301 SetInputAt(0, object);
6302 SetInputAt(1, value);
6303 }
6304
6305 DECLARE_INSTRUCTION(StoreUntagged)
6306
6307 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6308 ASSERT(idx == 0 || idx == 1);
6309 // The object may be tagged or untagged (for external objects).
6310 if (idx == 0) return kNoRepresentation;
6311 return kUntagged;
6312 }
6313
6314 Value* object() const { return inputs_[0]; }
6315 Value* value() const { return inputs_[1]; }
6316 intptr_t offset() const { return offset_; }
6317
6318 virtual bool ComputeCanDeoptimize() const { return false; }
6319 virtual bool HasUnknownSideEffects() const { return false; }
6320 virtual bool AttributesEqual(Instruction* other) const {
6321 return other->AsStoreUntagged()->offset_ == offset_;
6322 }
6323
6324 intptr_t offset_from_tagged() const {
6325 const bool is_tagged = object()->definition()->representation() == kTagged;
6326 return offset() - (is_tagged ? kHeapObjectTag : 0);
6327 }
6328
6329 private:
6330 intptr_t offset_;
6331
6332 DISALLOW_COPY_AND_ASSIGN(StoreUntaggedInstr);
6333};
6334
6335class LoadClassIdInstr : public TemplateDefinition<1, NoThrow, Pure> {
6336 public:
6337 explicit LoadClassIdInstr(Value* object,
6338 Representation representation = kTagged,
6339 bool input_can_be_smi = true)
6340 : representation_(representation), input_can_be_smi_(input_can_be_smi) {
6341 ASSERT(representation == kTagged || representation == kUntagged);
6342 SetInputAt(0, object);
6343 }
6344
6345 virtual Representation representation() const { return representation_; }
6346 DECLARE_INSTRUCTION(LoadClassId)
6347 virtual CompileType ComputeType() const;
6348
6349 virtual Definition* Canonicalize(FlowGraph* flow_graph);
6350
6351 Value* object() const { return inputs_[0]; }
6352
6353 virtual bool ComputeCanDeoptimize() const { return false; }
6354
6355 virtual bool AttributesEqual(Instruction* other) const {
6356 auto other_load = other->AsLoadClassId();
6357 return other_load->representation_ == representation_ &&
6358 other_load->input_can_be_smi_ == input_can_be_smi_;
6359 }
6360
6361 PRINT_OPERANDS_TO_SUPPORT
6362
6363 private:
6364 const Representation representation_;
6365 const bool input_can_be_smi_;
6366
6367 DISALLOW_COPY_AND_ASSIGN(LoadClassIdInstr);
6368};
6369
// LoadFieldInstr represents a load from the given [slot] in the given
// [instance]. If calls_initializer(), then LoadFieldInstr also calls the
// field initializer if the field is not initialized yet (i.e. contains the
// sentinel value).
//
// Note: if the slot was subject to the field unboxing optimization then this
// load will both load the box stored in the field and then load the content
// of the box.
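//
// For example (illustrative, assuming null-safe Dart with late fields):
//
//   class C {
//     late final int x = expensiveInit();
//   }
//
//   c.x;  // compiles to a LoadFieldInstr with calls_initializer() == true:
//         // if the slot still holds the sentinel, the initializer runs.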
6377class LoadFieldInstr : public TemplateDefinition<1, Throws> {
6378 public:
6379 LoadFieldInstr(Value* instance,
6380 const Slot& slot,
6381 TokenPosition token_pos,
6382 bool calls_initializer = false,
6383 intptr_t deopt_id = DeoptId::kNone)
6384 : TemplateDefinition(deopt_id),
6385 slot_(slot),
6386 token_pos_(token_pos),
6387 calls_initializer_(calls_initializer) {
6388 ASSERT(!calls_initializer || (deopt_id != DeoptId::kNone));
6389 ASSERT(!calls_initializer || slot.IsDartField());
6390 SetInputAt(0, instance);
6391 }
6392
6393 Value* instance() const { return inputs_[0]; }
6394 const Slot& slot() const { return slot_; }
6395
6396 virtual TokenPosition token_pos() const { return token_pos_; }
6397
6398 bool calls_initializer() const { return calls_initializer_; }
6399 void set_calls_initializer(bool value) { calls_initializer_ = value; }
6400
6401 virtual Representation representation() const;
6402
6403 bool IsUnboxedLoad() const;
6404 bool IsPotentialUnboxedLoad() const;
6405
6406 DECLARE_INSTRUCTION(LoadField)
6407 virtual CompileType ComputeType() const;
6408
6409 virtual bool ComputeCanDeoptimize() const { return calls_initializer(); }
6410 virtual bool HasUnknownSideEffects() const { return calls_initializer(); }
6411 virtual bool CanTriggerGC() const { return calls_initializer(); }
6412 virtual bool MayThrow() const { return calls_initializer(); }
6413
6414 virtual void InferRange(RangeAnalysis* analysis, Range* range);
6415
6416 bool IsImmutableLengthLoad() const;
6417
  // Tries to evaluate this load against the given constant value of
  // the instance. Returns true if the evaluation succeeded and stores
  // the result in [result].
  // Note: we only evaluate loads when we can ensure that
  // the instance has the field.
6423 bool Evaluate(const Object& instance_value, Object* result);
6424
6425 static bool TryEvaluateLoad(const Object& instance,
6426 const Field& field,
6427 Object* result);
6428
6429 static bool TryEvaluateLoad(const Object& instance,
6430 const Slot& field,
6431 Object* result);
6432
6433 virtual Definition* Canonicalize(FlowGraph* flow_graph);
6434
6435 static bool IsFixedLengthArrayCid(intptr_t cid);
6436 static bool IsTypedDataViewFactory(const Function& function);
6437
6438 virtual bool AllowsCSE() const { return slot_.is_immutable(); }
6439
6440 virtual bool AttributesEqual(Instruction* other) const;
6441
6442 PRINT_OPERANDS_TO_SUPPORT
6443 ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
6444 ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
6445
6446 private:
6447 intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
6448
  // Generates code which checks whether the field is initialized and calls
  // the initializer if it is not. The field value has already been loaded.
6451 void EmitNativeCodeForInitializerCall(FlowGraphCompiler* compiler);
6452
6453 const Slot& slot_;
6454 const TokenPosition token_pos_;
6455 bool calls_initializer_;
6456
6457 DISALLOW_COPY_AND_ASSIGN(LoadFieldInstr);
6458};
6459
6460class InstantiateTypeInstr : public TemplateDefinition<2, Throws> {
6461 public:
6462 InstantiateTypeInstr(TokenPosition token_pos,
6463 const AbstractType& type,
6464 Value* instantiator_type_arguments,
6465 Value* function_type_arguments,
6466 intptr_t deopt_id)
6467 : TemplateDefinition(deopt_id), token_pos_(token_pos), type_(type) {
6468 ASSERT(type.IsZoneHandle() || type.IsReadOnlyHandle());
6469 SetInputAt(0, instantiator_type_arguments);
6470 SetInputAt(1, function_type_arguments);
6471 }
6472
6473 DECLARE_INSTRUCTION(InstantiateType)
6474
6475 Value* instantiator_type_arguments() const { return inputs_[0]; }
6476 Value* function_type_arguments() const { return inputs_[1]; }
6477 const AbstractType& type() const { return type_; }
6478 virtual TokenPosition token_pos() const { return token_pos_; }
6479
6480 virtual bool ComputeCanDeoptimize() const {
6481 return !CompilerState::Current().is_aot();
6482 }
6483
6484 virtual bool HasUnknownSideEffects() const { return false; }
6485
6486 PRINT_OPERANDS_TO_SUPPORT
6487
6488 private:
6489 const TokenPosition token_pos_;
6490 const AbstractType& type_;
6491
6492 DISALLOW_COPY_AND_ASSIGN(InstantiateTypeInstr);
6493};
6494
6495class InstantiateTypeArgumentsInstr : public TemplateDefinition<2, Throws> {
6496 public:
6497 InstantiateTypeArgumentsInstr(TokenPosition token_pos,
6498 const TypeArguments& type_arguments,
6499 const Class& instantiator_class,
6500 const Function& function,
6501 Value* instantiator_type_arguments,
6502 Value* function_type_arguments,
6503 intptr_t deopt_id)
6504 : TemplateDefinition(deopt_id),
6505 token_pos_(token_pos),
6506 type_arguments_(type_arguments),
6507 instantiator_class_(instantiator_class),
6508 function_(function) {
6509 ASSERT(type_arguments.IsZoneHandle());
6510 ASSERT(instantiator_class.IsZoneHandle());
6511 ASSERT(function.IsZoneHandle());
6512 SetInputAt(0, instantiator_type_arguments);
6513 SetInputAt(1, function_type_arguments);
6514 }
6515
6516 DECLARE_INSTRUCTION(InstantiateTypeArguments)
6517
6518 Value* instantiator_type_arguments() const { return inputs_[0]; }
6519 Value* function_type_arguments() const { return inputs_[1]; }
6520 const TypeArguments& type_arguments() const { return type_arguments_; }
6521 const Class& instantiator_class() const { return instantiator_class_; }
6522 const Function& function() const { return function_; }
6523 virtual TokenPosition token_pos() const { return token_pos_; }
6524
6525 virtual bool ComputeCanDeoptimize() const {
6526 return !CompilerState::Current().is_aot();
6527 }
6528
6529 virtual bool HasUnknownSideEffects() const { return false; }
6530
6531 virtual Definition* Canonicalize(FlowGraph* flow_graph);
6532
6533 const Code& GetStub() const {
6534 bool with_runtime_check;
6535 if (type_arguments().CanShareInstantiatorTypeArguments(
6536 instantiator_class(), &with_runtime_check)) {
6537 ASSERT(with_runtime_check);
6538 return StubCode::InstantiateTypeArgumentsMayShareInstantiatorTA();
6539 } else if (type_arguments().CanShareFunctionTypeArguments(
6540 function(), &with_runtime_check)) {
6541 ASSERT(with_runtime_check);
6542 return StubCode::InstantiateTypeArgumentsMayShareFunctionTA();
6543 }
6544 return StubCode::InstantiateTypeArguments();
6545 }
6546
6547 PRINT_OPERANDS_TO_SUPPORT
6548
6549 private:
6550 const TokenPosition token_pos_;
6551 const TypeArguments& type_arguments_;
6552 const Class& instantiator_class_;
6553 const Function& function_;
6554
6555 DISALLOW_COPY_AND_ASSIGN(InstantiateTypeArgumentsInstr);
6556};
6557
// [AllocateContext] instruction allocates a new Context object with space
// for the given [context_slots].
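//
// For example (illustrative): a local captured by a closure is moved into a
// heap-allocated Context:
//
//   Function counter() {
//     var n = 0;           // captured below, so `n` lives in a Context
//     return () => n++;    // the closure keeps the Context alive
//   }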
6560class AllocateContextInstr : public TemplateAllocation<0, NoThrow> {
6561 public:
6562 AllocateContextInstr(TokenPosition token_pos,
6563 const ZoneGrowableArray<const Slot*>& context_slots)
6564 : token_pos_(token_pos), context_slots_(context_slots) {}
6565
6566 DECLARE_INSTRUCTION(AllocateContext)
6567 virtual CompileType ComputeType() const;
6568
6569 virtual TokenPosition token_pos() const { return token_pos_; }
6570 const ZoneGrowableArray<const Slot*>& context_slots() const {
6571 return context_slots_;
6572 }
6573
6574 intptr_t num_context_variables() const { return context_slots().length(); }
6575
6576 virtual bool ComputeCanDeoptimize() const { return false; }
6577
6578 virtual bool HasUnknownSideEffects() const { return false; }
6579
6580 virtual bool WillAllocateNewOrRemembered() const {
6581 return compiler::target::WillAllocateNewOrRememberedContext(
6582 context_slots().length());
6583 }
6584
6585 PRINT_OPERANDS_TO_SUPPORT
6586
6587 private:
6588 const TokenPosition token_pos_;
6589 const ZoneGrowableArray<const Slot*>& context_slots_;
6590
6591 DISALLOW_COPY_AND_ASSIGN(AllocateContextInstr);
6592};
6593
// [CloneContext] instruction clones the given Context object assuming that
// it contains exactly the provided [context_slots].
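//
// For example (illustrative): Dart gives each loop iteration its own copy of
// a captured loop variable, which is implemented by cloning the context
// between iterations (`callbacks` is an assumed List of closures):
//
//   for (var i = 0; i < 3; i++) {
//     callbacks.add(() => print(i));   // prints 0, 1, 2 - not 3, 3, 3
//   }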
6596class CloneContextInstr : public TemplateDefinition<1, NoThrow> {
6597 public:
6598 CloneContextInstr(TokenPosition token_pos,
6599 Value* context_value,
6600 const ZoneGrowableArray<const Slot*>& context_slots,
6601 intptr_t deopt_id)
6602 : TemplateDefinition(deopt_id),
6603 token_pos_(token_pos),
6604 context_slots_(context_slots) {
6605 SetInputAt(0, context_value);
6606 }
6607
6608 virtual TokenPosition token_pos() const { return token_pos_; }
6609 Value* context_value() const { return inputs_[0]; }
6610
6611 const ZoneGrowableArray<const Slot*>& context_slots() const {
6612 return context_slots_;
6613 }
6614
6615 DECLARE_INSTRUCTION(CloneContext)
6616 virtual CompileType ComputeType() const;
6617
6618 virtual bool ComputeCanDeoptimize() const {
6619 return !CompilerState::Current().is_aot();
6620 }
6621
6622 virtual bool HasUnknownSideEffects() const { return false; }
6623
6624 private:
6625 const TokenPosition token_pos_;
6626 const ZoneGrowableArray<const Slot*>& context_slots_;
6627
6628 DISALLOW_COPY_AND_ASSIGN(CloneContextInstr);
6629};
6630
6631class CheckEitherNonSmiInstr : public TemplateInstruction<2, NoThrow, Pure> {
6632 public:
6633 CheckEitherNonSmiInstr(Value* left, Value* right, intptr_t deopt_id)
6634 : TemplateInstruction(deopt_id), licm_hoisted_(false) {
6635 SetInputAt(0, left);
6636 SetInputAt(1, right);
6637 }
6638
6639 Value* left() const { return inputs_[0]; }
6640 Value* right() const { return inputs_[1]; }
6641
6642 DECLARE_INSTRUCTION(CheckEitherNonSmi)
6643
6644 virtual bool ComputeCanDeoptimize() const { return true; }
6645
6646 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
6647
6648 virtual bool AttributesEqual(Instruction* other) const { return true; }
6649
6650 void set_licm_hoisted(bool value) { licm_hoisted_ = value; }
6651
6652 private:
6653 bool licm_hoisted_;
6654
6655 DISALLOW_COPY_AND_ASSIGN(CheckEitherNonSmiInstr);
6656};
6657
6658class Boxing : public AllStatic {
6659 public:
6660 static bool Supports(Representation rep) {
6661 switch (rep) {
6662 case kUnboxedDouble:
6663 case kUnboxedFloat32x4:
6664 case kUnboxedFloat64x2:
6665 case kUnboxedInt32x4:
6666 case kUnboxedInt64:
6667 case kUnboxedInt32:
6668 case kUnboxedUint32:
6669 return true;
6670 default:
6671 return false;
6672 }
6673 }
6674
6675 static intptr_t ValueOffset(Representation rep) {
6676 switch (rep) {
6677 case kUnboxedFloat:
6678 case kUnboxedDouble:
6679 return Double::value_offset();
6680
6681 case kUnboxedFloat32x4:
6682 return Float32x4::value_offset();
6683
6684 case kUnboxedFloat64x2:
6685 return Float64x2::value_offset();
6686
6687 case kUnboxedInt32x4:
6688 return Int32x4::value_offset();
6689
6690 case kUnboxedInt64:
6691 return Mint::value_offset();
6692
6693 default:
6694 UNREACHABLE();
6695 return 0;
6696 }
6697 }
6698
6699 static intptr_t BoxCid(Representation rep) {
6700 switch (rep) {
6701 case kUnboxedInt64:
6702 return kMintCid;
6703 case kUnboxedDouble:
6704 case kUnboxedFloat:
6705 return kDoubleCid;
6706 case kUnboxedFloat32x4:
6707 return kFloat32x4Cid;
6708 case kUnboxedFloat64x2:
6709 return kFloat64x2Cid;
6710 case kUnboxedInt32x4:
6711 return kInt32x4Cid;
6712 default:
6713 UNREACHABLE();
6714 return kIllegalCid;
6715 }
6716 }
6717};

class BoxInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  static BoxInstr* Create(Representation from, Value* value);

  Value* value() const { return inputs_[0]; }
  Representation from_representation() const { return from_representation_; }

  DECLARE_INSTRUCTION(Box)
  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return from_representation();
  }

  virtual bool AttributesEqual(Instruction* other) const {
    return other->AsBox()->from_representation() == from_representation();
  }

  Definition* Canonicalize(FlowGraph* flow_graph);

  virtual TokenPosition token_pos() const { return TokenPosition::kBox; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }

 protected:
  BoxInstr(Representation from_representation, Value* value)
      : from_representation_(from_representation) {
    SetInputAt(0, value);
  }

 private:
  intptr_t ValueOffset() const {
    return Boxing::ValueOffset(from_representation());
  }

  const Representation from_representation_;

  DISALLOW_COPY_AND_ASSIGN(BoxInstr);
};

class BoxIntegerInstr : public BoxInstr {
 public:
  BoxIntegerInstr(Representation representation, Value* value)
      : BoxInstr(representation, value) {}

  virtual bool ValueFitsSmi() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DEFINE_INSTRUCTION_TYPE_CHECK(BoxInteger)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxIntegerInstr);
};

class BoxInteger32Instr : public BoxIntegerInstr {
 public:
  BoxInteger32Instr(Representation representation, Value* value)
      : BoxIntegerInstr(representation, value) {}

  DECLARE_INSTRUCTION_BACKEND()

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInteger32Instr);
};

class BoxInt32Instr : public BoxInteger32Instr {
 public:
  explicit BoxInt32Instr(Value* value)
      : BoxInteger32Instr(kUnboxedInt32, value) {}

  DECLARE_INSTRUCTION_NO_BACKEND(BoxInt32)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInt32Instr);
};

class BoxUint32Instr : public BoxInteger32Instr {
 public:
  explicit BoxUint32Instr(Value* value)
      : BoxInteger32Instr(kUnboxedUint32, value) {}

  DECLARE_INSTRUCTION_NO_BACKEND(BoxUint32)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxUint32Instr);
};

class BoxInt64Instr : public BoxIntegerInstr {
 public:
  explicit BoxInt64Instr(Value* value)
      : BoxIntegerInstr(kUnboxedInt64, value) {}

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_INSTRUCTION(BoxInt64)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInt64Instr);
};

class UnboxInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  static UnboxInstr* Create(Representation to,
                            Value* value,
                            intptr_t deopt_id,
                            SpeculativeMode speculative_mode = kGuardInputs);

  Value* value() const { return inputs_[0]; }

  virtual bool ComputeCanDeoptimize() const {
    if (SpeculativeModeOfInputs() == kNotSpeculative) {
      return false;
    }

    const intptr_t value_cid = value()->Type()->ToCid();
    const intptr_t box_cid = BoxCid();

    if (value_cid == box_cid) {
      return false;
    }

    if (CanConvertSmi() && (value_cid == kSmiCid)) {
      return false;
    }

    return true;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual Representation representation() const { return representation_; }

  DECLARE_INSTRUCTION(Unbox)
  virtual CompileType ComputeType() const;

  virtual bool AttributesEqual(Instruction* other) const {
    UnboxInstr* other_unbox = other->AsUnbox();
    return (representation() == other_unbox->representation()) &&
           (speculative_mode_ == other_unbox->speculative_mode_);
  }

  Definition* Canonicalize(FlowGraph* flow_graph);

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual TokenPosition token_pos() const { return TokenPosition::kBox; }

 protected:
  UnboxInstr(Representation representation,
             Value* value,
             intptr_t deopt_id,
             SpeculativeMode speculative_mode)
      : TemplateDefinition(deopt_id),
        representation_(representation),
        speculative_mode_(speculative_mode) {
    SetInputAt(0, value);
  }

 private:
  bool CanConvertSmi() const;
  void EmitLoadFromBox(FlowGraphCompiler* compiler);
  void EmitSmiConversion(FlowGraphCompiler* compiler);
  void EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler);
  void EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler);
  void EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler);

  intptr_t BoxCid() const { return Boxing::BoxCid(representation_); }

  intptr_t ValueOffset() const { return Boxing::ValueOffset(representation_); }

  const Representation representation_;
  const SpeculativeMode speculative_mode_;

  DISALLOW_COPY_AND_ASSIGN(UnboxInstr);
};
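
// Deoptimization sketch for ComputeCanDeoptimize() above (illustrative;
// assumes `zone`, a Definition* `v`, and a `deopt_id` are in scope):
//
//   // Never deoptimizes: the input is not speculatively typed.
//   UnboxInstr::Create(kUnboxedDouble, new (zone) Value(v), DeoptId::kNone,
//                      Instruction::kNotSpeculative);
//
//   // May deoptimize under kGuardInputs: unless `v` is already known to be
//   // a Double (or a Smi that can be converted), a type guard is emitted.
//   UnboxInstr::Create(kUnboxedDouble, new (zone) Value(v), deopt_id);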

class UnboxIntegerInstr : public UnboxInstr {
 public:
  enum TruncationMode { kTruncate, kNoTruncation };

  UnboxIntegerInstr(Representation representation,
                    TruncationMode truncation_mode,
                    Value* value,
                    intptr_t deopt_id,
                    SpeculativeMode speculative_mode)
      : UnboxInstr(representation, value, deopt_id, speculative_mode),
        is_truncating_(truncation_mode == kTruncate) {}

  bool is_truncating() const { return is_truncating_; }

  void mark_truncating() { is_truncating_ = true; }

  virtual CompileType ComputeType() const;

  virtual bool AttributesEqual(Instruction* other) const {
    UnboxIntegerInstr* other_unbox = other->AsUnboxInteger();
    return UnboxInstr::AttributesEqual(other) &&
           (other_unbox->is_truncating_ == is_truncating_);
  }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DEFINE_INSTRUCTION_TYPE_CHECK(UnboxInteger)

  PRINT_OPERANDS_TO_SUPPORT

 private:
  bool is_truncating_;

  DISALLOW_COPY_AND_ASSIGN(UnboxIntegerInstr);
};
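
// Truncation sketch (illustrative): with kTruncate, unboxing the integer
// value 0x123456789 to kUnboxedUint32 keeps only the low 32 bits
// (0x23456789); with kNoTruncation an out-of-range value must not silently
// wrap, which is reflected in the ComputeCanDeoptimize() overrides below.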

class UnboxInteger32Instr : public UnboxIntegerInstr {
 public:
  UnboxInteger32Instr(Representation representation,
                      TruncationMode truncation_mode,
                      Value* value,
                      intptr_t deopt_id,
                      SpeculativeMode speculative_mode)
      : UnboxIntegerInstr(representation,
                          truncation_mode,
                          value,
                          deopt_id,
                          speculative_mode) {}

  DECLARE_INSTRUCTION_BACKEND()

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInteger32Instr);
};

class UnboxUint32Instr : public UnboxInteger32Instr {
 public:
  UnboxUint32Instr(Value* value,
                   intptr_t deopt_id,
                   SpeculativeMode speculative_mode = kGuardInputs)
      : UnboxInteger32Instr(kUnboxedUint32,
                            kTruncate,
                            value,
                            deopt_id,
                            speculative_mode) {
    ASSERT(is_truncating());
  }

  virtual bool ComputeCanDeoptimize() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  DECLARE_INSTRUCTION_NO_BACKEND(UnboxUint32)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxUint32Instr);
};

class UnboxInt32Instr : public UnboxInteger32Instr {
 public:
  UnboxInt32Instr(TruncationMode truncation_mode,
                  Value* value,
                  intptr_t deopt_id,
                  SpeculativeMode speculative_mode = kGuardInputs)
      : UnboxInteger32Instr(kUnboxedInt32,
                            truncation_mode,
                            value,
                            deopt_id,
                            speculative_mode) {}

  virtual bool ComputeCanDeoptimize() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_INSTRUCTION_NO_BACKEND(UnboxInt32)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInt32Instr);
};

class UnboxInt64Instr : public UnboxIntegerInstr {
 public:
  UnboxInt64Instr(Value* value,
                  intptr_t deopt_id,
                  SpeculativeMode speculative_mode)
      : UnboxIntegerInstr(kUnboxedInt64,
                          kNoTruncation,
                          value,
                          deopt_id,
                          speculative_mode) {}

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_INSTRUCTION_NO_BACKEND(UnboxInt64)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInt64Instr);
};

bool Definition::IsInt64Definition() {
  return (Type()->ToCid() == kMintCid) || IsBinaryInt64Op() ||
         IsUnaryInt64Op() || IsShiftInt64Op() || IsSpeculativeShiftInt64Op() ||
         IsBoxInt64() || IsUnboxInt64();
}

class MathUnaryInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  enum MathUnaryKind {
    kIllegal,
    kSqrt,
    kDoubleSquare,
  };
  MathUnaryInstr(MathUnaryKind kind, Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id), kind_(kind) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  MathUnaryKind kind() const { return kind_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  DECLARE_INSTRUCTION(MathUnary)
  virtual CompileType ComputeType() const;

  virtual bool AttributesEqual(Instruction* other) const {
    return kind() == other->AsMathUnary()->kind();
  }

  Definition* Canonicalize(FlowGraph* flow_graph);

  static const char* KindToCString(MathUnaryKind kind);

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const MathUnaryKind kind_;

  DISALLOW_COPY_AND_ASSIGN(MathUnaryInstr);
};

// Calls into the runtime and performs a case-insensitive comparison of the
// UTF16 strings (i.e. TwoByteString or ExternalTwoByteString) located at
// str[lhs_index:lhs_index + length] and str[rhs_index:rhs_index + length].
// Depending on the runtime entry passed, we will treat the strings as either
// UCS2 (no surrogate handling) or UTF16 (surrogates handled appropriately).
class CaseInsensitiveCompareInstr
    : public TemplateDefinition<4, NoThrow, Pure> {
 public:
  CaseInsensitiveCompareInstr(Value* str,
                              Value* lhs_index,
                              Value* rhs_index,
                              Value* length,
                              const RuntimeEntry& entry,
                              intptr_t cid)
      : entry_(entry), cid_(cid) {
    ASSERT(cid == kTwoByteStringCid || cid == kExternalTwoByteStringCid);
    ASSERT(index_scale() == 2);
    SetInputAt(0, str);
    SetInputAt(1, lhs_index);
    SetInputAt(2, rhs_index);
    SetInputAt(3, length);
  }

  Value* str() const { return inputs_[0]; }
  Value* lhs_index() const { return inputs_[1]; }
  Value* rhs_index() const { return inputs_[2]; }
  Value* length() const { return inputs_[3]; }

  const RuntimeEntry& TargetFunction() const { return entry_; }
  bool IsExternal() const { return cid_ == kExternalTwoByteStringCid; }
  intptr_t class_id() const { return cid_; }

  intptr_t index_scale() const {
    return compiler::target::Instance::ElementSizeFor(cid_);
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kTagged; }

  DECLARE_INSTRUCTION(CaseInsensitiveCompare)
  virtual CompileType ComputeType() const;

  virtual bool AttributesEqual(Instruction* other) const {
    return other->AsCaseInsensitiveCompare()->cid_ == cid_;
  }

 private:
  const RuntimeEntry& entry_;
  const intptr_t cid_;

  DISALLOW_COPY_AND_ASSIGN(CaseInsensitiveCompareInstr);
};
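
// Example (illustrative): for str = "aBcAbC" as a TwoByteString with
// lhs_index = 0, rhs_index = 3, and length = 3, the runtime entry compares
// "aBc" against "AbC" case-insensitively and reports a match; a
// case-sensitive comparison of the same ranges would not.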

// Represents Math's static min and max functions.
class MathMinMaxInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  MathMinMaxInstr(MethodRecognizer::Kind op_kind,
                  Value* left_value,
                  Value* right_value,
                  intptr_t deopt_id,
                  intptr_t result_cid)
      : TemplateDefinition(deopt_id),
        op_kind_(op_kind),
        result_cid_(result_cid) {
    ASSERT((result_cid == kSmiCid) || (result_cid == kDoubleCid));
    SetInputAt(0, left_value);
    SetInputAt(1, right_value);
  }

  MethodRecognizer::Kind op_kind() const { return op_kind_; }

  Value* left() const { return inputs_[0]; }
  Value* right() const { return inputs_[1]; }

  intptr_t result_cid() const { return result_cid_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const {
    if (result_cid() == kSmiCid) {
      return kTagged;
    }
    ASSERT(result_cid() == kDoubleCid);
    return kUnboxedDouble;
  }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    if (result_cid() == kSmiCid) {
      return kTagged;
    }
    ASSERT(result_cid() == kDoubleCid);
    return kUnboxedDouble;
  }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  DECLARE_INSTRUCTION(MathMinMax)
  virtual CompileType ComputeType() const;
  virtual bool AttributesEqual(Instruction* other) const;

 private:
  const MethodRecognizer::Kind op_kind_;
  const intptr_t result_cid_;

  DISALLOW_COPY_AND_ASSIGN(MathMinMaxInstr);
};

class BinaryDoubleOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  BinaryDoubleOpInstr(Token::Kind op_kind,
                      Value* left,
                      Value* right,
                      intptr_t deopt_id,
                      TokenPosition token_pos,
                      SpeculativeMode speculative_mode = kGuardInputs)
      : TemplateDefinition(deopt_id),
        op_kind_(op_kind),
        token_pos_(token_pos),
        speculative_mode_(speculative_mode) {
    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  Value* left() const { return inputs_[0]; }
  Value* right() const { return inputs_[1]; }

  Token::Kind op_kind() const { return op_kind_; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedDouble;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  PRINT_OPERANDS_TO_SUPPORT

  DECLARE_INSTRUCTION(BinaryDoubleOp)
  virtual CompileType ComputeType() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(Instruction* other) const {
    const BinaryDoubleOpInstr* other_bin_op = other->AsBinaryDoubleOp();
    return (op_kind() == other_bin_op->op_kind()) &&
           (speculative_mode_ == other_bin_op->speculative_mode_);
  }

 private:
  const Token::Kind op_kind_;
  const TokenPosition token_pos_;
  const SpeculativeMode speculative_mode_;

  DISALLOW_COPY_AND_ASSIGN(BinaryDoubleOpInstr);
};

class DoubleTestOpInstr : public TemplateComparison<1, NoThrow, Pure> {
 public:
  DoubleTestOpInstr(MethodRecognizer::Kind op_kind,
                    Value* value,
                    intptr_t deopt_id,
                    TokenPosition token_pos)
      : TemplateComparison(token_pos, Token::kEQ, deopt_id), op_kind_(op_kind) {
    SetInputAt(0, value);
  }

  Value* value() const { return InputAt(0); }

  MethodRecognizer::Kind op_kind() const { return op_kind_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  PRINT_OPERANDS_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

  DECLARE_COMPARISON_INSTRUCTION(DoubleTestOp)

  virtual CompileType ComputeType() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(Instruction* other) const {
    return op_kind_ == other->AsDoubleTestOp()->op_kind() &&
           ComparisonInstr::AttributesEqual(other);
  }

  virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);

 private:
  const MethodRecognizer::Kind op_kind_;

  DISALLOW_COPY_AND_ASSIGN(DoubleTestOpInstr);
};

class UnaryIntegerOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  UnaryIntegerOpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id), op_kind_(op_kind) {
    ASSERT((op_kind == Token::kNEGATE) || (op_kind == Token::kBIT_NOT));
    SetInputAt(0, value);
  }

  static UnaryIntegerOpInstr* Make(Representation representation,
                                   Token::Kind op_kind,
                                   Value* value,
                                   intptr_t deopt_id,
                                   Range* range);

  Value* value() const { return inputs_[0]; }
  Token::Kind op_kind() const { return op_kind_; }

  virtual bool AttributesEqual(Instruction* other) const {
    return other->AsUnaryIntegerOp()->op_kind() == op_kind();
  }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  PRINT_OPERANDS_TO_SUPPORT

  DEFINE_INSTRUCTION_TYPE_CHECK(UnaryIntegerOp)

 private:
  const Token::Kind op_kind_;
};

// Handles both Smi operations: BIT_NOT and NEGATE.
class UnarySmiOpInstr : public UnaryIntegerOpInstr {
 public:
  UnarySmiOpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
      : UnaryIntegerOpInstr(op_kind, value, deopt_id) {}

  virtual bool ComputeCanDeoptimize() const {
    return op_kind() == Token::kNEGATE;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(UnarySmiOp)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnarySmiOpInstr);
};

class UnaryUint32OpInstr : public UnaryIntegerOpInstr {
 public:
  UnaryUint32OpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
      : UnaryIntegerOpInstr(op_kind, value, deopt_id) {
    ASSERT(IsSupported(op_kind));
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const;

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedUint32;
  }

  static bool IsSupported(Token::Kind op_kind) {
    return op_kind == Token::kBIT_NOT;
  }

  DECLARE_INSTRUCTION(UnaryUint32Op)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnaryUint32OpInstr);
};

class UnaryInt64OpInstr : public UnaryIntegerOpInstr {
 public:
  UnaryInt64OpInstr(Token::Kind op_kind,
                    Value* value,
                    intptr_t deopt_id,
                    SpeculativeMode speculative_mode = kGuardInputs)
      : UnaryIntegerOpInstr(op_kind, value, deopt_id),
        speculative_mode_(speculative_mode) {
    ASSERT(op_kind == Token::kBIT_NOT || op_kind == Token::kNEGATE);
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const;

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedInt64;
  }

  virtual bool AttributesEqual(Instruction* other) const {
    UnaryInt64OpInstr* unary_op_other = other->AsUnaryInt64Op();
    return UnaryIntegerOpInstr::AttributesEqual(other) &&
           (speculative_mode_ == unary_op_other->speculative_mode_);
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  DECLARE_INSTRUCTION(UnaryInt64Op)

 private:
  const SpeculativeMode speculative_mode_;
  DISALLOW_COPY_AND_ASSIGN(UnaryInt64OpInstr);
};

class CheckedSmiOpInstr : public TemplateDefinition<2, Throws> {
 public:
  CheckedSmiOpInstr(Token::Kind op_kind,
                    Value* left,
                    Value* right,
                    TemplateDartCall<0>* call)
      : TemplateDefinition(call->deopt_id()), call_(call), op_kind_(op_kind) {
    ASSERT(call->type_args_len() == 0);
    ASSERT(!call->IsInstanceCallBase() ||
           call->AsInstanceCallBase()->CanReceiverBeSmiBasedOnInterfaceTarget(
               Thread::Current()->zone()));

    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  TemplateDartCall<0>* call() const { return call_; }
  Token::Kind op_kind() const { return op_kind_; }
  Value* left() const { return inputs_[0]; }
  Value* right() const { return inputs_[1]; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual bool HasUnknownSideEffects() const { return true; }
  virtual bool CanCallDart() const { return true; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  PRINT_OPERANDS_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

  DECLARE_INSTRUCTION(CheckedSmiOp)

 private:
  TemplateDartCall<0>* call_;
  const Token::Kind op_kind_;
  DISALLOW_COPY_AND_ASSIGN(CheckedSmiOpInstr);
};

class CheckedSmiComparisonInstr : public TemplateComparison<2, Throws> {
 public:
  CheckedSmiComparisonInstr(Token::Kind op_kind,
                            Value* left,
                            Value* right,
                            TemplateDartCall<0>* call)
      : TemplateComparison(call->token_pos(), op_kind, call->deopt_id()),
        call_(call),
        is_negated_(false) {
    ASSERT(call->type_args_len() == 0);
    ASSERT(!call->IsInstanceCallBase() ||
           call->AsInstanceCallBase()->CanReceiverBeSmiBasedOnInterfaceTarget(
               Thread::Current()->zone()));

    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  TemplateDartCall<0>* call() const { return call_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual void NegateComparison() {
    ComparisonInstr::NegateComparison();
    is_negated_ = !is_negated_;
  }

  bool is_negated() const { return is_negated_; }

  virtual bool HasUnknownSideEffects() const { return true; }
  virtual bool CanCallDart() const { return true; }

  PRINT_OPERANDS_TO_SUPPORT

  DECLARE_INSTRUCTION(CheckedSmiComparison)

  virtual void EmitBranchCode(FlowGraphCompiler* compiler, BranchInstr* branch);

  virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler,
                                       BranchLabels labels);

  virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);

 private:
  TemplateDartCall<0>* call_;
  bool is_negated_;
  DISALLOW_COPY_AND_ASSIGN(CheckedSmiComparisonInstr);
};

class BinaryIntegerOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  BinaryIntegerOpInstr(Token::Kind op_kind,
                       Value* left,
                       Value* right,
                       intptr_t deopt_id)
      : TemplateDefinition(deopt_id),
        op_kind_(op_kind),
        can_overflow_(true),
        is_truncating_(false) {
    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  static BinaryIntegerOpInstr* Make(
      Representation representation,
      Token::Kind op_kind,
      Value* left,
      Value* right,
      intptr_t deopt_id,
      bool can_overflow,
      bool is_truncating,
      Range* range,
      SpeculativeMode speculative_mode = kGuardInputs);

  Token::Kind op_kind() const { return op_kind_; }
  Value* left() const { return inputs_[0]; }
  Value* right() const { return inputs_[1]; }

  bool can_overflow() const { return can_overflow_; }
  void set_can_overflow(bool overflow) {
    ASSERT(!is_truncating_ || !overflow);
    can_overflow_ = overflow;
  }

  bool is_truncating() const { return is_truncating_; }
  void mark_truncating() {
    is_truncating_ = true;
    set_can_overflow(false);
  }

  // Returns true if right is a non-zero Smi constant whose absolute value is
  // a power of two.
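  // For example, a right operand bound to the constant 8 or -8 qualifies
  // (|8| == 2^3), while 0, 6, or a non-constant operand does not.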
  bool RightIsPowerOfTwoConstant() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(Instruction* other) const;

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  PRINT_OPERANDS_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT

  DEFINE_INSTRUCTION_TYPE_CHECK(BinaryIntegerOp)

 protected:
  void InferRangeHelper(const Range* left_range,
                        const Range* right_range,
                        Range* range);

 private:
  Definition* CreateConstantResult(FlowGraph* graph, const Integer& result);

  const Token::Kind op_kind_;

  bool can_overflow_;
  bool is_truncating_;

  DISALLOW_COPY_AND_ASSIGN(BinaryIntegerOpInstr);
};

class BinarySmiOpInstr : public BinaryIntegerOpInstr {
 public:
  BinarySmiOpInstr(Token::Kind op_kind,
                   Value* left,
                   Value* right,
                   intptr_t deopt_id)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        right_range_(NULL) {}

  virtual bool ComputeCanDeoptimize() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);
  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(BinarySmiOp)

  Range* right_range() const { return right_range_; }

 private:
  Range* right_range_;

  DISALLOW_COPY_AND_ASSIGN(BinarySmiOpInstr);
};

class BinaryInt32OpInstr : public BinaryIntegerOpInstr {
 public:
  BinaryInt32OpInstr(Token::Kind op_kind,
                     Value* left,
                     Value* right,
                     intptr_t deopt_id)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {}

  static bool IsSupported(Token::Kind op_kind, Value* left, Value* right) {
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
    switch (op_kind) {
      case Token::kADD:
      case Token::kSUB:
      case Token::kMUL:
      case Token::kBIT_AND:
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
        return true;

      case Token::kSHL:
      case Token::kSHR:
        if (right->BindsToConstant() && right->BoundConstant().IsSmi()) {
          const intptr_t value = Smi::Cast(right->BoundConstant()).Value();
          return 0 <= value && value < kBitsPerWord;
        }
        return false;

      default:
        return false;
    }
#else
    return false;
#endif
  }

  virtual bool ComputeCanDeoptimize() const;

  virtual Representation representation() const { return kUnboxedInt32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedInt32;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(BinaryInt32Op)

 private:
  DISALLOW_COPY_AND_ASSIGN(BinaryInt32OpInstr);
};

class BinaryUint32OpInstr : public BinaryIntegerOpInstr {
 public:
  BinaryUint32OpInstr(Token::Kind op_kind,
                      Value* left,
                      Value* right,
                      intptr_t deopt_id)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
    mark_truncating();
    ASSERT(IsSupported(op_kind));
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedUint32;
  }

  virtual CompileType ComputeType() const;

  static bool IsSupported(Token::Kind op_kind) {
    switch (op_kind) {
      case Token::kADD:
      case Token::kSUB:
      case Token::kMUL:
      case Token::kBIT_AND:
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
        return true;
      default:
        return false;
    }
  }

  DECLARE_INSTRUCTION(BinaryUint32Op)

 private:
  DISALLOW_COPY_AND_ASSIGN(BinaryUint32OpInstr);
};

class BinaryInt64OpInstr : public BinaryIntegerOpInstr {
 public:
  BinaryInt64OpInstr(Token::Kind op_kind,
                     Value* left,
                     Value* right,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        speculative_mode_(speculative_mode) {
    mark_truncating();
  }

  virtual bool ComputeCanDeoptimize() const {
    ASSERT(!can_overflow());
    return false;
  }

  virtual bool MayThrow() const {
    return op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV;
  }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedInt64;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual bool AttributesEqual(Instruction* other) const {
    return BinaryIntegerOpInstr::AttributesEqual(other) &&
           (speculative_mode_ == other->AsBinaryInt64Op()->speculative_mode_);
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(BinaryInt64Op)

 private:
  const SpeculativeMode speculative_mode_;
  DISALLOW_COPY_AND_ASSIGN(BinaryInt64OpInstr);
};

// Base class for integer shift operations.
class ShiftIntegerOpInstr : public BinaryIntegerOpInstr {
 public:
  ShiftIntegerOpInstr(Token::Kind op_kind,
                      Value* left,
                      Value* right,
                      intptr_t deopt_id)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        shift_range_(NULL) {
    ASSERT((op_kind == Token::kSHR) || (op_kind == Token::kSHL));
    mark_truncating();
  }

  Range* shift_range() const { return shift_range_; }

  // Set the range directly (takes ownership).
  void set_shift_range(Range* shift_range) { shift_range_ = shift_range; }

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  DEFINE_INSTRUCTION_TYPE_CHECK(ShiftIntegerOp)

 protected:
  static const intptr_t kShiftCountLimit = 63;

  // Returns true if the shift amount is guaranteed to be in
  // [0..max] range.
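  // For example, a shift amount that range analysis proves to lie in [0, 31]
  // satisfies IsShiftCountInRange(31), so no deoptimizing range check is
  // needed for it.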
  bool IsShiftCountInRange(int64_t max = kShiftCountLimit) const;

 private:
  Range* shift_range_;

  DISALLOW_COPY_AND_ASSIGN(ShiftIntegerOpInstr);
};

// Non-speculative int64 shift. Takes 2 unboxed int64.
// Throws if right operand is negative.
class ShiftInt64OpInstr : public ShiftIntegerOpInstr {
 public:
  ShiftInt64OpInstr(Token::Kind op_kind,
                    Value* left,
                    Value* right,
                    intptr_t deopt_id)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool MayThrow() const { return true; }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedInt64;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(ShiftInt64Op)

 private:
  DISALLOW_COPY_AND_ASSIGN(ShiftInt64OpInstr);
};

// Speculative int64 shift. Takes unboxed int64 and smi.
// Deoptimizes if right operand is negative or greater than kShiftCountLimit.
class SpeculativeShiftInt64OpInstr : public ShiftIntegerOpInstr {
 public:
  SpeculativeShiftInt64OpInstr(Token::Kind op_kind,
                               Value* left,
                               Value* right,
                               intptr_t deopt_id)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}

  virtual bool ComputeCanDeoptimize() const {
    ASSERT(!can_overflow());
    return !IsShiftCountInRange();
  }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedInt64 : kTagged;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(SpeculativeShiftInt64Op)

 private:
  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftInt64OpInstr);
};

// Non-speculative uint32 shift. Takes unboxed uint32 and unboxed int64.
// Throws if right operand is negative.
class ShiftUint32OpInstr : public ShiftIntegerOpInstr {
 public:
  ShiftUint32OpInstr(Token::Kind op_kind,
                     Value* left,
                     Value* right,
                     intptr_t deopt_id)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool MayThrow() const { return true; }

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedUint32 : kUnboxedInt64;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(ShiftUint32Op)

 private:
  static const intptr_t kUint32ShiftCountLimit = 31;

  DISALLOW_COPY_AND_ASSIGN(ShiftUint32OpInstr);
};

// Speculative uint32 shift. Takes unboxed uint32 and smi.
// Deoptimizes if right operand is negative.
class SpeculativeShiftUint32OpInstr : public ShiftIntegerOpInstr {
 public:
  SpeculativeShiftUint32OpInstr(Token::Kind op_kind,
                                Value* left,
                                Value* right,
                                intptr_t deopt_id)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}

  virtual bool ComputeCanDeoptimize() const { return !IsShiftCountInRange(); }

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedUint32 : kTagged;
  }

  DECLARE_INSTRUCTION(SpeculativeShiftUint32Op)

  virtual CompileType ComputeType() const;

 private:
  static const intptr_t kUint32ShiftCountLimit = 31;

  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftUint32OpInstr);
};

// Handles only NEGATE.
class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  UnaryDoubleOpInstr(Token::Kind op_kind,
                     Value* value,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : TemplateDefinition(deopt_id),
        op_kind_(op_kind),
        speculative_mode_(speculative_mode) {
    ASSERT(op_kind == Token::kNEGATE);
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  Token::Kind op_kind() const { return op_kind_; }

  DECLARE_INSTRUCTION(UnaryDoubleOp)
  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual bool AttributesEqual(Instruction* other) const {
    return speculative_mode_ == other->AsUnaryDoubleOp()->speculative_mode_;
  }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const Token::Kind op_kind_;
  const SpeculativeMode speculative_mode_;

  DISALLOW_COPY_AND_ASSIGN(UnaryDoubleOpInstr);
};

class CheckStackOverflowInstr : public TemplateInstruction<0, NoThrow> {
 public:
  enum Kind {
    // kOsrAndPreemption stack overflow checks are emitted in both unoptimized
    // and optimized versions of the code and they serve as both preemption and
    // OSR entry points.
    kOsrAndPreemption,

    // kOsrOnly stack overflow checks are only needed in the unoptimized code
    // because we can't OSR optimized code.
    kOsrOnly,
  };

  CheckStackOverflowInstr(TokenPosition token_pos,
                          intptr_t stack_depth,
                          intptr_t loop_depth,
                          intptr_t deopt_id,
                          Kind kind)
      : TemplateInstruction(deopt_id),
        token_pos_(token_pos),
        stack_depth_(stack_depth),
        loop_depth_(loop_depth),
        kind_(kind) {
    ASSERT(kind != kOsrOnly || loop_depth > 0);
  }

  virtual TokenPosition token_pos() const { return token_pos_; }
  bool in_loop() const { return loop_depth_ > 0; }
  intptr_t stack_depth() const { return stack_depth_; }
  intptr_t loop_depth() const { return loop_depth_; }

  DECLARE_INSTRUCTION(CheckStackOverflow)

  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return SlowPathSharingSupported(is_optimizing);
  }

  PRINT_OPERANDS_TO_SUPPORT
  ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

 private:
  const TokenPosition token_pos_;
  const intptr_t stack_depth_;
  const intptr_t loop_depth_;
  const Kind kind_;

  DISALLOW_COPY_AND_ASSIGN(CheckStackOverflowInstr);
};

// TODO(vegorov): remove this instruction in favor of Int32ToDouble.
class SmiToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  SmiToDoubleInstr(Value* value, TokenPosition token_pos)
      : token_pos_(token_pos) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  virtual TokenPosition token_pos() const { return token_pos_; }

  DECLARE_INSTRUCTION(SmiToDouble)
  virtual CompileType ComputeType() const;

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const { return true; }

 private:
  const TokenPosition token_pos_;

  DISALLOW_COPY_AND_ASSIGN(SmiToDoubleInstr);
};

class Int32ToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  explicit Int32ToDoubleInstr(Value* value) { SetInputAt(0, value); }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(Int32ToDouble)
  virtual CompileType ComputeType() const;

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return kUnboxedInt32;
  }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const { return true; }

 private:
  DISALLOW_COPY_AND_ASSIGN(Int32ToDoubleInstr);
};

class Int64ToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  Int64ToDoubleInstr(Value* value,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : TemplateDefinition(deopt_id), speculative_mode_(speculative_mode) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(Int64ToDouble)
  virtual CompileType ComputeType() const;

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return kUnboxedInt64;
  }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual bool AttributesEqual(Instruction* other) const {
    return speculative_mode_ == other->AsInt64ToDouble()->speculative_mode_;
  }

 private:
  const SpeculativeMode speculative_mode_;

  DISALLOW_COPY_AND_ASSIGN(Int64ToDoubleInstr);
};

class DoubleToIntegerInstr : public TemplateDefinition<1, Throws> {
 public:
  DoubleToIntegerInstr(Value* value, InstanceCallInstr* instance_call)
      : TemplateDefinition(instance_call->deopt_id()),
        instance_call_(instance_call) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  InstanceCallInstr* instance_call() const { return instance_call_; }

  DECLARE_INSTRUCTION(DoubleToInteger)
  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool CanCallDart() const { return true; }

 private:
  InstanceCallInstr* instance_call_;

  DISALLOW_COPY_AND_ASSIGN(DoubleToIntegerInstr);
};

// Similar to 'DoubleToIntegerInstr' but expects unboxed double as input
// and creates a Smi.
class DoubleToSmiInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  DoubleToSmiInstr(Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(DoubleToSmi)
  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual bool AttributesEqual(Instruction* other) const { return true; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DoubleToSmiInstr);
};
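
// Conversion sketch (illustrative): the double is truncated toward zero, so
// 3.7 becomes 3 and -3.7 becomes -3; NaN inputs and truncated results that
// do not fit in a Smi cannot be represented, which is why
// ComputeCanDeoptimize() above returns true.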

class DoubleToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  DoubleToDoubleInstr(Value* value,
                      MethodRecognizer::Kind recognized_kind,
                      intptr_t deopt_id)
      : TemplateDefinition(deopt_id), recognized_kind_(recognized_kind) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }

  DECLARE_INSTRUCTION(DoubleToDouble)
  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual bool AttributesEqual(Instruction* other) const {
    return other->AsDoubleToDouble()->recognized_kind() == recognized_kind();
  }

 private:
  const MethodRecognizer::Kind recognized_kind_;

  DISALLOW_COPY_AND_ASSIGN(DoubleToDoubleInstr);
};

class DoubleToFloatInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  DoubleToFloatInstr(Value* value,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : TemplateDefinition(deopt_id), speculative_mode_(speculative_mode) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(DoubleToFloat)

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const {
    // This works since double is the representation that the typed array
    // store expects.
    // TODO(fschneider): Change this to a genuine float representation once it
    // is supported.
    return kUnboxedDouble;
  }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

 private:
  const SpeculativeMode speculative_mode_;

  DISALLOW_COPY_AND_ASSIGN(DoubleToFloatInstr);
};

class FloatToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  FloatToDoubleInstr(Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(FloatToDouble)

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

 private:
  DISALLOW_COPY_AND_ASSIGN(FloatToDoubleInstr);
};

// TODO(sjindel): Replace with FFICallInstr.
class InvokeMathCFunctionInstr : public PureDefinition {
 public:
  InvokeMathCFunctionInstr(ZoneGrowableArray<Value*>* inputs,
                           intptr_t deopt_id,
                           MethodRecognizer::Kind recognized_kind,
                           TokenPosition token_pos);

  static intptr_t ArgumentCountFor(MethodRecognizer::Kind recognized_kind);

  const RuntimeEntry& TargetFunction() const;

  MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  DECLARE_INSTRUCTION(InvokeMathCFunction)
  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kUnboxedDouble;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual intptr_t InputCount() const { return inputs_->length(); }

  virtual Value* InputAt(intptr_t i) const { return (*inputs_)[i]; }

  virtual bool AttributesEqual(Instruction* other) const {
    InvokeMathCFunctionInstr* other_invoke = other->AsInvokeMathCFunction();
    return other_invoke->recognized_kind() == recognized_kind();
  }

  virtual bool MayThrow() const { return false; }

  static const intptr_t kSavedSpTempIndex = 0;
  static const intptr_t kObjectTempIndex = 1;
  static const intptr_t kDoubleTempIndex = 2;

  PRINT_OPERANDS_TO_SUPPORT

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) {
    (*inputs_)[i] = value;
  }

  ZoneGrowableArray<Value*>* inputs_;
  const MethodRecognizer::Kind recognized_kind_;
  const TokenPosition token_pos_;

  DISALLOW_COPY_AND_ASSIGN(InvokeMathCFunctionInstr);
};

class ExtractNthOutputInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  // Extract the Nth output register from value.
  ExtractNthOutputInstr(Value* value,
                        intptr_t n,
                        Representation definition_rep,
                        intptr_t definition_cid)
      : index_(n),
        definition_rep_(definition_rep),
        definition_cid_(definition_cid) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(ExtractNthOutput)

  virtual CompileType ComputeType() const;
  virtual bool ComputeCanDeoptimize() const { return false; }

  intptr_t index() const { return index_; }

  virtual Representation representation() const { return definition_rep_; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    if (representation() == kTagged) {
      return kPairOfTagged;
    }
    UNREACHABLE();
    return definition_rep_;
  }

  virtual bool AttributesEqual(Instruction* other) const {
    ExtractNthOutputInstr* other_extract = other->AsExtractNthOutput();
    return (other_extract->representation() == representation()) &&
           (other_extract->index() == index());
  }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const intptr_t index_;
  const Representation definition_rep_;
  const intptr_t definition_cid_;
  DISALLOW_COPY_AND_ASSIGN(ExtractNthOutputInstr);
};

class TruncDivModInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id);

  static intptr_t OutputIndexOf(Token::Kind token);

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual Representation representation() const { return kPairOfTagged; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kTagged;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  DECLARE_INSTRUCTION(TruncDivMod)

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  Range* divisor_range() const {
    // Note: this range is only used to remove the check for a zero divisor
    // from the emitted pattern. It is not used for deciding whether the
    // instruction will deoptimize or not - that is why it is ok to access
    // the range of the definition directly. Otherwise range analysis or
    // another pass would need to cache the range of the divisor in the
    // operation, to prevent bugs when range information gets out of sync
    // with the final decision whether some instruction can deoptimize or
    // not, made in EliminateEnvironments().
    return InputAt(1)->definition()->range();
  }

  DISALLOW_COPY_AND_ASSIGN(TruncDivModInstr);
};
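
// Usage sketch (illustrative; assumes `zone`, Smi definitions `x` and `y`,
// and a `deopt_id` are in scope): one TruncDivMod computes x ~/ y and x % y
// together as a register pair, and each component is then projected out with
// ExtractNthOutput:
//
//   TruncDivModInstr* div_mod = new (zone)
//       TruncDivModInstr(new (zone) Value(x), new (zone) Value(y), deopt_id);
//   ExtractNthOutputInstr* quotient = new (zone) ExtractNthOutputInstr(
//       new (zone) Value(div_mod),
//       TruncDivModInstr::OutputIndexOf(Token::kTRUNCDIV), kTagged, kSmiCid);
//   ExtractNthOutputInstr* remainder = new (zone) ExtractNthOutputInstr(
//       new (zone) Value(div_mod),
//       TruncDivModInstr::OutputIndexOf(Token::kMOD), kTagged, kSmiCid);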

class CheckClassInstr : public TemplateInstruction<1, NoThrow> {
 public:
  CheckClassInstr(Value* value,
                  intptr_t deopt_id,
                  const Cids& cids,
                  TokenPosition token_pos);

  DECLARE_INSTRUCTION(CheckClass)

  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  Value* value() const { return inputs_[0]; }

  const Cids& cids() const { return cids_; }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  bool IsNullCheck() const { return IsDeoptIfNull() || IsDeoptIfNotNull(); }

  bool IsDeoptIfNull() const;
  bool IsDeoptIfNotNull() const;

  bool IsBitTest() const;
  static bool IsCompactCidRange(const Cids& cids);
  intptr_t ComputeCidMask() const;

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const;

  bool licm_hoisted() const { return licm_hoisted_; }
  void set_licm_hoisted(bool value) { licm_hoisted_ = value; }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const Cids& cids_;
  bool licm_hoisted_;
  bool is_bit_test_;
  const TokenPosition token_pos_;

  int EmitCheckCid(FlowGraphCompiler* compiler,
                   int bias,
                   intptr_t cid_start,
                   intptr_t cid_end,
                   bool is_last,
                   compiler::Label* is_ok,
                   compiler::Label* deopt,
                   bool use_near_jump);
  void EmitBitTest(FlowGraphCompiler* compiler,
                   intptr_t min,
                   intptr_t max,
                   intptr_t mask,
                   compiler::Label* deopt);
  void EmitNullCheck(FlowGraphCompiler* compiler, compiler::Label* deopt);

  DISALLOW_COPY_AND_ASSIGN(CheckClassInstr);
};
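
// Bit-test sketch (illustrative): for a compact set of receiver cids such as
// {10, 12, 13}, the class check can be emitted as a single mask test over
// the cid range [10, 13]: deoptimize unless ((1 << (cid - 10)) & 0xD) != 0.
// This is the fast path that IsBitTest() and ComputeCidMask() enable.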
8502
class CheckSmiInstr : public TemplateInstruction<1, NoThrow, Pure> {
 public:
  CheckSmiInstr(Value* value, intptr_t deopt_id, TokenPosition token_pos)
      : TemplateInstruction(deopt_id),
        token_pos_(token_pos),
        licm_hoisted_(false) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  virtual TokenPosition token_pos() const { return token_pos_; }

  DECLARE_INSTRUCTION(CheckSmi)

  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  bool licm_hoisted() const { return licm_hoisted_; }
  void set_licm_hoisted(bool value) { licm_hoisted_ = value; }

 private:
  const TokenPosition token_pos_;
  bool licm_hoisted_;

  DISALLOW_COPY_AND_ASSIGN(CheckSmiInstr);
};

// The CheckNull instruction takes one input (`value`) and tests it for `null`.
// If `value` is `null`, an exception is thrown according to `exception_type`.
// Otherwise, execution proceeds to the next instruction.
class CheckNullInstr : public TemplateDefinition<1, Throws, Pure> {
 public:
  enum ExceptionType {
    kNoSuchMethod,
    kArgumentError,
    kCastError,
  };

  CheckNullInstr(Value* value,
                 const String& function_name,
                 intptr_t deopt_id,
                 TokenPosition token_pos,
                 ExceptionType exception_type = kNoSuchMethod)
      : TemplateDefinition(deopt_id),
        token_pos_(token_pos),
        function_name_(function_name),
        exception_type_(exception_type) {
    ASSERT(function_name.IsNotTemporaryScopedHandle());
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  virtual TokenPosition token_pos() const { return token_pos_; }
  const String& function_name() const { return function_name_; }
  ExceptionType exception_type() const { return exception_type_; }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return SlowPathSharingSupported(is_optimizing);
  }

  DECLARE_INSTRUCTION(CheckNull)

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  // CheckNull can implicitly call Dart code (the NoSuchMethodError
  // constructor), so it needs a deopt ID in optimized and unoptimized code.
  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }
  virtual bool CanBecomeDeoptimizationTarget() const { return true; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(Instruction* other) const;

  static void AddMetadataForRuntimeCall(CheckNullInstr* check_null,
                                        FlowGraphCompiler* compiler);

  virtual Value* RedefinedValue() const;

  ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const TokenPosition token_pos_;
  const String& function_name_;
  const ExceptionType exception_type_;

  DISALLOW_COPY_AND_ASSIGN(CheckNullInstr);
};
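
// A minimal construction sketch for CheckNullInstr (all variable names are
// hypothetical, for illustration):
//
//   auto* check = new (zone) CheckNullInstr(
//       new (zone) Value(receiver), selector_name, deopt_id, token_pos,
//       CheckNullInstr::kNoSuchMethod);
//
// If the input is later proven non-null, Canonicalize() folds the check away;
// otherwise a null input raises a NoSuchMethodError for `selector_name`.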
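// Checks that the input class id falls within the given range of class ids
// and deoptimizes otherwise. Unlike CheckClass, the input is a class id
// (e.g. the result of LoadClassId) rather than the instance itself.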
class CheckClassIdInstr : public TemplateInstruction<1, NoThrow> {
 public:
  CheckClassIdInstr(Value* value, CidRangeValue cids, intptr_t deopt_id)
      : TemplateInstruction(deopt_id), cids_(cids) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  const CidRangeValue& cids() const { return cids_; }

  DECLARE_INSTRUCTION(CheckClassId)

  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  bool Contains(intptr_t cid) const;

  CidRangeValue cids_;

  DISALLOW_COPY_AND_ASSIGN(CheckClassIdInstr);
};

// Base class for speculative [CheckArrayBoundInstr] and
// non-speculative [GenericCheckBoundInstr] bounds checking.
class CheckBoundBase : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  CheckBoundBase(Value* length, Value* index, intptr_t deopt_id)
      : TemplateDefinition(deopt_id) {
    SetInputAt(kLengthPos, length);
    SetInputAt(kIndexPos, index);
  }

  Value* length() const { return inputs_[kLengthPos]; }
  Value* index() const { return inputs_[kIndexPos]; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual CheckBoundBase* AsCheckBoundBase() { return this; }
  virtual const CheckBoundBase* AsCheckBoundBase() const { return this; }
  virtual Value* RedefinedValue() const;

  // Returns true if the bounds check can be eliminated without
  // changing the semantics (viz. 0 <= index < length).
  bool IsRedundant(bool use_loops = false);
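
  // For example, in `for (var i = 0; i < a.length; i++) a[i];` the check
  // guarding `a[i]` is redundant: the range of `i` (or, with `use_loops`,
  // loop induction analysis) proves 0 <= i < a.length on every iteration.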

  // Give a name to the location/input indices.
  enum { kLengthPos = 0, kIndexPos = 1 };

 private:
  DISALLOW_COPY_AND_ASSIGN(CheckBoundBase);
};

// Performs an array bounds check, where
//   safe_index := CheckArrayBound(length, index)
// returns the "safe" index when
//   0 <= index < length
// or otherwise deoptimizes (viz. speculative).
class CheckArrayBoundInstr : public CheckBoundBase {
 public:
  CheckArrayBoundInstr(Value* length, Value* index, intptr_t deopt_id)
      : CheckBoundBase(length, index, deopt_id),
        generalized_(false),
        licm_hoisted_(false) {}

  DECLARE_INSTRUCTION(CheckArrayBound)

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual bool ComputeCanDeoptimize() const { return true; }

  void mark_generalized() { generalized_ = true; }

  // Returns the length offset for array and string types.
  static intptr_t LengthOffsetFor(intptr_t class_id);

  static bool IsFixedLengthArrayType(intptr_t class_id);

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  void set_licm_hoisted(bool value) { licm_hoisted_ = value; }

 private:
  bool generalized_;
  bool licm_hoisted_;

  DISALLOW_COPY_AND_ASSIGN(CheckArrayBoundInstr);
};

// Performs an array bounds check, where
//   safe_index := GenericCheckBound(length, index)
// returns the "safe" index when
//   0 <= index < length
// or otherwise throws an out-of-bounds exception (viz. non-speculative).
class GenericCheckBoundInstr : public CheckBoundBase {
 public:
  // We prefer unboxed inputs on 64-bit platforms, where the values fit into
  // a register.
  static bool UseUnboxedRepresentation() {
    return compiler::target::kWordSize == 8;
  }

  GenericCheckBoundInstr(Value* length, Value* index, intptr_t deopt_id)
      : CheckBoundBase(length, index, deopt_id) {}

  virtual bool AttributesEqual(Instruction* other) const { return true; }

  DECLARE_INSTRUCTION(GenericCheckBound)

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }

  virtual Representation representation() const {
    return UseUnboxedRepresentation() ? kUnboxedInt64 : kTagged;
  }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == kIndexPos || idx == kLengthPos);
    return UseUnboxedRepresentation() ? kUnboxedInt64 : kTagged;
  }

  // GenericCheckBound can implicitly call Dart code (the RangeError or
  // ArgumentError constructor), so it can lazily deopt.
  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  virtual bool MayThrow() const { return true; }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return SlowPathSharingSupported(is_optimizing);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(GenericCheckBoundInstr);
};

// Evaluates the given comparison and deoptimizes if it evaluates to false.
class CheckConditionInstr : public Instruction {
 public:
  CheckConditionInstr(ComparisonInstr* comparison, intptr_t deopt_id)
      : Instruction(deopt_id), comparison_(comparison) {
    ASSERT(comparison->ArgumentCount() == 0);
    ASSERT(comparison->env() == nullptr);
    for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
      comparison->InputAt(i)->set_instruction(this);
    }
  }

  ComparisonInstr* comparison() const { return comparison_; }

  DECLARE_INSTRUCTION(CheckCondition)

  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(Instruction* other) const {
    return other->Cast<CheckConditionInstr>()->comparison()->AttributesEqual(
        comparison());
  }

  virtual intptr_t InputCount() const { return comparison()->InputCount(); }
  virtual Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }

  virtual bool MayThrow() const { return false; }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) {
    comparison()->RawSetInputAt(i, value);
  }

  ComparisonInstr* comparison_;

  DISALLOW_COPY_AND_ASSIGN(CheckConditionInstr);
};
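
// Usage sketch (illustrative): given an existing `comparison` (for example a
// StrictCompare that must hold on the fast path), wrap it so that a failing
// evaluation deoptimizes instead of branching:
//
//   auto* check = new (zone) CheckConditionInstr(comparison, deopt_id);
//
// Note that the wrapped comparison must push no arguments and own no
// environment (see the asserts in the constructor above).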

class IntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  IntConverterInstr(Representation from,
                    Representation to,
                    Value* value,
                    intptr_t deopt_id)
      : TemplateDefinition(deopt_id),
        from_representation_(from),
        to_representation_(to),
        is_truncating_(to == kUnboxedUint32) {
    ASSERT(from != to);
    ASSERT(from == kUnboxedInt64 || from == kUnboxedUint32 ||
           from == kUnboxedInt32 || from == kUntagged);
    ASSERT(to == kUnboxedInt64 || to == kUnboxedUint32 || to == kUnboxedInt32 ||
           to == kUntagged);
    ASSERT(from != kUntagged ||
           (to == kUnboxedIntPtr || to == kUnboxedFfiIntPtr));
    ASSERT(to != kUntagged ||
           (from == kUnboxedIntPtr || from == kUnboxedFfiIntPtr));
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  Representation from() const { return from_representation_; }
  Representation to() const { return to_representation_; }
  bool is_truncating() const { return is_truncating_; }

  void mark_truncating() { is_truncating_ = true; }

  Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool ComputeCanDeoptimize() const;

  virtual Representation representation() const { return to(); }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return from();
  }

  virtual bool AttributesEqual(Instruction* other) const {
    ASSERT(other->IsIntConverter());
    auto converter = other->AsIntConverter();
    return (converter->from() == from()) && (converter->to() == to()) &&
           (converter->is_truncating() == is_truncating());
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual CompileType ComputeType() const {
    // TODO(vegorov): Use range information to improve the inferred type.
    return CompileType::Int();
  }

  DECLARE_INSTRUCTION(IntConverter);

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const Representation from_representation_;
  const Representation to_representation_;
  bool is_truncating_;

  DISALLOW_COPY_AND_ASSIGN(IntConverterInstr);
};
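
// Worked example for IntConverterInstr: a kUnboxedInt64 -> kUnboxedUint32
// conversion is constructed as truncating, so only the low 32 bits survive:
//
//   0x123456789 (int64) --IntConverter--> 0x23456789 (uint32)
//
// A non-truncating conversion to a narrower representation may instead
// deoptimize when the value does not fit (see ComputeCanDeoptimize()).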

// Moves a floating-point value between CPU and FPU registers. Used to
// implement "softfp" calling conventions, where FPU arguments/return values
// are passed in normal CPU registers.
class BitCastInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  BitCastInstr(Representation from, Representation to, Value* value)
      : TemplateDefinition(DeoptId::kNone),
        from_representation_(from),
        to_representation_(to) {
    ASSERT(from != to);
    ASSERT((to == kUnboxedInt32 && from == kUnboxedFloat) ||
           (to == kUnboxedFloat && from == kUnboxedInt32) ||
           (to == kUnboxedInt64 && from == kUnboxedDouble) ||
           (to == kUnboxedDouble && from == kUnboxedInt64));
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  Representation from() const { return from_representation_; }
  Representation to() const { return to_representation_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return to(); }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return from();
  }

  virtual bool AttributesEqual(Instruction* other) const {
    ASSERT(other->IsBitCast());
    BitCastInstr* converter = other->AsBitCast();
    return converter->from() == from() && converter->to() == to();
  }

  virtual CompileType ComputeType() const { return CompileType::Dynamic(); }

  DECLARE_INSTRUCTION(BitCast);

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const Representation from_representation_;
  const Representation to_representation_;

  DISALLOW_COPY_AND_ASSIGN(BitCastInstr);
};
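
// A BitCast is a pure reinterpretation of the bit pattern, equivalent to
// (sketch in plain C++):
//
//   int64_t BitCastDoubleToInt64(double value) {
//     int64_t result;
//     memcpy(&result, &value, sizeof(result));  // copy bits, no conversion
//     return result;
//   }
//
// e.g. bit-casting the double 1.0 yields the int64 0x3FF0000000000000.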

// SimdOpInstr
//
// All SIMD intrinsics and recognized methods are represented via instances
// of SimdOpInstr; the particular type of SimdOp is selected by
// SimdOpInstr::Kind.
//
// The defines below are used to construct SIMD_OP_LIST - a list of all SIMD
// operations. SIMD_OP_LIST contains information such as arity, input types,
// and output type for each SIMD op, and is used to derive things like input
// and output representations, the type of the return value, etc.
//
// Lists of SIMD ops are defined using the macros M, OP and BINARY_OP, which
// are expected to have the following signature:
//
//   (Arity, HasMask, Name, (In_0, ..., In_Arity), Out)
//
// where:
//
//   HasMask is either _ or MASK and determines whether the operation has a
//   constant mask attribute,
//   In_0, ..., In_Arity are the input types, and
//   Out is the output type.
//

// A binary SIMD op with the given name that has signature T x T -> T.
#define SIMD_BINARY_OP(M, T, Name) M(2, _, T##Name, (T, T), T)
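
// For example, SIMD_BINARY_OP(OP, Float32x4, Add) expands to
//
//   OP(2, _, Float32x4Add, (Float32x4, Float32x4), Float32x4)
//
// i.e. a two-input operation named Float32x4Add taking (Float32x4, Float32x4)
// and producing Float32x4, with no mask attribute.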

// List of SIMD_BINARY_OPs common to Float32x4 and Float64x2.
// Note: M is for recognized methods and OP is for operators.
#define SIMD_BINARY_FLOAT_OP_LIST(M, OP, T) \
  SIMD_BINARY_OP(OP, T, Add) \
  SIMD_BINARY_OP(OP, T, Sub) \
  SIMD_BINARY_OP(OP, T, Mul) \
  SIMD_BINARY_OP(OP, T, Div) \
  SIMD_BINARY_OP(M, T, Min) \
  SIMD_BINARY_OP(M, T, Max)

// List of SIMD_BINARY_OPs for Int32x4.
// Note: M is for recognized methods and OP is for operators.
#define SIMD_BINARY_INTEGER_OP_LIST(M, OP, T) \
  SIMD_BINARY_OP(OP, T, Add) \
  SIMD_BINARY_OP(OP, T, Sub) \
  SIMD_BINARY_OP(OP, T, BitAnd) \
  SIMD_BINARY_OP(OP, T, BitOr) \
  SIMD_BINARY_OP(OP, T, BitXor)

// Given the signature of a SIMD op, construct its per-component variations.
#define SIMD_PER_COMPONENT_XYZW(M, Arity, Name, Inputs, Output) \
  M(Arity, _, Name##X, Inputs, Output) \
  M(Arity, _, Name##Y, Inputs, Output) \
  M(Arity, _, Name##Z, Inputs, Output) \
  M(Arity, _, Name##W, Inputs, Output)

// Defines a conversion between two SIMD types.
#define SIMD_CONVERSION(M, FromType, ToType) \
  M(1, _, FromType##To##ToType, (FromType), ToType)

// List of all recognized SIMD operations.
// Note: except for operations that map to operators (Add, Mul, Sub, Div,
// BitXor, BitOr), all other operations must match the names used by
// MethodRecognizer. This allows us to autogenerate the conversion from
// MethodRecognizer::Kind into SimdOpInstr::Kind (see the KindForMethod
// helper).
// Note: M is for those SimdOps that are recognized methods and BINARY_OP
// is for operators.
#define SIMD_OP_LIST(M, BINARY_OP) \
  SIMD_BINARY_FLOAT_OP_LIST(M, BINARY_OP, Float32x4) \
  SIMD_BINARY_FLOAT_OP_LIST(M, BINARY_OP, Float64x2) \
  SIMD_BINARY_INTEGER_OP_LIST(M, BINARY_OP, Int32x4) \
  SIMD_PER_COMPONENT_XYZW(M, 1, Float32x4Shuffle, (Float32x4), Double) \
  SIMD_PER_COMPONENT_XYZW(M, 2, Float32x4With, (Double, Float32x4), Float32x4) \
  SIMD_PER_COMPONENT_XYZW(M, 1, Int32x4GetFlag, (Int32x4), Bool) \
  SIMD_PER_COMPONENT_XYZW(M, 2, Int32x4WithFlag, (Int32x4, Bool), Int32x4) \
  M(1, MASK, Float32x4Shuffle, (Float32x4), Float32x4) \
  M(1, MASK, Int32x4Shuffle, (Int32x4), Int32x4) \
  M(2, MASK, Float32x4ShuffleMix, (Float32x4, Float32x4), Float32x4) \
  M(2, MASK, Int32x4ShuffleMix, (Int32x4, Int32x4), Int32x4) \
  M(2, _, Float32x4Equal, (Float32x4, Float32x4), Int32x4) \
  M(2, _, Float32x4GreaterThan, (Float32x4, Float32x4), Int32x4) \
  M(2, _, Float32x4GreaterThanOrEqual, (Float32x4, Float32x4), Int32x4) \
  M(2, _, Float32x4LessThan, (Float32x4, Float32x4), Int32x4) \
  M(2, _, Float32x4LessThanOrEqual, (Float32x4, Float32x4), Int32x4) \
  M(2, _, Float32x4NotEqual, (Float32x4, Float32x4), Int32x4) \
  M(4, _, Int32x4FromInts, (Int32, Int32, Int32, Int32), Int32x4) \
  M(4, _, Int32x4FromBools, (Bool, Bool, Bool, Bool), Int32x4) \
  M(4, _, Float32x4FromDoubles, (Double, Double, Double, Double), Float32x4) \
  M(2, _, Float64x2FromDoubles, (Double, Double), Float64x2) \
  M(0, _, Float32x4Zero, (), Float32x4) \
  M(0, _, Float64x2Zero, (), Float64x2) \
  M(1, _, Float32x4Splat, (Double), Float32x4) \
  M(1, _, Float64x2Splat, (Double), Float64x2) \
  M(1, _, Int32x4GetSignMask, (Int32x4), Int8) \
  M(1, _, Float32x4GetSignMask, (Float32x4), Int8) \
  M(1, _, Float64x2GetSignMask, (Float64x2), Int8) \
  M(2, _, Float32x4Scale, (Double, Float32x4), Float32x4) \
  M(2, _, Float64x2Scale, (Float64x2, Double), Float64x2) \
  M(1, _, Float32x4Sqrt, (Float32x4), Float32x4) \
  M(1, _, Float64x2Sqrt, (Float64x2), Float64x2) \
  M(1, _, Float32x4Reciprocal, (Float32x4), Float32x4) \
  M(1, _, Float32x4ReciprocalSqrt, (Float32x4), Float32x4) \
  M(1, _, Float32x4Negate, (Float32x4), Float32x4) \
  M(1, _, Float64x2Negate, (Float64x2), Float64x2) \
  M(1, _, Float32x4Abs, (Float32x4), Float32x4) \
  M(1, _, Float64x2Abs, (Float64x2), Float64x2) \
  M(3, _, Float32x4Clamp, (Float32x4, Float32x4, Float32x4), Float32x4) \
  M(1, _, Float64x2GetX, (Float64x2), Double) \
  M(1, _, Float64x2GetY, (Float64x2), Double) \
  M(2, _, Float64x2WithX, (Float64x2, Double), Float64x2) \
  M(2, _, Float64x2WithY, (Float64x2, Double), Float64x2) \
  M(3, _, Int32x4Select, (Int32x4, Float32x4, Float32x4), Float32x4) \
  SIMD_CONVERSION(M, Float32x4, Int32x4) \
  SIMD_CONVERSION(M, Int32x4, Float32x4) \
  SIMD_CONVERSION(M, Float32x4, Float64x2) \
  SIMD_CONVERSION(M, Float64x2, Float32x4)

class SimdOpInstr : public Definition {
 public:
  enum Kind {
#define DECLARE_ENUM(Arity, Mask, Name, ...) k##Name,
    SIMD_OP_LIST(DECLARE_ENUM, DECLARE_ENUM)
#undef DECLARE_ENUM
    kIllegalSimdOp,
  };
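
  // With SIMD_OP_LIST expanded, the enumerators read kFloat32x4Add,
  // kFloat32x4Sub, ..., ending with the kIllegalSimdOp sentinel.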

  // Create SimdOp from the arguments of the given call and the given receiver.
  static SimdOpInstr* CreateFromCall(Zone* zone,
                                     MethodRecognizer::Kind kind,
                                     Definition* receiver,
                                     Instruction* call,
                                     intptr_t mask = 0);

  // Create SimdOp from the arguments of the given factory call.
  static SimdOpInstr* CreateFromFactoryCall(Zone* zone,
                                            MethodRecognizer::Kind kind,
                                            Instruction* call);

  // Create a binary SimdOp instr.
  static SimdOpInstr* Create(Kind kind,
                             Value* left,
                             Value* right,
                             intptr_t deopt_id) {
    return new SimdOpInstr(kind, left, right, deopt_id);
  }

  // Create a binary SimdOp instr.
  static SimdOpInstr* Create(MethodRecognizer::Kind kind,
                             Value* left,
                             Value* right,
                             intptr_t deopt_id) {
    return new SimdOpInstr(KindForMethod(kind), left, right, deopt_id);
  }

  // Create a unary SimdOp.
  static SimdOpInstr* Create(MethodRecognizer::Kind kind,
                             Value* left,
                             intptr_t deopt_id) {
    return new SimdOpInstr(KindForMethod(kind), left, deopt_id);
  }

  static Kind KindForOperator(MethodRecognizer::Kind kind);

  static Kind KindForMethod(MethodRecognizer::Kind method_kind);

  // Convert a combination of SIMD cid and an arithmetic token into Kind, e.g.
  // Float32x4 and Token::kADD becomes Float32x4Add.
  static Kind KindForOperator(intptr_t cid, Token::Kind op);

  virtual intptr_t InputCount() const;
  virtual Value* InputAt(intptr_t i) const {
    ASSERT(0 <= i && i < InputCount());
    return inputs_[i];
  }

  Kind kind() const { return kind_; }
  intptr_t mask() const {
    ASSERT(HasMask());
    return mask_;
  }

  virtual Representation representation() const;
  virtual Representation RequiredInputRepresentation(intptr_t idx) const;

  virtual CompileType ComputeType() const;

  virtual bool MayThrow() const { return false; }
  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual bool HasUnknownSideEffects() const { return false; }
  virtual bool AllowsCSE() const { return true; }

  virtual bool AttributesEqual(Instruction* other) const {
    SimdOpInstr* other_op = other->AsSimdOp();
    return kind() == other_op->kind() &&
           (!HasMask() || mask() == other_op->mask());
  }

  DECLARE_INSTRUCTION(SimdOp)
  PRINT_OPERANDS_TO_SUPPORT
  ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
  ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

 private:
  SimdOpInstr(Kind kind, intptr_t deopt_id)
      : Definition(deopt_id), kind_(kind) {}

  SimdOpInstr(Kind kind, Value* left, intptr_t deopt_id)
      : Definition(deopt_id), kind_(kind) {
    SetInputAt(0, left);
  }

  SimdOpInstr(Kind kind, Value* left, Value* right, intptr_t deopt_id)
      : Definition(deopt_id), kind_(kind) {
    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  bool HasMask() const;
  void set_mask(intptr_t mask) { mask_ = mask; }

  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }

  // We consider SimdOpInstr instances to be very uncommon, so we don't
  // optimize them for size. Any instance of SimdOpInstr has enough space to
  // fit any variation.
  // TODO(dartbug.com/30949): Optimize this for size.
  const Kind kind_;
  Value* inputs_[4];
  intptr_t mask_;

  DISALLOW_COPY_AND_ASSIGN(SimdOpInstr);
};

#undef DECLARE_INSTRUCTION

class Environment : public ZoneAllocated {
 public:
  // Iterate the non-NULL values in the innermost level of an environment.
  class ShallowIterator : public ValueObject {
   public:
    explicit ShallowIterator(Environment* environment)
        : environment_(environment), index_(0) {}

    ShallowIterator(const ShallowIterator& other)
        : ValueObject(),
          environment_(other.environment_),
          index_(other.index_) {}

    ShallowIterator& operator=(const ShallowIterator& other) {
      environment_ = other.environment_;
      index_ = other.index_;
      return *this;
    }

    Environment* environment() const { return environment_; }

    void Advance() {
      ASSERT(!Done());
      ++index_;
    }

    bool Done() const {
      return (environment_ == NULL) || (index_ >= environment_->Length());
    }

    Value* CurrentValue() const {
      ASSERT(!Done());
      ASSERT(environment_->values_[index_] != NULL);
      return environment_->values_[index_];
    }

    void SetCurrentValue(Value* value) {
      ASSERT(!Done());
      ASSERT(value != NULL);
      environment_->values_[index_] = value;
    }

    Location CurrentLocation() const {
      ASSERT(!Done());
      return environment_->locations_[index_];
    }

    void SetCurrentLocation(Location loc) {
      ASSERT(!Done());
      environment_->locations_[index_] = loc;
    }

   private:
    Environment* environment_;
    intptr_t index_;
  };

  // Iterate all non-NULL values in an environment, including outer
  // environments. Note that the iterator skips empty environments.
  class DeepIterator : public ValueObject {
   public:
    explicit DeepIterator(Environment* environment) : iterator_(environment) {
      SkipDone();
    }

    void Advance() {
      ASSERT(!Done());
      iterator_.Advance();
      SkipDone();
    }

    bool Done() const { return iterator_.environment() == NULL; }

    Value* CurrentValue() const {
      ASSERT(!Done());
      return iterator_.CurrentValue();
    }

    void SetCurrentValue(Value* value) {
      ASSERT(!Done());
      iterator_.SetCurrentValue(value);
    }

    Location CurrentLocation() const {
      ASSERT(!Done());
      return iterator_.CurrentLocation();
    }

    void SetCurrentLocation(Location loc) {
      ASSERT(!Done());
      iterator_.SetCurrentLocation(loc);
    }

   private:
    void SkipDone() {
      while (!Done() && iterator_.Done()) {
        iterator_ = ShallowIterator(iterator_.environment()->outer());
      }
    }

    ShallowIterator iterator_;
  };

  // Construct an environment by building uses from an array of definitions.
  static Environment* From(Zone* zone,
                           const GrowableArray<Definition*>& definitions,
                           intptr_t fixed_parameter_count,
                           const ParsedFunction& parsed_function);

  void set_locations(Location* locations) {
    ASSERT(locations_ == NULL);
    locations_ = locations;
  }

  // Get the deopt_id associated with this environment.
  // Note that only outer environments have a deopt id associated with
  // them (set by DeepCopyToOuter).
  intptr_t deopt_id() const {
    ASSERT(deopt_id_ != DeoptId::kNone);
    return deopt_id_;
  }

  Environment* outer() const { return outer_; }

  Environment* Outermost() {
    Environment* result = this;
    while (result->outer() != NULL)
      result = result->outer();
    return result;
  }

  Value* ValueAt(intptr_t ix) const { return values_[ix]; }

  void PushValue(Value* value);

  intptr_t Length() const { return values_.length(); }

  Location LocationAt(intptr_t index) const {
    ASSERT((index >= 0) && (index < values_.length()));
    return locations_[index];
  }

  // The use index is the index in the flattened environment.
  Value* ValueAtUseIndex(intptr_t index) const {
    const Environment* env = this;
    while (index >= env->Length()) {
      ASSERT(env->outer_ != NULL);
      index -= env->Length();
      env = env->outer_;
    }
    return env->ValueAt(index);
  }
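
  // Worked example: with an inner environment of Length() == 3 nested in an
  // outer environment of Length() == 2, use index 4 satisfies 4 >= 3, so the
  // walk continues with index 4 - 3 = 1 in the outer environment and the
  // lookup returns outer->ValueAt(1).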

  intptr_t fixed_parameter_count() const { return fixed_parameter_count_; }

  intptr_t CountArgsPushed() {
    intptr_t count = 0;
    for (Environment::DeepIterator it(this); !it.Done(); it.Advance()) {
      if (it.CurrentValue()->definition()->IsPushArgument()) {
        count++;
      }
    }
    return count;
  }

  const Function& function() const { return parsed_function_.function(); }

  Environment* DeepCopy(Zone* zone) const { return DeepCopy(zone, Length()); }

  void DeepCopyTo(Zone* zone, Instruction* instr) const;
  void DeepCopyToOuter(Zone* zone,
                       Instruction* instr,
                       intptr_t outer_deopt_id) const;

  void DeepCopyAfterTo(Zone* zone,
                       Instruction* instr,
                       intptr_t argc,
                       Definition* dead,
                       Definition* result) const;

  void PrintTo(BaseTextBuffer* f) const;
  SExpression* ToSExpression(FlowGraphSerializer* s) const;
  const char* ToCString() const;

  // Deep copy an environment. The 'length' parameter may be less than the
  // environment's length in order to drop values (e.g., passed arguments)
  // from the copy.
  Environment* DeepCopy(Zone* zone, intptr_t length) const;

 private:
  friend class ShallowIterator;
  friend class compiler::BlockBuilder;  // For Environment constructor.
  friend class FlowGraphDeserializer;   // For constructor and deopt_id_.

  Environment(intptr_t length,
              intptr_t fixed_parameter_count,
              const ParsedFunction& parsed_function,
              Environment* outer)
      : values_(length),
        fixed_parameter_count_(fixed_parameter_count),
        parsed_function_(parsed_function),
        outer_(outer) {}

  GrowableArray<Value*> values_;
  Location* locations_ = nullptr;
  const intptr_t fixed_parameter_count_;
  // Deoptimization id associated with this environment. Only set for
  // outer environments.
  intptr_t deopt_id_ = DeoptId::kNone;
  const ParsedFunction& parsed_function_;
  Environment* outer_;

  DISALLOW_COPY_AND_ASSIGN(Environment);
};

// Visitor base class to visit each instruction and computation in a flow
// graph as defined by a reversed list of basic blocks.
class FlowGraphVisitor : public ValueObject {
 public:
  explicit FlowGraphVisitor(const GrowableArray<BlockEntryInstr*>& block_order)
      : current_iterator_(NULL), block_order_(&block_order) {}
  virtual ~FlowGraphVisitor() {}

  ForwardInstructionIterator* current_iterator() const {
    return current_iterator_;
  }

  // Visit each block in the block order, and for each block its
  // instructions in order from the block entry to exit.
  virtual void VisitBlocks();

// Visit functions for instruction classes, with an empty default
// implementation.
#define DECLARE_VISIT_INSTRUCTION(ShortName, Attrs) \
  virtual void Visit##ShortName(ShortName##Instr* instr) {}

  FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

 protected:
  void set_block_order(const GrowableArray<BlockEntryInstr*>& block_order) {
    block_order_ = &block_order;
  }

  ForwardInstructionIterator* current_iterator_;

 private:
  const GrowableArray<BlockEntryInstr*>* block_order_;
  DISALLOW_COPY_AND_ASSIGN(FlowGraphVisitor);
};
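
// A minimal visitor sketch (illustrative; the class name is hypothetical):
// count CheckSmi instructions by overriding a single visit function.
//
//   class CheckSmiCounter : public FlowGraphVisitor {
//    public:
//     explicit CheckSmiCounter(const GrowableArray<BlockEntryInstr*>& blocks)
//         : FlowGraphVisitor(blocks) {}
//
//     virtual void VisitCheckSmi(CheckSmiInstr* instr) { ++count_; }
//
//     intptr_t count() const { return count_; }
//
//    private:
//     intptr_t count_ = 0;
//   };
//
// VisitBlocks() dispatches every instruction in block order, invoking the
// overridden VisitCheckSmi and the empty default for all other instructions.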

// Helper macros for platform ports.
#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name) \
  LocationSummary* Name::MakeLocationSummary(Zone* zone, bool opt) const { \
    UNIMPLEMENTED(); \
    return NULL; \
  } \
  void Name::EmitNativeCode(FlowGraphCompiler* compiler) { UNIMPLEMENTED(); }
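
// For example, a port can stub out an instruction it does not implement yet
// (the instruction name below is hypothetical):
//
//   DEFINE_UNIMPLEMENTED_INSTRUCTION(FancyNewInstr)
//
// after which both MakeLocationSummary and EmitNativeCode simply hit
// UNIMPLEMENTED().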
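// Returns the selector of this call: the name of the target function for a
// static call, or the called function name for an instance call.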
template <intptr_t kExtraInputs>
StringPtr TemplateDartCall<kExtraInputs>::Selector() {
  if (auto static_call = this->AsStaticCall()) {
    return static_call->function().name();
  } else if (auto instance_call = this->AsInstanceCall()) {
    return instance_call->function_name().raw();
  } else {
    UNREACHABLE();
  }
}

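// Returns true if this value may evaluate to `value` at run time; this is
// always true unless the value is bound to a constant known to differ from
// `value`.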
inline bool Value::CanBe(const Object& value) {
  ConstantInstr* constant = definition()->AsConstant();
  return (constant == nullptr) || constant->value().raw() == value.raw();
}

}  // namespace dart

#endif  // RUNTIME_VM_COMPILER_BACKEND_IL_H_