// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/backend/range_analysis.h"

#include "vm/bit_vector.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/backend/loops.h"

namespace dart {

DEFINE_FLAG(bool,
            array_bounds_check_elimination,
            true,
            "Eliminate redundant bounds checks.");
DEFINE_FLAG(bool, trace_range_analysis, false, "Trace range analysis progress.");
DEFINE_FLAG(bool,
            trace_integer_ir_selection,
            false,
            "Print integer IR selection optimization pass.");
DECLARE_FLAG(bool, trace_constant_propagation);

// Quick access to the locally defined isolate() and zone() methods.
#define I (isolate())
#define Z (zone())

void RangeAnalysis::Analyze() {
  CollectValues();
  InsertConstraints();
  flow_graph_->GetLoopHierarchy().ComputeInduction();
  InferRanges();
  EliminateRedundantBoundsChecks();
  MarkUnreachableBlocks();

  NarrowMintToInt32();

  IntegerInstructionSelector iis(flow_graph_);
  iis.Select();

  RemoveConstraints();
}

// Helper method to chase through a chain of constraints to the underlying
// constrained definition.
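// For example (illustrative): given v2 := Constraint(v1) and
// v3 := Constraint(v2), UnwrapConstraint applied to v3 chases through both
// constraints and returns v1.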
static Definition* UnwrapConstraint(Definition* defn) {
  while (defn->IsConstraint()) {
    defn = defn->AsConstraint()->value()->definition();
  }
  return defn;
}

void RangeAnalysis::CollectValues() {
  auto graph_entry = flow_graph_->graph_entry();

  auto& initial = *graph_entry->initial_definitions();
  for (intptr_t i = 0; i < initial.length(); ++i) {
    Definition* current = initial[i];
    if (IsIntegerDefinition(current)) {
      values_.Add(current);
    }
  }

  for (intptr_t i = 0; i < graph_entry->SuccessorCount(); ++i) {
    auto successor = graph_entry->SuccessorAt(i);
    if (auto entry = successor->AsBlockEntryWithInitialDefs()) {
      const auto& initial = *entry->initial_definitions();
      for (intptr_t j = 0; j < initial.length(); ++j) {
        Definition* current = initial[j];
        if (IsIntegerDefinition(current)) {
          values_.Add(current);
        }
      }
    }
  }

  for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();
    JoinEntryInstr* join = block->AsJoinEntry();
    if (join != NULL) {
      for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
        PhiInstr* current = phi_it.Current();
        if (current->Type()->IsInt()) {
          values_.Add(current);
        }
      }
    }

    for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
         instr_it.Advance()) {
      Instruction* current = instr_it.Current();
      Definition* defn = current->AsDefinition();
      if (defn != NULL) {
        if (defn->HasSSATemp() && IsIntegerDefinition(defn)) {
          values_.Add(defn);
          if (defn->IsBinaryInt64Op()) {
            binary_int64_ops_.Add(defn->AsBinaryInt64Op());
          } else if (defn->IsShiftInt64Op() ||
                     defn->IsSpeculativeShiftInt64Op()) {
            shift_int64_ops_.Add(defn->AsShiftIntegerOp());
          }
        }
      }
      if (auto check = current->AsCheckBoundBase()) {
        bounds_checks_.Add(check);
      }
    }
  }
}

// Given a boundary (right operand) and a comparison operation, return
// a symbolic range constraint for the left operand of the comparison,
// assuming that it evaluated to true.
// For example, for the comparison a < b, symbol a is constrained with range
// [Smi::kMinValue, b - 1].
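// Illustrative summary of the ranges produced by the switch below
// (boundary b, all ranges over Smi):
//
//   a == b  =>  a in [b, b]
//   a != b  =>  a in [Smi::kMinValue, Smi::kMaxValue]  (no useful constraint)
//   a <  b  =>  a in [Smi::kMinValue, b - 1]
//   a >  b  =>  a in [b + 1, Smi::kMaxValue]
//   a <= b  =>  a in [Smi::kMinValue, b]
//   a >= b  =>  a in [b, Smi::kMaxValue]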
Range* RangeAnalysis::ConstraintSmiRange(Token::Kind op, Definition* boundary) {
  switch (op) {
    case Token::kEQ:
      return new (Z) Range(RangeBoundary::FromDefinition(boundary),
                           RangeBoundary::FromDefinition(boundary));
    case Token::kNE:
      return new (Z) Range(Range::Full(RangeBoundary::kRangeBoundarySmi));
    case Token::kLT:
      return new (Z) Range(RangeBoundary::MinSmi(),
                           RangeBoundary::FromDefinition(boundary, -1));
    case Token::kGT:
      return new (Z) Range(RangeBoundary::FromDefinition(boundary, 1),
                           RangeBoundary::MaxSmi());
    case Token::kLTE:
      return new (Z) Range(RangeBoundary::MinSmi(),
                           RangeBoundary::FromDefinition(boundary));
    case Token::kGTE:
      return new (Z) Range(RangeBoundary::FromDefinition(boundary),
                           RangeBoundary::MaxSmi());
    default:
      UNREACHABLE();
      return NULL;
  }
}

ConstraintInstr* RangeAnalysis::InsertConstraintFor(Value* use,
                                                    Definition* defn,
                                                    Range* constraint_range,
                                                    Instruction* after) {
  // No need to constrain constants.
  if (defn->IsConstant()) return NULL;

  // Check if the value is already constrained to avoid inserting duplicated
  // constraints.
  ConstraintInstr* constraint = after->next()->AsConstraint();
  while (constraint != NULL) {
    if ((constraint->value()->definition() == defn) &&
        constraint->constraint()->Equals(constraint_range)) {
      return NULL;
    }
    constraint = constraint->next()->AsConstraint();
  }

  constraint = new (Z) ConstraintInstr(use->CopyWithType(), constraint_range);

  flow_graph_->InsertAfter(after, constraint, NULL, FlowGraph::kValue);
  FlowGraph::RenameDominatedUses(defn, constraint, constraint);
  constraints_.Add(constraint);
  return constraint;
}

bool RangeAnalysis::ConstrainValueAfterBranch(Value* use, Definition* defn) {
  BranchInstr* branch = use->instruction()->AsBranch();
  RelationalOpInstr* rel_op = branch->comparison()->AsRelationalOp();
  if ((rel_op != NULL) && (rel_op->operation_cid() == kSmiCid)) {
    // Found comparison of two smis. Constrain defn at true and false
    // successors using the other operand as a boundary.
    Definition* boundary;
    Token::Kind op_kind;
    if (use->use_index() == 0) {  // Left operand.
      boundary = rel_op->InputAt(1)->definition();
      op_kind = rel_op->kind();
    } else {
      ASSERT(use->use_index() == 1);  // Right operand.
      boundary = rel_op->InputAt(0)->definition();
      // InsertConstraintFor assumes that defn is the left operand of a
      // comparison; if it is the right operand, flip the comparison.
      op_kind = Token::FlipComparison(rel_op->kind());
    }

    // Constrain definition at the true successor.
    ConstraintInstr* true_constraint =
        InsertConstraintFor(use, defn, ConstraintSmiRange(op_kind, boundary),
                            branch->true_successor());
    if (true_constraint != NULL) {
      true_constraint->set_target(branch->true_successor());
    }

    // Constrain definition with a negated condition at the false successor.
    ConstraintInstr* false_constraint = InsertConstraintFor(
        use, defn,
        ConstraintSmiRange(Token::NegateComparison(op_kind), boundary),
        branch->false_successor());
    if (false_constraint != NULL) {
      false_constraint->set_target(branch->false_successor());
    }

    return true;
  }

  return false;
}

void RangeAnalysis::InsertConstraintsFor(Definition* defn) {
  for (Value* use = defn->input_use_list(); use != NULL;
       use = use->next_use()) {
    if (auto branch = use->instruction()->AsBranch()) {
      if (ConstrainValueAfterBranch(use, defn)) {
        Value* other_value = branch->InputAt(1 - use->use_index());
        if (!IsIntegerDefinition(other_value->definition())) {
          ConstrainValueAfterBranch(other_value, other_value->definition());
        }
      }
    } else if (auto check = use->instruction()->AsCheckBoundBase()) {
      ConstrainValueAfterCheckBound(use, check, defn);
    }
  }
}

void RangeAnalysis::ConstrainValueAfterCheckBound(Value* use,
                                                  CheckBoundBase* check,
                                                  Definition* defn) {
  const intptr_t use_index = use->use_index();

  Range* constraint_range = NULL;
  if (use_index == CheckBoundBase::kIndexPos) {
    Definition* length = check->length()->definition();
    constraint_range = new (Z) Range(RangeBoundary::FromConstant(0),
                                     RangeBoundary::FromDefinition(length, -1));
  } else {
    ASSERT(use_index == CheckBoundBase::kLengthPos);
    Definition* index = check->index()->definition();
    constraint_range = new (Z)
        Range(RangeBoundary::FromDefinition(index, 1), RangeBoundary::MaxSmi());
  }
  InsertConstraintFor(use, defn, constraint_range, check);
}

void RangeAnalysis::InsertConstraints() {
  for (intptr_t i = 0; i < values_.length(); i++) {
    InsertConstraintsFor(values_[i]);
  }

  for (intptr_t i = 0; i < constraints_.length(); i++) {
    InsertConstraintsFor(constraints_[i]);
  }
}

const Range* RangeAnalysis::GetSmiRange(Value* value) const {
  Definition* defn = value->definition();
  const Range* range = defn->range();

  if ((range == NULL) && (defn->Type()->ToCid() != kSmiCid)) {
    // Type propagator determined that the reaching type for this use is Smi.
    // However, the definition itself is not a smi-definition and thus will
    // never have a range assigned to it. Just return the widest range
    // possible for this value.
    // We don't need to handle kMintCid here because all external mints
    // (e.g. results of loads or function calls) can be used only after they
    // pass through UnboxInt64Instr, which is considered a mint-definition
    // and will have a range assigned to it.
    // Note that we can't return NULL here because NULL is used as the
    // lattice's bottom element to indicate that the range was not computed
    // *yet*.
    return &smi_range_;
  }

  return range;
}

const Range* RangeAnalysis::GetIntRange(Value* value) const {
  Definition* defn = value->definition();
  const Range* range = defn->range();

  if ((range == NULL) && !defn->Type()->IsInt()) {
    // Type propagator determined that the reaching type for this use is int.
    // However, the definition itself is not an int-definition and thus will
    // never have a range assigned to it. Just return the widest range
    // possible for this value.
    // Note that we can't return NULL here because NULL is used as the
    // lattice's bottom element to indicate that the range was not computed
    // *yet*.
    return &int64_range_;
  }

  return range;
}

const char* RangeBoundary::KindToCString(Kind kind) {
  switch (kind) {
#define KIND_CASE(name)                                                        \
  case Kind::k##name:                                                          \
    return #name;
    FOR_EACH_RANGE_BOUNDARY_KIND(KIND_CASE)
#undef KIND_CASE
    default:
      UNREACHABLE();
      return nullptr;
  }
}

bool RangeBoundary::ParseKind(const char* str, Kind* out) {
#define KIND_CASE(name)                                                        \
  if (strcmp(str, #name) == 0) {                                               \
    *out = Kind::k##name;                                                      \
    return true;                                                               \
  }
  FOR_EACH_RANGE_BOUNDARY_KIND(KIND_CASE)
#undef KIND_CASE
  return false;
}

static bool AreEqualDefinitions(Definition* a, Definition* b) {
  a = UnwrapConstraint(a);
  b = UnwrapConstraint(b);
  return (a == b) || (a->AllowsCSE() && b->AllowsCSE() && a->Equals(b));
}

static bool DependOnSameSymbol(const RangeBoundary& a, const RangeBoundary& b) {
  return a.IsSymbol() && b.IsSymbol() &&
         AreEqualDefinitions(a.symbol(), b.symbol());
}

// Given the current range of a phi and a newly computed range, check
// if it is growing towards negative infinity; if it is, widen the minimum
// to the smallest constant for the given range size (e.g. MinSmi).
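// For example (illustrative), for a downward-counting phi
//
//          x <- phi(x0, x - 1)
//
// every fix-point iteration lowers the inferred minimum (x0, x0 - 1, ...);
// widening maps such an unstable bound directly to the range limit so that
// the iteration converges in a bounded number of steps.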
static RangeBoundary WidenMin(const Range* range,
                              const Range* new_range,
                              RangeBoundary::RangeSize size) {
  RangeBoundary min = range->min();
  RangeBoundary new_min = new_range->min();

  if (min.IsSymbol()) {
    if (min.LowerBound().Overflowed(size)) {
      return RangeBoundary::MinConstant(size);
    } else if (DependOnSameSymbol(min, new_min)) {
      return min.offset() <= new_min.offset()
                 ? min
                 : RangeBoundary::MinConstant(size);
    } else if (min.UpperBound(size) <= new_min.LowerBound(size)) {
      return min;
    }
  }

  min = Range::ConstantMin(range, size);
  new_min = Range::ConstantMin(new_range, size);

  return (min.ConstantValue() <= new_min.ConstantValue())
             ? min
             : RangeBoundary::MinConstant(size);
}

// Given the current range of a phi and a newly computed range, check
// if it is growing towards positive infinity; if it is, widen the maximum
// to the largest constant for the given range size (e.g. MaxSmi).
static RangeBoundary WidenMax(const Range* range,
                              const Range* new_range,
                              RangeBoundary::RangeSize size) {
  RangeBoundary max = range->max();
  RangeBoundary new_max = new_range->max();

  if (max.IsSymbol()) {
    if (max.UpperBound().Overflowed(size)) {
      return RangeBoundary::MaxConstant(size);
    } else if (DependOnSameSymbol(max, new_max)) {
      return max.offset() >= new_max.offset()
                 ? max
                 : RangeBoundary::MaxConstant(size);
    } else if (max.LowerBound(size) >= new_max.UpperBound(size)) {
      return max;
    }
  }

  max = Range::ConstantMax(range, size);
  new_max = Range::ConstantMax(new_range, size);

  return (max.ConstantValue() >= new_max.ConstantValue())
             ? max
             : RangeBoundary::MaxConstant(size);
}

// Given the current range of a phi and a newly computed range, check
// if we can perform narrowing: use the newly computed minimum to improve
// the precision of the computed range. We do this only if the current
// minimum was widened and is equal to MinSmi.
// The newly computed minimum is expected to be greater than or equal to the
// old one, as we are running after the widening phase.
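// For example (illustrative), if widening turned the minimum of a loop phi
// into MinSmi but the constrained re-computation now yields min >= 0,
// narrowing replaces the widened MinSmi with that tighter bound while
// leaving a minimum that was never widened untouched.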
static RangeBoundary NarrowMin(const Range* range,
                               const Range* new_range,
                               RangeBoundary::RangeSize size) {
  const RangeBoundary min = Range::ConstantMin(range, size);
  const RangeBoundary new_min = Range::ConstantMin(new_range, size);
  if (min.ConstantValue() > new_min.ConstantValue()) return range->min();

  // TODO(vegorov): consider using negative infinity to indicate widened bound.
  return range->min().IsMinimumOrBelow(size) ? new_range->min() : range->min();
}

// Given the current range of a phi and a newly computed range, check
// if we can perform narrowing: use the newly computed maximum to improve
// the precision of the computed range. We do this only if the current
// maximum was widened and is equal to MaxSmi.
// The newly computed maximum is expected to be less than or equal to the
// old one, as we are running after the widening phase.
static RangeBoundary NarrowMax(const Range* range,
                               const Range* new_range,
                               RangeBoundary::RangeSize size) {
  const RangeBoundary max = Range::ConstantMax(range, size);
  const RangeBoundary new_max = Range::ConstantMax(new_range, size);
  if (max.ConstantValue() < new_max.ConstantValue()) return range->max();

  // TODO(vegorov): consider using positive infinity to indicate widened bound.
  return range->max().IsMaximumOrAbove(size) ? new_range->max() : range->max();
}

char RangeAnalysis::OpPrefix(JoinOperator op) {
  switch (op) {
    case WIDEN:
      return 'W';
    case NARROW:
      return 'N';
    case NONE:
      return 'I';
  }
  UNREACHABLE();
  return ' ';
}

static RangeBoundary::RangeSize RangeSizeForPhi(Definition* phi) {
  ASSERT(phi->IsPhi());
  if (phi->Type()->ToCid() == kSmiCid) {
    return RangeBoundary::kRangeBoundarySmi;
  } else if (phi->representation() == kUnboxedInt32) {
    return RangeBoundary::kRangeBoundaryInt32;
  } else if (phi->Type()->IsInt()) {
    return RangeBoundary::kRangeBoundaryInt64;
  } else {
    UNREACHABLE();
    return RangeBoundary::kRangeBoundaryInt64;
  }
}

bool RangeAnalysis::InferRange(JoinOperator op,
                               Definition* defn,
                               intptr_t iteration) {
  Range range;
  defn->InferRange(this, &range);

  if (!Range::IsUnknown(&range)) {
    if (!Range::IsUnknown(defn->range()) && defn->IsPhi()) {
      const RangeBoundary::RangeSize size = RangeSizeForPhi(defn);
      if (op == WIDEN) {
        range = Range(WidenMin(defn->range(), &range, size),
                      WidenMax(defn->range(), &range, size));
      } else if (op == NARROW) {
        range = Range(NarrowMin(defn->range(), &range, size),
                      NarrowMax(defn->range(), &range, size));
      }
    }

    if (!range.Equals(defn->range())) {
#ifndef PRODUCT
      if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
        THR_Print("%c [%" Pd "] %s: %s => %s\n", OpPrefix(op), iteration,
                  defn->ToCString(), Range::ToCString(defn->range()),
                  Range::ToCString(&range));
      }
#endif  // !PRODUCT
      defn->set_range(range);
      return true;
    }
  }

  return false;
}

void RangeAnalysis::CollectDefinitions(BitVector* set) {
  for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();

    JoinEntryInstr* join = block->AsJoinEntry();
    if (join != NULL) {
      for (PhiIterator it(join); !it.Done(); it.Advance()) {
        PhiInstr* phi = it.Current();
        if (set->Contains(phi->ssa_temp_index())) {
          definitions_.Add(phi);
        }
      }
    }

    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      Definition* defn = it.Current()->AsDefinition();
      if ((defn != NULL) && defn->HasSSATemp() &&
          set->Contains(defn->ssa_temp_index())) {
        definitions_.Add(defn);
      }
    }
  }
}

void RangeAnalysis::Iterate(JoinOperator op, intptr_t max_iterations) {
  // TODO(vegorov): switch to worklist if this becomes performance bottleneck.
  intptr_t iteration = 0;
  bool changed;
  do {
    changed = false;
    for (intptr_t i = 0; i < definitions_.length(); i++) {
      Definition* defn = definitions_[i];
      if (InferRange(op, defn, iteration)) {
        changed = true;
      }
    }

    iteration++;
  } while (changed && (iteration < max_iterations));
}

void RangeAnalysis::InferRanges() {
  Zone* zone = flow_graph_->zone();
  // Initialize bitvector for quick filtering of int values.
  BitVector* set =
      new (zone) BitVector(zone, flow_graph_->current_ssa_temp_index());
  for (intptr_t i = 0; i < values_.length(); i++) {
    set->Add(values_[i]->ssa_temp_index());
  }
  for (intptr_t i = 0; i < constraints_.length(); i++) {
    set->Add(constraints_[i]->ssa_temp_index());
  }

  // Collect integer definitions (including constraints) in reverse
  // postorder. This improves convergence speed compared to iterating
  // the values_ and constraints_ arrays separately.
  auto graph_entry = flow_graph_->graph_entry();
  const auto& initial = *graph_entry->initial_definitions();
  for (intptr_t i = 0; i < initial.length(); ++i) {
    Definition* definition = initial[i];
    if (set->Contains(definition->ssa_temp_index())) {
      definitions_.Add(definition);
    }
  }

  for (intptr_t i = 0; i < graph_entry->SuccessorCount(); ++i) {
    auto successor = graph_entry->SuccessorAt(i);
    if (auto function_entry = successor->AsFunctionEntry()) {
      const auto& initial = *function_entry->initial_definitions();
      for (intptr_t j = 0; j < initial.length(); ++j) {
        Definition* definition = initial[j];
        if (set->Contains(definition->ssa_temp_index())) {
          definitions_.Add(definition);
        }
      }
    }
  }

  CollectDefinitions(set);

  // Perform an iteration of range inference just propagating ranges
  // through the graph as-is without applying widening or narrowing.
  // This helps to improve the precision of the initial bounds.
  // We are doing 2 iterations to hit common cases where a phi range
  // stabilizes quickly and yields better precision than after
  // widening and narrowing.
  Iterate(NONE, 2);

  // Perform fix-point iteration of range inference applying the widening
  // operator to phis to ensure fast convergence.
  // Widening simply maps growing bounds to the respective range bound.
  Iterate(WIDEN, kMaxInt32);

  // Perform fix-point iteration of range inference applying narrowing
  // to phis to compute a more accurate range.
  // Narrowing only improves those boundaries that were widened up to a
  // range boundary and leaves other boundaries intact.
  Iterate(NARROW, kMaxInt32);
}

void RangeAnalysis::AssignRangesRecursively(Definition* defn) {
  if (!Range::IsUnknown(defn->range())) {
    return;
  }

  if (!IsIntegerDefinition(defn)) {
    return;
  }

  for (intptr_t i = 0; i < defn->InputCount(); i++) {
    Definition* input_defn = defn->InputAt(i)->definition();
    if (!input_defn->HasSSATemp() || input_defn->IsConstant()) {
      AssignRangesRecursively(input_defn);
    }
  }

  Range new_range;
  defn->InferRange(this, &new_range);
  if (!Range::IsUnknown(&new_range)) {
    defn->set_range(new_range);
  }
}

// Scheduler is a helper class that inserts floating, control-flow-free
// subgraphs into the flow graph.
// It always attempts to schedule instructions into the loop preheader,
// similar to the LICM optimization pass.
// Scheduler supports rollback: it keeps track of the instructions it
// schedules and can remove all instructions it inserted from the graph.
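// Sketch of the intended usage pattern (see BoundsCheckGeneralizer below):
//
//   scheduler.Start();                     // start recording insertions
//   check = scheduler.Emit(check, sink);   // try to hoist into a preheader
//   if (check == NULL) {
//     scheduler.Rollback();                // undo everything emitted so far
//   }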
class Scheduler {
 public:
  explicit Scheduler(FlowGraph* flow_graph)
      : flow_graph_(flow_graph),
        loop_headers_(flow_graph->GetLoopHierarchy().headers()),
        pre_headers_(loop_headers_.length()) {
    for (intptr_t i = 0; i < loop_headers_.length(); i++) {
      pre_headers_.Add(loop_headers_[i]->ImmediateDominator());
    }
  }

  // Clear the list of emitted instructions.
  void Start() { emitted_.Clear(); }

  // Given a floating instruction, attempt to schedule it into one of the
  // loop preheaders that dominate the given post_dominator instruction.
  // Some of the instruction's inputs can potentially be unscheduled as well.
  // Returns NULL if the scheduling fails (e.g. inputs are not invariant for
  // any loop containing post_dominator).
  // The resulting schedule should be equivalent to one obtained by inserting
  // instructions right before post_dominator and running CSE and LICM passes.
  template <typename T>
  T* Emit(T* instruction, Instruction* post_dominator) {
    return static_cast<T*>(EmitRecursively(instruction, post_dominator));
  }

  // Undo all insertions recorded in the list of emitted instructions.
  void Rollback() {
    for (intptr_t i = emitted_.length() - 1; i >= 0; i--) {
      emitted_[i]->RemoveFromGraph();
    }
    emitted_.Clear();
  }

 private:
  typedef DirectChainedHashMap<PointerKeyValueTrait<Instruction> > Map;

  Instruction* EmitRecursively(Instruction* instruction, Instruction* sink) {
    // Schedule all unscheduled inputs and unwrap all constrained inputs.
    for (intptr_t i = 0; i < instruction->InputCount(); i++) {
      Definition* defn = instruction->InputAt(i)->definition();

      // Instruction is not in the graph yet, which means that none of
      // its input uses should be recorded at defn's use chains.
      // Verify this assumption to ensure that we are not going to
      // leave use-lists in an inconsistent state when we start
      // rewriting inputs via set_definition.
      ASSERT(instruction->InputAt(i)->IsSingleUse() &&
             !defn->HasOnlyInputUse(instruction->InputAt(i)));

      if (!defn->HasSSATemp()) {
        Definition* scheduled = Emit(defn, sink);
        if (scheduled == NULL) {
          return NULL;
        }
        instruction->InputAt(i)->set_definition(scheduled);
      } else if (defn->IsConstraint()) {
        instruction->InputAt(i)->set_definition(UnwrapConstraint(defn));
      }
    }

    // Attempt to find an equivalent instruction that was already scheduled.
    // If the instruction is still in the graph (it could have been
    // un-scheduled by a rollback action) and it dominates the sink - use it.
    Instruction* emitted = map_.LookupValue(instruction);
    if (emitted != NULL && !emitted->WasEliminated() &&
        sink->IsDominatedBy(emitted)) {
      return emitted;
    }

    // Attempt to find a suitable pre-header. Iterate loop headers backwards
    // to attempt scheduling into the outermost loop first.
    for (intptr_t i = loop_headers_.length() - 1; i >= 0; i--) {
      BlockEntryInstr* header = loop_headers_[i];
      BlockEntryInstr* pre_header = pre_headers_[i];

      if (pre_header == NULL) {
        continue;
      }

      if (!sink->IsDominatedBy(header)) {
        continue;
      }

      Instruction* last = pre_header->last_instruction();

      bool inputs_are_invariant = true;
      for (intptr_t j = 0; j < instruction->InputCount(); j++) {
        Definition* defn = instruction->InputAt(j)->definition();
        if (!last->IsDominatedBy(defn)) {
          inputs_are_invariant = false;
          break;
        }
      }

      if (inputs_are_invariant) {
        EmitTo(pre_header, instruction);
        return instruction;
      }
    }

    return NULL;
  }

  void EmitTo(BlockEntryInstr* block, Instruction* instr) {
    GotoInstr* last = block->last_instruction()->AsGoto();
    flow_graph_->InsertBefore(
        last, instr, last->env(),
        instr->IsDefinition() ? FlowGraph::kValue : FlowGraph::kEffect);
    instr->CopyDeoptIdFrom(*last);

    map_.Insert(instr);
    emitted_.Add(instr);
  }

  FlowGraph* flow_graph_;
  Map map_;
  const ZoneGrowableArray<BlockEntryInstr*>& loop_headers_;
  GrowableArray<BlockEntryInstr*> pre_headers_;
  GrowableArray<Instruction*> emitted_;
};

// If the bounds check 0 <= index < length is not redundant, we attempt to
// replace it with a sequence of checks that guarantee
//
//      0 <= LowerBound(index) < UpperBound(index) < length
//
// and hoist all of those checks out of the enclosing loop.
//
// Upper/lower bounds are symbolic arithmetic expressions with +, -, and *
// operations.
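// For example (illustrative), in
//
//   for (var i = 0; i < n; i++) sum += a[i];
//
// the per-iteration check 0 <= i < a.length can be generalized: the lower
// bound 0 is proven non-negative statically, and a single hoisted check of
// the loop-invariant upper bound (n - 1 < a.length) in the preheader makes
// the original check inside the loop redundant.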
class BoundsCheckGeneralizer {
 public:
  BoundsCheckGeneralizer(RangeAnalysis* range_analysis, FlowGraph* flow_graph)
      : range_analysis_(range_analysis),
        flow_graph_(flow_graph),
        scheduler_(flow_graph) {}

  void TryGeneralize(CheckArrayBoundInstr* check) {
    Definition* upper_bound =
        ConstructUpperBound(check->index()->definition(), check);
    if (upper_bound == UnwrapConstraint(check->index()->definition())) {
      // Unable to construct upper bound for the index.
      if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
        THR_Print("Failed to construct upper bound for %s index\n",
                  check->ToCString());
      }
      return;
    }

    // Re-associate subexpressions inside upper_bound to collect all constants
    // together. This will expose more redundancies when we are going to emit
    // the upper bound through the scheduler.
    if (!Simplify(&upper_bound, NULL)) {
      if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
        THR_Print("Failed to simplify upper bound for %s index\n",
                  check->ToCString());
      }
      return;
    }
    upper_bound = ApplyConstraints(upper_bound, check);
    range_analysis_->AssignRangesRecursively(upper_bound);

    // We are going to constrain any symbols participating in + and *
    // operations to guarantee that they are positive. Find all symbols that
    // need constraining. If there is a subtraction subexpression with a
    // non-positive range, give up on generalization for simplicity.
    GrowableArray<Definition*> non_positive_symbols;
    if (!FindNonPositiveSymbols(&non_positive_symbols, upper_bound)) {
#ifndef PRODUCT
      if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
        THR_Print(
            "Failed to generalize %s index to %s"
            " (can't ensure positivity)\n",
            check->ToCString(), IndexBoundToCString(upper_bound));
      }
#endif  // !PRODUCT
      return;
    }

    // Check that we can statically prove that the lower bound of the index is
    // non-negative under the assumption that all potentially non-positive
    // symbols are positive.
    GrowableArray<ConstraintInstr*> positive_constraints(
        non_positive_symbols.length());
    Range* positive_range =
        new Range(RangeBoundary::FromConstant(0),
                  RangeBoundary::MaxConstant(RangeBoundary::kRangeBoundarySmi));
    for (intptr_t i = 0; i < non_positive_symbols.length(); i++) {
      Definition* symbol = non_positive_symbols[i];
      positive_constraints.Add(
          new ConstraintInstr(new Value(symbol), positive_range));
    }

    Definition* lower_bound =
        ConstructLowerBound(check->index()->definition(), check);
    // No need to simplify the lower bound before applying constraints as
    // we are not going to emit it.
    lower_bound = ApplyConstraints(lower_bound, check, &positive_constraints);
    range_analysis_->AssignRangesRecursively(lower_bound);

    if (!RangeUtils::IsPositive(lower_bound->range())) {
      // Can't prove that the lower bound is positive even with additional
      // checks against potentially non-positive symbols. Give up.
#ifndef PRODUCT
      if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
        THR_Print(
            "Failed to generalize %s index to %s"
            " (lower bound is not positive)\n",
            check->ToCString(), IndexBoundToCString(upper_bound));
      }
#endif  // !PRODUCT
      return;
    }

#ifndef PRODUCT
    if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
      THR_Print("For %s computed index bounds [%s, %s]\n", check->ToCString(),
                IndexBoundToCString(lower_bound),
                IndexBoundToCString(upper_bound));
    }
#endif  // !PRODUCT

    // At this point we know that 0 <= index < UpperBound(index) under
    // certain preconditions. Start by emitting these preconditions.
    scheduler_.Start();

    // AOT should only see non-deopting GenericCheckBound.
    ASSERT(!CompilerState::Current().is_aot());

    ConstantInstr* max_smi = flow_graph_->GetConstant(
        Smi::Handle(Smi::New(compiler::target::kSmiMax)));
    for (intptr_t i = 0; i < non_positive_symbols.length(); i++) {
      CheckArrayBoundInstr* precondition = new CheckArrayBoundInstr(
          new Value(max_smi), new Value(non_positive_symbols[i]),
          DeoptId::kNone);
      precondition->mark_generalized();
      precondition = scheduler_.Emit(precondition, check);
      if (precondition == NULL) {
        if (FLAG_trace_range_analysis) {
          THR_Print(" => failed to insert positivity constraint\n");
        }
        scheduler_.Rollback();
        return;
      }
    }

    CheckArrayBoundInstr* new_check = new CheckArrayBoundInstr(
        new Value(UnwrapConstraint(check->length()->definition())),
        new Value(upper_bound), DeoptId::kNone);
    new_check->mark_generalized();
    if (new_check->IsRedundant()) {
      if (FLAG_trace_range_analysis) {
        THR_Print(" => generalized check is redundant\n");
      }
      RemoveGeneralizedCheck(check);
      return;
    }

    new_check = scheduler_.Emit(new_check, check);
    if (new_check != NULL) {
      if (FLAG_trace_range_analysis) {
        THR_Print(" => generalized check was hoisted into B%" Pd "\n",
                  new_check->GetBlock()->block_id());
      }
      RemoveGeneralizedCheck(check);
    } else {
      if (FLAG_trace_range_analysis) {
        THR_Print(" => generalized check can't be hoisted\n");
      }
      scheduler_.Rollback();
    }
  }

  static void RemoveGeneralizedCheck(CheckArrayBoundInstr* check) {
    BinarySmiOpInstr* binary_op = check->index()->definition()->AsBinarySmiOp();
    if (binary_op != NULL) {
      binary_op->set_can_overflow(false);
    }
    check->ReplaceUsesWith(check->index()->definition());
    check->RemoveFromGraph();
  }

 private:
  BinarySmiOpInstr* MakeBinaryOp(Token::Kind op_kind,
                                 Definition* left,
                                 Definition* right) {
    return new BinarySmiOpInstr(op_kind, new Value(left), new Value(right),
                                DeoptId::kNone);
  }

  BinarySmiOpInstr* MakeBinaryOp(Token::Kind op_kind,
                                 Definition* left,
                                 intptr_t right) {
    ConstantInstr* constant_right =
        flow_graph_->GetConstant(Smi::Handle(Smi::New(right)));
    return MakeBinaryOp(op_kind, left, constant_right);
  }

  Definition* RangeBoundaryToDefinition(const RangeBoundary& bound) {
    Definition* symbol = UnwrapConstraint(bound.symbol());
    if (bound.offset() == 0) {
      return symbol;
    } else {
      return MakeBinaryOp(Token::kADD, symbol, bound.offset());
    }
  }

  typedef Definition* (BoundsCheckGeneralizer::*PhiBoundFunc)(PhiInstr*,
                                                              LoopInfo*,
                                                              InductionVar*,
                                                              Instruction*);

  // Construct symbolic lower bound for a value at the given point.
  Definition* ConstructLowerBound(Definition* value, Instruction* point) {
    return ConstructBound(&BoundsCheckGeneralizer::InductionVariableLowerBound,
                          value, point);
  }

  // Construct symbolic upper bound for a value at the given point.
  Definition* ConstructUpperBound(Definition* value, Instruction* point) {
    return ConstructBound(&BoundsCheckGeneralizer::InductionVariableUpperBound,
                          value, point);
  }

  // Helper methods to implement "older" business logic.
  // TODO(ajcbik): generalize with new induction variable information

  // Only accept loops with a smi constraint on smi induction.
  LoopInfo* GetSmiBoundedLoop(PhiInstr* phi) {
    LoopInfo* loop = phi->GetBlock()->loop_info();
    if (loop == nullptr) {
      return nullptr;
    }
    ConstraintInstr* limit = loop->limit();
    if (limit == nullptr) {
      return nullptr;
    }
    Definition* def = UnwrapConstraint(limit->value()->definition());
    Range* constraining_range = limit->constraint();
    if (GetSmiInduction(loop, def) != nullptr &&
        constraining_range->min().Equals(RangeBoundary::MinSmi()) &&
        constraining_range->max().IsSymbol() &&
        def->IsDominatedBy(constraining_range->max().symbol())) {
      return loop;
    }
    return nullptr;
  }

  // Returns true if x is invariant and is either based on a Smi definition
  // or is a Smi constant.
  static bool IsSmiInvariant(const InductionVar* x) {
    return InductionVar::IsInvariant(x) && Smi::IsValid(x->offset()) &&
           Smi::IsValid(x->mult()) &&
           (x->mult() == 0 || x->def()->Type()->ToCid() == kSmiCid);
  }

  // Only accept smi linear induction with unit stride.
  InductionVar* GetSmiInduction(LoopInfo* loop, Definition* def) {
    if (loop != nullptr && def->Type()->ToCid() == kSmiCid) {
      InductionVar* induc = loop->LookupInduction(def);
      int64_t stride;
      if (induc != nullptr && InductionVar::IsLinear(induc, &stride) &&
          stride == 1 && IsSmiInvariant(induc->initial())) {
        return induc;
      }
    }
    return nullptr;
  }

  // Reconstruct invariant.
  Definition* GenerateInvariant(InductionVar* induc) {
    Definition* res = nullptr;
    if (induc->mult() == 0) {
      res =
          flow_graph_->GetConstant(Smi::ZoneHandle(Smi::New(induc->offset())));
    } else {
      res = induc->def();
      if (induc->mult() != 1) {
        res = MakeBinaryOp(Token::kMUL, res, induc->mult());
      }
      if (induc->offset() != 0) {
        res = MakeBinaryOp(Token::kADD, res, induc->offset());
      }
    }
    return res;
  }

  // Construct a symbolic bound for a value at the given point:
  //
  //   1. if the value is an induction variable, use its bounds;
  //   2. if the value is an addition or multiplication, construct bounds for
  //      the left and right hand sides separately and use the
  //      addition/multiplication of the bounds as a bound (addition and
  //      multiplication are monotone operations for both operands);
  //   3. if the value is a subtraction, construct a bound for the left hand
  //      side and use subtraction of the right hand side from the left hand
  //      side bound as a bound for the expression (subtraction is monotone
  //      for the left hand side operand).
  //
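  // For example (illustrative), for an index expression i + j over two
  // unit-stride induction variables, rule 2 yields
  //
  //          UpperBound(i + j) = UpperBound(i) + UpperBound(j)
  //
  // while for i - c rule 3 yields UpperBound(i) - c, since only the left
  // operand of a subtraction is monotone.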
  Definition* ConstructBound(PhiBoundFunc phi_bound_func,
                             Definition* value,
                             Instruction* point) {
    value = UnwrapConstraint(value);
    if (value->IsPhi()) {
      PhiInstr* phi = value->AsPhi();
      LoopInfo* loop = GetSmiBoundedLoop(phi);
      InductionVar* induc = GetSmiInduction(loop, phi);
      if (induc != nullptr) {
        return (this->*phi_bound_func)(phi, loop, induc, point);
      }
    } else if (value->IsBinarySmiOp()) {
      BinarySmiOpInstr* bin_op = value->AsBinarySmiOp();
      if ((bin_op->op_kind() == Token::kADD) ||
          (bin_op->op_kind() == Token::kMUL) ||
          (bin_op->op_kind() == Token::kSUB)) {
        Definition* new_left =
            ConstructBound(phi_bound_func, bin_op->left()->definition(), point);
        Definition* new_right =
            (bin_op->op_kind() != Token::kSUB)
                ? ConstructBound(phi_bound_func, bin_op->right()->definition(),
                                 point)
                : UnwrapConstraint(bin_op->right()->definition());

        if ((new_left != UnwrapConstraint(bin_op->left()->definition())) ||
            (new_right != UnwrapConstraint(bin_op->right()->definition()))) {
          return MakeBinaryOp(bin_op->op_kind(), new_left, new_right);
        }
      }
    }
    return value;
  }

  Definition* InductionVariableUpperBound(PhiInstr* phi,
                                          LoopInfo* loop,
                                          InductionVar* induc,
                                          Instruction* point) {
    // Test if limit dominates given point.
    ConstraintInstr* limit = loop->limit();
    if (!point->IsDominatedBy(limit)) {
      return phi;
    }
    // Decide between direct or indirect bound.
    Definition* bounded_def = UnwrapConstraint(limit->value()->definition());
    if (bounded_def == phi) {
      // Given a smi bounded loop with smi induction variable
      //
      //          x <- phi(x0, x + 1)
      //
      // and a constraint x <= M that dominates the given
      // point we conclude that M is an upper bound for x.
      return RangeBoundaryToDefinition(limit->constraint()->max());
    } else {
      // Given a smi bounded loop with two smi induction variables
      //
      //          x <- phi(x0, x + 1)
      //          y <- phi(y0, y + 1)
      //
      // and a constraint x <= M that dominates the given
      // point we can conclude that
      //
      //          y <= y0 + (M - x0)
      //
      InductionVar* bounded_induc = GetSmiInduction(loop, bounded_def);
      Definition* x0 = GenerateInvariant(bounded_induc->initial());
      Definition* y0 = GenerateInvariant(induc->initial());
      Definition* m = RangeBoundaryToDefinition(limit->constraint()->max());
      BinarySmiOpInstr* loop_length =
          MakeBinaryOp(Token::kSUB, ConstructUpperBound(m, point),
                       ConstructLowerBound(x0, point));
      return MakeBinaryOp(Token::kADD, ConstructUpperBound(y0, point),
                          loop_length);
    }
  }

  Definition* InductionVariableLowerBound(PhiInstr* phi,
                                          LoopInfo* loop,
                                          InductionVar* induc,
                                          Instruction* point) {
    // Given a smi bounded loop with smi induction variable
    //
    //          x <- phi(x0, x + 1)
    //
    // we can conclude that LowerBound(x) == x0.
    return ConstructLowerBound(GenerateInvariant(induc->initial()), point);
  }

  // Try to re-associate binary operations in the floating DAG of operations
  // to collect all constants together, e.g. x + C0 + y + C1 is simplified into
  // x + y + (C0 + C1).
  bool Simplify(Definition** defn, intptr_t* constant) {
    if ((*defn)->IsBinarySmiOp()) {
      BinarySmiOpInstr* binary_op = (*defn)->AsBinarySmiOp();
      Definition* left = binary_op->left()->definition();
      Definition* right = binary_op->right()->definition();

      intptr_t c = 0;
      if (binary_op->op_kind() == Token::kADD) {
        intptr_t left_const = 0;
        intptr_t right_const = 0;
        if (!Simplify(&left, &left_const) || !Simplify(&right, &right_const)) {
          return false;
        }

        c = left_const + right_const;
        if (Utils::WillAddOverflow(left_const, right_const) ||
            !compiler::target::IsSmi(c)) {
          return false;  // Abort.
        }

        if (constant != NULL) {
          *constant = c;
        }

        if ((left == NULL) && (right == NULL)) {
          if (constant != NULL) {
            *defn = NULL;
          } else {
            *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
          }
          return true;
        }

        if (left == NULL) {
          if ((constant != NULL) || (c == 0)) {
            *defn = right;
            return true;
          } else {
            left = right;
            right = NULL;
          }
        }

        if (right == NULL) {
          if ((constant != NULL) || (c == 0)) {
            *defn = left;
            return true;
          } else {
            right = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
            c = 0;
          }
        }
      } else if (binary_op->op_kind() == Token::kSUB) {
        intptr_t left_const = 0;
        intptr_t right_const = 0;
        if (!Simplify(&left, &left_const) || !Simplify(&right, &right_const)) {
          return false;
        }

        c = (left_const - right_const);
        if (Utils::WillSubOverflow(left_const, right_const) ||
            !compiler::target::IsSmi(c)) {
          return false;  // Abort.
        }

        if (constant != NULL) {
          *constant = c;
        }

        if ((left == NULL) && (right == NULL)) {
          if (constant != NULL) {
            *defn = NULL;
          } else {
            *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
          }
          return true;
        }

        if (left == NULL) {
          left = flow_graph_->GetConstant(Object::smi_zero());
        }

        if (right == NULL) {
          if ((constant != NULL) || (c == 0)) {
            *defn = left;
            return true;
          } else {
            right = flow_graph_->GetConstant(Smi::Handle(Smi::New(-c)));
            c = 0;
          }
        }
      } else if (binary_op->op_kind() == Token::kMUL) {
        if (!Simplify(&left, NULL) || !Simplify(&right, NULL)) {
          return false;
        }
      } else {
        // Don't attempt to simplify any other binary operation.
        return true;
      }

      ASSERT(left != NULL);
      ASSERT(right != NULL);

      const bool left_changed = (left != binary_op->left()->definition());
      const bool right_changed = (right != binary_op->right()->definition());
      if (left_changed || right_changed) {
        if (!(*defn)->HasSSATemp()) {
          if (left_changed) binary_op->left()->set_definition(left);
          if (right_changed) binary_op->right()->set_definition(right);
          *defn = binary_op;
        } else {
          *defn = MakeBinaryOp(binary_op->op_kind(), UnwrapConstraint(left),
                               UnwrapConstraint(right));
        }
      }

      if ((c != 0) && (constant == NULL)) {
        *defn = MakeBinaryOp(Token::kADD, *defn, c);
      }
    } else if ((*defn)->IsConstant()) {
      ConstantInstr* constant_defn = (*defn)->AsConstant();
      if ((constant != NULL) && constant_defn->IsSmi()) {
        *defn = NULL;
        *constant = Smi::Cast(constant_defn->value()).Value();
      }
    }

    return true;
  }

  // If possible, find a set of symbols that need to be non-negative to
  // guarantee that the expression as a whole is non-negative.
  bool FindNonPositiveSymbols(GrowableArray<Definition*>* symbols,
                              Definition* defn) {
    if (defn->IsConstant()) {
      const Object& value = defn->AsConstant()->value();
      return compiler::target::IsSmi(value) && (Smi::Cast(value).Value() >= 0);
    } else if (defn->HasSSATemp()) {
      if (!RangeUtils::IsPositive(defn->range())) {
        symbols->Add(defn);
      }
      return true;
    } else if (defn->IsBinarySmiOp()) {
      BinarySmiOpInstr* binary_op = defn->AsBinarySmiOp();
      ASSERT((binary_op->op_kind() == Token::kADD) ||
             (binary_op->op_kind() == Token::kSUB) ||
             (binary_op->op_kind() == Token::kMUL));

      if (RangeUtils::IsPositive(defn->range())) {
        // We can statically prove that this subexpression is always positive.
        // No need to inspect its subexpressions.
        return true;
      }

      if (binary_op->op_kind() == Token::kSUB) {
        // For addition and multiplication it's enough to ensure that
        // lhs and rhs are positive to guarantee that defn as a whole is
        // positive. This does not work for subtraction, so just give up.
        return false;
      }

      return FindNonPositiveSymbols(symbols, binary_op->left()->definition()) &&
             FindNonPositiveSymbols(symbols, binary_op->right()->definition());
    }
    UNREACHABLE();
    return false;
  }

  // Find the innermost constraint for the given definition dominating the
  // given instruction.
  static Definition* FindInnermostConstraint(Definition* defn,
                                             Instruction* post_dominator) {
    for (Value* use = defn->input_use_list(); use != NULL;
         use = use->next_use()) {
      ConstraintInstr* constraint = use->instruction()->AsConstraint();
      if ((constraint != NULL) && post_dominator->IsDominatedBy(constraint)) {
        return FindInnermostConstraint(constraint, post_dominator);
      }
    }
    return defn;
  }

  // Replace symbolic parts of the boundary with the respective constraints
  // that hold at the given point in the flow graph signified by
  // post_dominator.
  // The constraints array allows providing a set of additional floating
  // constraints that were not inserted into the graph.
  static Definition* ApplyConstraints(
      Definition* defn,
      Instruction* post_dominator,
      GrowableArray<ConstraintInstr*>* constraints = NULL) {
    if (defn->HasSSATemp()) {
      defn = FindInnermostConstraint(defn, post_dominator);
      if (constraints != NULL) {
        for (intptr_t i = 0; i < constraints->length(); i++) {
          ConstraintInstr* constraint = (*constraints)[i];
          if (constraint->value()->definition() == defn) {
            return constraint;
          }
        }
      }
      return defn;
    }

    for (intptr_t i = 0; i < defn->InputCount(); i++) {
      defn->InputAt(i)->set_definition(ApplyConstraints(
          defn->InputAt(i)->definition(), post_dominator, constraints));
    }

    return defn;
  }

#ifndef PRODUCT
  static void PrettyPrintIndexBoundRecursively(BaseTextBuffer* f,
                                               Definition* index_bound) {
    BinarySmiOpInstr* binary_op = index_bound->AsBinarySmiOp();
    if (binary_op != NULL) {
      f->AddString("(");
      PrettyPrintIndexBoundRecursively(f, binary_op->left()->definition());
      f->Printf(" %s ", Token::Str(binary_op->op_kind()));
      PrettyPrintIndexBoundRecursively(f, binary_op->right()->definition());
      f->AddString(")");
    } else if (index_bound->IsConstant()) {
      f->Printf("%" Pd "",
                Smi::Cast(index_bound->AsConstant()->value()).Value());
    } else {
      f->Printf("v%" Pd "", index_bound->ssa_temp_index());
    }
    f->Printf(" {%s}", Range::ToCString(index_bound->range()));
  }

  static const char* IndexBoundToCString(Definition* index_bound) {
    char buffer[1024];
    BufferFormatter f(buffer, sizeof(buffer));
    PrettyPrintIndexBoundRecursively(&f, index_bound);
    return Thread::Current()->zone()->MakeCopyOfString(buffer);
  }
#endif  // !PRODUCT

  RangeAnalysis* range_analysis_;
  FlowGraph* flow_graph_;
  Scheduler scheduler_;
};

void RangeAnalysis::EliminateRedundantBoundsChecks() {
  if (FLAG_array_bounds_check_elimination) {
    const Function& function = flow_graph_->function();
    // Generalization is attempted only if we have not deoptimized on a
    // generalized check earlier and we are not compiling precompiled code
    // (no optimistic hoisting of checks possible).
    const bool try_generalization =
        !CompilerState::Current().is_aot() &&
        !function.ProhibitsBoundsCheckGeneralization();
    BoundsCheckGeneralizer generalizer(this, flow_graph_);
    for (CheckBoundBase* check : bounds_checks_) {
      if (check->IsRedundant(/*use_loops=*/true)) {
        check->ReplaceUsesWith(check->index()->definition());
        check->RemoveFromGraph();
      } else if (try_generalization) {
        if (auto jit_check = check->AsCheckArrayBound()) {
          generalizer.TryGeneralize(jit_check);
        }
      }
    }
  }
}

void RangeAnalysis::MarkUnreachableBlocks() {
  for (intptr_t i = 0; i < constraints_.length(); i++) {
    if (Range::IsUnknown(constraints_[i]->range())) {
      TargetEntryInstr* target = constraints_[i]->target();
      if (target == NULL) {
        // TODO(vegorov): replace Constraint with an unconditional
        // deoptimization and kill all dominated dead code.
        continue;
      }

      BranchInstr* branch =
          target->PredecessorAt(0)->last_instruction()->AsBranch();
      if (target == branch->true_successor()) {
        // True unreachable.
        if (FLAG_trace_constant_propagation && flow_graph_->should_print()) {
          THR_Print("Range analysis: True unreachable (B%" Pd ")\n",
                    branch->true_successor()->block_id());
        }
        branch->set_constant_target(branch->false_successor());
      } else {
        ASSERT(target == branch->false_successor());
        // False unreachable.
        if (FLAG_trace_constant_propagation && flow_graph_->should_print()) {
          THR_Print("Range analysis: False unreachable (B%" Pd ")\n",
                    branch->false_successor()->block_id());
        }
        branch->set_constant_target(branch->true_successor());
      }
    }
  }
}

void RangeAnalysis::RemoveConstraints() {
  for (intptr_t i = 0; i < constraints_.length(); i++) {
    Definition* def = constraints_[i]->value()->definition();
    // Some constraints might be constraining constraints. Unwind the chain of
    // constraints until we reach the actual definition.
    while (def->IsConstraint()) {
      def = def->AsConstraint()->value()->definition();
    }
    constraints_[i]->ReplaceUsesWith(def);
    constraints_[i]->RemoveFromGraph();
  }
}

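// Replace a 64-bit binary operation with its cheaper 32-bit counterpart when
// range analysis has proven that the result and both operands fit into 32
// bits. For example (illustrative), (x & 0xFF) + (y & 0xFF) on unboxed int64
// values can be narrowed to a BinaryInt32Op because every range involved is
// within [0, 0x1FE].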
static void NarrowBinaryInt64Op(BinaryInt64OpInstr* int64_op) {
  if (RangeUtils::Fits(int64_op->range(), RangeBoundary::kRangeBoundaryInt32) &&
      RangeUtils::Fits(int64_op->left()->definition()->range(),
                       RangeBoundary::kRangeBoundaryInt32) &&
      RangeUtils::Fits(int64_op->right()->definition()->range(),
                       RangeBoundary::kRangeBoundaryInt32) &&
      BinaryInt32OpInstr::IsSupported(int64_op->op_kind(), int64_op->left(),
                                      int64_op->right())) {
    BinaryInt32OpInstr* int32_op = new BinaryInt32OpInstr(
        int64_op->op_kind(), int64_op->left()->CopyWithType(),
        int64_op->right()->CopyWithType(), int64_op->DeoptimizationTarget());
    int32_op->set_range(*int64_op->range());
    int32_op->set_can_overflow(false);
    int64_op->ReplaceWith(int32_op, NULL);
  }
}

static void NarrowShiftInt64Op(ShiftIntegerOpInstr* int64_op) {
  if (RangeUtils::Fits(int64_op->range(), RangeBoundary::kRangeBoundaryInt32) &&
      RangeUtils::Fits(int64_op->left()->definition()->range(),
                       RangeBoundary::kRangeBoundaryInt32) &&
      RangeUtils::Fits(int64_op->right()->definition()->range(),
                       RangeBoundary::kRangeBoundaryInt32) &&
      BinaryInt32OpInstr::IsSupported(int64_op->op_kind(), int64_op->left(),
                                      int64_op->right())) {
    BinaryInt32OpInstr* int32_op = new BinaryInt32OpInstr(
        int64_op->op_kind(), int64_op->left()->CopyWithType(),
        int64_op->right()->CopyWithType(), int64_op->DeoptimizationTarget());
    int32_op->set_range(*int64_op->range());
    int32_op->set_can_overflow(false);
    int64_op->ReplaceWith(int32_op, NULL);
  }
}
1445 | |
1446 | void RangeAnalysis::NarrowMintToInt32() { |
1447 | for (intptr_t i = 0; i < binary_int64_ops_.length(); i++) { |
1448 | NarrowBinaryInt64Op(binary_int64_ops_[i]); |
1449 | } |
1450 | |
1451 | for (intptr_t i = 0; i < shift_int64_ops_.length(); i++) { |
1452 | NarrowShiftInt64Op(shift_int64_ops_[i]); |
1453 | } |
1454 | } |
1455 | |
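// The integer instruction selector rewrites int64 operations whose results
// are only ever observed through a Uint32 truncation (for example, a mask
// such as `x & 0xFFFFFFFF`) into cheaper uint32 operations. It first
// collects candidate definitions, then grows the selected set to a fixed
// point, and finally replaces the selected instructions in place.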
1456 | IntegerInstructionSelector::IntegerInstructionSelector(FlowGraph* flow_graph) |
1457 | : flow_graph_(flow_graph) { |
1458 | ASSERT(flow_graph_ != NULL); |
1459 | zone_ = flow_graph_->zone(); |
1460 | selected_uint32_defs_ = |
1461 | new (zone_) BitVector(zone_, flow_graph_->current_ssa_temp_index()); |
1462 | } |
1463 | |
1464 | void IntegerInstructionSelector::Select() { |
1465 | if (FLAG_trace_integer_ir_selection) { |
    THR_Print("---- starting integer ir selection -------\n");
1467 | } |
1468 | FindPotentialUint32Definitions(); |
1469 | FindUint32NarrowingDefinitions(); |
1470 | Propagate(); |
1471 | ReplaceInstructions(); |
1472 | if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) { |
    THR_Print("---- after integer ir selection -------\n");
1474 | FlowGraphPrinter printer(*flow_graph_); |
1475 | printer.PrintBlocks(); |
1476 | } |
1477 | } |
1478 | |
1479 | bool IntegerInstructionSelector::IsPotentialUint32Definition(Definition* def) { |
  // TODO(johnmccutchan): Consider Smi operations, to avoid unnecessary tagging
  // & untagging of intermediate results.
1482 | // TODO(johnmccutchan): Consider phis. |
1483 | return def->IsBoxInt64() || def->IsUnboxInt64() || def->IsShiftInt64Op() || |
1484 | def->IsSpeculativeShiftInt64Op() || |
1485 | (def->IsBinaryInt64Op() && BinaryUint32OpInstr::IsSupported( |
1486 | def->AsBinaryInt64Op()->op_kind())) || |
1487 | (def->IsUnaryInt64Op() && |
1488 | UnaryUint32OpInstr::IsSupported(def->AsUnaryInt64Op()->op_kind())); |
1489 | } |
1490 | |
1491 | void IntegerInstructionSelector::FindPotentialUint32Definitions() { |
1492 | if (FLAG_trace_integer_ir_selection) { |
    THR_Print("++++ Finding potential Uint32 definitions:\n");
1494 | } |
1495 | |
1496 | for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); |
1497 | !block_it.Done(); block_it.Advance()) { |
1498 | BlockEntryInstr* block = block_it.Current(); |
1499 | |
1500 | for (ForwardInstructionIterator instr_it(block); !instr_it.Done(); |
1501 | instr_it.Advance()) { |
1502 | Instruction* current = instr_it.Current(); |
1503 | Definition* defn = current->AsDefinition(); |
1504 | if ((defn != NULL) && defn->HasSSATemp()) { |
1505 | if (IsPotentialUint32Definition(defn)) { |
1506 | if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) { |
            THR_Print("Adding %s\n", current->ToCString());
1508 | } |
1509 | potential_uint32_defs_.Add(defn); |
1510 | } |
1511 | } |
1512 | } |
1513 | } |
1514 | } |
1515 | |
// Uint32-narrowing definitions are BinaryInt64Op masks (and, eventually,
// stores into unsigned typed arrays) that truncate the value into the
// Uint32 range.
1518 | bool IntegerInstructionSelector::IsUint32NarrowingDefinition(Definition* def) { |
1519 | if (def->IsBinaryInt64Op()) { |
1520 | BinaryInt64OpInstr* op = def->AsBinaryInt64Op(); |
1521 | // Must be a mask operation. |
1522 | if (op->op_kind() != Token::kBIT_AND) { |
1523 | return false; |
1524 | } |
1525 | Range* range = op->range(); |
1526 | if ((range == NULL) || |
1527 | !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) { |
1528 | return false; |
1529 | } |
1530 | return true; |
1531 | } |
1532 | // TODO(johnmccutchan): Add typed array stores. |
1533 | return false; |
1534 | } |
1535 | |
1536 | void IntegerInstructionSelector::FindUint32NarrowingDefinitions() { |
1537 | ASSERT(selected_uint32_defs_ != NULL); |
1538 | if (FLAG_trace_integer_ir_selection) { |
    THR_Print("++++ Selecting Uint32 definitions:\n");
    THR_Print("++++ Initial set:\n");
1541 | } |
1542 | for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) { |
1543 | Definition* defn = potential_uint32_defs_[i]; |
1544 | if (IsUint32NarrowingDefinition(defn)) { |
1545 | if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) { |
        THR_Print("Adding %s\n", defn->ToCString());
1547 | } |
1548 | selected_uint32_defs_->Add(defn->ssa_temp_index()); |
1549 | } |
1550 | } |
1551 | } |
1552 | |
1553 | bool IntegerInstructionSelector::AllUsesAreUint32Narrowing(Value* list_head) { |
1554 | for (Value::Iterator it(list_head); !it.Done(); it.Advance()) { |
1555 | Value* use = it.Current(); |
1556 | Definition* defn = use->instruction()->AsDefinition(); |
1557 | if ((defn == NULL) || !defn->HasSSATemp() || |
1558 | !selected_uint32_defs_->Contains(defn->ssa_temp_index())) { |
1559 | return false; |
1560 | } |
    // The right-hand operand of a shift is not narrowing: all of its bits
    // must be taken into account.
1563 | if (ShiftIntegerOpInstr* shift = defn->AsShiftIntegerOp()) { |
1564 | if (use == shift->right()) { |
1565 | return false; |
1566 | } |
1567 | } |
1568 | } |
1569 | return true; |
1570 | } |
1571 | |
1572 | bool IntegerInstructionSelector::CanBecomeUint32(Definition* def) { |
1573 | ASSERT(IsPotentialUint32Definition(def)); |
1574 | if (def->IsBoxInt64()) { |
1575 | // If a BoxInt64's input is a candidate, the box is a candidate. |
1576 | Definition* box_input = def->AsBoxInt64()->value()->definition(); |
1577 | return selected_uint32_defs_->Contains(box_input->ssa_temp_index()); |
1578 | } |
1579 | // A right shift with an input outside of Uint32 range cannot be converted |
1580 | // because we need the high bits. |
1581 | if (def->IsShiftInt64Op() || def->IsSpeculativeShiftInt64Op()) { |
1582 | ShiftIntegerOpInstr* op = def->AsShiftIntegerOp(); |
1583 | if (op->op_kind() == Token::kSHR) { |
1584 | Definition* shift_input = op->left()->definition(); |
1585 | ASSERT(shift_input != NULL); |
1586 | Range* range = shift_input->range(); |
1587 | if ((range == NULL) || |
1588 | !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) { |
1589 | return false; |
1590 | } |
1591 | } |
1592 | } |
1593 | if (!def->HasUses()) { |
1594 | // No uses, skip. |
1595 | return false; |
1596 | } |
1597 | return AllUsesAreUint32Narrowing(def->input_use_list()) && |
1598 | AllUsesAreUint32Narrowing(def->env_use_list()); |
1599 | } |
1600 | |
1601 | void IntegerInstructionSelector::Propagate() { |
1602 | ASSERT(selected_uint32_defs_ != NULL); |
1603 | bool changed = true; |
1604 | intptr_t iteration = 0; |
1605 | while (changed) { |
1606 | if (FLAG_trace_integer_ir_selection) { |
      THR_Print("+++ Iteration: %" Pd "\n", iteration++);
1608 | } |
1609 | changed = false; |
1610 | for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) { |
1611 | Definition* defn = potential_uint32_defs_[i]; |
1612 | if (selected_uint32_defs_->Contains(defn->ssa_temp_index())) { |
1613 | // Already marked as a candidate, skip. |
1614 | continue; |
1615 | } |
1616 | if (defn->IsConstant()) { |
1617 | // Skip constants. |
1618 | continue; |
1619 | } |
1620 | if (CanBecomeUint32(defn)) { |
1621 | if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) { |
          THR_Print("Adding %s\n", defn->ToCString());
1623 | } |
1624 | // Found a new candidate. |
1625 | selected_uint32_defs_->Add(defn->ssa_temp_index()); |
1626 | // Haven't reached fixed point yet. |
1627 | changed = true; |
1628 | } |
1629 | } |
1630 | } |
1631 | if (FLAG_trace_integer_ir_selection) { |
    THR_Print("Reached fixed point\n");
1633 | } |
1634 | } |
1635 | |
1636 | Definition* IntegerInstructionSelector::ConstructReplacementFor( |
1637 | Definition* def) { |
1638 | // Should only see mint definitions. |
1639 | ASSERT(IsPotentialUint32Definition(def)); |
1640 | // Should not see constant instructions. |
1641 | ASSERT(!def->IsConstant()); |
1642 | if (def->IsBinaryIntegerOp()) { |
1643 | BinaryIntegerOpInstr* op = def->AsBinaryIntegerOp(); |
1644 | Token::Kind op_kind = op->op_kind(); |
1645 | Value* left = op->left()->CopyWithType(); |
1646 | Value* right = op->right()->CopyWithType(); |
1647 | intptr_t deopt_id = op->DeoptimizationTarget(); |
1648 | if (def->IsBinaryInt64Op()) { |
1649 | return new (Z) BinaryUint32OpInstr(op_kind, left, right, deopt_id); |
1650 | } else if (def->IsShiftInt64Op()) { |
1651 | return new (Z) ShiftUint32OpInstr(op_kind, left, right, deopt_id); |
1652 | } else if (def->IsSpeculativeShiftInt64Op()) { |
1653 | return new (Z) |
1654 | SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id); |
1655 | } else { |
1656 | UNREACHABLE(); |
1657 | } |
1658 | } else if (def->IsBoxInt64()) { |
1659 | Value* value = def->AsBoxInt64()->value()->CopyWithType(); |
1660 | return new (Z) BoxUint32Instr(value); |
1661 | } else if (def->IsUnboxInt64()) { |
1662 | UnboxInstr* unbox = def->AsUnboxInt64(); |
1663 | Value* value = unbox->value()->CopyWithType(); |
1664 | intptr_t deopt_id = unbox->DeoptimizationTarget(); |
1665 | return new (Z) |
1666 | UnboxUint32Instr(value, deopt_id, def->SpeculativeModeOfInputs()); |
1667 | } else if (def->IsUnaryInt64Op()) { |
1668 | UnaryInt64OpInstr* op = def->AsUnaryInt64Op(); |
1669 | Token::Kind op_kind = op->op_kind(); |
1670 | Value* value = op->value()->CopyWithType(); |
1671 | intptr_t deopt_id = op->DeoptimizationTarget(); |
1672 | return new (Z) UnaryUint32OpInstr(op_kind, value, deopt_id); |
1673 | } |
1674 | UNREACHABLE(); |
1675 | return NULL; |
1676 | } |
1677 | |
1678 | void IntegerInstructionSelector::ReplaceInstructions() { |
1679 | if (FLAG_trace_integer_ir_selection) { |
    THR_Print("++++ Replacing instructions:\n");
1681 | } |
1682 | for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) { |
1683 | Definition* defn = potential_uint32_defs_[i]; |
1684 | if (!selected_uint32_defs_->Contains(defn->ssa_temp_index())) { |
1685 | // Not a candidate. |
1686 | continue; |
1687 | } |
1688 | Definition* replacement = ConstructReplacementFor(defn); |
1689 | ASSERT(replacement != NULL); |
1690 | if (!Range::IsUnknown(defn->range())) { |
1691 | if (defn->range()->IsPositive()) { |
1692 | replacement->set_range(*defn->range()); |
1693 | } else { |
1694 | replacement->set_range(Range(RangeBoundary::FromConstant(0), |
1695 | RangeBoundary::FromConstant(kMaxUint32))); |
1696 | } |
1697 | } |
1698 | if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) { |
      THR_Print("Replacing %s with %s\n", defn->ToCString(),
                replacement->ToCString());
1701 | } |
1702 | defn->ReplaceWith(replacement, NULL); |
1703 | } |
1704 | } |
1705 | |
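// A symbolic range boundary denotes the value `symbol + offset`, where the
// symbol is an SSA definition. For example, FromDefinition(length, -1)
// denotes `length - 1`, letting facts such as `index <= length - 1` be
// expressed without a concrete value for length. Constant Smi definitions
// are folded into plain constant boundaries.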
1706 | RangeBoundary RangeBoundary::FromDefinition(Definition* defn, int64_t offs) { |
1707 | if (defn->IsConstant() && defn->AsConstant()->IsSmi()) { |
1708 | return FromConstant(Smi::Cast(defn->AsConstant()->value()).Value() + offs); |
1709 | } |
1710 | ASSERT(IsValidOffsetForSymbolicRangeBoundary(offs)); |
1711 | return RangeBoundary(kSymbol, reinterpret_cast<intptr_t>(defn), offs); |
1712 | } |
1713 | |
1714 | RangeBoundary RangeBoundary::LowerBound() const { |
1715 | if (IsInfinity()) { |
1716 | return NegativeInfinity(); |
1717 | } |
1718 | if (IsConstant()) return *this; |
1719 | return Add(Range::ConstantMinSmi(symbol()->range()), |
1720 | RangeBoundary::FromConstant(offset_), NegativeInfinity()); |
1721 | } |
1722 | |
1723 | RangeBoundary RangeBoundary::UpperBound() const { |
1724 | if (IsInfinity()) { |
1725 | return PositiveInfinity(); |
1726 | } |
1727 | if (IsConstant()) return *this; |
1728 | |
1729 | return Add(Range::ConstantMaxSmi(symbol()->range()), |
1730 | RangeBoundary::FromConstant(offset_), PositiveInfinity()); |
1731 | } |
1732 | |
1733 | RangeBoundary RangeBoundary::Add(const RangeBoundary& a, |
1734 | const RangeBoundary& b, |
1735 | const RangeBoundary& overflow) { |
1736 | if (a.IsInfinity() || b.IsInfinity()) return overflow; |
1737 | |
1738 | ASSERT(a.IsConstant() && b.IsConstant()); |
1739 | if (Utils::WillAddOverflow(a.ConstantValue(), b.ConstantValue())) { |
1740 | return overflow; |
1741 | } |
1742 | |
1743 | int64_t result = a.ConstantValue() + b.ConstantValue(); |
1744 | |
1745 | return RangeBoundary::FromConstant(result); |
1746 | } |
1747 | |
1748 | RangeBoundary RangeBoundary::Sub(const RangeBoundary& a, |
1749 | const RangeBoundary& b, |
1750 | const RangeBoundary& overflow) { |
1751 | if (a.IsInfinity() || b.IsInfinity()) return overflow; |
1752 | ASSERT(a.IsConstant() && b.IsConstant()); |
1753 | if (Utils::WillSubOverflow(a.ConstantValue(), b.ConstantValue())) { |
1754 | return overflow; |
1755 | } |
1756 | |
1757 | int64_t result = a.ConstantValue() - b.ConstantValue(); |
1758 | |
1759 | return RangeBoundary::FromConstant(result); |
1760 | } |
1761 | |
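// Attempts to rewrite (symbol + a) + b as symbol + (a + b), failing if the
// combined offset overflows int64 or falls outside the valid range for
// symbolic offsets. For example, adding the constant 3 to the symbolic
// boundary v+2 produces v+5.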
1762 | bool RangeBoundary::SymbolicAdd(const RangeBoundary& a, |
1763 | const RangeBoundary& b, |
1764 | RangeBoundary* result) { |
1765 | if (a.IsSymbol() && b.IsConstant()) { |
1766 | if (Utils::WillAddOverflow(a.offset(), b.ConstantValue())) { |
1767 | return false; |
1768 | } |
1769 | |
1770 | const int64_t offset = a.offset() + b.ConstantValue(); |
1771 | |
1772 | if (!IsValidOffsetForSymbolicRangeBoundary(offset)) { |
1773 | return false; |
1774 | } |
1775 | |
1776 | *result = RangeBoundary::FromDefinition(a.symbol(), offset); |
1777 | return true; |
1778 | } else if (b.IsSymbol() && a.IsConstant()) { |
1779 | return SymbolicAdd(b, a, result); |
1780 | } |
1781 | return false; |
1782 | } |
1783 | |
1784 | bool RangeBoundary::SymbolicSub(const RangeBoundary& a, |
1785 | const RangeBoundary& b, |
1786 | RangeBoundary* result) { |
1787 | if (a.IsSymbol() && b.IsConstant()) { |
1788 | if (Utils::WillSubOverflow(a.offset(), b.ConstantValue())) { |
1789 | return false; |
1790 | } |
1791 | |
1792 | const int64_t offset = a.offset() - b.ConstantValue(); |
1793 | |
1794 | if (!IsValidOffsetForSymbolicRangeBoundary(offset)) { |
1795 | return false; |
1796 | } |
1797 | |
1798 | *result = RangeBoundary::FromDefinition(a.symbol(), offset); |
1799 | return true; |
1800 | } |
1801 | return false; |
1802 | } |
1803 | |
1804 | bool RangeBoundary::Equals(const RangeBoundary& other) const { |
1805 | if (IsConstant() && other.IsConstant()) { |
1806 | return ConstantValue() == other.ConstantValue(); |
1807 | } else if (IsInfinity() && other.IsInfinity()) { |
1808 | return kind() == other.kind(); |
1809 | } else if (IsSymbol() && other.IsSymbol()) { |
1810 | return (offset() == other.offset()) && DependOnSameSymbol(*this, other); |
1811 | } else if (IsUnknown() && other.IsUnknown()) { |
1812 | return true; |
1813 | } |
1814 | return false; |
1815 | } |
1816 | |
1817 | RangeBoundary RangeBoundary::Shl(const RangeBoundary& value_boundary, |
1818 | int64_t shift_count, |
1819 | const RangeBoundary& overflow) { |
1820 | ASSERT(value_boundary.IsConstant()); |
1821 | ASSERT(shift_count >= 0); |
1822 | int64_t limit = 64 - shift_count; |
1823 | int64_t value = value_boundary.ConstantValue(); |
1824 | |
1825 | if (value == 0) { |
1826 | return RangeBoundary(0); |
1827 | } else if (shift_count == 0 || |
1828 | (limit > 0 && Utils::IsInt(static_cast<int>(limit), value))) { |
1829 | // Result stays in 64 bit range. |
1830 | const int64_t result = static_cast<uint64_t>(value) << shift_count; |
1831 | return RangeBoundary(result); |
1832 | } |
1833 | |
1834 | return overflow; |
1835 | } |
1836 | |
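// Rewrites a symbolic boundary so that its symbol is neither a constraint
// nor a Smi addition/subtraction with a constant operand. For example, if
// s is defined as `v + 10`, the boundary s+5 canonicalizes to v+15. This
// makes boundaries that depend on the same underlying definition directly
// comparable by offset.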
1837 | static RangeBoundary CanonicalizeBoundary(const RangeBoundary& a, |
1838 | const RangeBoundary& overflow) { |
1839 | if (a.IsConstant() || a.IsInfinity()) { |
1840 | return a; |
1841 | } |
1842 | |
1843 | int64_t offset = a.offset(); |
1844 | Definition* symbol = a.symbol(); |
1845 | |
1846 | bool changed; |
1847 | do { |
1848 | changed = false; |
1849 | if (symbol->IsConstraint()) { |
1850 | symbol = symbol->AsConstraint()->value()->definition(); |
1851 | changed = true; |
1852 | } else if (symbol->IsBinarySmiOp()) { |
1853 | BinarySmiOpInstr* op = symbol->AsBinarySmiOp(); |
1854 | Definition* left = op->left()->definition(); |
1855 | Definition* right = op->right()->definition(); |
1856 | switch (op->op_kind()) { |
1857 | case Token::kADD: |
1858 | if (right->IsConstant()) { |
1859 | int64_t rhs = Smi::Cast(right->AsConstant()->value()).Value(); |
1860 | if (Utils::WillAddOverflow(offset, rhs)) { |
1861 | return overflow; |
1862 | } |
1863 | offset += rhs; |
1864 | symbol = left; |
1865 | changed = true; |
1866 | } else if (left->IsConstant()) { |
1867 | int64_t rhs = Smi::Cast(left->AsConstant()->value()).Value(); |
1868 | if (Utils::WillAddOverflow(offset, rhs)) { |
1869 | return overflow; |
1870 | } |
1871 | offset += rhs; |
1872 | symbol = right; |
1873 | changed = true; |
1874 | } |
1875 | break; |
1876 | |
1877 | case Token::kSUB: |
1878 | if (right->IsConstant()) { |
1879 | int64_t rhs = Smi::Cast(right->AsConstant()->value()).Value(); |
1880 | if (Utils::WillSubOverflow(offset, rhs)) { |
1881 | return overflow; |
1882 | } |
1883 | offset -= rhs; |
1884 | symbol = left; |
1885 | changed = true; |
1886 | } |
1887 | break; |
1888 | |
1889 | default: |
1890 | break; |
1891 | } |
1892 | } |
1893 | } while (changed); |
1894 | |
1895 | if (!RangeBoundary::IsValidOffsetForSymbolicRangeBoundary(offset)) { |
1896 | return overflow; |
1897 | } |
1898 | |
1899 | return RangeBoundary::FromDefinition(symbol, offset); |
1900 | } |
1901 | |
1902 | static bool CanonicalizeMaxBoundary(RangeBoundary* a) { |
1903 | if (!a->IsSymbol()) return false; |
1904 | |
1905 | Range* range = a->symbol()->range(); |
1906 | if ((range == NULL) || !range->max().IsSymbol()) return false; |
1907 | |
1908 | if (Utils::WillAddOverflow(range->max().offset(), a->offset())) { |
1909 | *a = RangeBoundary::PositiveInfinity(); |
1910 | return true; |
1911 | } |
1912 | |
1913 | const int64_t offset = range->max().offset() + a->offset(); |
1914 | |
1915 | if (!RangeBoundary::IsValidOffsetForSymbolicRangeBoundary(offset)) { |
1916 | *a = RangeBoundary::PositiveInfinity(); |
1917 | return true; |
1918 | } |
1919 | |
1920 | *a = CanonicalizeBoundary( |
1921 | RangeBoundary::FromDefinition(range->max().symbol(), offset), |
1922 | RangeBoundary::PositiveInfinity()); |
1923 | |
1924 | return true; |
1925 | } |
1926 | |
1927 | static bool CanonicalizeMinBoundary(RangeBoundary* a) { |
1928 | if (!a->IsSymbol()) return false; |
1929 | |
1930 | Range* range = a->symbol()->range(); |
1931 | if ((range == NULL) || !range->min().IsSymbol()) return false; |
1932 | |
1933 | if (Utils::WillAddOverflow(range->min().offset(), a->offset())) { |
1934 | *a = RangeBoundary::NegativeInfinity(); |
1935 | return true; |
1936 | } |
1937 | |
1938 | const int64_t offset = range->min().offset() + a->offset(); |
1939 | |
1940 | if (!RangeBoundary::IsValidOffsetForSymbolicRangeBoundary(offset)) { |
1941 | *a = RangeBoundary::NegativeInfinity(); |
1942 | return true; |
1943 | } |
1944 | |
1945 | *a = CanonicalizeBoundary( |
1946 | RangeBoundary::FromDefinition(range->min().symbol(), offset), |
1947 | RangeBoundary::NegativeInfinity()); |
1948 | |
1949 | return true; |
1950 | } |
1951 | |
1952 | typedef bool (*BoundaryOp)(RangeBoundary*); |
1953 | |
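// Repeatedly canonicalizes both boundaries with the given step until they
// depend on the same symbol (and can be compared by offset alone) or no
// further progress can be made.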
1954 | static bool CanonicalizeForComparison(RangeBoundary* a, |
1955 | RangeBoundary* b, |
1956 | BoundaryOp op, |
1957 | const RangeBoundary& overflow) { |
1958 | if (!a->IsSymbol() || !b->IsSymbol()) { |
1959 | return false; |
1960 | } |
1961 | |
1962 | RangeBoundary canonical_a = *a; |
1963 | RangeBoundary canonical_b = *b; |
1964 | |
1965 | do { |
1966 | if (DependOnSameSymbol(canonical_a, canonical_b)) { |
1967 | *a = canonical_a; |
1968 | *b = canonical_b; |
1969 | return true; |
1970 | } |
1971 | } while (op(&canonical_a) || op(&canonical_b)); |
1972 | |
1973 | return false; |
1974 | } |
1975 | |
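// JoinMin and JoinMax compute a boundary for the union of two ranges. For
// example, joining the minimums 0 and -5 yields -5, and for comparable
// symbolic boundaries the smaller offset wins: JoinMin(v+1, v+3) = v+1.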
1976 | RangeBoundary RangeBoundary::JoinMin(RangeBoundary a, |
1977 | RangeBoundary b, |
1978 | RangeBoundary::RangeSize size) { |
1979 | if (a.Equals(b)) { |
1980 | return b; |
1981 | } |
1982 | |
1983 | if (CanonicalizeForComparison(&a, &b, &CanonicalizeMinBoundary, |
1984 | RangeBoundary::NegativeInfinity())) { |
1985 | return (a.offset() <= b.offset()) ? a : b; |
1986 | } |
1987 | |
1988 | const int64_t inf_a = a.LowerBound(size); |
1989 | const int64_t inf_b = b.LowerBound(size); |
1990 | const int64_t sup_a = a.UpperBound(size); |
1991 | const int64_t sup_b = b.UpperBound(size); |
1992 | |
1993 | if ((sup_a <= inf_b) && !a.LowerBound().Overflowed(size)) { |
1994 | return a; |
1995 | } else if ((sup_b <= inf_a) && !b.LowerBound().Overflowed(size)) { |
1996 | return b; |
1997 | } else { |
1998 | return RangeBoundary::FromConstant(Utils::Minimum(inf_a, inf_b)); |
1999 | } |
2000 | } |
2001 | |
2002 | RangeBoundary RangeBoundary::JoinMax(RangeBoundary a, |
2003 | RangeBoundary b, |
2004 | RangeBoundary::RangeSize size) { |
2005 | if (a.Equals(b)) { |
2006 | return b; |
2007 | } |
2008 | |
2009 | if (CanonicalizeForComparison(&a, &b, &CanonicalizeMaxBoundary, |
2010 | RangeBoundary::PositiveInfinity())) { |
2011 | return (a.offset() >= b.offset()) ? a : b; |
2012 | } |
2013 | |
2014 | const int64_t inf_a = a.LowerBound(size); |
2015 | const int64_t inf_b = b.LowerBound(size); |
2016 | const int64_t sup_a = a.UpperBound(size); |
2017 | const int64_t sup_b = b.UpperBound(size); |
2018 | |
2019 | if ((sup_a <= inf_b) && !b.UpperBound().Overflowed(size)) { |
2020 | return b; |
2021 | } else if ((sup_b <= inf_a) && !a.UpperBound().Overflowed(size)) { |
2022 | return a; |
2023 | } else { |
2024 | return RangeBoundary::FromConstant(Utils::Maximum(sup_a, sup_b)); |
2025 | } |
2026 | } |
2027 | |
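// IntersectionMin and IntersectionMax keep the tighter of the two
// boundaries: the larger minimum and the smaller maximum. For example,
// intersecting the minimums 0 and 5 yields 5.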
2028 | RangeBoundary RangeBoundary::IntersectionMin(RangeBoundary a, RangeBoundary b) { |
2029 | ASSERT(!a.IsPositiveInfinity() && !b.IsPositiveInfinity()); |
2030 | ASSERT(!a.IsUnknown() && !b.IsUnknown()); |
2031 | |
2032 | if (a.Equals(b)) { |
2033 | return a; |
2034 | } |
2035 | |
2036 | if (a.IsMinimumOrBelow(RangeBoundary::kRangeBoundarySmi)) { |
2037 | return b; |
2038 | } else if (b.IsMinimumOrBelow(RangeBoundary::kRangeBoundarySmi)) { |
2039 | return a; |
2040 | } |
2041 | |
2042 | if (CanonicalizeForComparison(&a, &b, &CanonicalizeMinBoundary, |
2043 | RangeBoundary::NegativeInfinity())) { |
2044 | return (a.offset() >= b.offset()) ? a : b; |
2045 | } |
2046 | |
2047 | const int64_t inf_a = a.SmiLowerBound(); |
2048 | const int64_t inf_b = b.SmiLowerBound(); |
2049 | |
2050 | return (inf_a >= inf_b) ? a : b; |
2051 | } |
2052 | |
2053 | RangeBoundary RangeBoundary::IntersectionMax(RangeBoundary a, RangeBoundary b) { |
2054 | ASSERT(!a.IsNegativeInfinity() && !b.IsNegativeInfinity()); |
2055 | ASSERT(!a.IsUnknown() && !b.IsUnknown()); |
2056 | |
2057 | if (a.Equals(b)) { |
2058 | return a; |
2059 | } |
2060 | |
2061 | if (a.IsMaximumOrAbove(RangeBoundary::kRangeBoundarySmi)) { |
2062 | return b; |
2063 | } else if (b.IsMaximumOrAbove(RangeBoundary::kRangeBoundarySmi)) { |
2064 | return a; |
2065 | } |
2066 | |
2067 | if (CanonicalizeForComparison(&a, &b, &CanonicalizeMaxBoundary, |
2068 | RangeBoundary::PositiveInfinity())) { |
2069 | return (a.offset() <= b.offset()) ? a : b; |
2070 | } |
2071 | |
2072 | const int64_t sup_a = a.SmiUpperBound(); |
2073 | const int64_t sup_b = b.SmiUpperBound(); |
2074 | |
2075 | return (sup_a <= sup_b) ? a : b; |
2076 | } |
2077 | |
2078 | int64_t RangeBoundary::ConstantValue() const { |
2079 | ASSERT(IsConstant()); |
2080 | return value_; |
2081 | } |
2082 | |
2083 | bool Range::IsPositive() const { |
2084 | return OnlyGreaterThanOrEqualTo(0); |
2085 | } |
2086 | |
2087 | bool Range::OnlyLessThanOrEqualTo(int64_t val) const { |
2088 | const RangeBoundary upper_bound = max().UpperBound(); |
2089 | return !upper_bound.IsPositiveInfinity() && |
2090 | (upper_bound.ConstantValue() <= val); |
2091 | } |
2092 | |
2093 | bool Range::OnlyGreaterThanOrEqualTo(int64_t val) const { |
2094 | const RangeBoundary lower_bound = min().LowerBound(); |
2095 | return !lower_bound.IsNegativeInfinity() && |
2096 | (lower_bound.ConstantValue() >= val); |
2097 | } |
2098 | |
2099 | // Inclusive. |
2100 | bool Range::IsWithin(int64_t min_int, int64_t max_int) const { |
2101 | return OnlyGreaterThanOrEqualTo(min_int) && OnlyLessThanOrEqualTo(max_int); |
2102 | } |
2103 | |
2104 | bool Range::Overlaps(int64_t min_int, int64_t max_int) const { |
2105 | RangeBoundary lower = min().LowerBound(); |
2106 | RangeBoundary upper = max().UpperBound(); |
2107 | const int64_t this_min = |
2108 | lower.IsNegativeInfinity() ? RangeBoundary::kMin : lower.ConstantValue(); |
2109 | const int64_t this_max = |
2110 | upper.IsPositiveInfinity() ? RangeBoundary::kMax : upper.ConstantValue(); |
2111 | if ((this_min <= min_int) && (min_int <= this_max)) return true; |
2112 | if ((this_min <= max_int) && (max_int <= this_max)) return true; |
2113 | if ((min_int < this_min) && (max_int > this_max)) return true; |
2114 | return false; |
2115 | } |
2116 | |
2117 | bool Range::IsUnsatisfiable() const { |
2118 | // Infinity case: [+inf, ...] || [..., -inf] |
2119 | if (min().IsPositiveInfinity() || max().IsNegativeInfinity()) { |
2120 | return true; |
2121 | } |
2122 | // Constant case: For example [0, -1]. |
2123 | if (Range::ConstantMin(this).ConstantValue() > |
2124 | Range::ConstantMax(this).ConstantValue()) { |
2125 | return true; |
2126 | } |
2127 | // Symbol case: For example [v+1, v]. |
2128 | return DependOnSameSymbol(min(), max()) && min().offset() > max().offset(); |
2129 | } |
2130 | |
2131 | void Range::Clamp(RangeBoundary::RangeSize size) { |
2132 | min_ = min_.Clamp(size); |
2133 | max_ = max_.Clamp(size); |
2134 | } |
2135 | |
2136 | void Range::ClampToConstant(RangeBoundary::RangeSize size) { |
2137 | min_ = min_.LowerBound().Clamp(size); |
2138 | max_ = max_.UpperBound().Clamp(size); |
2139 | } |
2140 | |
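// For example, [1, 2] << [3, 4] yields [8, 32]. When the left minimum is
// negative, the maximum shift count produces the smallest result:
// [-2, 2] << [0, 3] yields [-16, 16].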
2141 | void Range::Shl(const Range* left, |
2142 | const Range* right, |
2143 | RangeBoundary* result_min, |
2144 | RangeBoundary* result_max) { |
2145 | ASSERT(left != NULL); |
2146 | ASSERT(right != NULL); |
2147 | ASSERT(result_min != NULL); |
2148 | ASSERT(result_max != NULL); |
2149 | RangeBoundary left_max = Range::ConstantMax(left); |
2150 | RangeBoundary left_min = Range::ConstantMin(left); |
2151 | // A negative shift count always deoptimizes (and throws), so the minimum |
2152 | // shift count is zero. |
2153 | int64_t right_max = Utils::Maximum(Range::ConstantMax(right).ConstantValue(), |
2154 | static_cast<int64_t>(0)); |
2155 | int64_t right_min = Utils::Maximum(Range::ConstantMin(right).ConstantValue(), |
2156 | static_cast<int64_t>(0)); |
2157 | |
2158 | *result_min = RangeBoundary::Shl( |
2159 | left_min, left_min.ConstantValue() > 0 ? right_min : right_max, |
2160 | left_min.ConstantValue() > 0 ? RangeBoundary::PositiveInfinity() |
2161 | : RangeBoundary::NegativeInfinity()); |
2162 | |
2163 | *result_max = RangeBoundary::Shl( |
2164 | left_max, left_max.ConstantValue() > 0 ? right_max : right_min, |
2165 | left_max.ConstantValue() > 0 ? RangeBoundary::PositiveInfinity() |
2166 | : RangeBoundary::NegativeInfinity()); |
2167 | } |
2168 | |
2169 | void Range::Shr(const Range* left, |
2170 | const Range* right, |
2171 | RangeBoundary* result_min, |
2172 | RangeBoundary* result_max) { |
2173 | RangeBoundary left_max = Range::ConstantMax(left); |
2174 | RangeBoundary left_min = Range::ConstantMin(left); |
2175 | // A negative shift count always deoptimizes (and throws), so the minimum |
2176 | // shift count is zero. |
2177 | int64_t right_max = Utils::Maximum(Range::ConstantMax(right).ConstantValue(), |
2178 | static_cast<int64_t>(0)); |
2179 | int64_t right_min = Utils::Maximum(Range::ConstantMin(right).ConstantValue(), |
2180 | static_cast<int64_t>(0)); |
2181 | |
2182 | *result_min = RangeBoundary::Shr( |
2183 | left_min, left_min.ConstantValue() > 0 ? right_max : right_min); |
2184 | |
2185 | *result_max = RangeBoundary::Shr( |
2186 | left_max, left_max.ConstantValue() > 0 ? right_min : right_max); |
2187 | } |
2188 | |
2189 | void Range::And(const Range* left_range, |
2190 | const Range* right_range, |
2191 | RangeBoundary* result_min, |
2192 | RangeBoundary* result_max) { |
2193 | ASSERT(left_range != NULL); |
2194 | ASSERT(right_range != NULL); |
2195 | ASSERT(result_min != NULL); |
2196 | ASSERT(result_max != NULL); |
2197 | |
2198 | if (Range::ConstantMin(right_range).ConstantValue() >= 0) { |
2199 | *result_min = RangeBoundary::FromConstant(0); |
2200 | *result_max = Range::ConstantMax(right_range); |
2201 | return; |
2202 | } |
2203 | |
2204 | if (Range::ConstantMin(left_range).ConstantValue() >= 0) { |
2205 | *result_min = RangeBoundary::FromConstant(0); |
2206 | *result_max = Range::ConstantMax(left_range); |
2207 | return; |
2208 | } |
2209 | |
2210 | BitwiseOp(left_range, right_range, result_min, result_max); |
2211 | } |
2212 | |
2213 | static int BitSize(const Range* range) { |
2214 | const int64_t min = Range::ConstantMin(range).ConstantValue(); |
2215 | const int64_t max = Range::ConstantMax(range).ConstantValue(); |
2216 | return Utils::Maximum(Utils::BitLength(min), Utils::BitLength(max)); |
2217 | } |
2218 | |
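// Conservative approximation for bitwise operations: the result needs no
// more bits than the wider operand. For example, if both operands fit in
// 5 bits, the result lies within [-32, 31], or [0, 31] when both ranges
// are non-negative.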
2219 | void Range::BitwiseOp(const Range* left_range, |
2220 | const Range* right_range, |
2221 | RangeBoundary* result_min, |
2222 | RangeBoundary* result_max) { |
2223 | const int bitsize = Utils::Maximum(BitSize(left_range), BitSize(right_range)); |
2224 | |
2225 | if (left_range->IsPositive() && right_range->IsPositive()) { |
2226 | *result_min = RangeBoundary::FromConstant(0); |
2227 | } else { |
2228 | *result_min = |
2229 | RangeBoundary::FromConstant(-(static_cast<uint64_t>(1) << bitsize)); |
2230 | } |
2231 | |
2232 | *result_max = |
2233 | RangeBoundary::FromConstant((static_cast<uint64_t>(1) << bitsize) - 1); |
2234 | } |
2235 | |
2236 | void Range::Add(const Range* left_range, |
2237 | const Range* right_range, |
2238 | RangeBoundary* result_min, |
2239 | RangeBoundary* result_max, |
2240 | Definition* left_defn) { |
2241 | ASSERT(left_range != NULL); |
2242 | ASSERT(right_range != NULL); |
2243 | ASSERT(result_min != NULL); |
2244 | ASSERT(result_max != NULL); |
2245 | |
2246 | RangeBoundary left_min = Definition::IsArrayLength(left_defn) |
2247 | ? RangeBoundary::FromDefinition(left_defn) |
2248 | : left_range->min(); |
2249 | |
2250 | RangeBoundary left_max = Definition::IsArrayLength(left_defn) |
2251 | ? RangeBoundary::FromDefinition(left_defn) |
2252 | : left_range->max(); |
2253 | |
2254 | if (!RangeBoundary::SymbolicAdd(left_min, right_range->min(), result_min)) { |
2255 | *result_min = RangeBoundary::Add(left_range->min().LowerBound(), |
2256 | right_range->min().LowerBound(), |
2257 | RangeBoundary::NegativeInfinity()); |
2258 | } |
2259 | if (!RangeBoundary::SymbolicAdd(left_max, right_range->max(), result_max)) { |
2260 | *result_max = RangeBoundary::Add(right_range->max().UpperBound(), |
2261 | left_range->max().UpperBound(), |
2262 | RangeBoundary::PositiveInfinity()); |
2263 | } |
2264 | } |
2265 | |
2266 | void Range::Sub(const Range* left_range, |
2267 | const Range* right_range, |
2268 | RangeBoundary* result_min, |
2269 | RangeBoundary* result_max, |
2270 | Definition* left_defn) { |
2271 | ASSERT(left_range != NULL); |
2272 | ASSERT(right_range != NULL); |
2273 | ASSERT(result_min != NULL); |
2274 | ASSERT(result_max != NULL); |
2275 | |
2276 | RangeBoundary left_min = Definition::IsArrayLength(left_defn) |
2277 | ? RangeBoundary::FromDefinition(left_defn) |
2278 | : left_range->min(); |
2279 | |
2280 | RangeBoundary left_max = Definition::IsArrayLength(left_defn) |
2281 | ? RangeBoundary::FromDefinition(left_defn) |
2282 | : left_range->max(); |
2283 | |
2284 | if (!RangeBoundary::SymbolicSub(left_min, right_range->max(), result_min)) { |
2285 | *result_min = RangeBoundary::Sub(left_range->min().LowerBound(), |
2286 | right_range->max().UpperBound(), |
2287 | RangeBoundary::NegativeInfinity()); |
2288 | } |
2289 | if (!RangeBoundary::SymbolicSub(left_max, right_range->min(), result_max)) { |
2290 | *result_max = RangeBoundary::Sub(left_range->max().UpperBound(), |
2291 | right_range->min().LowerBound(), |
2292 | RangeBoundary::PositiveInfinity()); |
2293 | } |
2294 | } |
2295 | |
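// For example, [2, 3] * [4, 5] yields [8, 15] (same-sign operands multiply
// absolute minimums and maximums), while mixed signs fall back to the
// symmetric approximation: [-3, 2] * [0, 4] yields [-12, 12].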
2296 | void Range::Mul(const Range* left_range, |
2297 | const Range* right_range, |
2298 | RangeBoundary* result_min, |
2299 | RangeBoundary* result_max) { |
2300 | ASSERT(left_range != NULL); |
2301 | ASSERT(right_range != NULL); |
2302 | ASSERT(result_min != NULL); |
2303 | ASSERT(result_max != NULL); |
2304 | |
2305 | const int64_t left_max = ConstantAbsMax(left_range); |
2306 | const int64_t right_max = ConstantAbsMax(right_range); |
2307 | if ((left_max <= -compiler::target::kSmiMin) && |
2308 | (right_max <= -compiler::target::kSmiMin) && |
2309 | ((left_max == 0) || (right_max <= kMaxInt64 / left_max))) { |
2310 | // Product of left and right max values stays in 64 bit range. |
2311 | const int64_t mul_max = left_max * right_max; |
2312 | if (OnlyPositiveOrZero(*left_range, *right_range) || |
2313 | OnlyNegativeOrZero(*left_range, *right_range)) { |
2314 | // If both ranges are of the same sign then the range of the result |
2315 | // is positive and is between multiplications of absolute minimums |
2316 | // and absolute maximums. |
2317 | const int64_t mul_min = |
2318 | ConstantAbsMin(left_range) * ConstantAbsMin(right_range); |
2319 | *result_min = RangeBoundary::FromConstant(mul_min); |
2320 | *result_max = RangeBoundary::FromConstant(mul_max); |
2321 | } else { |
2322 | // If ranges have mixed signs then use conservative approximation: |
2323 | // absolute value of the result is less or equal to multiplication |
2324 | // of absolute maximums. |
2325 | *result_min = RangeBoundary::FromConstant(-mul_max); |
2326 | *result_max = RangeBoundary::FromConstant(mul_max); |
2327 | } |
2328 | return; |
2329 | } |
2330 | |
2331 | // TODO(vegorov): handle mixed sign case that leads to (-Infinity, 0] range. |
2332 | if (OnlyPositiveOrZero(*left_range, *right_range) || |
2333 | OnlyNegativeOrZero(*left_range, *right_range)) { |
2334 | *result_min = RangeBoundary::FromConstant(0); |
2335 | *result_max = RangeBoundary::PositiveInfinity(); |
2336 | return; |
2337 | } |
2338 | |
2339 | *result_min = RangeBoundary::NegativeInfinity(); |
2340 | *result_max = RangeBoundary::PositiveInfinity(); |
2341 | } |
2342 | |
2343 | void Range::TruncDiv(const Range* left_range, |
2344 | const Range* right_range, |
2345 | RangeBoundary* result_min, |
2346 | RangeBoundary* result_max) { |
2347 | ASSERT(left_range != nullptr); |
2348 | ASSERT(right_range != nullptr); |
2349 | ASSERT(result_min != nullptr); |
2350 | ASSERT(result_max != nullptr); |
2351 | |
2352 | if (left_range->OnlyGreaterThanOrEqualTo(0) && |
2353 | right_range->OnlyGreaterThanOrEqualTo(1)) { |
2354 | const int64_t left_max = ConstantAbsMax(left_range); |
2355 | const int64_t left_min = ConstantAbsMin(left_range); |
2356 | const int64_t right_max = ConstantAbsMax(right_range); |
2357 | const int64_t right_min = ConstantAbsMin(right_range); |
2358 | |
2359 | *result_max = RangeBoundary::FromConstant(left_max / right_min); |
2360 | *result_min = RangeBoundary::FromConstant(left_min / right_max); |
2361 | return; |
2362 | } |
2363 | |
2364 | *result_min = RangeBoundary::NegativeInfinity(); |
2365 | *result_max = RangeBoundary::PositiveInfinity(); |
2366 | } |
2367 | |
2368 | void Range::Mod(const Range* right_range, |
2369 | RangeBoundary* result_min, |
2370 | RangeBoundary* result_max) { |
2371 | ASSERT(right_range != nullptr); |
2372 | ASSERT(result_min != nullptr); |
2373 | ASSERT(result_max != nullptr); |
  // Each modulo result is non-negative and bounded by one less than the
  // maximum absolute value of the right-hand side (it is unlikely that the
  // left-hand side further refines this in typical programs).
2377 | // Note that x % MinInt can be MaxInt and x % 0 always throws. |
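  // For example, a divisor range of [1, 10] gives a result range of [0, 9];
  // a divisor range containing kMinInt64 is approximated by [0, kMaxInt64].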
2378 | const int64_t kModMin = 0; |
2379 | int64_t mod_max = kMaxInt64; |
2380 | if (Range::ConstantMin(right_range).ConstantValue() != kMinInt64) { |
2381 | const int64_t right_max = ConstantAbsMax(right_range); |
2382 | mod_max = Utils::Maximum(right_max - 1, kModMin); |
2383 | } |
2384 | *result_min = RangeBoundary::FromConstant(kModMin); |
2385 | *result_max = RangeBoundary::FromConstant(mod_max); |
2386 | } |
2387 | |
// Returns true if both the a and b ranges are >= 0.
2389 | bool Range::OnlyPositiveOrZero(const Range& a, const Range& b) { |
2390 | return a.OnlyGreaterThanOrEqualTo(0) && b.OnlyGreaterThanOrEqualTo(0); |
2391 | } |
2392 | |
// Returns true if both the a and b ranges are <= 0.
2394 | bool Range::OnlyNegativeOrZero(const Range& a, const Range& b) { |
2395 | return a.OnlyLessThanOrEqualTo(0) && b.OnlyLessThanOrEqualTo(0); |
2396 | } |
2397 | |
2398 | // Return the maximum absolute value included in range. |
2399 | int64_t Range::ConstantAbsMax(const Range* range) { |
2400 | if (range == NULL) { |
2401 | return RangeBoundary::kMax; |
2402 | } |
2403 | const int64_t abs_min = |
2404 | Utils::AbsWithSaturation(Range::ConstantMin(range).ConstantValue()); |
2405 | const int64_t abs_max = |
2406 | Utils::AbsWithSaturation(Range::ConstantMax(range).ConstantValue()); |
2407 | return Utils::Maximum(abs_min, abs_max); |
2408 | } |
2409 | |
2410 | // Return the minimum absolute value included in range. |
2411 | int64_t Range::ConstantAbsMin(const Range* range) { |
2412 | if (range == NULL) { |
2413 | return 0; |
2414 | } |
2415 | const int64_t abs_min = |
2416 | Utils::AbsWithSaturation(Range::ConstantMin(range).ConstantValue()); |
2417 | const int64_t abs_max = |
2418 | Utils::AbsWithSaturation(Range::ConstantMax(range).ConstantValue()); |
2419 | return Utils::Minimum(abs_min, abs_max); |
2420 | } |
2421 | |
2422 | void Range::BinaryOp(const Token::Kind op, |
2423 | const Range* left_range, |
2424 | const Range* right_range, |
2425 | Definition* left_defn, |
2426 | Range* result) { |
2427 | ASSERT(left_range != NULL); |
2428 | ASSERT(right_range != NULL); |
2429 | |
2430 | // Both left and right ranges are finite. |
2431 | ASSERT(left_range->IsFinite()); |
2432 | ASSERT(right_range->IsFinite()); |
2433 | |
2434 | RangeBoundary min; |
2435 | RangeBoundary max; |
2436 | ASSERT(min.IsUnknown() && max.IsUnknown()); |
2437 | |
2438 | switch (op) { |
2439 | case Token::kADD: |
2440 | Range::Add(left_range, right_range, &min, &max, left_defn); |
2441 | break; |
2442 | |
2443 | case Token::kSUB: |
2444 | Range::Sub(left_range, right_range, &min, &max, left_defn); |
2445 | break; |
2446 | |
2447 | case Token::kMUL: |
2448 | Range::Mul(left_range, right_range, &min, &max); |
2449 | break; |
2450 | |
2451 | case Token::kTRUNCDIV: |
2452 | Range::TruncDiv(left_range, right_range, &min, &max); |
2453 | break; |
2454 | |
2455 | case Token::kMOD: |
2456 | Range::Mod(right_range, &min, &max); |
2457 | break; |
2458 | |
2459 | case Token::kSHL: |
2460 | Range::Shl(left_range, right_range, &min, &max); |
2461 | break; |
2462 | |
2463 | case Token::kSHR: |
2464 | Range::Shr(left_range, right_range, &min, &max); |
2465 | break; |
2466 | |
2467 | case Token::kBIT_AND: |
2468 | Range::And(left_range, right_range, &min, &max); |
2469 | break; |
2470 | |
2471 | case Token::kBIT_XOR: |
2472 | case Token::kBIT_OR: |
2473 | Range::BitwiseOp(left_range, right_range, &min, &max); |
2474 | break; |
2475 | |
2476 | default: |
2477 | *result = Range(RangeBoundary::NegativeInfinity(), |
2478 | RangeBoundary::PositiveInfinity()); |
2479 | return; |
2480 | } |
2481 | |
2482 | ASSERT(!min.IsUnknown() && !max.IsUnknown()); |
2483 | |
2484 | // Sanity: avoid [l, u] with constants l > u. |
2485 | ASSERT(!min.IsConstant() || !max.IsConstant() || |
2486 | min.ConstantValue() <= max.ConstantValue()); |
2487 | |
2488 | *result = Range(min, max); |
2489 | } |
2490 | |
2491 | void Definition::set_range(const Range& range) { |
2492 | if (range_ == NULL) { |
2493 | range_ = new Range(); |
2494 | } |
2495 | *range_ = range; |
2496 | } |
2497 | |
2498 | void Definition::InferRange(RangeAnalysis* analysis, Range* range) { |
2499 | if (Type()->ToCid() == kSmiCid) { |
2500 | *range = Range::Full(RangeBoundary::kRangeBoundarySmi); |
2501 | } else if (IsInt64Definition()) { |
2502 | *range = Range::Full(RangeBoundary::kRangeBoundaryInt64); |
2503 | } else if (IsInt32Definition()) { |
2504 | *range = Range::Full(RangeBoundary::kRangeBoundaryInt32); |
2505 | } else if (Type()->IsInt()) { |
2506 | *range = Range::Full(RangeBoundary::kRangeBoundaryInt64); |
2507 | } else { |
2508 | // Only Smi and Mint supported. |
2509 | UNREACHABLE(); |
2510 | } |
2511 | } |
2512 | |
2513 | static bool DependsOnSymbol(const RangeBoundary& a, Definition* symbol) { |
2514 | return a.IsSymbol() && (UnwrapConstraint(a.symbol()) == symbol); |
2515 | } |
2516 | |
// Given a range and a definition, update the range so that it covers both
// the original range and the definition's range.
2519 | // |
2520 | // The following should also hold: |
2521 | // |
2522 | // [_|_, _|_] U a = a U [_|_, _|_] = a |
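//
// For example, joining [0, 10] with [5, 20] yields [0, 20].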
2523 | // |
2524 | static void Join(Range* range, |
2525 | Definition* defn, |
2526 | const Range* defn_range, |
2527 | RangeBoundary::RangeSize size) { |
2528 | if (Range::IsUnknown(defn_range)) { |
2529 | return; |
2530 | } |
2531 | |
2532 | if (Range::IsUnknown(range)) { |
2533 | *range = *defn_range; |
2534 | return; |
2535 | } |
2536 | |
2537 | Range other = *defn_range; |
2538 | |
2539 | // Handle patterns where range already depends on defn as a symbol: |
2540 | // |
2541 | // (..., S+o] U range(S) and [S+o, ...) U range(S) |
2542 | // |
  // To improve the precision of the computed join, use [S, S] instead of
  // range(S); the symbol will be canonicalized away by the JoinMin/JoinMax
  // functions.
2546 | Definition* unwrapped = UnwrapConstraint(defn); |
2547 | if (DependsOnSymbol(range->min(), unwrapped) || |
2548 | DependsOnSymbol(range->max(), unwrapped)) { |
2549 | other = Range(RangeBoundary::FromDefinition(defn, 0), |
2550 | RangeBoundary::FromDefinition(defn, 0)); |
2551 | } |
2552 | |
2553 | // First try to compare ranges based on their upper and lower bounds. |
2554 | const int64_t inf_range = range->min().LowerBound(size); |
2555 | const int64_t inf_other = other.min().LowerBound(size); |
2556 | const int64_t sup_range = range->max().UpperBound(size); |
2557 | const int64_t sup_other = other.max().UpperBound(size); |
2558 | |
2559 | if (sup_range <= inf_other) { |
2560 | // The range is fully below defn's range. Keep the minimum and |
2561 | // expand the maximum. |
2562 | range->set_max(other.max()); |
2563 | } else if (sup_other <= inf_range) { |
2564 | // The range is fully above defn's range. Keep the maximum and |
2565 | // expand the minimum. |
2566 | range->set_min(other.min()); |
2567 | } else { |
2568 | // Can't compare ranges as whole. Join minimum and maximum separately. |
2569 | *range = Range(RangeBoundary::JoinMin(range->min(), other.min(), size), |
2570 | RangeBoundary::JoinMax(range->max(), other.max(), size)); |
2571 | } |
2572 | } |
2573 | |
2574 | // A definition dominates a phi if its block dominates the phi's block |
2575 | // and the two blocks are different. |
2576 | static bool DominatesPhi(BlockEntryInstr* a, BlockEntryInstr* phi_block) { |
2577 | return a->Dominates(phi_block) && (a != phi_block); |
2578 | } |
2579 | |
// When assigning a range to a phi we must take care to avoid self-reference
// cycles, which arise when the phi's range depends on the phi itself.
// To prevent such cases we impose an additional restriction on the symbols
// that can be used as boundaries for the phi's range: they must dominate
// the phi's definition.
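// For example, a loop phi's back-edge input is typically computed from the
// phi itself (i = i + 1) inside the loop body, which does not dominate the
// phi; using it as a boundary symbol would make the phi's range refer back
// to the phi.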
2585 | static RangeBoundary EnsureAcyclicSymbol(BlockEntryInstr* phi_block, |
2586 | const RangeBoundary& a, |
2587 | const RangeBoundary& limit) { |
2588 | if (!a.IsSymbol() || DominatesPhi(a.symbol()->GetBlock(), phi_block)) { |
2589 | return a; |
2590 | } |
2591 | |
2592 | // Symbol does not dominate phi. Try unwrapping constraint and check again. |
2593 | Definition* unwrapped = UnwrapConstraint(a.symbol()); |
2594 | if ((unwrapped != a.symbol()) && |
2595 | DominatesPhi(unwrapped->GetBlock(), phi_block)) { |
2596 | return RangeBoundary::FromDefinition(unwrapped, a.offset()); |
2597 | } |
2598 | |
2599 | return limit; |
2600 | } |
2601 | |
2602 | static const Range* GetInputRange(RangeAnalysis* analysis, |
2603 | RangeBoundary::RangeSize size, |
2604 | Value* input) { |
2605 | switch (size) { |
2606 | case RangeBoundary::kRangeBoundarySmi: |
2607 | return analysis->GetSmiRange(input); |
2608 | case RangeBoundary::kRangeBoundaryInt32: |
2609 | return input->definition()->range(); |
2610 | case RangeBoundary::kRangeBoundaryInt64: |
2611 | return analysis->GetIntRange(input); |
2612 | default: |
2613 | UNREACHABLE(); |
2614 | return NULL; |
2615 | } |
2616 | } |
2617 | |
2618 | void PhiInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2619 | const RangeBoundary::RangeSize size = RangeSizeForPhi(this); |
2620 | for (intptr_t i = 0; i < InputCount(); i++) { |
2621 | Value* input = InputAt(i); |
2622 | Join(range, input->definition(), GetInputRange(analysis, size, input), |
2623 | size); |
2624 | } |
2625 | |
2626 | BlockEntryInstr* phi_block = GetBlock(); |
2627 | range->set_min( |
2628 | EnsureAcyclicSymbol(phi_block, range->min(), RangeBoundary::MinSmi())); |
2629 | range->set_max( |
2630 | EnsureAcyclicSymbol(phi_block, range->max(), RangeBoundary::MaxSmi())); |
2631 | } |
2632 | |
2633 | void ConstantInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2634 | if (value_.IsSmi()) { |
2635 | int64_t value = Smi::Cast(value_).Value(); |
2636 | *range = Range(RangeBoundary::FromConstant(value), |
2637 | RangeBoundary::FromConstant(value)); |
2638 | } else if (value_.IsMint()) { |
2639 | int64_t value = Mint::Cast(value_).value(); |
2640 | *range = Range(RangeBoundary::FromConstant(value), |
2641 | RangeBoundary::FromConstant(value)); |
2642 | } else { |
2643 | // Only Smi and Mint supported. |
2644 | UNREACHABLE(); |
2645 | } |
2646 | } |
2647 | |
2648 | void ConstraintInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2649 | const Range* value_range = analysis->GetSmiRange(value()); |
2650 | if (Range::IsUnknown(value_range)) { |
2651 | return; |
2652 | } |
2653 | |
2654 | // TODO(vegorov) check if precision of the analysis can be improved by |
2655 | // recognizing intersections of the form: |
2656 | // |
2657 | // (..., S+x] ^ [S+x, ...) = [S+x, S+x] |
2658 | // |
2659 | Range result = value_range->Intersect(constraint()); |
2660 | |
2661 | if (result.IsUnsatisfiable()) { |
2662 | return; |
2663 | } |
2664 | |
2665 | *range = result; |
2666 | } |
2667 | |
2668 | void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2669 | switch (slot().kind()) { |
2670 | case Slot::Kind::kArray_length: |
2671 | case Slot::Kind::kGrowableObjectArray_length: |
2672 | *range = Range( |
2673 | RangeBoundary::FromConstant(0), |
2674 | RangeBoundary::FromConstant(compiler::target::Array::kMaxElements)); |
2675 | break; |
2676 | |
2677 | case Slot::Kind::kTypedDataBase_length: |
2678 | case Slot::Kind::kTypedDataView_offset_in_bytes: |
2679 | *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi()); |
2680 | break; |
2681 | |
2682 | case Slot::Kind::kString_length: |
2683 | *range = Range( |
2684 | RangeBoundary::FromConstant(0), |
2685 | RangeBoundary::FromConstant(compiler::target::String::kMaxElements)); |
2686 | break; |
2687 | |
2688 | case Slot::Kind::kDartField: |
2689 | case Slot::Kind::kCapturedVariable: |
2690 | // Use default value. |
2691 | Definition::InferRange(analysis, range); |
2692 | break; |
2693 | |
2694 | case Slot::Kind::kLinkedHashMap_index: |
2695 | case Slot::Kind::kLinkedHashMap_data: |
2696 | case Slot::Kind::kGrowableObjectArray_data: |
2697 | case Slot::Kind::kContext_parent: |
2698 | case Slot::Kind::kTypeArguments: |
2699 | case Slot::Kind::kClosure_context: |
2700 | case Slot::Kind::kClosure_delayed_type_arguments: |
2701 | case Slot::Kind::kClosure_function: |
2702 | case Slot::Kind::kClosure_function_type_arguments: |
2703 | case Slot::Kind::kClosure_instantiator_type_arguments: |
2704 | case Slot::Kind::kPointerBase_data_field: |
2705 | case Slot::Kind::kTypedDataView_data: |
2706 | case Slot::Kind::kType_arguments: |
2707 | case Slot::Kind::kTypeArgumentsIndex: |
2708 | case Slot::Kind::kUnhandledException_exception: |
2709 | case Slot::Kind::kUnhandledException_stacktrace: |
2710 | // Not an integer valued field. |
2711 | UNREACHABLE(); |
2712 | break; |
2713 | |
2714 | case Slot::Kind::kClosure_hash: |
2715 | case Slot::Kind::kLinkedHashMap_hash_mask: |
2716 | case Slot::Kind::kLinkedHashMap_used_data: |
2717 | case Slot::Kind::kLinkedHashMap_deleted_keys: |
2718 | *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi()); |
2719 | break; |
2720 | |
2721 | case Slot::Kind::kArgumentsDescriptor_type_args_len: |
2722 | case Slot::Kind::kArgumentsDescriptor_positional_count: |
2723 | case Slot::Kind::kArgumentsDescriptor_count: |
2724 | case Slot::Kind::kArgumentsDescriptor_size: |
2725 | *range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi()); |
2726 | break; |
2727 | } |
2728 | } |
2729 | |
2730 | void LoadIndexedInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2731 | switch (class_id()) { |
2732 | case kTypedDataInt8ArrayCid: |
2733 | *range = Range(RangeBoundary::FromConstant(-128), |
2734 | RangeBoundary::FromConstant(127)); |
2735 | break; |
2736 | case kTypedDataUint8ArrayCid: |
2737 | case kTypedDataUint8ClampedArrayCid: |
2738 | case kExternalTypedDataUint8ArrayCid: |
2739 | case kExternalTypedDataUint8ClampedArrayCid: |
2740 | *range = Range(RangeBoundary::FromConstant(0), |
2741 | RangeBoundary::FromConstant(255)); |
2742 | break; |
2743 | case kTypedDataInt16ArrayCid: |
2744 | *range = Range(RangeBoundary::FromConstant(-32768), |
2745 | RangeBoundary::FromConstant(32767)); |
2746 | break; |
2747 | case kTypedDataUint16ArrayCid: |
2748 | *range = Range(RangeBoundary::FromConstant(0), |
2749 | RangeBoundary::FromConstant(65535)); |
2750 | break; |
2751 | case kTypedDataInt32ArrayCid: |
2752 | *range = Range(RangeBoundary::FromConstant(kMinInt32), |
2753 | RangeBoundary::FromConstant(kMaxInt32)); |
2754 | break; |
2755 | case kTypedDataUint32ArrayCid: |
2756 | *range = Range(RangeBoundary::FromConstant(0), |
2757 | RangeBoundary::FromConstant(kMaxUint32)); |
2758 | break; |
2759 | case kOneByteStringCid: |
2760 | *range = Range(RangeBoundary::FromConstant(0), |
2761 | RangeBoundary::FromConstant(0xFF)); |
2762 | break; |
2763 | case kTwoByteStringCid: |
2764 | *range = Range(RangeBoundary::FromConstant(0), |
2765 | RangeBoundary::FromConstant(0xFFFF)); |
2766 | break; |
2767 | default: |
2768 | Definition::InferRange(analysis, range); |
2769 | break; |
2770 | } |
2771 | } |
2772 | |
2773 | void LoadCodeUnitsInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2774 | ASSERT(IsStringClassId(class_id())); |
2775 | RangeBoundary zero = RangeBoundary::FromConstant(0); |
2776 | // Take the number of loaded characters into account when determining the |
2777 | // range of the result. |
2778 | ASSERT(element_count_ > 0); |
2779 | switch (class_id()) { |
2780 | case kOneByteStringCid: |
2781 | case kExternalOneByteStringCid: |
2782 | ASSERT(element_count_ <= 4); |
2783 | *range = Range(zero, RangeBoundary::FromConstant( |
2784 | Utils::NBitMask(kBitsPerByte * element_count_))); |
2785 | break; |
2786 | case kTwoByteStringCid: |
2787 | case kExternalTwoByteStringCid: |
2788 | ASSERT(element_count_ <= 2); |
2789 | *range = Range(zero, RangeBoundary::FromConstant(Utils::NBitMask( |
2790 | 2 * kBitsPerByte * element_count_))); |
2791 | break; |
2792 | default: |
2793 | UNREACHABLE(); |
2794 | break; |
2795 | } |
2796 | } |
2797 | |
2798 | void IfThenElseInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2799 | const intptr_t min = Utils::Minimum(if_true_, if_false_); |
2800 | const intptr_t max = Utils::Maximum(if_true_, if_false_); |
2801 | *range = |
2802 | Range(RangeBoundary::FromConstant(min), RangeBoundary::FromConstant(max)); |
2803 | } |
2804 | |
2805 | static RangeBoundary::RangeSize RepresentationToRangeSize(Representation r) { |
2806 | switch (r) { |
2807 | case kTagged: |
2808 | return RangeBoundary::kRangeBoundarySmi; |
2809 | case kUnboxedInt32: |
2810 | return RangeBoundary::kRangeBoundaryInt32; |
2811 | case kUnboxedInt64: |
2812 | case kUnboxedUint32: // Overapproximate Uint32 as Int64. |
2813 | return RangeBoundary::kRangeBoundaryInt64; |
2814 | default: |
2815 | UNREACHABLE(); |
2816 | return RangeBoundary::kRangeBoundarySmi; |
2817 | } |
2818 | } |
2819 | |
2820 | void BinaryIntegerOpInstr::InferRangeHelper(const Range* left_range, |
2821 | const Range* right_range, |
2822 | Range* range) { |
2823 | // TODO(vegorov): canonicalize BinaryIntegerOp to always have constant on the |
2824 | // right and a non-constant on the left. |
2825 | if (Range::IsUnknown(left_range) || Range::IsUnknown(right_range)) { |
2826 | return; |
2827 | } |
2828 | |
2829 | Range::BinaryOp(op_kind(), left_range, right_range, left()->definition(), |
2830 | range); |
2831 | ASSERT(!Range::IsUnknown(range)); |
2832 | |
2833 | const RangeBoundary::RangeSize range_size = |
2834 | RepresentationToRangeSize(representation()); |
2835 | |
2836 | // Calculate overflowed status before clamping if operation is |
2837 | // not truncating. |
2838 | if (!is_truncating()) { |
2839 | set_can_overflow(!range->Fits(range_size)); |
2840 | } |
2841 | |
2842 | range->Clamp(range_size); |
2843 | } |
2844 | |
2845 | static void CacheRange(Range** slot, |
2846 | const Range* range, |
2847 | RangeBoundary::RangeSize size) { |
2848 | if (range != NULL) { |
2849 | if (*slot == NULL) { |
2850 | *slot = new Range(); |
2851 | } |
2852 | **slot = *range; |
2853 | |
2854 | // Eliminate any symbolic dependencies from the range information. |
2855 | (*slot)->ClampToConstant(size); |
2856 | } else if (*slot != NULL) { |
2857 | **slot = Range(); // Clear cached range information. |
2858 | } |
2859 | } |
2860 | |
2861 | void BinaryIntegerOpInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2862 | auto const left_size = |
2863 | RepresentationToRangeSize(RequiredInputRepresentation(0)); |
2864 | auto const right_size = |
2865 | RepresentationToRangeSize(RequiredInputRepresentation(1)); |
2866 | InferRangeHelper(GetInputRange(analysis, left_size, left()), |
2867 | GetInputRange(analysis, right_size, right()), range); |
2868 | } |
2869 | |
2870 | void BinarySmiOpInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2871 | const Range* right_smi_range = analysis->GetSmiRange(right()); |
2872 | // TODO(vegorov) completely remove this once GetSmiRange is eliminated. |
2873 | if (op_kind() == Token::kSHR || op_kind() == Token::kSHL || |
2874 | op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) { |
2875 | CacheRange(&right_range_, right_smi_range, |
2876 | RangeBoundary::kRangeBoundarySmi); |
2877 | } |
2878 | InferRangeHelper(analysis->GetSmiRange(left()), right_smi_range, range); |
2879 | } |
2880 | |
2881 | void ShiftIntegerOpInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2882 | const Range* right_range = RequiredInputRepresentation(1) == kTagged |
2883 | ? analysis->GetSmiRange(right()) |
2884 | : right()->definition()->range(); |
2885 | CacheRange(&shift_range_, right()->definition()->range(), |
2886 | RangeBoundary::kRangeBoundaryInt64); |
2887 | InferRangeHelper(left()->definition()->range(), right_range, range); |
2888 | } |
2889 | |
2890 | void BoxIntegerInstr::InferRange(RangeAnalysis* analysis, Range* range) { |
2891 | const Range* value_range = value()->definition()->range(); |
2892 | if (!Range::IsUnknown(value_range)) { |
2893 | *range = *value_range; |
2894 | } |
2895 | } |
2896 | |
2897 | void UnboxInt32Instr::InferRange(RangeAnalysis* analysis, Range* range) { |
2898 | if (value()->Type()->ToCid() == kSmiCid) { |
2899 | const Range* value_range = analysis->GetSmiRange(value()); |
2900 | if (!Range::IsUnknown(value_range)) { |
2901 | *range = *value_range; |
2902 | } |
2903 | } else if (RangeAnalysis::IsIntegerDefinition(value()->definition())) { |
2904 | const Range* value_range = analysis->GetIntRange(value()); |
2905 | if (!Range::IsUnknown(value_range)) { |
2906 | *range = *value_range; |
2907 | } |
  } else {
    // A Smi-typed input was already handled by the first branch above.
    *range = Range::Full(RangeBoundary::kRangeBoundaryInt32);
2912 | } |
2913 | } |
2914 | |
void UnboxUint32Instr::InferRange(RangeAnalysis* analysis, Range* range) {
  const Range* value_range = NULL;

  if (value()->Type()->ToCid() == kSmiCid) {
    value_range = analysis->GetSmiRange(value());
  } else if (RangeAnalysis::IsIntegerDefinition(value()->definition())) {
    value_range = analysis->GetIntRange(value());
  } else {
    *range = Range(RangeBoundary::FromConstant(0),
                   RangeBoundary::FromConstant(kMaxUint32));
    return;
  }

  if (!Range::IsUnknown(value_range)) {
    if (value_range->IsPositive()) {
      *range = *value_range;
    } else {
      *range = Range(RangeBoundary::FromConstant(0),
                     RangeBoundary::FromConstant(kMaxUint32));
    }
  }
}

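// The unboxed int64 value inherits the range of its input when one is known.
// When the input has no range yet but is itself an int64 definition or a Smi,
// the range is left unknown so that it can still be inferred; anything else
// is conservatively assigned the full int64 range.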
void UnboxInt64Instr::InferRange(RangeAnalysis* analysis, Range* range) {
  const Range* value_range = value()->definition()->range();
  if (value_range != NULL) {
    *range = *value_range;
  } else if (!value()->definition()->IsInt64Definition() &&
             (value()->definition()->Type()->ToCid() != kSmiCid)) {
    *range = Range::Full(RangeBoundary::kRangeBoundaryInt64);
  }
}

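// Integer conversions preserve the value range modulo clamping to the target
// representation. Conversions involving untagged values are only allowed
// between kUntagged and the word-sized unboxed integer representations.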
void IntConverterInstr::InferRange(RangeAnalysis* analysis, Range* range) {
  if (from() == kUntagged || to() == kUntagged) {
    ASSERT((from() == kUntagged &&
            (to() == kUnboxedIntPtr || to() == kUnboxedFfiIntPtr)) ||
           ((from() == kUnboxedIntPtr || from() == kUnboxedFfiIntPtr) &&
            to() == kUntagged));
  } else {
    ASSERT(from() == kUnboxedInt32 || from() == kUnboxedInt64 ||
           from() == kUnboxedUint32);
    ASSERT(to() == kUnboxedInt32 || to() == kUnboxedInt64 ||
           to() == kUnboxedUint32);
  }

  const Range* value_range = value()->definition()->range();
  if (Range::IsUnknown(value_range)) {
    return;
  }

  if (to() == kUnboxedUint32) {
    // TODO(vegorov): improve range information for unboxing to Uint32.
    *range =
        Range(RangeBoundary::FromConstant(0),
              RangeBoundary::FromConstant(static_cast<int64_t>(kMaxUint32)));
  } else {
    *range = *value_range;
    if (to() == kUnboxedInt32) {
      range->Clamp(RangeBoundary::kRangeBoundaryInt32);
    }
  }
}

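// Returns true if the bounds check for the given index and length is provably
// redundant: first constant range boundaries are compared directly, then the
// upper bound of the index is compared symbolically against the array length.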
static bool IsRedundantBasedOnRangeInformation(Value* index, Value* length) {
  // If the range of the index is unknown we cannot decide whether the check
  // is redundant, unless the index is bound to a Smi constant, in which case
  // its range can be inferred on the spot.
  Range* index_range = index->definition()->range();
  if (index_range == nullptr) {
    if (!(index->BindsToConstant() &&
          compiler::target::IsSmi(index->BoundConstant()))) {
      return false;
    }
    Range range;
    index->definition()->InferRange(nullptr, &range);
    ASSERT(!Range::IsUnknown(&range));
    index->definition()->set_range(range);
    index_range = index->definition()->range();
  }

  // The index may be negative, so the check can't be redundant.
  if (Range::ConstantMinSmi(index_range).ConstantValue() < 0) {
    return false;
  }

  RangeBoundary max = RangeBoundary::FromDefinition(index->definition());
  RangeBoundary max_upper = max.UpperBound();
  RangeBoundary array_length =
      RangeBoundary::FromDefinition(length->definition());
  RangeBoundary length_lower = array_length.LowerBound();
  if (max_upper.OverflowedSmi() || length_lower.OverflowedSmi()) {
    return false;
  }

  // Try to compare constant boundaries.
  if (max_upper.ConstantValue() < length_lower.ConstantValue()) {
    return true;
  }

  RangeBoundary canonical_length =
      CanonicalizeBoundary(array_length, RangeBoundary::PositiveInfinity());
  if (canonical_length.OverflowedSmi()) {
    return false;
  }

  // Try symbolic comparison.
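  // For example, if max is `length - 1` and canonical_length is `length`,
  // both depend on the same symbol and the offsets compare as -1 < 0,
  // proving the check redundant.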
  do {
    if (DependOnSameSymbol(max, canonical_length)) {
      return max.offset() < canonical_length.offset();
    }
  } while (CanonicalizeMaxBoundary(&max) ||
           CanonicalizeMinBoundary(&canonical_length));

  // Failed to prove that the maximum index is bounded by the array length.
  return false;
}

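// A bounds check is redundant if either range analysis or, when requested,
// loop-based induction analysis can prove that the index is always within
// the array bounds.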
bool CheckBoundBase::IsRedundant(bool use_loops) {
  // First, try to prove redundancy with the results of range analysis.
  if (IsRedundantBasedOnRangeInformation(index(), length())) {
    return true;
  } else if (!use_loops) {
    return false;
  }
  // Next, try to prove redundancy with the results of induction analysis.
  LoopInfo* loop = GetBlock()->loop_info();
  if (loop != nullptr) {
    return loop->IsInRange(this, index(), length());
  }
  return false;
}

}  // namespace dart