// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/call_specializer.h"

#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/inliner.h"
#include "vm/compiler/cha.h"
#include "vm/compiler/compiler_state.h"
#include "vm/cpu.h"

namespace dart {

// Quick access to the current isolate and zone.
#define I (isolate())
#define Z (zone())

static void RefineUseTypes(Definition* instr) {
  CompileType* new_type = instr->Type();
  for (Value::Iterator it(instr->input_use_list()); !it.Done(); it.Advance()) {
    it.Current()->RefineReachingType(new_type);
  }
}

static bool ShouldInlineSimd() {
  return FlowGraphCompiler::SupportsUnboxedSimd128();
}

static bool CanUnboxDouble() {
  return FlowGraphCompiler::SupportsUnboxedDoubles();
}

static bool CanConvertInt64ToDouble() {
  return FlowGraphCompiler::CanConvertInt64ToDouble();
}

static bool IsNumberCid(intptr_t cid) {
  return (cid == kSmiCid) || (cid == kDoubleCid);
}

static bool ShouldSpecializeForDouble(const BinaryFeedback& binary_feedback) {
  // Don't specialize for double if we can't unbox them.
  if (!CanUnboxDouble()) {
    return false;
  }

  // An unboxed double operation can't handle the case of two smis.
  if (binary_feedback.IncludesOperands(kSmiCid)) {
    return false;
  }
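  // For example, feedback {(double, double), (smi, double)} can be handled
  // by unboxed double code, but feedback containing a (smi, smi) check
  // cannot, since an operation on two smis must produce an integer result,
  // not a double.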

  // Check that the call site has seen only smis and doubles.
  return binary_feedback.OperandsAreSmiOrDouble();
}

// Optimize instance calls using ICData.
void CallSpecializer::ApplyICData() {
  VisitBlocks();
}

// Optimize instance calls using cids. This is called after the optimizer has
// converted instance calls to instructions. Any remaining instance calls are
// either megamorphic calls, cannot be optimized, or have no runtime type
// feedback collected.
// Attempts to convert an instance call (IC call) using propagated class ids,
// e.g., the receiver class id, a guarded cid, or by guessing cids.
void CallSpecializer::ApplyClassIds() {
  ASSERT(current_iterator_ == NULL);
  for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    thread()->CheckForSafepoint();
    ForwardInstructionIterator it(block_it.Current());
    current_iterator_ = &it;
    for (; !it.Done(); it.Advance()) {
      Instruction* instr = it.Current();
      if (instr->IsInstanceCall()) {
        InstanceCallInstr* call = instr->AsInstanceCall();
        if (call->HasICData()) {
          if (TryCreateICData(call)) {
            VisitInstanceCall(call);
          }
        }
      } else if (auto static_call = instr->AsStaticCall()) {
        // If TFA devirtualized instance calls to static calls, we also want
        // to process them here.
        VisitStaticCall(static_call);
      } else if (instr->IsPolymorphicInstanceCall()) {
        SpecializePolymorphicInstanceCall(instr->AsPolymorphicInstanceCall());
      }
    }
    current_iterator_ = NULL;
  }
}

bool CallSpecializer::TryCreateICData(InstanceCallInstr* call) {
  ASSERT(call->HasICData());

  if (call->Targets().length() > 0) {
    // This occurs when an instance call has too many checks and will be
    // converted to a megamorphic call.
    return false;
  }

  const intptr_t receiver_index = call->FirstArgIndex();
  GrowableArray<intptr_t> class_ids(call->ic_data()->NumArgsTested());
  ASSERT(call->ic_data()->NumArgsTested() <=
         call->ArgumentCountWithoutTypeArgs());
  for (intptr_t i = 0; i < call->ic_data()->NumArgsTested(); i++) {
    class_ids.Add(call->ArgumentValueAt(receiver_index + i)->Type()->ToCid());
  }

  const Token::Kind op_kind = call->token_kind();
  if (FLAG_guess_icdata_cid) {
    if (CompilerState::Current().is_aot()) {
      // In the precompiler, speculate that both sides of a bitwise operation
      // are smis.
      if (Token::IsBinaryBitwiseOperator(op_kind) &&
          call->CanReceiverBeSmiBasedOnInterfaceTarget(zone())) {
        class_ids[0] = kSmiCid;
        class_ids[1] = kSmiCid;
      }
    }
    if (Token::IsRelationalOperator(op_kind) ||
        Token::IsEqualityOperator(op_kind) ||
        Token::IsBinaryOperator(op_kind)) {
      // Guess the cid: if one of the inputs is a number, assume that the
      // other is a number of the same type, unless the interface target
      // tells us this is impossible.
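      // For example, in 'x + 1.0' with an unknown receiver 'x', guess that
      // 'x' is also a double.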
      if (call->CanReceiverBeSmiBasedOnInterfaceTarget(zone())) {
        const intptr_t cid_0 = class_ids[0];
        const intptr_t cid_1 = class_ids[1];
        if ((cid_0 == kDynamicCid) && (IsNumberCid(cid_1))) {
          class_ids[0] = cid_1;
        } else if (IsNumberCid(cid_0) && (cid_1 == kDynamicCid)) {
          class_ids[1] = cid_0;
        }
      }
    }
  }

  bool all_cids_known = true;
  for (intptr_t i = 0; i < class_ids.length(); i++) {
    if (class_ids[i] == kDynamicCid) {
      // Not all cids are known.
      all_cids_known = false;
      break;
    }
  }

  if (all_cids_known) {
    const Class& receiver_class =
        Class::Handle(Z, isolate()->class_table()->At(class_ids[0]));
    if (!receiver_class.is_finalized()) {
      // Do not eagerly finalize classes. ResolveDynamicForReceiverClass can
      // cause class finalization, since the callee's receiver class may not
      // be finalized yet.
      return false;
    }
    const Function& function = Function::Handle(
        Z, call->ResolveForReceiverClass(receiver_class, /*allow_add=*/false));
    if (function.IsNull()) {
      return false;
    }
    ASSERT(!function.IsInvokeFieldDispatcher());

    // Update the CallTargets attached to the instruction with our speculative
    // target. The next round of CallSpecializer::VisitInstanceCall will make
    // use of this.
    call->SetTargets(CallTargets::CreateMonomorphic(Z, class_ids[0], function));
    if (class_ids.length() == 2) {
      call->SetBinaryFeedback(
          BinaryFeedback::CreateMonomorphic(Z, class_ids[0], class_ids[1]));
    }
    return true;
  }

  return false;
}

void CallSpecializer::SpecializePolymorphicInstanceCall(
    PolymorphicInstanceCallInstr* call) {
  if (!FLAG_polymorphic_with_deopt) {
    // Specialization adds receiver checks which can lead to deoptimization.
    return;
  }

  const intptr_t receiver_cid = call->Receiver()->Type()->ToCid();
  if (receiver_cid == kDynamicCid) {
    return;  // No information about the receiver was inferred.
  }

  const ICData& ic_data = *call->ic_data();

  const CallTargets* targets =
      FlowGraphCompiler::ResolveCallTargetsForReceiverCid(
          receiver_cid, String::Handle(zone(), ic_data.target_name()),
          Array::Handle(zone(), ic_data.arguments_descriptor()));
  if (targets == NULL) {
    // No specialization.
    return;
  }

  ASSERT(targets->HasSingleTarget());
  const Function& target = targets->FirstTarget();
  StaticCallInstr* specialized =
      StaticCallInstr::FromCall(Z, call, target, targets->AggregateCallCount());
  call->ReplaceWith(specialized, current_iterator());
}

void CallSpecializer::ReplaceCallWithResult(Definition* call,
                                            Instruction* replacement,
                                            Definition* result) {
  ASSERT(!call->HasPushArguments());
  if (result == nullptr) {
    ASSERT(replacement->IsDefinition());
    call->ReplaceWith(replacement->AsDefinition(), current_iterator());
  } else {
    call->ReplaceWithResult(replacement, result, current_iterator());
  }
}

void CallSpecializer::ReplaceCall(Definition* call, Definition* replacement) {
  ReplaceCallWithResult(call, replacement, nullptr);
}

void CallSpecializer::AddCheckSmi(Definition* to_check,
                                  intptr_t deopt_id,
                                  Environment* deopt_environment,
                                  Instruction* insert_before) {
  // TODO(alexmarkov): check the reaching type instead of the definition type.
  if (to_check->Type()->ToCid() != kSmiCid) {
    InsertBefore(insert_before,
                 new (Z) CheckSmiInstr(new (Z) Value(to_check), deopt_id,
                                       insert_before->token_pos()),
                 deopt_environment, FlowGraph::kEffect);
  }
}

void CallSpecializer::AddCheckClass(Definition* to_check,
                                    const Cids& cids,
                                    intptr_t deopt_id,
                                    Environment* deopt_environment,
                                    Instruction* insert_before) {
  // Type propagation has not run yet, so we cannot eliminate the check.
  Instruction* check = flow_graph_->CreateCheckClass(
      to_check, cids, deopt_id, insert_before->token_pos());
  InsertBefore(insert_before, check, deopt_environment, FlowGraph::kEffect);
}

void CallSpecializer::AddChecksForArgNr(InstanceCallInstr* call,
                                        Definition* argument,
                                        int argument_number) {
  const Cids* cids =
      Cids::CreateForArgument(zone(), call->BinaryFeedback(), argument_number);
  AddCheckClass(argument, *cids, call->deopt_id(), call->env(), call);
}

void CallSpecializer::AddCheckNull(Value* to_check,
                                   const String& function_name,
                                   intptr_t deopt_id,
                                   Environment* deopt_environment,
                                   Instruction* insert_before) {
  if (to_check->Type()->is_nullable()) {
    CheckNullInstr* check_null =
        new (Z) CheckNullInstr(to_check->CopyWithType(Z), function_name,
                               deopt_id, insert_before->token_pos());
    if (FLAG_trace_strong_mode_types) {
      THR_Print("[Strong mode] Inserted %s\n", check_null->ToCString());
    }
    InsertBefore(insert_before, check_null, deopt_environment,
                 FlowGraph::kEffect);
  }
}

bool CallSpecializer::TryReplaceWithIndexedOp(InstanceCallInstr* call) {
  if (call->Targets().IsMonomorphic()) {
    return FlowGraphInliner::TryReplaceInstanceCallWithInline(
        flow_graph_, current_iterator(), call, speculative_policy_);
  }
  return false;
}

// Returns true if d is a string of length one (a constant or the result of
// a string-from-char-code instruction).
static bool IsLengthOneString(Definition* d) {
  if (d->IsConstant()) {
    const Object& obj = d->AsConstant()->value();
    if (obj.IsString()) {
      return String::Cast(obj).Length() == 1;
    } else {
      return false;
    }
  } else {
    return d->IsOneByteStringFromCharCode();
  }
}

// Returns true if the string comparison was converted into a char-code
// comparison. Conversion is only possible for strings of length one.
// E.g., detect str[x] == "x"; and use an integer comparison of char-codes.
bool CallSpecializer::TryStringLengthOneEquality(InstanceCallInstr* call,
                                                 Token::Kind op_kind) {
  ASSERT(call->BinaryFeedback().OperandsAre(kOneByteStringCid));
  // Check that left and right are length-one strings (either string constants
  // or results of string-from-char-code).
  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);
  Value* left_val = NULL;
  Definition* to_remove_left = NULL;
  if (IsLengthOneString(right)) {
    // Swap, since we know that both arguments are strings.
    Definition* temp = left;
    left = right;
    right = temp;
  }
  if (IsLengthOneString(left)) {
    // Optimize if left is a string with length one (either a constant or the
    // result of string-from-char-code).
    if (left->IsConstant()) {
      ConstantInstr* left_const = left->AsConstant();
      const String& str = String::Cast(left_const->value());
      ASSERT(str.Length() == 1);
      ConstantInstr* char_code_left = flow_graph()->GetConstant(
          Smi::ZoneHandle(Z, Smi::New(static_cast<intptr_t>(str.CharAt(0)))));
      left_val = new (Z) Value(char_code_left);
    } else if (left->IsOneByteStringFromCharCode()) {
      // Use the input of string-from-char-code as the left value.
      OneByteStringFromCharCodeInstr* instr =
          left->AsOneByteStringFromCharCode();
      left_val = new (Z) Value(instr->char_code()->definition());
      to_remove_left = instr;
    } else {
      // IsLengthOneString(left) should have been false.
      UNREACHABLE();
    }

    Definition* to_remove_right = NULL;
    Value* right_val = NULL;
    if (right->IsOneByteStringFromCharCode()) {
      // Skip string-from-char-code, and use its input as the right value.
      OneByteStringFromCharCodeInstr* right_instr =
          right->AsOneByteStringFromCharCode();
      right_val = new (Z) Value(right_instr->char_code()->definition());
      to_remove_right = right_instr;
    } else {
      AddChecksForArgNr(call, right, /* arg_number = */ 1);
      // The string-to-char-code instruction returns -1 (an illegal char-code)
      // if the string is not of length one.
      StringToCharCodeInstr* char_code_right = new (Z)
          StringToCharCodeInstr(new (Z) Value(right), kOneByteStringCid);
      InsertBefore(call, char_code_right, call->env(), FlowGraph::kValue);
      right_val = new (Z) Value(char_code_right);
    }

    // Compare char-codes instead of strings.
    EqualityCompareInstr* comp =
        new (Z) EqualityCompareInstr(call->token_pos(), op_kind, left_val,
                                     right_val, kSmiCid, call->deopt_id());
    ReplaceCall(call, comp);

    // Remove dead instructions.
    if ((to_remove_left != NULL) &&
        (to_remove_left->input_use_list() == NULL)) {
      to_remove_left->ReplaceUsesWith(flow_graph()->constant_null());
      to_remove_left->RemoveFromGraph();
    }
    if ((to_remove_right != NULL) &&
        (to_remove_right->input_use_list() == NULL)) {
      to_remove_right->ReplaceUsesWith(flow_graph()->constant_null());
      to_remove_right->RemoveFromGraph();
    }
    return true;
  }
  return false;
}

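// A smi converts to a double losslessly only if it is narrower than the
// 53-bit mantissa of a double. This holds on 32-bit targets but not on
// 64-bit targets, where comparing two large smis via doubles could give
// wrong results.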
static bool SmiFitsInDouble() {
  return compiler::target::kSmiBits < 53;
}

bool CallSpecializer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
                                               Token::Kind op_kind) {
  const BinaryFeedback& binary_feedback = call->BinaryFeedback();

  ASSERT(call->type_args_len() == 0);
  ASSERT(call->ArgumentCount() == 2);
  Definition* const left = call->ArgumentAt(0);
  Definition* const right = call->ArgumentAt(1);

  intptr_t cid = kIllegalCid;
  if (binary_feedback.OperandsAre(kOneByteStringCid)) {
    return TryStringLengthOneEquality(call, op_kind);
  } else if (binary_feedback.OperandsAre(kSmiCid)) {
    InsertBefore(call,
                 new (Z) CheckSmiInstr(new (Z) Value(left), call->deopt_id(),
                                       call->token_pos()),
                 call->env(), FlowGraph::kEffect);
    InsertBefore(call,
                 new (Z) CheckSmiInstr(new (Z) Value(right), call->deopt_id(),
                                       call->token_pos()),
                 call->env(), FlowGraph::kEffect);
    cid = kSmiCid;
  } else if (binary_feedback.OperandsAreSmiOrMint() &&
             FlowGraphCompiler::SupportsUnboxedInt64()) {
    cid = kMintCid;
  } else if (binary_feedback.OperandsAreSmiOrDouble() && CanUnboxDouble()) {
    // Use double comparison.
    if (SmiFitsInDouble()) {
      cid = kDoubleCid;
    } else {
      if (binary_feedback.IncludesOperands(kSmiCid)) {
        // We cannot use a double comparison on two smis; a polymorphic call
        // is needed.
        return false;
      } else {
        InsertBefore(
            call,
            new (Z) CheckEitherNonSmiInstr(
                new (Z) Value(left), new (Z) Value(right), call->deopt_id()),
            call->env(), FlowGraph::kEffect);
        cid = kDoubleCid;
      }
    }
  } else {
    // Check if the ICData contains checks with Smi/Null combinations. In that
    // case we can still emit the optimized Smi equality operation but need to
    // add checks for null or Smi.
    if (binary_feedback.OperandsAreSmiOrNull()) {
      AddChecksForArgNr(call, left, /* arg_number = */ 0);
      AddChecksForArgNr(call, right, /* arg_number = */ 1);

      cid = kSmiCid;
    } else {
      // Shortcut for equality with null.
      // TODO(vegorov): this optimization is not speculative and should
      // be hoisted out of this function.
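      // For example, 'x == null' compiles to a StrictCompare against the
      // null constant and never deoptimizes (DeoptId::kNone below).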
      ConstantInstr* right_const = right->AsConstant();
      ConstantInstr* left_const = left->AsConstant();
      if ((right_const != NULL && right_const->value().IsNull()) ||
          (left_const != NULL && left_const->value().IsNull())) {
        StrictCompareInstr* comp = new (Z)
            StrictCompareInstr(call->token_pos(), Token::kEQ_STRICT,
                               new (Z) Value(left), new (Z) Value(right),
                               /* number_check = */ false, DeoptId::kNone);
        ReplaceCall(call, comp);
        return true;
      }
      return false;
    }
  }
  ASSERT(cid != kIllegalCid);
  EqualityCompareInstr* comp = new (Z)
      EqualityCompareInstr(call->token_pos(), op_kind, new (Z) Value(left),
                           new (Z) Value(right), cid, call->deopt_id());
  ReplaceCall(call, comp);
  return true;
}

bool CallSpecializer::TryReplaceWithRelationalOp(InstanceCallInstr* call,
                                                 Token::Kind op_kind) {
  ASSERT(call->type_args_len() == 0);
  ASSERT(call->ArgumentCount() == 2);

  const BinaryFeedback& binary_feedback = call->BinaryFeedback();
  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);

  intptr_t cid = kIllegalCid;
  if (binary_feedback.OperandsAre(kSmiCid)) {
    InsertBefore(call,
                 new (Z) CheckSmiInstr(new (Z) Value(left), call->deopt_id(),
                                       call->token_pos()),
                 call->env(), FlowGraph::kEffect);
    InsertBefore(call,
                 new (Z) CheckSmiInstr(new (Z) Value(right), call->deopt_id(),
                                       call->token_pos()),
                 call->env(), FlowGraph::kEffect);
    cid = kSmiCid;
  } else if (binary_feedback.OperandsAreSmiOrMint() &&
             FlowGraphCompiler::SupportsUnboxedInt64()) {
    cid = kMintCid;
  } else if (binary_feedback.OperandsAreSmiOrDouble() && CanUnboxDouble()) {
    // Use double comparison.
    if (SmiFitsInDouble()) {
      cid = kDoubleCid;
    } else {
      if (binary_feedback.IncludesOperands(kSmiCid)) {
        // We cannot use a double comparison on two smis; a polymorphic call
        // is needed.
        return false;
      } else {
        InsertBefore(
            call,
            new (Z) CheckEitherNonSmiInstr(
                new (Z) Value(left), new (Z) Value(right), call->deopt_id()),
            call->env(), FlowGraph::kEffect);
        cid = kDoubleCid;
      }
    }
  } else {
    return false;
  }
  ASSERT(cid != kIllegalCid);
  RelationalOpInstr* comp =
      new (Z) RelationalOpInstr(call->token_pos(), op_kind, new (Z) Value(left),
                                new (Z) Value(right), cid, call->deopt_id());
  ReplaceCall(call, comp);
  return true;
}

bool CallSpecializer::TryReplaceWithBinaryOp(InstanceCallInstr* call,
                                             Token::Kind op_kind) {
  intptr_t operands_type = kIllegalCid;
  ASSERT(call->HasICData());
  const BinaryFeedback& binary_feedback = call->BinaryFeedback();
  switch (op_kind) {
    case Token::kADD:
    case Token::kSUB:
    case Token::kMUL:
      if (binary_feedback.OperandsAre(kSmiCid)) {
        // Don't generate smi code if the IC data is marked because
        // of an overflow.
        operands_type =
            call->ic_data()->HasDeoptReason(ICData::kDeoptBinarySmiOp)
                ? kMintCid
                : kSmiCid;
      } else if (binary_feedback.OperandsAreSmiOrMint() &&
                 FlowGraphCompiler::SupportsUnboxedInt64()) {
        // Don't generate mint code if the IC data is marked because of an
        // overflow.
        if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinaryInt64Op))
          return false;
        operands_type = kMintCid;
      } else if (ShouldSpecializeForDouble(binary_feedback)) {
        operands_type = kDoubleCid;
      } else if (binary_feedback.OperandsAre(kFloat32x4Cid)) {
        operands_type = kFloat32x4Cid;
      } else if (binary_feedback.OperandsAre(kInt32x4Cid)) {
        ASSERT(op_kind != Token::kMUL);  // Int32x4 doesn't have a multiply op.
        operands_type = kInt32x4Cid;
      } else if (binary_feedback.OperandsAre(kFloat64x2Cid)) {
        operands_type = kFloat64x2Cid;
      } else {
        return false;
      }
      break;
    case Token::kDIV:
      if (!FlowGraphCompiler::SupportsHardwareDivision()) return false;
      if (ShouldSpecializeForDouble(binary_feedback) ||
          binary_feedback.OperandsAre(kSmiCid)) {
        operands_type = kDoubleCid;
      } else if (binary_feedback.OperandsAre(kFloat32x4Cid)) {
        operands_type = kFloat32x4Cid;
      } else if (binary_feedback.OperandsAre(kFloat64x2Cid)) {
        operands_type = kFloat64x2Cid;
      } else {
        return false;
      }
      break;
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
      if (binary_feedback.OperandsAre(kSmiCid)) {
        operands_type = kSmiCid;
      } else if (binary_feedback.OperandsAreSmiOrMint()) {
        operands_type = kMintCid;
      } else if (binary_feedback.OperandsAre(kInt32x4Cid)) {
        operands_type = kInt32x4Cid;
      } else {
        return false;
      }
      break;
    case Token::kSHR:
    case Token::kSHL:
      if (binary_feedback.OperandsAre(kSmiCid)) {
        // Left shift may overflow from smi into mint or big ints.
        // Don't generate smi code if the IC data is marked because
        // of an overflow.
        if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinaryInt64Op)) {
          return false;
        }
        operands_type =
            call->ic_data()->HasDeoptReason(ICData::kDeoptBinarySmiOp)
                ? kMintCid
                : kSmiCid;
      } else if (binary_feedback.OperandsAreSmiOrMint() &&
                 binary_feedback.ArgumentIs(kSmiCid)) {
        // Don't generate mint code if the IC data is marked because of an
        // overflow.
        if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinaryInt64Op)) {
          return false;
        }
        // Check for smi/mint << smi or smi/mint >> smi.
        operands_type = kMintCid;
      } else {
        return false;
      }
      break;
    case Token::kMOD:
    case Token::kTRUNCDIV:
      if (!FlowGraphCompiler::SupportsHardwareDivision()) return false;
      if (binary_feedback.OperandsAre(kSmiCid)) {
        if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinarySmiOp)) {
          return false;
        }
        operands_type = kSmiCid;
      } else {
        return false;
      }
      break;
    default:
      UNREACHABLE();
  }

  ASSERT(call->type_args_len() == 0);
  ASSERT(call->ArgumentCount() == 2);
  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);
  if (operands_type == kDoubleCid) {
    if (!CanUnboxDouble()) {
      return false;
    }
    // Check that at least one of the operands is not a smi: the result of a
    // binary operation on two smis is a smi, not a double, except for '/',
    // which returns a double even for two smis.
    if (op_kind != Token::kDIV) {
      InsertBefore(
          call,
          new (Z) CheckEitherNonSmiInstr(
              new (Z) Value(left), new (Z) Value(right), call->deopt_id()),
          call->env(), FlowGraph::kEffect);
    }

    BinaryDoubleOpInstr* double_bin_op = new (Z)
        BinaryDoubleOpInstr(op_kind, new (Z) Value(left), new (Z) Value(right),
                            call->deopt_id(), call->token_pos());
    ReplaceCall(call, double_bin_op);
  } else if (operands_type == kMintCid) {
    if (!FlowGraphCompiler::SupportsUnboxedInt64()) return false;
    if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) {
      SpeculativeShiftInt64OpInstr* shift_op = new (Z)
          SpeculativeShiftInt64OpInstr(op_kind, new (Z) Value(left),
                                       new (Z) Value(right), call->deopt_id());
      ReplaceCall(call, shift_op);
    } else {
      BinaryInt64OpInstr* bin_op = new (Z) BinaryInt64OpInstr(
          op_kind, new (Z) Value(left), new (Z) Value(right), call->deopt_id());
      ReplaceCall(call, bin_op);
    }
  } else if ((operands_type == kFloat32x4Cid) ||
             (operands_type == kInt32x4Cid) ||
             (operands_type == kFloat64x2Cid)) {
    return InlineSimdBinaryOp(call, operands_type, op_kind);
  } else if (op_kind == Token::kMOD) {
    ASSERT(operands_type == kSmiCid);
    if (right->IsConstant()) {
      const Object& obj = right->AsConstant()->value();
      if (obj.IsSmi() && Utils::IsPowerOfTwo(Smi::Cast(obj).Value())) {
        // Insert a smi check and attach a copy of the original environment
        // because the smi operation can still deoptimize.
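        // For example, 'x % 8' becomes 'x & 7' below (the mask constant is
        // the power of two minus one), guarded by the smi check on 'x'.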
        InsertBefore(call,
                     new (Z) CheckSmiInstr(new (Z) Value(left),
                                           call->deopt_id(), call->token_pos()),
                     call->env(), FlowGraph::kEffect);
        ConstantInstr* constant = flow_graph()->GetConstant(
            Smi::Handle(Z, Smi::New(Smi::Cast(obj).Value() - 1)));
        BinarySmiOpInstr* bin_op =
            new (Z) BinarySmiOpInstr(Token::kBIT_AND, new (Z) Value(left),
                                     new (Z) Value(constant), call->deopt_id());
        ReplaceCall(call, bin_op);
        return true;
      }
    }
    // Insert two smi checks and attach a copy of the original
    // environment because the smi operation can still deoptimize.
    AddCheckSmi(left, call->deopt_id(), call->env(), call);
    AddCheckSmi(right, call->deopt_id(), call->env(), call);
    BinarySmiOpInstr* bin_op = new (Z) BinarySmiOpInstr(
        op_kind, new (Z) Value(left), new (Z) Value(right), call->deopt_id());
    ReplaceCall(call, bin_op);
  } else {
    ASSERT(operands_type == kSmiCid);
    // Insert two smi checks and attach a copy of the original
    // environment because the smi operation can still deoptimize.
    AddCheckSmi(left, call->deopt_id(), call->env(), call);
    AddCheckSmi(right, call->deopt_id(), call->env(), call);
    if (left->IsConstant() &&
        ((op_kind == Token::kADD) || (op_kind == Token::kMUL))) {
      // Constant should be on the right side.
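      // Swapping is safe because kADD and kMUL are commutative; later passes
      // can then assume a constant operand, if present, is on the right.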
      Definition* temp = left;
      left = right;
      right = temp;
    }
    BinarySmiOpInstr* bin_op = new (Z) BinarySmiOpInstr(
        op_kind, new (Z) Value(left), new (Z) Value(right), call->deopt_id());
    ReplaceCall(call, bin_op);
  }
  return true;
}

bool CallSpecializer::TryReplaceWithUnaryOp(InstanceCallInstr* call,
                                            Token::Kind op_kind) {
  ASSERT(call->type_args_len() == 0);
  ASSERT(call->ArgumentCount() == 1);
  Definition* input = call->ArgumentAt(0);
  Definition* unary_op = NULL;
  if (call->Targets().ReceiverIs(kSmiCid)) {
    InsertBefore(call,
                 new (Z) CheckSmiInstr(new (Z) Value(input), call->deopt_id(),
                                       call->token_pos()),
                 call->env(), FlowGraph::kEffect);
    unary_op = new (Z)
        UnarySmiOpInstr(op_kind, new (Z) Value(input), call->deopt_id());
  } else if ((op_kind == Token::kBIT_NOT) &&
             call->Targets().ReceiverIsSmiOrMint() &&
             FlowGraphCompiler::SupportsUnboxedInt64()) {
    unary_op = new (Z)
        UnaryInt64OpInstr(op_kind, new (Z) Value(input), call->deopt_id());
  } else if (call->Targets().ReceiverIs(kDoubleCid) &&
             (op_kind == Token::kNEGATE) && CanUnboxDouble()) {
    AddReceiverCheck(call);
    unary_op = new (Z) UnaryDoubleOpInstr(Token::kNEGATE, new (Z) Value(input),
                                          call->deopt_id());
  } else {
    return false;
  }
  ASSERT(unary_op != NULL);
  ReplaceCall(call, unary_op);
  return true;
}

bool CallSpecializer::TryInlineImplicitInstanceGetter(InstanceCallInstr* call) {
  const CallTargets& targets = call->Targets();
  ASSERT(targets.HasSingleTarget());

  // Inline the implicit instance getter.
  Field& field = Field::ZoneHandle(Z, targets.FirstTarget().accessor_field());
  ASSERT(!field.IsNull());
  if (field.needs_load_guard()) {
    return false;
  }
  if (should_clone_fields_) {
    field = field.CloneFromOriginal();
  }

  switch (flow_graph()->CheckForInstanceCall(call,
                                             FunctionLayout::kImplicitGetter)) {
    case FlowGraph::ToCheck::kCheckNull:
      AddCheckNull(call->Receiver(), call->function_name(), call->deopt_id(),
                   call->env(), call);
      break;
    case FlowGraph::ToCheck::kCheckCid:
      if (CompilerState::Current().is_aot()) {
        return false;  // AOT cannot insert speculative class checks.
      }
      AddReceiverCheck(call);
      break;
    case FlowGraph::ToCheck::kNoCheck:
      break;
  }
  InlineImplicitInstanceGetter(call, field);
  return true;
}

void CallSpecializer::InlineImplicitInstanceGetter(Definition* call,
                                                   const Field& field) {
  ASSERT(field.is_instance());
  Definition* receiver = call->ArgumentAt(0);

  const bool calls_initializer = field.NeedsInitializationCheckOnLoad();
  const Slot& slot = Slot::Get(field, &flow_graph()->parsed_function());
  LoadFieldInstr* load = new (Z) LoadFieldInstr(
      new (Z) Value(receiver), slot, call->token_pos(), calls_initializer,
      calls_initializer ? call->deopt_id() : DeoptId::kNone);

  // Note that this is a case of LoadField -> InstanceCall lazy deopt, which
  // means that we don't need to remove arguments from the environment
  // because a normal getter call expects the receiver pushed (unlike the
  // case of LoadField -> LoadField deoptimization handled by
  // FlowGraph::AttachEnvironment).
  if (!calls_initializer) {
    // If we don't call the initializer then we don't need an environment.
    call->RemoveEnvironment();
  }
  ReplaceCall(call, load);

  if (load->slot().nullable_cid() != kDynamicCid) {
    // Reset value types if we know the concrete cid.
    for (Value::Iterator it(load->input_use_list()); !it.Done(); it.Advance()) {
      it.Current()->SetReachingType(nullptr);
    }
  }
}

bool CallSpecializer::TryInlineInstanceSetter(InstanceCallInstr* instr) {
  const CallTargets& targets = instr->Targets();
  if (!targets.HasSingleTarget()) {
    // Polymorphic sites are inlined like normal method calls by conventional
    // inlining.
    return false;
  }
  const Function& target = targets.FirstTarget();
  if (target.kind() != FunctionLayout::kImplicitSetter) {
    // Non-implicit setters are inlined like normal method calls.
    return false;
  }
  Field& field = Field::ZoneHandle(Z, target.accessor_field());
  ASSERT(!field.IsNull());
  if (should_clone_fields_) {
    field = field.CloneFromOriginal();
  }

  switch (flow_graph()->CheckForInstanceCall(instr,
                                             FunctionLayout::kImplicitSetter)) {
    case FlowGraph::ToCheck::kCheckNull:
      AddCheckNull(instr->Receiver(), instr->function_name(), instr->deopt_id(),
                   instr->env(), instr);
      break;
    case FlowGraph::ToCheck::kCheckCid:
      if (CompilerState::Current().is_aot()) {
        return false;  // AOT cannot insert speculative class checks.
      }
      AddReceiverCheck(instr);
      break;
    case FlowGraph::ToCheck::kNoCheck:
      break;
  }

  // True if we can use the unchecked entry into the setter.
  bool is_unchecked_call = false;
  if (!CompilerState::Current().is_aot()) {
    if (targets.IsMonomorphic() && targets.MonomorphicExactness().IsExact()) {
      if (targets.MonomorphicExactness().IsTriviallyExact()) {
        flow_graph()->AddExactnessGuard(instr,
                                        targets.MonomorphicReceiverCid());
      }
      is_unchecked_call = true;
    }
  }

  if (I->use_field_guards()) {
    if (field.guarded_cid() != kDynamicCid) {
      InsertBefore(instr,
                   new (Z)
                       GuardFieldClassInstr(new (Z) Value(instr->ArgumentAt(1)),
                                            field, instr->deopt_id()),
                   instr->env(), FlowGraph::kEffect);
    }

    if (field.needs_length_check()) {
      InsertBefore(
          instr,
          new (Z) GuardFieldLengthInstr(new (Z) Value(instr->ArgumentAt(1)),
                                        field, instr->deopt_id()),
          instr->env(), FlowGraph::kEffect);
    }

    if (field.static_type_exactness_state().NeedsFieldGuard()) {
      InsertBefore(instr,
                   new (Z)
                       GuardFieldTypeInstr(new (Z) Value(instr->ArgumentAt(1)),
                                           field, instr->deopt_id()),
                   instr->env(), FlowGraph::kEffect);
    }
  }

  // Build an AssertAssignable if necessary.
  const AbstractType& dst_type = AbstractType::ZoneHandle(zone(), field.type());
  if (!dst_type.IsTopTypeForSubtyping()) {
    // Compute whether we need to type check the value. Always type check if
    // we are at a dynamic invocation.
    bool needs_check = true;
    if (!instr->interface_target().IsNull()) {
      if (field.is_covariant()) {
        // Always type check covariant fields.
        needs_check = true;
      } else if (field.is_generic_covariant_impl()) {
        // If the field is generic-covariant then we don't need to check it
        // if the invocation was marked as unchecked (e.g. the receiver of
        // the invocation is also the receiver of the surrounding method).
        // Note: we can't use flow_graph()->IsReceiver() for this optimization
        // because strong mode only gives static guarantees at the AST level,
        // not at the SSA level.
        needs_check = !(is_unchecked_call ||
                        (instr->entry_kind() == Code::EntryKind::kUnchecked));
      } else {
        // The remaining stores are checked statically (we are not at
        // a dynamic invocation).
        needs_check = false;
      }
    }

    if (needs_check) {
      Definition* instantiator_type_args = flow_graph_->constant_null();
      Definition* function_type_args = flow_graph_->constant_null();
      if (!dst_type.IsInstantiated()) {
        const Class& owner = Class::Handle(Z, field.Owner());
        if (owner.NumTypeArguments() > 0) {
          instantiator_type_args = new (Z)
              LoadFieldInstr(new (Z) Value(instr->ArgumentAt(0)),
                             Slot::GetTypeArgumentsSlotFor(thread(), owner),
                             instr->token_pos());
          InsertBefore(instr, instantiator_type_args, instr->env(),
                       FlowGraph::kValue);
        }
      }

      InsertBefore(
          instr,
          new (Z) AssertAssignableInstr(
              instr->token_pos(), new (Z) Value(instr->ArgumentAt(1)),
              new (Z) Value(flow_graph_->GetConstant(dst_type)),
              new (Z) Value(instantiator_type_args),
              new (Z) Value(function_type_args),
              String::ZoneHandle(zone(), field.name()), instr->deopt_id()),
          instr->env(), FlowGraph::kEffect);
    }
  }

  // Field guard was detached.
  ASSERT(instr->FirstArgIndex() == 0);
  StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
      field, new (Z) Value(instr->ArgumentAt(0)),
      new (Z) Value(instr->ArgumentAt(1)), kEmitStoreBarrier,
      instr->token_pos(), &flow_graph()->parsed_function());

  // Discard the environment from the original instruction because the store
  // can't deoptimize.
  instr->RemoveEnvironment();
  ReplaceCallWithResult(instr, store, flow_graph()->constant_null());
  return true;
}

bool CallSpecializer::InlineSimdBinaryOp(InstanceCallInstr* call,
                                         intptr_t cid,
                                         Token::Kind op_kind) {
  if (!ShouldInlineSimd()) {
    return false;
  }
  ASSERT(call->type_args_len() == 0);
  ASSERT(call->ArgumentCount() == 2);
  Definition* const left = call->ArgumentAt(0);
  Definition* const right = call->ArgumentAt(1);
  // Type check left and right.
  AddChecksForArgNr(call, left, /* arg_number = */ 0);
  AddChecksForArgNr(call, right, /* arg_number = */ 1);
  // Replace the call.
  SimdOpInstr* op = SimdOpInstr::Create(
      SimdOpInstr::KindForOperator(cid, op_kind), new (Z) Value(left),
      new (Z) Value(right), call->deopt_id());
  ReplaceCall(call, op);

  return true;
}

// Only unique implicit instance getters can currently be handled.
bool CallSpecializer::TryInlineInstanceGetter(InstanceCallInstr* call) {
  const CallTargets& targets = call->Targets();
  if (!targets.HasSingleTarget()) {
    // Polymorphic sites are inlined like normal methods by conventional
    // inlining in FlowGraphInliner.
    return false;
  }
  const Function& target = targets.FirstTarget();
  if (target.kind() != FunctionLayout::kImplicitGetter) {
    // Non-implicit getters are inlined like normal methods by conventional
    // inlining in FlowGraphInliner.
    return false;
  }
  return TryInlineImplicitInstanceGetter(call);
}

void CallSpecializer::ReplaceWithMathCFunction(
    InstanceCallInstr* call,
    MethodRecognizer::Kind recognized_kind) {
  ASSERT(call->type_args_len() == 0);
  AddReceiverCheck(call);
  ZoneGrowableArray<Value*>* args =
      new (Z) ZoneGrowableArray<Value*>(call->ArgumentCount());
  for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
    args->Add(new (Z) Value(call->ArgumentAt(i)));
  }
  InvokeMathCFunctionInstr* invoke = new (Z) InvokeMathCFunctionInstr(
      args, call->deopt_id(), recognized_kind, call->token_pos());
  ReplaceCall(call, invoke);
}

// Inline only simple, frequently called core library methods.
bool CallSpecializer::TryInlineInstanceMethod(InstanceCallInstr* call) {
  const CallTargets& targets = call->Targets();
  if (!targets.IsMonomorphic()) {
    // No type feedback collected or multiple receivers/targets found.
    return false;
  }

  const Function& target = targets.FirstTarget();
  intptr_t receiver_cid = targets.MonomorphicReceiverCid();
  MethodRecognizer::Kind recognized_kind = target.recognized_kind();

  if (CanUnboxDouble() &&
      (recognized_kind == MethodRecognizer::kIntegerToDouble)) {
    if (receiver_cid == kSmiCid) {
      AddReceiverCheck(call);
      ReplaceCall(call,
                  new (Z) SmiToDoubleInstr(new (Z) Value(call->ArgumentAt(0)),
                                           call->token_pos()));
      return true;
    } else if ((receiver_cid == kMintCid) && CanConvertInt64ToDouble()) {
      AddReceiverCheck(call);
      ReplaceCall(call,
                  new (Z) Int64ToDoubleInstr(new (Z) Value(call->ArgumentAt(0)),
                                             call->deopt_id()));
      return true;
    }
  }

  if (receiver_cid == kDoubleCid) {
    if (!CanUnboxDouble()) {
      return false;
    }
    switch (recognized_kind) {
      case MethodRecognizer::kDoubleToInteger: {
        AddReceiverCheck(call);
        ASSERT(call->HasICData());
        const ICData& ic_data = *call->ic_data();
        Definition* input = call->ArgumentAt(0);
        Definition* d2i_instr = NULL;
        if (ic_data.HasDeoptReason(ICData::kDeoptDoubleToSmi)) {
          // Do not repeatedly deoptimize because the result didn't fit into
          // a Smi.
          d2i_instr = new (Z) DoubleToIntegerInstr(new (Z) Value(input), call);
        } else {
          // Optimistically assume the result fits into a Smi.
          d2i_instr =
              new (Z) DoubleToSmiInstr(new (Z) Value(input), call->deopt_id());
        }
        ReplaceCall(call, d2i_instr);
        return true;
      }
      case MethodRecognizer::kDoubleMod:
      case MethodRecognizer::kDoubleRound:
        ReplaceWithMathCFunction(call, recognized_kind);
        return true;
      case MethodRecognizer::kDoubleTruncate:
      case MethodRecognizer::kDoubleFloor:
      case MethodRecognizer::kDoubleCeil:
        if (!TargetCPUFeatures::double_truncate_round_supported()) {
          ReplaceWithMathCFunction(call, recognized_kind);
        } else {
          AddReceiverCheck(call);
          DoubleToDoubleInstr* d2d_instr =
              new (Z) DoubleToDoubleInstr(new (Z) Value(call->ArgumentAt(0)),
                                          recognized_kind, call->deopt_id());
          ReplaceCall(call, d2d_instr);
        }
        return true;
      default:
        break;
    }
  }

  return FlowGraphInliner::TryReplaceInstanceCallWithInline(
      flow_graph_, current_iterator(), call, speculative_policy_);
}

// If the type tests specified by 'ic_data' do not depend on type arguments,
// return the mapping cid->result in 'results' (i: cid; i + 1: result).
// If all tests yield the same result, return it; otherwise return
// Bool::null(). If no mapping is possible, 'results' has fewer than
// (ic_data.NumberOfChecks() * 2) entries.
// An instance-of test returning the same result for all cids can be converted
// to a class check.
BoolPtr CallSpecializer::InstanceOfAsBool(
    const ICData& ic_data,
    const AbstractType& type,
    ZoneGrowableArray<intptr_t>* results) const {
  ASSERT(results->is_empty());
  ASSERT(ic_data.NumArgsTested() == 1);  // Unary checks only.
  if (type.IsFunctionType() || type.IsDartFunctionType() ||
      !type.IsInstantiated()) {
    return Bool::null();
  }
  const Class& type_class = Class::Handle(Z, type.type_class());
  const intptr_t num_type_args = type_class.NumTypeArguments();
  if (num_type_args > 0) {
    // Only raw types can be directly compared, thus disregarding type
    // arguments.
    const intptr_t num_type_params = type_class.NumTypeParameters();
    const intptr_t from_index = num_type_args - num_type_params;
    const TypeArguments& type_arguments =
        TypeArguments::Handle(Z, type.arguments());
    const bool is_raw_type = type_arguments.IsNull() ||
                             type_arguments.IsRaw(from_index, num_type_params);
    if (!is_raw_type) {
      // Unknown result.
      return Bool::null();
    }
  }

  const ClassTable& class_table = *isolate()->class_table();
  Bool& prev = Bool::Handle(Z);
  Class& cls = Class::Handle(Z);

  bool results_differ = false;
  const intptr_t number_of_checks = ic_data.NumberOfChecks();
  for (int i = 0; i < number_of_checks; i++) {
    cls = class_table.At(ic_data.GetReceiverClassIdAt(i));
    if (cls.NumTypeArguments() > 0) {
      return Bool::null();
    }
    bool is_subtype = false;
    if (cls.IsNullClass()) {
      // 'null' is an instance of Null, Object*, Never*, void, and dynamic.
      // In addition, 'null' is an instance of any nullable type.
      // It is also an instance of FutureOr<T> if it is an instance of T.
      const AbstractType& unwrapped_type =
          AbstractType::Handle(type.UnwrapFutureOr());
      ASSERT(unwrapped_type.IsInstantiated());
      is_subtype = unwrapped_type.IsTopTypeForInstanceOf() ||
                   unwrapped_type.IsNullable() ||
                   (unwrapped_type.IsLegacy() && unwrapped_type.IsNeverType());
    } else {
      is_subtype =
          Class::IsSubtypeOf(cls, Object::null_type_arguments(),
                             Nullability::kNonNullable, type, Heap::kOld);
    }
    results->Add(cls.id());
    results->Add(static_cast<intptr_t>(is_subtype));
    if (prev.IsNull()) {
      prev = Bool::Get(is_subtype).raw();
    } else {
      if (is_subtype != prev.value()) {
        results_differ = true;
      }
    }
  }
  return results_differ ? Bool::null() : prev.raw();
}

// Returns true if checking against this type is a direct class id comparison.
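// For example, for a class C that no other class implements or extends,
// 'x is C' can be lowered to comparing the receiver's class id against C's
// cid (see ReplaceWithInstanceOf below), provided C is not generic and the
// tested type is not nullable.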
bool CallSpecializer::TypeCheckAsClassEquality(const AbstractType& type) {
  ASSERT(type.IsFinalized());
  // Requires CHA.
  if (!type.IsInstantiated()) return false;
  // Function types have different type checking rules.
  if (type.IsFunctionType()) return false;
  const Class& type_class = Class::Handle(type.type_class());
  // Could this be an interface check?
  if (CHA::IsImplemented(type_class)) return false;
  // Check if there are subclasses.
  if (CHA::HasSubclasses(type_class)) {
    return false;
  }

  // Private classes cannot be subclassed by later loaded libs.
  if (!type_class.IsPrivate()) {
    // In AOT mode we can't use CHA deoptimizations.
    ASSERT(!CompilerState::Current().is_aot() || !FLAG_use_cha_deopt);
    if (FLAG_use_cha_deopt || isolate()->all_classes_finalized()) {
      if (FLAG_trace_cha) {
        THR_Print(
            "  **(CHA) Typecheck as class equality since no "
            "subclasses: %s\n",
            type_class.ToCString());
      }
      if (FLAG_use_cha_deopt) {
        thread()->compiler_state().cha().AddToGuardedClasses(
            type_class, /*subclass_count=*/0);
      }
    } else {
      return false;
    }
  }
  const intptr_t num_type_args = type_class.NumTypeArguments();
  if (num_type_args > 0) {
    // Only raw types can be directly compared, thus disregarding type
    // arguments.
    const intptr_t num_type_params = type_class.NumTypeParameters();
    const intptr_t from_index = num_type_args - num_type_params;
    const TypeArguments& type_arguments =
        TypeArguments::Handle(type.arguments());
    const bool is_raw_type = type_arguments.IsNull() ||
                             type_arguments.IsRaw(from_index, num_type_params);
    if (!is_raw_type) {
      return false;
    }
  }
  if (type.IsNullable() || type.IsTopTypeForInstanceOf() ||
      type.IsNeverType()) {
    // A class id check is not sufficient, since a null instance also satisfies
    // the test against a nullable type.
    // TODO(regis): Add a null check in addition to the class id check?
    return false;
  }
  return true;
}

bool CallSpecializer::TryReplaceInstanceOfWithRangeCheck(
    InstanceCallInstr* call,
    const AbstractType& type) {
  // TODO(dartbug.com/30632) does this optimization make sense in JIT?
  return false;
}

bool CallSpecializer::TryOptimizeInstanceOfUsingStaticTypes(
    InstanceCallInstr* call,
    const AbstractType& type) {
  ASSERT(Token::IsTypeTestOperator(call->token_kind()));
  if (!type.IsInstantiated()) {
    return false;
  }

  Value* left_value = call->Receiver();
  if (left_value->Type()->IsInstanceOf(type)) {
    ConstantInstr* replacement = flow_graph()->GetConstant(Bool::True());
    call->ReplaceUsesWith(replacement);
    ASSERT(current_iterator()->Current() == call);
    current_iterator()->RemoveCurrentFromGraph();
    return true;
  }

  // The goal is to emit code that will determine the result of 'x is type'
  // depending solely on whether x == null or not.
  // Checking whether the receiver is null can only help if the tested type is
  // non-nullable or legacy (including Never*) or the Null type.
  // Also, testing the receiver for null cannot help with FutureOr.
  if ((type.IsNullable() && !type.IsNullType()) || type.IsFutureOrType()) {
    return false;
  }

  // If type is Null or Never*, or the static type of the receiver is a
  // subtype of the tested type, replace 'receiver is type' with
  //  - 'receiver == null' if type is Null or Never*,
  //  - 'receiver != null' otherwise.
  if (type.IsNullType() || (type.IsNeverType() && type.IsLegacy()) ||
      left_value->Type()->IsSubtypeOf(type)) {
    Definition* replacement = new (Z) StrictCompareInstr(
        call->token_pos(),
        (type.IsNullType() || (type.IsNeverType() && type.IsLegacy()))
            ? Token::kEQ_STRICT
            : Token::kNE_STRICT,
        left_value->CopyWithType(Z),
        new (Z) Value(flow_graph()->constant_null()),
        /* number_check = */ false, DeoptId::kNone);
    if (FLAG_trace_strong_mode_types) {
      THR_Print("[Strong mode] replacing %s with %s (%s < %s)\n",
                call->ToCString(), replacement->ToCString(),
                left_value->Type()->ToAbstractType()->ToCString(),
                type.ToCString());
    }
    ReplaceCall(call, replacement);
    return true;
  }

  return false;
}

void CallSpecializer::ReplaceWithInstanceOf(InstanceCallInstr* call) {
  ASSERT(Token::IsTypeTestOperator(call->token_kind()));
  Definition* left = call->ArgumentAt(0);
  Definition* instantiator_type_args = NULL;
  Definition* function_type_args = NULL;
  AbstractType& type = AbstractType::ZoneHandle(Z);
  ASSERT(call->type_args_len() == 0);
  if (call->ArgumentCount() == 2) {
    instantiator_type_args = flow_graph()->constant_null();
    function_type_args = flow_graph()->constant_null();
    ASSERT(call->MatchesCoreName(Symbols::_simpleInstanceOf()));
    type = AbstractType::Cast(call->ArgumentAt(1)->AsConstant()->value()).raw();
  } else {
    ASSERT(call->ArgumentCount() == 4);
    instantiator_type_args = call->ArgumentAt(1);
    function_type_args = call->ArgumentAt(2);
    type = AbstractType::Cast(call->ArgumentAt(3)->AsConstant()->value()).raw();
  }

  if (TryOptimizeInstanceOfUsingStaticTypes(call, type)) {
    return;
  }

  if (TypeCheckAsClassEquality(type)) {
    LoadClassIdInstr* left_cid = new (Z) LoadClassIdInstr(new (Z) Value(left));
    InsertBefore(call, left_cid, NULL, FlowGraph::kValue);
    const intptr_t type_cid = Class::Handle(Z, type.type_class()).id();
    ConstantInstr* cid =
        flow_graph()->GetConstant(Smi::Handle(Z, Smi::New(type_cid)));

    StrictCompareInstr* check_cid = new (Z) StrictCompareInstr(
        call->token_pos(), Token::kEQ_STRICT, new (Z) Value(left_cid),
        new (Z) Value(cid), /* number_check = */ false, DeoptId::kNone);
    ReplaceCall(call, check_cid);
    return;
  }

  if (TryReplaceInstanceOfWithRangeCheck(call, type)) {
    return;
  }

  const ICData& unary_checks =
      ICData::ZoneHandle(Z, call->ic_data()->AsUnaryClassChecks());
  const intptr_t number_of_checks = unary_checks.NumberOfChecks();
  if (number_of_checks > 0 && number_of_checks <= FLAG_max_polymorphic_checks) {
    ZoneGrowableArray<intptr_t>* results =
        new (Z) ZoneGrowableArray<intptr_t>(number_of_checks * 2);
    const Bool& as_bool =
        Bool::ZoneHandle(Z, InstanceOfAsBool(unary_checks, type, results));
    if (as_bool.IsNull() || CompilerState::Current().is_aot()) {
      if (results->length() == number_of_checks * 2) {
        const bool can_deopt = SpecializeTestCidsForNumericTypes(results, type);
        if (can_deopt &&
            !speculative_policy_->IsAllowedForInlining(call->deopt_id())) {
          // Guard against repeated speculative inlining.
          return;
        }
        TestCidsInstr* test_cids = new (Z) TestCidsInstr(
            call->token_pos(), Token::kIS, new (Z) Value(left), *results,
            can_deopt ? call->deopt_id() : DeoptId::kNone);
        // Remove type.
        ReplaceCall(call, test_cids);
        return;
      }
    } else {
      // One result only.
      AddReceiverCheck(call);
      ConstantInstr* bool_const = flow_graph()->GetConstant(as_bool);
      ASSERT(!call->HasPushArguments());
      call->ReplaceUsesWith(bool_const);
      ASSERT(current_iterator()->Current() == call);
      current_iterator()->RemoveCurrentFromGraph();
      return;
    }
  }

  InstanceOfInstr* instance_of = new (Z) InstanceOfInstr(
      call->token_pos(), new (Z) Value(left),
      new (Z) Value(instantiator_type_args), new (Z) Value(function_type_args),
      type, call->deopt_id());
  ReplaceCall(call, instance_of);
}

void CallSpecializer::VisitStaticCall(StaticCallInstr* call) {
  if (FlowGraphInliner::TryReplaceStaticCallWithInline(
          flow_graph_, current_iterator(), call, speculative_policy_)) {
    return;
  }

  if (speculative_policy_->IsAllowedForInlining(call->deopt_id())) {
    // Only if speculative inlining is enabled.

    MethodRecognizer::Kind recognized_kind = call->function().recognized_kind();
    const CallTargets& targets = call->Targets();
    const BinaryFeedback& binary_feedback = call->BinaryFeedback();

    switch (recognized_kind) {
      case MethodRecognizer::kMathMin:
      case MethodRecognizer::kMathMax: {
        // We can handle only monomorphic min/max call sites with both
        // arguments being either doubles or smis.
        if (CanUnboxDouble() && targets.IsMonomorphic() &&
            (call->FirstArgIndex() == 0)) {
          intptr_t result_cid = kIllegalCid;
          if (binary_feedback.IncludesOperands(kDoubleCid)) {
            result_cid = kDoubleCid;
          } else if (binary_feedback.IncludesOperands(kSmiCid)) {
            result_cid = kSmiCid;
          }
          if (result_cid != kIllegalCid) {
            MathMinMaxInstr* min_max = new (Z) MathMinMaxInstr(
                recognized_kind, new (Z) Value(call->ArgumentAt(0)),
                new (Z) Value(call->ArgumentAt(1)), call->deopt_id(),
                result_cid);
            const Cids* cids = Cids::CreateMonomorphic(Z, result_cid);
            AddCheckClass(min_max->left()->definition(), *cids,
                          call->deopt_id(), call->env(), call);
            AddCheckClass(min_max->right()->definition(), *cids,
                          call->deopt_id(), call->env(), call);
            ReplaceCall(call, min_max);
            return;
          }
        }
        break;
      }
      case MethodRecognizer::kDoubleFromInteger: {
        if (call->HasICData() && targets.IsMonomorphic() &&
            (call->FirstArgIndex() == 0)) {
          if (CanUnboxDouble()) {
            if (binary_feedback.ArgumentIs(kSmiCid)) {
              Definition* arg = call->ArgumentAt(1);
              AddCheckSmi(arg, call->deopt_id(), call->env(), call);
              ReplaceCall(call, new (Z) SmiToDoubleInstr(new (Z) Value(arg),
                                                         call->token_pos()));
              return;
            } else if (binary_feedback.ArgumentIs(kMintCid) &&
                       CanConvertInt64ToDouble()) {
              Definition* arg = call->ArgumentAt(1);
              ReplaceCall(call, new (Z) Int64ToDoubleInstr(new (Z) Value(arg),
                                                           call->deopt_id()));
              return;
            }
          }
        }
        break;
      }

      default:
        break;
    }
  }

  if (TryOptimizeStaticCallUsingStaticTypes(call)) {
    return;
  }
}

void CallSpecializer::VisitLoadCodeUnits(LoadCodeUnitsInstr* instr) {
// TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized.
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
  if (!instr->can_pack_into_smi()) instr->set_representation(kUnboxedInt64);
#endif
}

static bool CidTestResultsContains(const ZoneGrowableArray<intptr_t>& results,
                                   intptr_t test_cid) {
  for (intptr_t i = 0; i < results.length(); i += 2) {
    if (results[i] == test_cid) return true;
  }
  return false;
}

static void TryAddTest(ZoneGrowableArray<intptr_t>* results,
                       intptr_t test_cid,
                       bool result) {
  if (!CidTestResultsContains(*results, test_cid)) {
    results->Add(test_cid);
    results->Add(static_cast<intptr_t>(result));
  }
}

// Used when we only need the positive result because we return false by
// default.
static void PurgeNegativeTestCidsEntries(ZoneGrowableArray<intptr_t>* results) {
  // We can't purge the Smi entry at the beginning since it is used in the
  // Smi check before the Cid is loaded.
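  // For example, [kSmiCid, 0, kMintCid, 1, kDoubleCid, 0] becomes
  // [kSmiCid, 0, kMintCid, 1]: the leading smi entry is kept even though it
  // is negative, and the negative double entry is dropped.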
  int dest = 2;
  for (intptr_t i = 2; i < results->length(); i += 2) {
    if (results->At(i + 1) != 0) {
      (*results)[dest++] = results->At(i);
      (*results)[dest++] = results->At(i + 1);
    }
  }
  results->SetLength(dest);
}

bool CallSpecializer::SpecializeTestCidsForNumericTypes(
    ZoneGrowableArray<intptr_t>* results,
    const AbstractType& type) {
  ASSERT(results->length() >= 2);  // At least one entry.
  const ClassTable& class_table = *Isolate::Current()->class_table();
  if ((*results)[0] != kSmiCid) {
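    // TestCids expects the smi case first (the smi check happens before the
    // class id is loaded), so shift the existing (cid, result) pairs right
    // by one pair and prepend an entry for smi.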
    const Class& smi_class = Class::Handle(class_table.At(kSmiCid));
    const bool smi_is_subtype =
        Class::IsSubtypeOf(smi_class, Object::null_type_arguments(),
                           Nullability::kNonNullable, type, Heap::kOld);
    results->Add((*results)[results->length() - 2]);
    results->Add((*results)[results->length() - 2]);
    for (intptr_t i = results->length() - 3; i > 1; --i) {
      (*results)[i] = (*results)[i - 2];
    }
    (*results)[0] = kSmiCid;
    (*results)[1] = static_cast<intptr_t>(smi_is_subtype);
  }

  ASSERT(type.IsInstantiated());
  ASSERT(results->length() >= 2);
  if (type.IsSmiType()) {
    ASSERT((*results)[0] == kSmiCid);
    PurgeNegativeTestCidsEntries(results);
    return false;
  } else if (type.IsIntType()) {
    ASSERT((*results)[0] == kSmiCid);
    TryAddTest(results, kMintCid, true);
    // Cannot deoptimize since all tests returning true have been added.
    PurgeNegativeTestCidsEntries(results);
    return false;
  } else if (type.IsNumberType()) {
    ASSERT((*results)[0] == kSmiCid);
    TryAddTest(results, kMintCid, true);
    TryAddTest(results, kDoubleCid, true);
    PurgeNegativeTestCidsEntries(results);
    return false;
  } else if (type.IsDoubleType()) {
    ASSERT((*results)[0] == kSmiCid);
    TryAddTest(results, kDoubleCid, true);
    PurgeNegativeTestCidsEntries(results);
    return false;
  }
  return true;  // May deoptimize since we have not identified all 'true' tests.
}

void TypedDataSpecializer::Optimize(FlowGraph* flow_graph) {
  TypedDataSpecializer optimizer(flow_graph);
  optimizer.VisitBlocks();
}

void TypedDataSpecializer::EnsureIsInitialized() {
  if (initialized_) return;

  initialized_ = true;

  int_type_ = Type::IntType();
  double_type_ = Type::Double();

  const auto& typed_data = Library::Handle(
      Z, Library::LookupLibrary(thread_, Symbols::DartTypedData()));

  auto& td_class = Class::Handle(Z);
  auto& direct_implementors = GrowableObjectArray::Handle(Z);

#define INIT_HANDLE(iface, member_name, type, cid)                            \
  td_class = typed_data.LookupClass(Symbols::iface());                        \
  ASSERT(!td_class.IsNull());                                                 \
  direct_implementors = td_class.direct_implementors();                       \
  if (!HasThirdPartyImplementor(direct_implementors)) {                       \
    member_name = td_class.RareType();                                        \
  }

  PUBLIC_TYPED_DATA_CLASS_LIST(INIT_HANDLE)
#undef INIT_HANDLE
}

bool TypedDataSpecializer::HasThirdPartyImplementor(
    const GrowableObjectArray& direct_implementors) {
  // Check if there are non-internal/external/view implementors.
  for (intptr_t i = 0; i < direct_implementors.Length(); ++i) {
    implementor_ ^= direct_implementors.At(i);

    // We only consider [implementor_] a 3rd party implementor if it was
    // finalized by the class finalizer, since only then can we have concrete
    // instances of the [implementor_].
    if (implementor_.is_finalized()) {
      const classid_t cid = implementor_.id();
      if (!IsTypedDataClassId(cid) && !IsTypedDataViewClassId(cid) &&
          !IsExternalTypedDataClassId(cid)) {
        return true;
      }
    }
  }
  return false;
}

void TypedDataSpecializer::VisitInstanceCall(InstanceCallInstr* call) {
  TryInlineCall(call);
}

void TypedDataSpecializer::VisitStaticCall(StaticCallInstr* call) {
  const Function& function = call->function();
  if (!function.is_static()) {
    ASSERT(call->ArgumentCount() > 0);
    TryInlineCall(call);
  }
}

void TypedDataSpecializer::TryInlineCall(TemplateDartCall<0>* call) {
  const bool is_length_getter = call->Selector() == Symbols::GetLength().raw();
  const bool is_index_get = call->Selector() == Symbols::IndexToken().raw();
  const bool is_index_set =
      call->Selector() == Symbols::AssignIndexToken().raw();

  if (is_length_getter || is_index_get || is_index_set) {
    EnsureIsInitialized();

    const intptr_t receiver_index = call->FirstArgIndex();

    CompileType* receiver_type = call->ArgumentAt(receiver_index + 0)->Type();

    CompileType* index_type = nullptr;
    if (is_index_get || is_index_set) {
      index_type = call->ArgumentAt(receiver_index + 1)->Type();
    }

    CompileType* value_type = nullptr;
    if (is_index_set) {
      value_type = call->ArgumentAt(receiver_index + 2)->Type();
    }

    auto& type_class = Class::Handle(zone_);
#define TRY_INLINE(iface, member_name, type, cid)                             \
  if (!member_name.IsNull()) {                                                \
    const bool is_float_access =                                              \
        cid == kTypedDataFloat32ArrayCid || cid == kTypedDataFloat64ArrayCid; \
    if (receiver_type->IsAssignableTo(member_name)) {                         \
      if (is_length_getter) {                                                 \
        type_class = member_name.type_class();                                \
        ReplaceWithLengthGetter(call);                                        \
      } else if (is_index_get) {                                              \
        if (is_float_access &&                                                \
            !FlowGraphCompiler::SupportsUnboxedDoubles()) {                   \
          return;                                                             \
        }                                                                     \
        if (!index_type->IsNullableInt()) return;                             \
        type_class = member_name.type_class();                                \
        ReplaceWithIndexGet(call, cid);                                       \
      } else {                                                                \
        if (is_float_access &&                                                \
            !FlowGraphCompiler::SupportsUnboxedDoubles()) {                   \
          return;                                                             \
        }                                                                     \
        if (!index_type->IsNullableInt()) return;                             \
        if (!value_type->IsAssignableTo(type)) return;                        \
        type_class = member_name.type_class();                                \
        ReplaceWithIndexSet(call, cid);                                       \
      }                                                                       \
      return;                                                                 \
    }                                                                         \
  }
    PUBLIC_TYPED_DATA_CLASS_LIST(TRY_INLINE)
#undef TRY_INLINE
  }
}
1618
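// Replaces [call] with a direct load of the typed data object's length field,
// preceded by a null check when the receiver may be null.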
void TypedDataSpecializer::ReplaceWithLengthGetter(TemplateDartCall<0>* call) {
  const intptr_t receiver_idx = call->FirstArgIndex();
  auto array = call->ArgumentAt(receiver_idx + 0);

  if (array->Type()->is_nullable()) {
    AppendNullCheck(call, &array);
  }
  Definition* length = AppendLoadLength(call, array);
  flow_graph_->ReplaceCurrentInstruction(current_iterator(), call, length);
  RefineUseTypes(length);
}

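// Replaces an indexed read with null checks (as needed), a bounds check, and
// a LoadIndexed of the element with class id [cid].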
void TypedDataSpecializer::ReplaceWithIndexGet(TemplateDartCall<0>* call,
                                               classid_t cid) {
  const intptr_t receiver_idx = call->FirstArgIndex();
  auto array = call->ArgumentAt(receiver_idx + 0);
  auto index = call->ArgumentAt(receiver_idx + 1);

  if (array->Type()->is_nullable()) {
    AppendNullCheck(call, &array);
  }
  if (index->Type()->is_nullable()) {
    AppendNullCheck(call, &index);
  }
  AppendBoundsCheck(call, array, &index);
  Definition* value = AppendLoadIndexed(call, array, index, cid);
  flow_graph_->ReplaceCurrentInstruction(current_iterator(), call, value);
  RefineUseTypes(value);
}

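// Replaces an indexed write with null checks (as needed), a bounds check, and
// a StoreIndexed. The call produces no value, so it is removed outright.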
void TypedDataSpecializer::ReplaceWithIndexSet(TemplateDartCall<0>* call,
                                               classid_t cid) {
  const intptr_t receiver_idx = call->FirstArgIndex();
  auto array = call->ArgumentAt(receiver_idx + 0);
  auto index = call->ArgumentAt(receiver_idx + 1);
  auto value = call->ArgumentAt(receiver_idx + 2);

  if (array->Type()->is_nullable()) {
    AppendNullCheck(call, &array);
  }
  if (index->Type()->is_nullable()) {
    AppendNullCheck(call, &index);
  }
  if (value->Type()->is_nullable()) {
    AppendNullCheck(call, &value);
  }
  AppendBoundsCheck(call, array, &index);
  AppendStoreIndexed(call, array, index, value, cid);

  RELEASE_ASSERT(!call->HasUses());
  flow_graph_->ReplaceCurrentInstruction(current_iterator(), call, nullptr);
}

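// Inserts a CheckNull for [value] before [call]; *value is rewritten to point
// at the check itself.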
void TypedDataSpecializer::AppendNullCheck(TemplateDartCall<0>* call,
                                           Definition** value) {
  auto check =
      new (Z) CheckNullInstr(new (Z) Value(*value), Symbols::OptimizedOut(),
                             call->deopt_id(), call->token_pos());
  flow_graph_->InsertBefore(call, check, call->env(), FlowGraph::kValue);

  // Use data dependency as control dependency.
  *value = check;
}

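// Loads the array length and inserts a GenericCheckBound before [call];
// *index is redirected through the check so uses are ordered after it.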
void TypedDataSpecializer::AppendBoundsCheck(TemplateDartCall<0>* call,
                                             Definition* array,
                                             Definition** index) {
  auto length = new (Z) LoadFieldInstr(
      new (Z) Value(array), Slot::TypedDataBase_length(), call->token_pos());
  flow_graph_->InsertBefore(call, length, call->env(), FlowGraph::kValue);

  auto check = new (Z) GenericCheckBoundInstr(
      new (Z) Value(length), new (Z) Value(*index), DeoptId::kNone);
  flow_graph_->InsertBefore(call, check, call->env(), FlowGraph::kValue);

  // Use data dependency as control dependency.
  *index = check;
}

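// Loads the length field of the typed data object and returns the load.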
Definition* TypedDataSpecializer::AppendLoadLength(TemplateDartCall<0>* call,
                                                   Definition* array) {
  auto length = new (Z) LoadFieldInstr(
      new (Z) Value(array), Slot::TypedDataBase_length(), call->token_pos());
  flow_graph_->InsertBefore(call, length, call->env(), FlowGraph::kValue);
  return length;
}

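// Emits an element load: the untagged data pointer is loaded first, then a
// LoadIndexed scaled by the element size. Float32 elements are widened to
// double via FloatToDouble, since Dart code observes them as doubles.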
Definition* TypedDataSpecializer::AppendLoadIndexed(TemplateDartCall<0>* call,
                                                    Definition* array,
                                                    Definition* index,
                                                    classid_t cid) {
  const intptr_t element_size = TypedDataBase::ElementSizeFor(cid);
  const intptr_t index_scale = element_size;

  auto data = new (Z)
      LoadUntaggedInstr(new (Z) Value(array),
                        compiler::target::TypedDataBase::data_field_offset());
  flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);

  Definition* load = new (Z) LoadIndexedInstr(
      new (Z) Value(data), new (Z) Value(index), /*index_unboxed=*/false,
      index_scale, cid, kAlignedAccess, DeoptId::kNone, call->token_pos());
  flow_graph_->InsertBefore(call, load, call->env(), FlowGraph::kValue);

  if (cid == kTypedDataFloat32ArrayCid) {
    load = new (Z) FloatToDoubleInstr(new (Z) Value(load), call->deopt_id());
    flow_graph_->InsertBefore(call, load, call->env(), FlowGraph::kValue);
  }

  return load;
}

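// Emits an element store: the value is explicitly unboxed (or narrowed to
// float for Float32 arrays), the untagged data pointer is loaded, and a
// StoreIndexed is emitted. For example, a Float32List store `l[i] = v`
// becomes DoubleToFloat(v), LoadUntagged(l), StoreIndexed(data, i, value).
// No store barrier is needed because typed data never holds heap pointers.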
void TypedDataSpecializer::AppendStoreIndexed(TemplateDartCall<0>* call,
                                              Definition* array,
                                              Definition* index,
                                              Definition* value,
                                              classid_t cid) {
  const intptr_t element_size = TypedDataBase::ElementSizeFor(cid);
  const intptr_t index_scale = element_size;

  const auto deopt_id = call->deopt_id();

  switch (cid) {
    case kTypedDataInt8ArrayCid:
    case kTypedDataUint8ArrayCid:
    case kTypedDataUint8ClampedArrayCid:
    case kTypedDataInt16ArrayCid:
    case kTypedDataUint16ArrayCid:
    case kExternalTypedDataUint8ArrayCid:
    case kExternalTypedDataUint8ClampedArrayCid: {
      // Insert explicit unboxing instructions with truncation to avoid relying
      // on [SelectRepresentations] which doesn't mark them as truncating.
      value = UnboxInstr::Create(kUnboxedIntPtr, new (Z) Value(value), deopt_id,
                                 Instruction::kNotSpeculative);
      flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
      break;
    }
    case kTypedDataInt32ArrayCid: {
      // Insert explicit unboxing instructions with truncation to avoid relying
      // on [SelectRepresentations] which doesn't mark them as truncating.
      value = UnboxInstr::Create(kUnboxedInt32, new (Z) Value(value), deopt_id,
                                 Instruction::kNotSpeculative);
      flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
      break;
    }
    case kTypedDataUint32ArrayCid: {
      // Insert explicit unboxing instructions with truncation to avoid relying
      // on [SelectRepresentations] which doesn't mark them as truncating.
      value = UnboxInstr::Create(kUnboxedUint32, new (Z) Value(value), deopt_id,
                                 Instruction::kNotSpeculative);
      flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
      break;
    }
    case kTypedDataInt64ArrayCid:
    case kTypedDataUint64ArrayCid: {
      // Insert explicit unboxing instructions with truncation to avoid relying
      // on [SelectRepresentations] which doesn't mark them as truncating.
      value = UnboxInstr::Create(kUnboxedInt64, new (Z) Value(value),
                                 DeoptId::kNone, Instruction::kNotSpeculative);
      flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
      break;
    }
    case kTypedDataFloat32ArrayCid: {
      value = new (Z) DoubleToFloatInstr(new (Z) Value(value), deopt_id,
                                         Instruction::kNotSpeculative);
      flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
      break;
    }
    default:
      break;
  }

  auto data = new (Z)
      LoadUntaggedInstr(new (Z) Value(array),
                        compiler::target::TypedDataBase::data_field_offset());
  flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);

  auto store = new (Z) StoreIndexedInstr(
      new (Z) Value(data), new (Z) Value(index), new (Z) Value(value),
      kNoStoreBarrier, /*index_unboxed=*/false, index_scale, cid,
      kAlignedAccess, DeoptId::kNone, call->token_pos(),
      Instruction::kNotSpeculative);
  flow_graph_->InsertBefore(call, store, call->env(), FlowGraph::kEffect);
}

void CallSpecializer::ReplaceInstanceCallsWithDispatchTableCalls() {
  // Only implemented for AOT.
}

}  // namespace dart