1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/compiler/backend/il.h" |
6 | |
7 | #include "vm/bit_vector.h" |
8 | #include "vm/bootstrap.h" |
9 | #include "vm/compiler/aot/dispatch_table_generator.h" |
10 | #include "vm/compiler/backend/code_statistics.h" |
11 | #include "vm/compiler/backend/constant_propagator.h" |
12 | #include "vm/compiler/backend/evaluator.h" |
13 | #include "vm/compiler/backend/flow_graph_compiler.h" |
14 | #include "vm/compiler/backend/linearscan.h" |
15 | #include "vm/compiler/backend/locations.h" |
16 | #include "vm/compiler/backend/loops.h" |
17 | #include "vm/compiler/backend/range_analysis.h" |
18 | #include "vm/compiler/ffi/frame_rebase.h" |
19 | #include "vm/compiler/ffi/native_calling_convention.h" |
20 | #include "vm/compiler/frontend/flow_graph_builder.h" |
21 | #include "vm/compiler/frontend/kernel_translation_helper.h" |
22 | #include "vm/compiler/jit/compiler.h" |
23 | #include "vm/compiler/method_recognizer.h" |
24 | #include "vm/cpu.h" |
25 | #include "vm/dart_entry.h" |
26 | #include "vm/object.h" |
27 | #include "vm/object_store.h" |
28 | #include "vm/os.h" |
29 | #include "vm/regexp_assembler_ir.h" |
30 | #include "vm/resolver.h" |
31 | #include "vm/runtime_entry.h" |
32 | #include "vm/scopes.h" |
33 | #include "vm/stack_frame.h" |
34 | #include "vm/stub_code.h" |
35 | #include "vm/symbols.h" |
36 | #include "vm/type_testing_stubs.h" |
37 | |
38 | #include "vm/compiler/backend/il_printer.h" |
39 | |
40 | namespace dart { |
41 | |
42 | DEFINE_FLAG(bool, |
43 | propagate_ic_data, |
44 | true, |
            "Propagate IC data from unoptimized to optimized IC calls.");
46 | DEFINE_FLAG(bool, |
47 | two_args_smi_icd, |
48 | true, |
            "Generate special IC stubs for two args Smi operations");
50 | |
51 | class SubclassFinder { |
52 | public: |
53 | SubclassFinder(Zone* zone, |
54 | GrowableArray<intptr_t>* cids, |
55 | bool include_abstract) |
56 | : array_handles_(zone), |
57 | class_handles_(zone), |
58 | cids_(cids), |
59 | include_abstract_(include_abstract) {} |
60 | |
61 | void ScanSubClasses(const Class& klass) { |
62 | if (include_abstract_ || !klass.is_abstract()) { |
63 | cids_->Add(klass.id()); |
64 | } |
65 | ScopedHandle<GrowableObjectArray> array(&array_handles_); |
66 | ScopedHandle<Class> subclass(&class_handles_); |
67 | *array = klass.direct_subclasses(); |
68 | if (!array->IsNull()) { |
69 | for (intptr_t i = 0; i < array->Length(); ++i) { |
70 | *subclass ^= array->At(i); |
71 | ScanSubClasses(*subclass); |
72 | } |
73 | } |
74 | } |
75 | |
76 | void ScanImplementorClasses(const Class& klass) { |
77 | // An implementor of [klass] is |
78 | // * the [klass] itself. |
79 | // * all implementors of the direct subclasses of [klass]. |
80 | // * all implementors of the direct implementors of [klass]. |
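    // For illustration (hypothetical hierarchy): given `abstract class A`,
    // `class B implements A`, and `class C extends B`, scanning A visits B
    // (a direct implementor) and then C (a direct subclass of B), so the
    // cids of B and C are collected (and A's as well if abstract classes
    // are included).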
81 | if (include_abstract_ || !klass.is_abstract()) { |
82 | cids_->Add(klass.id()); |
83 | } |
84 | |
85 | ScopedHandle<GrowableObjectArray> array(&array_handles_); |
86 | ScopedHandle<Class> subclass_or_implementor(&class_handles_); |
87 | |
88 | *array = klass.direct_subclasses(); |
89 | if (!array->IsNull()) { |
90 | for (intptr_t i = 0; i < array->Length(); ++i) { |
91 | *subclass_or_implementor ^= (*array).At(i); |
92 | ScanImplementorClasses(*subclass_or_implementor); |
93 | } |
94 | } |
95 | *array = klass.direct_implementors(); |
96 | if (!array->IsNull()) { |
97 | for (intptr_t i = 0; i < array->Length(); ++i) { |
98 | *subclass_or_implementor ^= (*array).At(i); |
99 | ScanImplementorClasses(*subclass_or_implementor); |
100 | } |
101 | } |
102 | } |
103 | |
104 | private: |
105 | ReusableHandleStack<GrowableObjectArray> array_handles_; |
106 | ReusableHandleStack<Class> class_handles_; |
107 | GrowableArray<intptr_t>* cids_; |
108 | const bool include_abstract_; |
109 | }; |
110 | |
111 | const CidRangeVector& HierarchyInfo::SubtypeRangesForClass( |
112 | const Class& klass, |
113 | bool include_abstract, |
114 | bool exclude_null) { |
115 | ClassTable* table = thread()->isolate()->class_table(); |
116 | const intptr_t cid_count = table->NumCids(); |
117 | std::unique_ptr<CidRangeVector[]>* cid_ranges = nullptr; |
118 | if (include_abstract) { |
119 | cid_ranges = exclude_null ? &cid_subtype_ranges_abstract_nonnullable_ |
120 | : &cid_subtype_ranges_abstract_nullable_; |
121 | } else { |
122 | cid_ranges = exclude_null ? &cid_subtype_ranges_nonnullable_ |
123 | : &cid_subtype_ranges_nullable_; |
124 | } |
125 | if (*cid_ranges == nullptr) { |
126 | cid_ranges->reset(new CidRangeVector[cid_count]); |
127 | } |
128 | CidRangeVector& ranges = (*cid_ranges)[klass.id()]; |
129 | if (ranges.length() == 0) { |
130 | if (!FLAG_precompiled_mode) { |
131 | BuildRangesForJIT(table, &ranges, klass, /*use_subtype_test=*/true, |
132 | include_abstract, exclude_null); |
133 | } else { |
134 | BuildRangesFor(table, &ranges, klass, /*use_subtype_test=*/true, |
135 | include_abstract, exclude_null); |
136 | } |
137 | } |
138 | return ranges; |
139 | } |
140 | |
141 | const CidRangeVector& HierarchyInfo::SubclassRangesForClass( |
142 | const Class& klass) { |
143 | ClassTable* table = thread()->isolate()->class_table(); |
144 | const intptr_t cid_count = table->NumCids(); |
145 | if (cid_subclass_ranges_ == nullptr) { |
146 | cid_subclass_ranges_.reset(new CidRangeVector[cid_count]); |
147 | } |
148 | |
149 | CidRangeVector& ranges = cid_subclass_ranges_[klass.id()]; |
150 | if (ranges.length() == 0) { |
151 | if (!FLAG_precompiled_mode) { |
152 | BuildRangesForJIT(table, &ranges, klass, |
153 | /*use_subtype_test=*/true, |
154 | /*include_abstract=*/false, |
155 | /*exclude_null=*/false); |
156 | } else { |
157 | BuildRangesFor(table, &ranges, klass, |
158 | /*use_subtype_test=*/false, |
159 | /*include_abstract=*/false, |
160 | /*exclude_null=*/false); |
161 | } |
162 | } |
163 | return ranges; |
164 | } |
165 | |
166 | // Build the ranges either for: |
167 | // "<obj> as <Type>", or |
168 | // "<obj> is <Type>" |
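// The ranges cover the cids whose classes pass the test, except that
// "don't care" cids (see below) may be absorbed into a neighboring range to
// keep the vector short. For illustration (cids are hypothetical): if the
// test succeeds for cids 5, 6 and 7 and 9 but fails for cid 8, the resulting
// vector is [CidRange(5, 7), CidRange(9, 9)].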
169 | void HierarchyInfo::BuildRangesFor(ClassTable* table, |
170 | CidRangeVector* ranges, |
171 | const Class& klass, |
172 | bool use_subtype_test, |
173 | bool include_abstract, |
174 | bool exclude_null) { |
175 | Zone* zone = thread()->zone(); |
176 | ClassTable* class_table = thread()->isolate()->class_table(); |
177 | |
178 | // Only really used if `use_subtype_test == true`. |
179 | const Type& dst_type = Type::Handle(zone, Type::RawCast(klass.RareType())); |
180 | AbstractType& cls_type = AbstractType::Handle(zone); |
181 | |
182 | Class& cls = Class::Handle(zone); |
183 | AbstractType& super_type = AbstractType::Handle(zone); |
184 | const intptr_t cid_count = table->NumCids(); |
185 | |
186 | // Iterate over all cids to find the ones to be included in the ranges. |
187 | intptr_t start = -1; |
188 | intptr_t end = -1; |
189 | for (intptr_t cid = kInstanceCid; cid < cid_count; ++cid) { |
190 | // Create local zone because deep hierarchies may allocate lots of handles |
191 | // within one iteration of this loop. |
192 | StackZone stack_zone(thread()); |
193 | HANDLESCOPE(thread()); |
194 | |
    // Some cases are "don't care", i.e., they may or may not be included,
    // whichever yields the fewest ranges for efficiency.
197 | if (!table->HasValidClassAt(cid)) continue; |
198 | if (cid == kTypeArgumentsCid) continue; |
199 | if (cid == kVoidCid) continue; |
200 | if (cid == kDynamicCid) continue; |
201 | if (cid == kNeverCid) continue; |
202 | if (cid == kNullCid && !exclude_null) continue; |
203 | cls = table->At(cid); |
204 | if (!include_abstract && cls.is_abstract()) continue; |
205 | if (cls.IsTopLevel()) continue; |
206 | |
    // We are interested in [CidRange]es of either subclasses or subtypes.
208 | bool test_succeeded = false; |
209 | if (cid == kNullCid) { |
210 | ASSERT(exclude_null); |
211 | test_succeeded = false; |
212 | } else if (use_subtype_test) { |
213 | cls_type = cls.RareType(); |
214 | test_succeeded = cls_type.IsSubtypeOf(dst_type, Heap::kNew); |
215 | } else { |
216 | while (!cls.IsObjectClass()) { |
217 | if (cls.raw() == klass.raw()) { |
218 | test_succeeded = true; |
219 | break; |
220 | } |
221 | super_type = cls.super_type(); |
222 | const intptr_t type_class_id = super_type.type_class_id(); |
223 | cls = class_table->At(type_class_id); |
224 | } |
225 | } |
226 | |
227 | if (test_succeeded) { |
228 | // On success, open a new or continue any open range. |
229 | if (start == -1) start = cid; |
230 | end = cid; |
231 | } else if (start != -1) { |
232 | // On failure, close any open range from start to end |
      // (the latter is the most recent successful "do-care" cid).
234 | ASSERT(start <= end); |
235 | CidRange range(start, end); |
236 | ranges->Add(range); |
237 | start = -1; |
238 | end = -1; |
239 | } |
240 | } |
241 | |
242 | // Construct last range (either close open one, or add invalid). |
243 | if (start != -1) { |
244 | ASSERT(start <= end); |
245 | CidRange range(start, end); |
246 | ranges->Add(range); |
247 | } else if (ranges->length() == 0) { |
248 | CidRange range; |
249 | ASSERT(range.IsIllegalRange()); |
250 | ranges->Add(range); |
251 | } |
252 | } |
253 | |
254 | void HierarchyInfo::BuildRangesForJIT(ClassTable* table, |
255 | CidRangeVector* ranges, |
256 | const Class& dst_klass, |
257 | bool use_subtype_test, |
258 | bool include_abstract, |
259 | bool exclude_null) { |
260 | if (dst_klass.InVMIsolateHeap()) { |
261 | BuildRangesFor(table, ranges, dst_klass, use_subtype_test, include_abstract, |
262 | exclude_null); |
263 | return; |
264 | } |
265 | |
266 | Zone* zone = thread()->zone(); |
267 | GrowableArray<intptr_t> cids; |
268 | SubclassFinder finder(zone, &cids, include_abstract); |
269 | if (use_subtype_test) { |
270 | finder.ScanImplementorClasses(dst_klass); |
271 | } else { |
272 | finder.ScanSubClasses(dst_klass); |
273 | } |
274 | |
275 | // Sort all collected cids. |
276 | intptr_t* cids_array = cids.data(); |
277 | |
278 | qsort(cids_array, cids.length(), sizeof(intptr_t), |
279 | [](const void* a, const void* b) { |
280 | // MSAN seems unaware of allocations inside qsort. The linker flag |
281 | // -fsanitize=memory should give us a MSAN-aware version of libc... |
282 | MSAN_UNPOISON(static_cast<const intptr_t*>(a), sizeof(intptr_t)); |
283 | MSAN_UNPOISON(static_cast<const intptr_t*>(b), sizeof(intptr_t)); |
284 | return static_cast<int>(*static_cast<const intptr_t*>(a) - |
285 | *static_cast<const intptr_t*>(b)); |
286 | }); |
287 | |
288 | // Build ranges of all the cids. |
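  // When abstract classes are not being collected themselves, gaps that
  // contain only abstract (or invalid/top-level) classes are bridged. For
  // illustration (cids are hypothetical): with collected cids {10, 13} and
  // only abstract classes at cids 11 and 12, this emits the single range
  // [10, 13] rather than [10, 10] and [13, 13].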
289 | Class& klass = Class::Handle(); |
290 | intptr_t left_cid = -1; |
291 | intptr_t last_cid = -1; |
292 | for (intptr_t i = 0; i < cids.length(); ++i) { |
293 | if (left_cid == -1) { |
294 | left_cid = last_cid = cids[i]; |
295 | } else { |
296 | const intptr_t current_cid = cids[i]; |
297 | |
298 | // Skip duplicates. |
299 | if (current_cid == last_cid) continue; |
300 | |
      // Consecutive cids are ok.
302 | if (current_cid == (last_cid + 1)) { |
303 | last_cid = current_cid; |
304 | } else { |
305 | // We sorted, after all! |
306 | RELEASE_ASSERT(last_cid < current_cid); |
307 | |
308 | intptr_t j = last_cid + 1; |
309 | for (; j < current_cid; ++j) { |
310 | if (table->HasValidClassAt(j)) { |
311 | klass = table->At(j); |
312 | if (!klass.IsTopLevel()) { |
313 | // If we care about abstract classes also, we cannot skip over any |
314 | // arbitrary abstract class, only those which are subtypes. |
315 | if (include_abstract) { |
316 | break; |
317 | } |
318 | |
319 | // If the class is concrete we cannot skip over it. |
320 | if (!klass.is_abstract()) { |
321 | break; |
322 | } |
323 | } |
324 | } |
325 | } |
326 | |
327 | if (current_cid == j) { |
          // If there are only abstract cids between [last_cid] and
          // [current_cid], then we connect the two ranges.
330 | last_cid = current_cid; |
331 | } else { |
332 | // Finish the current open cid range and start a new one. |
333 | ranges->Add(CidRange{left_cid, last_cid}); |
334 | left_cid = last_cid = current_cid; |
335 | } |
336 | } |
337 | } |
338 | } |
339 | |
340 | // If there is an open cid-range which we haven't finished yet, we'll |
341 | // complete it. |
342 | if (left_cid != -1) { |
343 | ranges->Add(CidRange{left_cid, last_cid}); |
344 | } |
345 | } |
346 | |
347 | bool HierarchyInfo::CanUseSubtypeRangeCheckFor(const AbstractType& type) { |
348 | ASSERT(type.IsFinalized()); |
349 | |
350 | if (!type.IsInstantiated() || !type.IsType() || type.IsFunctionType() || |
351 | type.IsDartFunctionType()) { |
352 | return false; |
353 | } |
354 | |
355 | // The FutureOr<T> type cannot be handled by checking whether the instance is |
356 | // a subtype of FutureOr and then checking whether the type argument `T` |
357 | // matches. |
358 | // |
359 | // Instead we would need to perform multiple checks: |
360 | // |
361 | // instance is Null || instance is T || instance is Future<T> |
362 | // |
363 | if (type.IsFutureOrType()) { |
364 | return false; |
365 | } |
366 | |
367 | Zone* zone = thread()->zone(); |
368 | const Class& type_class = Class::Handle(zone, type.type_class()); |
369 | |
370 | // We can use class id range checks only if we don't have to test type |
371 | // arguments. |
372 | // |
373 | // This is e.g. true for "String" but also for "List<dynamic>". (A type for |
  // which the type arguments vector is filled with "dynamic" is known as a
  // rare type.)
376 | if (type_class.IsGeneric()) { |
377 | // TODO(kustermann): We might want to consider extending this when the type |
378 | // arguments are not "dynamic" but instantiated-to-bounds. |
379 | const Type& rare_type = |
380 | Type::Handle(zone, Type::RawCast(type_class.RareType())); |
381 | if (!rare_type.IsSubtypeOf(type, Heap::kNew)) { |
382 | ASSERT(type.arguments() != TypeArguments::null()); |
383 | return false; |
384 | } |
385 | } |
386 | |
387 | return true; |
388 | } |
389 | |
390 | bool HierarchyInfo::CanUseGenericSubtypeRangeCheckFor( |
391 | const AbstractType& type) { |
392 | ASSERT(type.IsFinalized()); |
393 | |
394 | if (!type.IsType() || type.IsFunctionType() || type.IsDartFunctionType()) { |
395 | return false; |
396 | } |
397 | |
398 | // The FutureOr<T> type cannot be handled by checking whether the instance is |
399 | // a subtype of FutureOr and then checking whether the type argument `T` |
400 | // matches. |
401 | // |
402 | // Instead we would need to perform multiple checks: |
403 | // |
404 | // instance is Null || instance is T || instance is Future<T> |
405 | // |
406 | if (type.IsFutureOrType()) { |
407 | return false; |
408 | } |
409 | |
  // NOTE: We do allow non-instantiated types here (in comparison to
  // [CanUseSubtypeRangeCheckFor]), since we handle type parameters in the
  // type expression in some cases (see below).
413 | |
414 | Zone* zone = thread()->zone(); |
415 | const Class& type_class = Class::Handle(zone, type.type_class()); |
416 | const intptr_t num_type_parameters = type_class.NumTypeParameters(); |
417 | const intptr_t num_type_arguments = type_class.NumTypeArguments(); |
418 | |
419 | // This function should only be called for generic classes. |
420 | ASSERT(type_class.NumTypeParameters() > 0 && |
421 | type.arguments() != TypeArguments::null()); |
422 | |
  // If the type class is implemented, the different implementations might
  // have their type argument vector stored at different offsets, so we
  // cannot use our optimized [CidRange]-based implementation.
426 | // |
427 | // TODO(kustermann): If the class is implemented but all implementations |
  // store the instantiator type argument vector at the same offset we can
429 | // still do it! |
430 | if (type_class.is_implemented()) { |
431 | return false; |
432 | } |
433 | |
434 | const TypeArguments& ta = |
435 | TypeArguments::Handle(zone, Type::Cast(type).arguments()); |
436 | ASSERT(ta.Length() == num_type_arguments); |
437 | |
  // The last [num_type_parameters] entries in the [TypeArguments] vector [ta]
  // are the values we have to check against. Ensure each of them either can
  // be handled via a [CidRange]-based check or is a type parameter.
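  // For illustration (hypothetical classes): for `class D<T> extends
  // B<int, T>`, NumTypeArguments() == 2 and NumTypeParameters() == 1, so
  // [ta] is [int, T] and only the trailing entry (for T) is checked here;
  // the leading entry is already determined by D's declaration.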
441 | AbstractType& type_arg = AbstractType::Handle(zone); |
442 | for (intptr_t i = 0; i < num_type_parameters; ++i) { |
443 | type_arg = ta.TypeAt(num_type_arguments - num_type_parameters + i); |
444 | if (!CanUseSubtypeRangeCheckFor(type_arg) && !type_arg.IsTypeParameter()) { |
445 | return false; |
446 | } |
447 | } |
448 | |
449 | return true; |
450 | } |
451 | |
452 | bool HierarchyInfo::InstanceOfHasClassRange(const AbstractType& type, |
453 | intptr_t* lower_limit, |
454 | intptr_t* upper_limit) { |
455 | ASSERT(CompilerState::Current().is_aot()); |
456 | if (CanUseSubtypeRangeCheckFor(type)) { |
457 | const Class& type_class = |
458 | Class::Handle(thread()->zone(), type.type_class()); |
459 | const CidRangeVector& ranges = |
460 | SubtypeRangesForClass(type_class, |
461 | /*include_abstract=*/false, |
462 | /*exclude_null=*/true); |
463 | if (ranges.length() == 1) { |
464 | const CidRangeValue& range = ranges[0]; |
465 | if (!range.IsIllegalRange()) { |
466 | *lower_limit = range.cid_start; |
467 | *upper_limit = range.cid_end; |
468 | return true; |
469 | } |
470 | } |
471 | } |
472 | return false; |
473 | } |
474 | |
475 | #if defined(DEBUG) |
476 | void Instruction::CheckField(const Field& field) const { |
477 | ASSERT(field.IsZoneHandle()); |
478 | ASSERT(!Compiler::IsBackgroundCompilation() || !field.IsOriginal()); |
479 | } |
480 | #endif // DEBUG |
481 | |
482 | Definition::Definition(intptr_t deopt_id) : Instruction(deopt_id) {} |
483 | |
484 | // A value in the constant propagation lattice. |
485 | // - non-constant sentinel |
486 | // - a constant (any non-sentinel value) |
487 | // - unknown sentinel |
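// For illustration (standard constant-propagation lattice behavior): meeting
// "unknown" with a constant yields that constant, while meeting two distinct
// constants yields the "non-constant" sentinel.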
488 | Object& Definition::constant_value() { |
489 | if (constant_value_ == NULL) { |
490 | constant_value_ = &Object::ZoneHandle(ConstantPropagator::Unknown()); |
491 | } |
492 | return *constant_value_; |
493 | } |
494 | |
495 | Definition* Definition::OriginalDefinition() { |
496 | Definition* defn = this; |
497 | Value* unwrapped; |
498 | while ((unwrapped = defn->RedefinedValue()) != nullptr) { |
499 | defn = unwrapped->definition(); |
500 | } |
501 | return defn; |
502 | } |
503 | |
504 | Value* Definition::RedefinedValue() const { |
505 | return nullptr; |
506 | } |
507 | |
508 | Value* RedefinitionInstr::RedefinedValue() const { |
509 | return value(); |
510 | } |
511 | |
512 | Value* AssertAssignableInstr::RedefinedValue() const { |
513 | return value(); |
514 | } |
515 | |
516 | Value* AssertBooleanInstr::RedefinedValue() const { |
517 | return value(); |
518 | } |
519 | |
520 | Value* CheckBoundBase::RedefinedValue() const { |
521 | return index(); |
522 | } |
523 | |
524 | Value* CheckNullInstr::RedefinedValue() const { |
525 | return value(); |
526 | } |
527 | |
528 | Definition* Definition::OriginalDefinitionIgnoreBoxingAndConstraints() { |
529 | Definition* def = this; |
530 | while (true) { |
531 | Definition* orig; |
532 | if (def->IsConstraint() || def->IsBox() || def->IsUnbox() || |
533 | def->IsIntConverter()) { |
534 | orig = def->InputAt(0)->definition(); |
535 | } else { |
536 | orig = def->OriginalDefinition(); |
537 | } |
538 | if (orig == def) return def; |
539 | def = orig; |
540 | } |
541 | } |
542 | |
543 | bool Definition::IsArrayLength(Definition* def) { |
544 | if (def != nullptr) { |
545 | if (auto load = def->OriginalDefinitionIgnoreBoxingAndConstraints() |
546 | ->AsLoadField()) { |
547 | return load->IsImmutableLengthLoad(); |
548 | } |
549 | } |
550 | return false; |
551 | } |
552 | |
553 | const ICData* Instruction::GetICData( |
554 | const ZoneGrowableArray<const ICData*>& ic_data_array, |
555 | intptr_t deopt_id, |
556 | bool is_static_call) { |
557 | // The deopt_id can be outside the range of the IC data array for |
558 | // computations added in the optimizing compiler. |
559 | ASSERT(deopt_id != DeoptId::kNone); |
560 | if (deopt_id >= ic_data_array.length()) { |
561 | return nullptr; |
562 | } |
563 | const ICData* result = ic_data_array[deopt_id]; |
564 | ASSERT(result == nullptr || is_static_call == result->is_static_call()); |
565 | return result; |
566 | } |
567 | |
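// A simple base-31 polynomial hash over the instruction tag and the SSA temp
// indices of all inputs; intended to be consistent with Instruction::Equals
// below.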
568 | intptr_t Instruction::Hashcode() const { |
569 | intptr_t result = tag(); |
570 | for (intptr_t i = 0; i < InputCount(); ++i) { |
571 | Value* value = InputAt(i); |
572 | intptr_t j = value->definition()->ssa_temp_index(); |
573 | result = result * 31 + j; |
574 | } |
575 | return result; |
576 | } |
577 | |
578 | bool Instruction::Equals(Instruction* other) const { |
579 | if (tag() != other->tag()) return false; |
580 | if (InputCount() != other->InputCount()) return false; |
581 | for (intptr_t i = 0; i < InputCount(); ++i) { |
582 | if (!InputAt(i)->Equals(other->InputAt(i))) return false; |
583 | } |
584 | return AttributesEqual(other); |
585 | } |
586 | |
587 | void Instruction::Unsupported(FlowGraphCompiler* compiler) { |
588 | compiler->Bailout(ToCString()); |
589 | UNREACHABLE(); |
590 | } |
591 | |
592 | bool Value::Equals(Value* other) const { |
593 | return definition() == other->definition(); |
594 | } |
595 | |
596 | static int OrderById(CidRange* const* a, CidRange* const* b) { |
597 | // Negative if 'a' should sort before 'b'. |
598 | ASSERT((*a)->IsSingleCid()); |
599 | ASSERT((*b)->IsSingleCid()); |
600 | return (*a)->cid_start - (*b)->cid_start; |
601 | } |
602 | |
603 | static int OrderByFrequencyThenId(CidRange* const* a, CidRange* const* b) { |
604 | const TargetInfo* target_info_a = static_cast<const TargetInfo*>(*a); |
605 | const TargetInfo* target_info_b = static_cast<const TargetInfo*>(*b); |
606 | // Negative if 'a' should sort before 'b'. |
607 | if (target_info_b->count != target_info_a->count) { |
608 | return (target_info_b->count - target_info_a->count); |
609 | } else { |
610 | return (*a)->cid_start - (*b)->cid_start; |
611 | } |
612 | } |
613 | |
614 | bool Cids::Equals(const Cids& other) const { |
615 | if (length() != other.length()) return false; |
616 | for (int i = 0; i < length(); i++) { |
617 | if (cid_ranges_[i]->cid_start != other.cid_ranges_[i]->cid_start || |
618 | cid_ranges_[i]->cid_end != other.cid_ranges_[i]->cid_end) { |
619 | return false; |
620 | } |
621 | } |
622 | return true; |
623 | } |
624 | |
625 | intptr_t Cids::ComputeLowestCid() const { |
626 | intptr_t min = kIntptrMax; |
627 | for (intptr_t i = 0; i < cid_ranges_.length(); ++i) { |
628 | min = Utils::Minimum(min, cid_ranges_[i]->cid_start); |
629 | } |
630 | return min; |
631 | } |
632 | |
633 | intptr_t Cids::ComputeHighestCid() const { |
634 | intptr_t max = -1; |
635 | for (intptr_t i = 0; i < cid_ranges_.length(); ++i) { |
636 | max = Utils::Maximum(max, cid_ranges_[i]->cid_end); |
637 | } |
638 | return max; |
639 | } |
640 | |
641 | bool Cids::HasClassId(intptr_t cid) const { |
642 | for (int i = 0; i < length(); i++) { |
643 | if (cid_ranges_[i]->Contains(cid)) { |
644 | return true; |
645 | } |
646 | } |
647 | return false; |
648 | } |
649 | |
650 | Cids* Cids::CreateMonomorphic(Zone* zone, intptr_t cid) { |
651 | Cids* cids = new (zone) Cids(zone); |
652 | cids->Add(new (zone) CidRange(cid, cid)); |
653 | return cids; |
654 | } |
655 | |
656 | Cids* Cids::CreateForArgument(Zone* zone, |
657 | const BinaryFeedback& binary_feedback, |
658 | int argument_number) { |
659 | Cids* cids = new (zone) Cids(zone); |
660 | for (intptr_t i = 0; i < binary_feedback.feedback_.length(); i++) { |
661 | ASSERT((argument_number == 0) || (argument_number == 1)); |
662 | const intptr_t cid = argument_number == 0 |
663 | ? binary_feedback.feedback_[i].first |
664 | : binary_feedback.feedback_[i].second; |
665 | cids->Add(new (zone) CidRange(cid, cid)); |
666 | } |
667 | |
668 | if (cids->length() != 0) { |
669 | cids->Sort(OrderById); |
670 | |
671 | // Merge adjacent class id ranges. |
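    // For illustration (cids are hypothetical): the single-cid ranges
    // [3, 3], [4, 4] and [7, 7] become [3, 4] and [7, 7], since
    // cid_end + 1 >= cid_start holds only for the first pair.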
672 | int dest = 0; |
673 | for (int src = 1; src < cids->length(); src++) { |
674 | if (cids->cid_ranges_[dest]->cid_end + 1 >= |
675 | cids->cid_ranges_[src]->cid_start) { |
676 | cids->cid_ranges_[dest]->cid_end = cids->cid_ranges_[src]->cid_end; |
677 | } else { |
678 | dest++; |
679 | if (src != dest) cids->cid_ranges_[dest] = cids->cid_ranges_[src]; |
680 | } |
681 | } |
682 | cids->SetLength(dest + 1); |
683 | } |
684 | |
685 | return cids; |
686 | } |
687 | |
688 | static intptr_t Usage(const Function& function) { |
689 | intptr_t count = function.usage_counter(); |
690 | if (count < 0) { |
691 | if (function.HasCode()) { |
692 | // 'function' is queued for optimized compilation |
693 | count = FLAG_optimization_counter_threshold; |
694 | } else { |
695 | // 'function' is queued for unoptimized compilation |
696 | count = FLAG_compilation_counter_threshold; |
697 | } |
698 | } else if (Code::IsOptimized(function.CurrentCode())) { |
699 | // 'function' was optimized and stopped counting |
700 | count = FLAG_optimization_counter_threshold; |
701 | } |
702 | return count; |
703 | } |
704 | |
705 | void CallTargets::CreateHelper(Zone* zone, const ICData& ic_data) { |
706 | Function& dummy = Function::Handle(zone); |
707 | |
708 | const intptr_t num_args_tested = ic_data.NumArgsTested(); |
709 | |
710 | for (int i = 0, n = ic_data.NumberOfChecks(); i < n; i++) { |
711 | if (ic_data.GetCountAt(i) == 0) { |
712 | continue; |
713 | } |
714 | |
715 | intptr_t id = kDynamicCid; |
716 | if (num_args_tested == 0) { |
717 | } else if (num_args_tested == 1) { |
718 | ic_data.GetOneClassCheckAt(i, &id, &dummy); |
719 | } else { |
720 | ASSERT(num_args_tested == 2); |
721 | GrowableArray<intptr_t> arg_ids; |
722 | ic_data.GetCheckAt(i, &arg_ids, &dummy); |
723 | id = arg_ids[0]; |
724 | } |
725 | Function& function = Function::ZoneHandle(zone, ic_data.GetTargetAt(i)); |
726 | intptr_t count = ic_data.GetCountAt(i); |
727 | cid_ranges_.Add(new (zone) TargetInfo(id, id, &function, count, |
728 | ic_data.GetExactnessAt(i))); |
729 | } |
730 | |
731 | if (ic_data.is_megamorphic()) { |
732 | ASSERT(num_args_tested == 1); // Only 1-arg ICData will turn megamorphic. |
733 | const String& name = String::Handle(zone, ic_data.target_name()); |
734 | const Array& descriptor = |
735 | Array::Handle(zone, ic_data.arguments_descriptor()); |
736 | Thread* thread = Thread::Current(); |
737 | const MegamorphicCache& cache = MegamorphicCache::Handle( |
738 | zone, MegamorphicCacheTable::Lookup(thread, name, descriptor)); |
739 | SafepointMutexLocker ml(thread->isolate()->megamorphic_mutex()); |
740 | MegamorphicCacheEntries entries(Array::Handle(zone, cache.buckets())); |
741 | for (intptr_t i = 0, n = entries.Length(); i < n; i++) { |
742 | const intptr_t id = |
743 | Smi::Value(entries[i].Get<MegamorphicCache::kClassIdIndex>()); |
744 | if (id == kIllegalCid) { |
745 | continue; |
746 | } |
747 | Function& function = Function::ZoneHandle(zone); |
748 | function ^= entries[i].Get<MegamorphicCache::kTargetFunctionIndex>(); |
749 | const intptr_t filled_entry_count = cache.filled_entry_count(); |
750 | ASSERT(filled_entry_count > 0); |
751 | cid_ranges_.Add(new (zone) TargetInfo( |
752 | id, id, &function, Usage(function) / filled_entry_count, |
753 | StaticTypeExactnessState::NotTracking())); |
754 | } |
755 | } |
756 | } |
757 | |
758 | bool Cids::IsMonomorphic() const { |
759 | if (length() != 1) return false; |
760 | return cid_ranges_[0]->IsSingleCid(); |
761 | } |
762 | |
763 | intptr_t Cids::MonomorphicReceiverCid() const { |
764 | ASSERT(IsMonomorphic()); |
765 | return cid_ranges_[0]->cid_start; |
766 | } |
767 | |
768 | StaticTypeExactnessState CallTargets::MonomorphicExactness() const { |
769 | ASSERT(IsMonomorphic()); |
770 | return TargetAt(0)->exactness; |
771 | } |
772 | |
773 | const char* AssertAssignableInstr::KindToCString(Kind kind) { |
774 | switch (kind) { |
775 | #define KIND_CASE(name) \ |
776 | case k##name: \ |
777 | return #name; |
778 | FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_CASE) |
779 | #undef KIND_CASE |
780 | default: |
781 | UNREACHABLE(); |
782 | return nullptr; |
783 | } |
784 | } |
785 | |
786 | bool AssertAssignableInstr::ParseKind(const char* str, Kind* out) { |
787 | #define KIND_CASE(name) \ |
788 | if (strcmp(str, #name) == 0) { \ |
789 | *out = Kind::k##name; \ |
790 | return true; \ |
791 | } |
792 | FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_CASE) |
793 | #undef KIND_CASE |
794 | return false; |
795 | } |
796 | |
797 | CheckClassInstr::CheckClassInstr(Value* value, |
798 | intptr_t deopt_id, |
799 | const Cids& cids, |
800 | TokenPosition token_pos) |
801 | : TemplateInstruction(deopt_id), |
802 | cids_(cids), |
803 | licm_hoisted_(false), |
804 | is_bit_test_(IsCompactCidRange(cids)), |
805 | token_pos_(token_pos) { |
806 | // Expected useful check data. |
807 | const intptr_t number_of_checks = cids.length(); |
808 | ASSERT(number_of_checks > 0); |
809 | SetInputAt(0, value); |
810 | // Otherwise use CheckSmiInstr. |
811 | ASSERT(number_of_checks != 1 || !cids[0].IsSingleCid() || |
812 | cids[0].cid_start != kSmiCid); |
813 | } |
814 | |
815 | bool CheckClassInstr::AttributesEqual(Instruction* other) const { |
816 | CheckClassInstr* other_check = other->AsCheckClass(); |
817 | ASSERT(other_check != NULL); |
818 | return cids().Equals(other_check->cids()); |
819 | } |
820 | |
821 | bool CheckClassInstr::IsDeoptIfNull() const { |
822 | if (!cids().IsMonomorphic()) { |
823 | return false; |
824 | } |
825 | CompileType* in_type = value()->Type(); |
826 | const intptr_t cid = cids().MonomorphicReceiverCid(); |
827 | // Performance check: use CheckSmiInstr instead. |
828 | ASSERT(cid != kSmiCid); |
829 | return in_type->is_nullable() && (in_type->ToNullableCid() == cid); |
830 | } |
831 | |
// The null object is a singleton of the null class (except for some sentinel,
// transitional temporaries). Instead of checking against the null class only,
// we can check directly against the null instance.
835 | bool CheckClassInstr::IsDeoptIfNotNull() const { |
836 | if (!cids().IsMonomorphic()) { |
837 | return false; |
838 | } |
839 | const intptr_t cid = cids().MonomorphicReceiverCid(); |
840 | return cid == kNullCid; |
841 | } |
842 | |
843 | bool CheckClassInstr::IsCompactCidRange(const Cids& cids) { |
844 | const intptr_t number_of_checks = cids.length(); |
845 | // If there are only two checks, the extra register pressure needed for the |
846 | // dense-cid-range code is not justified. |
847 | if (number_of_checks <= 2) return false; |
848 | |
849 | // TODO(fschneider): Support smis in dense cid checks. |
850 | if (cids.HasClassId(kSmiCid)) return false; |
851 | |
852 | intptr_t min = cids.ComputeLowestCid(); |
853 | intptr_t max = cids.ComputeHighestCid(); |
854 | return (max - min) < compiler::target::kBitsPerWord; |
855 | } |
856 | |
857 | bool CheckClassInstr::IsBitTest() const { |
858 | return is_bit_test_; |
859 | } |
860 | |
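// Computes the bit mask for the dense (bit-test) class check: bit
// (cid - lowest_cid) is set for every cid covered by the ranges. For
// illustration (cids are hypothetical): for ranges {[50, 53], [55, 55]} the
// lowest cid is 50 and the resulting mask is 0b101111 (bits 0-3 and bit 5).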
861 | intptr_t CheckClassInstr::ComputeCidMask() const { |
862 | ASSERT(IsBitTest()); |
863 | const uintptr_t one = 1; |
864 | intptr_t min = cids_.ComputeLowestCid(); |
865 | intptr_t mask = 0; |
866 | for (intptr_t i = 0; i < cids_.length(); ++i) { |
867 | uintptr_t run; |
868 | uintptr_t range = one + cids_[i].Extent(); |
869 | if (range >= static_cast<uintptr_t>(compiler::target::kBitsPerWord)) { |
870 | run = -1; |
871 | } else { |
872 | run = (one << range) - 1; |
873 | } |
874 | mask |= run << (cids_[i].cid_start - min); |
875 | } |
876 | return mask; |
877 | } |
878 | |
879 | bool LoadFieldInstr::IsUnboxedLoad() const { |
880 | return slot().IsDartField() && |
881 | FlowGraphCompiler::IsUnboxedField(slot().field()); |
882 | } |
883 | |
884 | bool LoadFieldInstr::IsPotentialUnboxedLoad() const { |
885 | return slot().IsDartField() && |
886 | FlowGraphCompiler::IsPotentialUnboxedField(slot().field()); |
887 | } |
888 | |
889 | Representation LoadFieldInstr::representation() const { |
890 | if (IsUnboxedLoad()) { |
891 | const Field& field = slot().field(); |
892 | const intptr_t cid = field.UnboxedFieldCid(); |
893 | switch (cid) { |
894 | case kDoubleCid: |
895 | return kUnboxedDouble; |
896 | case kFloat32x4Cid: |
897 | return kUnboxedFloat32x4; |
898 | case kFloat64x2Cid: |
899 | return kUnboxedFloat64x2; |
900 | default: |
901 | if (field.is_non_nullable_integer()) { |
902 | return kUnboxedInt64; |
903 | } else { |
904 | UNREACHABLE(); |
905 | } |
906 | } |
907 | } |
908 | return kTagged; |
909 | } |
910 | |
911 | AllocateUninitializedContextInstr::AllocateUninitializedContextInstr( |
912 | TokenPosition token_pos, |
913 | intptr_t num_context_variables) |
914 | : token_pos_(token_pos), |
915 | num_context_variables_(num_context_variables), |
916 | identity_(AliasIdentity::Unknown()) { |
917 | // This instruction is not used in AOT for code size reasons. |
918 | ASSERT(!CompilerState::Current().is_aot()); |
919 | } |
920 | |
921 | bool StoreInstanceFieldInstr::IsUnboxedStore() const { |
922 | return slot().IsDartField() && |
923 | FlowGraphCompiler::IsUnboxedField(slot().field()); |
924 | } |
925 | |
926 | bool StoreInstanceFieldInstr::IsPotentialUnboxedStore() const { |
927 | return slot().IsDartField() && |
928 | FlowGraphCompiler::IsPotentialUnboxedField(slot().field()); |
929 | } |
930 | |
931 | Representation StoreInstanceFieldInstr::RequiredInputRepresentation( |
932 | intptr_t index) const { |
933 | ASSERT((index == 0) || (index == 1)); |
934 | if ((index == 1) && IsUnboxedStore()) { |
935 | const Field& field = slot().field(); |
936 | const intptr_t cid = field.UnboxedFieldCid(); |
937 | switch (cid) { |
938 | case kDoubleCid: |
939 | return kUnboxedDouble; |
940 | case kFloat32x4Cid: |
941 | return kUnboxedFloat32x4; |
942 | case kFloat64x2Cid: |
943 | return kUnboxedFloat64x2; |
944 | default: |
945 | if (field.is_non_nullable_integer()) { |
946 | return kUnboxedInt64; |
947 | } else { |
948 | UNREACHABLE(); |
949 | } |
950 | } |
951 | } |
952 | return kTagged; |
953 | } |
954 | |
955 | Instruction* StoreInstanceFieldInstr::Canonicalize(FlowGraph* flow_graph) { |
956 | // Dart objects are allocated null-initialized, which means we can eliminate |
// all initializing stores which store the null value.
958 | // Context objects can be allocated uninitialized as a performance |
959 | // optimization in JIT mode - however in AOT mode we always allocate them |
960 | // null initialized. |
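  // For illustration: an initializing store of the constant null into a
  // freshly allocated, null-initialized object changes nothing, so the
  // store is removed by returning nullptr.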
961 | if (is_initialization_ && |
962 | (!slot().IsContextSlot() || |
963 | !instance()->definition()->IsAllocateUninitializedContext()) && |
964 | value()->BindsToConstantNull()) { |
965 | return nullptr; |
966 | } |
967 | return this; |
968 | } |
969 | |
970 | bool GuardFieldClassInstr::AttributesEqual(Instruction* other) const { |
971 | return field().raw() == other->AsGuardFieldClass()->field().raw(); |
972 | } |
973 | |
974 | bool GuardFieldLengthInstr::AttributesEqual(Instruction* other) const { |
975 | return field().raw() == other->AsGuardFieldLength()->field().raw(); |
976 | } |
977 | |
978 | bool GuardFieldTypeInstr::AttributesEqual(Instruction* other) const { |
979 | return field().raw() == other->AsGuardFieldType()->field().raw(); |
980 | } |
981 | |
982 | Instruction* AssertSubtypeInstr::Canonicalize(FlowGraph* flow_graph) { |
983 | // If all inputs are constant, we can instantiate the sub and super type and |
984 | // remove this instruction if the subtype test succeeds. |
985 | if (super_type()->BindsToConstant() && sub_type()->BindsToConstant() && |
986 | instantiator_type_arguments()->BindsToConstant() && |
987 | function_type_arguments()->BindsToConstant()) { |
988 | auto Z = Thread::Current()->zone(); |
989 | const auto& constant_instantiator_type_args = |
990 | instantiator_type_arguments()->BoundConstant().IsNull() |
991 | ? TypeArguments::null_type_arguments() |
992 | : TypeArguments::Cast( |
993 | instantiator_type_arguments()->BoundConstant()); |
994 | const auto& constant_function_type_args = |
995 | function_type_arguments()->BoundConstant().IsNull() |
996 | ? TypeArguments::null_type_arguments() |
997 | : TypeArguments::Cast(function_type_arguments()->BoundConstant()); |
998 | auto& constant_sub_type = AbstractType::Handle( |
999 | Z, AbstractType::Cast(sub_type()->BoundConstant()).raw()); |
1000 | auto& constant_super_type = AbstractType::Handle( |
1001 | Z, AbstractType::Cast(super_type()->BoundConstant()).raw()); |
1002 | |
1003 | ASSERT(!constant_super_type.IsTypeRef()); |
1004 | ASSERT(!constant_sub_type.IsTypeRef()); |
1005 | |
1006 | if (AbstractType::InstantiateAndTestSubtype( |
1007 | &constant_sub_type, &constant_super_type, |
1008 | constant_instantiator_type_args, constant_function_type_args)) { |
1009 | return nullptr; |
1010 | } |
1011 | } |
1012 | return this; |
1013 | } |
1014 | |
1015 | bool StrictCompareInstr::AttributesEqual(Instruction* other) const { |
1016 | StrictCompareInstr* other_op = other->AsStrictCompare(); |
1017 | ASSERT(other_op != NULL); |
1018 | return ComparisonInstr::AttributesEqual(other) && |
1019 | (needs_number_check() == other_op->needs_number_check()); |
1020 | } |
1021 | |
1022 | bool MathMinMaxInstr::AttributesEqual(Instruction* other) const { |
1023 | MathMinMaxInstr* other_op = other->AsMathMinMax(); |
1024 | ASSERT(other_op != NULL); |
1025 | return (op_kind() == other_op->op_kind()) && |
1026 | (result_cid() == other_op->result_cid()); |
1027 | } |
1028 | |
1029 | bool BinaryIntegerOpInstr::AttributesEqual(Instruction* other) const { |
1030 | ASSERT(other->tag() == tag()); |
1031 | BinaryIntegerOpInstr* other_op = other->AsBinaryIntegerOp(); |
1032 | return (op_kind() == other_op->op_kind()) && |
1033 | (can_overflow() == other_op->can_overflow()) && |
1034 | (is_truncating() == other_op->is_truncating()); |
1035 | } |
1036 | |
1037 | bool LoadFieldInstr::AttributesEqual(Instruction* other) const { |
1038 | LoadFieldInstr* other_load = other->AsLoadField(); |
1039 | ASSERT(other_load != NULL); |
1040 | return &this->slot_ == &other_load->slot_; |
1041 | } |
1042 | |
1043 | bool LoadStaticFieldInstr::AttributesEqual(Instruction* other) const { |
1044 | ASSERT(IsFieldInitialized()); |
1045 | return field().raw() == other->AsLoadStaticField()->field().raw(); |
1046 | } |
1047 | |
1048 | bool LoadStaticFieldInstr::IsFieldInitialized() const { |
1049 | const Field& field = this->field(); |
1050 | return (field.StaticValue() != Object::sentinel().raw()) && |
1051 | (field.StaticValue() != Object::transition_sentinel().raw()); |
1052 | } |
1053 | |
1054 | Definition* LoadStaticFieldInstr::Canonicalize(FlowGraph* flow_graph) { |
1055 | // When precompiling, the fact that a field is currently initialized does not |
1056 | // make it safe to omit code that checks if the field needs initialization |
1057 | // because the field will be reset so it starts uninitialized in the process |
1058 | // running the precompiled code. We must be prepared to reinitialize fields. |
1059 | if (calls_initializer() && !FLAG_fields_may_be_reset && |
1060 | IsFieldInitialized()) { |
1061 | set_calls_initializer(false); |
1062 | } |
1063 | return this; |
1064 | } |
1065 | |
1066 | ConstantInstr::ConstantInstr(const Object& value, TokenPosition token_pos) |
1067 | : value_(value), token_pos_(token_pos) { |
1068 | // Check that the value is not an incorrect Integer representation. |
1069 | ASSERT(!value.IsMint() || !Smi::IsValid(Mint::Cast(value).AsInt64Value())); |
1070 | // Check that clones of fields are not stored as constants. |
1071 | ASSERT(!value.IsField() || Field::Cast(value).IsOriginal()); |
1072 | // Check that all non-Smi objects are heap allocated and in old space. |
1073 | ASSERT(value.IsSmi() || value.IsOld()); |
1074 | #if defined(DEBUG) |
1075 | // Generally, instances in the flow graph should be canonical. Smis, null |
1076 | // values, and sentinel values are canonical by construction and so we skip |
1077 | // them here. |
1078 | if (!value.IsNull() && !value.IsSmi() && value.IsInstance() && |
1079 | !value.IsCanonical() && (value.raw() != Object::sentinel().raw())) { |
1080 | // The only allowed type for which IsCanonical() never answers true is |
1081 | // TypeParameter. (They are treated as canonical due to how they are |
1082 | // created, but there is no way to canonicalize a new TypeParameter |
1083 | // instance containing the same information as an existing instance.) |
1084 | // |
1085 | // Arrays in ConstantInstrs are usually immutable and canonicalized, but |
1086 | // there are at least a couple of cases where one or both is not true: |
1087 | // |
1088 | // * The Arrays created as backing for ArgumentsDescriptors may not be |
1089 | // canonicalized for space reasons when inlined in the IL. However, they |
1090 | // are still immutable. |
1091 | // * The backtracking stack for IRRegExps is put into a ConstantInstr for |
1092 | // immediate use as an argument to the operations on that stack. In this |
    //   case, the Array representing it is neither immutable nor canonical.
1094 | // |
1095 | // In addition to complicating the story for Arrays, IRRegExp compilation |
1096 | // also uses other non-canonical values as "constants". For example, the bit |
1097 | // tables used for certain character classes are represented as TypedData, |
1098 | // and so those values are also neither immutable (as there are no immutable |
    // TypedData values) nor canonical.
1100 | // |
1101 | // LibraryPrefixes are also never canonicalized since their equality is |
1102 | // their identity. |
1103 | ASSERT(value.IsTypeParameter() || value.IsArray() || value.IsTypedData() || |
1104 | value.IsLibraryPrefix()); |
1105 | } |
1106 | #endif |
1107 | } |
1108 | |
1109 | bool ConstantInstr::AttributesEqual(Instruction* other) const { |
1110 | ConstantInstr* other_constant = other->AsConstant(); |
1111 | ASSERT(other_constant != NULL); |
1112 | return (value().raw() == other_constant->value().raw() && |
1113 | representation() == other_constant->representation()); |
1114 | } |
1115 | |
1116 | UnboxedConstantInstr::UnboxedConstantInstr(const Object& value, |
1117 | Representation representation) |
1118 | : ConstantInstr(value), |
1119 | representation_(representation), |
1120 | constant_address_(0) { |
1121 | if (representation_ == kUnboxedDouble) { |
1122 | ASSERT(value.IsDouble()); |
1123 | constant_address_ = FindDoubleConstant(Double::Cast(value).value()); |
1124 | } |
1125 | } |
1126 | |
1127 | // Returns true if the value represents a constant. |
1128 | bool Value::BindsToConstant() const { |
1129 | return definition()->IsConstant(); |
1130 | } |
1131 | |
1132 | // Returns true if the value represents constant null. |
1133 | bool Value::BindsToConstantNull() const { |
1134 | ConstantInstr* constant = definition()->AsConstant(); |
1135 | return (constant != NULL) && constant->value().IsNull(); |
1136 | } |
1137 | |
1138 | const Object& Value::BoundConstant() const { |
1139 | ASSERT(BindsToConstant()); |
1140 | ConstantInstr* constant = definition()->AsConstant(); |
1141 | ASSERT(constant != NULL); |
1142 | return constant->value(); |
1143 | } |
1144 | |
1145 | GraphEntryInstr::GraphEntryInstr(const ParsedFunction& parsed_function, |
1146 | intptr_t osr_id) |
1147 | : GraphEntryInstr(parsed_function, |
1148 | osr_id, |
1149 | CompilerState::Current().GetNextDeoptId()) {} |
1150 | |
1151 | GraphEntryInstr::GraphEntryInstr(const ParsedFunction& parsed_function, |
1152 | intptr_t osr_id, |
1153 | intptr_t deopt_id) |
1154 | : BlockEntryWithInitialDefs(0, |
1155 | kInvalidTryIndex, |
1156 | deopt_id, |
1157 | /*stack_depth*/ 0), |
1158 | parsed_function_(parsed_function), |
1159 | catch_entries_(), |
1160 | indirect_entries_(), |
1161 | osr_id_(osr_id), |
1162 | entry_count_(0), |
1163 | spill_slot_count_(0), |
1164 | fixed_slot_count_(0) {} |
1165 | |
1166 | ConstantInstr* GraphEntryInstr::constant_null() { |
1167 | ASSERT(initial_definitions()->length() > 0); |
1168 | for (intptr_t i = 0; i < initial_definitions()->length(); ++i) { |
1169 | ConstantInstr* defn = (*initial_definitions())[i]->AsConstant(); |
1170 | if (defn != NULL && defn->value().IsNull()) return defn; |
1171 | } |
1172 | UNREACHABLE(); |
1173 | return NULL; |
1174 | } |
1175 | |
1176 | CatchBlockEntryInstr* GraphEntryInstr::GetCatchEntry(intptr_t index) { |
1177 | // TODO(fschneider): Sort the catch entries by catch_try_index to avoid |
1178 | // searching. |
1179 | for (intptr_t i = 0; i < catch_entries_.length(); ++i) { |
1180 | if (catch_entries_[i]->catch_try_index() == index) return catch_entries_[i]; |
1181 | } |
1182 | return NULL; |
1183 | } |
1184 | |
1185 | bool GraphEntryInstr::IsCompiledForOsr() const { |
1186 | return osr_id_ != Compiler::kNoOSRDeoptId; |
1187 | } |
1188 | |
1189 | // ==== Support for visiting flow graphs. |
1190 | |
1191 | #define DEFINE_ACCEPT(ShortName, Attrs) \ |
  void ShortName##Instr::Accept(FlowGraphVisitor* visitor) {                  \
1193 | visitor->Visit##ShortName(this); \ |
1194 | } |
1195 | |
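// For example, instantiating the macro for the Branch instruction yields:
//
//   void BranchInstr::Accept(FlowGraphVisitor* visitor) {
//     visitor->VisitBranch(this);
//   }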
1196 | FOR_EACH_INSTRUCTION(DEFINE_ACCEPT) |
1197 | |
1198 | #undef DEFINE_ACCEPT |
1199 | |
1200 | void Instruction::SetEnvironment(Environment* deopt_env) { |
1201 | intptr_t use_index = 0; |
1202 | for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) { |
1203 | Value* use = it.CurrentValue(); |
1204 | use->set_instruction(this); |
1205 | use->set_use_index(use_index++); |
1206 | } |
1207 | env_ = deopt_env; |
1208 | } |
1209 | |
1210 | void Instruction::RemoveEnvironment() { |
1211 | for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) { |
1212 | it.CurrentValue()->RemoveFromUseList(); |
1213 | } |
1214 | env_ = NULL; |
1215 | } |
1216 | |
1217 | void Instruction::ReplaceInEnvironment(Definition* current, |
1218 | Definition* replacement) { |
1219 | for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) { |
1220 | Value* use = it.CurrentValue(); |
1221 | if (use->definition() == current) { |
1222 | use->RemoveFromUseList(); |
1223 | use->set_definition(replacement); |
1224 | replacement->AddEnvUse(use); |
1225 | } |
1226 | } |
1227 | } |
1228 | |
1229 | Instruction* Instruction::RemoveFromGraph(bool return_previous) { |
1230 | ASSERT(!IsBlockEntry()); |
1231 | ASSERT(!IsBranch()); |
1232 | ASSERT(!IsThrow()); |
1233 | ASSERT(!IsReturn()); |
1234 | ASSERT(!IsReThrow()); |
1235 | ASSERT(!IsGoto()); |
1236 | ASSERT(previous() != NULL); |
1237 | // We cannot assert that the instruction, if it is a definition, has no |
1238 | // uses. This function is used to remove instructions from the graph and |
1239 | // reinsert them elsewhere (e.g., hoisting). |
1240 | Instruction* prev_instr = previous(); |
1241 | Instruction* next_instr = next(); |
1242 | ASSERT(next_instr != NULL); |
1243 | ASSERT(!next_instr->IsBlockEntry()); |
1244 | prev_instr->LinkTo(next_instr); |
1245 | UnuseAllInputs(); |
1246 | // Reset the successor and previous instruction to indicate that the |
1247 | // instruction is removed from the graph. |
1248 | set_previous(NULL); |
1249 | set_next(NULL); |
1250 | return return_previous ? prev_instr : next_instr; |
1251 | } |
1252 | |
1253 | void Instruction::InsertAfter(Instruction* prev) { |
1254 | ASSERT(previous_ == NULL); |
1255 | ASSERT(next_ == NULL); |
1256 | previous_ = prev; |
1257 | next_ = prev->next_; |
1258 | next_->previous_ = this; |
1259 | previous_->next_ = this; |
1260 | |
1261 | // Update def-use chains whenever instructions are added to the graph |
1262 | // after initial graph construction. |
1263 | for (intptr_t i = InputCount() - 1; i >= 0; --i) { |
1264 | Value* input = InputAt(i); |
1265 | input->definition()->AddInputUse(input); |
1266 | } |
1267 | } |
1268 | |
1269 | Instruction* Instruction::AppendInstruction(Instruction* tail) { |
1270 | LinkTo(tail); |
1271 | // Update def-use chains whenever instructions are added to the graph |
1272 | // after initial graph construction. |
1273 | for (intptr_t i = tail->InputCount() - 1; i >= 0; --i) { |
1274 | Value* input = tail->InputAt(i); |
1275 | input->definition()->AddInputUse(input); |
1276 | } |
1277 | return tail; |
1278 | } |
1279 | |
1280 | BlockEntryInstr* Instruction::GetBlock() { |
1281 | // TODO(fschneider): Implement a faster way to get the block of an |
1282 | // instruction. |
1283 | Instruction* result = previous(); |
1284 | ASSERT(result != nullptr); |
1285 | while (!result->IsBlockEntry()) { |
1286 | result = result->previous(); |
1287 | ASSERT(result != nullptr); |
1288 | } |
1289 | return result->AsBlockEntry(); |
1290 | } |
1291 | |
1292 | void ForwardInstructionIterator::RemoveCurrentFromGraph() { |
1293 | current_ = current_->RemoveFromGraph(true); // Set current_ to previous. |
1294 | } |
1295 | |
1296 | void BackwardInstructionIterator::RemoveCurrentFromGraph() { |
1297 | current_ = current_->RemoveFromGraph(false); // Set current_ to next. |
1298 | } |
1299 | |
1300 | // Default implementation of visiting basic blocks. Can be overridden. |
1301 | void FlowGraphVisitor::VisitBlocks() { |
1302 | ASSERT(current_iterator_ == NULL); |
1303 | for (intptr_t i = 0; i < block_order_->length(); ++i) { |
1304 | BlockEntryInstr* entry = (*block_order_)[i]; |
1305 | entry->Accept(this); |
1306 | ForwardInstructionIterator it(entry); |
    current_iterator_ = &it;
1308 | for (; !it.Done(); it.Advance()) { |
1309 | it.Current()->Accept(this); |
1310 | } |
1311 | current_iterator_ = NULL; |
1312 | } |
1313 | } |
1314 | |
1315 | bool Value::NeedsWriteBarrier() { |
1316 | Value* value = this; |
1317 | do { |
1318 | if (value->Type()->IsNull() || |
1319 | (value->Type()->ToNullableCid() == kSmiCid) || |
1320 | (value->Type()->ToNullableCid() == kBoolCid)) { |
1321 | return false; |
1322 | } |
1323 | |
1324 | // Strictly speaking, the incremental barrier can only be skipped for |
1325 | // immediate objects (Smis) or permanent objects (vm-isolate heap or |
1326 | // image pages). Here we choose to skip the barrier for any constant on |
1327 | // the assumption it will remain reachable through the object pool. |
1328 | if (value->BindsToConstant()) { |
1329 | return false; |
1330 | } |
1331 | |
1332 | // Follow the chain of redefinitions as redefined value could have a more |
1333 | // accurate type (for example, AssertAssignable of Smi to a generic T). |
1334 | value = value->definition()->RedefinedValue(); |
1335 | } while (value != nullptr); |
1336 | |
1337 | return true; |
1338 | } |
1339 | |
1340 | void JoinEntryInstr::AddPredecessor(BlockEntryInstr* predecessor) { |
1341 | // Require the predecessors to be sorted by block_id to make managing |
1342 | // their corresponding phi inputs simpler. |
1343 | intptr_t pred_id = predecessor->block_id(); |
1344 | intptr_t index = 0; |
1345 | while ((index < predecessors_.length()) && |
1346 | (predecessors_[index]->block_id() < pred_id)) { |
1347 | ++index; |
1348 | } |
1349 | #if defined(DEBUG) |
1350 | for (intptr_t i = index; i < predecessors_.length(); ++i) { |
1351 | ASSERT(predecessors_[i]->block_id() != pred_id); |
1352 | } |
1353 | #endif |
1354 | predecessors_.InsertAt(index, predecessor); |
1355 | } |
1356 | |
1357 | intptr_t JoinEntryInstr::IndexOfPredecessor(BlockEntryInstr* pred) const { |
1358 | for (intptr_t i = 0; i < predecessors_.length(); ++i) { |
1359 | if (predecessors_[i] == pred) return i; |
1360 | } |
1361 | return -1; |
1362 | } |
1363 | |
1364 | void Value::AddToList(Value* value, Value** list) { |
1365 | ASSERT(value->next_use() == nullptr); |
1366 | ASSERT(value->previous_use() == nullptr); |
1367 | Value* next = *list; |
1368 | ASSERT(value != next); |
1369 | *list = value; |
1370 | value->set_next_use(next); |
1371 | value->set_previous_use(NULL); |
1372 | if (next != NULL) next->set_previous_use(value); |
1373 | } |
1374 | |
1375 | void Value::RemoveFromUseList() { |
1376 | Definition* def = definition(); |
1377 | Value* next = next_use(); |
1378 | if (this == def->input_use_list()) { |
1379 | def->set_input_use_list(next); |
1380 | if (next != NULL) next->set_previous_use(NULL); |
1381 | } else if (this == def->env_use_list()) { |
1382 | def->set_env_use_list(next); |
1383 | if (next != NULL) next->set_previous_use(NULL); |
1384 | } else if (Value* prev = previous_use()) { |
1385 | prev->set_next_use(next); |
1386 | if (next != NULL) next->set_previous_use(prev); |
1387 | } |
1388 | |
1389 | set_previous_use(NULL); |
1390 | set_next_use(NULL); |
1391 | } |
1392 | |
1393 | // True if the definition has a single input use and is used only in |
1394 | // environments at the same instruction as that input use. |
1395 | bool Definition::HasOnlyUse(Value* use) const { |
1396 | if (!HasOnlyInputUse(use)) { |
1397 | return false; |
1398 | } |
1399 | |
1400 | Instruction* target = use->instruction(); |
1401 | for (Value::Iterator it(env_use_list()); !it.Done(); it.Advance()) { |
1402 | if (it.Current()->instruction() != target) return false; |
1403 | } |
1404 | return true; |
1405 | } |
1406 | |
1407 | bool Definition::HasOnlyInputUse(Value* use) const { |
1408 | return (input_use_list() == use) && (use->next_use() == NULL); |
1409 | } |
1410 | |
1411 | void Definition::ReplaceUsesWith(Definition* other) { |
1412 | ASSERT(other != NULL); |
1413 | ASSERT(this != other); |
1414 | |
1415 | Value* current = NULL; |
1416 | Value* next = input_use_list(); |
1417 | if (next != NULL) { |
1418 | // Change all the definitions. |
1419 | while (next != NULL) { |
1420 | current = next; |
1421 | current->set_definition(other); |
1422 | current->RefineReachingType(other->Type()); |
1423 | next = current->next_use(); |
1424 | } |
1425 | |
1426 | // Concatenate the lists. |
1427 | next = other->input_use_list(); |
1428 | current->set_next_use(next); |
1429 | if (next != NULL) next->set_previous_use(current); |
1430 | other->set_input_use_list(input_use_list()); |
1431 | set_input_use_list(NULL); |
1432 | } |
1433 | |
1434 | // Repeat for environment uses. |
1435 | current = NULL; |
1436 | next = env_use_list(); |
1437 | if (next != NULL) { |
1438 | while (next != NULL) { |
1439 | current = next; |
1440 | current->set_definition(other); |
1441 | current->RefineReachingType(other->Type()); |
1442 | next = current->next_use(); |
1443 | } |
1444 | next = other->env_use_list(); |
1445 | current->set_next_use(next); |
1446 | if (next != NULL) next->set_previous_use(current); |
1447 | other->set_env_use_list(env_use_list()); |
1448 | set_env_use_list(NULL); |
1449 | } |
1450 | } |
1451 | |
1452 | void Instruction::UnuseAllInputs() { |
1453 | for (intptr_t i = InputCount() - 1; i >= 0; --i) { |
1454 | InputAt(i)->RemoveFromUseList(); |
1455 | } |
1456 | for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) { |
1457 | it.CurrentValue()->RemoveFromUseList(); |
1458 | } |
1459 | } |
1460 | |
1461 | void Instruction::RepairPushArgsInEnvironment() const { |
1462 | PushArgumentsArray* push_arguments = GetPushArguments(); |
1463 | ASSERT(push_arguments != nullptr); |
1464 | const intptr_t arg_count = ArgumentCount(); |
1465 | ASSERT(arg_count <= env()->Length()); |
1466 | const intptr_t env_base = env()->Length() - arg_count; |
1467 | for (intptr_t i = 0; i < arg_count; ++i) { |
1468 | env()->ValueAt(env_base + i)->BindToEnvironment(push_arguments->At(i)); |
1469 | } |
1470 | } |
1471 | |
1472 | void Instruction::InheritDeoptTargetAfter(FlowGraph* flow_graph, |
1473 | Definition* call, |
1474 | Definition* result) { |
1475 | ASSERT(call->env() != NULL); |
1476 | deopt_id_ = DeoptId::ToDeoptAfter(call->deopt_id_); |
1477 | call->env()->DeepCopyAfterTo( |
1478 | flow_graph->zone(), this, call->ArgumentCount(), |
1479 | flow_graph->constant_dead(), |
1480 | result != NULL ? result : flow_graph->constant_dead()); |
1481 | } |
1482 | |
1483 | void Instruction::InheritDeoptTarget(Zone* zone, Instruction* other) { |
1484 | ASSERT(other->env() != NULL); |
1485 | CopyDeoptIdFrom(*other); |
1486 | other->env()->DeepCopyTo(zone, this); |
1487 | } |
1488 | |
1489 | void BranchInstr::InheritDeoptTarget(Zone* zone, Instruction* other) { |
1490 | ASSERT(env() == NULL); |
1491 | Instruction::InheritDeoptTarget(zone, other); |
1492 | comparison()->SetDeoptId(*this); |
1493 | } |
1494 | |
1495 | bool Instruction::IsDominatedBy(Instruction* dom) { |
1496 | BlockEntryInstr* block = GetBlock(); |
1497 | BlockEntryInstr* dom_block = dom->GetBlock(); |
1498 | |
1499 | if (dom->IsPhi()) { |
1500 | dom = dom_block; |
1501 | } |
1502 | |
1503 | if (block == dom_block) { |
1504 | if ((block == dom) || (this == block->last_instruction())) { |
1505 | return true; |
1506 | } |
1507 | |
1508 | if (IsPhi()) { |
1509 | return false; |
1510 | } |
1511 | |
1512 | for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) { |
1513 | if (curr == this) return true; |
1514 | } |
1515 | |
1516 | return false; |
1517 | } |
1518 | |
1519 | return dom_block->Dominates(block); |
1520 | } |
1521 | |
1522 | bool Instruction::HasUnmatchedInputRepresentations() const { |
1523 | for (intptr_t i = 0; i < InputCount(); i++) { |
1524 | Definition* input = InputAt(i)->definition(); |
1525 | const Representation input_representation = RequiredInputRepresentation(i); |
1526 | if (input_representation != kNoRepresentation && |
1527 | input_representation != input->representation()) { |
1528 | return true; |
1529 | } |
1530 | } |
1531 | |
1532 | return false; |
1533 | } |
1534 | |
1535 | const intptr_t Instruction::kInstructionAttrs[Instruction::kNumInstructions] = { |
1536 | #define INSTR_ATTRS(type, attrs) InstrAttrs::attrs, |
1537 | FOR_EACH_INSTRUCTION(INSTR_ATTRS) |
1538 | #undef INSTR_ATTRS |
1539 | }; |
1540 | |
1541 | bool Instruction::CanTriggerGC() const { |
1542 | return (kInstructionAttrs[tag()] & InstrAttrs::kNoGC) == 0; |
1543 | } |
1544 | |
1545 | void Definition::ReplaceWithResult(Instruction* replacement, |
1546 | Definition* replacement_for_uses, |
1547 | ForwardInstructionIterator* iterator) { |
1548 | // Record replacement's input uses. |
1549 | for (intptr_t i = replacement->InputCount() - 1; i >= 0; --i) { |
1550 | Value* input = replacement->InputAt(i); |
1551 | input->definition()->AddInputUse(input); |
1552 | } |
1553 | // Take replacement's environment from this definition. |
1554 | ASSERT(replacement->env() == NULL); |
1555 | replacement->SetEnvironment(env()); |
1556 | ClearEnv(); |
1557 | // Replace all uses of this definition with replacement_for_uses. |
1558 | ReplaceUsesWith(replacement_for_uses); |
1559 | |
1560 | // Finally replace this one with the replacement instruction in the graph. |
1561 | previous()->LinkTo(replacement); |
1562 | if ((iterator != NULL) && (this == iterator->Current())) { |
1563 | // Remove through the iterator. |
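    // Link [replacement] to [this] first so that the iterator's
    // RemoveCurrentFromGraph() unlinks [this] and iteration resumes
    // correctly after [replacement].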
1564 | replacement->LinkTo(this); |
1565 | iterator->RemoveCurrentFromGraph(); |
1566 | } else { |
1567 | replacement->LinkTo(next()); |
1568 | // Remove this definition's input uses. |
1569 | UnuseAllInputs(); |
1570 | } |
1571 | set_previous(NULL); |
1572 | set_next(NULL); |
1573 | } |
1574 | |
1575 | void Definition::ReplaceWith(Definition* other, |
1576 | ForwardInstructionIterator* iterator) { |
1577 | // Reuse this instruction's SSA name for other. |
1578 | ASSERT(!other->HasSSATemp()); |
1579 | if (HasSSATemp()) { |
1580 | other->set_ssa_temp_index(ssa_temp_index()); |
1581 | } |
1582 | ReplaceWithResult(other, other, iterator); |
1583 | } |
1584 | |
1585 | void BranchInstr::SetComparison(ComparisonInstr* new_comparison) { |
1586 | for (intptr_t i = new_comparison->InputCount() - 1; i >= 0; --i) { |
1587 | Value* input = new_comparison->InputAt(i); |
1588 | input->definition()->AddInputUse(input); |
1589 | input->set_instruction(this); |
1590 | } |
1591 | // There should be no need to copy or unuse an environment. |
1592 | ASSERT(comparison()->env() == NULL); |
1593 | ASSERT(new_comparison->env() == NULL); |
1594 | // Remove the current comparison's input uses. |
1595 | comparison()->UnuseAllInputs(); |
1596 | ASSERT(!new_comparison->HasUses()); |
1597 | comparison_ = new_comparison; |
1598 | } |
1599 | |
1600 | // ==== Postorder graph traversal. |
1601 | static bool IsMarked(BlockEntryInstr* block, |
1602 | GrowableArray<BlockEntryInstr*>* preorder) { |
1603 | // Detect that a block has been visited as part of the current |
1604 | // DiscoverBlocks (we can call DiscoverBlocks multiple times). The block |
1605 | // will be 'marked' by (1) having a preorder number in the range of the |
1606 | // preorder array and (2) being in the preorder array at that index. |
1607 | intptr_t i = block->preorder_number(); |
1608 | return (i >= 0) && (i < preorder->length()) && ((*preorder)[i] == block); |
1609 | } |
1610 | |
1611 | // Base class implementation used for JoinEntry and TargetEntry. |
1612 | bool BlockEntryInstr::DiscoverBlock(BlockEntryInstr* predecessor, |
1613 | GrowableArray<BlockEntryInstr*>* preorder, |
1614 | GrowableArray<intptr_t>* parent) { |
1615 | // If this block has a predecessor (i.e., is not the graph entry) we can |
1616 | // assume the preorder array is non-empty. |
1617 | ASSERT((predecessor == NULL) || !preorder->is_empty()); |
1618 | // Blocks with a single predecessor cannot have been reached before. |
1619 | ASSERT(IsJoinEntry() || !IsMarked(this, preorder)); |
1620 | |
  // 1. If the block has already been reached, add [predecessor] as a
  // basic-block predecessor and we are done.
1623 | if (IsMarked(this, preorder)) { |
1624 | ASSERT(predecessor != NULL); |
1625 | AddPredecessor(predecessor); |
1626 | return false; |
1627 | } |
1628 | |
1629 | // 2. Otherwise, clear the predecessors which might have been computed on |
1630 | // some earlier call to DiscoverBlocks and record this predecessor. |
1631 | ClearPredecessors(); |
1632 | if (predecessor != NULL) AddPredecessor(predecessor); |
1633 | |
1634 | // 3. The predecessor is the spanning-tree parent. The graph entry has no |
1635 | // parent, indicated by -1. |
1636 | intptr_t parent_number = |
1637 | (predecessor == NULL) ? -1 : predecessor->preorder_number(); |
1638 | parent->Add(parent_number); |
1639 | |
1640 | // 4. Assign the preorder number and add the block entry to the list. |
1641 | set_preorder_number(preorder->length()); |
1642 | preorder->Add(this); |
1643 | |
1644 | // The preorder and parent arrays are indexed by |
1645 | // preorder block number, so they should stay in lockstep. |
1646 | ASSERT(preorder->length() == parent->length()); |
1647 | |
1648 | // 5. Iterate straight-line successors to record assigned variables and |
1649 | // find the last instruction in the block. The graph entry block consists |
1650 | // of only the entry instruction, so that is the last instruction in the |
1651 | // block. |
1652 | Instruction* last = this; |
1653 | for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) { |
1654 | last = it.Current(); |
1655 | } |
1656 | set_last_instruction(last); |
1657 | if (last->IsGoto()) last->AsGoto()->set_block(this); |
1658 | |
1659 | return true; |
1660 | } |
1661 | |
1662 | void GraphEntryInstr::RelinkToOsrEntry(Zone* zone, intptr_t max_block_id) { |
1663 | ASSERT(osr_id_ != Compiler::kNoOSRDeoptId); |
1664 | BitVector* block_marks = new (zone) BitVector(zone, max_block_id + 1); |
1665 | bool found = FindOsrEntryAndRelink(this, /*parent=*/NULL, block_marks); |
1666 | ASSERT(found); |
1667 | } |
1668 | |
1669 | bool BlockEntryInstr::FindOsrEntryAndRelink(GraphEntryInstr* graph_entry, |
1670 | Instruction* parent, |
1671 | BitVector* block_marks) { |
1672 | const intptr_t osr_id = graph_entry->osr_id(); |
1673 | |
1674 | // Search for the instruction with the OSR id. Use a depth first search |
1675 | // because basic blocks have not been discovered yet. Prune unreachable |
1676 | // blocks by replacing the normal entry with a jump to the block |
1677 | // containing the OSR entry point. |
1678 | |
1679 | // Do not visit blocks more than once. |
1680 | if (block_marks->Contains(block_id())) return false; |
1681 | block_marks->Add(block_id()); |
1682 | |
1683 | // Search this block for the OSR id. |
1684 | Instruction* instr = this; |
1685 | for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) { |
1686 | instr = it.Current(); |
1687 | if (instr->GetDeoptId() == osr_id) { |
1688 | // Sanity check that we found a stack check instruction. |
1689 | ASSERT(instr->IsCheckStackOverflow()); |
      // Loop stack checks are always in join blocks so that they can
      // be the target of a goto.
1692 | ASSERT(IsJoinEntry()); |
1693 | // The instruction should be the first instruction in the block so |
1694 | // we can simply jump to the beginning of the block. |
1695 | ASSERT(instr->previous() == this); |
1696 | |
1697 | ASSERT(stack_depth() == instr->AsCheckStackOverflow()->stack_depth()); |
1698 | auto normal_entry = graph_entry->normal_entry(); |
1699 | auto osr_entry = new OsrEntryInstr( |
1700 | graph_entry, normal_entry->block_id(), normal_entry->try_index(), |
1701 | normal_entry->deopt_id(), stack_depth()); |
1702 | |
1703 | auto goto_join = new GotoInstr(AsJoinEntry(), |
1704 | CompilerState::Current().GetNextDeoptId()); |
1705 | ASSERT(parent != nullptr); |
1706 | goto_join->CopyDeoptIdFrom(*parent); |
1707 | osr_entry->LinkTo(goto_join); |
1708 | |
1709 | // Remove normal function entries & add osr entry. |
1710 | graph_entry->set_normal_entry(nullptr); |
1711 | graph_entry->set_unchecked_entry(nullptr); |
1712 | graph_entry->set_osr_entry(osr_entry); |
1713 | |
1714 | return true; |
1715 | } |
1716 | } |
1717 | |
1718 | // Recursively search the successors. |
1719 | for (intptr_t i = instr->SuccessorCount() - 1; i >= 0; --i) { |
1720 | if (instr->SuccessorAt(i)->FindOsrEntryAndRelink(graph_entry, instr, |
1721 | block_marks)) { |
1722 | return true; |
1723 | } |
1724 | } |
1725 | return false; |
1726 | } |
1727 | |
1728 | bool BlockEntryInstr::Dominates(BlockEntryInstr* other) const { |
1729 | // TODO(fschneider): Make this faster by e.g. storing dominators for each |
1730 | // block while computing the dominator tree. |
1731 | ASSERT(other != NULL); |
1732 | BlockEntryInstr* current = other; |
1733 | while (current != NULL && current != this) { |
1734 | current = current->dominator(); |
1735 | } |
1736 | return current == this; |
1737 | } |
1738 | |
1739 | BlockEntryInstr* BlockEntryInstr::ImmediateDominator() const { |
1740 | Instruction* last = dominator()->last_instruction(); |
1741 | if ((last->SuccessorCount() == 1) && (last->SuccessorAt(0) == this)) { |
1742 | return dominator(); |
1743 | } |
1744 | return NULL; |
1745 | } |
1746 | |
bool BlockEntryInstr::IsLoopHeader() const {
1748 | return loop_info_ != nullptr && loop_info_->header() == this; |
1749 | } |
1750 | |
1751 | intptr_t BlockEntryInstr::NestingDepth() const { |
1752 | return loop_info_ == nullptr ? 0 : loop_info_->NestingDepth(); |
1753 | } |
1754 | |
1755 | // Helper to mutate the graph during inlining. This block should be |
1756 | // replaced with new_block as a predecessor of all of this block's |
1757 | // successors. For each successor, the predecessors will be reordered |
1758 | // to preserve block-order sorting of the predecessors as well as the |
1759 | // phis if the successor is a join. |
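// For example, if a join's predecessors are sorted as [B2, B5, B9] and B5
// (this block) is replaced by a new block B10, the predecessor list becomes
// [B2, B9, B10] and each phi's input that used to flow in from B5 is moved
// to the last slot.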
1760 | void BlockEntryInstr::ReplaceAsPredecessorWith(BlockEntryInstr* new_block) { |
1761 | // Set the last instruction of the new block to that of the old block. |
1762 | Instruction* last = last_instruction(); |
1763 | new_block->set_last_instruction(last); |
1764 | // For each successor, update the predecessors. |
1765 | for (intptr_t sidx = 0; sidx < last->SuccessorCount(); ++sidx) { |
1766 | // If the successor is a target, update its predecessor. |
1767 | TargetEntryInstr* target = last->SuccessorAt(sidx)->AsTargetEntry(); |
1768 | if (target != NULL) { |
1769 | target->predecessor_ = new_block; |
1770 | continue; |
1771 | } |
1772 | // If the successor is a join, update each predecessor and the phis. |
1773 | JoinEntryInstr* join = last->SuccessorAt(sidx)->AsJoinEntry(); |
1774 | ASSERT(join != NULL); |
1775 | // Find the old predecessor index. |
1776 | intptr_t old_index = join->IndexOfPredecessor(this); |
1777 | intptr_t pred_count = join->PredecessorCount(); |
1778 | ASSERT(old_index >= 0); |
1779 | ASSERT(old_index < pred_count); |
1780 | // Find the new predecessor index while reordering the predecessors. |
1781 | intptr_t new_id = new_block->block_id(); |
1782 | intptr_t new_index = old_index; |
1783 | if (block_id() < new_id) { |
1784 | // Search upwards, bubbling down intermediate predecessors. |
1785 | for (; new_index < pred_count - 1; ++new_index) { |
1786 | if (join->predecessors_[new_index + 1]->block_id() > new_id) break; |
1787 | join->predecessors_[new_index] = join->predecessors_[new_index + 1]; |
1788 | } |
1789 | } else { |
1790 | // Search downwards, bubbling up intermediate predecessors. |
1791 | for (; new_index > 0; --new_index) { |
1792 | if (join->predecessors_[new_index - 1]->block_id() < new_id) break; |
1793 | join->predecessors_[new_index] = join->predecessors_[new_index - 1]; |
1794 | } |
1795 | } |
1796 | join->predecessors_[new_index] = new_block; |
    // If the new and old predecessor indices match, there is nothing to
    // update.
1798 | if ((join->phis() == NULL) || (old_index == new_index)) return; |
1799 | // Otherwise, reorder the predecessor uses in each phi. |
1800 | for (PhiIterator it(join); !it.Done(); it.Advance()) { |
1801 | PhiInstr* phi = it.Current(); |
1802 | ASSERT(phi != NULL); |
1803 | ASSERT(pred_count == phi->InputCount()); |
1804 | // Save the predecessor use. |
1805 | Value* pred_use = phi->InputAt(old_index); |
1806 | // Move uses between old and new. |
1807 | intptr_t step = (old_index < new_index) ? 1 : -1; |
1808 | for (intptr_t use_idx = old_index; use_idx != new_index; |
1809 | use_idx += step) { |
1810 | phi->SetInputAt(use_idx, phi->InputAt(use_idx + step)); |
1811 | } |
1812 | // Write the predecessor use. |
1813 | phi->SetInputAt(new_index, pred_use); |
1814 | } |
1815 | } |
1816 | } |
1817 | |
1818 | void BlockEntryInstr::ClearAllInstructions() { |
1819 | JoinEntryInstr* join = this->AsJoinEntry(); |
1820 | if (join != NULL) { |
1821 | for (PhiIterator it(join); !it.Done(); it.Advance()) { |
1822 | it.Current()->UnuseAllInputs(); |
1823 | } |
1824 | } |
1825 | UnuseAllInputs(); |
1826 | for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) { |
1827 | it.Current()->UnuseAllInputs(); |
1828 | } |
1829 | } |
1830 | |
1831 | PhiInstr* JoinEntryInstr::InsertPhi(intptr_t var_index, intptr_t var_count) { |
1832 | // Lazily initialize the array of phis. |
  // Currently, phis are stored in a sparse array that holds the phi
  // for the variable with index i at position i.
1835 | // TODO(fschneider): Store phis in a more compact way. |
1836 | if (phis_ == NULL) { |
1837 | phis_ = new ZoneGrowableArray<PhiInstr*>(var_count); |
1838 | for (intptr_t i = 0; i < var_count; i++) { |
1839 | phis_->Add(NULL); |
1840 | } |
1841 | } |
1842 | ASSERT((*phis_)[var_index] == NULL); |
1843 | return (*phis_)[var_index] = new PhiInstr(this, PredecessorCount()); |
1844 | } |
1845 | |
1846 | void JoinEntryInstr::InsertPhi(PhiInstr* phi) { |
1847 | // Lazily initialize the array of phis. |
1848 | if (phis_ == NULL) { |
1849 | phis_ = new ZoneGrowableArray<PhiInstr*>(1); |
1850 | } |
1851 | phis_->Add(phi); |
1852 | } |
1853 | |
1854 | void JoinEntryInstr::RemovePhi(PhiInstr* phi) { |
1855 | ASSERT(phis_ != NULL); |
1856 | for (intptr_t index = 0; index < phis_->length(); ++index) { |
1857 | if (phi == (*phis_)[index]) { |
1858 | (*phis_)[index] = phis_->Last(); |
1859 | phis_->RemoveLast(); |
1860 | return; |
1861 | } |
1862 | } |
1863 | } |
1864 | |
1865 | void JoinEntryInstr::RemoveDeadPhis(Definition* replacement) { |
1866 | if (phis_ == NULL) return; |
1867 | |
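  // Compact the phi array in place: live phis are kept and their input
  // uses re-added; uses of dead phis are redirected to [replacement].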
1868 | intptr_t to_index = 0; |
1869 | for (intptr_t from_index = 0; from_index < phis_->length(); ++from_index) { |
1870 | PhiInstr* phi = (*phis_)[from_index]; |
1871 | if (phi != NULL) { |
1872 | if (phi->is_alive()) { |
1873 | (*phis_)[to_index++] = phi; |
1874 | for (intptr_t i = phi->InputCount() - 1; i >= 0; --i) { |
1875 | Value* input = phi->InputAt(i); |
1876 | input->definition()->AddInputUse(input); |
1877 | } |
1878 | } else { |
1879 | phi->ReplaceUsesWith(replacement); |
1880 | } |
1881 | } |
1882 | } |
1883 | if (to_index == 0) { |
1884 | phis_ = NULL; |
1885 | } else { |
1886 | phis_->TruncateTo(to_index); |
1887 | } |
1888 | } |
1889 | |
1890 | intptr_t Instruction::SuccessorCount() const { |
1891 | return 0; |
1892 | } |
1893 | |
1894 | BlockEntryInstr* Instruction::SuccessorAt(intptr_t index) const { |
1895 | // Called only if index is in range. Only control-transfer instructions |
1896 | // can have non-zero successor counts and they override this function. |
1897 | UNREACHABLE(); |
1898 | return NULL; |
1899 | } |
1900 | |
1901 | intptr_t GraphEntryInstr::SuccessorCount() const { |
1902 | return (normal_entry() == nullptr ? 0 : 1) + |
1903 | (unchecked_entry() == nullptr ? 0 : 1) + |
1904 | (osr_entry() == nullptr ? 0 : 1) + catch_entries_.length(); |
1905 | } |
1906 | |
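// Successors are ordered as normal entry, unchecked entry, OSR entry, then
// catch entries; this must stay in sync with SuccessorCount() above.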
1907 | BlockEntryInstr* GraphEntryInstr::SuccessorAt(intptr_t index) const { |
1908 | if (normal_entry() != nullptr) { |
1909 | if (index == 0) return normal_entry_; |
1910 | index--; |
1911 | } |
1912 | if (unchecked_entry() != nullptr) { |
1913 | if (index == 0) return unchecked_entry(); |
1914 | index--; |
1915 | } |
1916 | if (osr_entry() != nullptr) { |
1917 | if (index == 0) return osr_entry(); |
1918 | index--; |
1919 | } |
1920 | return catch_entries_[index]; |
1921 | } |
1922 | |
1923 | intptr_t BranchInstr::SuccessorCount() const { |
1924 | return 2; |
1925 | } |
1926 | |
1927 | BlockEntryInstr* BranchInstr::SuccessorAt(intptr_t index) const { |
1928 | if (index == 0) return true_successor_; |
1929 | if (index == 1) return false_successor_; |
1930 | UNREACHABLE(); |
1931 | return NULL; |
1932 | } |
1933 | |
1934 | intptr_t GotoInstr::SuccessorCount() const { |
1935 | return 1; |
1936 | } |
1937 | |
1938 | BlockEntryInstr* GotoInstr::SuccessorAt(intptr_t index) const { |
1939 | ASSERT(index == 0); |
1940 | return successor(); |
1941 | } |
1942 | |
1943 | void Instruction::Goto(JoinEntryInstr* entry) { |
1944 | LinkTo(new GotoInstr(entry, CompilerState::Current().GetNextDeoptId())); |
1945 | } |
1946 | |
1947 | bool IntConverterInstr::ComputeCanDeoptimize() const { |
1948 | return (to() == kUnboxedInt32) && !is_truncating() && |
1949 | !RangeUtils::Fits(value()->definition()->range(), |
1950 | RangeBoundary::kRangeBoundaryInt32); |
1951 | } |
1952 | |
1953 | bool UnboxInt32Instr::ComputeCanDeoptimize() const { |
1954 | if (SpeculativeModeOfInputs() == kNotSpeculative) { |
1955 | return false; |
1956 | } |
1957 | const intptr_t value_cid = value()->Type()->ToCid(); |
1958 | if (value_cid == kSmiCid) { |
1959 | return (compiler::target::kSmiBits > 32) && !is_truncating() && |
1960 | !RangeUtils::Fits(value()->definition()->range(), |
1961 | RangeBoundary::kRangeBoundaryInt32); |
1962 | } else if (value_cid == kMintCid) { |
1963 | return !is_truncating() && |
1964 | !RangeUtils::Fits(value()->definition()->range(), |
1965 | RangeBoundary::kRangeBoundaryInt32); |
1966 | } else if (is_truncating() && value()->definition()->IsBoxInteger()) { |
1967 | return false; |
1968 | } else if ((compiler::target::kSmiBits < 32) && value()->Type()->IsInt()) { |
1969 | return !RangeUtils::Fits(value()->definition()->range(), |
1970 | RangeBoundary::kRangeBoundaryInt32); |
1971 | } else { |
1972 | return true; |
1973 | } |
1974 | } |
1975 | |
1976 | bool UnboxUint32Instr::ComputeCanDeoptimize() const { |
1977 | ASSERT(is_truncating()); |
1978 | if (SpeculativeModeOfInputs() == kNotSpeculative) { |
1979 | return false; |
1980 | } |
1981 | if ((value()->Type()->ToCid() == kSmiCid) || |
1982 | (value()->Type()->ToCid() == kMintCid)) { |
1983 | return false; |
1984 | } |
1985 | // Check input value's range. |
1986 | Range* value_range = value()->definition()->range(); |
1987 | return !RangeUtils::Fits(value_range, RangeBoundary::kRangeBoundaryInt64); |
1988 | } |
1989 | |
1990 | bool BinaryInt32OpInstr::ComputeCanDeoptimize() const { |
1991 | switch (op_kind()) { |
1992 | case Token::kBIT_AND: |
1993 | case Token::kBIT_OR: |
1994 | case Token::kBIT_XOR: |
1995 | return false; |
1996 | |
1997 | case Token::kSHR: |
1998 | return false; |
1999 | |
2000 | case Token::kSHL: |
      // Currently only shifts by an in-range constant are supported; see
      // BinaryInt32OpInstr::IsSupported.
2003 | return can_overflow(); |
2004 | |
2005 | case Token::kMOD: { |
2006 | UNREACHABLE(); |
2007 | } |
2008 | |
2009 | default: |
2010 | return can_overflow(); |
2011 | } |
2012 | } |
2013 | |
2014 | bool BinarySmiOpInstr::ComputeCanDeoptimize() const { |
2015 | switch (op_kind()) { |
2016 | case Token::kBIT_AND: |
2017 | case Token::kBIT_OR: |
2018 | case Token::kBIT_XOR: |
2019 | return false; |
2020 | |
2021 | case Token::kSHR: |
2022 | return !RangeUtils::IsPositive(right_range()); |
2023 | |
2024 | case Token::kSHL: |
2025 | return can_overflow() || !RangeUtils::IsPositive(right_range()); |
2026 | |
2027 | case Token::kMOD: |
2028 | return RangeUtils::CanBeZero(right_range()); |
2029 | |
2030 | case Token::kTRUNCDIV: |
2031 | return RangeUtils::CanBeZero(right_range()) || |
2032 | RangeUtils::Overlaps(right_range(), -1, -1); |
2033 | |
2034 | default: |
2035 | return can_overflow(); |
2036 | } |
2037 | } |
2038 | |
2039 | bool ShiftIntegerOpInstr::IsShiftCountInRange(int64_t max) const { |
2040 | return RangeUtils::IsWithin(shift_range(), 0, max); |
2041 | } |
2042 | |
2043 | bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const { |
2044 | if (!right()->definition()->IsConstant()) return false; |
2045 | const Object& constant = right()->definition()->AsConstant()->value(); |
2046 | if (!constant.IsSmi()) return false; |
2047 | const intptr_t int_value = Smi::Cast(constant).Value(); |
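  // A Smi constant can never be kIntptrMin (Smis have fewer bits than a
  // word), so taking the absolute value below cannot overflow.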
2048 | ASSERT(int_value != kIntptrMin); |
2049 | return Utils::IsPowerOfTwo(Utils::Abs(int_value)); |
2050 | } |
2051 | |
2052 | static intptr_t RepresentationBits(Representation r) { |
2053 | switch (r) { |
2054 | case kTagged: |
2055 | return compiler::target::kBitsPerWord - 1; |
2056 | case kUnboxedInt32: |
2057 | case kUnboxedUint32: |
2058 | return 32; |
2059 | case kUnboxedInt64: |
2060 | return 64; |
2061 | default: |
2062 | UNREACHABLE(); |
2063 | return 0; |
2064 | } |
2065 | } |
2066 | |
2067 | static int64_t RepresentationMask(Representation r) { |
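  // For example, RepresentationMask(kUnboxedUint32) is 0xFFFFFFFF and
  // RepresentationMask(kUnboxedInt64) is -1 (all 64 bits set); for kTagged
  // the low kBitsPerWord - 1 bits are set, matching the value width of a
  // Smi.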
2068 | return static_cast<int64_t>(static_cast<uint64_t>(-1) >> |
2069 | (64 - RepresentationBits(r))); |
2070 | } |
2071 | |
2072 | static Definition* CanonicalizeCommutativeDoubleArithmetic(Token::Kind op, |
2073 | Value* left, |
2074 | Value* right) { |
2075 | int64_t left_value; |
2076 | if (!Evaluator::ToIntegerConstant(left, &left_value)) { |
2077 | return NULL; |
2078 | } |
2079 | |
  // Can't apply the 0.0 * x -> 0.0 equivalence to double operations because
  // 0.0 * NaN is NaN, not 0.0.
  // Can't apply 0.0 + x -> x to doubles because 0.0 + (-0.0) is 0.0, not
  // -0.0.
2083 | switch (op) { |
2084 | case Token::kMUL: |
2085 | if (left_value == 1) { |
2086 | if (right->definition()->representation() != kUnboxedDouble) { |
          // Can't apply the equivalence yet because representation
          // selection has not run. We need it to guarantee that the right
          // value is correctly coerced to double. The second
          // canonicalization pass will apply this equivalence.
2091 | return NULL; |
2092 | } else { |
2093 | return right->definition(); |
2094 | } |
2095 | } |
2096 | break; |
2097 | default: |
2098 | break; |
2099 | } |
2100 | |
2101 | return NULL; |
2102 | } |
2103 | |
2104 | Definition* DoubleToFloatInstr::Canonicalize(FlowGraph* flow_graph) { |
2105 | #ifdef DEBUG |
  // Must only be used in Float32 StoreIndexedInstr or FloatToDoubleInstr or
  // Phis introduced by load forwarding.
2108 | ASSERT(env_use_list() == NULL); |
2109 | for (Value* use = input_use_list(); use != NULL; use = use->next_use()) { |
2110 | ASSERT(use->instruction()->IsPhi() || |
2111 | use->instruction()->IsFloatToDouble() || |
2112 | (use->instruction()->IsStoreIndexed() && |
2113 | (use->instruction()->AsStoreIndexed()->class_id() == |
2114 | kTypedDataFloat32ArrayCid))); |
2115 | } |
2116 | #endif |
2117 | if (!HasUses()) return NULL; |
2118 | if (value()->definition()->IsFloatToDouble()) { |
    // D2F(F2D(v)) == v: converting a float to double and back is lossless.
2120 | return value()->definition()->AsFloatToDouble()->value()->definition(); |
2121 | } |
2122 | return this; |
2123 | } |
2124 | |
2125 | Definition* FloatToDoubleInstr::Canonicalize(FlowGraph* flow_graph) { |
2126 | return HasUses() ? this : NULL; |
2127 | } |
2128 | |
2129 | Definition* BinaryDoubleOpInstr::Canonicalize(FlowGraph* flow_graph) { |
2130 | if (!HasUses()) return NULL; |
2131 | |
2132 | Definition* result = NULL; |
2133 | |
2134 | result = CanonicalizeCommutativeDoubleArithmetic(op_kind(), left(), right()); |
2135 | if (result != NULL) { |
2136 | return result; |
2137 | } |
2138 | |
2139 | result = CanonicalizeCommutativeDoubleArithmetic(op_kind(), right(), left()); |
2140 | if (result != NULL) { |
2141 | return result; |
2142 | } |
2143 | |
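  // Replace x * x with a single square operation. The new instruction is
  // inserted before this one with the same environment.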
2144 | if ((op_kind() == Token::kMUL) && |
2145 | (left()->definition() == right()->definition())) { |
2146 | MathUnaryInstr* math_unary = new MathUnaryInstr( |
2147 | MathUnaryInstr::kDoubleSquare, new Value(left()->definition()), |
2148 | DeoptimizationTarget()); |
2149 | flow_graph->InsertBefore(this, math_unary, env(), FlowGraph::kValue); |
2150 | return math_unary; |
2151 | } |
2152 | |
2153 | return this; |
2154 | } |
2155 | |
2156 | Definition* DoubleTestOpInstr::Canonicalize(FlowGraph* flow_graph) { |
2157 | return HasUses() ? this : NULL; |
2158 | } |
2159 | |
2160 | static bool IsCommutative(Token::Kind op) { |
2161 | switch (op) { |
2162 | case Token::kMUL: |
2163 | FALL_THROUGH; |
2164 | case Token::kADD: |
2165 | FALL_THROUGH; |
2166 | case Token::kBIT_AND: |
2167 | FALL_THROUGH; |
2168 | case Token::kBIT_OR: |
2169 | FALL_THROUGH; |
2170 | case Token::kBIT_XOR: |
2171 | return true; |
2172 | default: |
2173 | return false; |
2174 | } |
2175 | } |
2176 | |
2177 | UnaryIntegerOpInstr* UnaryIntegerOpInstr::Make(Representation representation, |
2178 | Token::Kind op_kind, |
2179 | Value* value, |
2180 | intptr_t deopt_id, |
2181 | Range* range) { |
2182 | UnaryIntegerOpInstr* op = NULL; |
2183 | switch (representation) { |
2184 | case kTagged: |
2185 | op = new UnarySmiOpInstr(op_kind, value, deopt_id); |
2186 | break; |
2187 | case kUnboxedInt32: |
2188 | return NULL; |
2189 | case kUnboxedUint32: |
2190 | op = new UnaryUint32OpInstr(op_kind, value, deopt_id); |
2191 | break; |
2192 | case kUnboxedInt64: |
2193 | op = new UnaryInt64OpInstr(op_kind, value, deopt_id); |
2194 | break; |
2195 | default: |
2196 | UNREACHABLE(); |
2197 | return NULL; |
2198 | } |
2199 | |
2200 | if (op == NULL) { |
2201 | return op; |
2202 | } |
2203 | |
2204 | if (!Range::IsUnknown(range)) { |
2205 | op->set_range(*range); |
2206 | } |
2207 | |
2208 | ASSERT(op->representation() == representation); |
2209 | return op; |
2210 | } |
2211 | |
2212 | BinaryIntegerOpInstr* BinaryIntegerOpInstr::Make( |
2213 | Representation representation, |
2214 | Token::Kind op_kind, |
2215 | Value* left, |
2216 | Value* right, |
2217 | intptr_t deopt_id, |
2218 | bool can_overflow, |
2219 | bool is_truncating, |
2220 | Range* range, |
2221 | SpeculativeMode speculative_mode) { |
2222 | BinaryIntegerOpInstr* op = NULL; |
2223 | switch (representation) { |
2224 | case kTagged: |
2225 | op = new BinarySmiOpInstr(op_kind, left, right, deopt_id); |
2226 | break; |
2227 | case kUnboxedInt32: |
2228 | if (!BinaryInt32OpInstr::IsSupported(op_kind, left, right)) { |
2229 | return NULL; |
2230 | } |
2231 | op = new BinaryInt32OpInstr(op_kind, left, right, deopt_id); |
2232 | break; |
2233 | case kUnboxedUint32: |
2234 | if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) { |
2235 | if (speculative_mode == kNotSpeculative) { |
2236 | op = new ShiftUint32OpInstr(op_kind, left, right, deopt_id); |
2237 | } else { |
2238 | op = |
2239 | new SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id); |
2240 | } |
2241 | } else { |
2242 | op = new BinaryUint32OpInstr(op_kind, left, right, deopt_id); |
2243 | } |
2244 | break; |
2245 | case kUnboxedInt64: |
2246 | if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) { |
2247 | if (speculative_mode == kNotSpeculative) { |
2248 | op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id); |
2249 | } else { |
2250 | op = new SpeculativeShiftInt64OpInstr(op_kind, left, right, deopt_id); |
2251 | } |
2252 | } else { |
2253 | op = new BinaryInt64OpInstr(op_kind, left, right, deopt_id); |
2254 | } |
2255 | break; |
2256 | default: |
2257 | UNREACHABLE(); |
2258 | return NULL; |
2259 | } |
2260 | |
2261 | if (!Range::IsUnknown(range)) { |
2262 | op->set_range(*range); |
2263 | } |
2264 | |
2265 | op->set_can_overflow(can_overflow); |
2266 | if (is_truncating) { |
2267 | op->mark_truncating(); |
2268 | } |
2269 | |
2270 | ASSERT(op->representation() == representation); |
2271 | return op; |
2272 | } |
2273 | |
2274 | Definition* CheckedSmiOpInstr::Canonicalize(FlowGraph* flow_graph) { |
2275 | if ((left()->Type()->ToCid() == kSmiCid) && |
2276 | (right()->Type()->ToCid() == kSmiCid)) { |
2277 | Definition* replacement = NULL; |
2278 | // Operations that can't deoptimize are specialized here: These include |
2279 | // bit-wise operators and comparisons. Other arithmetic operations can |
2280 | // overflow or divide by 0 and can't be specialized unless we have extra |
2281 | // range information. |
2282 | switch (op_kind()) { |
2283 | case Token::kBIT_AND: |
2284 | FALL_THROUGH; |
2285 | case Token::kBIT_OR: |
2286 | FALL_THROUGH; |
2287 | case Token::kBIT_XOR: |
2288 | replacement = new BinarySmiOpInstr( |
2289 | op_kind(), new Value(left()->definition()), |
2290 | new Value(right()->definition()), DeoptId::kNone); |
2291 | FALL_THROUGH; |
2292 | default: |
2293 | break; |
2294 | } |
2295 | if (replacement != NULL) { |
2296 | flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue); |
2297 | return replacement; |
2298 | } |
2299 | } |
2300 | return this; |
2301 | } |
2302 | |
2303 | ComparisonInstr* CheckedSmiComparisonInstr::CopyWithNewOperands(Value* left, |
2304 | Value* right) { |
2305 | UNREACHABLE(); |
2306 | return NULL; |
2307 | } |
2308 | |
2309 | Definition* CheckedSmiComparisonInstr::Canonicalize(FlowGraph* flow_graph) { |
2310 | CompileType* left_type = left()->Type(); |
2311 | CompileType* right_type = right()->Type(); |
2312 | intptr_t op_cid = kIllegalCid; |
2313 | SpeculativeMode speculative_mode = kGuardInputs; |
2314 | |
2315 | if ((left_type->ToCid() == kSmiCid) && (right_type->ToCid() == kSmiCid)) { |
2316 | op_cid = kSmiCid; |
2317 | } else if (FlowGraphCompiler::SupportsUnboxedInt64() && |
2318 | // TODO(dartbug.com/30480): handle nullable types here |
2319 | left_type->IsNullableInt() && !left_type->is_nullable() && |
2320 | right_type->IsNullableInt() && !right_type->is_nullable()) { |
2321 | op_cid = kMintCid; |
2322 | speculative_mode = kNotSpeculative; |
2323 | } |
2324 | |
2325 | if (op_cid != kIllegalCid) { |
2326 | Definition* replacement = NULL; |
2327 | if (Token::IsRelationalOperator(kind())) { |
2328 | replacement = new RelationalOpInstr( |
2329 | token_pos(), kind(), left()->CopyWithType(), right()->CopyWithType(), |
2330 | op_cid, DeoptId::kNone, speculative_mode); |
2331 | } else if (Token::IsEqualityOperator(kind())) { |
2332 | replacement = new EqualityCompareInstr( |
2333 | token_pos(), kind(), left()->CopyWithType(), right()->CopyWithType(), |
2334 | op_cid, DeoptId::kNone, speculative_mode); |
2335 | } |
2336 | if (replacement != NULL) { |
2337 | if (FLAG_trace_strong_mode_types && (op_cid == kMintCid)) { |
        THR_Print("[Strong mode] Optimization: replacing %s with %s\n",
2339 | ToCString(), replacement->ToCString()); |
2340 | } |
2341 | flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue); |
2342 | return replacement; |
2343 | } |
2344 | } |
2345 | return this; |
2346 | } |
2347 | |
2348 | Definition* BinaryIntegerOpInstr::Canonicalize(FlowGraph* flow_graph) { |
  // If both operands are constants, evaluate this expression. This might
  // occur due to load forwarding after the constant propagation pass has
  // already run.
2352 | |
2353 | if (left()->BindsToConstant() && right()->BindsToConstant()) { |
2354 | const Integer& result = Integer::Handle(Evaluator::BinaryIntegerEvaluate( |
2355 | left()->BoundConstant(), right()->BoundConstant(), op_kind(), |
2356 | is_truncating(), representation(), Thread::Current())); |
2357 | |
2358 | if (!result.IsNull()) { |
2359 | return flow_graph->TryCreateConstantReplacementFor(this, result); |
2360 | } |
2361 | } |
2362 | |
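  // For commutative operations, move a constant operand to the right so
  // the constant-folding patterns below only need to inspect the right
  // operand.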
2363 | if (left()->BindsToConstant() && !right()->BindsToConstant() && |
2364 | IsCommutative(op_kind())) { |
2365 | Value* l = left(); |
2366 | Value* r = right(); |
2367 | SetInputAt(0, r); |
2368 | SetInputAt(1, l); |
2369 | } |
2370 | |
2371 | int64_t rhs; |
2372 | if (!Evaluator::ToIntegerConstant(right(), &rhs)) { |
2373 | return this; |
2374 | } |
2375 | |
2376 | if (is_truncating()) { |
2377 | switch (op_kind()) { |
2378 | case Token::kMUL: |
2379 | case Token::kSUB: |
2380 | case Token::kADD: |
2381 | case Token::kBIT_AND: |
2382 | case Token::kBIT_OR: |
2383 | case Token::kBIT_XOR: |
2384 | rhs = Evaluator::TruncateTo(rhs, representation()); |
2385 | break; |
2386 | default: |
2387 | break; |
2388 | } |
2389 | } |
2390 | |
2391 | switch (op_kind()) { |
2392 | case Token::kMUL: |
2393 | if (rhs == 1) { |
2394 | return left()->definition(); |
2395 | } else if (rhs == 0) { |
2396 | return right()->definition(); |
2397 | } else if (rhs == 2) { |
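        // Strength-reduce x * 2 to x << 1 when a shift instruction is
        // available for this representation.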
2398 | const int64_t shift_1 = 1; |
2399 | ConstantInstr* constant_1 = |
2400 | flow_graph->GetConstant(Smi::Handle(Smi::New(shift_1))); |
2401 | BinaryIntegerOpInstr* shift = BinaryIntegerOpInstr::Make( |
2402 | representation(), Token::kSHL, left()->CopyWithType(), |
2403 | new Value(constant_1), GetDeoptId(), can_overflow(), |
2404 | is_truncating(), range(), SpeculativeModeOfInputs()); |
2405 | if (shift != nullptr) { |
2406 | // Assign a range to the shift factor, just in case range |
2407 | // analysis no longer runs after this rewriting. |
2408 | if (auto shift_with_range = shift->AsShiftIntegerOp()) { |
2409 | shift_with_range->set_shift_range( |
2410 | new Range(RangeBoundary::FromConstant(shift_1), |
2411 | RangeBoundary::FromConstant(shift_1))); |
2412 | } |
2413 | flow_graph->InsertBefore(this, shift, env(), FlowGraph::kValue); |
2414 | return shift; |
2415 | } |
2416 | } |
2417 | |
2418 | break; |
2419 | case Token::kADD: |
2420 | if (rhs == 0) { |
2421 | return left()->definition(); |
2422 | } |
2423 | break; |
2424 | case Token::kBIT_AND: |
2425 | if (rhs == 0) { |
2426 | return right()->definition(); |
2427 | } else if (rhs == RepresentationMask(representation())) { |
2428 | return left()->definition(); |
2429 | } |
2430 | break; |
2431 | case Token::kBIT_OR: |
2432 | if (rhs == 0) { |
2433 | return left()->definition(); |
2434 | } else if (rhs == RepresentationMask(representation())) { |
2435 | return right()->definition(); |
2436 | } |
2437 | break; |
2438 | case Token::kBIT_XOR: |
2439 | if (rhs == 0) { |
2440 | return left()->definition(); |
2441 | } else if (rhs == RepresentationMask(representation())) { |
2442 | UnaryIntegerOpInstr* bit_not = UnaryIntegerOpInstr::Make( |
2443 | representation(), Token::kBIT_NOT, left()->CopyWithType(), |
2444 | GetDeoptId(), range()); |
2445 | if (bit_not != NULL) { |
2446 | flow_graph->InsertBefore(this, bit_not, env(), FlowGraph::kValue); |
2447 | return bit_not; |
2448 | } |
2449 | } |
2450 | break; |
2451 | |
2452 | case Token::kSUB: |
2453 | if (rhs == 0) { |
2454 | return left()->definition(); |
2455 | } |
2456 | break; |
2457 | |
2458 | case Token::kTRUNCDIV: |
2459 | if (rhs == 1) { |
2460 | return left()->definition(); |
2461 | } else if (rhs == -1) { |
2462 | UnaryIntegerOpInstr* negation = UnaryIntegerOpInstr::Make( |
2463 | representation(), Token::kNEGATE, left()->CopyWithType(), |
2464 | GetDeoptId(), range()); |
2465 | if (negation != NULL) { |
2466 | flow_graph->InsertBefore(this, negation, env(), FlowGraph::kValue); |
2467 | return negation; |
2468 | } |
2469 | } |
2470 | break; |
2471 | |
2472 | case Token::kMOD: |
2473 | if (std::abs(rhs) == 1) { |
2474 | return flow_graph->TryCreateConstantReplacementFor(this, |
2475 | Object::smi_zero()); |
2476 | } |
2477 | break; |
2478 | |
2479 | case Token::kSHR: |
2480 | if (rhs == 0) { |
2481 | return left()->definition(); |
2482 | } else if (rhs < 0) { |
2483 | // Instruction will always throw on negative rhs operand. |
2484 | if (!CanDeoptimize()) { |
2485 | // For non-speculative operations (no deopt), let |
2486 | // the code generator deal with throw on slowpath. |
2487 | break; |
2488 | } |
2489 | ASSERT(GetDeoptId() != DeoptId::kNone); |
2490 | DeoptimizeInstr* deopt = |
2491 | new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId()); |
2492 | flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect); |
2493 | // Replace with zero since it always throws. |
2494 | return flow_graph->TryCreateConstantReplacementFor(this, |
2495 | Object::smi_zero()); |
2496 | } |
2497 | break; |
2498 | |
2499 | case Token::kSHL: { |
2500 | const intptr_t result_bits = RepresentationBits(representation()); |
2501 | if (rhs == 0) { |
2502 | return left()->definition(); |
2503 | } else if ((rhs >= kBitsPerInt64) || |
2504 | ((rhs >= result_bits) && is_truncating())) { |
2505 | return flow_graph->TryCreateConstantReplacementFor(this, |
2506 | Object::smi_zero()); |
2507 | } else if ((rhs < 0) || ((rhs >= result_bits) && !is_truncating())) { |
2508 | // Instruction will always throw on negative rhs operand or |
2509 | // deoptimize on large rhs operand. |
2510 | if (!CanDeoptimize()) { |
2511 | // For non-speculative operations (no deopt), let |
2512 | // the code generator deal with throw on slowpath. |
2513 | break; |
2514 | } |
2515 | ASSERT(GetDeoptId() != DeoptId::kNone); |
2516 | DeoptimizeInstr* deopt = |
2517 | new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId()); |
2518 | flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect); |
2519 | // Replace with zero since it overshifted or always throws. |
2520 | return flow_graph->TryCreateConstantReplacementFor(this, |
2521 | Object::smi_zero()); |
2522 | } |
2523 | break; |
2524 | } |
2525 | |
2526 | default: |
2527 | break; |
2528 | } |
2529 | |
2530 | return this; |
2531 | } |
2532 | |
2533 | // Optimizations that eliminate or simplify individual instructions. |
2534 | Instruction* Instruction::Canonicalize(FlowGraph* flow_graph) { |
2535 | return this; |
2536 | } |
2537 | |
2538 | Definition* Definition::Canonicalize(FlowGraph* flow_graph) { |
2539 | return this; |
2540 | } |
2541 | |
2542 | Definition* RedefinitionInstr::Canonicalize(FlowGraph* flow_graph) { |
  // Must not remove redefinitions without uses until LICM: even though a
  // redefinition might not have any uses itself, it can still dominate
  // uses of the value it redefines and must serve as a barrier for those
  // uses. RenameUsesDominatedByRedefinitions would normalize the graph and
  // route those uses through this redefinition.
2548 | if (!HasUses() && !flow_graph->is_licm_allowed()) { |
2549 | return NULL; |
2550 | } |
2551 | if ((constrained_type() != nullptr) && Type()->IsEqualTo(value()->Type())) { |
2552 | return value()->definition(); |
2553 | } |
2554 | return this; |
2555 | } |
2556 | |
2557 | Instruction* CheckStackOverflowInstr::Canonicalize(FlowGraph* flow_graph) { |
2558 | switch (kind_) { |
2559 | case kOsrAndPreemption: |
2560 | return this; |
2561 | case kOsrOnly: |
2562 | // Don't need OSR entries in the optimized code. |
2563 | return NULL; |
2564 | } |
2565 | |
  // The switch above exhausts all possibilities, but some compilers can't
  // figure it out.
2568 | UNREACHABLE(); |
2569 | return this; |
2570 | } |
2571 | |
2572 | bool LoadFieldInstr::IsImmutableLengthLoad() const { |
2573 | switch (slot().kind()) { |
2574 | case Slot::Kind::kArray_length: |
2575 | case Slot::Kind::kTypedDataBase_length: |
2576 | case Slot::Kind::kString_length: |
2577 | return true; |
2578 | case Slot::Kind::kGrowableObjectArray_length: |
2579 | return false; |
2580 | |
2581 | // Not length loads. |
2582 | case Slot::Kind::kLinkedHashMap_index: |
2583 | case Slot::Kind::kLinkedHashMap_data: |
2584 | case Slot::Kind::kLinkedHashMap_hash_mask: |
2585 | case Slot::Kind::kLinkedHashMap_used_data: |
2586 | case Slot::Kind::kLinkedHashMap_deleted_keys: |
2587 | case Slot::Kind::kArgumentsDescriptor_type_args_len: |
2588 | case Slot::Kind::kArgumentsDescriptor_positional_count: |
2589 | case Slot::Kind::kArgumentsDescriptor_count: |
2590 | case Slot::Kind::kArgumentsDescriptor_size: |
2591 | case Slot::Kind::kTypeArguments: |
2592 | case Slot::Kind::kTypedDataView_offset_in_bytes: |
2593 | case Slot::Kind::kTypedDataView_data: |
2594 | case Slot::Kind::kGrowableObjectArray_data: |
2595 | case Slot::Kind::kContext_parent: |
2596 | case Slot::Kind::kClosure_context: |
2597 | case Slot::Kind::kClosure_delayed_type_arguments: |
2598 | case Slot::Kind::kClosure_function: |
2599 | case Slot::Kind::kClosure_function_type_arguments: |
2600 | case Slot::Kind::kClosure_instantiator_type_arguments: |
2601 | case Slot::Kind::kClosure_hash: |
2602 | case Slot::Kind::kCapturedVariable: |
2603 | case Slot::Kind::kDartField: |
2604 | case Slot::Kind::kPointerBase_data_field: |
2605 | case Slot::Kind::kType_arguments: |
2606 | case Slot::Kind::kTypeArgumentsIndex: |
2607 | case Slot::Kind::kUnhandledException_exception: |
2608 | case Slot::Kind::kUnhandledException_stacktrace: |
2609 | return false; |
2610 | } |
2611 | UNREACHABLE(); |
2612 | return false; |
2613 | } |
2614 | |
2615 | bool LoadFieldInstr::IsFixedLengthArrayCid(intptr_t cid) { |
2616 | if (IsTypedDataClassId(cid) || IsExternalTypedDataClassId(cid)) { |
2617 | return true; |
2618 | } |
2619 | |
2620 | switch (cid) { |
2621 | case kArrayCid: |
2622 | case kImmutableArrayCid: |
2623 | return true; |
2624 | default: |
2625 | return false; |
2626 | } |
2627 | } |
2628 | |
2629 | bool LoadFieldInstr::IsTypedDataViewFactory(const Function& function) { |
2630 | auto kind = function.recognized_kind(); |
2631 | switch (kind) { |
2632 | case MethodRecognizer::kTypedData_ByteDataView_factory: |
2633 | case MethodRecognizer::kTypedData_Int8ArrayView_factory: |
2634 | case MethodRecognizer::kTypedData_Uint8ArrayView_factory: |
2635 | case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory: |
2636 | case MethodRecognizer::kTypedData_Int16ArrayView_factory: |
2637 | case MethodRecognizer::kTypedData_Uint16ArrayView_factory: |
2638 | case MethodRecognizer::kTypedData_Int32ArrayView_factory: |
2639 | case MethodRecognizer::kTypedData_Uint32ArrayView_factory: |
2640 | case MethodRecognizer::kTypedData_Int64ArrayView_factory: |
2641 | case MethodRecognizer::kTypedData_Uint64ArrayView_factory: |
2642 | case MethodRecognizer::kTypedData_Float32ArrayView_factory: |
2643 | case MethodRecognizer::kTypedData_Float64ArrayView_factory: |
2644 | case MethodRecognizer::kTypedData_Float32x4ArrayView_factory: |
2645 | case MethodRecognizer::kTypedData_Int32x4ArrayView_factory: |
2646 | case MethodRecognizer::kTypedData_Float64x2ArrayView_factory: |
2647 | return true; |
2648 | default: |
2649 | return false; |
2650 | } |
2651 | } |
2652 | |
2653 | Definition* ConstantInstr::Canonicalize(FlowGraph* flow_graph) { |
2654 | return HasUses() ? this : NULL; |
2655 | } |
2656 | |
// A math unary instruction has a side effect (it can throw an exception)
// if the argument is not a number.
// TODO(srdjan): Eliminate if it has no uses and the input is guaranteed to
// be a number.
2660 | Definition* MathUnaryInstr::Canonicalize(FlowGraph* flow_graph) { |
2661 | return this; |
2662 | } |
2663 | |
2664 | bool LoadFieldInstr::TryEvaluateLoad(const Object& instance, |
2665 | const Slot& field, |
2666 | Object* result) { |
2667 | switch (field.kind()) { |
2668 | case Slot::Kind::kDartField: |
2669 | return TryEvaluateLoad(instance, field.field(), result); |
2670 | |
2671 | case Slot::Kind::kArgumentsDescriptor_type_args_len: |
2672 | if (instance.IsArray() && Array::Cast(instance).IsImmutable()) { |
2673 | ArgumentsDescriptor desc(Array::Cast(instance)); |
2674 | *result = Smi::New(desc.TypeArgsLen()); |
2675 | return true; |
2676 | } |
2677 | return false; |
2678 | |
2679 | default: |
2680 | break; |
2681 | } |
2682 | return false; |
2683 | } |
2684 | |
2685 | bool LoadFieldInstr::TryEvaluateLoad(const Object& instance, |
2686 | const Field& field, |
2687 | Object* result) { |
2688 | if (!field.is_final() || !instance.IsInstance()) { |
2689 | return false; |
2690 | } |
2691 | |
  // Check that the instance really has the field we are trying to load.
2694 | Class& cls = Class::Handle(instance.clazz()); |
2695 | while (cls.raw() != Class::null() && cls.raw() != field.Owner()) { |
2696 | cls = cls.SuperClass(); |
2697 | } |
2698 | if (cls.raw() != field.Owner()) { |
2699 | // Failed to find the field in class or its superclasses. |
2700 | return false; |
2701 | } |
2702 | |
2703 | // Object has the field: execute the load. |
2704 | *result = Instance::Cast(instance).GetField(field); |
2705 | return true; |
2706 | } |
2707 | |
2708 | bool LoadFieldInstr::Evaluate(const Object& instance, Object* result) { |
2709 | return TryEvaluateLoad(instance, slot(), result); |
2710 | } |
2711 | |
2712 | Definition* LoadFieldInstr::Canonicalize(FlowGraph* flow_graph) { |
2713 | if (!HasUses() && !calls_initializer()) return nullptr; |
2714 | |
2715 | if (IsImmutableLengthLoad()) { |
2716 | ASSERT(!calls_initializer()); |
2717 | Definition* array = instance()->definition()->OriginalDefinition(); |
2718 | if (StaticCallInstr* call = array->AsStaticCall()) { |
      // For fixed-length arrays, if the array is the result of a known
      // constructor call, we can replace the length load with the length
      // argument passed to the constructor.
2722 | if (call->is_known_list_constructor() && |
2723 | IsFixedLengthArrayCid(call->Type()->ToCid())) { |
2724 | return call->ArgumentAt(1); |
2725 | } else if (call->function().recognized_kind() == |
2726 | MethodRecognizer::kByteDataFactory) { |
2727 | // Similarly, we check for the ByteData constructor and forward its |
2728 | // explicit length argument appropriately. |
2729 | return call->ArgumentAt(1); |
2730 | } else if (IsTypedDataViewFactory(call->function())) { |
2731 | // Typed data view factories all take three arguments (after |
2732 | // the implicit type arguments parameter): |
2733 | // |
2734 | // 1) _TypedList buffer -- the underlying data for the view |
2735 | // 2) int offsetInBytes -- the offset into the buffer to start viewing |
2736 | // 3) int length -- the number of elements in the view |
2737 | // |
2738 | // Here, we forward the third. |
2739 | return call->ArgumentAt(3); |
2740 | } |
2741 | } else if (CreateArrayInstr* create_array = array->AsCreateArray()) { |
2742 | if (slot().kind() == Slot::Kind::kArray_length) { |
2743 | return create_array->num_elements()->definition(); |
2744 | } |
2745 | } else if (LoadFieldInstr* load_array = array->AsLoadField()) { |
2746 | // For arrays with guarded lengths, replace the length load |
2747 | // with a constant. |
2748 | const Slot& slot = load_array->slot(); |
2749 | if (slot.IsDartField()) { |
2750 | if (slot.field().guarded_list_length() >= 0) { |
2751 | return flow_graph->GetConstant( |
2752 | Smi::Handle(Smi::New(slot.field().guarded_list_length()))); |
2753 | } |
2754 | } |
2755 | } |
2756 | } else if (slot().kind() == Slot::Kind::kTypedDataView_data) { |
    // This case covers the first explicit argument to typed data view
    // factories, the data (buffer).
2759 | ASSERT(!calls_initializer()); |
2760 | Definition* array = instance()->definition()->OriginalDefinition(); |
2761 | if (StaticCallInstr* call = array->AsStaticCall()) { |
2762 | if (IsTypedDataViewFactory(call->function())) { |
2763 | return call->ArgumentAt(1); |
2764 | } |
2765 | } |
2766 | } else if (slot().kind() == Slot::Kind::kTypedDataView_offset_in_bytes) { |
    // This case covers the second explicit argument to typed data view
    // factories, the offset into the buffer.
2769 | ASSERT(!calls_initializer()); |
2770 | Definition* array = instance()->definition()->OriginalDefinition(); |
2771 | if (StaticCallInstr* call = array->AsStaticCall()) { |
2772 | if (IsTypedDataViewFactory(call->function())) { |
2773 | return call->ArgumentAt(2); |
2774 | } else if (call->function().recognized_kind() == |
2775 | MethodRecognizer::kByteDataFactory) { |
2776 | // A _ByteDataView returned from the ByteData constructor always |
2777 | // has an offset of 0. |
2778 | return flow_graph->GetConstant(Object::smi_zero()); |
2779 | } |
2780 | } |
2781 | } else if (slot().IsTypeArguments()) { |
2782 | ASSERT(!calls_initializer()); |
2783 | Definition* array = instance()->definition()->OriginalDefinition(); |
2784 | if (StaticCallInstr* call = array->AsStaticCall()) { |
2785 | if (call->is_known_list_constructor()) { |
2786 | return call->ArgumentAt(0); |
2787 | } else if (IsTypedDataViewFactory(call->function())) { |
2788 | return flow_graph->constant_null(); |
2789 | } |
2790 | switch (call->function().recognized_kind()) { |
2791 | case MethodRecognizer::kByteDataFactory: |
2792 | case MethodRecognizer::kLinkedHashMap_getData: |
2793 | return flow_graph->constant_null(); |
2794 | default: |
2795 | break; |
2796 | } |
2797 | } else if (CreateArrayInstr* create_array = array->AsCreateArray()) { |
2798 | return create_array->element_type()->definition(); |
2799 | } else if (LoadFieldInstr* load_array = array->AsLoadField()) { |
2800 | const Slot& slot = load_array->slot(); |
2801 | switch (slot.kind()) { |
2802 | case Slot::Kind::kDartField: { |
2803 | // For trivially exact fields we know that type arguments match |
2804 | // static type arguments exactly. |
2805 | const Field& field = slot.field(); |
2806 | if (field.static_type_exactness_state().IsTriviallyExact()) { |
2807 | return flow_graph->GetConstant(TypeArguments::Handle( |
2808 | AbstractType::Handle(field.type()).arguments())); |
2809 | } |
2810 | break; |
2811 | } |
2812 | |
2813 | case Slot::Kind::kLinkedHashMap_data: |
2814 | return flow_graph->constant_null(); |
2815 | |
2816 | default: |
2817 | break; |
2818 | } |
2819 | } |
2820 | } |
2821 | |
2822 | // Try folding away loads from constant objects. |
2823 | if (instance()->BindsToConstant()) { |
2824 | Object& result = Object::Handle(); |
2825 | if (Evaluate(instance()->BoundConstant(), &result)) { |
2826 | if (result.IsSmi() || result.IsOld()) { |
2827 | return flow_graph->GetConstant(result); |
2828 | } |
2829 | } |
2830 | } |
2831 | |
2832 | return this; |
2833 | } |
2834 | |
2835 | Definition* AssertBooleanInstr::Canonicalize(FlowGraph* flow_graph) { |
2836 | if (FLAG_eliminate_type_checks) { |
2837 | if (value()->Type()->ToCid() == kBoolCid) { |
2838 | return value()->definition(); |
2839 | } |
2840 | |
    // In strong mode the type is already verified either by static analysis
    // or by runtime checks, so AssertBoolean just ensures that the value is
    // not null.
2843 | if (!value()->Type()->is_nullable()) { |
2844 | return value()->definition(); |
2845 | } |
2846 | } |
2847 | |
2848 | return this; |
2849 | } |
2850 | |
2851 | Definition* AssertAssignableInstr::Canonicalize(FlowGraph* flow_graph) { |
2852 | // We need dst_type() to be a constant AbstractType to perform any |
2853 | // canonicalization. |
2854 | if (!dst_type()->BindsToConstant()) return this; |
2855 | const auto& abs_type = AbstractType::Cast(dst_type()->BoundConstant()); |
2856 | |
2857 | if (abs_type.IsTopTypeForSubtyping() || |
2858 | (FLAG_eliminate_type_checks && |
2859 | value()->Type()->IsAssignableTo(abs_type))) { |
2860 | return value()->definition(); |
2861 | } |
2862 | if (abs_type.IsInstantiated()) { |
2863 | return this; |
2864 | } |
2865 | |
2866 | // For uninstantiated target types: If the instantiator and function |
2867 | // type arguments are constant, instantiate the target type here. |
2868 | // Note: these constant type arguments might not necessarily correspond |
2869 | // to the correct instantiator because AssertAssignable might |
2870 | // be located in the unreachable part of the graph (e.g. |
2871 | // it might be dominated by CheckClass that always fails). |
2872 | // This means that the code below must guard against such possibility. |
2873 | Zone* Z = Thread::Current()->zone(); |
2874 | |
2875 | const TypeArguments* instantiator_type_args = nullptr; |
2876 | const TypeArguments* function_type_args = nullptr; |
2877 | |
2878 | if (instantiator_type_arguments()->BindsToConstant()) { |
2879 | const Object& val = instantiator_type_arguments()->BoundConstant(); |
2880 | instantiator_type_args = (val.raw() == TypeArguments::null()) |
2881 | ? &TypeArguments::null_type_arguments() |
2882 | : &TypeArguments::Cast(val); |
2883 | } |
2884 | |
2885 | if (function_type_arguments()->BindsToConstant()) { |
2886 | const Object& val = function_type_arguments()->BoundConstant(); |
    function_type_args = (val.raw() == TypeArguments::null())
                             ? &TypeArguments::null_type_arguments()
                             : &TypeArguments::Cast(val);
2891 | } |
2892 | |
  // If instantiator_type_args is not constant, try to match the pattern
  // obj.field.:type_arguments, where the field's static type exactness
  // state tells us that all values stored in the field have an exact
  // superclass. In this case we know the prefix of the actual type
  // arguments vector and can try to instantiate the type using just the
  // prefix.
2898 | // |
2899 | // Note: TypeParameter::InstantiateFrom returns an error if we try |
2900 | // to instantiate it from a vector that is too short. |
2901 | if (instantiator_type_args == nullptr) { |
2902 | if (LoadFieldInstr* load_type_args = |
2903 | instantiator_type_arguments()->definition()->AsLoadField()) { |
2904 | if (load_type_args->slot().IsTypeArguments()) { |
2905 | if (LoadFieldInstr* load_field = load_type_args->instance() |
2906 | ->definition() |
2907 | ->OriginalDefinition() |
2908 | ->AsLoadField()) { |
2909 | if (load_field->slot().IsDartField() && |
2910 | load_field->slot() |
2911 | .field() |
2912 | .static_type_exactness_state() |
2913 | .IsHasExactSuperClass()) { |
2914 | instantiator_type_args = &TypeArguments::Handle( |
2915 | Z, AbstractType::Handle(Z, load_field->slot().field().type()) |
2916 | .arguments()); |
2917 | } |
2918 | } |
2919 | } |
2920 | } |
2921 | } |
2922 | |
2923 | if ((instantiator_type_args != nullptr) && (function_type_args != nullptr)) { |
2924 | AbstractType& new_dst_type = AbstractType::Handle( |
2925 | Z, abs_type.InstantiateFrom(*instantiator_type_args, |
2926 | *function_type_args, kAllFree, Heap::kOld)); |
2927 | if (new_dst_type.IsNull()) { |
2928 | // Failed instantiation in dead code. |
2929 | return this; |
2930 | } |
2931 | if (new_dst_type.IsTypeRef()) { |
2932 | new_dst_type = TypeRef::Cast(new_dst_type).type(); |
2933 | } |
2934 | new_dst_type = new_dst_type.Canonicalize(); |
2935 | |
2936 | // Successfully instantiated destination type: update the type attached |
2937 | // to this instruction and set type arguments to null because we no |
2938 | // longer need them (the type was instantiated). |
2939 | dst_type()->BindTo(flow_graph->GetConstant(new_dst_type)); |
2940 | instantiator_type_arguments()->BindTo(flow_graph->constant_null()); |
2941 | function_type_arguments()->BindTo(flow_graph->constant_null()); |
2942 | |
2943 | if (new_dst_type.IsTopTypeForSubtyping() || |
2944 | (FLAG_eliminate_type_checks && |
2945 | value()->Type()->IsAssignableTo(new_dst_type))) { |
2946 | return value()->definition(); |
2947 | } |
2948 | } |
2949 | return this; |
2950 | } |
2951 | |
2952 | Definition* InstantiateTypeArgumentsInstr::Canonicalize(FlowGraph* flow_graph) { |
2953 | return HasUses() ? this : NULL; |
2954 | } |
2955 | |
2956 | LocationSummary* DebugStepCheckInstr::MakeLocationSummary(Zone* zone, |
2957 | bool opt) const { |
2958 | const intptr_t kNumInputs = 0; |
2959 | const intptr_t kNumTemps = 0; |
2960 | LocationSummary* locs = new (zone) |
2961 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
2962 | return locs; |
2963 | } |
2964 | |
2965 | Instruction* DebugStepCheckInstr::Canonicalize(FlowGraph* flow_graph) { |
2966 | return NULL; |
2967 | } |
2968 | |
2969 | Definition* BoxInstr::Canonicalize(FlowGraph* flow_graph) { |
2970 | if (input_use_list() == nullptr) { |
2971 | // Environments can accommodate any representation. No need to box. |
2972 | return value()->definition(); |
2973 | } |
2974 | |
  // Fold away Box<rep>(Unbox<rep>(v)) if the value is known to be of the
  // right class.
2977 | UnboxInstr* unbox_defn = value()->definition()->AsUnbox(); |
2978 | if ((unbox_defn != NULL) && |
2979 | (unbox_defn->representation() == from_representation()) && |
2980 | (unbox_defn->value()->Type()->ToCid() == Type()->ToCid())) { |
2981 | return unbox_defn->value()->definition(); |
2982 | } |
2983 | |
2984 | return this; |
2985 | } |
2986 | |
2987 | bool BoxIntegerInstr::ValueFitsSmi() const { |
2988 | Range* range = value()->definition()->range(); |
2989 | return RangeUtils::Fits(range, RangeBoundary::kRangeBoundarySmi); |
2990 | } |
2991 | |
2992 | Definition* BoxIntegerInstr::Canonicalize(FlowGraph* flow_graph) { |
2993 | if (input_use_list() == nullptr) { |
2994 | // Environments can accommodate any representation. No need to box. |
2995 | return value()->definition(); |
2996 | } |
2997 | |
2998 | return this; |
2999 | } |
3000 | |
3001 | Definition* BoxInt64Instr::Canonicalize(FlowGraph* flow_graph) { |
3002 | Definition* replacement = BoxIntegerInstr::Canonicalize(flow_graph); |
3003 | if (replacement != this) { |
3004 | return replacement; |
3005 | } |
3006 | |
3007 | // For all x, box(unbox(x)) = x. |
3008 | if (auto unbox = value()->definition()->AsUnboxInt64()) { |
3009 | if (unbox->SpeculativeModeOfInputs() == kNotSpeculative) { |
3010 | return unbox->value()->definition(); |
3011 | } |
3012 | } else if (auto unbox = value()->definition()->AsUnboxedConstant()) { |
3013 | return flow_graph->GetConstant(unbox->value()); |
3014 | } |
3015 | |
3016 | // Find a more precise box instruction. |
3017 | if (auto conv = value()->definition()->AsIntConverter()) { |
3018 | Definition* replacement; |
3019 | if (conv->from() == kUntagged) { |
3020 | return this; |
3021 | } |
3022 | switch (conv->from()) { |
3023 | case kUnboxedInt32: |
3024 | replacement = new BoxInt32Instr(conv->value()->CopyWithType()); |
3025 | break; |
3026 | case kUnboxedUint32: |
3027 | replacement = new BoxUint32Instr(conv->value()->CopyWithType()); |
3028 | break; |
3029 | default: |
3030 | UNREACHABLE(); |
3031 | break; |
3032 | } |
3033 | flow_graph->InsertBefore(this, replacement, NULL, FlowGraph::kValue); |
3034 | return replacement; |
3035 | } |
3036 | |
3037 | return this; |
3038 | } |
3039 | |
3040 | Definition* UnboxInstr::Canonicalize(FlowGraph* flow_graph) { |
3041 | if (!HasUses() && !CanDeoptimize()) return NULL; |
3042 | |
3043 | // Fold away Unbox<rep>(Box<rep>(v)). |
3044 | BoxInstr* box_defn = value()->definition()->AsBox(); |
3045 | if ((box_defn != NULL) && |
3046 | (box_defn->from_representation() == representation())) { |
3047 | return box_defn->value()->definition(); |
3048 | } |
3049 | |
3050 | if (representation() == kUnboxedDouble && value()->BindsToConstant()) { |
3051 | UnboxedConstantInstr* uc = NULL; |
3052 | |
3053 | const Object& val = value()->BoundConstant(); |
3054 | if (val.IsSmi()) { |
3055 | const Double& double_val = Double::ZoneHandle( |
3056 | flow_graph->zone(), |
3057 | Double::NewCanonical(Smi::Cast(val).AsDoubleValue())); |
3058 | uc = new UnboxedConstantInstr(double_val, kUnboxedDouble); |
3059 | } else if (val.IsDouble()) { |
3060 | uc = new UnboxedConstantInstr(val, kUnboxedDouble); |
3061 | } |
3062 | |
3063 | if (uc != NULL) { |
3064 | flow_graph->InsertBefore(this, uc, NULL, FlowGraph::kValue); |
3065 | return uc; |
3066 | } |
3067 | } |
3068 | |
3069 | return this; |
3070 | } |
3071 | |
3072 | Definition* UnboxIntegerInstr::Canonicalize(FlowGraph* flow_graph) { |
3073 | if (!HasUses() && !CanDeoptimize()) return NULL; |
3074 | |
3075 | // Do not attempt to fold this instruction if we have not matched |
3076 | // input/output representations yet. |
3077 | if (HasUnmatchedInputRepresentations()) { |
3078 | return this; |
3079 | } |
3080 | |
3081 | // Fold away UnboxInteger<rep_to>(BoxInteger<rep_from>(v)). |
3082 | BoxIntegerInstr* box_defn = value()->definition()->AsBoxInteger(); |
3083 | if (box_defn != NULL && !box_defn->HasUnmatchedInputRepresentations()) { |
3084 | Representation from_representation = |
3085 | box_defn->value()->definition()->representation(); |
3086 | if (from_representation == representation()) { |
3087 | return box_defn->value()->definition(); |
3088 | } else { |
      // Representations differ: insert an explicit conversion instead.
3090 | IntConverterInstr* converter = new IntConverterInstr( |
3091 | from_representation, representation(), |
3092 | box_defn->value()->CopyWithType(), |
3093 | (representation() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone); |
      // TODO(vegorov): marking the resulting converter as truncating when
      // unboxing can't deoptimize is a workaround for the missing
      // deoptimization environment when we insert the converter after
      // EliminateEnvironments, and for the mismatch between the predicates
      // UnboxIntConverterInstr::CanDeoptimize and UnboxInt32::CanDeoptimize.
3099 | if ((representation() == kUnboxedInt32) && |
3100 | (is_truncating() || !CanDeoptimize())) { |
3101 | converter->mark_truncating(); |
3102 | } |
3103 | flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue); |
3104 | return converter; |
3105 | } |
3106 | } |
3107 | |
3108 | return this; |
3109 | } |
3110 | |
3111 | Definition* UnboxInt32Instr::Canonicalize(FlowGraph* flow_graph) { |
3112 | Definition* replacement = UnboxIntegerInstr::Canonicalize(flow_graph); |
3113 | if (replacement != this) { |
3114 | return replacement; |
3115 | } |
3116 | |
3117 | ConstantInstr* c = value()->definition()->AsConstant(); |
3118 | if ((c != NULL) && c->value().IsSmi()) { |
3119 | if (!is_truncating()) { |
3120 | // Check that constant fits into 32-bit integer. |
3121 | const int64_t value = static_cast<int64_t>(Smi::Cast(c->value()).Value()); |
3122 | if (!Utils::IsInt(32, value)) { |
3123 | return this; |
3124 | } |
3125 | } |
3126 | |
3127 | UnboxedConstantInstr* uc = |
3128 | new UnboxedConstantInstr(c->value(), kUnboxedInt32); |
3129 | if (c->range() != NULL) { |
3130 | uc->set_range(*c->range()); |
3131 | } |
3132 | flow_graph->InsertBefore(this, uc, NULL, FlowGraph::kValue); |
3133 | return uc; |
3134 | } |
3135 | |
3136 | return this; |
3137 | } |
3138 | |
3139 | Definition* UnboxInt64Instr::Canonicalize(FlowGraph* flow_graph) { |
3140 | Definition* replacement = UnboxIntegerInstr::Canonicalize(flow_graph); |
3141 | if (replacement != this) { |
3142 | return replacement; |
3143 | } |
3144 | |
3145 | // Currently we perform this only on 64-bit architectures. |
3146 | if (compiler::target::kBitsPerWord == 64) { |
3147 | ConstantInstr* c = value()->definition()->AsConstant(); |
3148 | if (c != NULL && (c->value().IsSmi() || c->value().IsMint())) { |
3149 | UnboxedConstantInstr* uc = |
3150 | new UnboxedConstantInstr(c->value(), kUnboxedInt64); |
3151 | if (c->range() != NULL) { |
3152 | uc->set_range(*c->range()); |
3153 | } |
3154 | flow_graph->InsertBefore(this, uc, NULL, FlowGraph::kValue); |
3155 | return uc; |
3156 | } |
3157 | } |
3158 | |
3159 | return this; |
3160 | } |
3161 | |
3162 | Definition* IntConverterInstr::Canonicalize(FlowGraph* flow_graph) { |
3163 | if (!HasUses()) return NULL; |
3164 | |
3165 | IntConverterInstr* box_defn = value()->definition()->AsIntConverter(); |
3166 | if ((box_defn != NULL) && (box_defn->representation() == from())) { |
    // Do not erase truncating conversions from a 64-bit value to a 32-bit
    // value, because such conversions discard the upper 32 bits.
3169 | if ((box_defn->from() == kUnboxedInt64) && box_defn->is_truncating()) { |
3170 | return this; |
3171 | } |
3172 | |
3173 | // It's safe to discard any other conversions from and then back to the same |
3174 | // integer type. |
3175 | if (box_defn->from() == to()) { |
3176 | return box_defn->value()->definition(); |
3177 | } |
3178 | |
3179 | // Do not merge conversions where the first starts from Untagged or the |
3180 | // second ends at Untagged, since we expect to see either UnboxedIntPtr |
3181 | // or UnboxedFfiIntPtr as the other type in an Untagged conversion. |
3182 | if ((box_defn->from() == kUntagged) || (to() == kUntagged)) { |
3183 | return this; |
3184 | } |
3185 | |
3186 | IntConverterInstr* converter = new IntConverterInstr( |
3187 | box_defn->from(), representation(), box_defn->value()->CopyWithType(), |
3188 | (to() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone); |
3189 | if ((representation() == kUnboxedInt32) && is_truncating()) { |
3190 | converter->mark_truncating(); |
3191 | } |
3192 | flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue); |
3193 | return converter; |
3194 | } |
3195 | |
3196 | UnboxInt64Instr* unbox_defn = value()->definition()->AsUnboxInt64(); |
3197 | if (unbox_defn != NULL && (from() == kUnboxedInt64) && |
3198 | (to() == kUnboxedInt32) && unbox_defn->HasOnlyInputUse(value())) { |
    // TODO(vegorov): there is duplication of code between UnboxedIntConverter
    // and the code path that unboxes Mint into Int32. We should just schedule
    // these instructions close to each other instead of fusing them.
3202 | Definition* replacement = |
3203 | new UnboxInt32Instr(is_truncating() ? UnboxInt32Instr::kTruncate |
3204 | : UnboxInt32Instr::kNoTruncation, |
3205 | unbox_defn->value()->CopyWithType(), GetDeoptId()); |
3206 | flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue); |
3207 | return replacement; |
3208 | } |
3209 | |
3210 | return this; |
3211 | } |
3212 | |
// Tests for an FP comparison that cannot be negated
// (to preserve NaN semantics).
3215 | static bool IsFpCompare(ComparisonInstr* comp) { |
3216 | if (comp->IsRelationalOp()) { |
3217 | return comp->operation_cid() == kDoubleCid; |
3218 | } |
3219 | return false; |
3220 | } |
3221 | |
3222 | Definition* BooleanNegateInstr::Canonicalize(FlowGraph* flow_graph) { |
3223 | Definition* defn = value()->definition(); |
3224 | // Convert e.g. !(x > y) into (x <= y) for non-FP x, y. |
3225 | if (defn->IsComparison() && defn->HasOnlyUse(value()) && |
3226 | defn->Type()->ToCid() == kBoolCid) { |
3227 | ComparisonInstr* comp = defn->AsComparison(); |
3228 | if (!IsFpCompare(comp)) { |
3229 | comp->NegateComparison(); |
3230 | return defn; |
3231 | } |
3232 | } |
3233 | return this; |
3234 | } |
3235 | |
3236 | static bool MayBeBoxableNumber(intptr_t cid) { |
3237 | return (cid == kDynamicCid) || (cid == kMintCid) || (cid == kDoubleCid); |
3238 | } |
3239 | |
3240 | static bool MayBeNumber(CompileType* type) { |
3241 | if (type->IsNone()) { |
3242 | return false; |
3243 | } |
3244 | const AbstractType& unwrapped_type = |
3245 | AbstractType::Handle(type->ToAbstractType()->UnwrapFutureOr()); |
3246 | // Note that type 'Number' is a subtype of itself. |
3247 | return unwrapped_type.IsTopTypeForSubtyping() || |
3248 | unwrapped_type.IsObjectType() || unwrapped_type.IsTypeParameter() || |
3249 | unwrapped_type.IsSubtypeOf(Type::Handle(Type::Number()), Heap::kOld); |
3250 | } |
3251 | |
// Returns a replacement for a strict comparison and signals whether the
// result has to be negated.
3254 | static Definition* CanonicalizeStrictCompare(StrictCompareInstr* compare, |
3255 | bool* negated, |
3256 | bool is_branch) { |
  // Use propagated cid and type information to eliminate number checks.
  // If one of the inputs is not a boxable number (Mint, Double) or is not
  // a subtype of num, there is no need for number checks.
3260 | if (compare->needs_number_check()) { |
3261 | if (!MayBeBoxableNumber(compare->left()->Type()->ToCid()) || |
3262 | !MayBeBoxableNumber(compare->right()->Type()->ToCid())) { |
3263 | compare->set_needs_number_check(false); |
3264 | } else if (!MayBeNumber(compare->left()->Type()) || |
3265 | !MayBeNumber(compare->right()->Type())) { |
3266 | compare->set_needs_number_check(false); |
3267 | } |
3268 | } |
3269 | *negated = false; |
3270 | PassiveObject& constant = PassiveObject::Handle(); |
3271 | Value* other = NULL; |
3272 | if (compare->right()->BindsToConstant()) { |
3273 | constant = compare->right()->BoundConstant().raw(); |
3274 | other = compare->left(); |
3275 | } else if (compare->left()->BindsToConstant()) { |
3276 | constant = compare->left()->BoundConstant().raw(); |
3277 | other = compare->right(); |
3278 | } else { |
3279 | return compare; |
3280 | } |
3281 | |
3282 | const bool can_merge = is_branch || (other->Type()->ToCid() == kBoolCid); |
3283 | Definition* other_defn = other->definition(); |
3284 | Token::Kind kind = compare->kind(); |
3285 | // Handle e === true. |
3286 | if ((kind == Token::kEQ_STRICT) && (constant.raw() == Bool::True().raw()) && |
3287 | can_merge) { |
3288 | return other_defn; |
3289 | } |
3290 | // Handle e !== false. |
3291 | if ((kind == Token::kNE_STRICT) && (constant.raw() == Bool::False().raw()) && |
3292 | can_merge) { |
3293 | return other_defn; |
3294 | } |
3295 | // Handle e !== true. |
3296 | if ((kind == Token::kNE_STRICT) && (constant.raw() == Bool::True().raw()) && |
3297 | other_defn->IsComparison() && can_merge && |
3298 | other_defn->HasOnlyUse(other)) { |
3299 | ComparisonInstr* comp = other_defn->AsComparison(); |
3300 | if (!IsFpCompare(comp)) { |
3301 | *negated = true; |
3302 | return other_defn; |
3303 | } |
3304 | } |
3305 | // Handle e === false. |
3306 | if ((kind == Token::kEQ_STRICT) && (constant.raw() == Bool::False().raw()) && |
3307 | other_defn->IsComparison() && can_merge && |
3308 | other_defn->HasOnlyUse(other)) { |
3309 | ComparisonInstr* comp = other_defn->AsComparison(); |
3310 | if (!IsFpCompare(comp)) { |
3311 | *negated = true; |
3312 | return other_defn; |
3313 | } |
3314 | } |
3315 | return compare; |
3316 | } |
3317 | |
3318 | static bool BindsToGivenConstant(Value* v, intptr_t expected) { |
3319 | return v->BindsToConstant() && v->BoundConstant().IsSmi() && |
3320 | (Smi::Cast(v->BoundConstant()).Value() == expected); |
3321 | } |
3322 | |
3323 | // Recognize patterns (a & b) == 0 and (a & 2^n) != 2^n. |
3324 | static bool RecognizeTestPattern(Value* left, Value* right, bool* negate) { |
3325 | if (!right->BindsToConstant() || !right->BoundConstant().IsSmi()) { |
3326 | return false; |
3327 | } |
3328 | |
3329 | const intptr_t value = Smi::Cast(right->BoundConstant()).Value(); |
3330 | if ((value != 0) && !Utils::IsPowerOfTwo(value)) { |
3331 | return false; |
3332 | } |
3333 | |
3334 | BinarySmiOpInstr* mask_op = left->definition()->AsBinarySmiOp(); |
3335 | if ((mask_op == NULL) || (mask_op->op_kind() != Token::kBIT_AND) || |
3336 | !mask_op->HasOnlyUse(left)) { |
3337 | return false; |
3338 | } |
3339 | |
3340 | if (value == 0) { |
3341 | // Recognized (a & b) == 0 pattern. |
3342 | *negate = false; |
3343 | return true; |
3344 | } |
3345 | |
3347 | if (BindsToGivenConstant(mask_op->left(), value) || |
3348 | BindsToGivenConstant(mask_op->right(), value)) { |
    // Recognized the (a & 2^n) == 2^n pattern. It's equivalent to
    // (a & 2^n) != 0, so we need to negate the original comparison.
3351 | *negate = true; |
3352 | return true; |
3353 | } |
3354 | |
3355 | return false; |
3356 | } |
3357 | |
3358 | Instruction* BranchInstr::Canonicalize(FlowGraph* flow_graph) { |
3359 | Zone* zone = flow_graph->zone(); |
3360 | // Only handle strict-compares. |
3361 | if (comparison()->IsStrictCompare()) { |
3362 | bool negated = false; |
3363 | Definition* replacement = CanonicalizeStrictCompare( |
3364 | comparison()->AsStrictCompare(), &negated, /* is_branch = */ true); |
3365 | if (replacement == comparison()) { |
3366 | return this; |
3367 | } |
3368 | ComparisonInstr* comp = replacement->AsComparison(); |
3369 | if ((comp == NULL) || comp->CanDeoptimize() || |
3370 | comp->HasUnmatchedInputRepresentations()) { |
3371 | return this; |
3372 | } |
3373 | |
3374 | // Replace the comparison if the replacement is used at this branch, |
3375 | // and has exactly one use. |
3376 | Value* use = comp->input_use_list(); |
3377 | if ((use->instruction() == this) && comp->HasOnlyUse(use)) { |
3378 | if (negated) { |
3379 | comp->NegateComparison(); |
3380 | } |
3381 | RemoveEnvironment(); |
3382 | flow_graph->CopyDeoptTarget(this, comp); |
3383 | // Unlink environment from the comparison since it is copied to the |
3384 | // branch instruction. |
3385 | comp->RemoveEnvironment(); |
3386 | |
3387 | comp->RemoveFromGraph(); |
3388 | SetComparison(comp); |
3389 | if (FLAG_trace_optimization) { |
3390 | THR_Print("Merging comparison v%" Pd "\n" , comp->ssa_temp_index()); |
3391 | } |
3392 | // Clear the comparison's temp index and ssa temp index since the |
3393 | // value of the comparison is not used outside the branch anymore. |
3394 | ASSERT(comp->input_use_list() == NULL); |
3395 | comp->ClearSSATempIndex(); |
3396 | comp->ClearTempIndex(); |
3397 | } |
3398 | } else if (comparison()->IsEqualityCompare() && |
3399 | comparison()->operation_cid() == kSmiCid) { |
3400 | BinarySmiOpInstr* bit_and = NULL; |
3401 | bool negate = false; |
3402 | if (RecognizeTestPattern(comparison()->left(), comparison()->right(), |
3403 | &negate)) { |
3404 | bit_and = comparison()->left()->definition()->AsBinarySmiOp(); |
3405 | } else if (RecognizeTestPattern(comparison()->right(), comparison()->left(), |
3406 | &negate)) { |
3407 | bit_and = comparison()->right()->definition()->AsBinarySmiOp(); |
3408 | } |
3409 | if (bit_and != NULL) { |
3410 | if (FLAG_trace_optimization) { |
3411 | THR_Print("Merging test smi v%" Pd "\n" , bit_and->ssa_temp_index()); |
3412 | } |
3413 | TestSmiInstr* test = new TestSmiInstr( |
3414 | comparison()->token_pos(), |
3415 | negate ? Token::NegateComparison(comparison()->kind()) |
3416 | : comparison()->kind(), |
3417 | bit_and->left()->Copy(zone), bit_and->right()->Copy(zone)); |
3418 | ASSERT(!CanDeoptimize()); |
3419 | RemoveEnvironment(); |
3420 | flow_graph->CopyDeoptTarget(this, bit_and); |
3421 | SetComparison(test); |
3422 | bit_and->RemoveFromGraph(); |
3423 | } |
3424 | } |
3425 | return this; |
3426 | } |
3427 | |
3428 | Definition* StrictCompareInstr::Canonicalize(FlowGraph* flow_graph) { |
3429 | if (!HasUses()) return NULL; |
3430 | bool negated = false; |
3431 | Definition* replacement = CanonicalizeStrictCompare(this, &negated, |
3432 | /* is_branch = */ false); |
3433 | if (negated && replacement->IsComparison()) { |
3434 | ASSERT(replacement != this); |
3435 | replacement->AsComparison()->NegateComparison(); |
3436 | } |
3437 | return replacement; |
3438 | } |
3439 | |
3440 | Instruction* CheckClassInstr::Canonicalize(FlowGraph* flow_graph) { |
3441 | const intptr_t value_cid = value()->Type()->ToCid(); |
3442 | if (value_cid == kDynamicCid) { |
3443 | return this; |
3444 | } |
3445 | |
3446 | return cids().HasClassId(value_cid) ? NULL : this; |
3447 | } |
3448 | |
3449 | Definition* LoadClassIdInstr::Canonicalize(FlowGraph* flow_graph) { |
3450 | // TODO(dartbug.com/40188): Allow this to canonicalize into an untagged |
3451 | // constant and make a subsequent DispatchTableCallInstr canonicalize into a |
3452 | // StaticCall. |
3453 | if (representation() == kUntagged) return this; |
3454 | const intptr_t cid = object()->Type()->ToCid(); |
3455 | if (cid != kDynamicCid) { |
3456 | const auto& smi = Smi::ZoneHandle(flow_graph->zone(), Smi::New(cid)); |
3457 | return flow_graph->GetConstant(smi); |
3458 | } |
3459 | return this; |
3460 | } |
3461 | |
3462 | Instruction* CheckClassIdInstr::Canonicalize(FlowGraph* flow_graph) { |
3463 | if (value()->BindsToConstant()) { |
3464 | const Object& constant_value = value()->BoundConstant(); |
3465 | if (constant_value.IsSmi() && |
3466 | cids_.Contains(Smi::Cast(constant_value).Value())) { |
3467 | return NULL; |
3468 | } |
3469 | } |
3470 | return this; |
3471 | } |
3472 | |
3473 | TestCidsInstr::TestCidsInstr(TokenPosition token_pos, |
3474 | Token::Kind kind, |
3475 | Value* value, |
3476 | const ZoneGrowableArray<intptr_t>& cid_results, |
3477 | intptr_t deopt_id) |
3478 | : TemplateComparison(token_pos, kind, deopt_id), |
3479 | cid_results_(cid_results), |
3480 | licm_hoisted_(false) { |
3481 | ASSERT((kind == Token::kIS) || (kind == Token::kISNOT)); |
3482 | SetInputAt(0, value); |
3483 | set_operation_cid(kObjectCid); |
3484 | #ifdef DEBUG |
3485 | ASSERT(cid_results[0] == kSmiCid); |
3486 | if (deopt_id == DeoptId::kNone) { |
3487 | // The entry for Smi can be special, but all other entries have |
3488 | // to match in the no-deopt case. |
3489 | for (intptr_t i = 4; i < cid_results.length(); i += 2) { |
3490 | ASSERT(cid_results[i + 1] == cid_results[3]); |
3491 | } |
3492 | } |
3493 | #endif |
3494 | } |
3495 | |
3496 | Definition* TestCidsInstr::Canonicalize(FlowGraph* flow_graph) { |
3497 | CompileType* in_type = left()->Type(); |
3498 | intptr_t cid = in_type->ToCid(); |
3499 | if (cid == kDynamicCid) return this; |
3500 | |
3501 | const ZoneGrowableArray<intptr_t>& data = cid_results(); |
3502 | const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; |
3503 | for (intptr_t i = 0; i < data.length(); i += 2) { |
3504 | if (data[i] == cid) { |
3505 | return (data[i + 1] == true_result) |
3506 | ? flow_graph->GetConstant(Bool::True()) |
3507 | : flow_graph->GetConstant(Bool::False()); |
3508 | } |
3509 | } |
3510 | |
3511 | if (!CanDeoptimize()) { |
3512 | ASSERT(deopt_id() == DeoptId::kNone); |
3513 | return (data[data.length() - 1] == true_result) |
3514 | ? flow_graph->GetConstant(Bool::False()) |
3515 | : flow_graph->GetConstant(Bool::True()); |
3516 | } |
3517 | |
3518 | // TODO(sra): Handle nullable input, possibly canonicalizing to a compare |
3519 | // against `null`. |
3520 | return this; |
3521 | } |
3522 | |
3523 | Instruction* GuardFieldClassInstr::Canonicalize(FlowGraph* flow_graph) { |
3524 | if (field().guarded_cid() == kDynamicCid) { |
3525 | return NULL; // Nothing to guard. |
3526 | } |
3527 | |
3528 | if (field().is_nullable() && value()->Type()->IsNull()) { |
3529 | return NULL; |
3530 | } |
3531 | |
3532 | const intptr_t cid = field().is_nullable() ? value()->Type()->ToNullableCid() |
3533 | : value()->Type()->ToCid(); |
3534 | if (field().guarded_cid() == cid) { |
3535 | return NULL; // Value is guaranteed to have this cid. |
3536 | } |
3537 | |
3538 | return this; |
3539 | } |
3540 | |
3541 | Instruction* GuardFieldLengthInstr::Canonicalize(FlowGraph* flow_graph) { |
3542 | if (!field().needs_length_check()) { |
3543 | return NULL; // Nothing to guard. |
3544 | } |
3545 | |
3546 | const intptr_t expected_length = field().guarded_list_length(); |
3547 | if (expected_length == Field::kUnknownFixedLength) { |
3548 | return this; |
3549 | } |
3550 | |
3551 | // Check if length is statically known. |
3552 | StaticCallInstr* call = value()->definition()->AsStaticCall(); |
3553 | if (call == NULL) { |
3554 | return this; |
3555 | } |
3556 | |
3557 | ConstantInstr* length = NULL; |
3558 | if (call->is_known_list_constructor() && |
3559 | LoadFieldInstr::IsFixedLengthArrayCid(call->Type()->ToCid())) { |
3560 | length = call->ArgumentAt(1)->AsConstant(); |
3561 | } else if (call->function().recognized_kind() == |
3562 | MethodRecognizer::kByteDataFactory) { |
3563 | length = call->ArgumentAt(1)->AsConstant(); |
3564 | } else if (LoadFieldInstr::IsTypedDataViewFactory(call->function())) { |
3565 | length = call->ArgumentAt(3)->AsConstant(); |
3566 | } |
3567 | if ((length != NULL) && length->value().IsSmi() && |
3568 | Smi::Cast(length->value()).Value() == expected_length) { |
3569 | return NULL; // Expected length matched. |
3570 | } |
3571 | |
3572 | return this; |
3573 | } |
3574 | |
3575 | Instruction* GuardFieldTypeInstr::Canonicalize(FlowGraph* flow_graph) { |
3576 | return field().static_type_exactness_state().NeedsFieldGuard() ? this |
3577 | : nullptr; |
3578 | } |
3579 | |
3580 | Instruction* CheckSmiInstr::Canonicalize(FlowGraph* flow_graph) { |
3581 | return (value()->Type()->ToCid() == kSmiCid) ? NULL : this; |
3582 | } |
3583 | |
3584 | Instruction* CheckEitherNonSmiInstr::Canonicalize(FlowGraph* flow_graph) { |
3585 | if ((left()->Type()->ToCid() == kDoubleCid) || |
3586 | (right()->Type()->ToCid() == kDoubleCid)) { |
3587 | return NULL; // Remove from the graph. |
3588 | } |
3589 | return this; |
3590 | } |
3591 | |
3592 | Definition* CheckNullInstr::Canonicalize(FlowGraph* flow_graph) { |
3593 | return (!value()->Type()->is_nullable()) ? value()->definition() : this; |
3594 | } |
3595 | |
3596 | bool CheckNullInstr::AttributesEqual(Instruction* other) const { |
3597 | CheckNullInstr* other_check = other->AsCheckNull(); |
3598 | ASSERT(other_check != nullptr); |
3599 | return function_name().Equals(other_check->function_name()) && |
3600 | exception_type() == other_check->exception_type(); |
3601 | } |
3602 | |
3603 | BoxInstr* BoxInstr::Create(Representation from, Value* value) { |
3604 | switch (from) { |
3605 | case kUnboxedInt32: |
3606 | return new BoxInt32Instr(value); |
3607 | |
3608 | case kUnboxedUint32: |
3609 | return new BoxUint32Instr(value); |
3610 | |
3611 | case kUnboxedInt64: |
3612 | return new BoxInt64Instr(value); |
3613 | |
3614 | case kUnboxedDouble: |
3615 | case kUnboxedFloat: |
3616 | case kUnboxedFloat32x4: |
3617 | case kUnboxedFloat64x2: |
3618 | case kUnboxedInt32x4: |
3619 | return new BoxInstr(from, value); |
3620 | |
3621 | default: |
3622 | UNREACHABLE(); |
3623 | return NULL; |
3624 | } |
3625 | } |
3626 | |
3627 | UnboxInstr* UnboxInstr::Create(Representation to, |
3628 | Value* value, |
3629 | intptr_t deopt_id, |
3630 | SpeculativeMode speculative_mode) { |
3631 | switch (to) { |
3632 | case kUnboxedInt32: |
3633 | // We must truncate if we can't deoptimize. |
3634 | return new UnboxInt32Instr( |
3635 | speculative_mode == SpeculativeMode::kNotSpeculative |
3636 | ? UnboxInt32Instr::kTruncate |
3637 | : UnboxInt32Instr::kNoTruncation, |
3638 | value, deopt_id, speculative_mode); |
3639 | |
3640 | case kUnboxedUint32: |
3641 | return new UnboxUint32Instr(value, deopt_id, speculative_mode); |
3642 | |
3643 | case kUnboxedInt64: |
3644 | return new UnboxInt64Instr(value, deopt_id, speculative_mode); |
3645 | |
3646 | case kUnboxedDouble: |
3647 | case kUnboxedFloat: |
3648 | case kUnboxedFloat32x4: |
3649 | case kUnboxedFloat64x2: |
3650 | case kUnboxedInt32x4: |
3651 | ASSERT(FlowGraphCompiler::SupportsUnboxedDoubles()); |
3652 | return new UnboxInstr(to, value, deopt_id, speculative_mode); |
3653 | |
3654 | default: |
3655 | UNREACHABLE(); |
3656 | return NULL; |
3657 | } |
3658 | } |
3659 | |
3660 | bool UnboxInstr::CanConvertSmi() const { |
3661 | switch (representation()) { |
3662 | case kUnboxedDouble: |
3663 | case kUnboxedFloat: |
3664 | case kUnboxedInt32: |
3665 | case kUnboxedInt64: |
3666 | return true; |
3667 | |
3668 | case kUnboxedFloat32x4: |
3669 | case kUnboxedFloat64x2: |
3670 | case kUnboxedInt32x4: |
3671 | return false; |
3672 | |
3673 | default: |
3674 | UNREACHABLE(); |
3675 | return false; |
3676 | } |
3677 | } |
3678 | |
3679 | const BinaryFeedback* BinaryFeedback::Create(Zone* zone, |
3680 | const ICData& ic_data) { |
3681 | BinaryFeedback* result = new (zone) BinaryFeedback(zone); |
3682 | if (ic_data.NumArgsTested() == 2) { |
3683 | for (intptr_t i = 0, n = ic_data.NumberOfChecks(); i < n; i++) { |
3684 | if (ic_data.GetCountAt(i) == 0) { |
3685 | continue; |
3686 | } |
3687 | GrowableArray<intptr_t> arg_ids; |
3688 | ic_data.GetClassIdsAt(i, &arg_ids); |
3689 | result->feedback_.Add({arg_ids[0], arg_ids[1]}); |
3690 | } |
3691 | } |
3692 | return result; |
3693 | } |
3694 | |
3695 | const BinaryFeedback* BinaryFeedback::CreateMonomorphic(Zone* zone, |
3696 | intptr_t receiver_cid, |
3697 | intptr_t argument_cid) { |
3698 | BinaryFeedback* result = new (zone) BinaryFeedback(zone); |
3699 | result->feedback_.Add({receiver_cid, argument_cid}); |
3700 | return result; |
3701 | } |
3702 | |
3703 | const CallTargets* CallTargets::CreateMonomorphic(Zone* zone, |
3704 | intptr_t receiver_cid, |
3705 | const Function& target) { |
3706 | CallTargets* targets = new (zone) CallTargets(zone); |
3707 | const intptr_t count = 1; |
3708 | targets->cid_ranges_.Add(new (zone) TargetInfo( |
3709 | receiver_cid, receiver_cid, &Function::ZoneHandle(zone, target.raw()), |
3710 | count, StaticTypeExactnessState::NotTracking())); |
3711 | return targets; |
3712 | } |
3713 | |
3714 | const CallTargets* CallTargets::Create(Zone* zone, const ICData& ic_data) { |
3715 | CallTargets* targets = new (zone) CallTargets(zone); |
3716 | targets->CreateHelper(zone, ic_data); |
3717 | targets->Sort(OrderById); |
3718 | targets->MergeIntoRanges(); |
3719 | return targets; |
3720 | } |
3721 | |
3722 | const CallTargets* CallTargets::CreateAndExpand(Zone* zone, |
3723 | const ICData& ic_data) { |
3724 | CallTargets& targets = *new (zone) CallTargets(zone); |
3725 | targets.CreateHelper(zone, ic_data); |
3726 | |
3727 | if (targets.is_empty() || targets.IsMonomorphic()) { |
3728 | return &targets; |
3729 | } |
3730 | |
3731 | targets.Sort(OrderById); |
3732 | |
3733 | Array& args_desc_array = Array::Handle(zone, ic_data.arguments_descriptor()); |
3734 | ArgumentsDescriptor args_desc(args_desc_array); |
3735 | String& name = String::Handle(zone, ic_data.target_name()); |
3736 | |
3737 | Function& fn = Function::Handle(zone); |
3738 | |
3739 | intptr_t length = targets.length(); |
3740 | |
3741 | // Merging/extending cid ranges is also done in Cids::CreateAndExpand. |
3742 | // If changing this code, consider also adjusting Cids code. |
3743 | |
3744 | // Spread class-ids to preceding classes where a lookup yields the same |
3745 | // method. A polymorphic target is not really the same method since its |
3746 | // behaviour depends on the receiver class-id, so we don't spread the |
3747 | // class-ids in that case. |
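  //
  // Illustrative sketch (hypothetical cids): if cids 8 and 9 are concrete
  // classes on which a lookup of `name` yields the same target m, the entry
  // {cid_start: 9, cid_end: 9, target: m} is widened downwards to
  // {cid_start: 8, cid_end: 9, target: m}.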
3748 | for (int idx = 0; idx < length; idx++) { |
3749 | int lower_limit_cid = (idx == 0) ? -1 : targets[idx - 1].cid_end; |
3750 | auto target_info = targets.TargetAt(idx); |
3751 | const Function& target = *target_info->target; |
3752 | if (target.is_polymorphic_target()) continue; |
3753 | for (int i = target_info->cid_start - 1; i > lower_limit_cid; i--) { |
3754 | bool class_is_abstract = false; |
3755 | if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn, |
3756 | &class_is_abstract) && |
3757 | fn.raw() == target.raw()) { |
3758 | if (!class_is_abstract) { |
3759 | target_info->cid_start = i; |
3760 | target_info->exactness = StaticTypeExactnessState::NotTracking(); |
3761 | } |
3762 | } else { |
3763 | break; |
3764 | } |
3765 | } |
3766 | } |
3767 | |
3768 | // Spread class-ids to following classes where a lookup yields the same |
3769 | // method. |
3770 | const intptr_t max_cid = Isolate::Current()->class_table()->NumCids(); |
3771 | for (int idx = 0; idx < length; idx++) { |
3772 | int upper_limit_cid = |
3773 | (idx == length - 1) ? max_cid : targets[idx + 1].cid_start; |
3774 | auto target_info = targets.TargetAt(idx); |
3775 | const Function& target = *target_info->target; |
3776 | if (target.is_polymorphic_target()) continue; |
    // The code below attempts to avoid spreading a class-id range into a
    // suffix that consists purely of abstract classes, in order to shorten
    // the range.
    // However, such spreading is beneficial when it allows two consecutive
    // ranges to be merged.
3782 | intptr_t cid_end_including_abstract = target_info->cid_end; |
3783 | for (int i = target_info->cid_end + 1; i < upper_limit_cid; i++) { |
3784 | bool class_is_abstract = false; |
3785 | if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn, |
3786 | &class_is_abstract) && |
3787 | fn.raw() == target.raw()) { |
3788 | cid_end_including_abstract = i; |
3789 | if (!class_is_abstract) { |
3790 | target_info->cid_end = i; |
3791 | target_info->exactness = StaticTypeExactnessState::NotTracking(); |
3792 | } |
3793 | } else { |
3794 | break; |
3795 | } |
3796 | } |
3797 | |
3798 | // Check if we have a suffix that consists of abstract classes |
3799 | // and expand into it if that would allow us to merge this |
3800 | // range with subsequent range. |
3801 | if ((cid_end_including_abstract > target_info->cid_end) && |
3802 | (idx < length - 1) && |
3803 | ((cid_end_including_abstract + 1) == targets[idx + 1].cid_start) && |
3804 | (target.raw() == targets.TargetAt(idx + 1)->target->raw())) { |
3805 | target_info->cid_end = cid_end_including_abstract; |
3806 | target_info->exactness = StaticTypeExactnessState::NotTracking(); |
3807 | } |
3808 | } |
3809 | targets.MergeIntoRanges(); |
3810 | return &targets; |
3811 | } |
3812 | |
3813 | void CallTargets::MergeIntoRanges() { |
3814 | if (length() == 0) { |
    return;  // For correctness, not performance: must not set the length to 1.
3816 | } |
3817 | |
3818 | // Merge adjacent class id ranges. |
3819 | int dest = 0; |
3820 | // We merge entries that dispatch to the same target, but polymorphic targets |
3821 | // are not really the same target since they depend on the class-id, so we |
3822 | // don't merge them. |
3823 | for (int src = 1; src < length(); src++) { |
3824 | const Function& target = *TargetAt(dest)->target; |
3825 | if (TargetAt(dest)->cid_end + 1 >= TargetAt(src)->cid_start && |
3826 | target.raw() == TargetAt(src)->target->raw() && |
3827 | !target.is_polymorphic_target()) { |
3828 | TargetAt(dest)->cid_end = TargetAt(src)->cid_end; |
3829 | TargetAt(dest)->count += TargetAt(src)->count; |
3830 | TargetAt(dest)->exactness = StaticTypeExactnessState::NotTracking(); |
3831 | } else { |
3832 | dest++; |
3833 | if (src != dest) { |
3834 | // Use cid_ranges_ instead of TargetAt when updating the pointer. |
3835 | cid_ranges_[dest] = TargetAt(src); |
3836 | } |
3837 | } |
3838 | } |
3839 | SetLength(dest + 1); |
3840 | Sort(OrderByFrequencyThenId); |
3841 | } |
3842 | |
3843 | void CallTargets::Print() const { |
3844 | for (intptr_t i = 0; i < length(); i++) { |
3845 | THR_Print("cid = [%" Pd ", %" Pd "], count = %" Pd ", target = %s\n" , |
3846 | TargetAt(i)->cid_start, TargetAt(i)->cid_end, TargetAt(i)->count, |
3847 | TargetAt(i)->target->ToQualifiedCString()); |
3848 | } |
3849 | } |
3850 | |
3851 | // Shared code generation methods (EmitNativeCode and |
3852 | // MakeLocationSummary). Only assembly code that can be shared across all |
3853 | // architectures can be used. Machine specific register allocation and code |
3854 | // generation is located in intermediate_language_<arch>.cc |
3855 | |
3856 | #define __ compiler->assembler()-> |
3857 | |
3858 | LocationSummary* GraphEntryInstr::MakeLocationSummary(Zone* zone, |
3859 | bool optimizing) const { |
3860 | UNREACHABLE(); |
3861 | return NULL; |
3862 | } |
3863 | |
3864 | LocationSummary* JoinEntryInstr::MakeLocationSummary(Zone* zone, |
3865 | bool optimizing) const { |
3866 | UNREACHABLE(); |
3867 | return NULL; |
3868 | } |
3869 | |
3870 | void JoinEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3871 | __ Bind(compiler->GetJumpLabel(this)); |
3872 | if (!compiler->is_optimizing()) { |
3873 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(), |
3874 | TokenPosition::kNoSource); |
3875 | } |
3876 | if (HasParallelMove()) { |
3877 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
3878 | } |
3879 | } |
3880 | |
3881 | LocationSummary* TargetEntryInstr::MakeLocationSummary(Zone* zone, |
3882 | bool optimizing) const { |
3883 | UNREACHABLE(); |
3884 | return NULL; |
3885 | } |
3886 | |
3887 | void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3888 | __ Bind(compiler->GetJumpLabel(this)); |
3889 | |
3890 | // TODO(kusterman): Remove duplicate between |
3891 | // {TargetEntryInstr,FunctionEntryInstr}::EmitNativeCode. |
3892 | if (!compiler->is_optimizing()) { |
3893 | if (compiler->NeedsEdgeCounter(this)) { |
3894 | compiler->EmitEdgeCounter(preorder_number()); |
3895 | } |
3896 | |
3897 | // The deoptimization descriptor points after the edge counter code for |
3898 | // uniformity with ARM, where we can reuse pattern matching code that |
3899 | // matches backwards from the end of the pattern. |
3900 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(), |
3901 | TokenPosition::kNoSource); |
3902 | } |
3903 | if (HasParallelMove()) { |
3904 | if (compiler::Assembler::EmittingComments()) { |
3905 | compiler->EmitComment(parallel_move()); |
3906 | } |
3907 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
3908 | } |
3909 | } |
3910 | |
3911 | LocationSummary* FunctionEntryInstr::MakeLocationSummary( |
3912 | Zone* zone, |
3913 | bool optimizing) const { |
3914 | UNREACHABLE(); |
3915 | return NULL; |
3916 | } |
3917 | |
3918 | void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3919 | #if defined(TARGET_ARCH_X64) |
3920 | // Ensure the start of the monomorphic checked entry is 2-byte aligned (see |
3921 | // also Assembler::MonomorphicCheckedEntry()). |
3922 | if (__ CodeSize() % 2 == 1) { |
3923 | __ nop(); |
3924 | } |
3925 | #endif |
3926 | if (tag() == Instruction::kFunctionEntry) { |
3927 | __ Bind(compiler->GetJumpLabel(this)); |
3928 | } |
3929 | |
3930 | if (this == compiler->flow_graph().graph_entry()->unchecked_entry()) { |
3931 | __ BindUncheckedEntryPoint(); |
3932 | } |
3933 | |
  // In the AOT compiler we want to reduce code size, so we generate no
  // fall-through code in [FlowGraphCompiler::CompileGraph()]
  // (as opposed to here, where we don't check the return value of
  // [Intrinsify]).
3938 | const Function& function = compiler->parsed_function().function(); |
3939 | |
3940 | if (function.NeedsMonomorphicCheckedEntry(compiler->zone())) { |
3941 | compiler->SpecialStatsBegin(CombinedCodeStatistics::kTagCheckedEntry); |
3942 | if (!FLAG_precompiled_mode) { |
3943 | __ MonomorphicCheckedEntryJIT(); |
3944 | } else { |
3945 | __ MonomorphicCheckedEntryAOT(); |
3946 | } |
3947 | compiler->SpecialStatsEnd(CombinedCodeStatistics::kTagCheckedEntry); |
3948 | } |
3949 | |
  // NOTE: Because of the presence of multiple entry-points, we generate the
  // same intrinsification & frame setup several times. That's why we cannot
  // rely on `constant_pool_allowed` being `false` when we come in here.
3953 | #if defined(TARGET_USES_OBJECT_POOL) |
3954 | __ set_constant_pool_allowed(false); |
3955 | #endif |
3956 | |
3957 | if (compiler->TryIntrinsify() && compiler->skip_body_compilation()) { |
3958 | return; |
3959 | } |
3960 | compiler->EmitPrologue(); |
3961 | |
3962 | #if defined(TARGET_USES_OBJECT_POOL) |
3963 | ASSERT(__ constant_pool_allowed()); |
3964 | #endif |
3965 | |
3966 | if (!compiler->is_optimizing()) { |
3967 | if (compiler->NeedsEdgeCounter(this)) { |
3968 | compiler->EmitEdgeCounter(preorder_number()); |
3969 | } |
3970 | |
3971 | // The deoptimization descriptor points after the edge counter code for |
3972 | // uniformity with ARM, where we can reuse pattern matching code that |
3973 | // matches backwards from the end of the pattern. |
3974 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(), |
3975 | TokenPosition::kNoSource); |
3976 | } |
3977 | if (HasParallelMove()) { |
3978 | if (compiler::Assembler::EmittingComments()) { |
3979 | compiler->EmitComment(parallel_move()); |
3980 | } |
3981 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
3982 | } |
3983 | } |
3984 | |
3985 | LocationSummary* NativeEntryInstr::MakeLocationSummary(Zone* zone, |
3986 | bool optimizing) const { |
3987 | UNREACHABLE(); |
3988 | } |
3989 | |
3990 | LocationSummary* OsrEntryInstr::MakeLocationSummary(Zone* zone, |
3991 | bool optimizing) const { |
3992 | UNREACHABLE(); |
3993 | return NULL; |
3994 | } |
3995 | |
3996 | void OsrEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
3997 | ASSERT(!CompilerState::Current().is_aot()); |
3998 | ASSERT(compiler->is_optimizing()); |
3999 | __ Bind(compiler->GetJumpLabel(this)); |
4000 | |
  // NOTE: Because the graph can have multiple entrypoints, we generate the
  // same intrinsification & frame setup several times. That's why we cannot
  // rely on `constant_pool_allowed` being `false` when we come in here.
4004 | #if defined(TARGET_USES_OBJECT_POOL) |
4005 | __ set_constant_pool_allowed(false); |
4006 | #endif |
4007 | |
4008 | compiler->EmitPrologue(); |
4009 | |
4010 | #if defined(TARGET_USES_OBJECT_POOL) |
4011 | ASSERT(__ constant_pool_allowed()); |
4012 | #endif |
4013 | |
4014 | if (HasParallelMove()) { |
4015 | if (compiler::Assembler::EmittingComments()) { |
4016 | compiler->EmitComment(parallel_move()); |
4017 | } |
4018 | compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); |
4019 | } |
4020 | } |
4021 | |
4022 | void IndirectGotoInstr::ComputeOffsetTable(FlowGraphCompiler* compiler) { |
4023 | ASSERT(SuccessorCount() == offsets_.Length()); |
4024 | intptr_t element_size = offsets_.ElementSizeInBytes(); |
4025 | for (intptr_t i = 0; i < SuccessorCount(); i++) { |
4026 | TargetEntryInstr* target = SuccessorAt(i); |
4027 | auto* label = compiler->GetJumpLabel(target); |
4028 | RELEASE_ASSERT(label != nullptr); |
4029 | RELEASE_ASSERT(label->IsBound()); |
4030 | intptr_t offset = label->Position(); |
4031 | RELEASE_ASSERT(offset > 0); |
4032 | offsets_.SetInt32(i * element_size, offset); |
4033 | } |
4034 | } |
4035 | |
4036 | LocationSummary* IndirectEntryInstr::MakeLocationSummary( |
4037 | Zone* zone, |
4038 | bool optimizing) const { |
4039 | return JoinEntryInstr::MakeLocationSummary(zone, optimizing); |
4040 | } |
4041 | |
4042 | void IndirectEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4043 | JoinEntryInstr::EmitNativeCode(compiler); |
4044 | } |
4045 | |
4046 | LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone, |
4047 | bool opt) const { |
4048 | const intptr_t kNumInputs = 0; |
4049 | const intptr_t kNumTemps = 0; |
4050 | LocationSummary* locs = new (zone) LocationSummary( |
4051 | zone, kNumInputs, kNumTemps, |
4052 | calls_initializer() ? LocationSummary::kCall : LocationSummary::kNoCall); |
4053 | locs->set_out(0, calls_initializer() ? Location::RegisterLocation( |
4054 | InitStaticFieldABI::kResultReg) |
4055 | : Location::RequiresRegister()); |
4056 | return locs; |
4057 | } |
4058 | |
4059 | void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4060 | const Register result = locs()->out(0).reg(); |
4061 | |
4062 | compiler->used_static_fields().Add(&field()); |
4063 | |
  // Note: static field ids won't be changed by hot-reload.
4065 | const intptr_t field_table_offset = |
4066 | compiler::target::Thread::field_table_values_offset(); |
4067 | const intptr_t field_offset = compiler::target::FieldTable::OffsetOf(field()); |
4068 | |
4069 | __ LoadMemoryValue(result, THR, static_cast<int32_t>(field_table_offset)); |
4070 | __ LoadMemoryValue(result, result, static_cast<int32_t>(field_offset)); |
4071 | |
4072 | if (calls_initializer()) { |
4073 | compiler::Label call_runtime, no_call; |
4074 | __ CompareObject(result, Object::sentinel()); |
4075 | |
4076 | if (!field().is_late()) { |
4077 | __ BranchIf(EQUAL, &call_runtime); |
4078 | __ CompareObject(result, Object::transition_sentinel()); |
4079 | } |
4080 | |
4081 | __ BranchIf(NOT_EQUAL, &no_call); |
4082 | |
4083 | __ Bind(&call_runtime); |
4084 | __ LoadObject(InitStaticFieldABI::kFieldReg, |
4085 | Field::ZoneHandle(field().Original())); |
4086 | |
4087 | auto object_store = compiler->isolate()->object_store(); |
4088 | const auto& init_static_field_stub = Code::ZoneHandle( |
4089 | compiler->zone(), object_store->init_static_field_stub()); |
4090 | compiler->GenerateStubCall(token_pos(), init_static_field_stub, |
4091 | /*kind=*/PcDescriptorsLayout::kOther, locs(), |
4092 | deopt_id()); |
4093 | __ Bind(&no_call); |
4094 | } |
4095 | } |
4096 | |
4097 | void LoadFieldInstr::EmitNativeCodeForInitializerCall( |
4098 | FlowGraphCompiler* compiler) { |
4099 | ASSERT(calls_initializer()); |
4100 | ASSERT(locs()->in(0).reg() == InitInstanceFieldABI::kInstanceReg); |
4101 | ASSERT(locs()->out(0).reg() == InitInstanceFieldABI::kResultReg); |
4102 | ASSERT(slot().IsDartField()); |
4103 | const Field& field = slot().field(); |
4104 | const Field& original_field = Field::ZoneHandle(field.Original()); |
4105 | |
4106 | compiler::Label no_call; |
4107 | __ CompareObject(InitInstanceFieldABI::kResultReg, Object::sentinel()); |
4108 | __ BranchIf(NOT_EQUAL, &no_call); |
4109 | |
4110 | __ LoadObject(InitInstanceFieldABI::kFieldReg, original_field); |
4111 | |
4112 | auto object_store = compiler->isolate()->object_store(); |
4113 | auto& stub = Code::ZoneHandle(compiler->zone()); |
4114 | if (field.needs_load_guard()) { |
4115 | stub = object_store->init_instance_field_stub(); |
4116 | } else if (field.is_late()) { |
4117 | if (!field.has_nontrivial_initializer()) { |
      // The common stub calls the runtime, which will throw an exception.
4119 | stub = object_store->init_instance_field_stub(); |
4120 | } else { |
4121 | // Stubs for late field initialization call initializer |
4122 | // function directly, so make sure one is created. |
4123 | original_field.EnsureInitializerFunction(); |
4124 | |
4125 | if (field.is_final()) { |
4126 | stub = object_store->init_late_final_instance_field_stub(); |
4127 | } else { |
4128 | stub = object_store->init_late_instance_field_stub(); |
4129 | } |
4130 | } |
4131 | } else { |
4132 | UNREACHABLE(); |
4133 | } |
4134 | |
  // Instruction inputs are popped from the stack at this point, so the
  // deoptimization environment has to be adjusted accordingly. This
  // adjustment is done in FlowGraph::AttachEnvironment.
4138 | compiler->GenerateStubCall(token_pos(), stub, |
4139 | /*kind=*/PcDescriptorsLayout::kOther, locs(), |
4140 | deopt_id()); |
4141 | __ Bind(&no_call); |
4142 | } |
4143 | |
4144 | LocationSummary* ThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
4145 | const intptr_t kNumInputs = 1; |
4146 | const intptr_t kNumTemps = 0; |
4147 | LocationSummary* summary = new (zone) |
4148 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
4149 | summary->set_in(0, Location::RegisterLocation(ThrowABI::kExceptionReg)); |
4150 | return summary; |
4151 | } |
4152 | |
4153 | void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4154 | auto object_store = compiler->isolate()->object_store(); |
4155 | const auto& throw_stub = |
4156 | Code::ZoneHandle(compiler->zone(), object_store->throw_stub()); |
4157 | |
4158 | compiler->GenerateStubCall(token_pos(), throw_stub, |
4159 | /*kind=*/PcDescriptorsLayout::kOther, locs(), |
4160 | deopt_id()); |
4161 | // Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint |
4162 | // instruction: The ThrowInstr will terminate the current block. The very |
4163 | // next machine code instruction might get a pc descriptor attached with a |
4164 | // different try-index. If we removed this breakpoint instruction, the |
  // runtime might associate this call with the try-index of the next
4166 | // instruction. |
4167 | __ Breakpoint(); |
4168 | } |
4169 | |
4170 | LocationSummary* ReThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
4171 | const intptr_t kNumInputs = 2; |
4172 | const intptr_t kNumTemps = 0; |
4173 | LocationSummary* summary = new (zone) |
4174 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
4175 | summary->set_in(0, Location::RegisterLocation(ReThrowABI::kExceptionReg)); |
4176 | summary->set_in(1, Location::RegisterLocation(ReThrowABI::kStackTraceReg)); |
4177 | return summary; |
4178 | } |
4179 | |
4180 | void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4181 | auto object_store = compiler->isolate()->object_store(); |
4182 | const auto& re_throw_stub = |
4183 | Code::ZoneHandle(compiler->zone(), object_store->re_throw_stub()); |
4184 | |
4185 | compiler->SetNeedsStackTrace(catch_try_index()); |
4186 | compiler->GenerateStubCall(token_pos(), re_throw_stub, |
4187 | /*kind=*/PcDescriptorsLayout::kOther, locs(), |
4188 | deopt_id()); |
4189 | // Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint |
  // instruction: The ReThrowInstr will terminate the current block. The very
4191 | // next machine code instruction might get a pc descriptor attached with a |
4192 | // different try-index. If we removed this breakpoint instruction, the |
  // runtime might associate this call with the try-index of the next
4194 | // instruction. |
4195 | __ Breakpoint(); |
4196 | } |
4197 | |
4198 | LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone, |
4199 | bool opt) const { |
4200 | const intptr_t kNumInputs = 1; |
4201 | const intptr_t kNumTemps = 0; |
4202 | LocationSummary* locs = new (zone) |
4203 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
4204 | locs->set_in(0, Location::RegisterLocation(AssertBooleanABI::kObjectReg)); |
4205 | locs->set_out(0, Location::RegisterLocation(AssertBooleanABI::kObjectReg)); |
4206 | return locs; |
4207 | } |
4208 | |
4209 | void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4210 | // Check that the type of the value is allowed in conditional context. |
4211 | ASSERT(locs()->always_calls()); |
4212 | |
4213 | auto object_store = compiler->isolate()->object_store(); |
4214 | const auto& assert_boolean_stub = |
4215 | Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub()); |
4216 | |
4217 | compiler::Label done; |
4218 | __ CompareObject(AssertBooleanABI::kObjectReg, Object::null_instance()); |
4219 | __ BranchIf(NOT_EQUAL, &done); |
4220 | compiler->GenerateStubCall(token_pos(), assert_boolean_stub, |
4221 | /*kind=*/PcDescriptorsLayout::kOther, locs(), |
4222 | deopt_id()); |
4223 | __ Bind(&done); |
4224 | } |
4225 | |
4226 | LocationSummary* PhiInstr::MakeLocationSummary(Zone* zone, |
4227 | bool optimizing) const { |
4228 | UNREACHABLE(); |
4229 | return NULL; |
4230 | } |
4231 | |
4232 | void PhiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4233 | UNREACHABLE(); |
4234 | } |
4235 | |
4236 | LocationSummary* RedefinitionInstr::MakeLocationSummary(Zone* zone, |
4237 | bool optimizing) const { |
4238 | UNREACHABLE(); |
4239 | return NULL; |
4240 | } |
4241 | |
4242 | void RedefinitionInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4243 | UNREACHABLE(); |
4244 | } |
4245 | |
4246 | LocationSummary* ReachabilityFenceInstr::MakeLocationSummary( |
4247 | Zone* zone, |
4248 | bool optimizing) const { |
4249 | LocationSummary* summary = new (zone) |
4250 | LocationSummary(zone, 1, 0, LocationSummary::ContainsCall::kNoCall); |
4251 | // Keep the parameter alive and reachable, in any location. |
4252 | summary->set_in(0, Location::Any()); |
4253 | return summary; |
4254 | } |
4255 | |
4256 | void ReachabilityFenceInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4257 | // No native code, but we rely on the parameter being passed in here so that |
4258 | // it stays alive and reachable. |
4259 | } |
4260 | |
4261 | LocationSummary* ParameterInstr::MakeLocationSummary(Zone* zone, |
4262 | bool optimizing) const { |
4263 | UNREACHABLE(); |
4264 | return NULL; |
4265 | } |
4266 | |
4267 | void ParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4268 | UNREACHABLE(); |
4269 | } |
4270 | |
4271 | void NativeParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
  // The native entry frame has size -kExitLinkSlotFromEntryFp. In order to
  // access the top of the stack from above the entry frame, we add a constant
  // to account for the two frame pointers and two return addresses of the
  // entry frame.
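  //
  // Hypothetical sketch (the exact layout is architecture dependent): a
  // native argument at [SPREG + n] in the callee's own view is re-addressed
  // as [FPREG + n + (-kExitLinkSlotFromEntryFp + kEntryFramePadding) * word]
  // once the entry frame sitting between the two is accounted for.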
4275 | constexpr intptr_t kEntryFramePadding = 4; |
4276 | compiler::ffi::FrameRebase rebase( |
4277 | /*old_base=*/SPREG, /*new_base=*/FPREG, |
4278 | (-kExitLinkSlotFromEntryFp + kEntryFramePadding) * |
4279 | compiler::target::kWordSize, |
4280 | compiler->zone()); |
4281 | const auto& src = |
4282 | rebase.Rebase(marshaller_.NativeLocationOfNativeParameter(index_)); |
4283 | NoTemporaryAllocator no_temp; |
4284 | const Location out_loc = locs()->out(0); |
4285 | const Representation out_rep = representation(); |
4286 | compiler->EmitMoveFromNative(out_loc, out_rep, src, &no_temp); |
4287 | } |
4288 | |
4289 | LocationSummary* NativeParameterInstr::MakeLocationSummary(Zone* zone, |
4290 | bool opt) const { |
4291 | ASSERT(opt); |
4292 | Location output = Location::Any(); |
4293 | if (representation() == kUnboxedInt64 && compiler::target::kWordSize < 8) { |
4294 | output = Location::Pair(Location::RequiresRegister(), |
4295 | Location::RequiresFpuRegister()); |
4296 | } else { |
4297 | output = RegisterKindForResult() == Location::kRegister |
4298 | ? Location::RequiresRegister() |
4299 | : Location::RequiresFpuRegister(); |
4300 | } |
4301 | return LocationSummary::Make(zone, /*num_inputs=*/0, output, |
4302 | LocationSummary::kNoCall); |
4303 | } |
4304 | |
4305 | bool ParallelMoveInstr::IsRedundant() const { |
4306 | for (intptr_t i = 0; i < moves_.length(); i++) { |
4307 | if (!moves_[i]->IsRedundant()) { |
4308 | return false; |
4309 | } |
4310 | } |
4311 | return true; |
4312 | } |
4313 | |
4314 | LocationSummary* ParallelMoveInstr::MakeLocationSummary(Zone* zone, |
4315 | bool optimizing) const { |
4316 | return NULL; |
4317 | } |
4318 | |
4319 | void ParallelMoveInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4320 | UNREACHABLE(); |
4321 | } |
4322 | |
4323 | LocationSummary* ConstraintInstr::MakeLocationSummary(Zone* zone, |
4324 | bool optimizing) const { |
4325 | UNREACHABLE(); |
4326 | return NULL; |
4327 | } |
4328 | |
4329 | void ConstraintInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4330 | UNREACHABLE(); |
4331 | } |
4332 | |
4333 | LocationSummary* MaterializeObjectInstr::MakeLocationSummary( |
4334 | Zone* zone, |
4335 | bool optimizing) const { |
4336 | UNREACHABLE(); |
4337 | return NULL; |
4338 | } |
4339 | |
4340 | void MaterializeObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4341 | UNREACHABLE(); |
4342 | } |
4343 | |
4344 | // This function should be kept in sync with |
4345 | // FlowGraphCompiler::SlowPathEnvironmentFor(). |
4346 | void MaterializeObjectInstr::RemapRegisters(intptr_t* cpu_reg_slots, |
4347 | intptr_t* fpu_reg_slots) { |
4348 | if (registers_remapped_) { |
4349 | return; |
4350 | } |
4351 | registers_remapped_ = true; |
4352 | |
4353 | for (intptr_t i = 0; i < InputCount(); i++) { |
4354 | locations_[i] = LocationRemapForSlowPath( |
4355 | LocationAt(i), InputAt(i)->definition(), cpu_reg_slots, fpu_reg_slots); |
4356 | } |
4357 | } |
4358 | |
4359 | const char* SpecialParameterInstr::KindToCString(SpecialParameterKind k) { |
4360 | switch (k) { |
4361 | #define KIND_CASE(Name) \ |
4362 | case SpecialParameterKind::k##Name: \ |
4363 | return #Name; |
4364 | FOR_EACH_SPECIAL_PARAMETER_KIND(KIND_CASE) |
4365 | #undef KIND_CASE |
4366 | } |
4367 | return nullptr; |
4368 | } |
4369 | |
4370 | bool SpecialParameterInstr::ParseKind(const char* str, |
4371 | SpecialParameterKind* out) { |
4372 | ASSERT(str != nullptr && out != nullptr); |
4373 | #define KIND_CASE(Name) \ |
4374 | if (strcmp(str, #Name) == 0) { \ |
4375 | *out = SpecialParameterKind::k##Name; \ |
4376 | return true; \ |
4377 | } |
4378 | FOR_EACH_SPECIAL_PARAMETER_KIND(KIND_CASE) |
4379 | #undef KIND_CASE |
4380 | return false; |
4381 | } |
4382 | |
4383 | LocationSummary* SpecialParameterInstr::MakeLocationSummary(Zone* zone, |
4384 | bool opt) const { |
4385 | // Only appears in initial definitions, never in normal code. |
4386 | UNREACHABLE(); |
4387 | return NULL; |
4388 | } |
4389 | |
4390 | void SpecialParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4391 | // Only appears in initial definitions, never in normal code. |
4392 | UNREACHABLE(); |
4393 | } |
4394 | |
4395 | LocationSummary* MakeTempInstr::MakeLocationSummary(Zone* zone, |
4396 | bool optimizing) const { |
4397 | ASSERT(!optimizing); |
4398 | null_->InitializeLocationSummary(zone, optimizing); |
4399 | return null_->locs(); |
4400 | } |
4401 | |
4402 | void MakeTempInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4403 | ASSERT(!compiler->is_optimizing()); |
4404 | null_->EmitNativeCode(compiler); |
4405 | } |
4406 | |
4407 | LocationSummary* DropTempsInstr::MakeLocationSummary(Zone* zone, |
4408 | bool optimizing) const { |
4409 | ASSERT(!optimizing); |
4410 | return (InputCount() == 1) |
4411 | ? LocationSummary::Make(zone, 1, Location::SameAsFirstInput(), |
4412 | LocationSummary::kNoCall) |
4413 | : LocationSummary::Make(zone, 0, Location::NoLocation(), |
4414 | LocationSummary::kNoCall); |
4415 | } |
4416 | |
4417 | void DropTempsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4418 | ASSERT(!compiler->is_optimizing()); |
4419 | // Assert that register assignment is correct. |
4420 | ASSERT((InputCount() == 0) || (locs()->out(0).reg() == locs()->in(0).reg())); |
4421 | __ Drop(num_temps()); |
4422 | } |
4423 | |
4424 | StrictCompareInstr::StrictCompareInstr(TokenPosition token_pos, |
4425 | Token::Kind kind, |
4426 | Value* left, |
4427 | Value* right, |
4428 | bool needs_number_check, |
4429 | intptr_t deopt_id) |
4430 | : TemplateComparison(token_pos, kind, deopt_id), |
4431 | needs_number_check_(needs_number_check) { |
4432 | ASSERT((kind == Token::kEQ_STRICT) || (kind == Token::kNE_STRICT)); |
4433 | SetInputAt(0, left); |
4434 | SetInputAt(1, right); |
4435 | } |
4436 | |
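// Emits a strict (identity) comparison of the two inputs. At most one
// operand may be a constant: if it is a known bool and the other operand is
// known to be a bool, a cheaper bool test is emitted; otherwise a
// reg/constant or reg/reg equality comparison is emitted whose condition is
// inverted for kNE_STRICT.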
4437 | Condition StrictCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, |
4438 | BranchLabels labels) { |
4439 | Location left = locs()->in(0); |
4440 | Location right = locs()->in(1); |
4441 | ASSERT(!left.IsConstant() || !right.IsConstant()); |
4442 | Condition true_condition; |
4443 | if (left.IsConstant()) { |
4444 | if (TryEmitBoolTest(compiler, labels, 1, left.constant(), |
4445 | &true_condition)) { |
4446 | return true_condition; |
4447 | } |
4448 | true_condition = EmitComparisonCodeRegConstant( |
4449 | compiler, labels, right.reg(), left.constant()); |
4450 | } else if (right.IsConstant()) { |
4451 | if (TryEmitBoolTest(compiler, labels, 0, right.constant(), |
4452 | &true_condition)) { |
4453 | return true_condition; |
4454 | } |
4455 | true_condition = EmitComparisonCodeRegConstant(compiler, labels, left.reg(), |
4456 | right.constant()); |
4457 | } else { |
4458 | true_condition = compiler->EmitEqualityRegRegCompare( |
4459 | left.reg(), right.reg(), needs_number_check(), token_pos(), deopt_id()); |
4460 | } |
4461 | return true_condition != kInvalidCondition && (kind() != Token::kEQ_STRICT) |
4462 | ? InvertCondition(true_condition) |
4463 | : true_condition; |
4464 | } |
4465 | |
4466 | bool StrictCompareInstr::TryEmitBoolTest(FlowGraphCompiler* compiler, |
4467 | BranchLabels labels, |
4468 | intptr_t input_index, |
4469 | const Object& obj, |
4470 | Condition* true_condition_out) { |
4471 | CompileType* input_type = InputAt(input_index)->Type(); |
4472 | if (input_type->ToCid() == kBoolCid && obj.GetClassId() == kBoolCid) { |
4473 | bool invert = (kind() != Token::kEQ_STRICT) ^ !Bool::Cast(obj).value(); |
4474 | *true_condition_out = |
4475 | compiler->EmitBoolTest(locs()->in(input_index).reg(), labels, invert); |
4476 | return true; |
4477 | } |
4478 | return false; |
4479 | } |
4480 | |
4481 | LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone, |
4482 | bool opt) const { |
4483 | const intptr_t kNumInputs = 1; |
4484 | return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), |
4485 | LocationSummary::kNoCall); |
4486 | } |
4487 | |
4488 | void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4489 | const Register object = locs()->in(0).reg(); |
4490 | const Register result = locs()->out(0).reg(); |
4491 | if (input_can_be_smi_ && this->object()->Type()->CanBeSmi()) { |
4492 | if (representation() == kTagged) { |
4493 | __ LoadTaggedClassIdMayBeSmi(result, object); |
4494 | } else { |
4495 | __ LoadClassIdMayBeSmi(result, object); |
4496 | } |
4497 | } else { |
4498 | __ LoadClassId(result, object); |
4499 | if (representation() == kTagged) { |
4500 | __ SmiTag(result); |
4501 | } |
4502 | } |
4503 | } |
4504 | |
4505 | LocationSummary* InstanceCallInstr::MakeLocationSummary(Zone* zone, |
4506 | bool optimizing) const { |
4507 | return MakeCallSummary(zone, this); |
4508 | } |
4509 | |
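// Returns the dedicated inline cache stub for a two-argument Smi operation
// of the given kind, or Code::null() if no such stub exists or
// --two_args_smi_icd is disabled.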
4510 | static CodePtr TwoArgsSmiOpInlineCacheEntry(Token::Kind kind) { |
4511 | if (!FLAG_two_args_smi_icd) { |
4512 | return Code::null(); |
4513 | } |
4514 | switch (kind) { |
4515 | case Token::kADD: |
4516 | return StubCode::SmiAddInlineCache().raw(); |
4517 | case Token::kLT: |
4518 | return StubCode::SmiLessInlineCache().raw(); |
4519 | case Token::kEQ: |
4520 | return StubCode::SmiEqualInlineCache().raw(); |
4521 | default: |
4522 | return Code::null(); |
4523 | } |
4524 | } |
4525 | |
4526 | bool InstanceCallBaseInstr::CanReceiverBeSmiBasedOnInterfaceTarget( |
4527 | Zone* zone) const { |
4528 | if (!interface_target().IsNull()) { |
    // Note: target_type is a fully instantiated rare type (all type
    // parameters are replaced with dynamic), so checking whether Smi is
    // assignable to it correctly computes whether the receiver can be a Smi.
4532 | const AbstractType& target_type = AbstractType::Handle( |
4533 | zone, Class::Handle(zone, interface_target().Owner()).RareType()); |
4534 | if (!CompileType::Smi().IsAssignableTo(target_type)) { |
4535 | return false; |
4536 | } |
4537 | } |
4538 | // In all other cases conservatively assume that the receiver can be a smi. |
4539 | return true; |
4540 | } |
4541 | |
4542 | Representation InstanceCallBaseInstr::RequiredInputRepresentation( |
4543 | intptr_t idx) const { |
  // The first input is the array of type arguments for generic functions.
4546 | if (type_args_len() > 0) { |
4547 | if (idx == 0) { |
4548 | return kTagged; |
4549 | } |
4550 | idx--; |
4551 | } |
4552 | return FlowGraph::ParameterRepresentationAt(interface_target(), idx); |
4553 | } |
4554 | |
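// Returns the size of the call's arguments on the stack, in words. If the
// interface target is known, its parameter representations are taken into
// account; one extra slot is counted for the type arguments vector, if any.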
4555 | intptr_t InstanceCallBaseInstr::ArgumentsSize() const { |
4556 | if (interface_target().IsNull()) { |
4557 | return ArgumentCountWithoutTypeArgs() + ((type_args_len() > 0) ? 1 : 0); |
4558 | } |
4559 | |
4560 | return FlowGraph::ParameterOffsetAt(interface_target(), |
4561 | ArgumentCountWithoutTypeArgs(), |
4562 | /*last_slot=*/false) + |
4563 | ((type_args_len() > 0) ? 1 : 0); |
4564 | } |
4565 | |
4566 | Representation InstanceCallBaseInstr::representation() const { |
4567 | return FlowGraph::ReturnRepresentationOf(interface_target()); |
4568 | } |
4569 | |
4570 | void InstanceCallBaseInstr::UpdateReceiverSminess(Zone* zone) { |
4571 | if (CompilerState::Current().is_aot() && !receiver_is_not_smi()) { |
4572 | if (!Receiver()->Type()->CanBeSmi() || |
4573 | !CanReceiverBeSmiBasedOnInterfaceTarget(zone)) { |
4574 | set_receiver_is_not_smi(true); |
4575 | } |
4576 | } |
4577 | } |
4578 | |
4579 | void InstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4580 | Zone* zone = compiler->zone(); |
4581 | const ICData* call_ic_data = NULL; |
4582 | if (!FLAG_propagate_ic_data || !compiler->is_optimizing() || |
4583 | (ic_data() == NULL)) { |
4584 | const Array& arguments_descriptor = |
4585 | Array::Handle(zone, GetArgumentsDescriptor()); |
4586 | |
4587 | AbstractType& receivers_static_type = AbstractType::Handle(zone); |
4588 | if (receivers_static_type_ != nullptr) { |
4589 | receivers_static_type = receivers_static_type_->raw(); |
4590 | } |
4591 | |
4592 | call_ic_data = compiler->GetOrAddInstanceCallICData( |
4593 | deopt_id(), function_name(), arguments_descriptor, |
4594 | checked_argument_count(), receivers_static_type); |
4595 | } else { |
4596 | call_ic_data = &ICData::ZoneHandle(zone, ic_data()->raw()); |
4597 | } |
4598 | |
4599 | UpdateReceiverSminess(zone); |
4600 | |
4601 | if ((compiler->is_optimizing() || compiler->function().HasBytecode()) && |
4602 | HasICData()) { |
4604 | if (compiler->is_optimizing() && (ic_data()->NumberOfUsedChecks() > 0)) { |
4605 | const ICData& unary_ic_data = |
4606 | ICData::ZoneHandle(zone, ic_data()->AsUnaryClassChecks()); |
4607 | compiler->GenerateInstanceCall(deopt_id(), token_pos(), locs(), |
4608 | unary_ic_data, entry_kind(), |
4609 | !receiver_is_not_smi()); |
4610 | } else { |
4611 | // Call was not visited yet, use original ICData in order to populate it. |
4612 | compiler->GenerateInstanceCall(deopt_id(), token_pos(), locs(), |
4613 | *call_ic_data, entry_kind(), |
4614 | !receiver_is_not_smi()); |
4615 | } |
4616 | } else { |
4617 | // Unoptimized code. |
4618 | compiler->AddCurrentDescriptor(PcDescriptorsLayout::kRewind, deopt_id(), |
4619 | token_pos()); |
4620 | bool is_smi_two_args_op = false; |
4621 | const Code& stub = |
4622 | Code::ZoneHandle(TwoArgsSmiOpInlineCacheEntry(token_kind())); |
4623 | if (!stub.IsNull()) { |
      // We have a dedicated inline cache stub for this operation; add an
      // initial Smi/Smi check with count 0.
4626 | is_smi_two_args_op = call_ic_data->AddSmiSmiCheckForFastSmiStubs(); |
4627 | } |
4628 | if (is_smi_two_args_op) { |
4629 | ASSERT(ArgumentCount() == 2); |
4630 | compiler->EmitInstanceCallJIT(stub, *call_ic_data, deopt_id(), |
4631 | token_pos(), locs(), entry_kind()); |
4632 | } else { |
4633 | compiler->GenerateInstanceCall(deopt_id(), token_pos(), locs(), |
4634 | *call_ic_data, entry_kind(), |
4635 | !receiver_is_not_smi()); |
4636 | } |
4637 | } |
4638 | } |
4639 | |
4640 | bool InstanceCallInstr::MatchesCoreName(const String& name) { |
4641 | return Library::IsPrivateCoreLibName(function_name(), name); |
4642 | } |
4643 | |
4644 | FunctionPtr InstanceCallBaseInstr::ResolveForReceiverClass( |
4645 | const Class& cls, |
4646 | bool allow_add /* = true */) { |
4647 | const Array& args_desc_array = Array::Handle(GetArgumentsDescriptor()); |
4648 | ArgumentsDescriptor args_desc(args_desc_array); |
4649 | return Resolver::ResolveDynamicForReceiverClass(cls, function_name(), |
4650 | args_desc, allow_add); |
4651 | } |
4652 | |
4653 | const CallTargets& InstanceCallInstr::Targets() { |
4654 | if (targets_ == nullptr) { |
4655 | Zone* zone = Thread::Current()->zone(); |
4656 | if (HasICData()) { |
4657 | targets_ = CallTargets::CreateAndExpand(zone, *ic_data()); |
4658 | } else { |
4659 | targets_ = new (zone) CallTargets(zone); |
4660 | ASSERT(targets_->is_empty()); |
4661 | } |
4662 | } |
4663 | return *targets_; |
4664 | } |
4665 | |
4666 | const BinaryFeedback& InstanceCallInstr::BinaryFeedback() { |
4667 | if (binary_ == nullptr) { |
4668 | Zone* zone = Thread::Current()->zone(); |
4669 | if (HasICData()) { |
4670 | binary_ = BinaryFeedback::Create(zone, *ic_data()); |
4671 | } else { |
4672 | binary_ = new (zone) class BinaryFeedback(zone); |
4673 | } |
4674 | } |
4675 | return *binary_; |
4676 | } |
4677 | |
4678 | Representation DispatchTableCallInstr::RequiredInputRepresentation( |
4679 | intptr_t idx) const { |
4680 | if (idx == (InputCount() - 1)) { |
4681 | return kUntagged; |
4682 | } |
4683 | |
  // The first input is the array of type arguments for generic functions.
4686 | if (type_args_len() > 0) { |
4687 | if (idx == 0) { |
4688 | return kTagged; |
4689 | } |
4690 | idx--; |
4691 | } |
4692 | return FlowGraph::ParameterRepresentationAt(interface_target(), idx); |
4693 | } |
4694 | |
4695 | intptr_t DispatchTableCallInstr::ArgumentsSize() const { |
4696 | if (interface_target().IsNull()) { |
4697 | return ArgumentCountWithoutTypeArgs() + ((type_args_len() > 0) ? 1 : 0); |
4698 | } |
4699 | |
4700 | return FlowGraph::ParameterOffsetAt(interface_target(), |
4701 | ArgumentCountWithoutTypeArgs(), |
4702 | /*last_slot=*/false) + |
4703 | ((type_args_len() > 0) ? 1 : 0); |
4704 | } |
4705 | |
4706 | Representation DispatchTableCallInstr::representation() const { |
4707 | return FlowGraph::ReturnRepresentationOf(interface_target()); |
4708 | } |
4709 | |
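// Creates a DispatchTableCallInstr equivalent to [call]: the original
// arguments are copied and the receiver's class id [cid] is appended as the
// last input, to be used for indexing into the dispatch table.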
4710 | DispatchTableCallInstr* DispatchTableCallInstr::FromCall( |
4711 | Zone* zone, |
4712 | const InstanceCallBaseInstr* call, |
4713 | Value* cid, |
4714 | const Function& interface_target, |
4715 | const compiler::TableSelector* selector) { |
4716 | InputsArray* args = new (zone) InputsArray(zone, call->ArgumentCount() + 1); |
4717 | for (intptr_t i = 0; i < call->ArgumentCount(); i++) { |
4718 | args->Add(call->ArgumentValueAt(i)->CopyWithType()); |
4719 | } |
4720 | args->Add(cid); |
4721 | auto dispatch_table_call = new (zone) DispatchTableCallInstr( |
4722 | call->token_pos(), interface_target, selector, args, |
4723 | call->type_args_len(), call->argument_names()); |
4724 | if (call->has_inlining_id()) { |
4725 | dispatch_table_call->set_inlining_id(call->inlining_id()); |
4726 | } |
4727 | return dispatch_table_call; |
4728 | } |
4729 | |
4730 | void DispatchTableCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4731 | Array& arguments_descriptor = Array::ZoneHandle(); |
4732 | if (selector()->requires_args_descriptor) { |
4733 | ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(), |
4734 | argument_names()); |
4735 | arguments_descriptor = args_info.ToArgumentsDescriptor(); |
4736 | } |
4737 | const Register cid_reg = locs()->in(0).reg(); |
4738 | compiler->EmitDispatchTableCall(cid_reg, selector()->offset, |
4739 | arguments_descriptor); |
4740 | compiler->EmitCallsiteMetadata(token_pos(), DeoptId::kNone, |
4741 | PcDescriptorsLayout::kOther, locs()); |
4742 | if (selector()->called_on_null && !selector()->on_null_interface) { |
4743 | Value* receiver = ArgumentValueAt(FirstArgIndex()); |
4744 | if (receiver->Type()->is_nullable()) { |
4745 | const String& function_name = |
4746 | String::ZoneHandle(interface_target().name()); |
4747 | compiler->AddNullCheck(token_pos(), function_name); |
4748 | } |
4749 | } |
4750 | __ Drop(ArgumentsSize()); |
4751 | |
4752 | compiler->AddDispatchTableCallTarget(selector()); |
4753 | } |
4754 | |
4755 | Representation StaticCallInstr::RequiredInputRepresentation( |
4756 | intptr_t idx) const { |
  // The first input is the array of type arguments for generic functions
  // and factories.
4759 | if (type_args_len() > 0 || function().IsFactory()) { |
4760 | if (idx == 0) { |
4761 | return kTagged; |
4762 | } |
4763 | idx--; |
4764 | } |
4765 | return FlowGraph::ParameterRepresentationAt(function(), idx); |
4766 | } |
4767 | |
4768 | intptr_t StaticCallInstr::ArgumentsSize() const { |
4769 | return FlowGraph::ParameterOffsetAt(function(), |
4770 | ArgumentCountWithoutTypeArgs(), |
4771 | /*last_slot=*/false) + |
4772 | ((type_args_len() > 0) ? 1 : 0); |
4773 | } |
4774 | |
4775 | Representation StaticCallInstr::representation() const { |
4776 | return FlowGraph::ReturnRepresentationOf(function()); |
4777 | } |
4778 | |
4779 | const CallTargets& StaticCallInstr::Targets() { |
4780 | if (targets_ == nullptr) { |
4781 | Zone* zone = Thread::Current()->zone(); |
4782 | if (HasICData()) { |
4783 | targets_ = CallTargets::CreateAndExpand(zone, *ic_data()); |
4784 | } else { |
4785 | targets_ = new (zone) CallTargets(zone); |
4786 | ASSERT(targets_->is_empty()); |
4787 | } |
4788 | } |
4789 | return *targets_; |
4790 | } |
4791 | |
4792 | const BinaryFeedback& StaticCallInstr::BinaryFeedback() { |
4793 | if (binary_ == nullptr) { |
4794 | Zone* zone = Thread::Current()->zone(); |
4795 | if (HasICData()) { |
4796 | binary_ = BinaryFeedback::Create(zone, *ic_data()); |
4797 | } else { |
4798 | binary_ = new (zone) class BinaryFeedback(zone); |
4799 | } |
4800 | } |
4801 | return *binary_; |
4802 | } |
4803 | |
4804 | bool CallTargets::HasSingleRecognizedTarget() const { |
4805 | if (!HasSingleTarget()) return false; |
4806 | return FirstTarget().recognized_kind() != MethodRecognizer::kUnknown; |
4807 | } |
4808 | |
4809 | bool CallTargets::HasSingleTarget() const { |
4810 | if (length() == 0) return false; |
4811 | for (int i = 0; i < length(); i++) { |
4812 | if (TargetAt(i)->target->raw() != TargetAt(0)->target->raw()) return false; |
4813 | } |
4814 | return true; |
4815 | } |
4816 | |
4817 | const Function& CallTargets::FirstTarget() const { |
4818 | ASSERT(length() != 0); |
4819 | ASSERT(TargetAt(0)->target->IsZoneHandle()); |
4820 | return *TargetAt(0)->target; |
4821 | } |
4822 | |
4823 | const Function& CallTargets::MostPopularTarget() const { |
4824 | ASSERT(length() != 0); |
4825 | ASSERT(TargetAt(0)->target->IsZoneHandle()); |
4826 | for (int i = 1; i < length(); i++) { |
4827 | ASSERT(TargetAt(i)->count <= TargetAt(0)->count); |
4828 | } |
4829 | return *TargetAt(0)->target; |
4830 | } |
4831 | |
4832 | intptr_t CallTargets::AggregateCallCount() const { |
4833 | intptr_t sum = 0; |
4834 | for (int i = 0; i < length(); i++) { |
4835 | sum += TargetAt(i)->count; |
4836 | } |
4837 | return sum; |
4838 | } |
4839 | |
4840 | bool PolymorphicInstanceCallInstr::HasOnlyDispatcherOrImplicitAccessorTargets() |
4841 | const { |
4842 | const intptr_t len = targets_.length(); |
4843 | Function& target = Function::Handle(); |
4844 | for (intptr_t i = 0; i < len; i++) { |
4845 | target = targets_.TargetAt(i)->target->raw(); |
4846 | if (!target.IsDispatcherOrImplicitAccessor()) { |
4847 | return false; |
4848 | } |
4849 | } |
4850 | return true; |
4851 | } |
4852 | |
4853 | intptr_t PolymorphicInstanceCallInstr::CallCount() const { |
4854 | return targets().AggregateCallCount(); |
4855 | } |
4856 | |
4857 | LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary( |
4858 | Zone* zone, |
4859 | bool optimizing) const { |
4860 | return MakeCallSummary(zone, this); |
4861 | } |
4862 | |
4863 | void PolymorphicInstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
4864 | ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(), |
4865 | argument_names()); |
4866 | UpdateReceiverSminess(compiler->zone()); |
4867 | compiler->EmitPolymorphicInstanceCall( |
4868 | this, targets(), args_info, deopt_id(), token_pos(), locs(), complete(), |
4869 | total_call_count(), !receiver_is_not_smi()); |
4870 | } |
4871 | |
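// If every receiver cid seen at this call site yields the same result for
// Object.runtimeType (all strings, all integers, or all doubles), returns
// that type; otherwise returns Type::null().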
4872 | TypePtr PolymorphicInstanceCallInstr::ComputeRuntimeType( |
4873 | const CallTargets& targets) { |
4874 | bool is_string = true; |
4875 | bool is_integer = true; |
4876 | bool is_double = true; |
4877 | |
4878 | const intptr_t num_checks = targets.length(); |
4879 | for (intptr_t i = 0; i < num_checks; i++) { |
4880 | ASSERT(targets.TargetAt(i)->target->raw() == |
4881 | targets.TargetAt(0)->target->raw()); |
4882 | const intptr_t start = targets[i].cid_start; |
4883 | const intptr_t end = targets[i].cid_end; |
4884 | for (intptr_t cid = start; cid <= end; cid++) { |
4885 | is_string = is_string && IsStringClassId(cid); |
4886 | is_integer = is_integer && IsIntegerClassId(cid); |
4887 | is_double = is_double && (cid == kDoubleCid); |
4888 | } |
4889 | } |
4890 | |
4891 | if (is_string) { |
4892 | ASSERT(!is_integer); |
4893 | ASSERT(!is_double); |
4894 | return Type::StringType(); |
4895 | } else if (is_integer) { |
4896 | ASSERT(!is_double); |
4897 | return Type::IntType(); |
4898 | } else if (is_double) { |
4899 | return Type::Double(); |
4900 | } |
4901 | |
4902 | return Type::null(); |
4903 | } |
4904 | |
4905 | Definition* InstanceCallInstr::Canonicalize(FlowGraph* flow_graph) { |
4906 | const intptr_t receiver_cid = Receiver()->Type()->ToCid(); |
4907 | |
4908 | // We could turn cold call sites for known receiver cids into a StaticCall. |
4909 | // However, that keeps the ICData of the InstanceCall from being updated. |
4910 | // This is fine if there is no later deoptimization, but if there is, then |
4911 | // the InstanceCall with the updated ICData for this receiver may then be |
4912 | // better optimized by the compiler. |
4913 | // |
4914 | // TODO(dartbug.com/37291): Allow this optimization, but accumulate affected |
  // InstanceCallInstrs and the corresponding receiver cids during compilation.
4916 | // After compilation, add receiver checks to the ICData for those call sites. |
4917 | if (Targets().is_empty()) return this; |
4918 | |
4919 | const CallTargets* new_target = |
4920 | FlowGraphCompiler::ResolveCallTargetsForReceiverCid( |
4921 | receiver_cid, |
4922 | String::Handle(flow_graph->zone(), ic_data()->target_name()), |
4923 | Array::Handle(flow_graph->zone(), ic_data()->arguments_descriptor())); |
4924 | if (new_target == NULL) { |
4925 | // No specialization. |
4926 | return this; |
4927 | } |
4928 | |
4929 | ASSERT(new_target->HasSingleTarget()); |
4930 | const Function& target = new_target->FirstTarget(); |
4931 | StaticCallInstr* specialized = StaticCallInstr::FromCall( |
4932 | flow_graph->zone(), this, target, new_target->AggregateCallCount()); |
4933 | flow_graph->InsertBefore(this, specialized, env(), FlowGraph::kValue); |
4934 | return specialized; |
4935 | } |
4936 | |
4937 | Definition* DispatchTableCallInstr::Canonicalize(FlowGraph* flow_graph) { |
  // TODO(dartbug.com/40188): Allow this to canonicalize into a StaticCall
  // when the input class id is constant.
4940 | return this; |
4941 | } |
4942 | |
4943 | Definition* PolymorphicInstanceCallInstr::Canonicalize(FlowGraph* flow_graph) { |
4944 | if (!IsSureToCallSingleRecognizedTarget()) { |
4945 | return this; |
4946 | } |
4947 | |
4948 | const Function& target = targets().FirstTarget(); |
4949 | if (target.recognized_kind() == MethodRecognizer::kObjectRuntimeType) { |
4950 | const AbstractType& type = |
4951 | AbstractType::Handle(ComputeRuntimeType(targets_)); |
4952 | if (!type.IsNull()) { |
4953 | return flow_graph->GetConstant(type); |
4954 | } |
4955 | } |
4956 | |
4957 | return this; |
4958 | } |
4959 | |
4960 | bool PolymorphicInstanceCallInstr::IsSureToCallSingleRecognizedTarget() const { |
4961 | if (CompilerState::Current().is_aot() && !complete()) return false; |
4962 | return targets_.HasSingleRecognizedTarget(); |
4963 | } |
4964 | |
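// Gives the call a more precise result type if the callee is a recognized
// list factory or has a result-type pragma. Returns true if a result type
// was set.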
4965 | bool StaticCallInstr::InitResultType(Zone* zone) { |
4966 | const intptr_t list_cid = FactoryRecognizer::GetResultCidOfListFactory( |
4967 | zone, function(), ArgumentCount()); |
4968 | if (list_cid != kDynamicCid) { |
4969 | SetResultType(zone, CompileType::FromCid(list_cid)); |
4970 | set_is_known_list_constructor(true); |
4971 | return true; |
4972 | } else if (function().has_pragma()) { |
4973 | const intptr_t recognized_cid = |
4974 | MethodRecognizer::ResultCidFromPragma(function()); |
4975 | if (recognized_cid != kDynamicCid) { |
4976 | SetResultType(zone, CompileType::FromCid(recognized_cid)); |
4977 | return true; |
4978 | } |
4979 | } |
4980 | return false; |
4981 | } |
4982 | |
4983 | Definition* StaticCallInstr::Canonicalize(FlowGraph* flow_graph) { |
4984 | if (!CompilerState::Current().is_aot()) { |
4985 | return this; |
4986 | } |
4987 | |
4988 | if (function().recognized_kind() == MethodRecognizer::kObjectRuntimeType) { |
4989 | if (input_use_list() == NULL) { |
4990 | // This function has only environment uses. In precompiled mode it is |
4991 | // fine to remove it - because we will never deoptimize. |
4992 | return flow_graph->constant_dead(); |
4993 | } |
4994 | } |
4995 | |
4996 | return this; |
4997 | } |
4998 | |
4999 | LocationSummary* StaticCallInstr::MakeLocationSummary(Zone* zone, |
5000 | bool optimizing) const { |
5001 | return MakeCallSummary(zone, this); |
5002 | } |
5003 | |
5004 | void StaticCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5005 | Zone* zone = compiler->zone(); |
5006 | const ICData* call_ic_data = NULL; |
5007 | if (!FLAG_propagate_ic_data || !compiler->is_optimizing() || |
5008 | (ic_data() == NULL)) { |
5009 | const Array& arguments_descriptor = |
5010 | Array::Handle(zone, GetArgumentsDescriptor()); |
5011 | const int num_args_checked = |
5012 | MethodRecognizer::NumArgsCheckedForStaticCall(function()); |
5013 | call_ic_data = compiler->GetOrAddStaticCallICData( |
5014 | deopt_id(), function(), arguments_descriptor, num_args_checked, |
5015 | rebind_rule_); |
5016 | } else { |
5017 | call_ic_data = &ICData::ZoneHandle(ic_data()->raw()); |
5018 | } |
5019 | ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(), |
5020 | argument_names()); |
5021 | compiler->GenerateStaticCall(deopt_id(), token_pos(), function(), args_info, |
5022 | locs(), *call_ic_data, rebind_rule_, |
5023 | entry_kind()); |
5024 | if (function().IsFactory()) { |
5025 | TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info(); |
5026 | if (type_usage_info != nullptr) { |
5027 | const Class& klass = Class::Handle(function().Owner()); |
5028 | RegisterTypeArgumentsUse(compiler->function(), type_usage_info, klass, |
5029 | ArgumentAt(0)); |
5030 | } |
5031 | } |
5032 | } |
5033 | |
5034 | intptr_t AssertAssignableInstr::statistics_tag() const { |
5035 | switch (kind_) { |
5036 | case kParameterCheck: |
5037 | return CombinedCodeStatistics::kTagAssertAssignableParameterCheck; |
5038 | case kInsertedByFrontend: |
5039 | return CombinedCodeStatistics::kTagAssertAssignableInsertedByFrontend; |
5040 | case kFromSource: |
5041 | return CombinedCodeStatistics::kTagAssertAssignableFromSource; |
5042 | case kUnknown: |
5043 | break; |
5044 | } |
5045 | |
5046 | return tag(); |
5047 | } |
5048 | |
5049 | void AssertAssignableInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5050 | compiler->GenerateAssertAssignable(value()->Type(), token_pos(), deopt_id(), |
5051 | dst_name(), locs()); |
5052 | ASSERT(locs()->in(0).reg() == locs()->out(0).reg()); |
5053 | } |
5054 | |
5055 | LocationSummary* AssertSubtypeInstr::MakeLocationSummary(Zone* zone, |
5056 | bool opt) const { |
5057 | if (!sub_type()->BindsToConstant() || !super_type()->BindsToConstant()) { |
5058 | // TODO(dartbug.com/40813): Handle setting up the non-constant case. |
5059 | UNREACHABLE(); |
5060 | } |
5061 | const intptr_t kNumInputs = 4; |
5062 | const intptr_t kNumTemps = 0; |
5063 | LocationSummary* summary = new (zone) |
5064 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); |
5065 | summary->set_in(0, Location::RegisterLocation( |
5066 | TypeTestABI::kInstantiatorTypeArgumentsReg)); |
5067 | summary->set_in( |
5068 | 1, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg)); |
5069 | summary->set_in(2, |
5070 | Location::Constant(sub_type()->definition()->AsConstant())); |
5071 | summary->set_in(3, |
5072 | Location::Constant(super_type()->definition()->AsConstant())); |
5073 | return summary; |
5074 | } |
5075 | |
5076 | void AssertSubtypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5077 | __ PushRegister(locs()->in(0).reg()); |
5078 | __ PushRegister(locs()->in(1).reg()); |
5079 | __ PushObject(locs()->in(2).constant()); |
5080 | __ PushObject(locs()->in(3).constant()); |
5081 | __ PushObject(dst_name()); |
5082 | |
5083 | compiler->GenerateRuntimeCall(token_pos(), deopt_id(), |
5084 | kSubtypeCheckRuntimeEntry, 5, locs()); |
5085 | |
5086 | __ Drop(5); |
5087 | } |
5088 | |
5089 | LocationSummary* DeoptimizeInstr::MakeLocationSummary(Zone* zone, |
5090 | bool opt) const { |
5091 | return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); |
5092 | } |
5093 | |
5094 | void DeoptimizeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5095 | __ Jump(compiler->AddDeoptStub(deopt_id(), deopt_reason_)); |
5096 | } |
5097 | |
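// Checks that the receiver's class id is one of the expected cids, either
// with a single bit-mask test (for dense cid sets) or with a sequence of
// cid range checks. Smis are handled separately before the class id is
// loaded; a failing check deoptimizes.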
5098 | void CheckClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5099 | compiler::Label* deopt = |
5100 | compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass, |
5101 | licm_hoisted_ ? ICData::kHoisted : 0); |
5102 | if (IsNullCheck()) { |
5103 | EmitNullCheck(compiler, deopt); |
5104 | return; |
5105 | } |
5106 | |
5107 | ASSERT(!cids_.IsMonomorphic() || !cids_.HasClassId(kSmiCid)); |
5108 | Register value = locs()->in(0).reg(); |
5109 | Register temp = locs()->temp(0).reg(); |
5110 | compiler::Label is_ok; |
5111 | |
5112 | __ BranchIfSmi(value, cids_.HasClassId(kSmiCid) ? &is_ok : deopt); |
5113 | |
5114 | __ LoadClassId(temp, value); |
5115 | |
5116 | if (IsBitTest()) { |
5117 | intptr_t min = cids_.ComputeLowestCid(); |
5118 | intptr_t max = cids_.ComputeHighestCid(); |
5119 | EmitBitTest(compiler, min, max, ComputeCidMask(), deopt); |
5120 | } else { |
5121 | const intptr_t num_checks = cids_.length(); |
5122 | const bool use_near_jump = num_checks < 5; |
5123 | int bias = 0; |
5124 | for (intptr_t i = 0; i < num_checks; i++) { |
5125 | intptr_t cid_start = cids_[i].cid_start; |
5126 | intptr_t cid_end = cids_[i].cid_end; |
5127 | if (cid_start == kSmiCid && cid_end == kSmiCid) { |
5128 | continue; // We already handled Smi above. |
5129 | } |
5130 | if (cid_start == kSmiCid) cid_start++; |
5131 | if (cid_end == kSmiCid) cid_end--; |
5132 | const bool is_last = |
5133 | (i == num_checks - 1) || |
5134 | (i == num_checks - 2 && cids_[i + 1].cid_start == kSmiCid && |
5135 | cids_[i + 1].cid_end == kSmiCid); |
5136 | bias = EmitCheckCid(compiler, bias, cid_start, cid_end, is_last, &is_ok, |
5137 | deopt, use_near_jump); |
5138 | } |
5139 | } |
5140 | __ Bind(&is_ok); |
5141 | } |
5142 | |
5143 | LocationSummary* GenericCheckBoundInstr::MakeLocationSummary(Zone* zone, |
5144 | bool opt) const { |
5145 | const intptr_t kNumInputs = 2; |
5146 | const intptr_t kNumTemps = 0; |
5147 | LocationSummary* locs = new (zone) LocationSummary( |
5148 | zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSharedSlowPath); |
5149 | locs->set_in(kLengthPos, |
5150 | Location::RegisterLocation(RangeErrorABI::kLengthReg)); |
5151 | locs->set_in(kIndexPos, Location::RegisterLocation(RangeErrorABI::kIndexReg)); |
5152 | return locs; |
5153 | } |
5154 | |
5155 | void GenericCheckBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5156 | ASSERT(representation() == RequiredInputRepresentation(kIndexPos)); |
5157 | ASSERT(representation() == RequiredInputRepresentation(kLengthPos)); |
5158 | |
5159 | RangeErrorSlowPath* slow_path = |
5160 | new RangeErrorSlowPath(this, compiler->CurrentTryIndex()); |
5161 | compiler->AddSlowPathCode(slow_path); |
5162 | Location length_loc = locs()->in(kLengthPos); |
5163 | Location index_loc = locs()->in(kIndexPos); |
5164 | Register length = length_loc.reg(); |
5165 | Register index = index_loc.reg(); |
5166 | const intptr_t index_cid = this->index()->Type()->ToCid(); |
5167 | |
5168 | // The length comes from one of our variable-sized heap objects (e.g. typed |
5169 | // data array) and is therefore guaranteed to be in the positive Smi range. |
5170 | if (representation() == kTagged) { |
5171 | if (index_cid != kSmiCid) { |
5172 | __ BranchIfNotSmi(index, slow_path->entry_label()); |
5173 | } |
5174 | } else { |
5175 | ASSERT(representation() == kUnboxedInt64); |
5176 | } |
5177 | __ CompareRegisters(index, length); |
5178 | __ BranchIf(UNSIGNED_GREATER_EQUAL, slow_path->entry_label()); |
5179 | } |
5180 | |
5181 | LocationSummary* CheckNullInstr::MakeLocationSummary(Zone* zone, |
5182 | bool opt) const { |
5183 | const intptr_t kNumInputs = 1; |
5184 | const intptr_t kNumTemps = 0; |
5185 | LocationSummary* locs = new (zone) LocationSummary( |
5186 | zone, kNumInputs, kNumTemps, |
5187 | UseSharedSlowPathStub(opt) ? LocationSummary::kCallOnSharedSlowPath |
5188 | : LocationSummary::kCallOnSlowPath); |
5189 | locs->set_in(0, Location::RequiresRegister()); |
5190 | return locs; |
5191 | } |
5192 | |
5193 | void CheckNullInstr::AddMetadataForRuntimeCall(CheckNullInstr* check_null, |
5194 | FlowGraphCompiler* compiler) { |
5195 | compiler->AddNullCheck(check_null->token_pos(), check_null->function_name()); |
5196 | } |
5197 | |
5198 | void RangeErrorSlowPath::EmitSharedStubCall(FlowGraphCompiler* compiler, |
5199 | bool save_fpu_registers) { |
5200 | #if defined(TARGET_ARCH_IA32) |
5201 | UNREACHABLE(); |
5202 | #else |
5203 | auto object_store = compiler->isolate()->object_store(); |
5204 | const auto& stub = Code::ZoneHandle( |
5205 | compiler->zone(), |
5206 | save_fpu_registers |
5207 | ? object_store->range_error_stub_with_fpu_regs_stub() |
5208 | : object_store->range_error_stub_without_fpu_regs_stub()); |
5209 | compiler->EmitCallToStub(stub); |
5210 | #endif |
5211 | } |
5212 | |
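// Loads the unboxed value from a box, deoptimizing if the input is not a
// box with the expected class id (or a Smi, when Smi conversion is
// supported). If the input's only non-null cid already matches the box cid,
// a null check alone suffices.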
5213 | void UnboxInstr::EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler) { |
5214 | const intptr_t box_cid = BoxCid(); |
5215 | const Register box = locs()->in(0).reg(); |
5216 | const Register temp = |
5217 | (locs()->temp_count() > 0) ? locs()->temp(0).reg() : kNoRegister; |
5218 | compiler::Label* deopt = |
5219 | compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnbox); |
5220 | compiler::Label is_smi; |
5221 | |
5222 | if ((value()->Type()->ToNullableCid() == box_cid) && |
5223 | value()->Type()->is_nullable()) { |
5224 | __ CompareObject(box, Object::null_object()); |
5225 | __ BranchIf(EQUAL, deopt); |
5226 | } else { |
5227 | __ BranchIfSmi(box, CanConvertSmi() ? &is_smi : deopt); |
5228 | __ CompareClassId(box, box_cid, temp); |
5229 | __ BranchIf(NOT_EQUAL, deopt); |
5230 | } |
5231 | |
5232 | EmitLoadFromBox(compiler); |
5233 | |
5234 | if (is_smi.IsLinked()) { |
5235 | compiler::Label done; |
5236 | __ Jump(&done); |
5237 | __ Bind(&is_smi); |
5238 | EmitSmiConversion(compiler); |
5239 | __ Bind(&done); |
5240 | } |
5241 | } |
5242 | |
5243 | void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5244 | if (SpeculativeModeOfInputs() == kNotSpeculative) { |
5245 | switch (representation()) { |
5246 | case kUnboxedDouble: |
5247 | case kUnboxedFloat: |
5248 | case kUnboxedFloat32x4: |
5249 | case kUnboxedFloat64x2: |
5250 | case kUnboxedInt32x4: |
5251 | EmitLoadFromBox(compiler); |
5252 | break; |
5253 | |
5254 | case kUnboxedInt32: |
5255 | EmitLoadInt32FromBoxOrSmi(compiler); |
5256 | break; |
5257 | |
5258 | case kUnboxedInt64: { |
5259 | if (value()->Type()->ToCid() == kSmiCid) { |
5260 | // Smi -> int64 conversion is more efficient than |
5261 | // handling arbitrary smi/mint. |
5262 | EmitSmiConversion(compiler); |
5263 | } else { |
5264 | EmitLoadInt64FromBoxOrSmi(compiler); |
5265 | } |
5266 | break; |
5267 | } |
5268 | default: |
5269 | UNREACHABLE(); |
5270 | break; |
5271 | } |
5272 | } else { |
5273 | ASSERT(SpeculativeModeOfInputs() == kGuardInputs); |
5274 | const intptr_t value_cid = value()->Type()->ToCid(); |
5275 | const intptr_t box_cid = BoxCid(); |
5276 | |
5277 | if (value_cid == box_cid) { |
5278 | EmitLoadFromBox(compiler); |
5279 | } else if (CanConvertSmi() && (value_cid == kSmiCid)) { |
5280 | EmitSmiConversion(compiler); |
5281 | } else if (representation() == kUnboxedInt32 && value()->Type()->IsInt()) { |
5282 | EmitLoadInt32FromBoxOrSmi(compiler); |
5283 | } else if (representation() == kUnboxedInt64 && value()->Type()->IsInt()) { |
5284 | EmitLoadInt64FromBoxOrSmi(compiler); |
5285 | } else { |
5286 | ASSERT(CanDeoptimize()); |
5287 | EmitLoadFromBoxWithDeopt(compiler); |
5288 | } |
5289 | } |
5290 | } |
5291 | |
5292 | Environment* Environment::From(Zone* zone, |
5293 | const GrowableArray<Definition*>& definitions, |
5294 | intptr_t fixed_parameter_count, |
5295 | const ParsedFunction& parsed_function) { |
5296 | Environment* env = new (zone) Environment( |
5297 | definitions.length(), fixed_parameter_count, parsed_function, NULL); |
5298 | for (intptr_t i = 0; i < definitions.length(); ++i) { |
5299 | env->values_.Add(new (zone) Value(definitions[i])); |
5300 | } |
5301 | return env; |
5302 | } |
5303 | |
5304 | void Environment::PushValue(Value* value) { |
5305 | values_.Add(value); |
5306 | } |
5307 | |
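// Returns a deep copy of the first [length] values of this environment,
// including all outer environments and, if present, the per-value
// locations.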
5308 | Environment* Environment::DeepCopy(Zone* zone, intptr_t length) const { |
5309 | ASSERT(length <= values_.length()); |
5310 | Environment* copy = |
5311 | new (zone) Environment(length, fixed_parameter_count_, parsed_function_, |
5312 | (outer_ == NULL) ? NULL : outer_->DeepCopy(zone)); |
5313 | copy->deopt_id_ = this->deopt_id_; |
5314 | if (locations_ != NULL) { |
5315 | Location* new_locations = zone->Alloc<Location>(length); |
5316 | copy->set_locations(new_locations); |
5317 | } |
5318 | for (intptr_t i = 0; i < length; ++i) { |
5319 | copy->values_.Add(values_[i]->CopyWithType(zone)); |
5320 | if (locations_ != NULL) { |
5321 | copy->locations_[i] = locations_[i].Copy(); |
5322 | } |
5323 | } |
5324 | return copy; |
5325 | } |
5326 | |
5327 | // Copies the environment and updates the environment use lists. |
5328 | void Environment::DeepCopyTo(Zone* zone, Instruction* instr) const { |
5329 | for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) { |
5330 | it.CurrentValue()->RemoveFromUseList(); |
5331 | } |
5332 | |
5333 | Environment* copy = DeepCopy(zone); |
5334 | instr->SetEnvironment(copy); |
5335 | for (Environment::DeepIterator it(copy); !it.Done(); it.Advance()) { |
5336 | Value* value = it.CurrentValue(); |
5337 | value->definition()->AddEnvUse(value); |
5338 | } |
5339 | } |
5340 | |
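// Copies the environment to [instr] as if it were taken after the
// instruction: the top [argc] values are replaced by [dead] and [result] is
// pushed on top. Also updates the environment use lists.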
5341 | void Environment::DeepCopyAfterTo(Zone* zone, |
5342 | Instruction* instr, |
5343 | intptr_t argc, |
5344 | Definition* dead, |
5345 | Definition* result) const { |
5346 | for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) { |
5347 | it.CurrentValue()->RemoveFromUseList(); |
5348 | } |
5349 | |
5350 | Environment* copy = DeepCopy(zone, values_.length() - argc); |
5351 | for (intptr_t i = 0; i < argc; i++) { |
5352 | copy->values_.Add(new (zone) Value(dead)); |
5353 | } |
5354 | copy->values_.Add(new (zone) Value(result)); |
5355 | |
5356 | instr->SetEnvironment(copy); |
5357 | for (Environment::DeepIterator it(copy); !it.Done(); it.Advance()) { |
5358 | Value* value = it.CurrentValue(); |
5359 | value->definition()->AddEnvUse(value); |
5360 | } |
5361 | } |
5362 | |
5363 | // Copies the environment as outer on an inlined instruction and updates the |
5364 | // environment use lists. |
5365 | void Environment::DeepCopyToOuter(Zone* zone, |
5366 | Instruction* instr, |
5367 | intptr_t outer_deopt_id) const { |
5368 | // Create a deep copy removing caller arguments from the environment. |
5370 | ASSERT(instr->env()->outer() == NULL); |
5371 | intptr_t argument_count = instr->env()->fixed_parameter_count(); |
5372 | Environment* copy = DeepCopy(zone, values_.length() - argument_count); |
5373 | copy->deopt_id_ = outer_deopt_id; |
5374 | instr->env()->outer_ = copy; |
5375 | intptr_t use_index = instr->env()->Length(); // Start index after inner. |
5376 | for (Environment::DeepIterator it(copy); !it.Done(); it.Advance()) { |
5377 | Value* value = it.CurrentValue(); |
5378 | value->set_instruction(instr); |
5379 | value->set_use_index(use_index++); |
5380 | value->definition()->AddEnvUse(value); |
5381 | } |
5382 | } |
5383 | |
5384 | ComparisonInstr* DoubleTestOpInstr::CopyWithNewOperands(Value* new_left, |
5385 | Value* new_right) { |
5386 | UNREACHABLE(); |
5387 | return NULL; |
5388 | } |
5389 | |
5390 | ComparisonInstr* EqualityCompareInstr::CopyWithNewOperands(Value* new_left, |
5391 | Value* new_right) { |
5392 | return new EqualityCompareInstr(token_pos(), kind(), new_left, new_right, |
5393 | operation_cid(), deopt_id()); |
5394 | } |
5395 | |
5396 | ComparisonInstr* RelationalOpInstr::CopyWithNewOperands(Value* new_left, |
5397 | Value* new_right) { |
5398 | return new RelationalOpInstr(token_pos(), kind(), new_left, new_right, |
5399 | operation_cid(), deopt_id(), |
5400 | SpeculativeModeOfInputs()); |
5401 | } |
5402 | |
5403 | ComparisonInstr* StrictCompareInstr::CopyWithNewOperands(Value* new_left, |
5404 | Value* new_right) { |
5405 | return new StrictCompareInstr(token_pos(), kind(), new_left, new_right, |
5406 | needs_number_check(), DeoptId::kNone); |
5407 | } |
5408 | |
5409 | ComparisonInstr* TestSmiInstr::CopyWithNewOperands(Value* new_left, |
5410 | Value* new_right) { |
5411 | return new TestSmiInstr(token_pos(), kind(), new_left, new_right); |
5412 | } |
5413 | |
5414 | ComparisonInstr* TestCidsInstr::CopyWithNewOperands(Value* new_left, |
5415 | Value* new_right) { |
5416 | return new TestCidsInstr(token_pos(), kind(), new_left, cid_results(), |
5417 | deopt_id()); |
5418 | } |
5419 | |
5420 | bool TestCidsInstr::AttributesEqual(Instruction* other) const { |
5421 | TestCidsInstr* other_instr = other->AsTestCids(); |
5422 | if (!ComparisonInstr::AttributesEqual(other)) { |
5423 | return false; |
5424 | } |
5425 | if (cid_results().length() != other_instr->cid_results().length()) { |
5426 | return false; |
5427 | } |
5428 | for (intptr_t i = 0; i < cid_results().length(); i++) { |
5429 | if (cid_results()[i] != other_instr->cid_results()[i]) { |
5430 | return false; |
5431 | } |
5432 | } |
5433 | return true; |
5434 | } |
5435 | |
5436 | static bool BindsToSmiConstant(Value* value) { |
5437 | return value->BindsToConstant() && value->BoundConstant().IsSmi(); |
5438 | } |
5439 | |
5440 | bool IfThenElseInstr::Supports(ComparisonInstr* comparison, |
5441 | Value* v1, |
5442 | Value* v2) { |
5443 | bool is_smi_result = BindsToSmiConstant(v1) && BindsToSmiConstant(v2); |
5444 | if (comparison->IsStrictCompare()) { |
5445 | // Strict comparison with number checks calls a stub and is not supported |
5446 | // by if-conversion. |
5447 | return is_smi_result && |
5448 | !comparison->AsStrictCompare()->needs_number_check(); |
5449 | } |
5450 | if (comparison->operation_cid() != kSmiCid) { |
5451 | // Non-smi comparisons are not supported by if-conversion. |
5452 | return false; |
5453 | } |
5454 | return is_smi_result; |
5455 | } |
5456 | |
5457 | bool PhiInstr::IsRedundant() const { |
5458 | ASSERT(InputCount() > 1); |
5459 | Definition* first = InputAt(0)->definition(); |
5460 | for (intptr_t i = 1; i < InputCount(); ++i) { |
5461 | Definition* def = InputAt(i)->definition(); |
5462 | if (def != first) return false; |
5463 | } |
5464 | return true; |
5465 | } |
5466 | |
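// Returns the definition that can replace this phi when all inputs reduce
// to the same original definition, possibly through chains of redefinitions;
// in that case the most specific redefinition common to all inputs is
// chosen. Returns nullptr if the inputs have different original definitions.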
5467 | Definition* PhiInstr::GetReplacementForRedundantPhi() const { |
5468 | Definition* first = InputAt(0)->definition(); |
5469 | if (InputCount() == 1) { |
5470 | return first; |
5471 | } |
5472 | ASSERT(InputCount() > 1); |
5473 | Definition* first_origin = first->OriginalDefinition(); |
5474 | bool look_for_redefinition = false; |
5475 | for (intptr_t i = 1; i < InputCount(); ++i) { |
5476 | Definition* def = InputAt(i)->definition(); |
5477 | if (def != first) { |
5478 | if (def->OriginalDefinition() != first_origin) return nullptr; |
5479 | look_for_redefinition = true; |
5480 | } |
5481 | } |
5482 | if (look_for_redefinition) { |
5483 | // Find the most specific redefinition which is common for all inputs |
5484 | // (the longest common chain). |
5485 | Definition* redef = first; |
5486 | for (intptr_t i = 1, n = InputCount(); redef != first_origin && i < n;) { |
5487 | Value* value = InputAt(i); |
5488 | bool found = false; |
5489 | do { |
5490 | Definition* def = value->definition(); |
5491 | if (def == redef) { |
5492 | found = true; |
5493 | break; |
5494 | } |
5495 | value = def->RedefinedValue(); |
5496 | } while (value != nullptr); |
5497 | if (found) { |
5498 | ++i; |
5499 | } else { |
5500 | ASSERT(redef != first_origin); |
5501 | redef = redef->RedefinedValue()->definition(); |
5502 | } |
5503 | } |
5504 | return redef; |
5505 | } else { |
5506 | return first; |
5507 | } |
5508 | } |
5509 | |
5510 | Definition* PhiInstr::Canonicalize(FlowGraph* flow_graph) { |
5511 | Definition* replacement = GetReplacementForRedundantPhi(); |
5512 | return (replacement != nullptr) ? replacement : this; |
5513 | } |
5514 | |
5515 | // Removes current phi from graph and sets current to previous phi. |
5516 | void PhiIterator::RemoveCurrentFromGraph() { |
5517 | Current()->UnuseAllInputs(); |
5518 | (*phis_)[index_] = phis_->Last(); |
5519 | phis_->RemoveLast(); |
5520 | --index_; |
5521 | } |
5522 | |
5523 | Instruction* CheckConditionInstr::Canonicalize(FlowGraph* graph) { |
5524 | if (StrictCompareInstr* strict_compare = comparison()->AsStrictCompare()) { |
5525 | if ((InputAt(0)->definition()->OriginalDefinition() == |
5526 | InputAt(1)->definition()->OriginalDefinition()) && |
5527 | strict_compare->kind() == Token::kEQ_STRICT) { |
5528 | return nullptr; |
5529 | } |
5530 | } |
5531 | return this; |
5532 | } |
5533 | |
5534 | bool CheckArrayBoundInstr::IsFixedLengthArrayType(intptr_t cid) { |
5535 | return LoadFieldInstr::IsFixedLengthArrayCid(cid); |
5536 | } |
5537 | |
5538 | Definition* CheckBoundBase::Canonicalize(FlowGraph* flow_graph) { |
5539 | return IsRedundant() ? index()->definition() : this; |
5540 | } |
5541 | |
5542 | intptr_t CheckArrayBoundInstr::LengthOffsetFor(intptr_t class_id) { |
5543 | if (IsTypedDataClassId(class_id) || IsTypedDataViewClassId(class_id) || |
5544 | IsExternalTypedDataClassId(class_id)) { |
5545 | return compiler::target::TypedDataBase::length_offset(); |
5546 | } |
5547 | |
5548 | switch (class_id) { |
5549 | case kGrowableObjectArrayCid: |
5550 | return compiler::target::GrowableObjectArray::length_offset(); |
5551 | case kOneByteStringCid: |
5552 | case kTwoByteStringCid: |
5553 | return compiler::target::String::length_offset(); |
5554 | case kArrayCid: |
5555 | case kImmutableArrayCid: |
5556 | return compiler::target::Array::length_offset(); |
5557 | default: |
5558 | UNREACHABLE(); |
5559 | return -1; |
5560 | } |
5561 | } |
5562 | |
5563 | const Function& StringInterpolateInstr::CallFunction() const { |
5564 | if (function_.IsNull()) { |
5565 | const int kTypeArgsLen = 0; |
5566 | const int kNumberOfArguments = 1; |
5567 | const Array& kNoArgumentNames = Object::null_array(); |
5568 | const Class& cls = |
5569 | Class::Handle(Library::LookupCoreClass(Symbols::StringBase())); |
5570 | ASSERT(!cls.IsNull()); |
5571 | function_ = Resolver::ResolveStatic( |
5572 | cls, Library::PrivateCoreLibName(Symbols::Interpolate()), kTypeArgsLen, |
5573 | kNumberOfArguments, kNoArgumentNames); |
5574 | } |
5575 | ASSERT(!function_.IsNull()); |
5576 | return function_; |
5577 | } |
5578 | |
// Replaces StringInterpolateInstr with a constant string if all inputs are
// constants of type string, number, boolean, or null.
// Leaves the CreateArrayInstr and StoreIndexedInstr in the stream in case
// deoptimization occurs.
5583 | Definition* StringInterpolateInstr::Canonicalize(FlowGraph* flow_graph) { |
5584 | // The following graph structure is generated by the graph builder: |
5585 | // v2 <- CreateArray(v0) |
5586 | // StoreIndexed(v2, v3, v4) -- v3:constant index, v4: value. |
5587 | // .. |
5588 | // v8 <- StringInterpolate(v2) |
5589 | |
5590 | // Don't compile-time fold when optimizing the interpolation function itself. |
5591 | if (flow_graph->function().raw() == CallFunction().raw()) { |
5592 | return this; |
5593 | } |
5594 | |
5595 | CreateArrayInstr* create_array = value()->definition()->AsCreateArray(); |
5596 | if (create_array == nullptr) { |
    // Do not try to fold the interpolation if the array is an OSR argument.
5598 | ASSERT(flow_graph->IsCompiledForOsr()); |
5599 | ASSERT(value()->definition()->IsPhi()); |
5600 | return this; |
5601 | } |
5602 | // Check if the string interpolation has only constant inputs. |
5603 | Value* num_elements = create_array->num_elements(); |
5604 | if (!num_elements->BindsToConstant() || |
5605 | !num_elements->BoundConstant().IsSmi()) { |
5606 | return this; |
5607 | } |
5608 | const intptr_t length = Smi::Cast(num_elements->BoundConstant()).Value(); |
5609 | Thread* thread = Thread::Current(); |
5610 | Zone* zone = thread->zone(); |
5611 | GrowableHandlePtrArray<const String> pieces(zone, length); |
5612 | for (intptr_t i = 0; i < length; i++) { |
5613 | pieces.Add(Object::null_string()); |
5614 | } |
5615 | |
5616 | for (Value::Iterator it(create_array->input_use_list()); !it.Done(); |
5617 | it.Advance()) { |
5618 | Instruction* curr = it.Current()->instruction(); |
5619 | if (curr == this) continue; |
5620 | |
5621 | StoreIndexedInstr* store = curr->AsStoreIndexed(); |
5622 | if (store == nullptr || !store->index()->BindsToConstant() || |
5623 | !store->index()->BoundConstant().IsSmi()) { |
5624 | return this; |
5625 | } |
5626 | intptr_t store_index = Smi::Cast(store->index()->BoundConstant()).Value(); |
5627 | ASSERT(store_index < length); |
5629 | if (store->value()->definition()->IsConstant()) { |
5630 | ASSERT(store->index()->BindsToConstant()); |
5631 | const Object& obj = store->value()->definition()->AsConstant()->value(); |
5632 | // TODO(srdjan): Verify if any other types should be converted as well. |
5633 | if (obj.IsString()) { |
5634 | pieces.SetAt(store_index, String::Cast(obj)); |
5635 | } else if (obj.IsSmi()) { |
5636 | const char* cstr = obj.ToCString(); |
5637 | pieces.SetAt(store_index, |
5638 | String::Handle(zone, String::New(cstr, Heap::kOld))); |
5639 | } else if (obj.IsBool()) { |
5640 | pieces.SetAt(store_index, Bool::Cast(obj).value() ? Symbols::True() |
5641 | : Symbols::False()); |
5642 | } else if (obj.IsNull()) { |
5643 | pieces.SetAt(store_index, Symbols::null()); |
5644 | } else { |
5645 | return this; |
5646 | } |
5647 | } else { |
5648 | return this; |
5649 | } |
5650 | } |
5651 | |
5652 | const String& concatenated = |
5653 | String::ZoneHandle(zone, Symbols::FromConcatAll(thread, pieces)); |
5654 | return flow_graph->GetConstant(concatenated); |
5655 | } |
5656 | |
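// Strengthens the given alignment to kAlignedAccess for element types where
// unaligned accesses cannot occur: byte-sized elements are always aligned,
// and SIMD element types are treated as aligned (see the TODO below).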
5657 | static AlignmentType StrengthenAlignment(intptr_t cid, |
5658 | AlignmentType alignment) { |
5659 | switch (cid) { |
5660 | case kTypedDataInt8ArrayCid: |
5661 | case kTypedDataUint8ArrayCid: |
5662 | case kTypedDataUint8ClampedArrayCid: |
5663 | case kExternalTypedDataUint8ArrayCid: |
5664 | case kExternalTypedDataUint8ClampedArrayCid: |
5665 | case kOneByteStringCid: |
5666 | case kExternalOneByteStringCid: |
5667 | // Don't need to worry about alignment for accessing bytes. |
5668 | return kAlignedAccess; |
5669 | case kTypedDataFloat64x2ArrayCid: |
5670 | case kTypedDataInt32x4ArrayCid: |
5671 | case kTypedDataFloat32x4ArrayCid: |
5672 | // TODO(rmacnak): Investigate alignment requirements of floating point |
5673 | // loads. |
5674 | return kAlignedAccess; |
5675 | } |
5676 | |
5677 | return alignment; |
5678 | } |
5679 | |
5680 | LoadIndexedInstr::LoadIndexedInstr(Value* array, |
5681 | Value* index, |
5682 | bool index_unboxed, |
5683 | intptr_t index_scale, |
5684 | intptr_t class_id, |
5685 | AlignmentType alignment, |
5686 | intptr_t deopt_id, |
5687 | TokenPosition token_pos, |
5688 | CompileType* result_type) |
5689 | : TemplateDefinition(deopt_id), |
5690 | index_unboxed_(index_unboxed), |
5691 | index_scale_(index_scale), |
5692 | class_id_(class_id), |
5693 | alignment_(StrengthenAlignment(class_id, alignment)), |
5694 | token_pos_(token_pos), |
5695 | result_type_(result_type) { |
5696 | SetInputAt(0, array); |
5697 | SetInputAt(1, index); |
5698 | } |
5699 | |
5700 | Definition* LoadIndexedInstr::Canonicalize(FlowGraph* flow_graph) { |
5701 | auto Z = flow_graph->zone(); |
5702 | if (auto box = index()->definition()->AsBoxInt64()) { |
    // TODO(dartbug.com/39432): Make LoadIndexed fully support unboxed indices.
5704 | if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) { |
5705 | auto load = new (Z) LoadIndexedInstr( |
5706 | array()->CopyWithType(Z), box->value()->CopyWithType(Z), |
5707 | /*index_unboxed=*/true, index_scale(), class_id(), alignment_, |
5708 | GetDeoptId(), token_pos(), result_type_); |
5709 | flow_graph->InsertBefore(this, load, env(), FlowGraph::kValue); |
5710 | return load; |
5711 | } |
5712 | } |
5713 | return this; |
5714 | } |
5715 | |
5716 | StoreIndexedInstr::StoreIndexedInstr(Value* array, |
5717 | Value* index, |
5718 | Value* value, |
5719 | StoreBarrierType emit_store_barrier, |
5720 | bool index_unboxed, |
5721 | intptr_t index_scale, |
5722 | intptr_t class_id, |
5723 | AlignmentType alignment, |
5724 | intptr_t deopt_id, |
5725 | TokenPosition token_pos, |
5726 | SpeculativeMode speculative_mode) |
5727 | : TemplateInstruction(deopt_id), |
5728 | emit_store_barrier_(emit_store_barrier), |
5729 | index_unboxed_(index_unboxed), |
5730 | index_scale_(index_scale), |
5731 | class_id_(class_id), |
5732 | alignment_(StrengthenAlignment(class_id, alignment)), |
5733 | token_pos_(token_pos), |
5734 | speculative_mode_(speculative_mode) { |
5735 | SetInputAt(kArrayPos, array); |
5736 | SetInputAt(kIndexPos, index); |
5737 | SetInputAt(kValuePos, value); |
5738 | } |
5739 | |
5740 | Instruction* StoreIndexedInstr::Canonicalize(FlowGraph* flow_graph) { |
5741 | auto Z = flow_graph->zone(); |
5742 | if (auto box = index()->definition()->AsBoxInt64()) { |
    // TODO(dartbug.com/39432): Make StoreIndexed fully support unboxed indices.
5744 | if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) { |
5745 | auto store = new (Z) StoreIndexedInstr( |
5746 | array()->CopyWithType(Z), box->value()->CopyWithType(Z), |
5747 | value()->CopyWithType(Z), emit_store_barrier_, |
5748 | /*index_unboxed=*/true, index_scale(), class_id(), alignment_, |
5749 | GetDeoptId(), token_pos(), speculative_mode_); |
5750 | flow_graph->InsertBefore(this, store, env(), FlowGraph::kEffect); |
5751 | return nullptr; |
5752 | } |
5753 | } |
5754 | return this; |
5755 | } |
5756 | |
5757 | bool Utf8ScanInstr::IsScanFlagsUnboxed() const { |
5758 | return FlowGraphCompiler::IsUnboxedField(scan_flags_field_.field()); |
5759 | } |
5760 | |
5761 | InvokeMathCFunctionInstr::InvokeMathCFunctionInstr( |
5762 | ZoneGrowableArray<Value*>* inputs, |
5763 | intptr_t deopt_id, |
5764 | MethodRecognizer::Kind recognized_kind, |
5765 | TokenPosition token_pos) |
5766 | : PureDefinition(deopt_id), |
5767 | inputs_(inputs), |
5768 | recognized_kind_(recognized_kind), |
5769 | token_pos_(token_pos) { |
5770 | ASSERT(inputs_->length() == ArgumentCountFor(recognized_kind_)); |
5771 | for (intptr_t i = 0; i < inputs_->length(); ++i) { |
5772 | ASSERT((*inputs)[i] != NULL); |
5773 | (*inputs)[i]->set_instruction(this); |
5774 | (*inputs)[i]->set_use_index(i); |
5775 | } |
5776 | } |
5777 | |
5778 | intptr_t InvokeMathCFunctionInstr::ArgumentCountFor( |
5779 | MethodRecognizer::Kind kind) { |
5780 | switch (kind) { |
5781 | case MethodRecognizer::kDoubleTruncate: |
5782 | case MethodRecognizer::kDoubleFloor: |
5783 | case MethodRecognizer::kDoubleCeil: { |
5784 | ASSERT(!TargetCPUFeatures::double_truncate_round_supported()); |
5785 | return 1; |
5786 | } |
5787 | case MethodRecognizer::kDoubleRound: |
5788 | case MethodRecognizer::kMathAtan: |
5789 | case MethodRecognizer::kMathTan: |
5790 | case MethodRecognizer::kMathAcos: |
5791 | case MethodRecognizer::kMathAsin: |
5792 | case MethodRecognizer::kMathSin: |
5793 | case MethodRecognizer::kMathCos: |
5794 | return 1; |
5795 | case MethodRecognizer::kDoubleMod: |
5796 | case MethodRecognizer::kMathDoublePow: |
5797 | case MethodRecognizer::kMathAtan2: |
5798 | return 2; |
5799 | default: |
5800 | UNREACHABLE(); |
5801 | } |
5802 | return 0; |
5803 | } |
5804 | |
5805 | const RuntimeEntry& InvokeMathCFunctionInstr::TargetFunction() const { |
5806 | switch (recognized_kind_) { |
5807 | case MethodRecognizer::kDoubleTruncate: |
5808 | return kLibcTruncRuntimeEntry; |
5809 | case MethodRecognizer::kDoubleRound: |
5810 | return kLibcRoundRuntimeEntry; |
5811 | case MethodRecognizer::kDoubleFloor: |
5812 | return kLibcFloorRuntimeEntry; |
5813 | case MethodRecognizer::kDoubleCeil: |
5814 | return kLibcCeilRuntimeEntry; |
5815 | case MethodRecognizer::kMathDoublePow: |
5816 | return kLibcPowRuntimeEntry; |
5817 | case MethodRecognizer::kDoubleMod: |
5818 | return kDartModuloRuntimeEntry; |
5819 | case MethodRecognizer::kMathTan: |
5820 | return kLibcTanRuntimeEntry; |
5821 | case MethodRecognizer::kMathAsin: |
5822 | return kLibcAsinRuntimeEntry; |
5823 | case MethodRecognizer::kMathSin: |
5824 | return kLibcSinRuntimeEntry; |
5825 | case MethodRecognizer::kMathCos: |
5826 | return kLibcCosRuntimeEntry; |
5827 | case MethodRecognizer::kMathAcos: |
5828 | return kLibcAcosRuntimeEntry; |
5829 | case MethodRecognizer::kMathAtan: |
5830 | return kLibcAtanRuntimeEntry; |
5831 | case MethodRecognizer::kMathAtan2: |
5832 | return kLibcAtan2RuntimeEntry; |
5833 | default: |
5834 | UNREACHABLE(); |
5835 | } |
5836 | return kLibcPowRuntimeEntry; |
5837 | } |
5838 | |
5839 | const char* MathUnaryInstr::KindToCString(MathUnaryKind kind) { |
5840 | switch (kind) { |
5841 | case kIllegal: |
5842 | return "illegal" ; |
5843 | case kSqrt: |
5844 | return "sqrt" ; |
5845 | case kDoubleSquare: |
5846 | return "double-square" ; |
5847 | } |
5848 | UNREACHABLE(); |
5849 | return "" ; |
5850 | } |
5851 | |
5852 | TruncDivModInstr::TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id) |
5853 | : TemplateDefinition(deopt_id) { |
5854 | SetInputAt(0, lhs); |
5855 | SetInputAt(1, rhs); |
5856 | } |
5857 | |
5858 | intptr_t TruncDivModInstr::OutputIndexOf(Token::Kind token) { |
5859 | switch (token) { |
5860 | case Token::kTRUNCDIV: |
5861 | return 0; |
5862 | case Token::kMOD: |
5863 | return 1; |
5864 | default: |
5865 | UNIMPLEMENTED(); |
5866 | return -1; |
5867 | } |
5868 | } |
5869 | |
5870 | LocationSummary* NativeCallInstr::MakeLocationSummary(Zone* zone, |
5871 | bool optimizing) const { |
5872 | return MakeCallSummary(zone, this); |
5873 | } |
5874 | |
5875 | void NativeCallInstr::SetupNative() { |
5876 | if (link_lazily()) { |
5877 | // Resolution will happen during NativeEntry::LinkNativeCall. |
5878 | return; |
5879 | } |
5880 | |
5881 | Zone* zone = Thread::Current()->zone(); |
5882 | const Class& cls = Class::Handle(zone, function().Owner()); |
5883 | const Library& library = Library::Handle(zone, cls.library()); |
5884 | |
5885 | Dart_NativeEntryResolver resolver = library.native_entry_resolver(); |
5886 | bool is_bootstrap_native = Bootstrap::IsBootstrapResolver(resolver); |
5887 | set_is_bootstrap_native(is_bootstrap_native); |
5888 | |
5889 | const int num_params = |
5890 | NativeArguments::ParameterCountForResolution(function()); |
5891 | bool auto_setup_scope = true; |
5892 | NativeFunction native_function = NativeEntry::ResolveNative( |
5893 | library, native_name(), num_params, &auto_setup_scope); |
  if (native_function == nullptr) {
5895 | Report::MessageF(Report::kError, Script::Handle(function().script()), |
5896 | function().token_pos(), Report::AtLocation, |
                     "native function '%s' (%" Pd " arguments) cannot be found",
5898 | native_name().ToCString(), function().NumParameters()); |
5899 | } |
5900 | set_is_auto_scope(auto_setup_scope); |
5901 | set_native_c_function(native_function); |
5902 | } |
5903 | |
5904 | #if !defined(TARGET_ARCH_ARM) |
5905 | |
5906 | LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const { |
5907 | UNREACHABLE(); |
5908 | } |
5909 | |
5910 | void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
5911 | UNREACHABLE(); |
5912 | } |
5913 | |
#endif  // !defined(TARGET_ARCH_ARM)
5915 | |
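// The address of the native target is an unboxed FFI intptr; every other
// input takes whatever representation the marshaller requires for that
// argument position.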
5916 | Representation FfiCallInstr::RequiredInputRepresentation(intptr_t idx) const { |
5917 | if (idx == TargetAddressIndex()) { |
5918 | return kUnboxedFfiIntPtr; |
5919 | } else { |
5920 | return marshaller_.RepInFfiCall(idx); |
5921 | } |
5922 | } |
5923 | |
5924 | #define Z zone_ |
5925 | |
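// Temp 0 holds the saved Dart frame pointer onto which EmitParamMoves
// rebases the argument locations; temp 1 serves as the scratch register
// for performing those moves.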
5926 | LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone, |
5927 | bool is_optimizing) const { |
5928 | // The temporary register needs to be callee-saved and not an argument |
5929 | // register. |
5930 | ASSERT(((1 << CallingConventions::kFirstCalleeSavedCpuReg) & |
5931 | CallingConventions::kArgumentRegisters) == 0); |
5932 | |
5933 | constexpr intptr_t kNumTemps = 2; |
5934 | |
5935 | LocationSummary* summary = new (zone) |
5936 | LocationSummary(zone, /*num_inputs=*/InputCount(), |
5937 | /*num_temps=*/kNumTemps, LocationSummary::kCall); |
5938 | |
5939 | summary->set_in(TargetAddressIndex(), |
5940 | Location::RegisterLocation( |
5941 | CallingConventions::kFirstNonArgumentRegister)); |
5942 | summary->set_temp(0, Location::RegisterLocation( |
5943 | CallingConventions::kSecondNonArgumentRegister)); |
5944 | summary->set_temp(1, Location::RegisterLocation( |
5945 | CallingConventions::kFirstCalleeSavedCpuReg)); |
5946 | summary->set_out(0, marshaller_.LocInFfiCall(compiler::ffi::kResultIndex)); |
5947 | |
5948 | for (intptr_t i = 0, n = marshaller_.num_args(); i < n; ++i) { |
5949 | summary->set_in(i, marshaller_.LocInFfiCall(i)); |
5950 | } |
5951 | |
5952 | return summary; |
5953 | } |
5954 | |
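// Moves the Dart arguments into the locations required by the native
// calling convention. The input locations were computed relative to FPREG,
// so any location that refers to it is first rebased onto the saved frame
// pointer held in temp 0.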
5955 | void FfiCallInstr::EmitParamMoves(FlowGraphCompiler* compiler) { |
5956 | const Register saved_fp = locs()->temp(0).reg(); |
5957 | const Register temp = locs()->temp(1).reg(); |
5958 | |
5959 | compiler::ffi::FrameRebase rebase(/*old_base=*/FPREG, /*new_base=*/saved_fp, |
5960 | /*stack_delta=*/0, zone_); |
5961 | for (intptr_t i = 0, n = NativeArgCount(); i < n; ++i) { |
5962 | const Location origin = rebase.Rebase(locs()->in(i)); |
5963 | const Representation origin_rep = RequiredInputRepresentation(i); |
5964 | const auto& target = marshaller_.Location(i); |
5965 | ConstantTemporaryAllocator temp_alloc(temp); |
5966 | if (origin.IsConstant()) { |
5967 | compiler->EmitMoveConst(target, origin, origin_rep, &temp_alloc); |
5968 | } else { |
5969 | compiler->EmitMoveToNative(target, origin, origin_rep, &temp_alloc); |
5970 | } |
5971 | } |
5972 | } |
5973 | |
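// Moves the native result into the Dart-side output location; a native
// function returning void produces nothing to move.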
5974 | void FfiCallInstr::EmitReturnMoves(FlowGraphCompiler* compiler) { |
5975 | const auto& src = marshaller_.Location(compiler::ffi::kResultIndex); |
5976 | if (src.payload_type().IsVoid()) { |
5977 | return; |
5978 | } |
5979 | const Location dst_loc = locs()->out(0); |
5980 | const Representation dst_type = representation(); |
5981 | NoTemporaryAllocator no_temp; |
5982 | compiler->EmitMoveFromNative(dst_loc, dst_type, src, &no_temp); |
5983 | } |
5984 | |
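// Location of the sole argument to the handle-scope runtime entries below:
// on IA32 C arguments are passed on the stack, on all other architectures
// in the first argument register.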
5985 | static Location FirstArgumentLocation() { |
5986 | #ifdef TARGET_ARCH_IA32 |
5987 | return Location::StackSlot(0, SPREG); |
5988 | #else |
5989 | return Location::RegisterLocation(CallingConventions::ArgumentRegisters[0]); |
5990 | #endif |
5991 | } |
5992 | |
5993 | LocationSummary* EnterHandleScopeInstr::MakeLocationSummary( |
5994 | Zone* zone, |
5995 | bool is_optimizing) const { |
5996 | LocationSummary* summary = |
5997 | new (zone) LocationSummary(zone, /*num_inputs=*/0, |
5998 | /*num_temps=*/0, LocationSummary::kCall); |
5999 | summary->set_out(0, |
6000 | Location::RegisterLocation(CallingConventions::kReturnReg)); |
6001 | return summary; |
6002 | } |
6003 | |
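// For kGetTopHandleScope the thread's current top API scope is simply
// loaded into the return register; otherwise a C frame is entered and the
// EnterHandleScope runtime entry is called with the current thread as its
// only argument.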
6004 | void EnterHandleScopeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6005 | if (kind_ == Kind::kGetTopHandleScope) { |
6006 | __ LoadMemoryValue(CallingConventions::kReturnReg, THR, |
6007 | compiler::target::Thread::api_top_scope_offset()); |
6008 | return; |
6009 | } |
6010 | |
6011 | Location arg_loc = FirstArgumentLocation(); |
6012 | __ EnterCFrame(arg_loc.IsRegister() ? 0 : compiler::target::kWordSize); |
6013 | NoTemporaryAllocator no_temp; |
6014 | compiler->EmitMove(arg_loc, Location::RegisterLocation(THR), &no_temp); |
6015 | __ CallCFunction( |
6016 | compiler::Address(THR, compiler::target::Thread::OffsetFromThread( |
6017 | &kEnterHandleScopeRuntimeEntry))); |
6018 | __ LeaveCFrame(); |
6019 | } |
6020 | |
6021 | LocationSummary* ExitHandleScopeInstr::MakeLocationSummary( |
6022 | Zone* zone, |
6023 | bool is_optimizing) const { |
6024 | LocationSummary* summary = |
6025 | new (zone) LocationSummary(zone, /*num_inputs=*/0, |
6026 | /*num_temps=*/0, LocationSummary::kCall); |
6027 | return summary; |
6028 | } |
6029 | |
6030 | void ExitHandleScopeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6031 | Location arg_loc = FirstArgumentLocation(); |
6032 | __ EnterCFrame(arg_loc.IsRegister() ? 0 : compiler::target::kWordSize); |
6033 | NoTemporaryAllocator no_temp; |
6034 | compiler->EmitMove(arg_loc, Location::RegisterLocation(THR), &no_temp); |
6035 | __ CallCFunction( |
6036 | compiler::Address(THR, compiler::target::Thread::OffsetFromThread( |
6037 | &kExitHandleScopeRuntimeEntry))); |
6038 | __ LeaveCFrame(); |
6039 | } |
6040 | |
6041 | LocationSummary* AllocateHandleInstr::MakeLocationSummary( |
6042 | Zone* zone, |
6043 | bool is_optimizing) const { |
6044 | LocationSummary* summary = |
6045 | new (zone) LocationSummary(zone, /*num_inputs=*/1, |
6046 | /*num_temps=*/0, LocationSummary::kCall); |
6047 | |
6048 | Location arg_loc = FirstArgumentLocation(); |
  // If the argument is passed on the stack, assign the input to a register
  // that does not conflict with anything else.
6051 | const Register scope_reg = |
6052 | arg_loc.IsStackSlot() ? CallingConventions::kSecondNonArgumentRegister |
6053 | : arg_loc.reg(); |
6054 | |
6055 | summary->set_in(kScope, Location::RegisterLocation(scope_reg)); |
6056 | summary->set_out(0, |
6057 | Location::RegisterLocation(CallingConventions::kReturnReg)); |
6058 | return summary; |
6059 | } |
6060 | |
6061 | Representation AllocateHandleInstr::RequiredInputRepresentation( |
6062 | intptr_t idx) const { |
6063 | ASSERT(idx == kScope); |
6064 | return kUnboxedIntPtr; |
6065 | } |
6066 | |
6067 | void AllocateHandleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6068 | Location arg_loc = FirstArgumentLocation(); |
6069 | __ EnterCFrame(arg_loc.IsRegister() ? 0 : compiler::target::kWordSize); |
6070 | if (arg_loc.IsStackSlot()) { |
6071 | NoTemporaryAllocator no_temp; |
6072 | compiler->EmitMove(arg_loc, locs()->in(kScope), &no_temp); |
6073 | } |
6074 | __ CallCFunction( |
6075 | compiler::Address(THR, compiler::target::Thread::OffsetFromThread( |
6076 | &kAllocateHandleRuntimeEntry))); |
6077 | __ LeaveCFrame(); |
6078 | } |
6079 | |
6080 | LocationSummary* RawStoreFieldInstr::MakeLocationSummary( |
6081 | Zone* zone, |
6082 | bool is_optimizing) const { |
6083 | LocationSummary* summary = |
6084 | new (zone) LocationSummary(zone, /*num_inputs=*/2, |
6085 | /*num_temps=*/0, LocationSummary::kNoCall); |
6086 | |
6087 | summary->set_in(kBase, Location::RequiresRegister()); |
6088 | summary->set_in(kValue, Location::RequiresRegister()); |
6089 | |
6090 | return summary; |
6091 | } |
6092 | |
6093 | Representation RawStoreFieldInstr::RequiredInputRepresentation( |
6094 | intptr_t idx) const { |
6095 | switch (idx) { |
6096 | case kBase: |
6097 | return kUntagged; |
6098 | case kValue: |
6099 | return kTagged; |
6100 | default: |
6101 | break; |
6102 | } |
6103 | UNREACHABLE(); |
6104 | } |
6105 | |
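// Note that StoreMemoryValue emits a plain store with no write barrier, so
// this instruction is only usable where the GC does not need to be notified
// of the stored value.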
6106 | void RawStoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
6107 | const Register base_reg = locs()->in(kBase).reg(); |
6108 | const Register value_reg = locs()->in(kValue).reg(); |
6109 | compiler->assembler()->StoreMemoryValue(value_reg, base_reg, offset_); |
6110 | } |
6111 | |
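// The inverse of FfiCallInstr::EmitReturnMoves above: moves the Dart-side
// return value into the location required by the native calling convention.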
6112 | void NativeReturnInstr::EmitReturnMoves(FlowGraphCompiler* compiler) { |
6113 | const auto& dst = marshaller_.Location(compiler::ffi::kResultIndex); |
6114 | if (dst.payload_type().IsVoid()) { |
6115 | return; |
6116 | } |
6117 | const Location src_loc = locs()->in(0); |
6118 | const Representation src_type = RequiredInputRepresentation(0); |
6119 | NoTemporaryAllocator no_temp; |
6120 | compiler->EmitMoveToNative(dst, src_loc, src_type, &no_temp); |
6121 | } |
6122 | |
6123 | LocationSummary* NativeReturnInstr::MakeLocationSummary(Zone* zone, |
6124 | bool opt) const { |
6125 | const intptr_t kNumInputs = 1; |
6126 | const intptr_t kNumTemps = 0; |
6127 | LocationSummary* locs = new (zone) |
6128 | LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); |
6129 | locs->set_in( |
6130 | 0, marshaller_.LocationOfNativeParameter(compiler::ffi::kResultIndex)); |
6131 | return locs; |
6132 | } |
6133 | |
6134 | #undef Z |
6135 | |
6136 | Representation FfiCallInstr::representation() const { |
6137 | return marshaller_.RepInFfiCall(compiler::ffi::kResultIndex); |
6138 | } |
6139 | |
6140 | // SIMD |
6141 | |
6142 | SimdOpInstr::Kind SimdOpInstr::KindForOperator(MethodRecognizer::Kind kind) { |
6143 | switch (kind) { |
6144 | case MethodRecognizer::kFloat32x4Mul: |
6145 | return SimdOpInstr::kFloat32x4Mul; |
6146 | case MethodRecognizer::kFloat32x4Div: |
6147 | return SimdOpInstr::kFloat32x4Div; |
6148 | case MethodRecognizer::kFloat32x4Add: |
6149 | return SimdOpInstr::kFloat32x4Add; |
6150 | case MethodRecognizer::kFloat32x4Sub: |
6151 | return SimdOpInstr::kFloat32x4Sub; |
6152 | case MethodRecognizer::kFloat64x2Mul: |
6153 | return SimdOpInstr::kFloat64x2Mul; |
6154 | case MethodRecognizer::kFloat64x2Div: |
6155 | return SimdOpInstr::kFloat64x2Div; |
6156 | case MethodRecognizer::kFloat64x2Add: |
6157 | return SimdOpInstr::kFloat64x2Add; |
6158 | case MethodRecognizer::kFloat64x2Sub: |
6159 | return SimdOpInstr::kFloat64x2Sub; |
6160 | default: |
6161 | break; |
6162 | } |
6163 | UNREACHABLE(); |
6164 | return SimdOpInstr::kIllegalSimdOp; |
6165 | } |
6166 | |
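// Builds a SimdOpInstr from a recognized call: the receiver, if present,
// becomes input 0 and the remaining inputs are taken from the call's
// arguments. For operations carrying a mask, the mask is stored on the
// instruction itself rather than consumed as an input, which the ASSERT at
// the end accounts for.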
6167 | SimdOpInstr* SimdOpInstr::CreateFromCall(Zone* zone, |
6168 | MethodRecognizer::Kind kind, |
6169 | Definition* receiver, |
6170 | Instruction* call, |
6171 | intptr_t mask /* = 0 */) { |
6172 | SimdOpInstr* op; |
6173 | switch (kind) { |
6174 | case MethodRecognizer::kFloat32x4Mul: |
6175 | case MethodRecognizer::kFloat32x4Div: |
6176 | case MethodRecognizer::kFloat32x4Add: |
6177 | case MethodRecognizer::kFloat32x4Sub: |
6178 | case MethodRecognizer::kFloat64x2Mul: |
6179 | case MethodRecognizer::kFloat64x2Div: |
6180 | case MethodRecognizer::kFloat64x2Add: |
6181 | case MethodRecognizer::kFloat64x2Sub: |
6182 | op = new (zone) SimdOpInstr(KindForOperator(kind), call->deopt_id()); |
6183 | break; |
6184 | default: |
6185 | op = new (zone) SimdOpInstr(KindForMethod(kind), call->deopt_id()); |
6186 | break; |
6187 | } |
6188 | |
6189 | if (receiver != nullptr) { |
6190 | op->SetInputAt(0, new (zone) Value(receiver)); |
6191 | } |
6192 | for (intptr_t i = (receiver != nullptr ? 1 : 0); i < op->InputCount(); i++) { |
6193 | op->SetInputAt(i, call->ArgumentValueAt(i)->CopyWithType(zone)); |
6194 | } |
6195 | if (op->HasMask()) { |
6196 | op->set_mask(mask); |
6197 | } |
6198 | ASSERT(call->ArgumentCount() == (op->InputCount() + (op->HasMask() ? 1 : 0))); |
6199 | return op; |
6200 | } |
6201 | |
6202 | SimdOpInstr* SimdOpInstr::CreateFromFactoryCall(Zone* zone, |
6203 | MethodRecognizer::Kind kind, |
6204 | Instruction* call) { |
6205 | SimdOpInstr* op = |
6206 | new (zone) SimdOpInstr(KindForMethod(kind), call->deopt_id()); |
6207 | for (intptr_t i = 0; i < op->InputCount(); i++) { |
    // Note: ArgumentAt(0) holds the type arguments, which we don't need.
6209 | op->SetInputAt(i, call->ArgumentValueAt(i + 1)->CopyWithType(zone)); |
6210 | } |
6211 | ASSERT(call->ArgumentCount() == (op->InputCount() + 1)); |
6212 | return op; |
6213 | } |
6214 | |
6215 | SimdOpInstr::Kind SimdOpInstr::KindForOperator(intptr_t cid, Token::Kind op) { |
6216 | switch (cid) { |
6217 | case kFloat32x4Cid: |
6218 | switch (op) { |
6219 | case Token::kADD: |
6220 | return kFloat32x4Add; |
6221 | case Token::kSUB: |
6222 | return kFloat32x4Sub; |
6223 | case Token::kMUL: |
6224 | return kFloat32x4Mul; |
6225 | case Token::kDIV: |
6226 | return kFloat32x4Div; |
6227 | default: |
6228 | break; |
6229 | } |
6230 | break; |
6231 | |
6232 | case kFloat64x2Cid: |
6233 | switch (op) { |
6234 | case Token::kADD: |
6235 | return kFloat64x2Add; |
6236 | case Token::kSUB: |
6237 | return kFloat64x2Sub; |
6238 | case Token::kMUL: |
6239 | return kFloat64x2Mul; |
6240 | case Token::kDIV: |
6241 | return kFloat64x2Div; |
6242 | default: |
6243 | break; |
6244 | } |
6245 | break; |
6246 | |
6247 | case kInt32x4Cid: |
6248 | switch (op) { |
6249 | case Token::kADD: |
6250 | return kInt32x4Add; |
6251 | case Token::kSUB: |
6252 | return kInt32x4Sub; |
6253 | case Token::kBIT_AND: |
6254 | return kInt32x4BitAnd; |
6255 | case Token::kBIT_OR: |
6256 | return kInt32x4BitOr; |
6257 | case Token::kBIT_XOR: |
6258 | return kInt32x4BitXor; |
6259 | default: |
6260 | break; |
6261 | } |
6262 | break; |
6263 | } |
6264 | |
6265 | UNREACHABLE(); |
6266 | return kIllegalSimdOp; |
6267 | } |
6268 | |
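// Maps a recognized SIMD method to the corresponding SimdOp kind. Binary
// operators are deliberately skipped here (CASE_BINARY_OP expands to
// nothing); those are mapped through KindForOperator instead.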
6269 | SimdOpInstr::Kind SimdOpInstr::KindForMethod(MethodRecognizer::Kind kind) { |
6270 | switch (kind) { |
6271 | #define CASE_METHOD(Arity, Mask, Name, ...) \ |
6272 | case MethodRecognizer::k##Name: \ |
6273 | return k##Name; |
6274 | #define CASE_BINARY_OP(Arity, Mask, Name, Args, Result) |
6275 | SIMD_OP_LIST(CASE_METHOD, CASE_BINARY_OP) |
6276 | #undef CASE_METHOD |
6277 | #undef CASE_BINARY_OP |
6278 | default: |
6279 | break; |
6280 | } |
6281 | |
  FATAL1("Not a SIMD method: %s", MethodRecognizer::KindToCString(kind));
6283 | return kIllegalSimdOp; |
6284 | } |
6285 | |
// The methods InputCount(), representation(), RequiredInputRepresentation()
// and HasMask() below read from an array of SimdOpInfo structures that
// encodes all of the necessary per-kind information about the instruction.
6289 | |
6290 | struct SimdOpInfo { |
6291 | uint8_t arity; |
6292 | bool has_mask; |
6293 | Representation output; |
6294 | Representation inputs[4]; |
6295 | }; |
6296 | |
// Make a representation from the type name used by SIMD_OP_LIST.
6298 | #define REP(T) (kUnboxed##T) |
6299 | static const Representation kUnboxedBool = kTagged; |
6300 | static const Representation kUnboxedInt8 = kUnboxedInt32; |
6301 | |
6302 | #define ENCODE_INPUTS_0() |
6303 | #define ENCODE_INPUTS_1(In0) REP(In0) |
6304 | #define ENCODE_INPUTS_2(In0, In1) REP(In0), REP(In1) |
6305 | #define ENCODE_INPUTS_3(In0, In1, In2) REP(In0), REP(In1), REP(In2) |
6306 | #define ENCODE_INPUTS_4(In0, In1, In2, In3) \ |
6307 | REP(In0), REP(In1), REP(In2), REP(In3) |
6308 | |
6309 | // Helpers for correct interpretation of the Mask field in the SIMD_OP_LIST. |
6310 | #define HAS_MASK true |
6311 | #define HAS__ false |
6312 | |
6313 | // Define the metadata array. |
6314 | static const SimdOpInfo simd_op_information[] = { |
6315 | #define PP_APPLY(M, Args) M Args |
6316 | #define CASE(Arity, Mask, Name, Args, Result) \ |
6317 | {Arity, HAS_##Mask, REP(Result), {PP_APPLY(ENCODE_INPUTS_##Arity, Args)}}, |
6318 | SIMD_OP_LIST(CASE, CASE) |
6319 | #undef CASE |
6320 | #undef PP_APPLY |
6321 | }; |
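// As an illustration, a SIMD_OP_LIST entry of the shape
//
//   V(2, _, Float32x4Add, (Float32x4, Float32x4), Float32x4)
//
// is expanded by the CASE macro above into roughly
//
//   {2, false, kUnboxedFloat32x4, {kUnboxedFloat32x4, kUnboxedFloat32x4}},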
6322 | |
6323 | // Undef all auxiliary macros. |
6325 | #undef HAS__ |
6326 | #undef HAS_MASK |
6327 | #undef ENCODE_INPUTS_0 |
6328 | #undef ENCODE_INPUTS_1 |
6329 | #undef ENCODE_INPUTS_2 |
6330 | #undef ENCODE_INPUTS_3 |
6331 | #undef ENCODE_INPUTS_4 |
6332 | #undef REP |
6333 | |
6334 | intptr_t SimdOpInstr::InputCount() const { |
6335 | return simd_op_information[kind()].arity; |
6336 | } |
6337 | |
6338 | Representation SimdOpInstr::representation() const { |
6339 | return simd_op_information[kind()].output; |
6340 | } |
6341 | |
6342 | Representation SimdOpInstr::RequiredInputRepresentation(intptr_t idx) const { |
6343 | ASSERT(0 <= idx && idx < InputCount()); |
6344 | return simd_op_information[kind()].inputs[idx]; |
6345 | } |
6346 | |
6347 | bool SimdOpInstr::HasMask() const { |
6348 | return simd_op_information[kind()].has_mask; |
6349 | } |
6350 | |
6351 | #undef __ |
6352 | |
6353 | } // namespace dart |
6354 | |