1 | // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32. |
6 | #if defined(TARGET_ARCH_IA32) |
7 | |
8 | #include "vm/compiler/backend/flow_graph_compiler.h" |
9 | |
10 | #include "vm/code_patcher.h" |
11 | #include "vm/compiler/api/type_check_mode.h" |
12 | #include "vm/compiler/backend/il_printer.h" |
13 | #include "vm/compiler/backend/locations.h" |
14 | #include "vm/compiler/frontend/flow_graph_builder.h" |
15 | #include "vm/compiler/jit/compiler.h" |
16 | #include "vm/cpu.h" |
17 | #include "vm/dart_entry.h" |
18 | #include "vm/deopt_instructions.h" |
19 | #include "vm/instructions.h" |
20 | #include "vm/object_store.h" |
21 | #include "vm/parser.h" |
22 | #include "vm/stack_frame.h" |
23 | #include "vm/stub_code.h" |
24 | #include "vm/symbols.h" |
25 | |
26 | namespace dart { |
27 | |
DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
30 | |
31 | DECLARE_FLAG(bool, enable_simd_inline); |
32 | |
33 | void FlowGraphCompiler::ArchSpecificInitialization() {} |
34 | |
35 | FlowGraphCompiler::~FlowGraphCompiler() { |
36 | // BlockInfos are zone-allocated, so their destructors are not called. |
37 | // Verify the labels explicitly here. |
38 | for (int i = 0; i < block_info_.length(); ++i) { |
39 | ASSERT(!block_info_[i]->jump_label()->IsLinked()); |
40 | ASSERT(!block_info_[i]->jump_label()->HasNear()); |
41 | } |
42 | } |
43 | |
44 | bool FlowGraphCompiler::SupportsUnboxedDoubles() { |
45 | return true; |
46 | } |
47 | |
48 | bool FlowGraphCompiler::SupportsUnboxedInt64() { |
49 | return FLAG_unbox_mints; |
50 | } |
51 | |
52 | bool FlowGraphCompiler::SupportsUnboxedSimd128() { |
53 | return FLAG_enable_simd_inline; |
54 | } |
55 | |
56 | bool FlowGraphCompiler::SupportsHardwareDivision() { |
57 | return true; |
58 | } |
59 | |
60 | bool FlowGraphCompiler::CanConvertInt64ToDouble() { |
61 | return true; |
62 | } |
63 | |
64 | void FlowGraphCompiler::EnterIntrinsicMode() { |
65 | ASSERT(!intrinsic_mode()); |
66 | intrinsic_mode_ = true; |
67 | } |
68 | |
69 | void FlowGraphCompiler::ExitIntrinsicMode() { |
70 | ASSERT(intrinsic_mode()); |
71 | intrinsic_mode_ = false; |
72 | } |
73 | |
74 | TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, |
75 | DeoptInfoBuilder* builder, |
76 | const Array& deopt_table) { |
77 | if (deopt_env_ == NULL) { |
78 | ++builder->current_info_number_; |
79 | return TypedData::null(); |
80 | } |
81 | |
82 | intptr_t stack_height = compiler->StackSize(); |
83 | AllocateIncomingParametersRecursive(deopt_env_, &stack_height); |
84 | |
85 | intptr_t slot_ix = 0; |
86 | Environment* current = deopt_env_; |
87 | |
// Emit all kMaterializeObject instructions describing objects to be
// materialized during deoptimization as a prefix to the deoptimization info.
90 | EmitMaterializations(deopt_env_, builder); |
91 | |
92 | // The real frame starts here. |
93 | builder->MarkFrameStart(); |
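
// Roughly (a descriptive sketch only, not a specification), the slots emitted
// below are, per frame from innermost to outermost: a pc marker, the caller's
// fp, a return address, and then the frame's stack contents (materialization
// arguments and locals/outgoing arguments for the innermost frame, outgoing
// arguments and locals for outer frames), finishing with the caller pc and
// the incoming arguments of the outermost frame.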
94 | |
95 | Zone* zone = compiler->zone(); |
96 | |
97 | builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++); |
98 | builder->AddCallerFp(slot_ix++); |
99 | builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++); |
100 | |
101 | // Emit all values that are needed for materialization as a part of the |
102 | // expression stack for the bottom-most frame. This guarantees that GC |
103 | // will be able to find them during materialization. |
104 | slot_ix = builder->EmitMaterializationArguments(slot_ix); |
105 | |
106 | // For the innermost environment, set outgoing arguments and the locals. |
107 | for (intptr_t i = current->Length() - 1; |
108 | i >= current->fixed_parameter_count(); i--) { |
109 | builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); |
110 | } |
111 | |
112 | builder->AddPcMarker(current->function(), slot_ix++); |
113 | builder->AddCallerFp(slot_ix++); |
114 | |
115 | Environment* previous = current; |
116 | current = current->outer(); |
117 | while (current != NULL) { |
118 | // For any outer environment the deopt id is that of the call instruction |
119 | // which is recorded in the outer environment. |
120 | builder->AddReturnAddress(current->function(), |
121 | DeoptId::ToDeoptAfter(current->deopt_id()), |
122 | slot_ix++); |
123 | |
// The values of outgoing arguments can be changed by the inlined call, so
// we must read them from the previous environment.
126 | for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { |
127 | builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), |
128 | slot_ix++); |
129 | } |
130 | |
131 | // Set the locals, note that outgoing arguments are not in the environment. |
132 | for (intptr_t i = current->Length() - 1; |
133 | i >= current->fixed_parameter_count(); i--) { |
134 | builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); |
135 | } |
136 | |
137 | builder->AddPcMarker(current->function(), slot_ix++); |
138 | builder->AddCallerFp(slot_ix++); |
139 | |
140 | // Iterate on the outer environment. |
141 | previous = current; |
142 | current = current->outer(); |
143 | } |
144 | // The previous pointer is now the outermost environment. |
145 | ASSERT(previous != NULL); |
146 | |
147 | // For the outermost environment, set caller PC. |
148 | builder->AddCallerPc(slot_ix++); |
149 | |
150 | // For the outermost environment, set the incoming arguments. |
151 | for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { |
152 | builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); |
153 | } |
154 | |
155 | return builder->CreateDeoptInfo(deopt_table); |
156 | } |
157 | |
158 | void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, |
159 | intptr_t stub_ix) { |
160 | // Calls do not need stubs, they share a deoptimization trampoline. |
161 | ASSERT(reason() != ICData::kDeoptAtCall); |
162 | compiler::Assembler* assembler = compiler->assembler(); |
163 | #define __ assembler-> |
164 | __ Comment("%s" , Name()); |
165 | __ Bind(entry_label()); |
166 | if (FLAG_trap_on_deoptimization) { |
167 | __ int3(); |
168 | } |
169 | |
170 | ASSERT(deopt_env() != NULL); |
171 | __ pushl(CODE_REG); |
172 | __ Call(StubCode::Deoptimize()); |
173 | set_pc_offset(assembler->CodeSize()); |
174 | __ int3(); |
175 | #undef __ |
176 | } |
177 | |
178 | #define __ assembler()-> |
179 | |
180 | // Fall through if bool_register contains null. |
181 | void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, |
182 | compiler::Label* is_true, |
183 | compiler::Label* is_false) { |
184 | const compiler::Immediate& raw_null = |
185 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
186 | compiler::Label fall_through; |
187 | __ cmpl(bool_register, raw_null); |
188 | __ j(EQUAL, &fall_through, compiler::Assembler::kNearJump); |
189 | BranchLabels labels = {is_true, is_false, &fall_through}; |
190 | Condition true_condition = |
191 | EmitBoolTest(bool_register, labels, /*invert=*/false); |
192 | ASSERT(true_condition != kInvalidCondition); |
193 | __ j(true_condition, is_true); |
194 | __ jmp(is_false); |
195 | __ Bind(&fall_through); |
196 | } |
197 | |
198 | // Clobbers ECX. |
199 | SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub( |
200 | TypeTestStubKind test_kind, |
201 | Register instance_reg, |
202 | Register instantiator_type_arguments_reg, |
203 | Register function_type_arguments_reg, |
204 | Register temp_reg, |
205 | compiler::Label* is_instance_lbl, |
206 | compiler::Label* is_not_instance_lbl) { |
207 | const SubtypeTestCache& type_test_cache = |
208 | SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New()); |
209 | const compiler::Immediate& raw_null = |
210 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
211 | __ LoadObject(temp_reg, type_test_cache); |
212 | __ pushl(temp_reg); // Subtype test cache. |
213 | __ pushl(instance_reg); // Instance. |
214 | if (test_kind == kTestTypeOneArg) { |
215 | ASSERT(instantiator_type_arguments_reg == kNoRegister); |
216 | ASSERT(function_type_arguments_reg == kNoRegister); |
217 | __ pushl(raw_null); |
218 | __ pushl(raw_null); |
219 | __ Call(StubCode::Subtype1TestCache()); |
220 | } else if (test_kind == kTestTypeTwoArgs) { |
221 | ASSERT(instantiator_type_arguments_reg == kNoRegister); |
222 | ASSERT(function_type_arguments_reg == kNoRegister); |
223 | __ pushl(raw_null); |
224 | __ pushl(raw_null); |
225 | __ Call(StubCode::Subtype2TestCache()); |
226 | } else if (test_kind == kTestTypeFourArgs) { |
227 | __ pushl(instantiator_type_arguments_reg); |
228 | __ pushl(function_type_arguments_reg); |
229 | __ Call(StubCode::Subtype4TestCache()); |
230 | } else if (test_kind == kTestTypeSixArgs) { |
231 | __ pushl(instantiator_type_arguments_reg); |
232 | __ pushl(function_type_arguments_reg); |
233 | __ Call(StubCode::Subtype6TestCache()); |
234 | } else { |
235 | UNREACHABLE(); |
236 | } |
237 | // Result is in ECX: null -> not found, otherwise Bool::True or Bool::False. |
238 | ASSERT(instance_reg != ECX); |
239 | ASSERT(temp_reg != ECX); |
240 | __ Drop(2); |
241 | __ popl(instance_reg); // Restore receiver. |
242 | __ popl(temp_reg); // Discard. |
243 | GenerateBoolToJump(ECX, is_instance_lbl, is_not_instance_lbl); |
244 | return type_test_cache.raw(); |
245 | } |
246 | |
// Jumps to the 'is_instance' or 'is_not_instance' label respectively if the
// type test is conclusive; otherwise falls through when the type test could
// not be completed.
250 | // EAX: instance (must survive). |
251 | // Clobbers ECX, EDI. |
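// As an illustrative example (assuming ordinary Dart code, not exhaustive), a
// check such as `x is List<int>` is handled here: for a raw type like
// `List<dynamic>` the class id comparison below suffices, while `List<int>`
// falls back to a subtype test cache involving the instance's type arguments.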
252 | SubtypeTestCachePtr |
253 | FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest( |
254 | TokenPosition token_pos, |
255 | const AbstractType& type, |
256 | compiler::Label* is_instance_lbl, |
257 | compiler::Label* is_not_instance_lbl) { |
258 | __ Comment("InstantiatedTypeWithArgumentsTest" ); |
259 | ASSERT(type.IsInstantiated()); |
260 | ASSERT(!type.IsFunctionType()); |
261 | const Class& type_class = Class::ZoneHandle(zone(), type.type_class()); |
262 | ASSERT(type_class.NumTypeArguments() > 0); |
263 | const Type& smi_type = Type::Handle(zone(), Type::SmiType()); |
264 | const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kOld); |
265 | __ testl(TypeTestABI::kInstanceReg, compiler::Immediate(kSmiTagMask)); |
266 | if (smi_is_ok) { |
267 | // Fast case for type = FutureOr<int/num/top-type>. |
268 | __ j(ZERO, is_instance_lbl); |
269 | } else { |
270 | __ j(ZERO, is_not_instance_lbl); |
271 | } |
272 | const intptr_t num_type_args = type_class.NumTypeArguments(); |
273 | const intptr_t num_type_params = type_class.NumTypeParameters(); |
274 | const intptr_t from_index = num_type_args - num_type_params; |
275 | const TypeArguments& type_arguments = |
276 | TypeArguments::ZoneHandle(zone(), type.arguments()); |
277 | const bool is_raw_type = type_arguments.IsNull() || |
278 | type_arguments.IsRaw(from_index, num_type_params); |
279 | if (is_raw_type) { |
280 | const Register kClassIdReg = ECX; |
281 | // dynamic type argument, check only classes. |
282 | __ LoadClassId(kClassIdReg, TypeTestABI::kInstanceReg); |
283 | __ cmpl(kClassIdReg, compiler::Immediate(type_class.id())); |
284 | __ j(EQUAL, is_instance_lbl); |
285 | // List is a very common case. |
286 | if (IsListClass(type_class)) { |
287 | GenerateListTypeCheck(kClassIdReg, is_instance_lbl); |
288 | } |
289 | return GenerateSubtype1TestCacheLookup( |
290 | token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
291 | } |
// If there is only one type argument, check whether it is a top type.
293 | if (type_arguments.Length() == 1) { |
294 | const AbstractType& tp_argument = |
295 | AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0)); |
296 | if (tp_argument.IsTopTypeForSubtyping()) { |
297 | // Instance class test only necessary. |
298 | return GenerateSubtype1TestCacheLookup( |
299 | token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
300 | } |
301 | } |
302 | // Regular subtype test cache involving instance's type arguments. |
303 | const Register kInstantiatorTypeArgumentsReg = kNoRegister; |
304 | const Register kFunctionTypeArgumentsReg = kNoRegister; |
305 | const Register kTempReg = EDI; |
306 | return GenerateCallSubtypeTestStub( |
307 | kTestTypeTwoArgs, TypeTestABI::kInstanceReg, |
308 | kInstantiatorTypeArgumentsReg, kFunctionTypeArgumentsReg, kTempReg, |
309 | is_instance_lbl, is_not_instance_lbl); |
310 | } |
311 | |
312 | void FlowGraphCompiler::CheckClassIds(Register class_id_reg, |
313 | const GrowableArray<intptr_t>& class_ids, |
314 | compiler::Label* is_equal_lbl, |
315 | compiler::Label* is_not_equal_lbl) { |
316 | for (intptr_t i = 0; i < class_ids.length(); i++) { |
317 | __ cmpl(class_id_reg, compiler::Immediate(class_ids[i])); |
318 | __ j(EQUAL, is_equal_lbl); |
319 | } |
320 | __ jmp(is_not_equal_lbl); |
321 | } |
322 | |
323 | // Testing against an instantiated type with no arguments, without |
324 | // SubtypeTestCache. |
325 | // EAX: instance to test against (preserved). |
326 | // Clobbers ECX, EDI. |
327 | // Returns true if there is a fallthrough. |
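// As a rough guide (illustrative examples only), checks such as `x is bool`,
// `x is num`, `x is String`, or `x is Function` take the specialized paths
// below, while other classes use a cid-range check or fall through to the
// caller's handling.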
328 | bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest( |
329 | TokenPosition token_pos, |
330 | const AbstractType& type, |
331 | compiler::Label* is_instance_lbl, |
332 | compiler::Label* is_not_instance_lbl) { |
333 | __ Comment("InstantiatedTypeNoArgumentsTest" ); |
334 | ASSERT(type.IsInstantiated()); |
335 | ASSERT(!type.IsFunctionType()); |
336 | const Class& type_class = Class::Handle(zone(), type.type_class()); |
337 | ASSERT(type_class.NumTypeArguments() == 0); |
338 | |
339 | __ testl(TypeTestABI::kInstanceReg, compiler::Immediate(kSmiTagMask)); |
340 | // If instance is Smi, check directly. |
341 | const Class& smi_class = Class::Handle(zone(), Smi::Class()); |
342 | if (Class::IsSubtypeOf(smi_class, Object::null_type_arguments(), |
343 | Nullability::kNonNullable, type, Heap::kOld)) { |
344 | // Fast case for type = int/num/top-type. |
345 | __ j(ZERO, is_instance_lbl); |
346 | } else { |
347 | __ j(ZERO, is_not_instance_lbl); |
348 | } |
349 | const Register kClassIdReg = ECX; |
350 | __ LoadClassId(kClassIdReg, TypeTestABI::kInstanceReg); |
351 | // Bool interface can be implemented only by core class Bool. |
352 | if (type.IsBoolType()) { |
353 | __ cmpl(kClassIdReg, compiler::Immediate(kBoolCid)); |
354 | __ j(EQUAL, is_instance_lbl); |
355 | __ jmp(is_not_instance_lbl); |
356 | return false; |
357 | } |
358 | // Custom checking for numbers (Smi, Mint and Double). |
359 | // Note that instance is not Smi (checked above). |
360 | if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) { |
361 | GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl, |
362 | is_not_instance_lbl); |
363 | return false; |
364 | } |
365 | if (type.IsStringType()) { |
366 | GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl); |
367 | return false; |
368 | } |
369 | if (type.IsDartFunctionType()) { |
370 | // Check if instance is a closure. |
371 | __ cmpl(kClassIdReg, compiler::Immediate(kClosureCid)); |
372 | __ j(EQUAL, is_instance_lbl); |
return true;  // Fall through.
374 | } |
375 | |
376 | // Fast case for cid-range based checks. |
377 | // Warning: This code destroys the contents of [kClassIdReg]. |
378 | if (GenerateSubtypeRangeCheck(kClassIdReg, type_class, is_instance_lbl)) { |
379 | return false; |
380 | } |
381 | |
382 | // Otherwise fallthrough, result non-conclusive. |
383 | return true; |
384 | } |
385 | |
386 | // Uses SubtypeTestCache to store instance class and result. |
387 | // EAX: instance to test. |
388 | // Clobbers EDI, ECX. |
389 | // Immediate class test already done. |
// TODO(srdjan): Implement a quicker subtype check, as type test
// arrays can grow too large, but they may be useful when optimizing
// code (type-feedback).
393 | SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup( |
394 | TokenPosition token_pos, |
395 | const Class& type_class, |
396 | compiler::Label* is_instance_lbl, |
397 | compiler::Label* is_not_instance_lbl) { |
398 | __ Comment("Subtype1TestCacheLookup" ); |
399 | #if defined(DEBUG) |
400 | compiler::Label ok; |
401 | __ BranchIfNotSmi(TypeTestABI::kInstanceReg, &ok); |
402 | __ Breakpoint(); |
403 | __ Bind(&ok); |
404 | #endif |
405 | __ LoadClassId(EDI, TypeTestABI::kInstanceReg); |
406 | __ LoadClassById(ECX, EDI); |
407 | // ECX: instance class. |
408 | // Check immediate superclass equality. If type_class is Object, then testing |
409 | // supertype may yield a wrong result for Null in NNBD strong mode (because |
410 | // Null also extends Object). |
411 | if (!type_class.IsObjectClass() || !Isolate::Current()->null_safety()) { |
412 | __ movl(EDI, compiler::FieldAddress(ECX, Class::super_type_offset())); |
413 | __ movl(EDI, compiler::FieldAddress(EDI, Type::type_class_id_offset())); |
414 | __ cmpl(EDI, compiler::Immediate(Smi::RawValue(type_class.id()))); |
415 | __ j(EQUAL, is_instance_lbl); |
416 | } |
417 | |
418 | const Register kInstantiatorTypeArgumentsReg = kNoRegister; |
419 | const Register kFunctionTypeArgumentsReg = kNoRegister; |
420 | const Register kTempReg = EDI; |
421 | return GenerateCallSubtypeTestStub(kTestTypeOneArg, TypeTestABI::kInstanceReg, |
422 | kInstantiatorTypeArgumentsReg, |
423 | kFunctionTypeArgumentsReg, kTempReg, |
424 | is_instance_lbl, is_not_instance_lbl); |
425 | } |
426 | |
// Generates an inlined check if 'type' is a type parameter or a regular type.
428 | // EAX: instance (preserved). |
429 | // Clobbers EDX, EDI, ECX. |
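// For illustration (not exhaustive): this covers checks such as `x is T` where
// T is a class or function type parameter, and checks against uninstantiated
// interface types whose type arguments are only known at run time.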
430 | SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest( |
431 | TokenPosition token_pos, |
432 | const AbstractType& type, |
433 | compiler::Label* is_instance_lbl, |
434 | compiler::Label* is_not_instance_lbl) { |
435 | __ Comment("UninstantiatedTypeTest" ); |
436 | const Register kTempReg = EDI; |
437 | ASSERT(!type.IsInstantiated()); |
438 | ASSERT(!type.IsFunctionType()); |
439 | // Skip check if destination is a dynamic type. |
440 | const compiler::Immediate& raw_null = |
441 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
442 | if (type.IsTypeParameter()) { |
443 | const TypeParameter& type_param = TypeParameter::Cast(type); |
444 | |
445 | __ movl(EDX, compiler::Address( |
446 | ESP, 1 * kWordSize)); // Get instantiator type args. |
447 | __ movl(ECX, |
448 | compiler::Address(ESP, 0 * kWordSize)); // Get function type args. |
449 | // EDX: instantiator type arguments. |
450 | // ECX: function type arguments. |
451 | const Register kTypeArgumentsReg = |
452 | type_param.IsClassTypeParameter() |
453 | ? TypeTestABI::kInstantiatorTypeArgumentsReg |
454 | : TypeTestABI::kFunctionTypeArgumentsReg; |
455 | // Check if type arguments are null, i.e. equivalent to vector of dynamic. |
456 | __ cmpl(kTypeArgumentsReg, raw_null); |
457 | __ j(EQUAL, is_instance_lbl); |
458 | __ movl(EDI, compiler::FieldAddress( |
459 | kTypeArgumentsReg, |
460 | TypeArguments::type_at_offset(type_param.index()))); |
461 | // EDI: concrete type of type. |
462 | // Check if type argument is dynamic, Object?, or void. |
463 | __ CompareObject(EDI, Object::dynamic_type()); |
464 | __ j(EQUAL, is_instance_lbl); |
465 | __ CompareObject( |
466 | EDI, Type::ZoneHandle( |
467 | zone(), isolate()->object_store()->nullable_object_type())); |
468 | __ j(EQUAL, is_instance_lbl); |
469 | __ CompareObject(EDI, Object::void_type()); |
470 | __ j(EQUAL, is_instance_lbl); |
471 | |
// For a Smi, check quickly against the int and num interfaces.
473 | compiler::Label not_smi; |
474 | __ testl(EAX, compiler::Immediate(kSmiTagMask)); // Value is Smi? |
__ j(NOT_ZERO, &not_smi, compiler::Assembler::kNearJump);
476 | __ CompareObject(EDI, Type::ZoneHandle(zone(), Type::IntType())); |
477 | __ j(EQUAL, is_instance_lbl); |
478 | __ CompareObject(EDI, Type::ZoneHandle(zone(), Type::Number())); |
479 | __ j(EQUAL, is_instance_lbl); |
480 | // Smi can be handled by type test cache. |
__ Bind(&not_smi);
482 | |
483 | const auto test_kind = GetTypeTestStubKindForTypeParameter(type_param); |
484 | const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle( |
485 | zone(), GenerateCallSubtypeTestStub( |
486 | test_kind, TypeTestABI::kInstanceReg, |
487 | TypeTestABI::kInstantiatorTypeArgumentsReg, |
488 | TypeTestABI::kFunctionTypeArgumentsReg, kTempReg, |
489 | is_instance_lbl, is_not_instance_lbl)); |
490 | return type_test_cache.raw(); |
491 | } |
492 | if (type.IsType()) { |
// A Smi is an instance of FutureOr<T> when T is a top type, int, or num.
494 | if (!type.IsFutureOrType()) { |
495 | __ testl(TypeTestABI::kInstanceReg, |
496 | compiler::Immediate(kSmiTagMask)); // Is instance Smi? |
497 | __ j(ZERO, is_not_instance_lbl); |
498 | } |
499 | __ movl(TypeTestABI::kInstantiatorTypeArgumentsReg, |
500 | compiler::Address(ESP, 1 * kWordSize)); |
501 | __ movl(TypeTestABI::kFunctionTypeArgumentsReg, |
502 | compiler::Address(ESP, 0 * kWordSize)); |
503 | // Uninstantiated type class is known at compile time, but the type |
504 | // arguments are determined at runtime by the instantiator(s). |
505 | return GenerateCallSubtypeTestStub( |
506 | kTestTypeFourArgs, TypeTestABI::kInstanceReg, |
507 | TypeTestABI::kInstantiatorTypeArgumentsReg, |
508 | TypeTestABI::kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl, |
509 | is_not_instance_lbl); |
510 | } |
511 | return SubtypeTestCache::null(); |
512 | } |
513 | |
514 | // Generates function type check. |
515 | // |
516 | // See [GenerateUninstantiatedTypeTest] for calling convention. |
517 | SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest( |
518 | TokenPosition token_pos, |
519 | const AbstractType& type, |
520 | compiler::Label* is_instance_lbl, |
521 | compiler::Label* is_not_instance_lbl) { |
522 | __ Comment("FunctionTypeTest" ); |
523 | |
524 | __ testl(TypeTestABI::kInstanceReg, compiler::Immediate(kSmiTagMask)); |
525 | __ j(ZERO, is_not_instance_lbl); |
526 | // Uninstantiated type class is known at compile time, but the type |
527 | // arguments are determined at runtime by the instantiator(s). |
528 | const Register kTempReg = EDI; |
529 | return GenerateCallSubtypeTestStub( |
530 | kTestTypeSixArgs, TypeTestABI::kInstanceReg, |
531 | TypeTestABI::kInstantiatorTypeArgumentsReg, |
532 | TypeTestABI::kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl, |
533 | is_not_instance_lbl); |
534 | } |
535 | |
536 | // Inputs: |
537 | // - EAX: instance to test against (preserved). |
538 | // - EDX: optional instantiator type arguments (preserved). |
539 | // - ECX: optional function type arguments (preserved). |
540 | // Clobbers EDI. |
541 | // Returns: |
// - preserved instance in EAX, optional instantiator type arguments in EDX, and
// optional function type arguments in ECX.
544 | // Note that this inlined code must be followed by the runtime_call code, as it |
545 | // may fall through to it. Otherwise, this inline code will jump to the label |
546 | // is_instance or to the label is_not_instance. |
547 | SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof( |
548 | TokenPosition token_pos, |
549 | const AbstractType& type, |
550 | compiler::Label* is_instance_lbl, |
551 | compiler::Label* is_not_instance_lbl) { |
552 | __ Comment("InlineInstanceof" ); |
553 | |
554 | if (type.IsFunctionType()) { |
555 | return GenerateFunctionTypeTest(token_pos, type, is_instance_lbl, |
556 | is_not_instance_lbl); |
557 | } |
558 | |
559 | if (type.IsInstantiated()) { |
560 | const Class& type_class = Class::ZoneHandle(zone(), type.type_class()); |
561 | // A class equality check is only applicable with a dst type (not a |
562 | // function type) of a non-parameterized class or with a raw dst type of |
563 | // a parameterized class. |
564 | if (type_class.NumTypeArguments() > 0) { |
565 | return GenerateInstantiatedTypeWithArgumentsTest( |
566 | token_pos, type, is_instance_lbl, is_not_instance_lbl); |
567 | // Fall through to runtime call. |
568 | } |
569 | const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest( |
570 | token_pos, type, is_instance_lbl, is_not_instance_lbl); |
571 | if (has_fall_through) { |
// If the test was non-conclusive so far, try the inlined type-test cache.
573 | // 'type' is known at compile time. |
574 | return GenerateSubtype1TestCacheLookup( |
575 | token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
576 | } else { |
577 | return SubtypeTestCache::null(); |
578 | } |
579 | } |
580 | return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl, |
581 | is_not_instance_lbl); |
582 | } |
583 | |
584 | // If instanceof type test cannot be performed successfully at compile time and |
585 | // therefore eliminated, optimize it by adding inlined tests for: |
586 | // - Null -> see comment below. |
587 | // - Smi -> compile time subtype check (only if dst class is not parameterized). |
588 | // - Class equality (only if class is not parameterized). |
589 | // Inputs: |
590 | // - EAX: object. |
591 | // - EDX: instantiator type arguments or raw_null. |
592 | // - ECX: function type arguments or raw_null. |
593 | // Returns: |
594 | // - true or false in EAX. |
595 | void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos, |
596 | intptr_t deopt_id, |
597 | const AbstractType& type, |
598 | LocationSummary* locs) { |
599 | ASSERT(type.IsFinalized()); |
600 | ASSERT(!type.IsTopTypeForInstanceOf()); // Already checked. |
601 | |
602 | __ pushl(EDX); // Store instantiator type arguments. |
603 | __ pushl(ECX); // Store function type arguments. |
604 | |
605 | const compiler::Immediate& raw_null = |
606 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
607 | compiler::Label is_instance, is_not_instance; |
608 | // 'null' is an instance of Null, Object*, Never*, void, and dynamic. |
609 | // In addition, 'null' is an instance of any nullable type. |
610 | // It is also an instance of FutureOr<T> if it is an instance of T. |
611 | const AbstractType& unwrapped_type = |
612 | AbstractType::Handle(type.UnwrapFutureOr()); |
613 | if (!unwrapped_type.IsTypeParameter() || unwrapped_type.IsNullable()) { |
// Only a nullable type parameter remains nullable after instantiation.
615 | // See NullIsInstanceOf(). |
616 | __ cmpl(EAX, raw_null); |
617 | __ j(EQUAL, (unwrapped_type.IsNullable() || |
618 | (unwrapped_type.IsLegacy() && unwrapped_type.IsNeverType())) |
619 | ? &is_instance |
620 | : &is_not_instance); |
621 | } |
622 | |
623 | // Generate inline instanceof test. |
624 | SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); |
625 | test_cache = |
626 | GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance); |
627 | |
628 | // test_cache is null if there is no fall-through. |
629 | compiler::Label done; |
630 | if (!test_cache.IsNull()) { |
631 | // Generate runtime call. |
632 | __ movl(EDX, compiler::Address( |
633 | ESP, 1 * kWordSize)); // Get instantiator type args. |
634 | __ movl(ECX, |
635 | compiler::Address(ESP, 0 * kWordSize)); // Get function type args. |
636 | __ PushObject(Object::null_object()); // Make room for the result. |
637 | __ pushl(EAX); // Push the instance. |
638 | __ PushObject(type); // Push the type. |
639 | __ pushl(TypeTestABI::kInstantiatorTypeArgumentsReg); |
640 | __ pushl(TypeTestABI::kFunctionTypeArgumentsReg); |
641 | __ LoadObject(EAX, test_cache); |
642 | __ pushl(EAX); |
643 | GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs); |
644 | // Pop the parameters supplied to the runtime entry. The result of the |
645 | // instanceof runtime call will be left as the result of the operation. |
646 | __ Drop(5); |
647 | __ popl(EAX); |
648 | __ jmp(&done, compiler::Assembler::kNearJump); |
649 | } |
650 | __ Bind(&is_not_instance); |
651 | __ LoadObject(EAX, Bool::Get(false)); |
652 | __ jmp(&done, compiler::Assembler::kNearJump); |
653 | |
654 | __ Bind(&is_instance); |
655 | __ LoadObject(EAX, Bool::Get(true)); |
656 | __ Bind(&done); |
657 | __ popl(ECX); // Remove pushed function type arguments. |
658 | __ popl(EDX); // Remove pushed instantiator type arguments. |
659 | } |
660 | |
661 | // Optimize assignable type check by adding inlined tests for: |
662 | // - NULL -> return NULL. |
663 | // - Smi -> compile time subtype check (only if dst class is not parameterized). |
664 | // - Class equality (only if class is not parameterized). |
665 | // Inputs: |
666 | // - EAX: object. |
667 | // - EBX: destination type (if non-constant). |
668 | // - EDX: instantiator type arguments or raw_null. |
669 | // - ECX: function type arguments or raw_null. |
670 | // Returns: |
671 | // - object in EAX for successful assignable check (or throws TypeError). |
672 | // Performance notes: positive checks must be quick, negative checks can be slow |
673 | // as they throw an exception. |
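// As a descriptive summary of the slow path below (not an independent
// specification): the TypeCheck runtime entry is called with the instance,
// the destination type, the instantiator and function type arguments, the
// destination name, the subtype test cache, and the check mode pushed above a
// result slot; the checked value is left in that slot.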
674 | void FlowGraphCompiler::GenerateAssertAssignable(CompileType* receiver_type, |
675 | TokenPosition token_pos, |
676 | intptr_t deopt_id, |
677 | const String& dst_name, |
678 | LocationSummary* locs) { |
679 | ASSERT(!token_pos.IsClassifying()); |
680 | ASSERT(CheckAssertAssignableTypeTestingABILocations(*locs)); |
681 | |
682 | if (!locs->in(1).IsConstant()) { |
683 | // TODO(dartbug.com/40813): Handle setting up the non-constant case. |
684 | UNREACHABLE(); |
685 | } |
686 | |
687 | ASSERT(locs->in(1).constant().IsAbstractType()); |
688 | const auto& dst_type = AbstractType::Cast(locs->in(1).constant()); |
689 | ASSERT(dst_type.IsFinalized()); |
690 | |
691 | if (dst_type.IsTopTypeForSubtyping()) return; // No code needed. |
692 | |
693 | __ pushl(TypeTestABI::kInstantiatorTypeArgumentsReg); |
694 | __ pushl(TypeTestABI::kFunctionTypeArgumentsReg); |
695 | |
696 | compiler::Label is_assignable, runtime_call; |
697 | if (Instance::NullIsAssignableTo(dst_type)) { |
698 | const compiler::Immediate& raw_null = |
699 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
700 | __ cmpl(TypeTestABI::kInstanceReg, raw_null); |
701 | __ j(EQUAL, &is_assignable); |
702 | } |
703 | |
704 | // Generate inline type check, linking to runtime call if not assignable. |
705 | SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); |
706 | test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable, |
707 | &runtime_call); |
708 | |
709 | __ Bind(&runtime_call); |
710 | __ movl( |
711 | TypeTestABI::kInstantiatorTypeArgumentsReg, |
712 | compiler::Address(ESP, 1 * kWordSize)); // Get instantiator type args. |
713 | __ movl(TypeTestABI::kFunctionTypeArgumentsReg, |
714 | compiler::Address(ESP, 0 * kWordSize)); // Get function type args. |
715 | __ PushObject(Object::null_object()); // Make room for the result. |
716 | __ pushl(TypeTestABI::kInstanceReg); // Push the source object. |
717 | if (locs->in(1).IsConstant()) { |
718 | __ PushObject(locs->in(1).constant()); // Push the type of the destination. |
719 | } else { |
720 | // TODO(dartbug.com/40813): Handle setting up the non-constant case. |
721 | UNREACHABLE(); |
722 | } |
723 | __ pushl(TypeTestABI::kInstantiatorTypeArgumentsReg); |
724 | __ pushl(TypeTestABI::kFunctionTypeArgumentsReg); |
725 | __ PushObject(dst_name); // Push the name of the destination. |
726 | __ LoadObject(EAX, test_cache); |
727 | __ pushl(EAX); |
728 | __ PushObject(Smi::ZoneHandle(zone(), Smi::New(kTypeCheckFromInline))); |
729 | GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 7, locs); |
730 | // Pop the parameters supplied to the runtime entry. The result of the |
731 | // type check runtime call is the checked value. |
732 | __ Drop(7); |
733 | __ popl(TypeTestABI::kInstanceReg); |
734 | |
735 | __ Bind(&is_assignable); |
736 | __ popl(TypeTestABI::kFunctionTypeArgumentsReg); |
737 | __ popl(TypeTestABI::kInstantiatorTypeArgumentsReg); |
738 | } |
739 | |
740 | void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { |
741 | if (is_optimizing()) { |
742 | return; |
743 | } |
744 | Definition* defn = instr->AsDefinition(); |
745 | if ((defn != NULL) && defn->HasTemp()) { |
746 | Location value = defn->locs()->out(0); |
747 | if (value.IsRegister()) { |
748 | __ pushl(value.reg()); |
749 | } else if (value.IsConstant()) { |
750 | __ PushObject(value.constant()); |
751 | } else { |
752 | ASSERT(value.IsStackSlot()); |
753 | __ pushl(LocationToStackSlotAddress(value)); |
754 | } |
755 | } |
756 | } |
757 | |
758 | // NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc |
759 | // needs to be updated to match. |
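// Roughly, the entry emitted for an optimizable, unoptimized function looks
// like (illustrative pseudocode, not the exact instruction sequence):
//   EBX = <function object>
//   increment EBX's usage counter
//   if (usage counter >= threshold) jump to THR->optimize_entry
//   EnterDartFrame(StackSize() * kWordSize)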
760 | void FlowGraphCompiler::EmitFrameEntry() { |
761 | const Function& function = parsed_function().function(); |
762 | if (CanOptimizeFunction() && function.IsOptimizable() && |
763 | (!is_optimizing() || may_reoptimize())) { |
764 | __ Comment("Invocation Count Check" ); |
765 | const Register function_reg = EBX; |
766 | __ LoadObject(function_reg, function); |
767 | |
768 | // Reoptimization of an optimized function is triggered by counting in |
769 | // IC stubs, but not at the entry of the function. |
770 | if (!is_optimizing()) { |
771 | __ incl(compiler::FieldAddress(function_reg, |
772 | Function::usage_counter_offset())); |
773 | } |
774 | __ cmpl( |
775 | compiler::FieldAddress(function_reg, Function::usage_counter_offset()), |
776 | compiler::Immediate(GetOptimizationThreshold())); |
777 | ASSERT(function_reg == EBX); |
778 | compiler::Label dont_optimize; |
779 | __ j(LESS, &dont_optimize, compiler::Assembler::kNearJump); |
780 | __ jmp(compiler::Address(THR, Thread::optimize_entry_offset())); |
781 | __ Bind(&dont_optimize); |
782 | } |
783 | __ Comment("Enter frame" ); |
784 | if (flow_graph().IsCompiledForOsr()) { |
785 | intptr_t extra_slots = ExtraStackSlotsOnOsrEntry(); |
786 | ASSERT(extra_slots >= 0); |
787 | __ EnterOsrFrame(extra_slots * kWordSize); |
788 | } else { |
789 | ASSERT(StackSize() >= 0); |
790 | __ EnterDartFrame(StackSize() * kWordSize); |
791 | } |
792 | } |
793 | |
794 | void FlowGraphCompiler::EmitPrologue() { |
795 | EmitFrameEntry(); |
796 | |
797 | // In unoptimized code, initialize (non-argument) stack allocated slots. |
798 | if (!is_optimizing()) { |
799 | const int num_locals = parsed_function().num_stack_locals(); |
800 | |
801 | intptr_t args_desc_slot = -1; |
802 | if (parsed_function().has_arg_desc_var()) { |
803 | args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable( |
804 | parsed_function().arg_desc_var()); |
805 | } |
806 | |
807 | __ Comment("Initialize spill slots" ); |
808 | if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) { |
809 | const compiler::Immediate& raw_null = |
810 | compiler::Immediate(static_cast<intptr_t>(Object::null())); |
811 | __ movl(EAX, raw_null); |
812 | } |
813 | for (intptr_t i = 0; i < num_locals; ++i) { |
814 | const intptr_t slot_index = |
815 | compiler::target::frame_layout.FrameSlotForVariableIndex(-i); |
816 | Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX; |
817 | __ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg); |
818 | } |
819 | } |
820 | |
821 | EndCodeSourceRange(TokenPosition::kDartCodePrologue); |
822 | } |
823 | |
824 | void FlowGraphCompiler::CompileGraph() { |
825 | InitCompiler(); |
826 | |
827 | ASSERT(!block_order().is_empty()); |
828 | VisitBlocks(); |
829 | |
830 | if (!skip_body_compilation()) { |
831 | #if defined(DEBUG) |
832 | __ int3(); |
833 | #endif |
834 | GenerateDeferredCode(); |
835 | } |
836 | |
837 | for (intptr_t i = 0; i < indirect_gotos_.length(); ++i) { |
838 | indirect_gotos_[i]->ComputeOffsetTable(this); |
839 | } |
840 | } |
841 | |
842 | void FlowGraphCompiler::EmitCallToStub(const Code& stub) { |
843 | __ Call(stub); |
844 | AddStubCallTarget(stub); |
845 | } |
846 | |
847 | void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id, |
848 | TokenPosition token_pos, |
849 | const Code& stub, |
850 | PcDescriptorsLayout::Kind kind, |
851 | LocationSummary* locs, |
852 | Code::EntryKind entry_kind) { |
853 | ASSERT(CanCallDart()); |
__ Call(stub, /*movable_target=*/false, entry_kind);
855 | EmitCallsiteMetadata(token_pos, deopt_id, kind, locs); |
856 | } |
857 | |
858 | void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id, |
859 | TokenPosition token_pos, |
860 | PcDescriptorsLayout::Kind kind, |
861 | LocationSummary* locs, |
862 | const Function& target, |
863 | Code::EntryKind entry_kind) { |
864 | ASSERT(CanCallDart()); |
865 | const auto& stub = StubCode::CallStaticFunction(); |
866 | __ Call(stub, /*movable_target=*/true, entry_kind); |
867 | EmitCallsiteMetadata(token_pos, deopt_id, kind, locs); |
868 | AddStaticCallTarget(target, entry_kind); |
869 | } |
870 | |
871 | void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos, |
872 | intptr_t deopt_id, |
873 | const RuntimeEntry& entry, |
874 | intptr_t argument_count, |
875 | LocationSummary* locs) { |
876 | __ CallRuntime(entry, argument_count); |
877 | EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs); |
878 | } |
879 | |
880 | void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args, |
881 | intptr_t deopt_id, |
882 | TokenPosition token_pos, |
883 | LocationSummary* locs, |
884 | const ICData& ic_data, |
885 | Code::EntryKind entry_kind) { |
886 | ASSERT(CanCallDart()); |
887 | const Code& stub = |
888 | StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested()); |
889 | __ LoadObject(ECX, ic_data); |
890 | GenerateDartCall(deopt_id, token_pos, stub, |
891 | PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind); |
892 | __ Drop(size_with_type_args); |
893 | } |
894 | |
895 | void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) { |
896 | // We do not check for overflow when incrementing the edge counter. The |
897 | // function should normally be optimized long before the counter can |
898 | // overflow; and though we do not reset the counters when we optimize or |
899 | // deoptimize, there is a bound on the number of |
900 | // optimization/deoptimization cycles we will attempt. |
901 | ASSERT(!edge_counters_array_.IsNull()); |
902 | __ Comment("Edge counter" ); |
903 | __ LoadObject(EAX, edge_counters_array_); |
904 | __ IncrementSmiField( |
905 | compiler::FieldAddress(EAX, Array::element_offset(edge_id)), 1); |
906 | } |
907 | |
908 | void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub, |
909 | const ICData& ic_data, |
910 | intptr_t deopt_id, |
911 | TokenPosition token_pos, |
912 | LocationSummary* locs, |
913 | Code::EntryKind entry_kind) { |
914 | ASSERT(CanCallDart()); |
915 | ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); |
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due
// to inlining in optimized code, that function may not correspond to the
// top-level function (parsed_function().function()), which could be
// reoptimized and whose counter needs to be incremented.
// Pass the function explicitly; it is used by the IC stub.
922 | __ LoadObject(EAX, parsed_function().function()); |
923 | // Load receiver into EBX. |
924 | __ movl(EBX, compiler::Address( |
925 | ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize)); |
926 | __ LoadObject(ECX, ic_data); |
927 | GenerateDartCall(deopt_id, token_pos, stub, PcDescriptorsLayout::kIcCall, |
928 | locs, entry_kind); |
929 | __ Drop(ic_data.SizeWithTypeArgs()); |
930 | } |
931 | |
932 | void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub, |
933 | const ICData& ic_data, |
934 | intptr_t deopt_id, |
935 | TokenPosition token_pos, |
936 | LocationSummary* locs, |
937 | Code::EntryKind entry_kind) { |
938 | ASSERT(CanCallDart()); |
939 | ASSERT(entry_kind == Code::EntryKind::kNormal || |
940 | entry_kind == Code::EntryKind::kUnchecked); |
941 | ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); |
942 | // Load receiver into EBX. |
943 | __ movl(EBX, compiler::Address( |
944 | ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize)); |
945 | __ LoadObject(ECX, ic_data, true); |
946 | __ LoadObject(CODE_REG, stub, true); |
947 | const intptr_t entry_point_offset = |
948 | entry_kind == Code::EntryKind::kNormal |
949 | ? Code::entry_point_offset(Code::EntryKind::kMonomorphic) |
950 | : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked); |
951 | __ call(compiler::FieldAddress(CODE_REG, entry_point_offset)); |
952 | EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kIcCall, locs); |
953 | __ Drop(ic_data.SizeWithTypeArgs()); |
954 | } |
955 | |
956 | void FlowGraphCompiler::EmitMegamorphicInstanceCall( |
957 | const String& name, |
958 | const Array& arguments_descriptor, |
959 | intptr_t deopt_id, |
960 | TokenPosition token_pos, |
961 | LocationSummary* locs, |
962 | intptr_t try_index, |
963 | intptr_t slow_path_argument_count) { |
964 | ASSERT(CanCallDart()); |
965 | ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); |
966 | const ArgumentsDescriptor args_desc(arguments_descriptor); |
967 | const MegamorphicCache& cache = MegamorphicCache::ZoneHandle( |
968 | zone(), |
969 | MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor)); |
970 | |
971 | __ Comment("MegamorphicCall" ); |
972 | // Load receiver into EBX. |
973 | __ movl(EBX, compiler::Address(ESP, (args_desc.Count() - 1) * kWordSize)); |
974 | __ LoadObject(ECX, cache, true); |
975 | __ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true); |
976 | __ call(compiler::FieldAddress( |
977 | CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic))); |
978 | |
979 | AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, token_pos); |
980 | RecordSafepoint(locs, slow_path_argument_count); |
981 | const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id); |
982 | // Precompilation not implemented on ia32 platform. |
983 | ASSERT(!FLAG_precompiled_mode); |
984 | if (is_optimizing()) { |
985 | AddDeoptIndexAtCall(deopt_id_after); |
986 | } else { |
987 | // Add deoptimization continuation point after the call and before the |
988 | // arguments are removed. |
989 | AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after, |
990 | token_pos); |
991 | } |
992 | RecordCatchEntryMoves(pending_deoptimization_env_, try_index); |
993 | __ Drop(args_desc.SizeWithTypeArgs()); |
994 | } |
995 | |
996 | void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data, |
997 | intptr_t deopt_id, |
998 | TokenPosition token_pos, |
999 | LocationSummary* locs, |
1000 | Code::EntryKind entry_kind, |
1001 | bool receiver_can_be_smi) { |
1002 | // Only generated with precompilation. |
1003 | UNREACHABLE(); |
1004 | } |
1005 | |
1006 | void FlowGraphCompiler::EmitOptimizedStaticCall( |
1007 | const Function& function, |
1008 | const Array& arguments_descriptor, |
1009 | intptr_t size_with_type_args, |
1010 | intptr_t deopt_id, |
1011 | TokenPosition token_pos, |
1012 | LocationSummary* locs, |
1013 | Code::EntryKind entry_kind) { |
1014 | ASSERT(CanCallDart()); |
1015 | if (function.HasOptionalParameters() || function.IsGeneric()) { |
1016 | __ LoadObject(EDX, arguments_descriptor); |
1017 | } else { |
1018 | __ xorl(EDX, EDX); // GC safe smi zero because of stub. |
1019 | } |
1020 | // Do not use the code from the function, but let the code be patched so that |
1021 | // we can record the outgoing edges to other code. |
1022 | GenerateStaticDartCall(deopt_id, token_pos, PcDescriptorsLayout::kOther, locs, |
1023 | function, entry_kind); |
1024 | __ Drop(size_with_type_args); |
1025 | } |
1026 | |
1027 | void FlowGraphCompiler::EmitDispatchTableCall( |
1028 | Register cid_reg, |
1029 | int32_t selector_offset, |
1030 | const Array& arguments_descriptor) { |
1031 | // Only generated with precompilation. |
1032 | UNREACHABLE(); |
1033 | } |
1034 | |
1035 | Condition FlowGraphCompiler::EmitEqualityRegConstCompare( |
1036 | Register reg, |
1037 | const Object& obj, |
1038 | bool needs_number_check, |
1039 | TokenPosition token_pos, |
1040 | intptr_t deopt_id) { |
1041 | ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble())); |
1042 | |
1043 | if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) { |
1044 | ASSERT(!needs_number_check); |
1045 | __ testl(reg, reg); |
1046 | return EQUAL; |
1047 | } |
1048 | |
1049 | if (needs_number_check) { |
1050 | __ pushl(reg); |
1051 | __ PushObject(obj); |
1052 | if (is_optimizing()) { |
1053 | __ Call(StubCode::OptimizedIdenticalWithNumberCheck()); |
1054 | } else { |
1055 | __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck()); |
1056 | } |
1057 | AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, |
1058 | token_pos); |
1059 | // Stub returns result in flags (result of a cmpl, we need ZF computed). |
1060 | __ popl(reg); // Discard constant. |
1061 | __ popl(reg); // Restore 'reg'. |
1062 | } else { |
1063 | __ CompareObject(reg, obj); |
1064 | } |
1065 | return EQUAL; |
1066 | } |
1067 | |
1068 | Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, |
1069 | Register right, |
1070 | bool needs_number_check, |
1071 | TokenPosition token_pos, |
1072 | intptr_t deopt_id) { |
1073 | if (needs_number_check) { |
1074 | __ pushl(left); |
1075 | __ pushl(right); |
1076 | if (is_optimizing()) { |
1077 | __ Call(StubCode::OptimizedIdenticalWithNumberCheck()); |
1078 | } else { |
1079 | __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck()); |
1080 | } |
1081 | AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, |
1082 | token_pos); |
1083 | // Stub returns result in flags (result of a cmpl, we need ZF computed). |
1084 | __ popl(right); |
1085 | __ popl(left); |
1086 | } else { |
1087 | __ cmpl(left, right); |
1088 | } |
1089 | return EQUAL; |
1090 | } |
1091 | |
1092 | Condition FlowGraphCompiler::EmitBoolTest(Register value, |
1093 | BranchLabels labels, |
1094 | bool invert) { |
1095 | __ Comment("BoolTest" ); |
1096 | __ testl(value, compiler::Immediate( |
1097 | compiler::target::ObjectAlignment::kBoolValueMask)); |
1098 | return invert ? NOT_EQUAL : EQUAL; |
1099 | } |
1100 | |
1101 | // This function must be in sync with FlowGraphCompiler::RecordSafepoint and |
1102 | // FlowGraphCompiler::SlowPathEnvironmentFor. |
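// As a rough sketch of the resulting layout (descriptive only; the
// authoritative layout is whatever RecordSafepoint and SlowPathEnvironmentFor
// expect), from higher to lower addresses:
//   [live XMM registers, lowest register number at the lowest address]
//   [live CPU registers, pushed from the highest to the lowest register number]
//   <- ESP after saving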
1103 | void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) { |
1104 | #if defined(DEBUG) |
1105 | locs->CheckWritableInputs(); |
1106 | ClobberDeadTempRegisters(locs); |
1107 | #endif |
1108 | |
1109 | // TODO(vegorov): consider saving only caller save (volatile) registers. |
1110 | const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount(); |
1111 | if (xmm_regs_count > 0) { |
1112 | __ subl(ESP, compiler::Immediate(xmm_regs_count * kFpuRegisterSize)); |
1113 | // Store XMM registers with the lowest register number at the lowest |
1114 | // address. |
1115 | intptr_t offset = 0; |
1116 | for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) { |
1117 | XmmRegister xmm_reg = static_cast<XmmRegister>(i); |
1118 | if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) { |
1119 | __ movups(compiler::Address(ESP, offset), xmm_reg); |
1120 | offset += kFpuRegisterSize; |
1121 | } |
1122 | } |
1123 | ASSERT(offset == (xmm_regs_count * kFpuRegisterSize)); |
1124 | } |
1125 | |
1126 | // The order in which the registers are pushed must match the order |
1127 | // in which the registers are encoded in the safe point's stack map. |
1128 | for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) { |
1129 | Register reg = static_cast<Register>(i); |
1130 | if (locs->live_registers()->ContainsRegister(reg)) { |
1131 | __ pushl(reg); |
1132 | } |
1133 | } |
1134 | } |
1135 | |
1136 | void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) { |
1137 | for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) { |
1138 | Register reg = static_cast<Register>(i); |
1139 | if (locs->live_registers()->ContainsRegister(reg)) { |
1140 | __ popl(reg); |
1141 | } |
1142 | } |
1143 | |
1144 | const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount(); |
1145 | if (xmm_regs_count > 0) { |
1146 | // XMM registers have the lowest register number at the lowest address. |
1147 | intptr_t offset = 0; |
1148 | for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) { |
1149 | XmmRegister xmm_reg = static_cast<XmmRegister>(i); |
1150 | if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) { |
1151 | __ movups(xmm_reg, compiler::Address(ESP, offset)); |
1152 | offset += kFpuRegisterSize; |
1153 | } |
1154 | } |
1155 | ASSERT(offset == (xmm_regs_count * kFpuRegisterSize)); |
1156 | __ addl(ESP, compiler::Immediate(offset)); |
1157 | } |
1158 | } |
1159 | |
1160 | #if defined(DEBUG) |
1161 | void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) { |
1162 | // Clobber temporaries that have not been manually preserved. |
1163 | for (intptr_t i = 0; i < locs->temp_count(); ++i) { |
1164 | Location tmp = locs->temp(i); |
1165 | // TODO(zerny): clobber non-live temporary FPU registers. |
1166 | if (tmp.IsRegister() && |
1167 | !locs->live_registers()->ContainsRegister(tmp.reg())) { |
1168 | __ movl(tmp.reg(), compiler::Immediate(0xf7)); |
1169 | } |
1170 | } |
1171 | } |
1172 | #endif |
1173 | |
1174 | Register FlowGraphCompiler::EmitTestCidRegister() { |
1175 | return EDI; |
1176 | } |
1177 | |
1178 | void FlowGraphCompiler::EmitTestAndCallLoadReceiver( |
1179 | intptr_t count_without_type_args, |
1180 | const Array& arguments_descriptor) { |
1181 | __ Comment("EmitTestAndCall" ); |
1182 | // Load receiver into EAX. |
1183 | __ movl(EAX, |
1184 | compiler::Address(ESP, (count_without_type_args - 1) * kWordSize)); |
1185 | __ LoadObject(EDX, arguments_descriptor); |
1186 | } |
1187 | |
1188 | void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label, |
1189 | bool if_smi) { |
1190 | __ testl(EAX, compiler::Immediate(kSmiTagMask)); |
1191 | // Jump if receiver is (not) Smi. |
1192 | __ j(if_smi ? ZERO : NOT_ZERO, label); |
1193 | } |
1194 | |
1195 | void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) { |
1196 | ASSERT(class_id_reg != EAX); |
1197 | __ LoadClassId(class_id_reg, EAX); |
1198 | } |
1199 | |
1200 | #undef __ |
1201 | #define __ assembler-> |
1202 | |
1203 | int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler, |
1204 | compiler::Label* label, |
1205 | Register class_id_reg, |
1206 | const CidRangeValue& range, |
1207 | int bias, |
1208 | bool jump_on_miss) { |
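// A note on 'bias' (descriptive only): class_id_reg may already have had an
// earlier range's cid_start subtracted from it. For a multi-cid range we
// re-bias the register to this range's cid_start and use a single unsigned
// comparison against the range extent; the new bias is returned so that the
// next check can compensate.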
1209 | intptr_t cid_start = range.cid_start; |
1210 | if (range.IsSingleCid()) { |
1211 | __ cmpl(class_id_reg, compiler::Immediate(cid_start - bias)); |
1212 | __ j(jump_on_miss ? NOT_EQUAL : EQUAL, label); |
1213 | } else { |
1214 | __ addl(class_id_reg, compiler::Immediate(bias - cid_start)); |
1215 | bias = cid_start; |
1216 | __ cmpl(class_id_reg, compiler::Immediate(range.Extent())); |
1217 | __ j(jump_on_miss ? ABOVE : BELOW_EQUAL, label); // Unsigned higher. |
1218 | } |
1219 | return bias; |
1220 | } |
1221 | |
1222 | #undef __ |
1223 | #define __ assembler()-> |
1224 | |
1225 | void FlowGraphCompiler::EmitMove(Location destination, |
1226 | Location source, |
1227 | TemporaryRegisterAllocator* tmp) { |
1228 | if (destination.Equals(source)) return; |
1229 | |
1230 | if (source.IsRegister()) { |
1231 | if (destination.IsRegister()) { |
1232 | __ movl(destination.reg(), source.reg()); |
1233 | } else { |
1234 | ASSERT(destination.IsStackSlot()); |
1235 | __ movl(LocationToStackSlotAddress(destination), source.reg()); |
1236 | } |
1237 | } else if (source.IsStackSlot()) { |
1238 | if (destination.IsRegister()) { |
1239 | __ movl(destination.reg(), LocationToStackSlotAddress(source)); |
1240 | } else if (destination.IsFpuRegister()) { |
1241 | // 32-bit float |
1242 | __ movss(destination.fpu_reg(), LocationToStackSlotAddress(source)); |
1243 | } else { |
1244 | ASSERT(destination.IsStackSlot()); |
1245 | Register scratch = tmp->AllocateTemporary(); |
1246 | __ MoveMemoryToMemory(LocationToStackSlotAddress(destination), |
1247 | LocationToStackSlotAddress(source), scratch); |
1248 | tmp->ReleaseTemporary(); |
1249 | } |
1250 | } else if (source.IsFpuRegister()) { |
1251 | if (destination.IsFpuRegister()) { |
1252 | // Optimization manual recommends using MOVAPS for register |
1253 | // to register moves. |
1254 | __ movaps(destination.fpu_reg(), source.fpu_reg()); |
1255 | } else { |
1256 | if (destination.IsDoubleStackSlot()) { |
1257 | __ movsd(LocationToStackSlotAddress(destination), source.fpu_reg()); |
1258 | } else if (destination.IsStackSlot()) { |
1259 | // 32-bit float |
1260 | __ movss(LocationToStackSlotAddress(destination), source.fpu_reg()); |
1261 | } else { |
1262 | ASSERT(destination.IsQuadStackSlot()); |
1263 | __ movups(LocationToStackSlotAddress(destination), source.fpu_reg()); |
1264 | } |
1265 | } |
1266 | } else if (source.IsDoubleStackSlot()) { |
1267 | if (destination.IsFpuRegister()) { |
1268 | __ movsd(destination.fpu_reg(), LocationToStackSlotAddress(source)); |
1269 | } else if (destination.IsStackSlot()) { |
// Source holds a 32-bit float; take only the lower 32 bits.
1271 | __ movss(FpuTMP, LocationToStackSlotAddress(source)); |
1272 | __ movss(LocationToStackSlotAddress(destination), FpuTMP); |
1273 | } else { |
1274 | ASSERT(destination.IsDoubleStackSlot()); |
1275 | __ movsd(FpuTMP, LocationToStackSlotAddress(source)); |
1276 | __ movsd(LocationToStackSlotAddress(destination), FpuTMP); |
1277 | } |
1278 | } else if (source.IsQuadStackSlot()) { |
1279 | if (destination.IsFpuRegister()) { |
1280 | __ movups(destination.fpu_reg(), LocationToStackSlotAddress(source)); |
1281 | } else { |
1282 | ASSERT(destination.IsQuadStackSlot()); |
1283 | __ movups(FpuTMP, LocationToStackSlotAddress(source)); |
1284 | __ movups(LocationToStackSlotAddress(destination), FpuTMP); |
1285 | } |
1286 | } else if (source.IsPairLocation()) { |
1287 | ASSERT(destination.IsPairLocation()); |
1288 | for (intptr_t i : {0, 1}) { |
1289 | EmitMove(destination.Component(i), source.Component(i), tmp); |
1290 | } |
1291 | } else { |
1292 | ASSERT(source.IsConstant()); |
1293 | source.constant_instruction()->EmitMoveToLocation(this, destination); |
1294 | } |
1295 | } |
1296 | |
1297 | void FlowGraphCompiler::EmitNativeMoveArchitecture( |
1298 | const compiler::ffi::NativeLocation& destination, |
1299 | const compiler::ffi::NativeLocation& source) { |
1300 | const auto& src_type = source.payload_type(); |
1301 | const auto& dst_type = destination.payload_type(); |
1302 | ASSERT(src_type.IsFloat() == dst_type.IsFloat()); |
1303 | ASSERT(src_type.IsInt() == dst_type.IsInt()); |
1304 | ASSERT(src_type.IsSigned() == dst_type.IsSigned()); |
1305 | ASSERT(src_type.IsFundamental()); |
1306 | ASSERT(dst_type.IsFundamental()); |
1307 | const intptr_t src_size = src_type.SizeInBytes(); |
1308 | const intptr_t dst_size = dst_type.SizeInBytes(); |
1309 | const bool sign_or_zero_extend = dst_size > src_size; |
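// Descriptive note: narrower-than-word integer payloads are widened below
// according to the source signedness (movsx/movzx); same-width moves use a
// plain movl, and 32 -> 64 bit widening is handled earlier in IL by
// Representation conversions.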
1310 | |
1311 | if (source.IsRegisters()) { |
1312 | const auto& src = source.AsRegisters(); |
1313 | ASSERT(src.num_regs() == 1); |
1314 | ASSERT(src_size <= 4); |
1315 | const auto src_reg = src.reg_at(0); |
1316 | |
1317 | if (destination.IsRegisters()) { |
1318 | const auto& dst = destination.AsRegisters(); |
1319 | ASSERT(dst.num_regs() == 1); |
1320 | const auto dst_reg = dst.reg_at(0); |
1321 | if (!sign_or_zero_extend) { |
1322 | ASSERT(dst_size == 4); |
1323 | __ movl(dst_reg, src_reg); |
1324 | } else { |
1325 | switch (src_type.AsFundamental().representation()) { |
1326 | case compiler::ffi::kInt8: // Sign extend operand. |
1327 | __ movsxb(dst_reg, ByteRegisterOf(src_reg)); |
1328 | return; |
1329 | case compiler::ffi::kInt16: |
1330 | __ movsxw(dst_reg, src_reg); |
1331 | return; |
1332 | case compiler::ffi::kUint8: // Zero extend operand. |
1333 | __ movzxb(dst_reg, ByteRegisterOf(src_reg)); |
1334 | return; |
1335 | case compiler::ffi::kUint16: |
1336 | __ movzxw(dst_reg, src_reg); |
1337 | return; |
1338 | default: |
1339 | // 32 to 64 bit is covered in IL by Representation conversions. |
1340 | UNIMPLEMENTED(); |
1341 | } |
1342 | } |
1343 | |
1344 | } else if (destination.IsFpuRegisters()) { |
// FPU registers should only contain doubles, and CPU registers only ints.
1346 | UNIMPLEMENTED(); |
1347 | |
1348 | } else { |
1349 | ASSERT(destination.IsStack()); |
1350 | ASSERT(!sign_or_zero_extend); |
1351 | const auto& dst = destination.AsStack(); |
1352 | const auto dst_addr = NativeLocationToStackSlotAddress(dst); |
1353 | switch (dst_size) { |
1354 | case 4: |
1355 | __ movl(dst_addr, src_reg); |
1356 | return; |
1357 | case 2: |
1358 | __ movw(dst_addr, src_reg); |
1359 | return; |
1360 | case 1: |
1361 | __ movb(dst_addr, ByteRegisterOf(src_reg)); |
1362 | return; |
1363 | default: |
1364 | UNREACHABLE(); |
1365 | } |
1366 | } |
1367 | |
1368 | } else if (source.IsFpuRegisters()) { |
1369 | const auto& src = source.AsFpuRegisters(); |
    // Conversions are not implemented here; use IL convert instructions
    // instead.
1371 | ASSERT(src_type.Equals(dst_type)); |
1372 | |
1373 | if (destination.IsRegisters()) { |
      // FPU registers should only contain doubles and general-purpose
      // registers only ints.
1375 | UNIMPLEMENTED(); |
1376 | |
1377 | } else if (destination.IsFpuRegisters()) { |
1378 | const auto& dst = destination.AsFpuRegisters(); |
      // The optimization manual recommends MOVAPS for register-to-register
      // moves.
1381 | __ movaps(dst.fpu_reg(), src.fpu_reg()); |
1382 | |
1383 | } else { |
1384 | ASSERT(destination.IsStack()); |
1385 | ASSERT(src_type.IsFloat()); |
1386 | const auto& dst = destination.AsStack(); |
1387 | const auto dst_addr = NativeLocationToStackSlotAddress(dst); |
1388 | switch (dst_size) { |
1389 | case 8: |
1390 | __ movsd(dst_addr, src.fpu_reg()); |
1391 | return; |
1392 | case 4: |
1393 | __ movss(dst_addr, src.fpu_reg()); |
1394 | return; |
1395 | default: |
1396 | UNREACHABLE(); |
1397 | } |
1398 | } |
1399 | |
1400 | } else { |
1401 | ASSERT(source.IsStack()); |
1402 | const auto& src = source.AsStack(); |
1403 | const auto src_addr = NativeLocationToStackSlotAddress(src); |
1404 | if (destination.IsRegisters()) { |
1405 | const auto& dst = destination.AsRegisters(); |
1406 | ASSERT(dst.num_regs() == 1); |
1407 | ASSERT(dst_size <= 4); |
1408 | const auto dst_reg = dst.reg_at(0); |
1409 | if (!sign_or_zero_extend) { |
1410 | ASSERT(dst_size == 4); |
1411 | __ movl(dst_reg, src_addr); |
1412 | } else { |
1413 | switch (src_type.AsFundamental().representation()) { |
1414 | case compiler::ffi::kInt8: // Sign extend operand. |
1415 | __ movsxb(dst_reg, src_addr); |
1416 | return; |
1417 | case compiler::ffi::kInt16: |
1418 | __ movsxw(dst_reg, src_addr); |
1419 | return; |
1420 | case compiler::ffi::kUint8: // Zero extend operand. |
1421 | __ movzxb(dst_reg, src_addr); |
1422 | return; |
1423 | case compiler::ffi::kUint16: |
1424 | __ movzxw(dst_reg, src_addr); |
1425 | return; |
1426 | default: |
            // Widening from 32 to 64 bits is handled in IL by
            // Representation conversions.
1428 | UNIMPLEMENTED(); |
1429 | } |
1430 | } |
1431 | |
1432 | } else if (destination.IsFpuRegisters()) { |
1433 | ASSERT(src_type.Equals(dst_type)); |
1434 | ASSERT(src_type.IsFloat()); |
1435 | const auto& dst = destination.AsFpuRegisters(); |
1436 | switch (dst_size) { |
1437 | case 8: |
1438 | __ movsd(dst.fpu_reg(), src_addr); |
1439 | return; |
1440 | case 4: |
1441 | __ movss(dst.fpu_reg(), src_addr); |
1442 | return; |
1443 | default: |
1444 | UNREACHABLE(); |
1445 | } |
1446 | |
1447 | } else { |
1448 | ASSERT(destination.IsStack()); |
1449 | UNREACHABLE(); |
1450 | } |
1451 | } |
1452 | } |
1453 | |
1454 | #undef __ |
1455 | #define __ compiler_->assembler()-> |
1456 | |
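// Swaps the values at the source and destination of the move at |index|, then
// redirects any remaining moves that read from either location so that they
// pick up the value's new home after the swap.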
1457 | void ParallelMoveResolver::EmitSwap(int index) { |
1458 | MoveOperands* move = moves_[index]; |
1459 | const Location source = move->src(); |
1460 | const Location destination = move->dest(); |
1461 | |
1462 | if (source.IsRegister() && destination.IsRegister()) { |
1463 | __ xchgl(destination.reg(), source.reg()); |
1464 | } else if (source.IsRegister() && destination.IsStackSlot()) { |
1465 | Exchange(source.reg(), LocationToStackSlotAddress(destination)); |
1466 | } else if (source.IsStackSlot() && destination.IsRegister()) { |
1467 | Exchange(destination.reg(), LocationToStackSlotAddress(source)); |
1468 | } else if (source.IsStackSlot() && destination.IsStackSlot()) { |
1469 | Exchange(LocationToStackSlotAddress(destination), |
1470 | LocationToStackSlotAddress(source)); |
1471 | } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { |
1472 | __ movaps(FpuTMP, source.fpu_reg()); |
1473 | __ movaps(source.fpu_reg(), destination.fpu_reg()); |
1474 | __ movaps(destination.fpu_reg(), FpuTMP); |
1475 | } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { |
1476 | ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() || |
1477 | source.IsDoubleStackSlot() || source.IsQuadStackSlot()); |
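    // Swap between an FPU register and a double or quad stack slot: load the
    // slot into FpuTMP, store the register into the slot, then copy FpuTMP
    // into the register.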
1478 | bool double_width = |
1479 | destination.IsDoubleStackSlot() || source.IsDoubleStackSlot(); |
1480 | XmmRegister reg = |
1481 | source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg(); |
1482 | const compiler::Address& slot_address = |
1483 | source.IsFpuRegister() ? LocationToStackSlotAddress(destination) |
1484 | : LocationToStackSlotAddress(source); |
1485 | |
1486 | if (double_width) { |
1487 | __ movsd(FpuTMP, slot_address); |
1488 | __ movsd(slot_address, reg); |
1489 | } else { |
1490 | __ movups(FpuTMP, slot_address); |
1491 | __ movups(slot_address, reg); |
1492 | } |
1493 | __ movaps(reg, FpuTMP); |
1494 | } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { |
1495 | const compiler::Address& source_slot_address = |
1496 | LocationToStackSlotAddress(source); |
1497 | const compiler::Address& destination_slot_address = |
1498 | LocationToStackSlotAddress(destination); |
1499 | |
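    // FpuTMP holds one of the values; the scratch scope provides a second
    // FPU register that is guaranteed to be different from FpuTMP.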
1500 | ScratchFpuRegisterScope ensure_scratch(this, FpuTMP); |
1501 | __ movsd(FpuTMP, source_slot_address); |
1502 | __ movsd(ensure_scratch.reg(), destination_slot_address); |
1503 | __ movsd(destination_slot_address, FpuTMP); |
1504 | __ movsd(source_slot_address, ensure_scratch.reg()); |
1505 | } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) { |
1506 | const compiler::Address& source_slot_address = |
1507 | LocationToStackSlotAddress(source); |
1508 | const compiler::Address& destination_slot_address = |
1509 | LocationToStackSlotAddress(destination); |
1510 | |
1511 | ScratchFpuRegisterScope ensure_scratch(this, FpuTMP); |
1512 | __ movups(FpuTMP, source_slot_address); |
1513 | __ movups(ensure_scratch.reg(), destination_slot_address); |
1514 | __ movups(destination_slot_address, FpuTMP); |
1515 | __ movups(source_slot_address, ensure_scratch.reg()); |
1516 | } else { |
1517 | UNREACHABLE(); |
1518 | } |
1519 | |
1520 | // The swap of source and destination has executed a move from source to |
1521 | // destination. |
1522 | move->Eliminate(); |
1523 | |
  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap. For example,
  // after swapping A and B, a pending move reading from A must now read
  // from B.
1527 | for (int i = 0; i < moves_.length(); ++i) { |
1528 | const MoveOperands& other_move = *moves_[i]; |
1529 | if (other_move.Blocks(source)) { |
1530 | moves_[i]->set_src(destination); |
1531 | } else if (other_move.Blocks(destination)) { |
1532 | moves_[i]->set_src(source); |
1533 | } |
1534 | } |
1535 | } |
1536 | |
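// Copies a value between two memory locations via a scratch register.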
1537 | void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst, |
1538 | const compiler::Address& src) { |
1539 | ScratchRegisterScope ensure_scratch(this, kNoRegister); |
1540 | __ MoveMemoryToMemory(dst, src, ensure_scratch.reg()); |
1541 | } |
1542 | |
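// Exchanges a register and a memory operand using a scratch register
// (three 32-bit moves).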
1543 | void ParallelMoveResolver::Exchange(Register reg, |
1544 | const compiler::Address& mem) { |
1545 | ScratchRegisterScope ensure_scratch(this, reg); |
1546 | __ movl(ensure_scratch.reg(), mem); |
1547 | __ movl(mem, reg); |
1548 | __ movl(reg, ensure_scratch.reg()); |
1549 | } |
1550 | |
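// Exchanges two memory operands using two distinct scratch registers.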
1551 | void ParallelMoveResolver::Exchange(const compiler::Address& mem1, |
1552 | const compiler::Address& mem2) { |
1553 | ScratchRegisterScope ensure_scratch1(this, kNoRegister); |
1554 | ScratchRegisterScope ensure_scratch2(this, ensure_scratch1.reg()); |
1555 | __ movl(ensure_scratch1.reg(), mem1); |
1556 | __ movl(ensure_scratch2.reg(), mem2); |
1557 | __ movl(mem2, ensure_scratch1.reg()); |
1558 | __ movl(mem1, ensure_scratch2.reg()); |
1559 | } |
1560 | |
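// The (base register, stack offset) exchange variants are not used on ia32;
// the compiler::Address-based overloads above are used instead.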
1561 | void ParallelMoveResolver::Exchange(Register reg, |
1562 | Register base_reg, |
1563 | intptr_t stack_offset) { |
1564 | UNREACHABLE(); |
1565 | } |
1566 | |
1567 | void ParallelMoveResolver::Exchange(Register base_reg1, |
1568 | intptr_t stack_offset1, |
1569 | Register base_reg2, |
1570 | intptr_t stack_offset2) { |
1571 | UNREACHABLE(); |
1572 | } |
1573 | |
1574 | void ParallelMoveResolver::SpillScratch(Register reg) { |
1575 | __ pushl(reg); |
1576 | } |
1577 | |
1578 | void ParallelMoveResolver::RestoreScratch(Register reg) { |
1579 | __ popl(reg); |
1580 | } |
1581 | |
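// Spills an FPU scratch register by reserving kFpuRegisterSize bytes on the
// stack and storing the full 128-bit register there with an unaligned store.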
1582 | void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { |
1583 | __ subl(ESP, compiler::Immediate(kFpuRegisterSize)); |
1584 | __ movups(compiler::Address(ESP, 0), reg); |
1585 | } |
1586 | |
1587 | void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { |
1588 | __ movups(reg, compiler::Address(ESP, 0)); |
1589 | __ addl(ESP, compiler::Immediate(kFpuRegisterSize)); |
1590 | } |
1591 | |
1592 | #undef __ |
1593 | |
1594 | } // namespace dart |
1595 | |
1596 | #endif // defined(TARGET_ARCH_IA32) |
1597 | |