// Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
#if defined(TARGET_ARCH_ARM)

#include "vm/compiler/backend/flow_graph_compiler.h"

#include "vm/compiler/api/type_check_mode.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/deopt_instructions.h"
#include "vm/dispatch_table.h"
#include "vm/instructions.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"

namespace dart {

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
DECLARE_FLAG(bool, enable_simd_inline);

void FlowGraphCompiler::ArchSpecificInitialization() {
  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
    auto object_store = isolate()->object_store();

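    // When the write-barrier stubs are within pc-relative calling range, hook
    // the assembler so that barrier invocations are emitted as direct calls
    // instead of going through the object pool.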
    const auto& stub =
        Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_write_barrier_wrapper_ =
          [&](Condition condition, Register reg) {
            const intptr_t offset_into_target =
                Thread::WriteBarrierWrappersOffsetForRegister(reg);
            assembler_->GenerateUnRelocatedPcRelativeCall(condition,
                                                          offset_into_target);
            AddPcRelativeCallStubTarget(stub);
          };
    }

    const auto& array_stub =
        Code::ZoneHandle(object_store->array_write_barrier_stub());
    if (CanPcRelativeCall(array_stub)) {
      assembler_->generate_invoke_array_write_barrier_ =
          [&](Condition condition) {
            assembler_->GenerateUnRelocatedPcRelativeCall(condition);
            AddPcRelativeCallStubTarget(array_stub);
          };
    }
  }
}

FlowGraphCompiler::~FlowGraphCompiler() {
  // BlockInfos are zone-allocated, so their destructors are not called.
  // Verify the labels explicitly here.
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
}

bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return TargetCPUFeatures::vfp_supported() && FLAG_unbox_doubles;
}

bool FlowGraphCompiler::SupportsUnboxedInt64() {
  return FLAG_unbox_mints;
}

bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return TargetCPUFeatures::neon_supported() && FLAG_enable_simd_inline;
}

bool FlowGraphCompiler::SupportsHardwareDivision() {
  return TargetCPUFeatures::can_divide();
}

bool FlowGraphCompiler::CanConvertInt64ToDouble() {
  // ARM does not have a short instruction sequence for converting int64 to
  // double.
  return false;
}

void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
  ASSERT(!assembler()->constant_pool_allowed());
}

void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
}

TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == NULL) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  intptr_t stack_height = compiler->StackSize();
  AllocateIncomingParametersRecursive(deopt_env_, &stack_height);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on the deoptimization as a prefix to the deoptimization info.
  EmitMaterializations(deopt_env_, builder);

  // The real frame starts here.
  builder->MarkFrameStart();

  Zone* zone = compiler->zone();

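  // Bookkeeping slots of the innermost frame: pool pointer, PC marker,
  // caller's FP, and the return address derived from this deopt id.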
  builder->AddPp(current->function(), slot_ix++);
  builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);

  // Emit all values that are needed for materialization as a part of the
  // expression stack for the bottom-most frame. This guarantees that GC
  // will be able to find them during materialization.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != NULL) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // For any outer environment the deopt id is that of the call instruction
    // which is recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->deopt_id()),
                              slot_ix++);

    // The values of outgoing arguments can be changed from the inlined call so
    // we must read them from the previous environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals, note that outgoing arguments are not in the environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != NULL);

  // Set slots for the outermost environment.
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}

void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  // Calls do not need stubs, they share a deoptimization trampoline.
  ASSERT(reason() != ICData::kDeoptAtCall);
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  __ Comment("%s", Name());
  __ Bind(entry_label());
  if (FLAG_trap_on_deoptimization) {
    __ bkpt(0);
  }

  ASSERT(deopt_env() != NULL);
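  // Call the shared deoptimization stub through the thread's entry point.
  // LR is a reserved register here (asserted below), so clobbering it with
  // the target address is safe.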
  __ ldr(LR, compiler::Address(
                 THR, compiler::target::Thread::deoptimize_entry_offset()));
  __ blx(LR);
  ASSERT(kReservedCpuRegisters & (1 << LR));
  set_pc_offset(assembler->CodeSize());
#undef __
}

#define __ assembler()->

// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ CompareObject(bool_register, Object::null_object());
  __ b(&fall_through, EQ);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition =
      EmitBoolTest(bool_register, labels, /*invert=*/false);
  ASSERT(true_condition != kInvalidCondition);
  __ b(is_true, true_condition);
  __ b(is_false);
  __ Bind(&fall_through);
}

// R0: instance (must be preserved).
// R2: instantiator type arguments (if used).
// R1: function type arguments (if used).
// R3: type test cache.
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
    TypeTestStubKind test_kind,
    Register instance_reg,
    Register instantiator_type_arguments_reg,
    Register function_type_arguments_reg,
    Register temp_reg,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  ASSERT(instance_reg == R0);
  ASSERT(temp_reg == kNoRegister);  // Unused on ARM.
  const SubtypeTestCache& type_test_cache =
      SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New());
  __ LoadUniqueObject(R3, type_test_cache);
  if (test_kind == kTestTypeOneArg) {
    ASSERT(instantiator_type_arguments_reg == kNoRegister);
    ASSERT(function_type_arguments_reg == kNoRegister);
    __ BranchLink(StubCode::Subtype1TestCache());
  } else if (test_kind == kTestTypeTwoArgs) {
    ASSERT(instantiator_type_arguments_reg == kNoRegister);
    ASSERT(function_type_arguments_reg == kNoRegister);
    __ BranchLink(StubCode::Subtype2TestCache());
  } else if (test_kind == kTestTypeFourArgs) {
    ASSERT(instantiator_type_arguments_reg ==
           TypeTestABI::kInstantiatorTypeArgumentsReg);
    ASSERT(function_type_arguments_reg ==
           TypeTestABI::kFunctionTypeArgumentsReg);
    __ BranchLink(StubCode::Subtype4TestCache());
  } else if (test_kind == kTestTypeSixArgs) {
    ASSERT(instantiator_type_arguments_reg ==
           TypeTestABI::kInstantiatorTypeArgumentsReg);
    ASSERT(function_type_arguments_reg ==
           TypeTestABI::kFunctionTypeArgumentsReg);
    __ BranchLink(StubCode::Subtype6TestCache());
  } else {
    UNREACHABLE();
  }
  // Result is in R1: null -> not found, otherwise Bool::True or Bool::False.
  GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl);
  return type_test_cache.raw();
}

// Jumps to labels 'is_instance' or 'is_not_instance' respectively, if
// type test is conclusive, otherwise fallthrough if a type test could not
// be completed.
// R0: instance being type checked (preserved).
// Clobbers R1, R2.
SubtypeTestCachePtr
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
    TokenPosition token_pos,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeWithArgumentsTest");
  ASSERT(type.IsInstantiated());
  ASSERT(!type.IsFunctionType());
  const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
  ASSERT(type_class.NumTypeArguments() > 0);
  const Type& smi_type = Type::Handle(zone(), Type::SmiType());
  const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kOld);
  __ tst(TypeTestABI::kInstanceReg, compiler::Operand(kSmiTagMask));
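  // EQ holds iff the instance is a Smi (its tag bit is clear).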
  if (smi_is_ok) {
    // Fast case for type = FutureOr<int/num/top-type>.
    __ b(is_instance_lbl, EQ);
  } else {
    __ b(is_not_instance_lbl, EQ);
  }
  const intptr_t num_type_args = type_class.NumTypeArguments();
  const intptr_t num_type_params = type_class.NumTypeParameters();
  const intptr_t from_index = num_type_args - num_type_params;
  const TypeArguments& type_arguments =
      TypeArguments::ZoneHandle(zone(), type.arguments());
  const bool is_raw_type = type_arguments.IsNull() ||
                           type_arguments.IsRaw(from_index, num_type_params);
  if (is_raw_type) {
    const Register kClassIdReg = R2;
    // dynamic type argument, check only classes.
    __ LoadClassId(kClassIdReg, TypeTestABI::kInstanceReg);
    __ CompareImmediate(kClassIdReg, type_class.id());
    __ b(is_instance_lbl, EQ);
    // List is a very common case.
    if (IsListClass(type_class)) {
      GenerateListTypeCheck(kClassIdReg, is_instance_lbl);
    }
    return GenerateSubtype1TestCacheLookup(
        token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
  }
  // If one type argument only, check if type argument is a top type.
  if (type_arguments.Length() == 1) {
    const AbstractType& tp_argument =
        AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
    if (tp_argument.IsTopTypeForSubtyping()) {
      // Only the instance class needs to be tested.
      return GenerateSubtype1TestCacheLookup(
          token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
    }
  }

  // Regular subtype test cache involving instance's type arguments.
  const Register kInstantiatorTypeArgumentsReg = kNoRegister;
  const Register kFunctionTypeArgumentsReg = kNoRegister;
  const Register kTempReg = kNoRegister;
  // R0: instance (must be preserved).
  return GenerateCallSubtypeTestStub(
      kTestTypeTwoArgs, TypeTestABI::kInstanceReg,
      kInstantiatorTypeArgumentsReg, kFunctionTypeArgumentsReg, kTempReg,
      is_instance_lbl, is_not_instance_lbl);
}

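// Emits a linear sequence of class id comparisons: jumps to is_equal_lbl on
// the first match, and to is_not_equal_lbl if none of the ids match.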
void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
                                      const GrowableArray<intptr_t>& class_ids,
                                      compiler::Label* is_equal_lbl,
                                      compiler::Label* is_not_equal_lbl) {
  for (intptr_t i = 0; i < class_ids.length(); i++) {
    __ CompareImmediate(class_id_reg, class_ids[i]);
    __ b(is_equal_lbl, EQ);
  }
  __ b(is_not_equal_lbl);
}

// Testing against an instantiated type with no arguments, without
// SubtypeTestCache.
// R0: instance being type checked (preserved).
// Clobbers R2, R3.
// Returns true if there is a fallthrough.
bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
    TokenPosition token_pos,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeNoArgumentsTest");
  ASSERT(type.IsInstantiated());
  ASSERT(!type.IsFunctionType());
  const Class& type_class = Class::Handle(zone(), type.type_class());
  ASSERT(type_class.NumTypeArguments() == 0);

  __ tst(TypeTestABI::kInstanceReg, compiler::Operand(kSmiTagMask));
  // If instance is Smi, check directly.
  const Class& smi_class = Class::Handle(zone(), Smi::Class());
  if (Class::IsSubtypeOf(smi_class, Object::null_type_arguments(),
                         Nullability::kNonNullable, type, Heap::kOld)) {
    // Fast case for type = int/num/top-type.
    __ b(is_instance_lbl, EQ);
  } else {
    __ b(is_not_instance_lbl, EQ);
  }
  const Register kClassIdReg = R2;
  __ LoadClassId(kClassIdReg, TypeTestABI::kInstanceReg);
  // Bool interface can be implemented only by core class Bool.
  if (type.IsBoolType()) {
    __ CompareImmediate(kClassIdReg, kBoolCid);
    __ b(is_instance_lbl, EQ);
    __ b(is_not_instance_lbl);
    return false;
  }
  // Custom checking for numbers (Smi, Mint and Double).
  // Note that instance is not Smi (checked above).
  if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
    GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl,
                            is_not_instance_lbl);
    return false;
  }
  if (type.IsStringType()) {
    GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl);
    return false;
  }
  if (type.IsDartFunctionType()) {
    // Check if instance is a closure.
    __ CompareImmediate(kClassIdReg, kClosureCid);
    __ b(is_instance_lbl, EQ);
    return true;  // Fall through.
  }

  // Fast case for cid-range based checks.
  // Warning: This code destroys the contents of [kClassIdReg].
  if (GenerateSubtypeRangeCheck(kClassIdReg, type_class, is_instance_lbl)) {
    return false;
  }

  // Otherwise fallthrough, result non-conclusive.
  return true;
}

// Uses SubtypeTestCache to store instance class and result.
// R0: instance to test.
// Clobbers R1-R4, R8, R9.
// Immediate class test already done.
// TODO(srdjan): Implement a quicker subtype check, as type test
// arrays can grow too high, but they may be useful when optimizing
// code (type-feedback).
SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
    TokenPosition token_pos,
    const Class& type_class,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("Subtype1TestCacheLookup");
#if defined(DEBUG)
  compiler::Label ok;
  __ BranchIfNotSmi(TypeTestABI::kInstanceReg, &ok);
  __ Breakpoint();
  __ Bind(&ok);
#endif
  __ LoadClassId(R2, TypeTestABI::kInstanceReg);
  __ LoadClassById(R1, R2);
  // R1: instance class.
  // Check immediate superclass equality. If type_class is Object, then testing
  // supertype may yield a wrong result for Null in NNBD strong mode (because
  // Null also extends Object).
  if (!type_class.IsObjectClass() || !Isolate::Current()->null_safety()) {
    __ ldr(R2, compiler::FieldAddress(
                   R1, compiler::target::Class::super_type_offset()));
    __ ldr(R2, compiler::FieldAddress(
                   R2, compiler::target::Type::type_class_id_offset()));
    __ CompareImmediate(R2, Smi::RawValue(type_class.id()));
    __ b(is_instance_lbl, EQ);
  }

  const Register kInstantiatorTypeArgumentsReg = kNoRegister;
  const Register kFunctionTypeArgumentsReg = kNoRegister;
  const Register kTempReg = kNoRegister;
  return GenerateCallSubtypeTestStub(kTestTypeOneArg, TypeTestABI::kInstanceReg,
                                     kInstantiatorTypeArgumentsReg,
                                     kFunctionTypeArgumentsReg, kTempReg,
                                     is_instance_lbl, is_not_instance_lbl);
}

// Generates inlined check if 'type' is a type parameter or type itself.
// R0: instance (preserved).
SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
    TokenPosition token_pos,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("UninstantiatedTypeTest");
  const Register kTempReg = kNoRegister;
  ASSERT(!type.IsInstantiated());
  ASSERT(!type.IsFunctionType());
  // Skip check if destination is a dynamic type.
  if (type.IsTypeParameter()) {
    const TypeParameter& type_param = TypeParameter::Cast(type);
    static_assert(TypeTestABI::kFunctionTypeArgumentsReg <
                      TypeTestABI::kInstantiatorTypeArgumentsReg,
                  "Should be ordered to load arguments with one instruction");
    __ ldm(IA, SP,
           (1 << TypeTestABI::kFunctionTypeArgumentsReg) |
               (1 << TypeTestABI::kInstantiatorTypeArgumentsReg));
    const Register kTypeArgumentsReg =
        type_param.IsClassTypeParameter()
            ? TypeTestABI::kInstantiatorTypeArgumentsReg
            : TypeTestABI::kFunctionTypeArgumentsReg;
    // Check if type arguments are null, i.e. equivalent to vector of dynamic.
    __ CompareObject(kTypeArgumentsReg, Object::null_object());
    __ b(is_instance_lbl, EQ);
    __ ldr(R3, compiler::FieldAddress(
                   kTypeArgumentsReg,
                   compiler::target::TypeArguments::type_at_offset(
                       type_param.index())));
    // R3: concrete type of type.
    // Check if type argument is dynamic, Object?, or void.
    __ CompareObject(R3, Object::dynamic_type());
    __ b(is_instance_lbl, EQ);
    __ CompareObject(
        R3, Type::ZoneHandle(
                zone(), isolate()->object_store()->nullable_object_type()));
    __ b(is_instance_lbl, EQ);
    __ CompareObject(R3, Object::void_type());
    __ b(is_instance_lbl, EQ);

    // For Smi check quickly against int and num interfaces.
    compiler::Label not_smi;
    __ tst(R0, compiler::Operand(kSmiTagMask));  // Value is Smi?
    __ b(&not_smi, NE);
    __ CompareObject(R3, Type::ZoneHandle(zone(), Type::IntType()));
    __ b(is_instance_lbl, EQ);
    __ CompareObject(R3, Type::ZoneHandle(zone(), Type::Number()));
    __ b(is_instance_lbl, EQ);
    // Smi can be handled by type test cache.
    __ Bind(&not_smi);

    const auto test_kind = GetTypeTestStubKindForTypeParameter(type_param);
    const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
        zone(), GenerateCallSubtypeTestStub(
                    test_kind, TypeTestABI::kInstanceReg,
                    TypeTestABI::kInstantiatorTypeArgumentsReg,
                    TypeTestABI::kFunctionTypeArgumentsReg, kTempReg,
                    is_instance_lbl, is_not_instance_lbl));
    return type_test_cache.raw();
  }
  if (type.IsType()) {
    // Smi is FutureOr<T>, when T is a top type or int or num.
    if (!type.IsFutureOrType()) {
      __ BranchIfSmi(TypeTestABI::kInstanceReg, is_not_instance_lbl);
    }
    static_assert(TypeTestABI::kFunctionTypeArgumentsReg <
                      TypeTestABI::kInstantiatorTypeArgumentsReg,
                  "Should be ordered to load arguments with one instruction");
    __ ldm(IA, SP,
           (1 << TypeTestABI::kFunctionTypeArgumentsReg) |
               (1 << TypeTestABI::kInstantiatorTypeArgumentsReg));
    // Uninstantiated type class is known at compile time, but the type
    // arguments are determined at runtime by the instantiator(s).
    return GenerateCallSubtypeTestStub(
        kTestTypeFourArgs, TypeTestABI::kInstanceReg,
        TypeTestABI::kInstantiatorTypeArgumentsReg,
        TypeTestABI::kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl,
        is_not_instance_lbl);
  }
  return SubtypeTestCache::null();
}

// Generates function type check.
//
// See [GenerateUninstantiatedTypeTest] for calling convention.
SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
    TokenPosition token_pos,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ BranchIfSmi(TypeTestABI::kInstanceReg, is_not_instance_lbl);
  static_assert(TypeTestABI::kFunctionTypeArgumentsReg <
                    TypeTestABI::kInstantiatorTypeArgumentsReg,
                "Should be ordered to load arguments with one instruction");
  __ ldm(IA, SP,
         (1 << TypeTestABI::kFunctionTypeArgumentsReg) |
             (1 << TypeTestABI::kInstantiatorTypeArgumentsReg));
  // Uninstantiated type class is known at compile time, but the type
  // arguments are determined at runtime by the instantiator(s).
  const Register kTempReg = kNoRegister;
  return GenerateCallSubtypeTestStub(
      kTestTypeSixArgs, TypeTestABI::kInstanceReg,
      TypeTestABI::kInstantiatorTypeArgumentsReg,
      TypeTestABI::kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl,
      is_not_instance_lbl);
}

// Inputs:
// - R0: instance being type checked (preserved).
// - R2: optional instantiator type arguments (preserved).
// - R1: optional function type arguments (preserved).
// Clobbers R3, R4, R8, R9.
// Returns:
// - preserved instance in R0, optional instantiator type arguments in R2, and
//   optional function type arguments in R1.
// Note that this inlined code must be followed by the runtime_call code, as it
// may fall through to it. Otherwise, this inline code will jump to the label
// is_instance or to the label is_not_instance.
SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
    TokenPosition token_pos,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InlineInstanceof");

  if (type.IsFunctionType()) {
    return GenerateFunctionTypeTest(token_pos, type, is_instance_lbl,
                                    is_not_instance_lbl);
  }

  if (type.IsInstantiated()) {
    const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
    // A class equality check is only applicable with a dst type (not a
    // function type) of a non-parameterized class or with a raw dst type of
    // a parameterized class.
    if (type_class.NumTypeArguments() > 0) {
      return GenerateInstantiatedTypeWithArgumentsTest(
          token_pos, type, is_instance_lbl, is_not_instance_lbl);
      // Fall through to runtime call.
    }
    const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
        token_pos, type, is_instance_lbl, is_not_instance_lbl);
    if (has_fall_through) {
      // If test non-conclusive so far, try the inlined type-test cache.
      // 'type' is known at compile time.
      return GenerateSubtype1TestCacheLookup(
          token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
    } else {
      return SubtypeTestCache::null();
    }
  }
  return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl,
                                        is_not_instance_lbl);
}

// If instanceof type test cannot be performed successfully at compile time and
// therefore eliminated, optimize it by adding inlined tests for:
// - Null -> see comment below.
// - Smi -> compile time subtype check (only if dst class is not parameterized).
// - Class equality (only if class is not parameterized).
// Inputs:
// - R0: object.
// - R2: instantiator type arguments or raw_null.
// - R1: function type arguments or raw_null.
// Returns:
// - true or false in R0.
void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
                                           intptr_t deopt_id,
                                           const AbstractType& type,
                                           LocationSummary* locs) {
  ASSERT(type.IsFinalized());
  ASSERT(!type.IsTopTypeForInstanceOf());  // Already checked.
  static_assert(TypeTestABI::kFunctionTypeArgumentsReg <
                    TypeTestABI::kInstantiatorTypeArgumentsReg,
                "Should be ordered to push arguments with one instruction");
  __ PushList((1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
              (1 << TypeTestABI::kFunctionTypeArgumentsReg));

  compiler::Label is_instance, is_not_instance;
  // 'null' is an instance of Null, Object*, Never*, void, and dynamic.
  // In addition, 'null' is an instance of any nullable type.
  // It is also an instance of FutureOr<T> if it is an instance of T.
  const AbstractType& unwrapped_type =
      AbstractType::Handle(type.UnwrapFutureOr());
  if (!unwrapped_type.IsTypeParameter() || unwrapped_type.IsNullable()) {
    // Only nullable type parameter remains nullable after instantiation.
    // See NullIsInstanceOf().
    __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
    __ b((unwrapped_type.IsNullable() ||
          (unwrapped_type.IsLegacy() && unwrapped_type.IsNeverType()))
             ? &is_instance
             : &is_not_instance,
         EQ);
  }

  // Generate inline instanceof test.
  SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
  test_cache =
      GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);

  // test_cache is null if there is no fall-through.
  compiler::Label done;
  if (!test_cache.IsNull()) {
    // Generate runtime call.
    static_assert(TypeTestABI::kFunctionTypeArgumentsReg <
                      TypeTestABI::kInstantiatorTypeArgumentsReg,
                  "Should be ordered to load arguments with one instruction");
    __ ldm(IA, SP,
           (1 << TypeTestABI::kFunctionTypeArgumentsReg) |
               (1 << TypeTestABI::kInstantiatorTypeArgumentsReg));
    __ LoadUniqueObject(TypeTestABI::kDstTypeReg, type);
    __ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, test_cache);
    GenerateStubCall(token_pos, StubCode::InstanceOf(),
                     /*kind=*/PcDescriptorsLayout::kOther, locs, deopt_id);
    __ b(&done);
  }
  __ Bind(&is_not_instance);
  __ LoadObject(R0, Bool::Get(false));
  __ b(&done);

  __ Bind(&is_instance);
  __ LoadObject(R0, Bool::Get(true));
  __ Bind(&done);
  // Remove instantiator type arguments and function type arguments.
  __ Drop(2);
}

// Optimize assignable type check by adding inlined tests for:
// - NULL -> return NULL.
// - Smi -> compile time subtype check (only if dst class is not parameterized).
// - Class equality (only if class is not parameterized).
// Inputs:
// - R0: instance being type checked.
// - R8: destination type (if non-constant).
// - R2: instantiator type arguments or raw_null.
// - R1: function type arguments or raw_null.
// Returns:
// - object in R0 for successful assignable check (or throws TypeError).
// Performance notes: positive checks must be quick, negative checks can be slow
// as they throw an exception.
void FlowGraphCompiler::GenerateAssertAssignable(CompileType* receiver_type,
                                                 TokenPosition token_pos,
                                                 intptr_t deopt_id,
                                                 const String& dst_name,
                                                 LocationSummary* locs) {
  ASSERT(!token_pos.IsClassifying());
  ASSERT(CheckAssertAssignableTypeTestingABILocations(*locs));

  compiler::Label is_assignable_fast, is_assignable, runtime_call;

  // Generate inline type check, linking to runtime call if not assignable.
  SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
  static_assert(
      TypeTestABI::kFunctionTypeArgumentsReg <
          TypeTestABI::kInstantiatorTypeArgumentsReg,
      "Should be ordered to push and load arguments with one instruction");
  static RegList type_args = (1 << TypeTestABI::kFunctionTypeArgumentsReg) |
                             (1 << TypeTestABI::kInstantiatorTypeArgumentsReg);

  if (locs->in(1).IsConstant()) {
    const auto& dst_type = AbstractType::Cast(locs->in(1).constant());
    ASSERT(dst_type.IsFinalized());

    if (dst_type.IsTopTypeForSubtyping()) return;  // No code needed.

    if (ShouldUseTypeTestingStubFor(is_optimizing(), dst_type)) {
      GenerateAssertAssignableViaTypeTestingStub(receiver_type, token_pos,
                                                 deopt_id, dst_name, locs);
      return;
    }

    if (Instance::NullIsAssignableTo(dst_type)) {
      __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
      __ b(&is_assignable_fast, EQ);
    }

    __ PushList(type_args);

    test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable,
                                          &runtime_call);
  } else {
    // TODO(dartbug.com/40813): Handle setting up the non-constant case.
    UNREACHABLE();
  }

  __ Bind(&runtime_call);
  __ ldm(IA, SP, type_args);
  __ PushObject(Object::null_object());  // Make room for the result.
  __ Push(TypeTestABI::kInstanceReg);    // Push the source object.
  // Push the type of the destination.
  if (locs->in(1).IsConstant()) {
    __ PushObject(locs->in(1).constant());
  } else {
    // TODO(dartbug.com/40813): Handle setting up the non-constant case.
    UNREACHABLE();
  }
  __ PushList(type_args);
  __ PushObject(dst_name);  // Push the name of the destination.
  __ LoadUniqueObject(R0, test_cache);
  __ Push(R0);
  __ PushImmediate(Smi::RawValue(kTypeCheckFromInline));
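  // Seven arguments for the TypeCheck runtime entry: instance, destination
  // type, both type argument vectors, destination name, subtype test cache,
  // and the check mode.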
  GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 7, locs);
  // Pop the parameters supplied to the runtime entry. The result of the
  // type check runtime call is the checked value.
  __ Drop(7);
  __ Pop(TypeTestABI::kInstanceReg);
  __ Bind(&is_assignable);
  __ PopList(type_args);
  __ Bind(&is_assignable_fast);
}

void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
    CompileType* receiver_type,
    TokenPosition token_pos,
    intptr_t deopt_id,
    const String& dst_name,
    LocationSummary* locs) {
  ASSERT(CheckAssertAssignableTypeTestingABILocations(*locs));
  // We must have a constant dst_type for generating a call to the stub.
  ASSERT(locs->in(1).IsConstant());
  const auto& dst_type = AbstractType::Cast(locs->in(1).constant());

  // If the dst_type is instantiated we know the target TTS stub at
  // compile-time and can therefore use a pc-relative call.
  const bool use_pc_relative_call =
      dst_type.IsInstantiated() && CanPcRelativeCall(dst_type);

  const Register kRegToCall =
      use_pc_relative_call
          ? kNoRegister
          : (dst_type.IsTypeParameter() ? R9 : TypeTestABI::kDstTypeReg);
  const Register kScratchReg = R4;

  compiler::Label done;

  GenerateAssertAssignableViaTypeTestingStub(receiver_type, dst_type, dst_name,
                                             kRegToCall, kScratchReg, &done);

  // We use 2 consecutive entries in the pool for the subtype cache and the
  // destination name.  The second entry, namely [dst_name], seems to be unused,
  // but it will be used by the code throwing a TypeError if the type test fails
  // (see runtime/vm/runtime_entry.cc:TypeCheck).  It will use pattern matching
  // on the call site to find out at which pool index the destination name is
  // located.
  const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
      Object::null_object(), ObjectPool::Patchability::kPatchable);
  const intptr_t sub_type_cache_offset =
      compiler::target::ObjectPool::element_offset(sub_type_cache_index) -
      kHeapObjectTag;
  const intptr_t dst_name_index = __ object_pool_builder().AddObject(
      dst_name, ObjectPool::Patchability::kPatchable);
  ASSERT((sub_type_cache_index + 1) == dst_name_index);
  ASSERT(__ constant_pool_allowed());

  if (use_pc_relative_call) {
    __ LoadWordFromPoolOffset(TypeTestABI::kSubtypeTestCacheReg,
                              sub_type_cache_offset, PP);
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeTTSCallTypeTarget(dst_type);
  } else {
    __ LoadField(R9, compiler::FieldAddress(
                         kRegToCall, compiler::target::AbstractType::
                                         type_test_stub_entry_point_offset()));
    __ LoadWordFromPoolOffset(TypeTestABI::kSubtypeTestCacheReg,
                              sub_type_cache_offset, PP);
    __ blx(R9);
  }
  EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
  __ Bind(&done);
}

void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
  if (is_optimizing()) {
    return;
  }
  Definition* defn = instr->AsDefinition();
  if ((defn != NULL) && defn->HasTemp()) {
    __ Push(defn->locs()->out(0).reg());
  }
}

void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
    const Function& extracted_method,
    intptr_t type_arguments_field_offset) {
  // No frame has been set up here.
  ASSERT(!__ constant_pool_allowed());
  ASSERT(extracted_method.IsZoneHandle());

  const Code& build_method_extractor = Code::ZoneHandle(
      isolate()->object_store()->build_method_extractor_code());

  const intptr_t stub_index = __ object_pool_builder().AddObject(
      build_method_extractor, ObjectPool::Patchability::kNotPatchable);
  const intptr_t function_index = __ object_pool_builder().AddObject(
      extracted_method, ObjectPool::Patchability::kNotPatchable);

  // We use a custom pool register to preserve caller PP.
  Register kPoolReg = R0;

  // R1 = extracted function
  // R4 = offset of type argument vector (or 0 if class is not generic)
  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
    kPoolReg = PP;
  } else {
    __ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG,
                           compiler::target::Code::object_pool_offset());
  }
  __ LoadImmediate(R4, type_arguments_field_offset);
  __ LoadFieldFromOffset(
      kWord, R1, kPoolReg,
      compiler::target::ObjectPool::element_offset(function_index));
  __ LoadFieldFromOffset(
      kWord, CODE_REG, kPoolReg,
      compiler::target::ObjectPool::element_offset(stub_index));
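  // Tail-call the method-extractor stub through its unchecked entry point.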
  __ Branch(compiler::FieldAddress(
      CODE_REG,
      compiler::target::Code::entry_point_offset(Code::EntryKind::kUnchecked)));
}

void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    const Register function_reg = R8;
    __ ldr(function_reg, compiler::FieldAddress(
                             CODE_REG, compiler::target::Code::owner_offset()));
    __ ldr(R3, compiler::FieldAddress(
                   function_reg,
                   compiler::target::Function::usage_counter_offset()));
    // Reoptimization of an optimized function is triggered by counting in
    // IC stubs, but not at the entry of the function.
    if (!is_optimizing()) {
      __ add(R3, R3, compiler::Operand(1));
      __ str(R3, compiler::FieldAddress(
                     function_reg,
                     compiler::target::Function::usage_counter_offset()));
    }
    __ CompareImmediate(R3, GetOptimizationThreshold());
    ASSERT(function_reg == R8);
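    // Once the counter reaches the threshold, tail-call the optimize-entry
    // stub, which expects the function in R8 (hence the assert above).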
    __ Branch(compiler::Address(
                  THR, compiler::target::Thread::optimize_entry_offset()),
              GE);
  }
  __ Comment("Enter frame");
  if (flow_graph().IsCompiledForOsr()) {
    const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
    ASSERT(extra_slots >= 0);
    __ EnterOsrFrame(extra_slots * compiler::target::kWordSize);
  } else {
    ASSERT(StackSize() >= 0);
    __ EnterDartFrame(StackSize() * compiler::target::kWordSize);
  }
}

void FlowGraphCompiler::EmitPrologue() {
  EmitFrameEntry();
  ASSERT(assembler()->constant_pool_allowed());

  // In unoptimized code, initialize (non-argument) stack allocated slots.
  if (!is_optimizing()) {
    const int num_locals = parsed_function().num_stack_locals();

    intptr_t args_desc_slot = -1;
    if (parsed_function().has_arg_desc_var()) {
      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().arg_desc_var());
    }

    __ Comment("Initialize spill slots");
    if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
      __ LoadObject(R0, Object::null_object());
    }
    for (intptr_t i = 0; i < num_locals; ++i) {
      const intptr_t slot_index =
          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
      Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
      __ StoreToOffset(kWord, value_reg, FP,
                       slot_index * compiler::target::kWordSize);
    }
  }

  EndCodeSourceRange(TokenPosition::kDartCodePrologue);
}

// Input parameters:
//   LR: return address.
//   SP: address of last argument.
//   FP: caller's frame pointer.
//   PP: caller's pool pointer.
//   R4: arguments descriptor array.
void FlowGraphCompiler::CompileGraph() {
  InitCompiler();

  // For JIT we have multiple entrypoints functionality which moved the frame
  // setup into the [TargetEntryInstr] (which will set the constant pool
  // allowed bit to true).  Despite this we still have to set the
  // constant pool allowed bit to true here as well, because we can generate
  // code for [CatchEntryInstr]s, which need the pool.
  __ set_constant_pool_allowed(true);

  VisitBlocks();

#if defined(DEBUG)
  __ bkpt(0);
#endif

  if (!skip_body_compilation()) {
    ASSERT(assembler()->constant_pool_allowed());
    GenerateDeferredCode();
  }

  for (intptr_t i = 0; i < indirect_gotos_.length(); ++i) {
    indirect_gotos_[i]->ComputeOffsetTable(this);
  }
}

void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
  ASSERT(!stub.IsNull());
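  // Prefer a pc-relative call when the stub is guaranteed to be in range;
  // otherwise call through the object pool.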
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  } else {
    __ BranchLink(stub);
    AddStubCallTarget(stub);
  }
}

void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
  ASSERT(!stub.IsNull());
  if (CanPcRelativeCall(stub)) {
    __ LeaveDartFrame();
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
#if defined(DEBUG)
    __ Breakpoint();
#endif
  } else {
    __ LoadObject(CODE_REG, stub);
    __ LeaveDartFrame();
    __ ldr(PC, compiler::FieldAddress(
                   CODE_REG, compiler::target::Code::entry_point_offset()));
    AddStubCallTarget(stub);
  }
}

void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
                                              const Code& stub,
                                              PcDescriptorsLayout::Kind kind,
                                              LocationSummary* locs) {
  __ BranchLinkPatchable(stub);
  EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
}

void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         TokenPosition token_pos,
                                         const Code& stub,
                                         PcDescriptorsLayout::Kind kind,
                                         LocationSummary* locs,
                                         Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  __ BranchLinkPatchable(stub, entry_kind);
  EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}

void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               TokenPosition token_pos,
                                               PcDescriptorsLayout::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target,
                                               Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
  } else {
    ASSERT(is_optimizing());
    // Call sites to the same target can share object pool entries. These
    // call sites are never patched for breakpoints: the function is deoptimized
    // and the unoptimized code with IC calls for static calls is patched
    // instead.
    const auto& stub = StubCode::CallStaticFunction();
    __ BranchLinkWithEquivalence(stub, target, entry_kind);
    EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
    AddStaticCallTarget(target, entry_kind);
  }
}

void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
                                            intptr_t deopt_id,
                                            const RuntimeEntry& entry,
                                            intptr_t argument_count,
                                            LocationSummary* locs) {
  __ CallRuntime(entry, argument_count);
  EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
}

void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
  // We do not check for overflow when incrementing the edge counter.  The
  // function should normally be optimized long before the counter can
  // overflow; and though we do not reset the counters when we optimize or
  // deoptimize, there is a bound on the number of
  // optimization/deoptimization cycles we will attempt.
  ASSERT(!edge_counters_array_.IsNull());
  ASSERT(assembler_->constant_pool_allowed());
  __ Comment("Edge counter");
  __ LoadObject(R0, edge_counters_array_);
#if defined(DEBUG)
  bool old_use_far_branches = assembler_->use_far_branches();
  assembler_->set_use_far_branches(true);
#endif  // DEBUG
  __ LoadFieldFromOffset(kWord, R1, R0,
                         compiler::target::Array::element_offset(edge_id));
  __ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
  __ StoreIntoObjectNoBarrierOffset(
      R0, compiler::target::Array::element_offset(edge_id), R1);
#if defined(DEBUG)
  assembler_->set_use_far_branches(old_use_far_branches);
#endif  // DEBUG
}

void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
                                                  const ICData& ic_data,
                                                  intptr_t deopt_id,
                                                  TokenPosition token_pos,
                                                  LocationSummary* locs,
                                                  Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
  // Each ICData propagated from unoptimized to optimized code contains the
  // function that corresponds to the Dart function of that IC call. Due
  // to inlining in optimized code, that function may not correspond to the
  // top-level function (parsed_function().function()) which could be
  // reoptimized and which counter needs to be incremented.
  // Pass the function explicitly, it is used in IC stub.

  __ LoadObject(R8, parsed_function().function());
|---|
| 1096 | __ LoadFromOffset(kWord, R0, SP, | 
|---|
| 1097 | (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize); | 
|---|
| 1098 | __ LoadUniqueObject(R9, ic_data); | 
|---|
| 1099 | GenerateDartCall(deopt_id, token_pos, stub, PcDescriptorsLayout::kIcCall, | 
|---|
| 1100 | locs, entry_kind); | 
|---|
| 1101 | __ Drop(ic_data.SizeWithTypeArgs()); | 
|---|
| 1102 | } | 
|---|
| 1103 |  | 
|---|
void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
                                            const ICData& ic_data,
                                            intptr_t deopt_id,
                                            TokenPosition token_pos,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
  __ LoadFromOffset(kWord, R0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  __ LoadUniqueObject(R9, ic_data);
  __ LoadUniqueObject(CODE_REG, stub);
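  // Enter the IC stub through its monomorphic entry point; kUnchecked
  // selects the variant that skips the receiver type check.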
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
  __ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
  __ blx(LR);
  EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kIcCall, locs);
  __ Drop(ic_data.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    TokenPosition token_pos,
    LocationSummary* locs,
    intptr_t try_index,
    intptr_t slow_path_argument_count) {
  ASSERT(CanCallDart());
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));

  __ Comment("MegamorphicCall");
  // Load receiver into R0.
  __ LoadFromOffset(kWord, R0, SP,
                    (args_desc.Count() - 1) * compiler::target::kWordSize);
  // Use the same code pattern as instance calls so that the code patcher can
  // parse it.
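  // In bare-instructions mode the pool slot holds the stub's entry point
  // directly, so it is loaded straight into LR; otherwise the stub's Code
  // object is loaded into CODE_REG and the entry point is read out of it.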
  if (FLAG_precompiled_mode) {
    if (FLAG_use_bare_instructions) {
      // The AOT runtime will replace the slot in the object pool with the
      // entrypoint address - see clustered_snapshot.cc.
      __ LoadUniqueObject(LR, StubCode::MegamorphicCall());
    } else {
      __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
      __ ldr(LR, compiler::FieldAddress(
                     CODE_REG, compiler::target::Code::entry_point_offset(
                                   Code::EntryKind::kMonomorphic)));
    }
    __ LoadUniqueObject(R9, cache);
    __ blx(LR);

  } else {
    __ LoadUniqueObject(R9, cache);
    __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
    __ ldr(LR, compiler::FieldAddress(
                   CODE_REG,
                   Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
    __ blx(LR);
  }

  RecordSafepoint(locs, slow_path_argument_count);
  const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
  if (FLAG_precompiled_mode) {
    // Megamorphic calls may occur in slow path stubs.
    // If a valid try_index argument was supplied, use it.
    if (try_index == kInvalidTryIndex) {
      try_index = CurrentTryIndex();
    }
    AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
                  DeoptId::kNone, token_pos, try_index);
  } else if (is_optimizing()) {
    AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
                         token_pos);
    AddDeoptIndexAtCall(deopt_id_after);
  } else {
    AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
                         token_pos);
    // Add deoptimization continuation point after the call and before the
    // arguments are removed.
    AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after,
                         token_pos);
  }
  RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
  __ Drop(args_desc.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            TokenPosition token_pos,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  ASSERT(CanCallDart());
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  __ LoadFromOffset(
      kWord, R0, SP,
      (ic_data.SizeWithoutTypeArgs() - 1) * compiler::target::kWordSize);
  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
    // The AOT runtime will replace the slot in the object pool with the
    // entrypoint address - see clustered_snapshot.cc.
    __ LoadUniqueObject(LR, initial_stub);
  } else {
    __ LoadUniqueObject(CODE_REG, initial_stub);
    const intptr_t entry_point_offset =
        entry_kind == Code::EntryKind::kNormal
            ? compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphic)
            : compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphicUnchecked);
    __ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
  }
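  // R9 carries the switchable-call data (here an UnlinkedCall); the miss
  // stub uses it to resolve the call and may rewrite this call site to a
  // faster dispatch for subsequent calls.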
  __ LoadUniqueObject(R9, data);
  __ blx(LR);

  EmitCallsiteMetadata(token_pos, DeoptId::kNone, PcDescriptorsLayout::kOther,
                       locs);
  __ Drop(ic_data.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
                                                  intptr_t deopt_id,
                                                  TokenPosition token_pos,
                                                  LocationSummary* locs,
                                                  const ICData& ic_data,
                                                  Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(R9, ic_data);
  GenerateDartCall(deopt_id, token_pos, stub,
                   PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
  __ Drop(size_with_type_args);
}

void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function,
    const Array& arguments_descriptor,
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    TokenPosition token_pos,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  ASSERT(!function.IsClosureFunction());
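  // R4 is ARGS_DESC_REG on ARM. Callees with optional or type parameters
  // read the descriptor; for the rest, a Smi zero keeps the register GC-safe.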
  if (function.HasOptionalParameters() || function.IsGeneric()) {
    __ LoadObject(R4, arguments_descriptor);
  } else {
    if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
      __ LoadImmediate(R4, 0);  // GC safe smi zero because of stub.
    }
  }
  // Do not use the code from the function, but let the code be patched so
  // that we can record the outgoing edges to other code.
  GenerateStaticDartCall(deopt_id, token_pos, PcDescriptorsLayout::kOther, locs,
                         function, entry_kind);
  __ Drop(size_with_type_args);
}

void FlowGraphCompiler::EmitDispatchTableCall(
    Register cid_reg,
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  ASSERT(CanCallDart());
  ASSERT(cid_reg != ARGS_DESC_REG);
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
  intptr_t offset = (selector_offset - DispatchTable::OriginElement()) *
                    compiler::target::kWordSize;
  if (offset == 0) {
    __ ldr(LR, compiler::Address(DISPATCH_TABLE_REG, cid_reg, LSL,
                                 compiler::target::kWordSizeLog2));
  } else {
    __ add(LR, DISPATCH_TABLE_REG,
           compiler::Operand(cid_reg, LSL, compiler::target::kWordSizeLog2));
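    // An ARM ldr immediate offset is limited to 12 bits (+/-4095 bytes), so a
    // larger selector offset is split into an AddImmediate for the high part
    // and a small remaining offset for the load.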
    if (!Utils::IsAbsoluteUint(12, offset)) {
      const intptr_t adjust = offset & -(1 << 12);
      __ AddImmediate(LR, LR, adjust);
      offset -= adjust;
    }
    __ ldr(LR, compiler::Address(LR, offset));
  }
  __ blx(LR);
}

Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg,
    const Object& obj,
    bool needs_number_check,
    TokenPosition token_pos,
    intptr_t deopt_id) {
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble());
    __ Push(reg);
    __ PushObject(obj);
    if (is_optimizing()) {
      __ BranchLinkPatchable(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
                         token_pos);
    // Stub returns result in flags (result of a cmp, we need Z computed).
    __ Drop(1);   // Discard constant.
    __ Pop(reg);  // Restore 'reg'.
  } else {
    __ CompareObject(reg, obj);
  }
  return EQ;
}

Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
                                                       Register right,
                                                       bool needs_number_check,
                                                       TokenPosition token_pos,
                                                       intptr_t deopt_id) {
  if (needs_number_check) {
    __ Push(left);
    __ Push(right);
    if (is_optimizing()) {
      __ BranchLinkPatchable(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
                         token_pos);
    // Stub returns result in flags (result of a cmp, we need Z computed).
    __ Pop(right);
    __ Pop(left);
  } else {
    __ cmp(left, compiler::Operand(right));
  }
  return EQ;
}

Condition FlowGraphCompiler::EmitBoolTest(Register value,
                                          BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
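  // true and false are allocated such that a single address bit
  // (kBoolValueMask) distinguishes them, so testing that bit compares the
  // value against true/false without materializing either constant.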
  __ tst(value,
         compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
  return invert ? NE : EQ;
}

// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
// FlowGraphCompiler::SlowPathEnvironmentFor.
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif
  // TODO(vegorov): consider saving only caller save (volatile) registers.
  __ PushRegisters(*locs->live_registers());
}

void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  __ PopRegisters(*locs->live_registers());
}

#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    // TODO(zerny): clobber non-live temporary FPU registers.
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
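      // 0xf7 is an arbitrary, recognizable garbage value: reads of a
      // clobbered temp show up quickly in debug builds.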
      __ mov(tmp.reg(), compiler::Operand(0xf7));
    }
  }
}
#endif

Register FlowGraphCompiler::EmitTestCidRegister() {
  return R2;
}

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into R0.
  __ LoadFromOffset(
      kWord, R0, SP,
      (count_without_type_args - 1) * compiler::target::kWordSize);
  __ LoadObject(R4, arguments_descriptor);
}

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  __ tst(R0, compiler::Operand(kSmiTagMask));
  // Jump to the label when the receiver is a Smi (if_smi) or when it is not
  // (!if_smi): Smis have a zero tag bit, so tst sets Z exactly for Smis.
  __ b(label, if_smi ? EQ : NE);
}

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  ASSERT(class_id_reg != R0);
  __ LoadClassId(class_id_reg, R0);
}

#undef __
#define __ assembler->

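// Checks whether the class id in class_id_reg (biased by 'bias') falls in
// 'range'. The register is left biased by range.cid_start, which is returned
// as the new bias, so a following range check only has to adjust by the delta
// instead of reloading the class id.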
int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler,
                                               compiler::Label* label,
                                               Register class_id_reg,
                                               const CidRangeValue& range,
                                               int bias,
                                               bool jump_on_miss) {
  intptr_t cid_start = range.cid_start;
  if (range.IsSingleCid()) {
    __ AddImmediateSetFlags(class_id_reg, class_id_reg, bias - cid_start);
    __ BranchIf(jump_on_miss ? NOT_ZERO : ZERO, label);
    bias = cid_start;
  } else {
    __ AddImmediate(class_id_reg, class_id_reg, bias - cid_start);
    __ CompareImmediate(class_id_reg, range.Extent());
    __ BranchIf(jump_on_miss ? UNSIGNED_GREATER : UNSIGNED_LESS_EQUAL, label);
    bias = cid_start;
  }
  return bias;
}

#undef __
#define __ assembler()->

void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* allocator) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mov(destination.reg(), compiler::Operand(source.reg()));
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(kWord, source.reg(), destination.base_reg(),
                       dest_offset);
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(kWord, destination.reg(), source.base_reg(),
                        source_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();

      // LR is not used by the register allocator.
      ASSERT(((1 << LR) & kDartAvailableCpuRegs) == 0);

      // StoreToOffset uses TMP in the case where dest_offset is too large or
      // small in order to calculate a new base. We fall back to using LR as a
      // temporary as we know we're in a ParallelMove.
      const Register temp_reg = LR;

      __ LoadFromOffset(kWord, temp_reg, source.base_reg(), source_offset);
      __ StoreToOffset(kWord, temp_reg, destination.base_reg(), dest_offset);
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      if (TargetCPUFeatures::neon_supported()) {
        __ vmovq(destination.fpu_reg(), source.fpu_reg());
      } else {
        // If we're not inlining simd values, then only the even-numbered D
        // registers will have anything in them.
        __ vmovd(EvenDRegisterOf(destination.fpu_reg()),
                 EvenDRegisterOf(source.fpu_reg()));
      }
    } else if (destination.IsStackSlot()) {
      // 32-bit float
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const SRegister src = EvenSRegisterOf(EvenDRegisterOf(source.fpu_reg()));
      __ StoreSToOffset(src, destination.base_reg(), dest_offset);
    } else if (destination.IsDoubleStackSlot()) {
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      DRegister src = EvenDRegisterOf(source.fpu_reg());
      __ StoreDToOffset(src, destination.base_reg(), dest_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg());
      __ StoreMultipleDToOffset(dsrc0, 2, destination.base_reg(), dest_offset);
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);
    } else if (destination.IsStackSlot()) {
      // 32-bit float
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadSFromOffset(STMP, source.base_reg(), source_offset);
      __ StoreSToOffset(STMP, destination.base_reg(), dest_offset);
    } else {
      ASSERT(destination.IsDoubleStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const DRegister dst0 = EvenDRegisterOf(destination.fpu_reg());
      __ LoadMultipleDFromOffset(dst0, 2, source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister dtmp0 = DTMP;
      __ LoadMultipleDFromOffset(dtmp0, 2, source.base_reg(), source_offset);
      __ StoreMultipleDToOffset(dtmp0, 2, destination.base_reg(), dest_offset);
    }
  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {
      EmitMove(destination.Component(i), source.Component(i), allocator);
    }
  } else {
    ASSERT(source.IsConstant());
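    // Constants headed for FPU registers or memory may need a core register
    // to materialize the bit pattern first, so borrow one from the allocator.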
    if (destination.IsFpuRegister() || destination.IsDoubleStackSlot() ||
        destination.IsStackSlot()) {
      Register tmp = allocator->AllocateTemporary();
      source.constant_instruction()->EmitMoveToLocation(this, destination, tmp);
      allocator->ReleaseTemporary();
    } else {
      source.constant_instruction()->EmitMoveToLocation(this, destination);
    }
  }
}

static OperandSize BytesToOperandSize(intptr_t bytes) {
  switch (bytes) {
    case 4:
      return OperandSize::kWord;
    case 2:
      return OperandSize::kHalfword;
    case 1:
      return OperandSize::kByte;
    default:
      UNIMPLEMENTED();
  }
}

void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_payload_type = source.payload_type();
  const auto& dst_payload_type = destination.payload_type();
  const auto& src_container_type = source.container_type();
  const auto& dst_container_type = destination.container_type();
  ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
  ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
  ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
  ASSERT(src_payload_type.IsFundamental());
  ASSERT(dst_payload_type.IsFundamental());
  const intptr_t src_size = src_payload_type.SizeInBytes();
  const intptr_t dst_size = dst_payload_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    ASSERT(src.num_regs() == 1);
    ASSERT(src_size <= 4);
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      if (!sign_or_zero_extend) {
        ASSERT(dst_size == 4);
        __ mov(dst_reg, compiler::Operand(src_reg));
      } else {
        ASSERT(sign_or_zero_extend);
        // Sign- or zero-extend the payload with a shift pair.
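        // E.g. widening a signed 8-bit payload to 32 bits becomes:
        //   lsl dst, src, #24
        //   asr dst, dst, #24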
        const intptr_t shift_length =
            (compiler::target::kWordSize - src_size) * kBitsPerByte;
        __ Lsl(dst_reg, src_reg, compiler::Operand(shift_length));
        if (src_payload_type.IsSigned()) {
          __ Asr(dst_reg, dst_reg, compiler::Operand(shift_length));
        } else {
          __ Lsr(dst_reg, dst_reg, compiler::Operand(shift_length));
        }
      }

    } else if (destination.IsFpuRegisters()) {
      // FPU registers should only contain doubles, and core registers only
      // ints. The bit casts are done with a BitCastInstr.
      // TODO(dartbug.com/40371): Remove BitCastInstr and implement here.
      UNIMPLEMENTED();

    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);
      ASSERT(dst_size <= 4);
      const OperandSize op_size = BytesToOperandSize(dst_size);
      __ StoreToOffset(op_size, src.reg_at(0), dst.base_register(),
                       dst.offset_in_bytes());
    }

  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    // We have not implemented conversions here; use IL convert instructions.
    ASSERT(src_payload_type.Equals(dst_payload_type));

    if (destination.IsRegisters()) {
      // FPU registers should only contain doubles, and core registers only
      // ints. The bit casts are done with a BitCastInstr.
      // TODO(dartbug.com/40371): Remove BitCastInstr and implement here.
      UNIMPLEMENTED();

    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      switch (dst_size) {
        case 16:
          __ vmovq(dst.fpu_reg(), src.fpu_reg());
          return;
        case 8:
          __ vmovd(dst.fpu_as_d_reg(), src.fpu_as_d_reg());
          return;
        case 4:
          __ vmovs(dst.fpu_as_s_reg(), src.fpu_as_s_reg());
          return;
        default:
          UNREACHABLE();
      }

    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsStack();
      switch (dst_size) {
        case 8:
          __ StoreDToOffset(src.fpu_as_d_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        case 4:
          __ StoreSToOffset(src.fpu_as_s_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        default:
          // TODO(dartbug.com/37470): Case 16 for simd packed data.
          UNREACHABLE();
      }
    }

  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      ASSERT(!sign_or_zero_extend);
      ASSERT(dst_size <= 4);
      const OperandSize op_size = BytesToOperandSize(dst_size);
      __ LoadFromOffset(op_size, dst_reg, src.base_register(),
                        src.offset_in_bytes());

    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_payload_type.Equals(dst_payload_type));
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      switch (src_size) {
        case 8:
          __ LoadDFromOffset(dst.fpu_as_d_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        case 4:
          __ LoadSFromOffset(dst.fpu_as_s_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        default:
          UNIMPLEMENTED();
      }

    } else {
      ASSERT(destination.IsStack());
      UNREACHABLE();
    }
  }
}

void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                     Register dst,
                                     Register tmp) {
  compiler::Label skip_reloc;
  __ b(&skip_reloc);
  InsertBSSRelocation(relocation);
  __ Bind(&skip_reloc);

  // For historical reasons, the PC on ARM points 8 bytes (two instructions)
  // past the current instruction.
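  // With the relocation word placed immediately after the unbound branch,
  // PC - (kPCReadOffset + kWordSize) computes exactly the address of that
  // word: (sub address + 8) - 8 - 4 = branch address + 4.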
  __ sub(tmp, PC,
         compiler::Operand(Instr::kPCReadOffset + compiler::target::kWordSize));

  // tmp holds the address of the relocation.
  __ ldr(dst, compiler::Address(tmp));

  // dst holds the relocation itself: tmp - bss_start.
  // tmp = tmp + (bss_start - tmp) = bss_start
  __ add(tmp, tmp, compiler::Operand(dst));

  // tmp holds the start of the BSS section.
  // Load the "get-thread" routine: *bss_start.
  __ ldr(dst, compiler::Address(tmp));
}

#undef __
#define __ compiler_->assembler()->

void ParallelMoveResolver::EmitSwap(int index) {
  MoveOperands* move = moves_[index];
  const Location source = move->src();
  const Location destination = move->dest();

  if (source.IsRegister() && destination.IsRegister()) {
    ASSERT(source.reg() != IP);
    ASSERT(destination.reg() != IP);
    __ mov(IP, compiler::Operand(source.reg()));
    __ mov(source.reg(), compiler::Operand(destination.reg()));
    __ mov(destination.reg(), compiler::Operand(IP));
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), destination.base_reg(),
             destination.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.base_reg(), source.ToStackSlotOffset(),
             destination.base_reg(), destination.ToStackSlotOffset());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    if (TargetCPUFeatures::neon_supported()) {
      const QRegister dst = destination.fpu_reg();
      const QRegister src = source.fpu_reg();
      ASSERT(dst != QTMP && src != QTMP);
      __ vmovq(QTMP, src);
      __ vmovq(src, dst);
      __ vmovq(dst, QTMP);
    } else {
      const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
      const DRegister src = EvenDRegisterOf(source.fpu_reg());
      ASSERT(dst != DTMP && src != DTMP);
      __ vmovd(DTMP, src);
      __ vmovd(src, dst);
      __ vmovd(dst, DTMP);
    }
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    QRegister qreg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    DRegister reg = EvenDRegisterOf(qreg);
    Register base_reg =
        source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
    const intptr_t slot_offset = source.IsFpuRegister()
                                     ? destination.ToStackSlotOffset()
                                     : source.ToStackSlotOffset();

    if (double_width) {
      __ LoadDFromOffset(DTMP, base_reg, slot_offset);
      __ StoreDToOffset(reg, base_reg, slot_offset);
      __ vmovd(reg, DTMP);
    } else {
      __ LoadMultipleDFromOffset(DTMP, 2, base_reg, slot_offset);
      __ StoreMultipleDToOffset(reg, 2, base_reg, slot_offset);
      __ vmovq(qreg, QTMP);
    }
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
    DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
    __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, destination.base_reg(), source_offset);
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
    DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
    __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset);
    __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset);
    __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset);
    __ StoreMultipleDToOffset(scratch, 2, destination.base_reg(),
                              source_offset);
  } else {
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  move->Eliminate();

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    const MoveOperands& other_move = *moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i]->set_src(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i]->set_src(source);
    }
  }
}

void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst,
                                              const compiler::Address& src) {
  UNREACHABLE();
}

// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg,
                                    const compiler::Address& mem) {
  UNREACHABLE();
}

// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const compiler::Address& mem1,
                                    const compiler::Address& mem2) {
  UNREACHABLE();
}

void ParallelMoveResolver::Exchange(Register reg,
                                    Register base_reg,
                                    intptr_t stack_offset) {
  ScratchRegisterScope tmp(this, reg);
  __ mov(tmp.reg(), compiler::Operand(reg));
  __ LoadFromOffset(kWord, reg, base_reg, stack_offset);
  __ StoreToOffset(kWord, tmp.reg(), base_reg, stack_offset);
}

void ParallelMoveResolver::Exchange(Register base_reg1,
                                    intptr_t stack_offset1,
                                    Register base_reg2,
                                    intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(kWord, tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(kWord, tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(kWord, tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(kWord, tmp2.reg(), base_reg1, stack_offset1);
}

void ParallelMoveResolver::SpillScratch(Register reg) {
  __ Push(reg);
}

void ParallelMoveResolver::RestoreScratch(Register reg) {
  __ Pop(reg);
}

void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
  DRegister dreg = EvenDRegisterOf(reg);
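  // Pre-indexed store: SP is decremented by kDoubleSize first, i.e. a push of
  // the double; RestoreFpuScratch pops with the matching post-indexed load.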
  __ vstrd(dreg,
           compiler::Address(SP, -kDoubleSize, compiler::Address::PreIndex));
}

void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
  DRegister dreg = EvenDRegisterOf(reg);
  __ vldrd(dreg,
           compiler::Address(SP, kDoubleSize, compiler::Address::PostIndex));
}

#undef __

}  // namespace dart

#endif  // defined(TARGET_ARCH_ARM)