// Copyright (c) 2012, the Dart project authors.  Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/backend/flow_graph.h"

#include "vm/bit_vector.h"
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/backend/loops.h"
#include "vm/compiler/backend/range_analysis.h"
#include "vm/compiler/cha.h"
#include "vm/compiler/compiler_state.h"
#include "vm/compiler/frontend/flow_graph_builder.h"
#include "vm/growable_array.h"
#include "vm/object_store.h"
#include "vm/resolver.h"

namespace dart {

#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32)
// Smi->Int32 widening pass is disabled due to dartbug.com/32619.
DEFINE_FLAG(bool, use_smi_widening, false, "Enable Smi->Int32 widening pass.");
DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass.");
#endif
DEFINE_FLAG(bool, prune_dead_locals, true, "optimize dead locals away");

// Quick access to the current zone.
#define Z (zone())

FlowGraph::FlowGraph(const ParsedFunction& parsed_function,
                     GraphEntryInstr* graph_entry,
                     intptr_t max_block_id,
                     PrologueInfo prologue_info)
    : thread_(Thread::Current()),
      parent_(),
      current_ssa_temp_index_(0),
      max_block_id_(max_block_id),
      parsed_function_(parsed_function),
      num_direct_parameters_(parsed_function.function().HasOptionalParameters()
                                 ? 0
                                 : parsed_function.function().NumParameters()),
      direct_parameters_size_(0),
      graph_entry_(graph_entry),
      preorder_(),
      postorder_(),
      reverse_postorder_(),
      optimized_block_order_(),
      constant_null_(nullptr),
      constant_dead_(nullptr),
      licm_allowed_(true),
      prologue_info_(prologue_info),
      loop_hierarchy_(nullptr),
      loop_invariant_loads_(nullptr),
      captured_parameters_(new (zone()) BitVector(zone(), variable_count())),
      inlining_id_(-1),
      should_print_(FlowGraphPrinter::ShouldPrint(parsed_function.function())) {
  direct_parameters_size_ = ParameterOffsetAt(
      function(), num_direct_parameters_, /*last_slot=*/false);
  DiscoverBlocks();
}

void FlowGraph::EnsureSSATempIndex(Definition* defn, Definition* replacement) {
  if ((replacement->ssa_temp_index() == -1) && (defn->ssa_temp_index() != -1)) {
    AllocateSSAIndexes(replacement);
  }
}

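// Returns the number of stack slots occupied by the first |index| parameters:
// a boxed parameter takes one slot, while unboxed integer and double
// parameters take kIntSpillFactor and kDoubleSpillFactor slots respectively
// (e.g., on a target where kDoubleSpillFactor == 2, parameters
// (Object a, double b) would occupy slots 0 and 1..2). If |last_slot| is
// true, returns the offset of the last slot of the parameter at |index|
// instead.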
intptr_t FlowGraph::ParameterOffsetAt(const Function& function,
                                      intptr_t index,
                                      bool last_slot /*=true*/) {
  ASSERT(index <= function.NumParameters());
  intptr_t param_offset = 0;
  for (intptr_t i = 0; i < index; i++) {
    if (function.is_unboxed_integer_parameter_at(i)) {
      param_offset += compiler::target::kIntSpillFactor;
    } else if (function.is_unboxed_double_parameter_at(i)) {
      param_offset += compiler::target::kDoubleSpillFactor;
    } else {
      ASSERT(!function.is_unboxed_parameter_at(i));
      // Boxed parameters always occupy one word.
      param_offset++;
    }
  }
  if (last_slot) {
    ASSERT(index < function.NumParameters());
    if (function.is_unboxed_double_parameter_at(index) &&
        compiler::target::kDoubleSpillFactor > 1) {
      ASSERT(compiler::target::kDoubleSpillFactor == 2);
      param_offset++;
    } else if (function.is_unboxed_integer_parameter_at(index) &&
               compiler::target::kIntSpillFactor > 1) {
      ASSERT(compiler::target::kIntSpillFactor == 2);
      param_offset++;
    }
  }
  return param_offset;
}

Representation FlowGraph::ParameterRepresentationAt(const Function& function,
                                                    intptr_t index) {
  if (function.IsNull()) {
    return kTagged;
  }
  ASSERT(index < function.NumParameters());
  if (function.is_unboxed_integer_parameter_at(index)) {
    return kUnboxedInt64;
  } else if (function.is_unboxed_double_parameter_at(index)) {
    return kUnboxedDouble;
  } else {
    ASSERT(!function.is_unboxed_parameter_at(index));
    return kTagged;
  }
}

Representation FlowGraph::ReturnRepresentationOf(const Function& function) {
  if (function.IsNull()) {
    return kTagged;
  }
  if (function.has_unboxed_integer_return()) {
    return kUnboxedInt64;
  } else if (function.has_unboxed_double_return()) {
    return kUnboxedDouble;
  } else {
    ASSERT(!function.has_unboxed_return());
    return kTagged;
  }
}

void FlowGraph::ReplaceCurrentInstruction(ForwardInstructionIterator* iterator,
                                          Instruction* current,
                                          Instruction* replacement) {
  Definition* current_defn = current->AsDefinition();
  if ((replacement != NULL) && (current_defn != NULL)) {
    Definition* replacement_defn = replacement->AsDefinition();
    ASSERT(replacement_defn != NULL);
    current_defn->ReplaceUsesWith(replacement_defn);
    EnsureSSATempIndex(current_defn, replacement_defn);

    if (FLAG_trace_optimization) {
      THR_Print("Replacing v%" Pd " with v%" Pd "\n",
                current_defn->ssa_temp_index(),
                replacement_defn->ssa_temp_index());
    }
  } else if (FLAG_trace_optimization) {
    if (current_defn == NULL) {
      THR_Print("Removing %s\n", current->DebugName());
    } else {
      ASSERT(!current_defn->HasUses());
      THR_Print("Removing v%" Pd ".\n", current_defn->ssa_temp_index());
    }
  }
  if (current->ArgumentCount() != 0) {
    ASSERT(!current->HasPushArguments());
  }
  iterator->RemoveCurrentFromGraph();
}

bool FlowGraph::ShouldReorderBlocks(const Function& function,
                                    bool is_optimized) {
  return is_optimized && FLAG_reorder_basic_blocks &&
         !function.is_intrinsic() && !function.IsFfiTrampoline();
}

GrowableArray<BlockEntryInstr*>* FlowGraph::CodegenBlockOrder(
    bool is_optimized) {
  return ShouldReorderBlocks(function(), is_optimized) ? &optimized_block_order_
                                                       : &reverse_postorder_;
}

ConstantInstr* FlowGraph::GetExistingConstant(const Object& object) const {
  return constant_instr_pool_.LookupValue(object);
}

ConstantInstr* FlowGraph::GetConstant(const Object& object) {
  ConstantInstr* constant = GetExistingConstant(object);
  if (constant == nullptr) {
    // Otherwise, allocate and add it to the pool.
    constant =
        new (zone()) ConstantInstr(Object::ZoneHandle(zone(), object.raw()));
    constant->set_ssa_temp_index(alloc_ssa_temp_index());
    if (NeedsPairLocation(constant->representation())) {
      alloc_ssa_temp_index();
    }
    AddToGraphInitialDefinitions(constant);
    constant_instr_pool_.Insert(constant);
  }
  return constant;
}

bool FlowGraph::IsConstantRepresentable(const Object& value,
                                        Representation target_rep,
                                        bool tagged_value_must_be_smi) {
  switch (target_rep) {
    case kTagged:
      return !tagged_value_must_be_smi || value.IsSmi();

    case kUnboxedInt32:
      if (value.IsInteger()) {
        return Utils::IsInt(32, Integer::Cast(value).AsInt64Value());
      }
      return false;

    case kUnboxedUint32:
      if (value.IsInteger()) {
        return Utils::IsUint(32, Integer::Cast(value).AsInt64Value());
      }
      return false;

    case kUnboxedInt64:
      return value.IsInteger();

    case kUnboxedDouble:
      return value.IsInteger() || value.IsDouble();

    default:
      return false;
  }
}

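// Attempts to replace |op| with a constant of the given |value|. Returns the
// new definition on success, or |op| itself if |value| cannot be safely
// represented in op's representation. For unboxed representations the
// constant is materialized via an Unbox instruction inserted next to |op|.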
Definition* FlowGraph::TryCreateConstantReplacementFor(Definition* op,
                                                       const Object& value) {
  // Check that the representation of the constant matches the expected
  // representation.
  if (!IsConstantRepresentable(
          value, op->representation(),
          /*tagged_value_must_be_smi=*/op->Type()->IsNullableSmi())) {
    return op;
  }

  Definition* result = GetConstant(value);
  if (op->representation() != kTagged) {
    // We checked above that the constant can be safely unboxed.
    result = UnboxInstr::Create(op->representation(), new Value(result),
                                DeoptId::kNone, Instruction::kNotSpeculative);
    // If the current instruction is a phi, we need to insert the replacement
    // into the block which contains this phi, because phis exist separately
    // from all other instructions.
    if (auto phi = op->AsPhi()) {
      InsertAfter(phi->GetBlock(), result, nullptr, FlowGraph::kValue);
    } else {
      InsertBefore(op, result, nullptr, FlowGraph::kValue);
    }
  }

  return result;
}

void FlowGraph::AddToGraphInitialDefinitions(Definition* defn) {
  defn->set_previous(graph_entry_);
  graph_entry_->initial_definitions()->Add(defn);
}

void FlowGraph::AddToInitialDefinitions(BlockEntryWithInitialDefs* entry,
                                        Definition* defn) {
  defn->set_previous(entry);
  if (auto par = defn->AsParameter()) {
    par->set_block(entry);  // set cached block
  }
  entry->initial_definitions()->Add(defn);
}

void FlowGraph::InsertBefore(Instruction* next,
                             Instruction* instr,
                             Environment* env,
                             UseKind use_kind) {
  InsertAfter(next->previous(), instr, env, use_kind);
}

void FlowGraph::InsertAfter(Instruction* prev,
                            Instruction* instr,
                            Environment* env,
                            UseKind use_kind) {
  if (use_kind == kValue) {
    ASSERT(instr->IsDefinition());
    AllocateSSAIndexes(instr->AsDefinition());
  }
  instr->InsertAfter(prev);
  ASSERT(instr->env() == NULL);
  if (env != NULL) {
    env->DeepCopyTo(zone(), instr);
  }
}

Instruction* FlowGraph::AppendTo(Instruction* prev,
                                 Instruction* instr,
                                 Environment* env,
                                 UseKind use_kind) {
  if (use_kind == kValue) {
    ASSERT(instr->IsDefinition());
    AllocateSSAIndexes(instr->AsDefinition());
  }
  ASSERT(instr->env() == NULL);
  if (env != NULL) {
    env->DeepCopyTo(zone(), instr);
  }
  return prev->AppendInstruction(instr);
}

// A wrapper around block entries including an index of the next successor to
// be read.
class BlockTraversalState {
 public:
  explicit BlockTraversalState(BlockEntryInstr* block)
      : block_(block),
        next_successor_ix_(block->last_instruction()->SuccessorCount() - 1) {}

  bool HasNextSuccessor() const { return next_successor_ix_ >= 0; }
  BlockEntryInstr* NextSuccessor() {
    ASSERT(HasNextSuccessor());
    return block_->last_instruction()->SuccessorAt(next_successor_ix_--);
  }

  BlockEntryInstr* block() const { return block_; }

 private:
  BlockEntryInstr* block_;
  intptr_t next_successor_ix_;

  DISALLOW_ALLOCATION();
};

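// Performs an iterative depth-first traversal starting from the graph entry,
// (re)computing the preorder, postorder, and reverse postorder block lists
// as well as the preorder spanning-tree parent array that is later used by
// ComputeDominators.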
void FlowGraph::DiscoverBlocks() {
  StackZone zone(thread());

  // Initialize state.
  preorder_.Clear();
  postorder_.Clear();
  reverse_postorder_.Clear();
  parent_.Clear();

  GrowableArray<BlockTraversalState> block_stack;
  graph_entry_->DiscoverBlock(NULL, &preorder_, &parent_);
  block_stack.Add(BlockTraversalState(graph_entry_));
  while (!block_stack.is_empty()) {
    BlockTraversalState& state = block_stack.Last();
    BlockEntryInstr* block = state.block();
    if (state.HasNextSuccessor()) {
      // Process successors one-by-one.
      BlockEntryInstr* succ = state.NextSuccessor();
      if (succ->DiscoverBlock(block, &preorder_, &parent_)) {
        block_stack.Add(BlockTraversalState(succ));
      }
    } else {
      // All successors have been processed, pop the current block entry node
      // and add it to the postorder list.
      block_stack.RemoveLast();
      block->set_postorder_number(postorder_.length());
      postorder_.Add(block);
    }
  }

  ASSERT(postorder_.length() == preorder_.length());

  // Create an array of blocks in reverse postorder.
  intptr_t block_count = postorder_.length();
  for (intptr_t i = 0; i < block_count; ++i) {
    reverse_postorder_.Add(postorder_[block_count - i - 1]);
  }

  ResetLoopHierarchy();
}

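// Merges each basic block with its successor when the successor is a join
// with exactly one predecessor (reached by an unconditional goto), folding
// the successor's phis away by replacing them with their single input.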
void FlowGraph::MergeBlocks() {
  bool changed = false;
  BitVector* merged = new (zone()) BitVector(zone(), postorder().length());
  for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
       block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();
    if (block->IsGraphEntry()) continue;
    if (merged->Contains(block->postorder_number())) continue;

    Instruction* last = block->last_instruction();
    BlockEntryInstr* last_merged_block = nullptr;
    while (auto goto_instr = last->AsGoto()) {
      JoinEntryInstr* successor = goto_instr->successor();
      if (successor->PredecessorCount() > 1) break;
      if (block->try_index() != successor->try_index()) break;

      // Replace all phis with their arguments prior to removing successor.
      for (PhiIterator it(successor); !it.Done(); it.Advance()) {
        PhiInstr* phi = it.Current();
        Value* input = phi->InputAt(0);
        phi->ReplaceUsesWith(input->definition());
        input->RemoveFromUseList();
      }

      // Remove environment uses and unlink goto and block entry.
      successor->UnuseAllInputs();
      last->previous()->LinkTo(successor->next());
      last->UnuseAllInputs();

      last = successor->last_instruction();
      merged->Add(successor->postorder_number());
      last_merged_block = successor;
      changed = true;
      if (FLAG_trace_optimization) {
        THR_Print("Merged blocks B%" Pd " and B%" Pd "\n", block->block_id(),
                  successor->block_id());
      }
    }
    // The new block inherits the block id of the last successor to keep
    // the order of phi inputs at its successors consistent with block ids.
    if (last_merged_block != nullptr) {
      block->set_block_id(last_merged_block->block_id());
    }
  }
  // Recompute block order after changes were made.
  if (changed) DiscoverBlocks();
}

void FlowGraph::ComputeIsReceiverRecursive(
    PhiInstr* phi,
    GrowableArray<PhiInstr*>* unmark) const {
  if (phi->is_receiver() != PhiInstr::kUnknownReceiver) return;
  phi->set_is_receiver(PhiInstr::kReceiver);
  for (intptr_t i = 0; i < phi->InputCount(); ++i) {
    Definition* def = phi->InputAt(i)->definition();
    if (def->IsParameter() && (def->AsParameter()->index() == 0)) continue;
    if (!def->IsPhi()) {
      phi->set_is_receiver(PhiInstr::kNotReceiver);
      break;
    }
    ComputeIsReceiverRecursive(def->AsPhi(), unmark);
    if (def->AsPhi()->is_receiver() == PhiInstr::kNotReceiver) {
      phi->set_is_receiver(PhiInstr::kNotReceiver);
      break;
    }
  }

  if (phi->is_receiver() == PhiInstr::kNotReceiver) {
    unmark->Add(phi);
  }
}

void FlowGraph::ComputeIsReceiver(PhiInstr* phi) const {
  GrowableArray<PhiInstr*> unmark;
  ComputeIsReceiverRecursive(phi, &unmark);

  // Now drain unmark.
  while (!unmark.is_empty()) {
    PhiInstr* phi = unmark.RemoveLast();
    for (Value::Iterator it(phi->input_use_list()); !it.Done(); it.Advance()) {
      PhiInstr* use = it.Current()->instruction()->AsPhi();
      if ((use != NULL) && (use->is_receiver() == PhiInstr::kReceiver)) {
        use->set_is_receiver(PhiInstr::kNotReceiver);
        unmark.Add(use);
      }
    }
  }
}

bool FlowGraph::IsReceiver(Definition* def) const {
  def = def->OriginalDefinition();  // Could be redefined.
  if (def->IsParameter()) return (def->AsParameter()->index() == 0);
  if (!def->IsPhi() || graph_entry()->HasSingleEntryPoint()) {
    return false;
  }
  PhiInstr* phi = def->AsPhi();
  if (phi->is_receiver() != PhiInstr::kUnknownReceiver) {
    return (phi->is_receiver() == PhiInstr::kReceiver);
  }
  // Not known if this phi is the receiver yet. Compute it now.
  ComputeIsReceiver(phi);
  return (phi->is_receiver() == PhiInstr::kReceiver);
}

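// Determines which receiver check an instance call still needs: no check at
// all, only a null check, or a full class-id check. Uses the receiver's
// static type, the call's ICData, and CHA (class hierarchy analysis) to
// prove that the resolved target cannot be overridden.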
FlowGraph::ToCheck FlowGraph::CheckForInstanceCall(
    InstanceCallInstr* call,
    FunctionLayout::Kind kind) const {
  if (!FLAG_use_cha_deopt && !isolate()->all_classes_finalized()) {
    // Even if the class or function is private, lazy class finalization
    // may later add overriding methods.
    return ToCheck::kCheckCid;
  }

  // Best effort to get the receiver class.
  Value* receiver = call->Receiver();
  Class& receiver_class = Class::Handle(zone());
  bool receiver_maybe_null = false;
  if (function().IsDynamicFunction() && IsReceiver(receiver->definition())) {
    // Call receiver is callee receiver: calling "this.g()" in f().
    receiver_class = function().Owner();
  } else {
    // Get the receiver's compile type. Note that we allow nullable types,
    // which may result in just generating a null check rather than the more
    // elaborate class check.
    CompileType* type = receiver->Type();
    const AbstractType* atype = type->ToAbstractType();
    if (atype->IsInstantiated() && atype->HasTypeClass() &&
        !atype->IsDynamicType()) {
      if (type->is_nullable()) {
        receiver_maybe_null = true;
      }
      receiver_class = atype->type_class();
      if (receiver_class.is_implemented()) {
        receiver_class = Class::null();
      }
    }
  }

  // Useful receiver class information?
  if (receiver_class.IsNull()) {
    return ToCheck::kCheckCid;
  } else if (call->HasICData()) {
    // If the static class type does not match information found in ICData
    // (which may be "guessed"), then bail, since subsequent code generation
    // (AOT and JIT) for inlining uses the latter.
    // TODO(ajcbik): improve this by using the static class.
    const intptr_t cid = receiver_class.id();
    const ICData* data = call->ic_data();
    bool match = false;
    Class& cls = Class::Handle(zone());
    Function& fun = Function::Handle(zone());
    for (intptr_t i = 0, len = data->NumberOfChecks(); i < len; i++) {
      if (!data->IsUsedAt(i)) {
        continue;  // do not consider
      }
      fun = data->GetTargetAt(i);
      cls = fun.Owner();
      if (data->GetReceiverClassIdAt(i) == cid || cls.id() == cid) {
        match = true;
        break;
      }
    }
    if (!match) {
      return ToCheck::kCheckCid;
    }
  }

  const String& method_name =
      (kind == FunctionLayout::kMethodExtractor)
          ? String::Handle(zone(), Field::NameFromGetter(call->function_name()))
          : call->function_name();

  // If the receiver can have the null value, exclude any method
  // that is actually valid on a null receiver.
  if (receiver_maybe_null) {
    const Class& null_class =
        Class::Handle(zone(), isolate()->object_store()->null_class());
    const Function& target = Function::Handle(
        zone(),
        Resolver::ResolveDynamicAnyArgs(zone(), null_class, method_name));
    if (!target.IsNull()) {
      return ToCheck::kCheckCid;
    }
  }

  // Use CHA to determine if the method is not overridden by any subclass
  // of the receiver class. Any methods that are valid when the receiver
  // has a null value are excluded above (to avoid throwing an exception
  // on something valid, like null.hashCode).
  intptr_t subclass_count = 0;
  CHA& cha = thread()->compiler_state().cha();
  if (!cha.HasOverride(receiver_class, method_name, &subclass_count)) {
    if (FLAG_trace_cha) {
      THR_Print(
          "  **(CHA) Instance call needs no class check since there "
          "are no overrides of method '%s' on '%s'\n",
          method_name.ToCString(), receiver_class.ToCString());
    }
    if (FLAG_use_cha_deopt) {
      cha.AddToGuardedClasses(receiver_class, subclass_count);
    }
    return receiver_maybe_null ? ToCheck::kCheckNull : ToCheck::kNoCheck;
  }
  return ToCheck::kCheckCid;
}

Instruction* FlowGraph::CreateCheckClass(Definition* to_check,
                                         const Cids& cids,
                                         intptr_t deopt_id,
                                         TokenPosition token_pos) {
  if (cids.IsMonomorphic() && cids.MonomorphicReceiverCid() == kSmiCid) {
    return new (zone())
        CheckSmiInstr(new (zone()) Value(to_check), deopt_id, token_pos);
  }
  return new (zone())
      CheckClassInstr(new (zone()) Value(to_check), deopt_id, cids, token_pos);
}

Definition* FlowGraph::CreateCheckBound(Definition* length,
                                        Definition* index,
                                        intptr_t deopt_id) {
  Value* val1 = new (zone()) Value(length);
  Value* val2 = new (zone()) Value(index);
  if (CompilerState::Current().is_aot()) {
    return new (zone()) GenericCheckBoundInstr(val1, val2, deopt_id);
  }
  return new (zone()) CheckArrayBoundInstr(val1, val2, deopt_id);
}

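// Inserts a guard in front of |call| that compares the receiver's type
// arguments against the static type arguments recorded in the call's ICData
// and deoptimizes on mismatch.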
void FlowGraph::AddExactnessGuard(InstanceCallInstr* call,
                                  intptr_t receiver_cid) {
  const Class& cls = Class::Handle(
      zone(), Isolate::Current()->class_table()->At(receiver_cid));

  Definition* load_type_args = new (zone()) LoadFieldInstr(
      call->Receiver()->CopyWithType(),
      Slot::GetTypeArgumentsSlotFor(thread(), cls), call->token_pos());
  InsertBefore(call, load_type_args, call->env(), FlowGraph::kValue);

  const AbstractType& type =
      AbstractType::Handle(zone(), call->ic_data()->receivers_static_type());
  ASSERT(!type.IsNull());
  const TypeArguments& args = TypeArguments::Handle(zone(), type.arguments());
  Instruction* guard = new (zone()) CheckConditionInstr(
      new StrictCompareInstr(call->token_pos(), Token::kEQ_STRICT,
                             new (zone()) Value(load_type_args),
                             new (zone()) Value(GetConstant(args)),
                             /*needs_number_check=*/false, call->deopt_id()),
      call->deopt_id());
  InsertBefore(call, guard, call->env(), FlowGraph::kEffect);
}

// Verify that a redefinition dominates all uses of the redefined value.
bool FlowGraph::VerifyRedefinitions() {
  for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
       block_it.Advance()) {
    for (ForwardInstructionIterator instr_it(block_it.Current());
         !instr_it.Done(); instr_it.Advance()) {
      RedefinitionInstr* redefinition = instr_it.Current()->AsRedefinition();
      if (redefinition != NULL) {
        Definition* original = redefinition->value()->definition();
        for (Value::Iterator it(original->input_use_list()); !it.Done();
             it.Advance()) {
          Value* original_use = it.Current();
          if (original_use->instruction() == redefinition) {
            continue;
          }
          if (original_use->instruction()->IsDominatedBy(redefinition)) {
            FlowGraphPrinter::PrintGraph("VerifyRedefinitions", this);
            THR_Print("%s\n", redefinition->ToCString());
            THR_Print("use=%s\n", original_use->instruction()->ToCString());
            return false;
          }
        }
      }
    }
  }
  return true;
}

LivenessAnalysis::LivenessAnalysis(
    intptr_t variable_count,
    const GrowableArray<BlockEntryInstr*>& postorder)
    : zone_(Thread::Current()->zone()),
      variable_count_(variable_count),
      postorder_(postorder),
      live_out_(postorder.length()),
      kill_(postorder.length()),
      live_in_(postorder.length()) {}

bool LivenessAnalysis::UpdateLiveOut(const BlockEntryInstr& block) {
  BitVector* live_out = live_out_[block.postorder_number()];
  bool changed = false;
  Instruction* last = block.last_instruction();
  ASSERT(last != NULL);
  for (intptr_t i = 0; i < last->SuccessorCount(); i++) {
    BlockEntryInstr* succ = last->SuccessorAt(i);
    ASSERT(succ != NULL);
    if (live_out->AddAll(live_in_[succ->postorder_number()])) {
      changed = true;
    }
  }
  return changed;
}

bool LivenessAnalysis::UpdateLiveIn(const BlockEntryInstr& block) {
  BitVector* live_out = live_out_[block.postorder_number()];
  BitVector* kill = kill_[block.postorder_number()];
  BitVector* live_in = live_in_[block.postorder_number()];
  return live_in->KillAndAdd(kill, live_out);
}

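// Iterates to a fixed point of the standard backward dataflow equations
// (with the use/gen sets folded into the initial live-in sets by
// ComputeInitialSets):
//
//   live_out(B)  = union of live_in(S) for all successors S of B
//   live_in(B)  |= live_out(B) - kill(B)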
void LivenessAnalysis::ComputeLiveInAndLiveOutSets() {
  const intptr_t block_count = postorder_.length();
  bool changed;
  do {
    changed = false;

    for (intptr_t i = 0; i < block_count; i++) {
      const BlockEntryInstr& block = *postorder_[i];

      // The live-in set depends only on the kill set (which does not change
      // in this loop) and the live-out set. If the live-out set does not
      // change, there is no need to recompute the live-in set.
      if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
        changed = true;
      }
    }
  } while (changed);
}

void LivenessAnalysis::Analyze() {
  const intptr_t block_count = postorder_.length();
  for (intptr_t i = 0; i < block_count; i++) {
    live_out_.Add(new (zone()) BitVector(zone(), variable_count_));
    kill_.Add(new (zone()) BitVector(zone(), variable_count_));
    live_in_.Add(new (zone()) BitVector(zone(), variable_count_));
  }

  ComputeInitialSets();
  ComputeLiveInAndLiveOutSets();
}

static void PrintBitVector(const char* tag, BitVector* v) {
  THR_Print("%s:", tag);
  for (BitVector::Iterator it(v); !it.Done(); it.Advance()) {
    THR_Print(" %" Pd "", it.Current());
  }
  THR_Print("\n");
}

void LivenessAnalysis::Dump() {
  const intptr_t block_count = postorder_.length();
  for (intptr_t i = 0; i < block_count; i++) {
    BlockEntryInstr* block = postorder_[i];
    THR_Print("block @%" Pd " -> ", block->block_id());

    Instruction* last = block->last_instruction();
    for (intptr_t j = 0; j < last->SuccessorCount(); j++) {
      BlockEntryInstr* succ = last->SuccessorAt(j);
      THR_Print(" @%" Pd "", succ->block_id());
    }
    THR_Print("\n");

    PrintBitVector("  live out", live_out_[i]);
    PrintBitVector("  kill", kill_[i]);
    PrintBitVector("  live in", live_in_[i]);
  }
}

// Computes liveness information for local variables.
class VariableLivenessAnalysis : public LivenessAnalysis {
 public:
  explicit VariableLivenessAnalysis(FlowGraph* flow_graph)
      : LivenessAnalysis(flow_graph->variable_count(), flow_graph->postorder()),
        flow_graph_(flow_graph),
        assigned_vars_() {}

  // For every block (in preorder) compute and return set of variables that
  // have new assigned values flowing out of that block.
  const GrowableArray<BitVector*>& ComputeAssignedVars() {
    // We can't directly return kill_ because it uses postorder numbering while
    // SSA construction uses preorder numbering internally.
    // We have to permute postorder into preorder.
    assigned_vars_.Clear();

    const intptr_t block_count = flow_graph_->preorder().length();
    for (intptr_t i = 0; i < block_count; i++) {
      BlockEntryInstr* block = flow_graph_->preorder()[i];
      // All locals are assigned inside a try{} block.
      // This is a safe approximation and workaround to force insertion of
      // phis for stores that appear non-live because of the way catch-blocks
      // are connected to the graph: They normally are dominated by the
      // try-entry, but are direct successors of the graph entry in our flow
      // graph.
      // TODO(fschneider): Improve this approximation by better modeling the
      // actual data flow to reduce the number of redundant phis.
      BitVector* kill = GetKillSet(block);
      if (block->InsideTryBlock()) {
        kill->SetAll();
      } else {
        kill->Intersect(GetLiveOutSet(block));
      }
      assigned_vars_.Add(kill);
    }

    return assigned_vars_;
  }

  // Returns true if the value set by the given store reaches any load from the
  // same local variable.
  bool IsStoreAlive(BlockEntryInstr* block, StoreLocalInstr* store) {
    if (store->local().Equals(*flow_graph_->CurrentContextVar())) {
      return true;
    }

    if (store->is_dead()) {
      return false;
    }
    if (store->is_last()) {
      const intptr_t index = flow_graph_->EnvIndex(&store->local());
      return GetLiveOutSet(block)->Contains(index);
    }

    return true;
  }

  // Returns true if the given load is the last for the local and the value
  // of the local will not flow into another one.
  bool IsLastLoad(BlockEntryInstr* block, LoadLocalInstr* load) {
    if (load->local().Equals(*flow_graph_->CurrentContextVar())) {
      return false;
    }
    const intptr_t index = flow_graph_->EnvIndex(&load->local());
    return load->is_last() && !GetLiveOutSet(block)->Contains(index);
  }

 private:
  virtual void ComputeInitialSets();

  const FlowGraph* flow_graph_;
  GrowableArray<BitVector*> assigned_vars_;
};

void VariableLivenessAnalysis::ComputeInitialSets() {
  const intptr_t block_count = postorder_.length();

  BitVector* last_loads = new (zone()) BitVector(zone(), variable_count_);
  for (intptr_t i = 0; i < block_count; i++) {
    BlockEntryInstr* block = postorder_[i];

    BitVector* kill = kill_[i];
    BitVector* live_in = live_in_[i];
    last_loads->Clear();

    // There is an implicit use (load-local) of every local variable at each
    // call inside a try{} block and every call has an implicit control-flow
    // to the catch entry. As an approximation we mark all locals as live
    // inside try{}.
    // TODO(fschneider): Improve this approximation, since not all local
    // variable stores actually reach a call.
    if (block->InsideTryBlock()) {
      live_in->SetAll();
      continue;
    }

    // Iterate backwards starting at the last instruction.
    for (BackwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      Instruction* current = it.Current();

      LoadLocalInstr* load = current->AsLoadLocal();
      if (load != NULL) {
        const intptr_t index = flow_graph_->EnvIndex(&load->local());
        if (index >= live_in->length()) continue;  // Skip tmp_locals.
        live_in->Add(index);
        if (!last_loads->Contains(index)) {
          last_loads->Add(index);
          load->mark_last();
        }
        continue;
      }

      StoreLocalInstr* store = current->AsStoreLocal();
      if (store != NULL) {
        const intptr_t index = flow_graph_->EnvIndex(&store->local());
        if (index >= live_in->length()) continue;  // Skip tmp_locals.
        if (kill->Contains(index)) {
          if (!live_in->Contains(index)) {
            store->mark_dead();
          }
        } else {
          if (!live_in->Contains(index)) {
            store->mark_last();
          }
          kill->Add(index);
        }
        live_in->Remove(index);
        continue;
      }
    }

    // For blocks with parameter or special parameter instructions we add them
    // to the kill set.
    const bool is_function_entry = block->IsFunctionEntry();
    const bool is_osr_entry = block->IsOsrEntry();
    const bool is_catch_block_entry = block->IsCatchBlockEntry();
    if (is_function_entry || is_osr_entry || is_catch_block_entry) {
      const intptr_t parameter_count =
          (is_osr_entry || is_catch_block_entry)
              ? flow_graph_->variable_count()
              : flow_graph_->num_direct_parameters();
      for (intptr_t i = 0; i < parameter_count; ++i) {
        live_in->Remove(i);
        kill->Add(i);
      }
    }
    if (is_function_entry) {
      if (flow_graph_->parsed_function().has_arg_desc_var()) {
        const auto index = flow_graph_->ArgumentDescriptorEnvIndex();
        live_in->Remove(index);
        kill->Add(index);
      }
    }
  }
}

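// Transforms the flow graph into SSA form: computes the dominator tree and
// dominance frontiers, runs variable liveness analysis, inserts phis at the
// iterated dominance frontiers, renames all uses of locals to SSA values,
// and finally removes phis that never reach a real (non-environment) use.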
void FlowGraph::ComputeSSA(
    intptr_t next_virtual_register_number,
    ZoneGrowableArray<Definition*>* inlining_parameters) {
  ASSERT((next_virtual_register_number == 0) || (inlining_parameters != NULL));
  current_ssa_temp_index_ = next_virtual_register_number;
  GrowableArray<BitVector*> dominance_frontier;
  GrowableArray<intptr_t> idom;

  ComputeDominators(&dominance_frontier);

  VariableLivenessAnalysis variable_liveness(this);
  variable_liveness.Analyze();

  GrowableArray<PhiInstr*> live_phis;

  InsertPhis(preorder_, variable_liveness.ComputeAssignedVars(),
             dominance_frontier, &live_phis);

  // Rename uses to reference inserted phis where appropriate.
  // Collect phis that reach a non-environment use.
  Rename(&live_phis, &variable_liveness, inlining_parameters);

  // Propagate alive mark transitively from alive phis and then remove
  // non-live ones.
  RemoveDeadPhis(&live_phis);
}

// Compute immediate dominators and the dominance frontier for each basic
// block.  As a side effect of the algorithm, sets the immediate dominator
// of each basic block.
//
// dominance_frontier: an output parameter encoding the dominance frontier.
//     The array maps the preorder block number of a block to the set of
//     (preorder block numbers of) blocks in the dominance frontier.
void FlowGraph::ComputeDominators(
    GrowableArray<BitVector*>* dominance_frontier) {
  // Use the SEMI-NCA algorithm to compute dominators.  This is a two-pass
  // version of the Lengauer-Tarjan algorithm (LT is normally three passes)
  // that eliminates a pass by using nearest-common ancestor (NCA) to
  // compute immediate dominators from semidominators.  It also removes a
  // level of indirection in the link-eval forest data structure.
  //
  // The algorithm is described in Georgiadis, Tarjan, and Werneck's
  // "Finding Dominators in Practice".
  // See http://www.cs.princeton.edu/~rwerneck/dominators/ .

  // All arrays are maps between preorder basic-block numbers.
  intptr_t size = parent_.length();
  GrowableArray<intptr_t> idom(size);   // Immediate dominator.
  GrowableArray<intptr_t> semi(size);   // Semidominator.
  GrowableArray<intptr_t> label(size);  // Label for link-eval forest.

  // 1. First pass: compute semidominators as in Lengauer-Tarjan.
  // Semidominators are computed from a depth-first spanning tree and are an
  // approximation of immediate dominators.

  // Use a link-eval data structure with path compression.  Implement path
  // compression in place by mutating the parent array.  Each block has a
  // label, which is the minimum block number on the compressed path.

  // Initialize idom, semi, and label used by SEMI-NCA.  Initialize the
  // dominance frontier output array.
  for (intptr_t i = 0; i < size; ++i) {
    idom.Add(parent_[i]);
    semi.Add(i);
    label.Add(i);
    dominance_frontier->Add(new (zone()) BitVector(zone(), size));
  }

  // Loop over the blocks in reverse preorder (not including the graph
  // entry).  Clear the dominated blocks in the graph entry in case
  // ComputeDominators is used to recompute them.
  preorder_[0]->ClearDominatedBlocks();
  for (intptr_t block_index = size - 1; block_index >= 1; --block_index) {
    // Loop over the predecessors.
    BlockEntryInstr* block = preorder_[block_index];
    // Clear the immediately dominated blocks in case ComputeDominators is
    // used to recompute them.
    block->ClearDominatedBlocks();
    for (intptr_t i = 0, count = block->PredecessorCount(); i < count; ++i) {
      BlockEntryInstr* pred = block->PredecessorAt(i);
      ASSERT(pred != NULL);

      // Look for the semidominator by ascending the semidominator path
      // starting from pred.
      intptr_t pred_index = pred->preorder_number();
      intptr_t best = pred_index;
      if (pred_index > block_index) {
        CompressPath(block_index, pred_index, &parent_, &label);
        best = label[pred_index];
      }

      // Update the semidominator if we've found a better one.
      semi[block_index] = Utils::Minimum(semi[block_index], semi[best]);
    }

    // Now use label for the semidominator.
    label[block_index] = semi[block_index];
  }

  // 2. Compute the immediate dominators as the nearest common ancestor of
  // spanning tree parent and semidominator, for all blocks except the entry.
  for (intptr_t block_index = 1; block_index < size; ++block_index) {
    intptr_t dom_index = idom[block_index];
    while (dom_index > semi[block_index]) {
      dom_index = idom[dom_index];
    }
    idom[block_index] = dom_index;
    preorder_[dom_index]->AddDominatedBlock(preorder_[block_index]);
  }

  // 3. Now compute the dominance frontier for all blocks.  This is the
  // algorithm in "A Simple, Fast Dominance Algorithm" (Figure 5), which is
  // attributed to a paper by Ferrante et al.  There is no bookkeeping
  // required to avoid adding a block twice to the same block's dominance
  // frontier because we use a set to represent the dominance frontier.
  for (intptr_t block_index = 0; block_index < size; ++block_index) {
    BlockEntryInstr* block = preorder_[block_index];
    intptr_t count = block->PredecessorCount();
    if (count <= 1) continue;
    for (intptr_t i = 0; i < count; ++i) {
      BlockEntryInstr* runner = block->PredecessorAt(i);
      while (runner != block->dominator()) {
        (*dominance_frontier)[runner->preorder_number()]->Add(block_index);
        runner = runner->dominator();
      }
    }
  }
}

void FlowGraph::CompressPath(intptr_t start_index,
                             intptr_t current_index,
                             GrowableArray<intptr_t>* parent,
                             GrowableArray<intptr_t>* label) {
  intptr_t next_index = (*parent)[current_index];
  if (next_index > start_index) {
    CompressPath(start_index, next_index, parent, label);
    (*label)[current_index] =
        Utils::Minimum((*label)[current_index], (*label)[next_index]);
    (*parent)[current_index] = (*parent)[next_index];
  }
}

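// Inserts phi functions at the (iterated) dominance frontier of every block
// that assigns to a variable, following the classic SSA construction
// algorithm of Cytron et al. Phis for variables that are always live (the
// current context variable, or all variables when --prune_dead_locals is
// disabled) are eagerly marked alive; the remaining phis are only marked
// alive during renaming when a real use is discovered.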
void FlowGraph::InsertPhis(const GrowableArray<BlockEntryInstr*>& preorder,
                           const GrowableArray<BitVector*>& assigned_vars,
                           const GrowableArray<BitVector*>& dom_frontier,
                           GrowableArray<PhiInstr*>* live_phis) {
  const intptr_t block_count = preorder.length();
  // Map preorder block number to the highest variable index that has a phi
  // in that block.  Use it to avoid inserting multiple phis for the same
  // variable.
  GrowableArray<intptr_t> has_already(block_count);
  // Map preorder block number to the highest variable index for which the
  // block went on the worklist.  Use it to avoid adding the same block to
  // the worklist more than once for the same variable.
  GrowableArray<intptr_t> work(block_count);

  // Initialize has_already and work.
  for (intptr_t block_index = 0; block_index < block_count; ++block_index) {
    has_already.Add(-1);
    work.Add(-1);
  }

  // Insert phis for each variable in turn.
  GrowableArray<BlockEntryInstr*> worklist;
  for (intptr_t var_index = 0; var_index < variable_count(); ++var_index) {
    const bool always_live =
        !FLAG_prune_dead_locals || (var_index == CurrentContextEnvIndex());
    // Add to the worklist each block containing an assignment.
    for (intptr_t block_index = 0; block_index < block_count; ++block_index) {
      if (assigned_vars[block_index]->Contains(var_index)) {
        work[block_index] = var_index;
        worklist.Add(preorder[block_index]);
      }
    }

    while (!worklist.is_empty()) {
      BlockEntryInstr* current = worklist.RemoveLast();
      // Ensure a phi for each block in the dominance frontier of current.
      for (BitVector::Iterator it(dom_frontier[current->preorder_number()]);
           !it.Done(); it.Advance()) {
        int index = it.Current();
        if (has_already[index] < var_index) {
          JoinEntryInstr* join = preorder[index]->AsJoinEntry();
          ASSERT(join != nullptr);
          PhiInstr* phi = join->InsertPhi(
              var_index, variable_count() + join->stack_depth());
          if (always_live) {
            phi->mark_alive();
            live_phis->Add(phi);
          }
          has_already[index] = var_index;
          if (work[index] < var_index) {
            work[index] = var_index;
            worklist.Add(join);
          }
        }
      }
    }
  }
}

void FlowGraph::CreateCommonConstants() {
  constant_null_ = GetConstant(Object::ZoneHandle());
  constant_dead_ = GetConstant(Symbols::OptimizedOut());
}

void FlowGraph::AddSyntheticPhis(BlockEntryInstr* block) {
  ASSERT(IsCompiledForOsr());
  if (auto join = block->AsJoinEntry()) {
    const intptr_t local_phi_count = variable_count() + join->stack_depth();
    for (intptr_t i = variable_count(); i < local_phi_count; ++i) {
      if (join->phis() == nullptr || (*join->phis())[i] == nullptr) {
        join->InsertPhi(i, local_phi_count)->mark_alive();
      }
    }
  }
}

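// Renames all local-variable accesses to SSA definitions, walking the
// dominator tree with a renaming environment that maps each environment
// index to its current reaching definition.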
void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis,
                       VariableLivenessAnalysis* variable_liveness,
                       ZoneGrowableArray<Definition*>* inlining_parameters) {
  GraphEntryInstr* entry = graph_entry();

  // Add global constants to the initial definitions.
  CreateCommonConstants();

  // Initial renaming environment.
  GrowableArray<Definition*> env(variable_count());
  env.FillWith(constant_dead(), 0, num_direct_parameters());
  env.FillWith(constant_null(), num_direct_parameters(), num_stack_locals());

  if (entry->catch_entries().length() > 0) {
    // Functions with try-catch have a fixed area of stack slots reserved
    // so that all local variables are stored at a known location on entry
    // to the catch.
    entry->set_fixed_slot_count(num_stack_locals());
  } else {
    ASSERT(entry->unchecked_entry() != nullptr ? entry->SuccessorCount() == 2
                                               : entry->SuccessorCount() == 1);
  }

  // For OSR on a non-empty stack, insert synthetic phis on every joining
  // entry. These phis are synthetic since they are not driven by live
  // variable analysis, but merely serve the purpose of merging stack slots
  // from parameters and other predecessors at the block in which OSR
  // occurred.
  if (IsCompiledForOsr()) {
    AddSyntheticPhis(entry->osr_entry()->last_instruction()->SuccessorAt(0));
    for (intptr_t i = 0, n = entry->dominated_blocks().length(); i < n; ++i) {
      AddSyntheticPhis(entry->dominated_blocks()[i]);
    }
  }

  RenameRecursive(entry, &env, live_phis, variable_liveness,
                  inlining_parameters);
}

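// Seeds the renaming environment at a function entry: creates a Parameter
// instruction for every direct parameter (tagged or unboxed), then overrides
// the special slots (inlined arguments and, for generic functions, the type
// arguments vector) with their incoming definitions.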
|---|
| 1148 | void FlowGraph::PopulateEnvironmentFromFunctionEntry( | 
|---|
| 1149 | FunctionEntryInstr* function_entry, | 
|---|
| 1150 | GrowableArray<Definition*>* env, | 
|---|
| 1151 | GrowableArray<PhiInstr*>* live_phis, | 
|---|
| 1152 | VariableLivenessAnalysis* variable_liveness, | 
|---|
| 1153 | ZoneGrowableArray<Definition*>* inlining_parameters) { | 
|---|
| 1154 | ASSERT(!IsCompiledForOsr()); | 
|---|
| 1155 | const intptr_t direct_parameter_count = num_direct_parameters_; | 
|---|
| 1156 |  | 
|---|
| 1157 | // Check if inlining_parameters includes a type argument vector parameter. | 
|---|
| 1158 | const intptr_t inlined_type_args_param = | 
|---|
| 1159 | ((inlining_parameters != NULL) && function().IsGeneric()) ? 1 : 0; | 
|---|
| 1160 |  | 
|---|
| 1161 | ASSERT(variable_count() == env->length()); | 
|---|
| 1162 | ASSERT(direct_parameter_count <= env->length()); | 
|---|
| 1163 | intptr_t param_offset = 0; | 
|---|
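|  | // Create a parameter definition for every direct (fixed) parameter. param_offset | 
|---|
|  | // tracks its location in spill slots: unboxed int64/double parameters occupy | 
|---|
|  | // kIntSpillFactor/kDoubleSpillFactor slots, all other parameters occupy one tagged slot. | 
|---|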
| 1164 | for (intptr_t i = 0; i < direct_parameter_count; i++) { | 
|---|
| 1165 | ASSERT(FLAG_precompiled_mode || !function().is_unboxed_parameter_at(i)); | 
|---|
| 1166 | ParameterInstr* param; | 
|---|
| 1167 |  | 
|---|
| 1168 | const intptr_t index = (function().IsFactory() ? (i - 1) : i); | 
|---|
| 1169 |  | 
|---|
| 1170 | if (index >= 0 && function().is_unboxed_integer_parameter_at(index)) { | 
|---|
| 1171 | constexpr intptr_t kCorrection = compiler::target::kIntSpillFactor - 1; | 
|---|
| 1172 | param = new (zone()) ParameterInstr(i, param_offset + kCorrection, | 
|---|
| 1173 | function_entry, kUnboxedInt64); | 
|---|
| 1174 | param_offset += compiler::target::kIntSpillFactor; | 
|---|
| 1175 | } else if (index >= 0 && function().is_unboxed_double_parameter_at(index)) { | 
|---|
| 1176 | constexpr intptr_t kCorrection = compiler::target::kDoubleSpillFactor - 1; | 
|---|
| 1177 | param = new (zone()) ParameterInstr(i, param_offset + kCorrection, | 
|---|
| 1178 | function_entry, kUnboxedDouble); | 
|---|
| 1179 | param_offset += compiler::target::kDoubleSpillFactor; | 
|---|
| 1180 | } else { | 
|---|
| 1181 | ASSERT(index < 0 || !function().is_unboxed_parameter_at(index)); | 
|---|
| 1182 | param = | 
|---|
| 1183 | new (zone()) ParameterInstr(i, param_offset, function_entry, kTagged); | 
|---|
| 1184 | param_offset++; | 
|---|
| 1185 | } | 
|---|
| 1186 | param->set_ssa_temp_index(alloc_ssa_temp_index()); | 
|---|
| 1187 | if (NeedsPairLocation(param->representation())) alloc_ssa_temp_index(); | 
|---|
| 1188 | AddToInitialDefinitions(function_entry, param); | 
|---|
| 1189 | (*env)[i] = param; | 
|---|
| 1190 | } | 
|---|
| 1191 |  | 
|---|
| 1192 | // Override the entries in the renaming environment which are special (i.e. | 
|---|
| 1193 | // inlining arguments, type parameter, args descriptor, context, ...) | 
|---|
| 1194 | { | 
|---|
| 1195 | // Replace parameter slots with inlining definitions coming in. | 
|---|
| 1196 | if (inlining_parameters != NULL) { | 
|---|
| 1197 | for (intptr_t i = 0; i < function().NumParameters(); ++i) { | 
|---|
| 1198 | Definition* defn = (*inlining_parameters)[inlined_type_args_param + i]; | 
|---|
| 1199 | AllocateSSAIndexes(defn); | 
|---|
| 1200 | AddToInitialDefinitions(function_entry, defn); | 
|---|
| 1201 |  | 
|---|
| 1202 | intptr_t index = EnvIndex(parsed_function_.RawParameterVariable(i)); | 
|---|
| 1203 | (*env)[index] = defn; | 
|---|
| 1204 | } | 
|---|
| 1205 | } | 
|---|
| 1206 |  | 
|---|
| 1207 | // Replace the type arguments slot with a special parameter. | 
|---|
| 1208 | const bool reify_generic_argument = function().IsGeneric(); | 
|---|
| 1209 | if (reify_generic_argument) { | 
|---|
| 1210 | ASSERT(parsed_function().function_type_arguments() != NULL); | 
|---|
| 1211 | Definition* defn; | 
|---|
| 1212 | if (inlining_parameters == NULL) { | 
|---|
| 1213 | // Note: If we are not inlining, then the prologue builder will | 
|---|
| 1214 | // take care of checking that we got the correct reified type | 
|---|
| 1215 | // arguments.  This includes checking the argument descriptor in order | 
|---|
| 1216 | // to even find out if the parameter was passed or not. | 
|---|
| 1217 | defn = constant_dead(); | 
|---|
| 1218 | } else { | 
|---|
| 1219 | defn = (*inlining_parameters)[0]; | 
|---|
| 1220 | } | 
|---|
| 1221 | AllocateSSAIndexes(defn); | 
|---|
| 1222 | AddToInitialDefinitions(function_entry, defn); | 
|---|
| 1223 | (*env)[RawTypeArgumentEnvIndex()] = defn; | 
|---|
| 1224 | } | 
|---|
| 1225 |  | 
|---|
| 1226 | // Replace the argument descriptor slot with a special parameter. | 
|---|
| 1227 | if (parsed_function().has_arg_desc_var()) { | 
|---|
| 1228 | Definition* defn = | 
|---|
| 1229 | new (Z) SpecialParameterInstr(SpecialParameterInstr::kArgDescriptor, | 
|---|
| 1230 | DeoptId::kNone, function_entry); | 
|---|
| 1231 | AllocateSSAIndexes(defn); | 
|---|
| 1232 | AddToInitialDefinitions(function_entry, defn); | 
|---|
| 1233 | (*env)[ArgumentDescriptorEnvIndex()] = defn; | 
|---|
| 1234 | } | 
|---|
| 1235 | } | 
|---|
| 1236 | } | 
|---|
| 1237 |  | 
|---|
| 1238 | void FlowGraph::PopulateEnvironmentFromOsrEntry( | 
|---|
| 1239 | OsrEntryInstr* osr_entry, | 
|---|
| 1240 | GrowableArray<Definition*>* env) { | 
|---|
| 1241 | ASSERT(IsCompiledForOsr()); | 
|---|
| 1242 | // During OSR, all variables and possibly a non-empty stack are | 
|---|
| 1243 | // passed as parameters. The latter mimics the incoming expression | 
|---|
| 1244 | // stack that was set up prior to triggering OSR. | 
|---|
| 1245 | const intptr_t parameter_count = osr_variable_count(); | 
|---|
| 1246 | ASSERT(parameter_count == env->length()); | 
|---|
| 1247 | for (intptr_t i = 0; i < parameter_count; i++) { | 
|---|
| 1248 | ParameterInstr* param = | 
|---|
| 1249 | new (zone()) ParameterInstr(i, i, osr_entry, kTagged); | 
|---|
| 1250 | param->set_ssa_temp_index(alloc_ssa_temp_index()); | 
|---|
| 1251 | AddToInitialDefinitions(osr_entry, param); | 
|---|
| 1252 | (*env)[i] = param; | 
|---|
| 1253 | } | 
|---|
| 1254 | } | 
|---|
| 1255 |  | 
|---|
| 1256 | void FlowGraph::PopulateEnvironmentFromCatchEntry( | 
|---|
| 1257 | CatchBlockEntryInstr* catch_entry, | 
|---|
| 1258 | GrowableArray<Definition*>* env) { | 
|---|
| 1259 | const intptr_t raw_exception_var_envindex = | 
|---|
| 1260 | catch_entry->raw_exception_var() != nullptr | 
|---|
| 1261 | ? EnvIndex(catch_entry->raw_exception_var()) | 
|---|
| 1262 | : -1; | 
|---|
| 1263 | const intptr_t raw_stacktrace_var_envindex = | 
|---|
| 1264 | catch_entry->raw_stacktrace_var() != nullptr | 
|---|
| 1265 | ? EnvIndex(catch_entry->raw_stacktrace_var()) | 
|---|
| 1266 | : -1; | 
|---|
| 1267 |  | 
|---|
| 1268 | // Add real definitions for all locals and parameters. | 
|---|
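|  | // Because functions containing try-catch reserve a fixed stack slot per local (see | 
|---|
|  | // set_fixed_slot_count in Rename), local i can be reloaded directly from slot i here. | 
|---|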
| 1269 | ASSERT(variable_count() == env->length()); | 
|---|
| 1270 | for (intptr_t i = 0, n = variable_count(); i < n; ++i) { | 
|---|
| 1271 | // Replace usages of the raw exception/stacktrace variables with | 
|---|
| 1272 | // [SpecialParameterInstr]s. | 
|---|
| 1273 | Definition* param = nullptr; | 
|---|
| 1274 | if (raw_exception_var_envindex == i) { | 
|---|
| 1275 | param = new (Z) SpecialParameterInstr(SpecialParameterInstr::kException, | 
|---|
| 1276 | DeoptId::kNone, catch_entry); | 
|---|
| 1277 | } else if (raw_stacktrace_var_envindex == i) { | 
|---|
| 1278 | param = new (Z) SpecialParameterInstr(SpecialParameterInstr::kStackTrace, | 
|---|
| 1279 | DeoptId::kNone, catch_entry); | 
|---|
| 1280 | } else { | 
|---|
| 1281 | param = new (Z) ParameterInstr(i, i, catch_entry, kTagged); | 
|---|
| 1282 | } | 
|---|
| 1283 |  | 
|---|
| 1284 | param->set_ssa_temp_index(alloc_ssa_temp_index());  // New SSA temp. | 
|---|
| 1285 | (*env)[i] = param; | 
|---|
| 1286 | AddToInitialDefinitions(catch_entry, param); | 
|---|
| 1287 | } | 
|---|
| 1288 | } | 
|---|
| 1289 |  | 
|---|
| 1290 | void FlowGraph::AttachEnvironment(Instruction* instr, | 
|---|
| 1291 | GrowableArray<Definition*>* env) { | 
|---|
| 1292 | Environment* deopt_env = | 
|---|
| 1293 | Environment::From(zone(), *env, num_direct_parameters_, parsed_function_); | 
|---|
| 1294 | if (instr->IsClosureCall() || instr->IsLoadField()) { | 
|---|
| 1295 | // Trim extra inputs of ClosureCall and LoadField instructions from | 
|---|
| 1296 | // the environment. Inputs of those instructions are not pushed onto | 
|---|
| 1297 | // the stack at the point where deoptimization can occur. | 
|---|
| 1298 | // Note that for LoadField there are two possible situations. The code here | 
|---|
| 1299 | // handles LoadField-to-LoadField lazy deoptimization, in which we are | 
|---|
| 1300 | // transitioning from the position after the call to the initialization | 
|---|
| 1301 | // stub in optimized code to the corresponding position after the call to | 
|---|
| 1302 | // the initialization stub in unoptimized code. The other variant | 
|---|
| 1303 | // (LoadField deoptimizing into a position after a getter call) is | 
|---|
| 1304 | // handled in a different way (see | 
|---|
| 1305 | // CallSpecializer::InlineImplicitInstanceGetter). | 
|---|
| 1306 | deopt_env = | 
|---|
| 1307 | deopt_env->DeepCopy(zone(), deopt_env->Length() - instr->InputCount() + | 
|---|
| 1308 | instr->ArgumentCount()); | 
|---|
| 1309 | } | 
|---|
| 1310 | instr->SetEnvironment(deopt_env); | 
|---|
| 1311 | for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) { | 
|---|
| 1312 | Value* use = it.CurrentValue(); | 
|---|
| 1313 | use->definition()->AddEnvUse(use); | 
|---|
| 1314 | } | 
|---|
| 1315 | } | 
|---|
| 1316 |  | 
|---|
| 1317 | void FlowGraph::RenameRecursive( | 
|---|
| 1318 | BlockEntryInstr* block_entry, | 
|---|
| 1319 | GrowableArray<Definition*>* env, | 
|---|
| 1320 | GrowableArray<PhiInstr*>* live_phis, | 
|---|
| 1321 | VariableLivenessAnalysis* variable_liveness, | 
|---|
| 1322 | ZoneGrowableArray<Definition*>* inlining_parameters) { | 
|---|
| 1323 | // 1. Process phis first. | 
|---|
| 1324 | if (auto join = block_entry->AsJoinEntry()) { | 
|---|
| 1325 | if (join->phis() != nullptr) { | 
|---|
| 1326 | const intptr_t local_phi_count = variable_count() + join->stack_depth(); | 
|---|
| 1327 | ASSERT(join->phis()->length() == local_phi_count); | 
|---|
| 1328 | for (intptr_t i = 0; i < local_phi_count; ++i) { | 
|---|
| 1329 | PhiInstr* phi = (*join->phis())[i]; | 
|---|
| 1330 | if (phi != nullptr) { | 
|---|
| 1331 | (*env)[i] = phi; | 
|---|
| 1332 | AllocateSSAIndexes(phi);  // New SSA temp. | 
|---|
| 1333 | if (block_entry->InsideTryBlock() && !phi->is_alive()) { | 
|---|
| 1334 | // This is a safe approximation.  Inside try{} all locals are | 
|---|
| 1335 | // used at every call implicitly, so we mark all phis as live | 
|---|
| 1336 | // from the start. | 
|---|
| 1337 | // TODO(fschneider): Improve this approximation to eliminate | 
|---|
| 1338 | // more redundant phis. | 
|---|
| 1339 | phi->mark_alive(); | 
|---|
| 1340 | live_phis->Add(phi); | 
|---|
| 1341 | } | 
|---|
| 1342 | } | 
|---|
| 1343 | } | 
|---|
| 1344 | } | 
|---|
| 1345 | } else if (auto osr_entry = block_entry->AsOsrEntry()) { | 
|---|
| 1346 | PopulateEnvironmentFromOsrEntry(osr_entry, env); | 
|---|
| 1347 | } else if (auto function_entry = block_entry->AsFunctionEntry()) { | 
|---|
| 1348 | ASSERT(!IsCompiledForOsr()); | 
|---|
| 1349 | PopulateEnvironmentFromFunctionEntry( | 
|---|
| 1350 | function_entry, env, live_phis, variable_liveness, inlining_parameters); | 
|---|
| 1351 | } else if (auto catch_entry = block_entry->AsCatchBlockEntry()) { | 
|---|
| 1352 | PopulateEnvironmentFromCatchEntry(catch_entry, env); | 
|---|
| 1353 | } | 
|---|
| 1354 |  | 
|---|
| 1355 | if (!block_entry->IsGraphEntry() && | 
|---|
| 1356 | !block_entry->IsBlockEntryWithInitialDefs()) { | 
|---|
| 1357 | // Prune non-live variables at block entry by replacing their environment | 
|---|
| 1358 | // slots with the dead-value sentinel (constant_dead()). | 
|---|
| 1359 | BitVector* live_in = variable_liveness->GetLiveInSet(block_entry); | 
|---|
| 1360 | for (intptr_t i = 0; i < variable_count(); i++) { | 
|---|
| 1361 | // TODO(fschneider): Make sure that live_in always contains the | 
|---|
| 1362 | // CurrentContext variable to avoid the special case here. | 
|---|
| 1363 | if (FLAG_prune_dead_locals && !live_in->Contains(i) && | 
|---|
| 1364 | (i != CurrentContextEnvIndex())) { | 
|---|
| 1365 | (*env)[i] = constant_dead(); | 
|---|
| 1366 | } | 
|---|
| 1367 | } | 
|---|
| 1368 | } | 
|---|
| 1369 |  | 
|---|
| 1370 | // Attach environment to the block entry. | 
|---|
| 1371 | AttachEnvironment(block_entry, env); | 
|---|
| 1372 |  | 
|---|
| 1373 | // 2. Process normal instructions. | 
|---|
| 1374 | for (ForwardInstructionIterator it(block_entry); !it.Done(); it.Advance()) { | 
|---|
| 1375 | Instruction* current = it.Current(); | 
|---|
| 1376 |  | 
|---|
| 1377 | // Attach current environment to the instructions that need it. | 
|---|
| 1378 | if (current->NeedsEnvironment()) { | 
|---|
| 1379 | AttachEnvironment(current, env); | 
|---|
| 1380 | } | 
|---|
| 1381 |  | 
|---|
| 1382 | // 2a. Handle uses: | 
|---|
| 1383 | // Update the expression stack renaming environment for each use by | 
|---|
| 1384 | // removing the renamed value. For each use of a LoadLocal, StoreLocal, | 
|---|
| 1385 | // MakeTemp, DropTemps or Constant (or any expression under OSR), | 
|---|
| 1386 | // replace it with the renamed value. | 
|---|
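|  | // Inputs were pushed onto the simulated expression stack during graph building, so | 
|---|
|  | // the reaching definition for each input is popped off the tail of env and the use | 
|---|
|  | // is rebound from the placeholder (LoadLocal, Constant, ...) to that definition. | 
|---|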
| 1387 | for (intptr_t i = current->InputCount() - 1; i >= 0; --i) { | 
|---|
| 1388 | Value* v = current->InputAt(i); | 
|---|
| 1389 | // Update expression stack. | 
|---|
| 1390 | ASSERT(env->length() > variable_count()); | 
|---|
| 1391 | Definition* reaching_defn = env->RemoveLast(); | 
|---|
| 1392 | Definition* input_defn = v->definition(); | 
|---|
| 1393 | if (input_defn != reaching_defn) { | 
|---|
| 1394 | // Inspect the replacing definition before making the change. | 
|---|
| 1395 | if (IsCompiledForOsr()) { | 
|---|
| 1396 | // Under OSR, constants can reside on the expression stack. Just | 
|---|
| 1397 | // generate the constant rather than going through a synthetic phi. | 
|---|
| 1398 | if (input_defn->IsConstant() && reaching_defn->IsPhi()) { | 
|---|
| 1399 | ASSERT(env->length() < osr_variable_count()); | 
|---|
| 1400 | auto constant = GetConstant(input_defn->AsConstant()->value()); | 
|---|
| 1401 | current->ReplaceInEnvironment(reaching_defn, constant); | 
|---|
| 1402 | reaching_defn = constant; | 
|---|
| 1403 | } | 
|---|
| 1404 | } else { | 
|---|
| 1405 | // Note: constants can only be replaced with other constants. | 
|---|
| 1406 | ASSERT(input_defn->IsLoadLocal() || input_defn->IsStoreLocal() || | 
|---|
| 1407 | input_defn->IsDropTemps() || input_defn->IsMakeTemp() || | 
|---|
| 1408 | (input_defn->IsConstant() && reaching_defn->IsConstant())); | 
|---|
| 1409 | } | 
|---|
| 1410 | // Assert we are not referencing nulls in the initial environment. | 
|---|
| 1411 | ASSERT(reaching_defn->ssa_temp_index() != -1); | 
|---|
| 1412 | // Replace the definition. | 
|---|
| 1413 | v->set_definition(reaching_defn); | 
|---|
| 1414 | input_defn = reaching_defn; | 
|---|
| 1415 | } | 
|---|
| 1416 | input_defn->AddInputUse(v); | 
|---|
| 1417 | } | 
|---|
| 1418 |  | 
|---|
| 1419 | // 2b. Handle LoadLocal/StoreLocal/MakeTemp/DropTemps/Constant specially. | 
|---|
| 1420 | // Other definitions are just pushed to the environment directly. | 
|---|
| 1421 | Definition* result = NULL; | 
|---|
| 1422 | switch (current->tag()) { | 
|---|
| 1423 | case Instruction::kLoadLocal: { | 
|---|
| 1424 | LoadLocalInstr* load = current->Cast<LoadLocalInstr>(); | 
|---|
| 1425 |  | 
|---|
| 1426 | // The graph construction ensures we do not have an unused LoadLocal | 
|---|
| 1427 | // computation. | 
|---|
| 1428 | ASSERT(load->HasTemp()); | 
|---|
| 1429 | const intptr_t index = EnvIndex(&load->local()); | 
|---|
| 1430 | result = (*env)[index]; | 
|---|
| 1431 |  | 
|---|
| 1432 | PhiInstr* phi = result->AsPhi(); | 
|---|
| 1433 | if ((phi != NULL) && !phi->is_alive()) { | 
|---|
| 1434 | phi->mark_alive(); | 
|---|
| 1435 | live_phis->Add(phi); | 
|---|
| 1436 | } | 
|---|
| 1437 |  | 
|---|
| 1438 | if (FLAG_prune_dead_locals && | 
|---|
| 1439 | variable_liveness->IsLastLoad(block_entry, load)) { | 
|---|
| 1440 | (*env)[index] = constant_dead(); | 
|---|
| 1441 | } | 
|---|
| 1442 |  | 
|---|
| 1443 | // Record captured parameters so that they can be skipped when | 
|---|
| 1444 | // emitting sync code inside optimized try-blocks. | 
|---|
| 1445 | if (load->local().is_captured_parameter()) { | 
|---|
| 1446 | captured_parameters_->Add(index); | 
|---|
| 1447 | } | 
|---|
| 1448 |  | 
|---|
| 1449 | if (phi != nullptr) { | 
|---|
| 1450 | // Assign type to Phi if it doesn't have a type yet. | 
|---|
| 1451 | // For a Phi to appear in the local variable it was either placed | 
|---|
| 1452 | // there as an incoming value by renaming or it was stored there by a | 
|---|
| 1453 | // StoreLocal which took this Phi from another local via LoadLocal, | 
|---|
| 1454 | // to which this reasoning applies recursively. | 
|---|
| 1455 | // This means that we are guaranteed to process LoadLocal for a | 
|---|
| 1456 | // matching variable first. | 
|---|
| 1457 | if (!phi->HasType()) { | 
|---|
| 1458 | ASSERT((index < phi->block()->phis()->length()) && | 
|---|
| 1459 | ((*phi->block()->phis())[index] == phi)); | 
|---|
| 1460 | phi->UpdateType( | 
|---|
| 1461 | CompileType::FromAbstractType(load->local().type())); | 
|---|
| 1462 | } | 
|---|
| 1463 | } | 
|---|
| 1464 | break; | 
|---|
| 1465 | } | 
|---|
| 1466 |  | 
|---|
| 1467 | case Instruction::kStoreLocal: { | 
|---|
| 1468 | StoreLocalInstr* store = current->Cast<StoreLocalInstr>(); | 
|---|
| 1469 | const intptr_t index = EnvIndex(&store->local()); | 
|---|
| 1470 | result = store->value()->definition(); | 
|---|
| 1471 |  | 
|---|
| 1472 | if (!FLAG_prune_dead_locals || | 
|---|
| 1473 | variable_liveness->IsStoreAlive(block_entry, store)) { | 
|---|
| 1474 | (*env)[index] = result; | 
|---|
| 1475 | } else { | 
|---|
| 1476 | (*env)[index] = constant_dead(); | 
|---|
| 1477 | } | 
|---|
| 1478 | break; | 
|---|
| 1479 | } | 
|---|
| 1480 |  | 
|---|
| 1481 | case Instruction::kDropTemps: { | 
|---|
| 1482 | // Drop temps from the environment. | 
|---|
| 1483 | DropTempsInstr* drop = current->Cast<DropTempsInstr>(); | 
|---|
| 1484 | for (intptr_t j = 0; j < drop->num_temps(); j++) { | 
|---|
| 1485 | env->RemoveLast(); | 
|---|
| 1486 | } | 
|---|
| 1487 | if (drop->value() != NULL) { | 
|---|
| 1488 | result = drop->value()->definition(); | 
|---|
| 1489 | } | 
|---|
| 1490 | ASSERT((drop->value() != NULL) || !drop->HasTemp()); | 
|---|
| 1491 | break; | 
|---|
| 1492 | } | 
|---|
| 1493 |  | 
|---|
| 1494 | case Instruction::kConstant: { | 
|---|
| 1495 | ConstantInstr* constant = current->Cast<ConstantInstr>(); | 
|---|
| 1496 | if (constant->HasTemp()) { | 
|---|
| 1497 | result = GetConstant(constant->value()); | 
|---|
| 1498 | } | 
|---|
| 1499 | break; | 
|---|
| 1500 | } | 
|---|
| 1501 |  | 
|---|
| 1502 | case Instruction::kMakeTemp: { | 
|---|
| 1503 | // Simply push a #null value to the expression stack. | 
|---|
| 1504 | result = constant_null_; | 
|---|
| 1505 | break; | 
|---|
| 1506 | } | 
|---|
| 1507 |  | 
|---|
| 1508 | case Instruction::kPushArgument: | 
|---|
| 1509 | UNREACHABLE(); | 
|---|
| 1510 | break; | 
|---|
| 1511 |  | 
|---|
| 1512 | case Instruction::kCheckStackOverflow: | 
|---|
| 1513 | // Assert environment integrity at checkpoints. | 
|---|
| 1514 | ASSERT((variable_count() + | 
|---|
| 1515 | current->AsCheckStackOverflow()->stack_depth()) == | 
|---|
| 1516 | env->length()); | 
|---|
| 1517 | continue; | 
|---|
| 1518 |  | 
|---|
| 1519 | default: | 
|---|
| 1520 | // Other definitions directly go into the environment. | 
|---|
| 1521 | if (Definition* definition = current->AsDefinition()) { | 
|---|
| 1522 | if (definition->HasTemp()) { | 
|---|
| 1523 | // Assign fresh SSA temporary and update expression stack. | 
|---|
| 1524 | AllocateSSAIndexes(definition); | 
|---|
| 1525 | env->Add(definition); | 
|---|
| 1526 | } | 
|---|
| 1527 | } | 
|---|
| 1528 | continue; | 
|---|
| 1529 | } | 
|---|
| 1530 |  | 
|---|
| 1531 | // Update expression stack and remove current instruction from the graph. | 
|---|
| 1532 | Definition* definition = current->Cast<Definition>(); | 
|---|
| 1533 | if (definition->HasTemp()) { | 
|---|
| 1534 | ASSERT(result != nullptr); | 
|---|
| 1535 | env->Add(result); | 
|---|
| 1536 | } | 
|---|
| 1537 | it.RemoveCurrentFromGraph(); | 
|---|
| 1538 | } | 
|---|
| 1539 |  | 
|---|
| 1540 | // 3. Process dominated blocks. | 
|---|
| 1541 | const bool set_stack = (block_entry == graph_entry()) && IsCompiledForOsr(); | 
|---|
| 1542 | for (intptr_t i = 0; i < block_entry->dominated_blocks().length(); ++i) { | 
|---|
| 1543 | BlockEntryInstr* block = block_entry->dominated_blocks()[i]; | 
|---|
| 1544 | GrowableArray<Definition*> new_env(env->length()); | 
|---|
| 1545 | new_env.AddArray(*env); | 
|---|
| 1546 | // During OSR, when traversing from the graph entry directly to any block | 
|---|
| 1547 | // (which may be a non-entry), we must adjust the environment to mimic | 
|---|
| 1548 | // a non-empty incoming expression stack to ensure temporaries refer to | 
|---|
| 1549 | // the right stack items. | 
|---|
| 1550 | const intptr_t stack_depth = block->stack_depth(); | 
|---|
| 1551 | ASSERT(stack_depth >= 0); | 
|---|
| 1552 | if (set_stack) { | 
|---|
| 1553 | ASSERT(variable_count() == new_env.length()); | 
|---|
| 1554 | new_env.FillWith(constant_dead(), variable_count(), stack_depth); | 
|---|
| 1555 | } else if (!block->last_instruction()->IsTailCall()) { | 
|---|
| 1556 | // Assert environment integrity otherwise. | 
|---|
| 1557 | ASSERT((variable_count() + stack_depth) == new_env.length()); | 
|---|
| 1558 | } | 
|---|
| 1559 | RenameRecursive(block, &new_env, live_phis, variable_liveness, | 
|---|
| 1560 | inlining_parameters); | 
|---|
| 1561 | } | 
|---|
| 1562 |  | 
|---|
| 1563 | // 4. Process successor block. We have edge-split form, so that only blocks | 
|---|
| 1564 | // with one successor can have a join block as successor. | 
|---|
| 1565 | if ((block_entry->last_instruction()->SuccessorCount() == 1) && | 
|---|
| 1566 | block_entry->last_instruction()->SuccessorAt(0)->IsJoinEntry()) { | 
|---|
| 1567 | JoinEntryInstr* successor = | 
|---|
| 1568 | block_entry->last_instruction()->SuccessorAt(0)->AsJoinEntry(); | 
|---|
| 1569 | intptr_t pred_index = successor->IndexOfPredecessor(block_entry); | 
|---|
| 1570 | ASSERT(pred_index >= 0); | 
|---|
| 1571 | if (successor->phis() != NULL) { | 
|---|
| 1572 | for (intptr_t i = 0; i < successor->phis()->length(); ++i) { | 
|---|
| 1573 | PhiInstr* phi = (*successor->phis())[i]; | 
|---|
| 1574 | if (phi != nullptr) { | 
|---|
| 1575 | // Rename input operand. | 
|---|
| 1576 | Definition* input = (*env)[i]; | 
|---|
| 1577 | ASSERT(input != nullptr); | 
|---|
| 1578 | ASSERT(!input->IsPushArgument()); | 
|---|
| 1579 | Value* use = new (zone()) Value(input); | 
|---|
| 1580 | phi->SetInputAt(pred_index, use); | 
|---|
| 1581 | } | 
|---|
| 1582 | } | 
|---|
| 1583 | } | 
|---|
| 1584 | } | 
|---|
| 1585 | } | 
|---|
| 1586 |  | 
|---|
| 1587 | void FlowGraph::RemoveDeadPhis(GrowableArray<PhiInstr*>* live_phis) { | 
|---|
| 1588 | // Augment live_phis with those that have implicit real uses at | 
|---|
| 1589 | // potentially throwing instructions if there is a try-catch in this graph. | 
|---|
| 1590 | if (!graph_entry()->catch_entries().is_empty()) { | 
|---|
| 1591 | for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) { | 
|---|
| 1592 | JoinEntryInstr* join = it.Current()->AsJoinEntry(); | 
|---|
| 1593 | if (join == NULL) continue; | 
|---|
| 1594 | for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) { | 
|---|
| 1595 | PhiInstr* phi = phi_it.Current(); | 
|---|
| 1596 | if (phi == NULL || phi->is_alive() || (phi->input_use_list() != NULL) || | 
|---|
| 1597 | (phi->env_use_list() == NULL)) { | 
|---|
| 1598 | continue; | 
|---|
| 1599 | } | 
|---|
| 1600 | for (Value::Iterator it(phi->env_use_list()); !it.Done(); | 
|---|
| 1601 | it.Advance()) { | 
|---|
| 1602 | Value* use = it.Current(); | 
|---|
| 1603 | if (use->instruction()->MayThrow() && | 
|---|
| 1604 | use->instruction()->GetBlock()->InsideTryBlock()) { | 
|---|
| 1605 | live_phis->Add(phi); | 
|---|
| 1606 | phi->mark_alive(); | 
|---|
| 1607 | break; | 
|---|
| 1608 | } | 
|---|
| 1609 | } | 
|---|
| 1610 | } | 
|---|
| 1611 | } | 
|---|
| 1612 | } | 
|---|
| 1613 |  | 
|---|
| 1614 | while (!live_phis->is_empty()) { | 
|---|
| 1615 | PhiInstr* phi = live_phis->RemoveLast(); | 
|---|
| 1616 | for (intptr_t i = 0; i < phi->InputCount(); i++) { | 
|---|
| 1617 | Value* val = phi->InputAt(i); | 
|---|
| 1618 | PhiInstr* used_phi = val->definition()->AsPhi(); | 
|---|
| 1619 | if ((used_phi != NULL) && !used_phi->is_alive()) { | 
|---|
| 1620 | used_phi->mark_alive(); | 
|---|
| 1621 | live_phis->Add(used_phi); | 
|---|
| 1622 | } | 
|---|
| 1623 | } | 
|---|
| 1624 | } | 
|---|
| 1625 |  | 
|---|
| 1626 | for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) { | 
|---|
| 1627 | JoinEntryInstr* join = it.Current()->AsJoinEntry(); | 
|---|
| 1628 | if (join != NULL) join->RemoveDeadPhis(constant_dead()); | 
|---|
| 1629 | } | 
|---|
| 1630 | } | 
|---|
| 1631 |  | 
|---|
| 1632 | RedefinitionInstr* FlowGraph::EnsureRedefinition(Instruction* prev, | 
|---|
| 1633 | Definition* original, | 
|---|
| 1634 | CompileType compile_type) { | 
|---|
| 1635 | RedefinitionInstr* first = prev->next()->AsRedefinition(); | 
|---|
| 1636 | if (first != nullptr && (first->constrained_type() != nullptr)) { | 
|---|
| 1637 | if ((first->value()->definition() == original) && | 
|---|
| 1638 | first->constrained_type()->IsEqualTo(&compile_type)) { | 
|---|
| 1639 | // Already redefined. Do nothing. | 
|---|
| 1640 | return nullptr; | 
|---|
| 1641 | } | 
|---|
| 1642 | } | 
|---|
| 1643 | RedefinitionInstr* redef = new RedefinitionInstr(new Value(original)); | 
|---|
| 1644 |  | 
|---|
| 1645 | // Don't set the constrained type when the type is None(), which denotes an | 
|---|
| 1646 | // unreachable value (e.g. using value null after some form of null check). | 
|---|
| 1647 | if (!compile_type.IsNone()) { | 
|---|
| 1648 | redef->set_constrained_type(new CompileType(compile_type)); | 
|---|
| 1649 | } | 
|---|
| 1650 |  | 
|---|
| 1651 | InsertAfter(prev, redef, nullptr, FlowGraph::kValue); | 
|---|
| 1652 | RenameDominatedUses(original, redef, redef); | 
|---|
| 1653 |  | 
|---|
| 1654 | if (redef->input_use_list() == nullptr) { | 
|---|
| 1655 | // There are no dominated uses, so the newly added Redefinition is useless. | 
|---|
| 1656 | // Remove Redefinition to avoid interfering with | 
|---|
| 1657 | // BranchSimplifier::Simplify which needs empty blocks. | 
|---|
| 1658 | redef->RemoveFromGraph(); | 
|---|
| 1659 | return nullptr; | 
|---|
| 1660 | } | 
|---|
| 1661 |  | 
|---|
| 1662 | return redef; | 
|---|
| 1663 | } | 
|---|
| 1664 |  | 
|---|
| 1665 | void FlowGraph::RemoveRedefinitions(bool keep_checks) { | 
|---|
| 1666 | // Remove redefinition and check instructions that were inserted | 
|---|
| 1667 | // to make a control dependence explicit with a data dependence, | 
|---|
| 1668 | // for example, to inhibit hoisting. | 
|---|
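|  | // For example, a redefinition inserted after a class or null check keeps dominated | 
|---|
|  | // uses pinned below that check; once those constraints are no longer needed, each | 
|---|
|  | // redefinition is forwarded to its underlying value and removed from the graph. | 
|---|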
| 1669 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 1670 | block_it.Advance()) { | 
|---|
| 1671 | thread()->CheckForSafepoint(); | 
|---|
| 1672 | for (ForwardInstructionIterator instr_it(block_it.Current()); | 
|---|
| 1673 | !instr_it.Done(); instr_it.Advance()) { | 
|---|
| 1674 | Instruction* instruction = instr_it.Current(); | 
|---|
| 1675 | if (auto redef = instruction->AsRedefinition()) { | 
|---|
| 1676 | redef->ReplaceUsesWith(redef->value()->definition()); | 
|---|
| 1677 | instr_it.RemoveCurrentFromGraph(); | 
|---|
| 1678 | } else if (keep_checks) { | 
|---|
| 1679 | continue; | 
|---|
| 1680 | } else if (auto def = instruction->AsDefinition()) { | 
|---|
| 1681 | Value* value = def->RedefinedValue(); | 
|---|
| 1682 | if (value != nullptr) { | 
|---|
| 1683 | def->ReplaceUsesWith(value->definition()); | 
|---|
| 1684 | def->ClearSSATempIndex(); | 
|---|
| 1685 | } | 
|---|
| 1686 | } | 
|---|
| 1687 | } | 
|---|
| 1688 | } | 
|---|
| 1689 | } | 
|---|
| 1690 |  | 
|---|
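|  | // Returns the blocks of the natural loop with header n and back edge m->n: | 
|---|
|  | // starting from m, walk predecessors backwards until the header n is reached. | 
|---|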
| 1691 | BitVector* FlowGraph::FindLoopBlocks(BlockEntryInstr* m, | 
|---|
| 1692 | BlockEntryInstr* n) const { | 
|---|
| 1693 | GrowableArray<BlockEntryInstr*> stack; | 
|---|
| 1694 | BitVector* loop_blocks = new (zone()) BitVector(zone(), preorder_.length()); | 
|---|
| 1695 |  | 
|---|
| 1696 | loop_blocks->Add(n->preorder_number()); | 
|---|
| 1697 | if (n != m) { | 
|---|
| 1698 | loop_blocks->Add(m->preorder_number()); | 
|---|
| 1699 | stack.Add(m); | 
|---|
| 1700 | } | 
|---|
| 1701 |  | 
|---|
| 1702 | while (!stack.is_empty()) { | 
|---|
| 1703 | BlockEntryInstr* p = stack.RemoveLast(); | 
|---|
| 1704 | for (intptr_t i = 0; i < p->PredecessorCount(); ++i) { | 
|---|
| 1705 | BlockEntryInstr* q = p->PredecessorAt(i); | 
|---|
| 1706 | if (!loop_blocks->Contains(q->preorder_number())) { | 
|---|
| 1707 | loop_blocks->Add(q->preorder_number()); | 
|---|
| 1708 | stack.Add(q); | 
|---|
| 1709 | } | 
|---|
| 1710 | } | 
|---|
| 1711 | } | 
|---|
| 1712 | return loop_blocks; | 
|---|
| 1713 | } | 
|---|
| 1714 |  | 
|---|
| 1715 | LoopHierarchy* FlowGraph::ComputeLoops() const { | 
|---|
| 1716 | // Iterate over all entry blocks in the flow graph to attach | 
|---|
| 1717 | // loop information to each loop header. | 
|---|
| 1718 | ZoneGrowableArray<BlockEntryInstr*>* loop_headers = | 
|---|
| 1719 | new (zone()) ZoneGrowableArray<BlockEntryInstr*>(); | 
|---|
| 1720 | for (BlockIterator it = postorder_iterator(); !it.Done(); it.Advance()) { | 
|---|
| 1721 | BlockEntryInstr* block = it.Current(); | 
|---|
| 1722 | // Reset loop information on every entry block (since this method | 
|---|
| 1723 | // may recompute loop information on a modified flow graph). | 
|---|
| 1724 | block->set_loop_info(nullptr); | 
|---|
| 1725 | // Iterate over predecessors to find back edges. | 
|---|
| 1726 | for (intptr_t i = 0; i < block->PredecessorCount(); ++i) { | 
|---|
| 1727 | BlockEntryInstr* pred = block->PredecessorAt(i); | 
|---|
| 1728 | if (block->Dominates(pred)) { | 
|---|
| 1729 | // Identify the block as a loop header and add the blocks in the | 
|---|
| 1730 | // loop to the loop information. Loops that share the same loop | 
|---|
| 1731 | // header are treated as one loop by merging these blocks. | 
|---|
| 1732 | BitVector* loop_blocks = FindLoopBlocks(pred, block); | 
|---|
| 1733 | if (block->loop_info() == nullptr) { | 
|---|
| 1734 | intptr_t id = loop_headers->length(); | 
|---|
| 1735 | block->set_loop_info(new (zone()) LoopInfo(id, block, loop_blocks)); | 
|---|
| 1736 | loop_headers->Add(block); | 
|---|
| 1737 | } else { | 
|---|
| 1738 | ASSERT(block->loop_info()->header() == block); | 
|---|
| 1739 | block->loop_info()->AddBlocks(loop_blocks); | 
|---|
| 1740 | } | 
|---|
| 1741 | block->loop_info()->AddBackEdge(pred); | 
|---|
| 1742 | } | 
|---|
| 1743 | } | 
|---|
| 1744 | } | 
|---|
| 1745 |  | 
|---|
| 1746 | // Build the loop hierarchy and link every entry block to | 
|---|
| 1747 | // the closest enveloping loop in the loop hierarchy. | 
|---|
| 1748 | return new (zone()) LoopHierarchy(loop_headers, preorder_); | 
|---|
| 1749 | } | 
|---|
| 1750 |  | 
|---|
| 1751 | intptr_t FlowGraph::InstructionCount() const { | 
|---|
| 1752 | intptr_t size = 0; | 
|---|
| 1753 | // Iterate each block, skipping the graph entry. | 
|---|
| 1754 | for (intptr_t i = 1; i < preorder_.length(); ++i) { | 
|---|
| 1755 | BlockEntryInstr* block = preorder_[i]; | 
|---|
| 1756 |  | 
|---|
| 1757 | // Skip any blocks from the prologue so that they do not count towards the | 
|---|
| 1758 | // inlining instruction budget. | 
|---|
| 1759 | const intptr_t block_id = block->block_id(); | 
|---|
| 1760 | if (prologue_info_.Contains(block_id)) { | 
|---|
| 1761 | continue; | 
|---|
| 1762 | } | 
|---|
| 1763 |  | 
|---|
| 1764 | for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { | 
|---|
| 1765 | ++size; | 
|---|
| 1766 | } | 
|---|
| 1767 | } | 
|---|
| 1768 | return size; | 
|---|
| 1769 | } | 
|---|
| 1770 |  | 
|---|
| 1771 | void FlowGraph::ConvertUse(Value* use, Representation from_rep) { | 
|---|
| 1772 | const Representation to_rep = | 
|---|
| 1773 | use->instruction()->RequiredInputRepresentation(use->use_index()); | 
|---|
| 1774 | if (from_rep == to_rep || to_rep == kNoRepresentation) { | 
|---|
| 1775 | return; | 
|---|
| 1776 | } | 
|---|
| 1777 | InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/false); | 
|---|
| 1778 | } | 
|---|
| 1779 |  | 
|---|
| 1780 | static bool IsUnboxedInteger(Representation rep) { | 
|---|
| 1781 | return (rep == kUnboxedInt32) || (rep == kUnboxedUint32) || | 
|---|
| 1782 | (rep == kUnboxedInt64); | 
|---|
| 1783 | } | 
|---|
| 1784 |  | 
|---|
| 1785 | static bool ShouldInlineSimd() { | 
|---|
| 1786 | return FlowGraphCompiler::SupportsUnboxedSimd128(); | 
|---|
| 1787 | } | 
|---|
| 1788 |  | 
|---|
| 1789 | static bool CanUnboxDouble() { | 
|---|
| 1790 | return FlowGraphCompiler::SupportsUnboxedDoubles(); | 
|---|
| 1791 | } | 
|---|
| 1792 |  | 
|---|
| 1793 | static bool CanConvertInt64ToDouble() { | 
|---|
| 1794 | return FlowGraphCompiler::CanConvertInt64ToDouble(); | 
|---|
| 1795 | } | 
|---|
| 1796 |  | 
|---|
| 1797 | void FlowGraph::InsertConversion(Representation from, | 
|---|
| 1798 | Representation to, | 
|---|
| 1799 | Value* use, | 
|---|
| 1800 | bool is_environment_use) { | 
|---|
| 1801 | Instruction* insert_before; | 
|---|
| 1802 | Instruction* deopt_target; | 
|---|
| 1803 | PhiInstr* phi = use->instruction()->AsPhi(); | 
|---|
| 1804 | if (phi != NULL) { | 
|---|
| 1805 | ASSERT(phi->is_alive()); | 
|---|
| 1806 | // For phis, conversions have to be inserted in the predecessor block. | 
|---|
| 1807 | auto predecessor = phi->block()->PredecessorAt(use->use_index()); | 
|---|
| 1808 | insert_before = predecessor->last_instruction(); | 
|---|
| 1809 | ASSERT(insert_before->GetBlock() == predecessor); | 
|---|
| 1810 | deopt_target = NULL; | 
|---|
| 1811 | } else { | 
|---|
| 1812 | deopt_target = insert_before = use->instruction(); | 
|---|
| 1813 | } | 
|---|
| 1814 |  | 
|---|
| 1815 | Definition* converted = NULL; | 
|---|
| 1816 | if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) { | 
|---|
| 1817 | const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL) | 
|---|
| 1818 | ? deopt_target->DeoptimizationTarget() | 
|---|
| 1819 | : DeoptId::kNone; | 
|---|
| 1820 | converted = | 
|---|
| 1821 | new (Z) IntConverterInstr(from, to, use->CopyWithType(), deopt_id); | 
|---|
| 1822 | } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) { | 
|---|
| 1823 | converted = new Int32ToDoubleInstr(use->CopyWithType()); | 
|---|
| 1824 | } else if ((from == kUnboxedInt64) && (to == kUnboxedDouble) && | 
|---|
| 1825 | CanConvertInt64ToDouble()) { | 
|---|
| 1826 | const intptr_t deopt_id = (deopt_target != NULL) | 
|---|
| 1827 | ? deopt_target->DeoptimizationTarget() | 
|---|
| 1828 | : DeoptId::kNone; | 
|---|
| 1829 | ASSERT(CanUnboxDouble()); | 
|---|
| 1830 | converted = new Int64ToDoubleInstr(use->CopyWithType(), deopt_id); | 
|---|
| 1831 | } else if ((from == kTagged) && Boxing::Supports(to)) { | 
|---|
| 1832 | const intptr_t deopt_id = (deopt_target != NULL) | 
|---|
| 1833 | ? deopt_target->DeoptimizationTarget() | 
|---|
| 1834 | : DeoptId::kNone; | 
|---|
| 1835 | converted = UnboxInstr::Create( | 
|---|
| 1836 | to, use->CopyWithType(), deopt_id, | 
|---|
| 1837 | use->instruction()->SpeculativeModeOfInput(use->use_index())); | 
|---|
| 1838 | } else if ((to == kTagged) && Boxing::Supports(from)) { | 
|---|
| 1839 | converted = BoxInstr::Create(from, use->CopyWithType()); | 
|---|
| 1840 | } else { | 
|---|
| 1841 | // We have failed to find a suitable conversion instruction. | 
|---|
| 1842 | // Insert two "dummy" conversion instructions with the correct | 
|---|
| 1843 | // "from" and "to" representation. The inserted instructions will | 
|---|
| 1844 | // trigger a deoptimization if executed. See #12417 for a discussion. | 
|---|
| 1845 | const intptr_t deopt_id = (deopt_target != NULL) | 
|---|
| 1846 | ? deopt_target->DeoptimizationTarget() | 
|---|
| 1847 | : DeoptId::kNone; | 
|---|
| 1848 | ASSERT(Boxing::Supports(from)); | 
|---|
| 1849 | ASSERT(Boxing::Supports(to)); | 
|---|
| 1850 | Definition* boxed = BoxInstr::Create(from, use->CopyWithType()); | 
|---|
| 1851 | use->BindTo(boxed); | 
|---|
| 1852 | InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); | 
|---|
| 1853 | converted = UnboxInstr::Create(to, new (Z) Value(boxed), deopt_id); | 
|---|
| 1854 | } | 
|---|
| 1855 | ASSERT(converted != NULL); | 
|---|
| 1856 | InsertBefore(insert_before, converted, use->instruction()->env(), | 
|---|
| 1857 | FlowGraph::kValue); | 
|---|
| 1858 | if (is_environment_use) { | 
|---|
| 1859 | use->BindToEnvironment(converted); | 
|---|
| 1860 | } else { | 
|---|
| 1861 | use->BindTo(converted); | 
|---|
| 1862 | } | 
|---|
| 1863 |  | 
|---|
| 1864 | if ((to == kUnboxedInt32) && (phi != NULL)) { | 
|---|
| 1865 | // Int32 phis are unboxed optimistically. Ensure that unboxing | 
|---|
| 1866 | // has a deoptimization target attached from the goto instruction. | 
|---|
| 1867 | CopyDeoptTarget(converted, insert_before); | 
|---|
| 1868 | } | 
|---|
| 1869 | } | 
|---|
| 1870 |  | 
|---|
| 1871 | void FlowGraph::InsertConversionsFor(Definition* def) { | 
|---|
| 1872 | const Representation from_rep = def->representation(); | 
|---|
| 1873 |  | 
|---|
| 1874 | for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) { | 
|---|
| 1875 | ConvertUse(it.Current(), from_rep); | 
|---|
| 1876 | } | 
|---|
| 1877 | } | 
|---|
| 1878 |  | 
|---|
| 1879 | static void UnboxPhi(PhiInstr* phi) { | 
|---|
| 1880 | Representation unboxed = phi->representation(); | 
|---|
| 1881 |  | 
|---|
| 1882 | switch (phi->Type()->ToCid()) { | 
|---|
| 1883 | case kDoubleCid: | 
|---|
| 1884 | if (CanUnboxDouble()) { | 
|---|
| 1885 | unboxed = kUnboxedDouble; | 
|---|
| 1886 | } | 
|---|
| 1887 | break; | 
|---|
| 1888 | case kFloat32x4Cid: | 
|---|
| 1889 | if (ShouldInlineSimd()) { | 
|---|
| 1890 | unboxed = kUnboxedFloat32x4; | 
|---|
| 1891 | } | 
|---|
| 1892 | break; | 
|---|
| 1893 | case kInt32x4Cid: | 
|---|
| 1894 | if (ShouldInlineSimd()) { | 
|---|
| 1895 | unboxed = kUnboxedInt32x4; | 
|---|
| 1896 | } | 
|---|
| 1897 | break; | 
|---|
| 1898 | case kFloat64x2Cid: | 
|---|
| 1899 | if (ShouldInlineSimd()) { | 
|---|
| 1900 | unboxed = kUnboxedFloat64x2; | 
|---|
| 1901 | } | 
|---|
| 1902 | break; | 
|---|
| 1903 | } | 
|---|
| 1904 |  | 
|---|
| 1905 | // If the phi is a tagged integer but all its inputs are unboxed integers, unbox the phi as well. | 
|---|
| 1906 | if ((unboxed == kTagged) && phi->Type()->IsInt()) { | 
|---|
| 1907 | bool should_unbox = true; | 
|---|
| 1908 | Representation new_representation = kTagged; | 
|---|
| 1909 | for (intptr_t i = 0; i < phi->InputCount(); i++) { | 
|---|
| 1910 | Definition* input = phi->InputAt(i)->definition(); | 
|---|
| 1911 | if (input->representation() != kUnboxedInt64 && | 
|---|
| 1912 | input->representation() != kUnboxedInt32 && | 
|---|
| 1913 | input->representation() != kUnboxedUint32 && !(input == phi)) { | 
|---|
| 1914 | should_unbox = false; | 
|---|
| 1915 | break; | 
|---|
| 1916 | } | 
|---|
| 1917 |  | 
|---|
| 1918 | if (new_representation == kTagged) { | 
|---|
| 1919 | new_representation = input->representation(); | 
|---|
| 1920 | } else if (new_representation != input->representation()) { | 
|---|
| 1921 | new_representation = kNoRepresentation; | 
|---|
| 1922 | } | 
|---|
| 1923 | } | 
|---|
| 1924 | if (should_unbox) { | 
|---|
| 1925 | unboxed = new_representation != kNoRepresentation | 
|---|
| 1926 | ? new_representation | 
|---|
| 1927 | : RangeUtils::Fits(phi->range(), | 
|---|
| 1928 | RangeBoundary::kRangeBoundaryInt32) | 
|---|
| 1929 | ? kUnboxedInt32 | 
|---|
| 1930 | : kUnboxedInt64; | 
|---|
| 1931 | } | 
|---|
| 1932 | } | 
|---|
| 1933 |  | 
|---|
| 1934 | if ((unboxed == kTagged) && phi->Type()->IsInt()) { | 
|---|
| 1935 | // Conservatively unbox phis that: | 
|---|
| 1936 | //   - are proven to be of type Int; | 
|---|
| 1937 | //   - fit into the 64-bit range; | 
|---|
| 1938 | //   - have either constants or Box() operations as inputs; | 
|---|
| 1939 | //   - have at least one Box() operation as an input; | 
|---|
| 1940 | //   - are used in at least 1 Unbox() operation. | 
|---|
| 1941 | bool should_unbox = false; | 
|---|
| 1942 | for (intptr_t i = 0; i < phi->InputCount(); i++) { | 
|---|
| 1943 | Definition* input = phi->InputAt(i)->definition(); | 
|---|
| 1944 | if (input->IsBox()) { | 
|---|
| 1945 | should_unbox = true; | 
|---|
| 1946 | } else if (!input->IsConstant()) { | 
|---|
| 1947 | should_unbox = false; | 
|---|
| 1948 | break; | 
|---|
| 1949 | } | 
|---|
| 1950 | } | 
|---|
| 1951 |  | 
|---|
| 1952 | if (should_unbox) { | 
|---|
| 1953 | // We checked inputs. Check if phi is used in at least one unbox | 
|---|
| 1954 | // operation. | 
|---|
| 1955 | bool has_unboxed_use = false; | 
|---|
| 1956 | for (Value* use = phi->input_use_list(); use != NULL; | 
|---|
| 1957 | use = use->next_use()) { | 
|---|
| 1958 | Instruction* instr = use->instruction(); | 
|---|
| 1959 | if (instr->IsUnbox()) { | 
|---|
| 1960 | has_unboxed_use = true; | 
|---|
| 1961 | break; | 
|---|
| 1962 | } else if (IsUnboxedInteger( | 
|---|
| 1963 | instr->RequiredInputRepresentation(use->use_index()))) { | 
|---|
| 1964 | has_unboxed_use = true; | 
|---|
| 1965 | break; | 
|---|
| 1966 | } | 
|---|
| 1967 | } | 
|---|
| 1968 |  | 
|---|
| 1969 | if (!has_unboxed_use) { | 
|---|
| 1970 | should_unbox = false; | 
|---|
| 1971 | } | 
|---|
| 1972 | } | 
|---|
| 1973 |  | 
|---|
| 1974 | if (should_unbox) { | 
|---|
| 1975 | unboxed = | 
|---|
| 1976 | RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32) | 
|---|
| 1977 | ? kUnboxedInt32 | 
|---|
| 1978 | : kUnboxedInt64; | 
|---|
| 1979 | } | 
|---|
| 1980 | } | 
|---|
| 1981 |  | 
|---|
| 1982 | phi->set_representation(unboxed); | 
|---|
| 1983 | } | 
|---|
| 1984 |  | 
|---|
| 1985 | void FlowGraph::SelectRepresentations() { | 
|---|
| 1986 | // First we decide for each phi if it is beneficial to unbox it. If so, we | 
|---|
| 1987 | // change its `phi->representation()`. | 
|---|
| 1988 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 1989 | block_it.Advance()) { | 
|---|
| 1990 | JoinEntryInstr* join_entry = block_it.Current()->AsJoinEntry(); | 
|---|
| 1991 | if (join_entry != NULL) { | 
|---|
| 1992 | for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { | 
|---|
| 1993 | PhiInstr* phi = it.Current(); | 
|---|
| 1994 | UnboxPhi(phi); | 
|---|
| 1995 | } | 
|---|
| 1996 | } | 
|---|
| 1997 | } | 
|---|
| 1998 |  | 
|---|
| 1999 | // Process all initial definitions and insert conversions when needed (depends | 
|---|
| 2000 | // on phi unboxing decision above). | 
|---|
| 2001 | for (intptr_t i = 0; i < graph_entry()->initial_definitions()->length(); | 
|---|
| 2002 | i++) { | 
|---|
| 2003 | InsertConversionsFor((*graph_entry()->initial_definitions())[i]); | 
|---|
| 2004 | } | 
|---|
| 2005 | for (intptr_t i = 0; i < graph_entry()->SuccessorCount(); ++i) { | 
|---|
| 2006 | auto successor = graph_entry()->SuccessorAt(i); | 
|---|
| 2007 | if (auto entry = successor->AsBlockEntryWithInitialDefs()) { | 
|---|
| 2008 | auto& initial_definitions = *entry->initial_definitions(); | 
|---|
| 2009 | for (intptr_t j = 0; j < initial_definitions.length(); j++) { | 
|---|
| 2010 | InsertConversionsFor(initial_definitions[j]); | 
|---|
| 2011 | } | 
|---|
| 2012 | } | 
|---|
| 2013 | } | 
|---|
| 2014 |  | 
|---|
| 2015 | // Process all normal definitions and insert conversions when needed (depends | 
|---|
| 2016 | // on phi unboxing decision above). | 
|---|
| 2017 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2018 | block_it.Advance()) { | 
|---|
| 2019 | BlockEntryInstr* entry = block_it.Current(); | 
|---|
| 2020 | if (JoinEntryInstr* join_entry = entry->AsJoinEntry()) { | 
|---|
| 2021 | for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { | 
|---|
| 2022 | PhiInstr* phi = it.Current(); | 
|---|
| 2023 | ASSERT(phi != NULL); | 
|---|
| 2024 | ASSERT(phi->is_alive()); | 
|---|
| 2025 | InsertConversionsFor(phi); | 
|---|
| 2026 | } | 
|---|
| 2027 | } | 
|---|
| 2028 | for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { | 
|---|
| 2029 | Definition* def = it.Current()->AsDefinition(); | 
|---|
| 2030 | if (def != NULL) { | 
|---|
| 2031 | InsertConversionsFor(def); | 
|---|
| 2032 | } | 
|---|
| 2033 | } | 
|---|
| 2034 | } | 
|---|
| 2035 | } | 
|---|
| 2036 |  | 
|---|
| 2037 | #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) | 
|---|
| 2038 | // Smi widening pass is only meaningful on platforms where Smi | 
|---|
| 2039 | // is smaller than 32 bits. For now it is only supported on ARM and ia32. | 
|---|
| 2040 | static bool CanBeWidened(BinarySmiOpInstr* smi_op) { | 
|---|
| 2041 | return BinaryInt32OpInstr::IsSupported(smi_op->op_kind(), smi_op->left(), | 
|---|
| 2042 | smi_op->right()); | 
|---|
| 2043 | } | 
|---|
| 2044 |  | 
|---|
| 2045 | static bool BenefitsFromWidening(BinarySmiOpInstr* smi_op) { | 
|---|
| 2046 | // TODO(vegorov): when shifts with non-constant shift counts are supported, | 
|---|
| 2047 | // add them here as we save untagging of the count. | 
|---|
| 2048 | switch (smi_op->op_kind()) { | 
|---|
| 2049 | case Token::kMUL: | 
|---|
| 2050 | case Token::kSHR: | 
|---|
| 2051 | // For kMUL we save untagging of the argument; for kSHR | 
|---|
| 2052 | // we save tagging of the result. | 
|---|
| 2053 | return true; | 
|---|
| 2054 |  | 
|---|
| 2055 | default: | 
|---|
| 2056 | return false; | 
|---|
| 2057 | } | 
|---|
| 2058 | } | 
|---|
| 2059 |  | 
|---|
| 2060 | // Maps an entry block to its closest enveloping loop id, or -1 if none. | 
|---|
| 2061 | static intptr_t LoopId(BlockEntryInstr* block) { | 
|---|
| 2062 | LoopInfo* loop = block->loop_info(); | 
|---|
| 2063 | if (loop != nullptr) { | 
|---|
| 2064 | return loop->id(); | 
|---|
| 2065 | } | 
|---|
| 2066 | return -1; | 
|---|
| 2067 | } | 
|---|
| 2068 |  | 
|---|
| 2069 | void FlowGraph::WidenSmiToInt32() { | 
|---|
| 2070 | if (!FLAG_use_smi_widening) { | 
|---|
| 2071 | return; | 
|---|
| 2072 | } | 
|---|
| 2073 |  | 
|---|
| 2074 | GrowableArray<BinarySmiOpInstr*> candidates; | 
|---|
| 2075 |  | 
|---|
| 2076 | // Step 1. Collect all instructions that potentially benefit from widening of | 
|---|
| 2077 | // their operands (or their result) into int32 range. | 
|---|
| 2078 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2079 | block_it.Advance()) { | 
|---|
| 2080 | for (ForwardInstructionIterator instr_it(block_it.Current()); | 
|---|
| 2081 | !instr_it.Done(); instr_it.Advance()) { | 
|---|
| 2082 | BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp(); | 
|---|
| 2083 | if ((smi_op != NULL) && smi_op->HasSSATemp() && | 
|---|
| 2084 | BenefitsFromWidening(smi_op) && CanBeWidened(smi_op)) { | 
|---|
| 2085 | candidates.Add(smi_op); | 
|---|
| 2086 | } | 
|---|
| 2087 | } | 
|---|
| 2088 | } | 
|---|
| 2089 |  | 
|---|
| 2090 | if (candidates.is_empty()) { | 
|---|
| 2091 | return; | 
|---|
| 2092 | } | 
|---|
| 2093 |  | 
|---|
| 2094 | // Step 2. For each block in the graph compute which loop it belongs to. | 
|---|
| 2095 | // We will use this information later during computation of the widening's | 
|---|
| 2096 | // gain: we are going to assume that only conversions occurring inside the | 
|---|
| 2097 | // same loop should be counted against the gain; all other conversions | 
|---|
| 2098 | // can be hoisted and thus cost nothing compared to the loop cost itself. | 
|---|
| 2099 | GetLoopHierarchy(); | 
|---|
| 2100 |  | 
|---|
| 2101 | // Step 3. For each candidate transitively collect all other BinarySmiOpInstr | 
|---|
| 2102 | // and PhiInstr that depend on it and that it depends on, and count the number | 
|---|
| 2103 | // of untagging operations that we save under the assumption that this whole | 
|---|
| 2104 | // graph of values uses the kUnboxedInt32 representation instead of kTagged. | 
|---|
| 2105 | // Convert those graphs that have positive gain to kUnboxedInt32. | 
|---|
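|  | // The gain accounting below is roughly: +1 for every widened BinarySmiOp that | 
|---|
|  | // benefits and for every value flowing to or from a 64-bit op (tagging avoided), | 
|---|
|  | // -1 for every tagged Smi input or use in the same loop that now needs a conversion. | 
|---|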
| 2106 |  | 
|---|
| 2107 | // BitVector containing SSA indexes of all processed definitions. Used to skip | 
|---|
| 2108 | // those candidates that belong to the dependency graph of another candidate. | 
|---|
| 2109 | BitVector* processed = new (Z) BitVector(Z, current_ssa_temp_index()); | 
|---|
| 2110 |  | 
|---|
| 2111 | // Worklist used to collect dependency graph. | 
|---|
| 2112 | DefinitionWorklist worklist(this, candidates.length()); | 
|---|
| 2113 | for (intptr_t i = 0; i < candidates.length(); i++) { | 
|---|
| 2114 | BinarySmiOpInstr* op = candidates[i]; | 
|---|
| 2115 | if (op->WasEliminated() || processed->Contains(op->ssa_temp_index())) { | 
|---|
| 2116 | continue; | 
|---|
| 2117 | } | 
|---|
| 2118 |  | 
|---|
| 2119 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2120 | THR_Print( "analysing candidate: %s\n", op->ToCString()); | 
|---|
| 2121 | } | 
|---|
| 2122 | worklist.Clear(); | 
|---|
| 2123 | worklist.Add(op); | 
|---|
| 2124 |  | 
|---|
| 2125 | // Collect dependency graph. Note: more items are added to worklist | 
|---|
| 2126 | // inside this loop. | 
|---|
| 2127 | intptr_t gain = 0; | 
|---|
| 2128 | for (intptr_t j = 0; j < worklist.definitions().length(); j++) { | 
|---|
| 2129 | Definition* defn = worklist.definitions()[j]; | 
|---|
| 2130 |  | 
|---|
| 2131 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2132 | THR_Print( "> %s\n", defn->ToCString()); | 
|---|
| 2133 | } | 
|---|
| 2134 |  | 
|---|
| 2135 | if (defn->IsBinarySmiOp() && | 
|---|
| 2136 | BenefitsFromWidening(defn->AsBinarySmiOp())) { | 
|---|
| 2137 | gain++; | 
|---|
| 2138 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2139 | THR_Print( "^ [%"Pd "] (o) %s\n", gain, defn->ToCString()); | 
|---|
| 2140 | } | 
|---|
| 2141 | } | 
|---|
| 2142 |  | 
|---|
| 2143 | const intptr_t defn_loop = LoopId(defn->GetBlock()); | 
|---|
| 2144 |  | 
|---|
| 2145 | // Process all inputs. | 
|---|
| 2146 | for (intptr_t k = 0; k < defn->InputCount(); k++) { | 
|---|
| 2147 | Definition* input = defn->InputAt(k)->definition(); | 
|---|
| 2148 | if (input->IsBinarySmiOp() && CanBeWidened(input->AsBinarySmiOp())) { | 
|---|
| 2149 | worklist.Add(input); | 
|---|
| 2150 | } else if (input->IsPhi() && (input->Type()->ToCid() == kSmiCid)) { | 
|---|
| 2151 | worklist.Add(input); | 
|---|
| 2152 | } else if (input->IsBinaryInt64Op()) { | 
|---|
| 2153 | // Mint operation produces untagged result. We avoid tagging. | 
|---|
| 2154 | gain++; | 
|---|
| 2155 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2156 | THR_Print( "^ [%"Pd "] (i) %s\n", gain, input->ToCString()); | 
|---|
| 2157 | } | 
|---|
| 2158 | } else if (defn_loop == LoopId(input->GetBlock()) && | 
|---|
| 2159 | (input->Type()->ToCid() == kSmiCid)) { | 
|---|
| 2160 | // Input comes from the same loop, is known to be smi and requires | 
|---|
| 2161 | // untagging. | 
|---|
| 2162 | // TODO(vegorov) this heuristic assumes that values that are not | 
|---|
| 2163 | // known to be smi have to be checked and this check can be | 
|---|
| 2164 | // coalesced with untagging. Start coalescing them. | 
|---|
| 2165 | gain--; | 
|---|
| 2166 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2167 | THR_Print( "v [%"Pd "] (i) %s\n", gain, input->ToCString()); | 
|---|
| 2168 | } | 
|---|
| 2169 | } | 
|---|
| 2170 | } | 
|---|
| 2171 |  | 
|---|
| 2172 | // Process all uses. | 
|---|
| 2173 | for (Value* use = defn->input_use_list(); use != NULL; | 
|---|
| 2174 | use = use->next_use()) { | 
|---|
| 2175 | Instruction* instr = use->instruction(); | 
|---|
| 2176 | Definition* use_defn = instr->AsDefinition(); | 
|---|
| 2177 | if (use_defn == NULL) { | 
|---|
| 2178 | // We assume that tagging before returning or pushing an argument costs | 
|---|
| 2179 | // very little compared to the cost of the return/call itself. | 
|---|
| 2180 | ASSERT(!instr->IsPushArgument()); | 
|---|
| 2181 | if (!instr->IsReturn() && | 
|---|
| 2182 | (use->use_index() >= instr->ArgumentCount())) { | 
|---|
| 2183 | gain--; | 
|---|
| 2184 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2185 | THR_Print( "v [%"Pd "] (u) %s\n", gain, | 
|---|
| 2186 | use->instruction()->ToCString()); | 
|---|
| 2187 | } | 
|---|
| 2188 | } | 
|---|
| 2189 | continue; | 
|---|
| 2190 | } else if (use_defn->IsBinarySmiOp() && | 
|---|
| 2191 | CanBeWidened(use_defn->AsBinarySmiOp())) { | 
|---|
| 2192 | worklist.Add(use_defn); | 
|---|
| 2193 | } else if (use_defn->IsPhi() && | 
|---|
| 2194 | use_defn->AsPhi()->Type()->ToCid() == kSmiCid) { | 
|---|
| 2195 | worklist.Add(use_defn); | 
|---|
| 2196 | } else if (use_defn->IsBinaryInt64Op()) { | 
|---|
| 2197 | // BinaryInt64Op requires untagging of its inputs. | 
|---|
| 2198 | // Converting kUnboxedInt32 to kUnboxedInt64 is essentially a zero-cost | 
|---|
| 2199 | // sign extension operation. | 
|---|
| 2200 | gain++; | 
|---|
| 2201 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2202 | THR_Print( "^ [%"Pd "] (u) %s\n", gain, | 
|---|
| 2203 | use->instruction()->ToCString()); | 
|---|
| 2204 | } | 
|---|
| 2205 | } else if (defn_loop == LoopId(instr->GetBlock())) { | 
|---|
| 2206 | gain--; | 
|---|
| 2207 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2208 | THR_Print( "v [%"Pd "] (u) %s\n", gain, | 
|---|
| 2209 | use->instruction()->ToCString()); | 
|---|
| 2210 | } | 
|---|
| 2211 | } | 
|---|
| 2212 | } | 
|---|
| 2213 | } | 
|---|
| 2214 |  | 
|---|
| 2215 | processed->AddAll(worklist.contains_vector()); | 
|---|
| 2216 |  | 
|---|
| 2217 | if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 
|---|
| 2218 | THR_Print( "~ %s gain %"Pd "\n", op->ToCString(), gain); | 
|---|
| 2219 | } | 
|---|
| 2220 |  | 
|---|
| 2221 | if (gain > 0) { | 
|---|
| 2222 | // We have positive gain from widening. Convert all BinarySmiOpInstr into | 
|---|
| 2223 | // BinaryInt32OpInstr and set representation of all phis to kUnboxedInt32. | 
|---|
| 2224 | for (intptr_t j = 0; j < worklist.definitions().length(); j++) { | 
|---|
| 2225 | Definition* defn = worklist.definitions()[j]; | 
|---|
| 2226 | ASSERT(defn->IsPhi() || defn->IsBinarySmiOp()); | 
|---|
| 2227 |  | 
|---|
| 2228 | // Since we widen the integer representation we have to clear out type | 
|---|
| 2229 | // propagation information (e.g. it might no longer be a _Smi). | 
|---|
| 2230 | for (Value::Iterator it(defn->input_use_list()); !it.Done(); | 
|---|
| 2231 | it.Advance()) { | 
|---|
| 2232 | it.Current()->SetReachingType(nullptr); | 
|---|
| 2233 | } | 
|---|
| 2234 |  | 
|---|
| 2235 | if (defn->IsBinarySmiOp()) { | 
|---|
| 2236 | BinarySmiOpInstr* smi_op = defn->AsBinarySmiOp(); | 
|---|
| 2237 | BinaryInt32OpInstr* int32_op = new (Z) BinaryInt32OpInstr( | 
|---|
| 2238 | smi_op->op_kind(), smi_op->left()->CopyWithType(), | 
|---|
| 2239 | smi_op->right()->CopyWithType(), smi_op->DeoptimizationTarget()); | 
|---|
| 2240 |  | 
|---|
| 2241 | smi_op->ReplaceWith(int32_op, NULL); | 
|---|
| 2242 | } else if (defn->IsPhi()) { | 
|---|
| 2243 | defn->AsPhi()->set_representation(kUnboxedInt32); | 
|---|
| 2244 | ASSERT(defn->Type()->IsInt()); | 
|---|
| 2245 | } | 
|---|
| 2246 | } | 
|---|
| 2247 | } | 
|---|
| 2248 | } | 
|---|
| 2249 | } | 
|---|
| 2250 | #else | 
|---|
| 2251 | void FlowGraph::WidenSmiToInt32() { | 
|---|
| 2252 | // TODO(vegorov) ideally on 64-bit platforms we would like to narrow smi | 
|---|
| 2253 | // operations to 32-bit where it saves tagging and untagging and allows us | 
|---|
| 2254 | // to use shorter (and faster) instructions. But we currently don't | 
|---|
| 2255 | // save enough range information in the ICData to drive this decision. | 
|---|
| 2256 | } | 
|---|
| 2257 | #endif | 
|---|
| 2258 |  | 
|---|
| 2259 | void FlowGraph::EliminateEnvironments() { | 
|---|
| 2260 | // After this pass we can no longer perform LICM and hoist instructions | 
|---|
| 2261 | // that can deoptimize. | 
|---|
| 2262 |  | 
|---|
| 2263 | disallow_licm(); | 
|---|
| 2264 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2265 | block_it.Advance()) { | 
|---|
| 2266 | BlockEntryInstr* block = block_it.Current(); | 
|---|
| 2267 | if (!block->IsCatchBlockEntry()) { | 
|---|
| 2268 | block->RemoveEnvironment(); | 
|---|
| 2269 | } | 
|---|
| 2270 | for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { | 
|---|
| 2271 | Instruction* current = it.Current(); | 
|---|
| 2272 | if (!current->ComputeCanDeoptimize() && | 
|---|
| 2273 | (!current->MayThrow() || !current->GetBlock()->InsideTryBlock())) { | 
|---|
| 2274 | // Instructions that can throw need an environment for optimized | 
|---|
| 2275 | // try-catch. | 
|---|
| 2276 | // TODO(srdjan): --source-lines needs deopt environments to get at | 
|---|
| 2277 | // the code for this instruction, however, leaving the environment | 
|---|
| 2278 | // changes code. | 
|---|
| 2279 | current->RemoveEnvironment(); | 
|---|
| 2280 | } | 
|---|
| 2281 | } | 
|---|
| 2282 | } | 
|---|
| 2283 | } | 
|---|
| 2284 |  | 
|---|
| 2285 | bool FlowGraph::Canonicalize() { | 
|---|
| 2286 | bool changed = false; | 
|---|
| 2287 |  | 
|---|
| 2288 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2289 | block_it.Advance()) { | 
|---|
| 2290 | BlockEntryInstr* const block = block_it.Current(); | 
|---|
| 2291 | if (auto join = block->AsJoinEntry()) { | 
|---|
| 2292 | for (PhiIterator it(join); !it.Done(); it.Advance()) { | 
|---|
| 2293 | PhiInstr* current = it.Current(); | 
|---|
| 2294 | if (current->HasUnmatchedInputRepresentations()) { | 
|---|
| 2295 | // Can't canonicalize this instruction until all conversions for its | 
|---|
| 2296 | // inputs are inserted. | 
|---|
| 2297 | continue; | 
|---|
| 2298 | } | 
|---|
| 2299 |  | 
|---|
| 2300 | Definition* replacement = current->Canonicalize(this); | 
|---|
| 2301 | ASSERT(replacement != nullptr); | 
|---|
| 2302 | if (replacement != current) { | 
|---|
| 2303 | current->ReplaceUsesWith(replacement); | 
|---|
| 2304 | it.RemoveCurrentFromGraph(); | 
|---|
| 2305 | changed = true; | 
|---|
| 2306 | } | 
|---|
| 2307 | } | 
|---|
| 2308 | } | 
|---|
| 2309 | for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { | 
|---|
| 2310 | Instruction* current = it.Current(); | 
|---|
| 2311 | if (current->HasUnmatchedInputRepresentations()) { | 
|---|
| 2312 | // Can't canonicalize this instruction until all conversions for its | 
|---|
| 2313 | // inputs are inserted. | 
|---|
| 2314 | continue; | 
|---|
| 2315 | } | 
|---|
| 2316 |  | 
|---|
| 2317 | Instruction* replacement = current->Canonicalize(this); | 
|---|
| 2318 |  | 
|---|
| 2319 | if (replacement != current) { | 
|---|
| 2320 | // For non-definitions Canonicalize should return either NULL or | 
|---|
| 2321 | // this. | 
|---|
| 2322 | ASSERT((replacement == NULL) || current->IsDefinition()); | 
|---|
| 2323 | ReplaceCurrentInstruction(&it, current, replacement); | 
|---|
| 2324 | changed = true; | 
|---|
| 2325 | } | 
|---|
| 2326 | } | 
|---|
| 2327 | } | 
|---|
| 2328 | return changed; | 
|---|
| 2329 | } | 
|---|
| 2330 |  | 
|---|
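|  | // Ensures every InstanceCall/StaticCall carries an ICData array: calls that | 
|---|
|  | // do not have one yet get an empty ICData keyed by their deopt id (with the | 
|---|
|  | // known target recorded for static calls), so later passes can rely on | 
|---|
|  | // call->HasICData(). | 
|---|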
| 2331 | void FlowGraph::PopulateWithICData(const Function& function) { | 
|---|
| 2332 | Zone* zone = Thread::Current()->zone(); | 
|---|
| 2333 |  | 
|---|
| 2334 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2335 | block_it.Advance()) { | 
|---|
| 2336 | ForwardInstructionIterator it(block_it.Current()); | 
|---|
| 2337 | for (; !it.Done(); it.Advance()) { | 
|---|
| 2338 | Instruction* instr = it.Current(); | 
|---|
| 2339 | if (instr->IsInstanceCall()) { | 
|---|
| 2340 | InstanceCallInstr* call = instr->AsInstanceCall(); | 
|---|
| 2341 | if (!call->HasICData()) { | 
|---|
| 2342 | const Array& arguments_descriptor = | 
|---|
| 2343 | Array::Handle(zone, call->GetArgumentsDescriptor()); | 
|---|
| 2344 | const ICData& ic_data = ICData::ZoneHandle( | 
|---|
| 2345 | zone, | 
|---|
| 2346 | ICData::New(function, call->function_name(), arguments_descriptor, | 
|---|
| 2347 | call->deopt_id(), call->checked_argument_count(), | 
|---|
| 2348 | ICData::kInstance)); | 
|---|
| 2349 | call->set_ic_data(&ic_data); | 
|---|
| 2350 | } | 
|---|
| 2351 | } else if (instr->IsStaticCall()) { | 
|---|
| 2352 | StaticCallInstr* call = instr->AsStaticCall(); | 
|---|
| 2353 | if (!call->HasICData()) { | 
|---|
| 2354 | const Array& arguments_descriptor = | 
|---|
| 2355 | Array::Handle(zone, call->GetArgumentsDescriptor()); | 
|---|
| 2356 | const Function& target = call->function(); | 
|---|
| 2357 | int num_args_checked = | 
|---|
| 2358 | MethodRecognizer::NumArgsCheckedForStaticCall(target); | 
|---|
| 2359 | const ICData& ic_data = ICData::ZoneHandle( | 
|---|
| 2360 | zone, ICData::New(function, String::Handle(zone, target.name()), | 
|---|
| 2361 | arguments_descriptor, call->deopt_id(), | 
|---|
| 2362 | num_args_checked, ICData::kStatic)); | 
|---|
| 2363 | ic_data.AddTarget(target); | 
|---|
| 2364 | call->set_ic_data(&ic_data); | 
|---|
| 2365 | } | 
|---|
| 2366 | } | 
|---|
| 2367 | } | 
|---|
| 2368 | } | 
|---|
| 2369 | } | 
|---|
| 2370 |  | 
|---|
| 2371 | // Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the | 
|---|
| 2372 | // shift can be a truncating Smi shift-left and the result is always a Smi. | 
|---|
| 2373 | // Merging occurs only per basic-block. | 
|---|
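|  | // Illustrative sketch (not an actual IL dump) of the rewrite done below: | 
|---|
|  | //   Dart:       var masked = (x << 3) & 0xff; | 
|---|
|  | //   IL before:  v1 <- BinarySmiOp(kSHL, x, 3)        // checks for overflow | 
|---|
|  | //               v2 <- BinaryInt64Op(kBIT_AND, v1, 0xff) | 
|---|
|  | //   IL after:   v1 <- BinarySmiOp(kSHL, x, 3)        // marked truncating | 
|---|
|  | //               v2 <- BinarySmiOp(kBIT_AND, v1, 0xff)  // result stays a Smi | 
|---|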
| 2374 | void FlowGraph::TryOptimizePatterns() { | 
|---|
| 2375 | if (!FLAG_truncating_left_shift) return; | 
|---|
| 2376 | GrowableArray<BinarySmiOpInstr*> div_mod_merge; | 
|---|
| 2377 | GrowableArray<InvokeMathCFunctionInstr*> sin_cos_merge; | 
|---|
| 2378 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2379 | block_it.Advance()) { | 
|---|
| 2380 | // Merging only per basic-block. | 
|---|
| 2381 | div_mod_merge.Clear(); | 
|---|
| 2382 | sin_cos_merge.Clear(); | 
|---|
| 2383 | ForwardInstructionIterator it(block_it.Current()); | 
|---|
| 2384 | for (; !it.Done(); it.Advance()) { | 
|---|
| 2385 | if (it.Current()->IsBinarySmiOp()) { | 
|---|
| 2386 | BinarySmiOpInstr* binop = it.Current()->AsBinarySmiOp(); | 
|---|
| 2387 | if (binop->op_kind() == Token::kBIT_AND) { | 
|---|
| 2388 | OptimizeLeftShiftBitAndSmiOp(&it, binop, binop->left()->definition(), | 
|---|
| 2389 | binop->right()->definition()); | 
|---|
| 2390 | } else if ((binop->op_kind() == Token::kTRUNCDIV) || | 
|---|
| 2391 | (binop->op_kind() == Token::kMOD)) { | 
|---|
| 2392 | if (binop->HasUses()) { | 
|---|
| 2393 | div_mod_merge.Add(binop); | 
|---|
| 2394 | } | 
|---|
| 2395 | } | 
|---|
| 2396 | } else if (it.Current()->IsBinaryInt64Op()) { | 
|---|
| 2397 | BinaryInt64OpInstr* mintop = it.Current()->AsBinaryInt64Op(); | 
|---|
| 2398 | if (mintop->op_kind() == Token::kBIT_AND) { | 
|---|
| 2399 | OptimizeLeftShiftBitAndSmiOp(&it, mintop, | 
|---|
| 2400 | mintop->left()->definition(), | 
|---|
| 2401 | mintop->right()->definition()); | 
|---|
| 2402 | } | 
|---|
| 2403 | } else if (it.Current()->IsInvokeMathCFunction()) { | 
|---|
| 2404 | InvokeMathCFunctionInstr* math_unary = | 
|---|
| 2405 | it.Current()->AsInvokeMathCFunction(); | 
|---|
| 2406 | if ((math_unary->recognized_kind() == MethodRecognizer::kMathSin) || | 
|---|
| 2407 | (math_unary->recognized_kind() == MethodRecognizer::kMathCos)) { | 
|---|
| 2408 | if (math_unary->HasUses()) { | 
|---|
| 2409 | sin_cos_merge.Add(math_unary); | 
|---|
| 2410 | } | 
|---|
| 2411 | } | 
|---|
| 2412 | } | 
|---|
| 2413 | } | 
|---|
| 2414 | TryMergeTruncDivMod(&div_mod_merge); | 
|---|
| 2415 | } | 
|---|
| 2416 | } | 
|---|
| 2417 |  | 
|---|
| 2418 | // Returns true if use is dominated by the given instruction. | 
|---|
| 2419 | // Note: uses that occur at the instruction itself are not dominated by it. | 
|---|
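|  | // (Illustrative: a use through a phi input counts as dominated when dom's | 
|---|
|  | // block dominates the corresponding predecessor of the join, even if dom | 
|---|
|  | // does not dominate the join block itself.) | 
|---|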
| 2420 | static bool IsDominatedUse(Instruction* dom, Value* use) { | 
|---|
| 2421 | BlockEntryInstr* dom_block = dom->GetBlock(); | 
|---|
| 2422 |  | 
|---|
| 2423 | Instruction* instr = use->instruction(); | 
|---|
| 2424 |  | 
|---|
| 2425 | PhiInstr* phi = instr->AsPhi(); | 
|---|
| 2426 | if (phi != NULL) { | 
|---|
| 2427 | return dom_block->Dominates(phi->block()->PredecessorAt(use->use_index())); | 
|---|
| 2428 | } | 
|---|
| 2429 |  | 
|---|
| 2430 | BlockEntryInstr* use_block = instr->GetBlock(); | 
|---|
| 2431 | if (use_block == dom_block) { | 
|---|
| 2432 | // Fast path for the case of block entry. | 
|---|
| 2433 | if (dom_block == dom) return true; | 
|---|
| 2434 |  | 
|---|
| 2435 | for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) { | 
|---|
| 2436 | if (curr == instr) return true; | 
|---|
| 2437 | } | 
|---|
| 2438 |  | 
|---|
| 2439 | return false; | 
|---|
| 2440 | } | 
|---|
| 2441 |  | 
|---|
| 2442 | return dom_block->Dominates(use_block); | 
|---|
| 2443 | } | 
|---|
| 2444 |  | 
|---|
| 2445 | void FlowGraph::RenameDominatedUses(Definition* def, | 
|---|
| 2446 | Instruction* dom, | 
|---|
| 2447 | Definition* other) { | 
|---|
| 2448 | for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) { | 
|---|
| 2449 | Value* use = it.Current(); | 
|---|
| 2450 | if (IsDominatedUse(dom, use)) { | 
|---|
| 2451 | use->BindTo(other); | 
|---|
| 2452 | } | 
|---|
| 2453 | } | 
|---|
| 2454 | } | 
|---|
| 2455 |  | 
|---|
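|  | // For each definition that redefines another value (such as Redefinition | 
|---|
|  | // instructions), rebinds the original value's uses that are dominated by the | 
|---|
|  | // redefinition, so later passes see the narrowed value. | 
|---|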
| 2456 | void FlowGraph::RenameUsesDominatedByRedefinitions() { | 
|---|
| 2457 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2458 | block_it.Advance()) { | 
|---|
| 2459 | for (ForwardInstructionIterator instr_it(block_it.Current()); | 
|---|
| 2460 | !instr_it.Done(); instr_it.Advance()) { | 
|---|
| 2461 | Definition* definition = instr_it.Current()->AsDefinition(); | 
|---|
| 2462 | // CheckArrayBound instructions have their own mechanism for ensuring | 
|---|
| 2463 | // proper dependencies, so we don't rewrite those here. | 
|---|
| 2464 | if (definition != nullptr && !definition->IsCheckArrayBound()) { | 
|---|
| 2465 | Value* redefined = definition->RedefinedValue(); | 
|---|
| 2466 | if (redefined != nullptr) { | 
|---|
| 2467 | if (!definition->HasSSATemp()) { | 
|---|
| 2468 | AllocateSSAIndexes(definition); | 
|---|
| 2469 | } | 
|---|
| 2470 | Definition* original = redefined->definition(); | 
|---|
| 2471 | RenameDominatedUses(original, definition, definition); | 
|---|
| 2472 | } | 
|---|
| 2473 | } | 
|---|
| 2474 | } | 
|---|
| 2475 | } | 
|---|
| 2476 | } | 
|---|
| 2477 |  | 
|---|
| 2478 | static bool IsPositiveOrZeroSmiConst(Definition* d) { | 
|---|
| 2479 | ConstantInstr* const_instr = d->AsConstant(); | 
|---|
| 2480 | if ((const_instr != NULL) && (const_instr->value().IsSmi())) { | 
|---|
| 2481 | return Smi::Cast(const_instr->value()).Value() >= 0; | 
|---|
| 2482 | } | 
|---|
| 2483 | return false; | 
|---|
| 2484 | } | 
|---|
| 2485 |  | 
|---|
| 2486 | static BinarySmiOpInstr* AsSmiShiftLeftInstruction(Definition* d) { | 
|---|
| 2487 | BinarySmiOpInstr* instr = d->AsBinarySmiOp(); | 
|---|
| 2488 | if ((instr != NULL) && (instr->op_kind() == Token::kSHL)) { | 
|---|
| 2489 | return instr; | 
|---|
| 2490 | } | 
|---|
| 2491 | return NULL; | 
|---|
| 2492 | } | 
|---|
| 2493 |  | 
|---|
| 2494 | void FlowGraph::OptimizeLeftShiftBitAndSmiOp( | 
|---|
| 2495 | ForwardInstructionIterator* current_iterator, | 
|---|
| 2496 | Definition* bit_and_instr, | 
|---|
| 2497 | Definition* left_instr, | 
|---|
| 2498 | Definition* right_instr) { | 
|---|
| 2499 | ASSERT(bit_and_instr != NULL); | 
|---|
| 2500 | ASSERT((left_instr != NULL) && (right_instr != NULL)); | 
|---|
| 2501 |  | 
|---|
| 2502 | // Check for the pattern; smi_shift_left must be single-use. | 
|---|
| 2503 | bool is_positive_or_zero = IsPositiveOrZeroSmiConst(left_instr); | 
|---|
| 2504 | if (!is_positive_or_zero) { | 
|---|
| 2505 | is_positive_or_zero = IsPositiveOrZeroSmiConst(right_instr); | 
|---|
| 2506 | } | 
|---|
| 2507 | if (!is_positive_or_zero) return; | 
|---|
| 2508 |  | 
|---|
| 2509 | BinarySmiOpInstr* smi_shift_left = NULL; | 
|---|
| 2510 | if (bit_and_instr->InputAt(0)->IsSingleUse()) { | 
|---|
| 2511 | smi_shift_left = AsSmiShiftLeftInstruction(left_instr); | 
|---|
| 2512 | } | 
|---|
| 2513 | if ((smi_shift_left == NULL) && (bit_and_instr->InputAt(1)->IsSingleUse())) { | 
|---|
| 2514 | smi_shift_left = AsSmiShiftLeftInstruction(right_instr); | 
|---|
| 2515 | } | 
|---|
| 2516 | if (smi_shift_left == NULL) return; | 
|---|
| 2517 |  | 
|---|
| 2518 | // Pattern recognized. | 
|---|
| 2519 | smi_shift_left->mark_truncating(); | 
|---|
| 2520 | ASSERT(bit_and_instr->IsBinarySmiOp() || bit_and_instr->IsBinaryInt64Op()); | 
|---|
| 2521 | if (bit_and_instr->IsBinaryInt64Op()) { | 
|---|
| 2522 | // Replace Mint op with Smi op. | 
|---|
| 2523 | BinarySmiOpInstr* smi_op = new (Z) BinarySmiOpInstr( | 
|---|
| 2524 | Token::kBIT_AND, new (Z) Value(left_instr), new (Z) Value(right_instr), | 
|---|
| 2525 | DeoptId::kNone);  // BIT_AND cannot deoptimize. | 
|---|
| 2526 | bit_and_instr->ReplaceWith(smi_op, current_iterator); | 
|---|
| 2527 | } | 
|---|
| 2528 | } | 
|---|
| 2529 |  | 
|---|
| 2530 | // Dart: | 
|---|
| 2531 | //  var x = d % 10; | 
|---|
| 2532 | //  var y = d ~/ 10; | 
|---|
| 2533 | //  var z = x + y; | 
|---|
| 2534 | // | 
|---|
| 2535 | // IL: | 
|---|
| 2536 | //  v4 <- %(v2, v3) | 
|---|
| 2537 | //  v5 <- ~/(v2, v3) | 
|---|
| 2538 | //  v6 <- +(v4, v5) | 
|---|
| 2539 | // | 
|---|
| 2540 | // IL optimized: | 
|---|
| 2541 | //  v4 <- DIVMOD(v2, v3); | 
|---|
| 2542 | //  v5 <- LoadIndexed(v4, 0); // ~/ result | 
|---|
| 2543 | //  v6 <- LoadIndexed(v4, 1); // % result | 
|---|
| 2544 | //  v7 <- +(v5, v6) | 
|---|
| 2545 | // Because of the environment it is important that the merged instruction | 
|---|
| 2546 | // replaces the first original instruction encountered. | 
|---|
| 2547 | void FlowGraph::TryMergeTruncDivMod( | 
|---|
| 2548 | GrowableArray<BinarySmiOpInstr*>* merge_candidates) { | 
|---|
| 2549 | if (merge_candidates->length() < 2) { | 
|---|
| 2550 | // Need at least a TRUNCDIV and a MOD. | 
|---|
| 2551 | return; | 
|---|
| 2552 | } | 
|---|
| 2553 | for (intptr_t i = 0; i < merge_candidates->length(); i++) { | 
|---|
| 2554 | BinarySmiOpInstr* curr_instr = (*merge_candidates)[i]; | 
|---|
| 2555 | if (curr_instr == NULL) { | 
|---|
| 2556 | // Instruction was merged already. | 
|---|
| 2557 | continue; | 
|---|
| 2558 | } | 
|---|
| 2559 | ASSERT((curr_instr->op_kind() == Token::kTRUNCDIV) || | 
|---|
| 2560 | (curr_instr->op_kind() == Token::kMOD)); | 
|---|
| 2561 | // Check if there is a kMOD/kTRUNCDIV binop with the same inputs. | 
|---|
| 2562 | const Token::Kind other_kind = (curr_instr->op_kind() == Token::kTRUNCDIV) | 
|---|
| 2563 | ? Token::kMOD | 
|---|
| 2564 | : Token::kTRUNCDIV; | 
|---|
| 2565 | Definition* left_def = curr_instr->left()->definition(); | 
|---|
| 2566 | Definition* right_def = curr_instr->right()->definition(); | 
|---|
| 2567 | for (intptr_t k = i + 1; k < merge_candidates->length(); k++) { | 
|---|
| 2568 | BinarySmiOpInstr* other_binop = (*merge_candidates)[k]; | 
|---|
| 2569 | // 'other_binop' can be NULL if it was already merged. | 
|---|
| 2570 | if ((other_binop != NULL) && (other_binop->op_kind() == other_kind) && | 
|---|
| 2571 | (other_binop->left()->definition() == left_def) && | 
|---|
| 2572 | (other_binop->right()->definition() == right_def)) { | 
|---|
| 2573 | (*merge_candidates)[k] = NULL;  // Clear it. | 
|---|
| 2574 | ASSERT(curr_instr->HasUses()); | 
|---|
| 2575 | AppendExtractNthOutputForMerged( | 
|---|
| 2576 | curr_instr, TruncDivModInstr::OutputIndexOf(curr_instr->op_kind()), | 
|---|
| 2577 | kTagged, kSmiCid); | 
|---|
| 2578 | ASSERT(other_binop->HasUses()); | 
|---|
| 2579 | AppendExtractNthOutputForMerged( | 
|---|
| 2580 | other_binop, | 
|---|
| 2581 | TruncDivModInstr::OutputIndexOf(other_binop->op_kind()), kTagged, | 
|---|
| 2582 | kSmiCid); | 
|---|
| 2583 |  | 
|---|
| 2584 | // Replace with TruncDivMod. | 
|---|
| 2585 | TruncDivModInstr* div_mod = new (Z) TruncDivModInstr( | 
|---|
| 2586 | curr_instr->left()->CopyWithType(), | 
|---|
| 2587 | curr_instr->right()->CopyWithType(), curr_instr->deopt_id()); | 
|---|
| 2588 | curr_instr->ReplaceWith(div_mod, NULL); | 
|---|
| 2589 | other_binop->ReplaceUsesWith(div_mod); | 
|---|
| 2590 | other_binop->RemoveFromGraph(); | 
|---|
| 2591 | // Only one merge possible. Because canonicalization happens later, | 
|---|
| 2592 | // more candidates are possible. | 
|---|
| 2593 | // TODO(srdjan): Allow merging of trunc-div/mod into truncDivMod. | 
|---|
| 2594 | break; | 
|---|
| 2595 | } | 
|---|
| 2596 | } | 
|---|
| 2597 | } | 
|---|
| 2598 | } | 
|---|
| 2599 |  | 
|---|
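|  | // Redirects all uses of 'instr' to an ExtractNthOutput of its result and | 
|---|
|  | // inserts that extraction right after 'instr'; used when 'instr' is about to | 
|---|
|  | // be replaced by a multi-output instruction such as TruncDivMod. | 
|---|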
| 2600 | void FlowGraph::AppendExtractNthOutputForMerged(Definition* instr, | 
|---|
| 2601 | intptr_t index, | 
|---|
| 2602 | Representation rep, | 
|---|
| 2603 | intptr_t cid) { | 
|---|
| 2604 | ExtractNthOutputInstr* extract = | 
|---|
| 2605 | new (Z) ExtractNthOutputInstr(new (Z) Value(instr), index, rep, cid); | 
|---|
| 2606 | instr->ReplaceUsesWith(extract); | 
|---|
| 2607 | InsertAfter(instr, extract, NULL, FlowGraph::kValue); | 
|---|
| 2608 | } | 
|---|
| 2609 |  | 
|---|
| 2610 | // | 
|---|
| 2611 | // Static helpers for the flow graph utilities. | 
|---|
| 2612 | // | 
|---|
| 2613 |  | 
|---|
| 2614 | static TargetEntryInstr* NewTarget(FlowGraph* graph, Instruction* inherit) { | 
|---|
| 2615 | TargetEntryInstr* target = new (graph->zone()) | 
|---|
| 2616 | TargetEntryInstr(graph->allocate_block_id(), | 
|---|
| 2617 | inherit->GetBlock()->try_index(), DeoptId::kNone); | 
|---|
| 2618 | target->InheritDeoptTarget(graph->zone(), inherit); | 
|---|
| 2619 | return target; | 
|---|
| 2620 | } | 
|---|
| 2621 |  | 
|---|
| 2622 | static JoinEntryInstr* NewJoin(FlowGraph* graph, Instruction* inherit) { | 
|---|
| 2623 | JoinEntryInstr* join = new (graph->zone()) | 
|---|
| 2624 | JoinEntryInstr(graph->allocate_block_id(), | 
|---|
| 2625 | inherit->GetBlock()->try_index(), DeoptId::kNone); | 
|---|
| 2626 | join->InheritDeoptTarget(graph->zone(), inherit); | 
|---|
| 2627 | return join; | 
|---|
| 2628 | } | 
|---|
| 2629 |  | 
|---|
| 2630 | static GotoInstr* NewGoto(FlowGraph* graph, | 
|---|
| 2631 | JoinEntryInstr* target, | 
|---|
| 2632 | Instruction* inherit) { | 
|---|
| 2633 | GotoInstr* got = new (graph->zone()) GotoInstr(target, DeoptId::kNone); | 
|---|
| 2634 | got->InheritDeoptTarget(graph->zone(), inherit); | 
|---|
| 2635 | return got; | 
|---|
| 2636 | } | 
|---|
| 2637 |  | 
|---|
| 2638 | static BranchInstr* NewBranch(FlowGraph* graph, | 
|---|
| 2639 | ComparisonInstr* cmp, | 
|---|
| 2640 | Instruction* inherit) { | 
|---|
| 2641 | BranchInstr* bra = new (graph->zone()) BranchInstr(cmp, DeoptId::kNone); | 
|---|
| 2642 | bra->InheritDeoptTarget(graph->zone(), inherit); | 
|---|
| 2643 | return bra; | 
|---|
| 2644 | } | 
|---|
| 2645 |  | 
|---|
| 2646 | // | 
|---|
| 2647 | // Flow graph utilities. | 
|---|
| 2648 | // | 
|---|
| 2649 |  | 
|---|
| 2650 | // Constructs a new diamond decision at the given instruction. | 
|---|
| 2651 | // | 
|---|
| 2652 | //               ENTRY | 
|---|
| 2653 | //             instruction | 
|---|
| 2654 | //            if (compare) | 
|---|
| 2655 | //              /   \ | 
|---|
| 2656 | //           B_TRUE B_FALSE | 
|---|
| 2657 | //              \   / | 
|---|
| 2658 | //               JOIN | 
|---|
| 2659 | // | 
|---|
| 2660 | JoinEntryInstr* FlowGraph::NewDiamond(Instruction* instruction, | 
|---|
| 2661 | Instruction* inherit, | 
|---|
| 2662 | ComparisonInstr* compare, | 
|---|
| 2663 | TargetEntryInstr** b_true, | 
|---|
| 2664 | TargetEntryInstr** b_false) { | 
|---|
| 2665 | BlockEntryInstr* entry = instruction->GetBlock(); | 
|---|
| 2666 |  | 
|---|
| 2667 | TargetEntryInstr* bt = NewTarget(this, inherit); | 
|---|
| 2668 | TargetEntryInstr* bf = NewTarget(this, inherit); | 
|---|
| 2669 | JoinEntryInstr* join = NewJoin(this, inherit); | 
|---|
| 2670 | GotoInstr* gotot = NewGoto(this, join, inherit); | 
|---|
| 2671 | GotoInstr* gotof = NewGoto(this, join, inherit); | 
|---|
| 2672 | BranchInstr* bra = NewBranch(this, compare, inherit); | 
|---|
| 2673 |  | 
|---|
| 2674 | instruction->AppendInstruction(bra); | 
|---|
| 2675 | entry->set_last_instruction(bra); | 
|---|
| 2676 |  | 
|---|
| 2677 | *bra->true_successor_address() = bt; | 
|---|
| 2678 | *bra->false_successor_address() = bf; | 
|---|
| 2679 |  | 
|---|
| 2680 | bt->AppendInstruction(gotot); | 
|---|
| 2681 | bt->set_last_instruction(gotot); | 
|---|
| 2682 |  | 
|---|
| 2683 | bf->AppendInstruction(gotof); | 
|---|
| 2684 | bf->set_last_instruction(gotof); | 
|---|
| 2685 |  | 
|---|
| 2686 | // Update dominance relation incrementally. | 
|---|
| 2687 | for (intptr_t i = 0, n = entry->dominated_blocks().length(); i < n; ++i) { | 
|---|
| 2688 | join->AddDominatedBlock(entry->dominated_blocks()[i]); | 
|---|
| 2689 | } | 
|---|
| 2690 | entry->ClearDominatedBlocks(); | 
|---|
| 2691 | entry->AddDominatedBlock(bt); | 
|---|
| 2692 | entry->AddDominatedBlock(bf); | 
|---|
| 2693 | entry->AddDominatedBlock(join); | 
|---|
| 2694 |  | 
|---|
| 2695 | // TODO(ajcbik): update pred/succ/ordering incrementally too. | 
|---|
| 2696 |  | 
|---|
| 2697 | // Return new blocks. | 
|---|
| 2698 | *b_true = bt; | 
|---|
| 2699 | *b_false = bf; | 
|---|
| 2700 | return join; | 
|---|
| 2701 | } | 
|---|
| 2702 |  | 
|---|
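|  | // Variant for a short-circuit (oper1 && oper2) condition. Illustrative sketch | 
|---|
|  | // of the resulting control flow: | 
|---|
|  | // | 
|---|
|  | //              instruction | 
|---|
|  | //             if (oper1)                  <- first diamond | 
|---|
|  | //              /       \ | 
|---|
|  | //          [oper2]    (skip) | 
|---|
|  | //              \       / | 
|---|
|  | //         phi(oper2, false) | 
|---|
|  | //      Branch(StrictCompare(phi, true))   <- second diamond, returned below | 
|---|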
| 2703 | JoinEntryInstr* FlowGraph::NewDiamond(Instruction* instruction, | 
|---|
| 2704 | Instruction* inherit, | 
|---|
| 2705 | const LogicalAnd& condition, | 
|---|
| 2706 | TargetEntryInstr** b_true, | 
|---|
| 2707 | TargetEntryInstr** b_false) { | 
|---|
| 2708 | // First diamond for first comparison. | 
|---|
| 2709 | TargetEntryInstr* bt = nullptr; | 
|---|
| 2710 | TargetEntryInstr* bf = nullptr; | 
|---|
| 2711 | JoinEntryInstr* mid_point = | 
|---|
| 2712 | NewDiamond(instruction, inherit, condition.oper1, &bt, &bf); | 
|---|
| 2713 |  | 
|---|
| 2714 | // Short-circuit second comparison and connect through phi. | 
|---|
| 2715 | condition.oper2->InsertAfter(bt); | 
|---|
| 2716 | AllocateSSAIndexes(condition.oper2); | 
|---|
| 2717 | condition.oper2->InheritDeoptTarget(zone(), inherit);  // must inherit | 
|---|
| 2718 | PhiInstr* phi = | 
|---|
| 2719 | AddPhi(mid_point, condition.oper2, GetConstant(Bool::False())); | 
|---|
| 2720 | StrictCompareInstr* circuit = new (zone()) StrictCompareInstr( | 
|---|
| 2721 | inherit->token_pos(), Token::kEQ_STRICT, new (zone()) Value(phi), | 
|---|
| 2722 | new (zone()) Value(GetConstant(Bool::True())), false, | 
|---|
| 2723 | DeoptId::kNone);  // don't inherit | 
|---|
| 2724 |  | 
|---|
| 2725 | // Return new blocks through the second diamond. | 
|---|
| 2726 | return NewDiamond(mid_point, inherit, circuit, b_true, b_false); | 
|---|
| 2727 | } | 
|---|
| 2728 |  | 
|---|
| 2729 | PhiInstr* FlowGraph::AddPhi(JoinEntryInstr* join, | 
|---|
| 2730 | Definition* d1, | 
|---|
| 2731 | Definition* d2) { | 
|---|
| 2732 | PhiInstr* phi = new (zone()) PhiInstr(join, 2); | 
|---|
| 2733 | Value* v1 = new (zone()) Value(d1); | 
|---|
| 2734 | Value* v2 = new (zone()) Value(d2); | 
|---|
| 2735 |  | 
|---|
| 2736 | AllocateSSAIndexes(phi); | 
|---|
| 2737 |  | 
|---|
| 2738 | phi->mark_alive(); | 
|---|
| 2739 | phi->SetInputAt(0, v1); | 
|---|
| 2740 | phi->SetInputAt(1, v2); | 
|---|
| 2741 | d1->AddInputUse(v1); | 
|---|
| 2742 | d2->AddInputUse(v2); | 
|---|
| 2743 | join->InsertPhi(phi); | 
|---|
| 2744 |  | 
|---|
| 2745 | return phi; | 
|---|
| 2746 | } | 
|---|
| 2747 |  | 
|---|
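|  | // Materializes each call's arguments as explicit PushArgument instructions | 
|---|
|  | // placed immediately before the call and rewrites the call's inputs (and its | 
|---|
|  | // environment) to refer to them. | 
|---|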
| 2748 | void FlowGraph::InsertPushArguments() { | 
|---|
| 2749 | for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); | 
|---|
| 2750 | block_it.Advance()) { | 
|---|
| 2751 | thread()->CheckForSafepoint(); | 
|---|
| 2752 | for (ForwardInstructionIterator instr_it(block_it.Current()); | 
|---|
| 2753 | !instr_it.Done(); instr_it.Advance()) { | 
|---|
| 2754 | Instruction* instruction = instr_it.Current(); | 
|---|
| 2755 | const intptr_t arg_count = instruction->ArgumentCount(); | 
|---|
| 2756 | if (arg_count == 0) { | 
|---|
| 2757 | continue; | 
|---|
| 2758 | } | 
|---|
| 2759 | PushArgumentsArray* arguments = | 
|---|
| 2760 | new (Z) PushArgumentsArray(zone(), arg_count); | 
|---|
| 2761 | for (intptr_t i = 0; i < arg_count; ++i) { | 
|---|
| 2762 | Value* arg = instruction->ArgumentValueAt(i); | 
|---|
| 2763 | PushArgumentInstr* push_arg = new (Z) PushArgumentInstr( | 
|---|
| 2764 | arg->CopyWithType(Z), instruction->RequiredInputRepresentation(i)); | 
|---|
| 2765 | arguments->Add(push_arg); | 
|---|
| 2766 | // Insert all PushArgument instructions immediately before call. | 
|---|
| 2767 | // PushArgumentInstr::EmitNativeCode may generate more efficient | 
|---|
| 2768 | // code for subsequent PushArgument instructions (ARM, ARM64). | 
|---|
| 2769 | InsertBefore(instruction, push_arg, /*env=*/nullptr, kEffect); | 
|---|
| 2770 | } | 
|---|
| 2771 | instruction->ReplaceInputsWithPushArguments(arguments); | 
|---|
| 2772 | if (instruction->env() != nullptr) { | 
|---|
| 2773 | instruction->RepairPushArgsInEnvironment(); | 
|---|
| 2774 | } | 
|---|
| 2775 | } | 
|---|
| 2776 | } | 
|---|
| 2777 | } | 
|---|
| 2778 |  | 
|---|
| 2779 | }  // namespace dart | 
|---|
| 2780 |  | 
|---|