// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include <functional>

#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/compiler_pass.h"
#include "vm/compiler/write_barrier_elimination.h"

namespace dart {

#if defined(DEBUG)
DEFINE_FLAG(bool,
            trace_write_barrier_elimination,
            false,
            "Trace WriteBarrierElimination pass.");
#endif

class DefinitionIndexPairTrait {
 public:
  typedef Definition* Key;
  typedef intptr_t Value;
  struct Pair {
    Definition* definition = nullptr;
    intptr_t index = -1;
    Pair() {}
    Pair(Definition* definition, intptr_t index)
        : definition(definition), index(index) {}
  };

  static Key KeyOf(Pair kv) { return kv.definition; }
  static Value ValueOf(Pair kv) { return kv.index; }
  static inline intptr_t Hashcode(Key key) { return std::hash<Key>()(key); }
  static inline bool IsKeyEqual(Pair kv, Key key) {
    return kv.definition == key;
  }
};

typedef DirectChainedHashMap<DefinitionIndexPairTrait> DefinitionIndexMap;
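
// A minimal usage sketch (hypothetical values): the analysis assigns each
// usable definition a dense index into the per-block bitvectors through
// this map.
//
//   DefinitionIndexMap indices;
//   indices.Insert({def, /*index=*/0});
//   intptr_t i = indices.LookupValue(def);  // 0, or -1 (the default Pair's
//                                           // index) if absent.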

// Inter-block write-barrier elimination.
//
// This optimization removes write barriers from some store instructions under
// certain assumptions, which the runtime is responsible for upholding.
//
// We can skip a write barrier on a StoreInstanceField to a container object X
// if we know that either:
//   - X is in new-space, or
//   - X is in old-space, and:
//     - X is in the store buffer, and
//     - X is in the deferred marking stack (if concurrent marking is enabled)
//
// The result of an Allocation instruction (Instruction::IsAllocation()) will
// satisfy one of these requirements immediately after the instruction
// if WillAllocateNewOrRemembered() is true.
//
// Without runtime support, we would have to assume that any instruction which
// can trigger a new-space scavenge (Instruction::CanTriggerGC()) might promote
// a new-space temporary into old-space, and we could not skip a store barrier
// on a write into it afterward.
//
// However, many instructions can trigger GC in unlikely cases, like
// CheckStackOverflow and Box. To avoid interrupting write barrier elimination
// across these instructions, the runtime ensures that any live temporaries
// (except arrays) promoted during a scavenge caused by a non-Dart-call
// instruction (see Instruction::CanCallDart()) will be added to the store
// buffer. Additionally, if concurrent marking was initiated, the runtime
// ensures that all live temporaries are also in the deferred marking stack.
//
// See also Thread::RememberLiveTemporaries() and
// Thread::DeferredMarkLiveTemporaries().
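//
// A sketch of the intended effect on hypothetical Dart code (all names are
// illustrative):
//
//   final p = Point();  // Allocation; assume WillAllocateNewOrRemembered().
//   p.x = a;            // Store into p: the write barrier can be skipped.
//   foo();              // Can call Dart code; p may be promoted.
//   p.y = b;            // Store barrier is required again.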
class WriteBarrierElimination : public ValueObject {
 public:
  WriteBarrierElimination(Zone* zone, FlowGraph* flow_graph);

  void Analyze();
  void SaveResults();

 private:
  void IndexDefinitions(Zone* zone);

  bool AnalyzeBlock(BlockEntryInstr* entry);
  void MergePredecessors(BlockEntryInstr* entry);

  void UpdateVectorForBlock(BlockEntryInstr* entry, bool finalize);

  static intptr_t Index(BlockEntryInstr* entry) {
    return entry->postorder_number();
  }

  intptr_t Index(Definition* def) {
    ASSERT(IsUsable(def));
    return definition_indices_.LookupValue(def);
  }

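  // A definition is "usable" if the analysis can track it: a Phi, whose
  // usability is derived from its inputs in MergePredecessors, or an
  // allocation whose result is guaranteed to be new or already remembered.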
  bool IsUsable(Definition* def) {
    return def->IsPhi() || (def->IsAllocation() &&
                            def->AsAllocation()->WillAllocateNewOrRemembered());
  }

#if defined(DEBUG)
  static bool SlotEligibleForWBE(const Slot& slot);
#endif

  FlowGraph* const flow_graph_;
  const GrowableArray<BlockEntryInstr*>* const block_order_;

  // Number of usable definitions in the graph.
  intptr_t definition_count_ = 0;

  // Maps each usable definition to its index in the bitvectors.
  DefinitionIndexMap definition_indices_;

  // Bitvector with the bits of all non-Array allocations set. Intersecting
  // the usable set with this mask removes Array allocations from it.
  BitVector* array_allocations_mask_;

  // Bitvectors tracking, for each block, which allocations are new or
  // remembered at the start of the block (after Phis).
  GrowableArray<BitVector*> usable_allocs_in_;

  // Bitvectors tracking, for each block, which allocations are new or
  // remembered at the end of the block.
  GrowableArray<BitVector*> usable_allocs_out_;

  // Remaining blocks to process.
  GrowableArray<BlockEntryInstr*> worklist_;

  // Temporary used in many functions to avoid repeated zone allocation.
  BitVector* vector_;

  // Bitvector of blocks which have been processed, to ensure each block
  // is processed at least once.
  BitVector* processed_blocks_;

#if defined(DEBUG)
  bool tracing_ = false;
#else
  static constexpr bool tracing_ = false;
#endif
};

WriteBarrierElimination::WriteBarrierElimination(Zone* zone,
                                                 FlowGraph* flow_graph)
    : flow_graph_(flow_graph), block_order_(&flow_graph->postorder()) {
#if defined(DEBUG)
  if (flow_graph->should_print() && FLAG_trace_write_barrier_elimination) {
    tracing_ = true;
  }
#endif

  IndexDefinitions(zone);

  for (intptr_t i = 0; i < block_order_->length(); ++i) {
    usable_allocs_in_.Add(new (zone) BitVector(zone, definition_count_));
    usable_allocs_in_[i]->CopyFrom(vector_);

    usable_allocs_out_.Add(new (zone) BitVector(zone, definition_count_));
    usable_allocs_out_[i]->CopyFrom(vector_);
  }

  processed_blocks_ = new (zone) BitVector(zone, block_order_->length());
}

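// Iterates to a fixed point: all sets start from all-ones, the in- and
// out-sets only shrink (asserted as "convergence" in AnalyzeBlock), and a
// block is re-enqueued only when a predecessor's out-set changes, so the
// worklist loop terminates.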
void WriteBarrierElimination::Analyze() {
  for (intptr_t i = 0; i < block_order_->length(); ++i) {
    worklist_.Add(block_order_->At(i));
  }

  while (!worklist_.is_empty()) {
    auto* const entry = worklist_.RemoveLast();
    if (AnalyzeBlock(entry)) {
      for (intptr_t i = 0; i < entry->last_instruction()->SuccessorCount();
           ++i) {
        BlockEntryInstr* const successor =
            entry->last_instruction()->SuccessorAt(i);
        if (tracing_) {
          THR_Print("Enqueueing block %" Pd "\n", successor->block_id());
        }
        worklist_.Add(successor);
      }
    }
  }
}

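// Replays each block once more from its final in-state, this time allowing
// UpdateVectorForBlock to rewrite stores whose container is known usable.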
void WriteBarrierElimination::SaveResults() {
  for (intptr_t i = 0; i < block_order_->length(); ++i) {
    vector_->CopyFrom(usable_allocs_in_[i]);
    UpdateVectorForBlock(block_order_->At(i), /*finalize=*/true);
  }
}

void WriteBarrierElimination::IndexDefinitions(Zone* zone) {
  BitmapBuilder array_allocations;

  for (intptr_t i = 0; i < block_order_->length(); ++i) {
    BlockEntryInstr* const block = block_order_->At(i);
    if (auto join_block = block->AsJoinEntry()) {
      for (PhiIterator it(join_block); !it.Done(); it.Advance()) {
        array_allocations.Set(definition_count_, false);
        definition_indices_.Insert({it.Current(), definition_count_++});
#if defined(DEBUG)
        if (tracing_) {
          THR_Print("Definition (%" Pd ") has index %" Pd ".\n",
                    it.Current()->ssa_temp_index(), definition_count_ - 1);
        }
#endif
      }
    }
    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      if (Definition* current = it.Current()->AsDefinition()) {
        if (IsUsable(current)) {
          array_allocations.Set(definition_count_, current->IsCreateArray());
          definition_indices_.Insert({current, definition_count_++});
#if defined(DEBUG)
          if (tracing_) {
            THR_Print("Definition (%" Pd ") has index %" Pd ".\n",
                      current->ssa_temp_index(), definition_count_ - 1);
          }
#endif
        }
      }
    }
  }

  vector_ = new (zone) BitVector(zone, definition_count_);
  vector_->SetAll();
  array_allocations_mask_ = new (zone) BitVector(zone, definition_count_);
  for (intptr_t i = 0; i < definition_count_; ++i) {
    if (!array_allocations.Get(i)) array_allocations_mask_->Add(i);
  }
}

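// The in-state of a block is the meet (set intersection) of its predecessors'
// out-states: an allocation is usable on entry only if it is usable along
// every incoming edge. For a block B with predecessors P1 ... Pn:
//
//   in(B) = out(P1) & out(P2) & ... & out(Pn)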
void WriteBarrierElimination::MergePredecessors(BlockEntryInstr* entry) {
  vector_->Clear();
  for (intptr_t i = 0; i < entry->PredecessorCount(); ++i) {
    BitVector* predecessor_set =
        usable_allocs_out_[Index(entry->PredecessorAt(i))];
    if (i == 0) {
      vector_->AddAll(predecessor_set);
    } else {
      vector_->Intersect(predecessor_set);
    }
  }

  if (JoinEntryInstr* join = entry->AsJoinEntry()) {
    // A Phi is usable if and only if all its inputs are usable.
    for (PhiIterator it(join); !it.Done(); it.Advance()) {
      PhiInstr* phi = it.Current();
      ASSERT(phi->InputCount() == entry->PredecessorCount());
      bool is_usable = true;
      for (intptr_t i = 0; i < phi->InputCount(); ++i) {
        BitVector* const predecessor_set =
            usable_allocs_out_[Index(entry->PredecessorAt(i))];
        Definition* const origin = phi->InputAt(i)->definition();
        if (!IsUsable(origin) || !predecessor_set->Contains(Index(origin))) {
          is_usable = false;
          break;
        }
      }
      vector_->Set(Index(phi), is_usable);
    }

#if defined(DEBUG)
    if (tracing_) {
      THR_Print("Merge predecessors for %" Pd ".\n", entry->block_id());
      for (PhiIterator it(join); !it.Done(); it.Advance()) {
        PhiInstr* phi = it.Current();
        THR_Print("%" Pd ": %s\n", phi->ssa_temp_index(),
                  vector_->Contains(Index(phi)) ? "true" : "false");
      }
    }
#endif
  }
}

bool WriteBarrierElimination::AnalyzeBlock(BlockEntryInstr* entry) {
  // Recompute the usable allocs in-set.
  MergePredecessors(entry);

  // If the in-set has not changed, there's no work to do.
  BitVector* const in_set = usable_allocs_in_[Index(entry)];
  ASSERT(vector_->SubsetOf(*in_set));  // convergence
  if (vector_->Equals(*in_set) && processed_blocks_->Contains(Index(entry))) {
    if (tracing_) {
      THR_Print("Bailout of block %" Pd ": inputs didn't change.\n",
                entry->block_id());
    }
    return false;
  } else if (tracing_) {
    THR_Print("Inputs of block %" Pd " changed: ", entry->block_id());
    in_set->Print();
    THR_Print(" -> ");
    vector_->Print();
    THR_Print("\n");
  }

  usable_allocs_in_[Index(entry)]->CopyFrom(vector_);
  UpdateVectorForBlock(entry, /*finalize=*/false);

  processed_blocks_->Add(Index(entry));

  // Successors only need to be updated if the out-set changes.
  if (vector_->Equals(*usable_allocs_out_[Index(entry)])) {
    if (tracing_) {
      THR_Print("Bailout of block %" Pd ": out-set didn't change.\n",
                entry->block_id());
    }
    return false;
  }

  BitVector* const out_set = usable_allocs_out_[Index(entry)];
  ASSERT(vector_->SubsetOf(*out_set));  // convergence
  out_set->CopyFrom(vector_);
  if (tracing_) {
    THR_Print("Block %" Pd " changed.\n", entry->block_id());
  }
  return true;
}

#if defined(DEBUG)
bool WriteBarrierElimination::SlotEligibleForWBE(const Slot& slot) {
  // We assume that Dart code only stores into Instances, Contexts, and
  // UnhandledExceptions. This assumption is used in
  // RestoreWriteBarrierInvariantVisitor::VisitPointers.

  switch (slot.kind()) {
    case Slot::Kind::kCapturedVariable:  // Context
      return true;
    case Slot::Kind::kDartField:  // Instance
      return true;

#define FOR_EACH_NATIVE_SLOT(class, underlying_type, field, type, modifiers)  \
  case Slot::Kind::k##class##_##field:                                        \
    return std::is_base_of<InstanceLayout, underlying_type>::value ||         \
           std::is_base_of<ContextLayout, underlying_type>::value ||          \
           std::is_base_of<UnhandledExceptionLayout, underlying_type>::value;

    NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
#undef FOR_EACH_NATIVE_SLOT

    default:
      return false;
  }
}
#endif

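// Walks the block forward, updating vector_ (the set of currently usable
// allocations) instruction by instruction. When |finalize| is true, also
// removes the barrier from stores whose container is usable at that point.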
void WriteBarrierElimination::UpdateVectorForBlock(BlockEntryInstr* entry,
                                                   bool finalize) {
  for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
    Instruction* const current = it.Current();

    if (finalize) {
      if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
        Definition* const container = instr->instance()->definition();
        if (IsUsable(container) && vector_->Contains(Index(container))) {
          DEBUG_ASSERT(SlotEligibleForWBE(instr->slot()));
          instr->set_emit_store_barrier(kNoStoreBarrier);
        }
      } else if (StoreIndexedInstr* instr = current->AsStoreIndexed()) {
        Definition* const array = instr->array()->definition();
        if (IsUsable(array) && vector_->Contains(Index(array))) {
          instr->set_emit_store_barrier(kNoStoreBarrier);
        }
      }
    }

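    // Calls into Dart can run arbitrary code, and the runtime's remembering
    // guarantee (see the class comment) only covers scavenges caused by
    // non-Dart-call instructions, so every tracked allocation becomes
    // unusable here.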
    if (current->CanCallDart()) {
      vector_->Clear();
    } else if (current->CanTriggerGC()) {
      // Clear array allocations. These are not added to the remembered set
      // by Thread::RememberLiveTemporaries() after a scavenge.
      vector_->Intersect(array_allocations_mask_);
    }

    if (AllocationInstr* const alloc = current->AsAllocation()) {
      if (alloc->WillAllocateNewOrRemembered()) {
        vector_->Add(Index(alloc));
      }
    }
  }
}

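// Pass entry point: run the fixed-point analysis, then rewrite eligible
// stores with kNoStoreBarrier.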
void EliminateWriteBarriers(FlowGraph* flow_graph) {
  WriteBarrierElimination elimination(Thread::Current()->zone(), flow_graph);
  elimination.Analyze();
  elimination.SaveResults();
}

}  // namespace dart