// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/backend/block_scheduler.h"

#include "vm/allocation.h"
#include "vm/code_patcher.h"
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/jit/compiler.h"

namespace dart {

static intptr_t GetEdgeCount(const Array& edge_counters, intptr_t edge_id) {
  if (!FLAG_reorder_basic_blocks) {
    // Assume everything was visited once.
    return 1;
  }
  return Smi::Value(Smi::RawCast(edge_counters.At(edge_id)));
}

// There is an edge from block->successor. Set its weight (edge count per
// function entry).
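// For example (hypothetical counts): if the function was entered 10 times
// and this edge was taken 4 times, the edge weight is 4.0 / 10.0 = 0.4.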
static void SetEdgeWeight(BlockEntryInstr* block,
                          BlockEntryInstr* successor,
                          const Array& edge_counters,
                          intptr_t entry_count) {
  TargetEntryInstr* target = successor->AsTargetEntry();
  if (target != NULL) {
    intptr_t count = GetEdgeCount(edge_counters, target->preorder_number());
    if ((count >= 0) && (entry_count != 0)) {
      double weight =
          static_cast<double>(count) / static_cast<double>(entry_count);
      target->set_edge_weight(weight);
    }
  } else {
    GotoInstr* jump = block->last_instruction()->AsGoto();
    if (jump != NULL) {
      // If this block ends in a goto, the edge count of this edge is the same
      // as the count on the single outgoing edge. This is true as long as
      // the block does not throw an exception.
      intptr_t count = GetEdgeCount(edge_counters, block->preorder_number());
      if ((count >= 0) && (entry_count != 0)) {
        double weight =
            static_cast<double>(count) / static_cast<double>(entry_count);
        jump->set_edge_weight(weight);
      }
    }
  }
}

void BlockScheduler::AssignEdgeWeights(FlowGraph* flow_graph) {
  if (!FLAG_reorder_basic_blocks) {
    return;
  }
  if (CompilerState::Current().is_aot()) {
    return;
  }

  const Function& function = flow_graph->parsed_function().function();
  const Array& ic_data_array =
      Array::Handle(flow_graph->zone(), function.ic_data_array());
  if (Compiler::IsBackgroundCompilation() && ic_data_array.IsNull()) {
    // Deferred loading cleared ic_data_array.
    Compiler::AbortBackgroundCompilation(
        DeoptId::kNone, "BlockScheduler: ICData array cleared");
  }
  if (ic_data_array.IsNull()) {
    DEBUG_ASSERT(Isolate::Current()->HasAttemptedReload() ||
                 function.ForceOptimize());
    return;
  }
  Array& edge_counters = Array::Handle();
  edge_counters ^= ic_data_array.At(0);

  auto graph_entry = flow_graph->graph_entry();
  BlockEntryInstr* entry = graph_entry->normal_entry();
  if (entry == nullptr) {
    entry = graph_entry->osr_entry();
    ASSERT(entry != nullptr);
  }
  const intptr_t entry_count =
      GetEdgeCount(edge_counters, entry->preorder_number());
  graph_entry->set_entry_count(entry_count);

  for (BlockIterator it = flow_graph->reverse_postorder_iterator(); !it.Done();
       it.Advance()) {
    BlockEntryInstr* block = it.Current();
    Instruction* last = block->last_instruction();
    for (intptr_t i = 0; i < last->SuccessorCount(); ++i) {
      BlockEntryInstr* succ = last->SuccessorAt(i);
      SetEdgeWeight(block, succ, edge_counters, entry_count);
    }
  }
}

// A weighted control-flow graph edge.
struct Edge {
  Edge(BlockEntryInstr* source, BlockEntryInstr* target, double weight)
      : source(source), target(target), weight(weight) {}

  static int LowestWeightFirst(const Edge* a, const Edge* b);

  BlockEntryInstr* source;
  BlockEntryInstr* target;
  double weight;
};

// A linked list node in a chain of blocks.
struct Link : public ZoneAllocated {
  Link(BlockEntryInstr* block, Link* next) : block(block), next(next) {}

  BlockEntryInstr* block;
  Link* next;
};

// A chain of blocks with first and last pointers for fast concatenation and
// a length to support adding a shorter chain's links to a longer chain.
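// For instance (hypothetical blocks), a chain over B3, B7, B2 is stored as
// Link(B3) -> Link(B7) -> Link(B2), with first == Link(B3), last == Link(B2)
// and length == 3.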
struct Chain : public ZoneAllocated {
  explicit Chain(BlockEntryInstr* block)
      : first(new Link(block, NULL)), last(first), length(1) {}

  Link* first;
  Link* last;
  intptr_t length;
};

int Edge::LowestWeightFirst(const Edge* a, const Edge* b) {
  if (a->weight < b->weight) {
    return -1;
  }
  return (a->weight > b->weight) ? 1 : 0;
}

// Combine two chains by adding the shorter chain's links to the longer
// chain.
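// For example (hypothetical chains): merging a source chain [B1, B2] into a
// target chain [B4, B5, B6] yields [B1, B2, B4, B5, B6], and only the two
// links of the shorter chain have their block->chain map entries rewritten.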
static void Union(GrowableArray<Chain*>* chains,
                  Chain* source_chain,
                  Chain* target_chain) {
  if (source_chain->length < target_chain->length) {
    for (Link* link = source_chain->first; link != NULL; link = link->next) {
      (*chains)[link->block->postorder_number()] = target_chain;
    }
    // Link the chains.
    source_chain->last->next = target_chain->first;
    // Update the state of the longer chain.
    target_chain->first = source_chain->first;
    target_chain->length += source_chain->length;
  } else {
    for (Link* link = target_chain->first; link != NULL; link = link->next) {
      (*chains)[link->block->postorder_number()] = source_chain;
    }
    source_chain->last->next = target_chain->first;
    source_chain->last = target_chain->last;
    source_chain->length += target_chain->length;
  }
}

void BlockScheduler::ReorderBlocks(FlowGraph* flow_graph) {
  if (CompilerState::Current().is_aot()) {
    ReorderBlocksAOT(flow_graph);
  } else {
    ReorderBlocksJIT(flow_graph);
  }
}

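// Reorder the blocks by greedily growing chains along the heaviest
// control-flow edges first, so that frequently taken edges tend to become
// fall-throughs in the emitted code (a bottom-up chaining scheme roughly in
// the spirit of Pettis-Hansen code positioning).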
void BlockScheduler::ReorderBlocksJIT(FlowGraph* flow_graph) {
  if (!FLAG_reorder_basic_blocks) {
    return;
  }

  // Add every block to a chain of length 1 and compute a list of edges
  // sorted by weight.
  intptr_t block_count = flow_graph->preorder().length();
  GrowableArray<Edge> edges(2 * block_count);

  // A map from a block's postorder number to the chain it is in. Used to
  // implement a simple (ordered) union-find data structure. Chains are
  // stored by pointer so that they are aliased (mutating one mutates all
  // shared ones). Find(n) is simply chains[n].
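  // Union always rewrites the map entries of the smaller chain, so each
  // block's entry is rewritten at most O(log block_count) times in total.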
  GrowableArray<Chain*> chains(block_count);

  for (BlockIterator it = flow_graph->postorder_iterator(); !it.Done();
       it.Advance()) {
    BlockEntryInstr* block = it.Current();
    chains.Add(new Chain(block));

    Instruction* last = block->last_instruction();
    for (intptr_t i = 0; i < last->SuccessorCount(); ++i) {
      BlockEntryInstr* succ = last->SuccessorAt(i);
      double weight = 0.0;
      if (succ->IsTargetEntry()) {
        weight = succ->AsTargetEntry()->edge_weight();
      } else if (last->IsGoto()) {
        weight = last->AsGoto()->edge_weight();
      }
      edges.Add(Edge(block, succ, weight));
    }
  }

  // Handle each edge in turn. The edges are sorted by increasing weight, so
  // popping from the end of the array processes the heaviest edges first.
  edges.Sort(Edge::LowestWeightFirst);
  while (!edges.is_empty()) {
    Edge edge = edges.RemoveLast();
    Chain* source_chain = chains[edge.source->postorder_number()];
    Chain* target_chain = chains[edge.target->postorder_number()];

    // If the source and target are already in the same chain, or if the
    // edge's source or target is not exposed at the appropriate end of a
    // chain, skip this edge.
    if ((source_chain == target_chain) ||
        (edge.source != source_chain->last->block) ||
        (edge.target != target_chain->first->block)) {
      continue;
    }

    Union(&chains, source_chain, target_chain);
  }

  // Ensure the checked entry remains first to avoid needing another offset
  // on Instructions; see Code::EntryPointOf.
  GraphEntryInstr* graph_entry = flow_graph->graph_entry();
  flow_graph->CodegenBlockOrder(true)->Add(graph_entry);
  FunctionEntryInstr* checked_entry = graph_entry->normal_entry();
  if (checked_entry != nullptr) {
    flow_graph->CodegenBlockOrder(true)->Add(checked_entry);
  }
  // Build a new block order. Emit each chain when its first block occurs
  // in the original reverse postorder ordering (which gives a topological
  // sort of the blocks).
  for (intptr_t i = block_count - 1; i >= 0; --i) {
    if (chains[i]->first->block == flow_graph->postorder()[i]) {
      for (Link* link = chains[i]->first; link != NULL; link = link->next) {
        if ((link->block != checked_entry) && (link->block != graph_entry)) {
          flow_graph->CodegenBlockOrder(true)->Add(link->block);
        }
      }
    }
  }
}

// Moves blocks ending in a throw/rethrow, as well as any block post-dominated
// by such a throwing block, to the end.
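// For example, a block that only materializes an error message and then
// jumps (Goto) into a throwing block is itself marked as terminating and is
// emitted at the end, keeping the non-throwing path contiguous.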
void BlockScheduler::ReorderBlocksAOT(FlowGraph* flow_graph) {
  if (!FLAG_reorder_basic_blocks) {
    return;
  }

  auto& reverse_postorder = flow_graph->reverse_postorder();
  const intptr_t block_count = reverse_postorder.length();
  GrowableArray<bool> is_terminating(block_count);
  is_terminating.FillWith(false, 0, block_count);

  // Any block in the worklist is marked and any of its unconditional
  // predecessors need to be marked as well.
  GrowableArray<BlockEntryInstr*> worklist;

  // Add all throwing blocks to the worklist.
  for (intptr_t i = 0; i < block_count; ++i) {
    auto block = reverse_postorder[i];
    auto last = block->last_instruction();
    if (last->IsThrow() || last->IsReThrow()) {
      const intptr_t preorder_nr = block->preorder_number();
      is_terminating[preorder_nr] = true;
      worklist.Add(block);
    }
  }

  // Transitively follow predecessors that will unconditionally end up in a
  // throwing block.
  while (worklist.length() > 0) {
    auto block = worklist.RemoveLast();
    for (intptr_t i = 0; i < block->PredecessorCount(); ++i) {
      auto predecessor = block->PredecessorAt(i);
      if (predecessor->last_instruction()->IsGoto()) {
        const intptr_t preorder_nr = predecessor->preorder_number();
        if (!is_terminating[preorder_nr]) {
          is_terminating[preorder_nr] = true;
          worklist.Add(predecessor);
        }
      }
    }
  }

  // Emit code in reverse postorder but move any throwing blocks (except the
  // function entry, which needs to come first) to the very end.
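  // Both passes below keep blocks in their reverse-postorder relative order;
  // only the split into a non-throwing partition followed by a throwing
  // partition changes the overall order.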
  auto codegen_order = flow_graph->CodegenBlockOrder(true);
  for (intptr_t i = 0; i < block_count; ++i) {
    auto block = reverse_postorder[i];
    const intptr_t preorder_nr = block->preorder_number();
    if (!is_terminating[preorder_nr] || block->IsFunctionEntry()) {
      codegen_order->Add(block);
    }
  }
  for (intptr_t i = 0; i < block_count; ++i) {
    auto block = reverse_postorder[i];
    const intptr_t preorder_nr = block->preorder_number();
    if (is_terminating[preorder_nr] && !block->IsFunctionEntry()) {
      codegen_order->Add(block);
    }
  }
}

}  // namespace dart