1 | // Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/compiler/backend/il_test_helper.h" |
6 | |
7 | #include "vm/compiler/aot/aot_call_specializer.h" |
8 | #include "vm/compiler/backend/block_scheduler.h" |
9 | #include "vm/compiler/backend/flow_graph.h" |
10 | #include "vm/compiler/backend/flow_graph_compiler.h" |
11 | #include "vm/compiler/backend/il.h" |
12 | #include "vm/compiler/backend/il_printer.h" |
13 | #include "vm/compiler/backend/inliner.h" |
14 | #include "vm/compiler/call_specializer.h" |
15 | #include "vm/compiler/compiler_pass.h" |
16 | #include "vm/compiler/jit/compiler.h" |
17 | #include "vm/compiler/jit/jit_call_specializer.h" |
18 | #include "vm/dart_api_impl.h" |
19 | #include "vm/parser.h" |
20 | #include "vm/unit_test.h" |
21 | |
22 | namespace dart { |
23 | |
// Sentinel passed to FlowGraphBuilderHelper when a phi input should refer to
// the phi itself (the real pointer is patched in once the phi exists).
Definition* const FlowGraphBuilderHelper::kPhiSelfReference = nullptr;
25 | |
26 | LibraryPtr LoadTestScript(const char* script, |
27 | Dart_NativeEntryResolver resolver, |
28 | const char* lib_uri) { |
29 | Dart_Handle api_lib; |
30 | { |
31 | TransitionVMToNative transition(Thread::Current()); |
32 | api_lib = TestCase::LoadTestScript(script, resolver, lib_uri); |
33 | EXPECT_VALID(api_lib); |
34 | } |
35 | auto& lib = Library::Handle(); |
36 | lib ^= Api::UnwrapHandle(api_lib); |
37 | EXPECT(!lib.IsNull()); |
38 | return lib.raw(); |
39 | } |
40 | |
41 | FunctionPtr GetFunction(const Library& lib, const char* name) { |
42 | Thread* thread = Thread::Current(); |
43 | const auto& func = Function::Handle(lib.LookupFunctionAllowPrivate( |
44 | String::Handle(Symbols::New(thread, name)))); |
45 | EXPECT(!func.IsNull()); |
46 | return func.raw(); |
47 | } |
48 | |
49 | ClassPtr GetClass(const Library& lib, const char* name) { |
50 | Thread* thread = Thread::Current(); |
51 | const auto& cls = Class::Handle( |
52 | lib.LookupClassAllowPrivate(String::Handle(Symbols::New(thread, name)))); |
53 | EXPECT(!cls.IsNull()); |
54 | return cls.raw(); |
55 | } |
56 | |
57 | TypeParameterPtr GetClassTypeParameter(const Class& klass, const char* name) { |
58 | const auto& param = TypeParameter::Handle( |
59 | klass.LookupTypeParameter(String::Handle(String::New(name)))); |
60 | EXPECT(!param.IsNull()); |
61 | return param.raw(); |
62 | } |
63 | |
64 | TypeParameterPtr GetFunctionTypeParameter(const Function& fun, |
65 | const char* name) { |
66 | intptr_t fun_level = 0; |
67 | const auto& param = TypeParameter::Handle( |
68 | fun.LookupTypeParameter(String::Handle(String::New(name)), &fun_level)); |
69 | EXPECT(!param.IsNull()); |
70 | return param.raw(); |
71 | } |
72 | |
73 | ObjectPtr Invoke(const Library& lib, const char* name) { |
74 | // These tests rely on running unoptimized code to collect type feedback. The |
75 | // interpreter does not collect type feedback for interface calls, so set |
76 | // compilation threshold to 0 in order to compile invoked function |
77 | // immediately and execute compiled code. |
78 | SetFlagScope<int> sfs(&FLAG_compilation_counter_threshold, 0); |
79 | |
80 | Thread* thread = Thread::Current(); |
81 | Dart_Handle api_lib = Api::NewHandle(thread, lib.raw()); |
82 | Dart_Handle result; |
83 | { |
84 | TransitionVMToNative transition(thread); |
85 | result = |
86 | Dart_Invoke(api_lib, NewString(name), /*argc=*/0, /*argv=*/nullptr); |
87 | EXPECT_VALID(result); |
88 | } |
89 | return Api::UnwrapHandle(result); |
90 | } |
91 | |
// Parses |function_|, builds its flow graph, and runs optimizing compiler
// passes over it. When |passes| is non-empty only those passes run (in the
// given order); otherwise the full default pipeline for |mode_| (JIT/AOT)
// runs. Returns the resulting flow graph, which is also cached in
// |flow_graph_| for a later CompileGraphAndAttachFunction().
FlowGraph* TestPipeline::RunPasses(
    std::initializer_list<CompilerPass::Id> passes) {
  // The table dispatch transformation needs a precompiler, which is not
  // available in the test pipeline.
  SetFlagScope<bool> sfs(&FLAG_use_table_dispatch, false);

  auto thread = Thread::Current();
  auto zone = thread->zone();
  const bool optimized = true;
  const intptr_t osr_id = Compiler::kNoOSRDeoptId;

  auto pipeline = CompilationPipeline::New(zone, function_);

  parsed_function_ = new (zone)
      ParsedFunction(thread, Function::ZoneHandle(zone, function_.raw()));
  pipeline->ParseFunction(parsed_function_);

  // Extract type feedback before the graph is built, as the graph
  // builder uses it to attach it to nodes.
  ic_data_array_ = new (zone) ZoneGrowableArray<const ICData*>();
  if (mode_ == CompilerPass::kJIT) {
    function_.RestoreICDataMap(ic_data_array_, /*clone_ic_data=*/false);
  }

  flow_graph_ = pipeline->BuildFlowGraph(zone, parsed_function_, ic_data_array_,
                                         osr_id, optimized);

  if (mode_ == CompilerPass::kAOT) {
    // AOT has no runtime-collected feedback; attach ICData to calls here.
    flow_graph_->PopulateWithICData(function_);
  }

  const bool reorder_blocks =
      FlowGraph::ShouldReorderBlocks(function_, optimized);
  if (mode_ == CompilerPass::kJIT && reorder_blocks) {
    BlockScheduler::AssignEdgeWeights(flow_graph_);
  }

  SpeculativeInliningPolicy speculative_policy(/*enable_suppression=*/false);
  // NOTE(review): |pass_state_| (a member, so it outlives this call) stores
  // pointers to the stack-local |speculative_policy| and call specializers
  // below; they dangle once this function returns, so no further passes may
  // be run through |pass_state_| afterwards — confirm callers respect this.
  pass_state_ = new CompilerPassState(thread, flow_graph_, &speculative_policy);
  pass_state_->reorder_blocks = reorder_blocks;

  if (optimized) {
    pass_state_->inline_id_to_function.Add(&function_);
    // We do not add the token position now because we don't know the
    // position of the inlined call until later. A side effect of this
    // is that the length of |inline_id_to_function| is always larger
    // than the length of |inline_id_to_token_pos| by one.
    // Top scope function has no caller (-1). We do this because we expect
    // all token positions to be at an inlined call.
    pass_state_->caller_inline_id.Add(-1);

    // Both specializers are constructed; only the one matching |mode_| is
    // installed on the pass state.
    JitCallSpecializer jit_call_specializer(flow_graph_, &speculative_policy);
    AotCallSpecializer aot_call_specializer(/*precompiler=*/nullptr,
                                            flow_graph_, &speculative_policy);
    if (mode_ == CompilerPass::kAOT) {
      pass_state_->call_specializer = &aot_call_specializer;
    } else {
      pass_state_->call_specializer = &jit_call_specializer;
    }

    if (passes.size() > 0) {
      flow_graph_ = CompilerPass::RunPipelineWithPasses(pass_state_, passes);
    } else {
      flow_graph_ = CompilerPass::RunPipeline(mode_, pass_state_);
    }
  }

  return flow_graph_;
}
161 | |
// Compiles the previously built |flow_graph_| to machine code, finalizes the
// code's metadata tables (pc descriptors, stack maps, exception handlers,
// etc.), and installs the resulting Code object on |function_|.
// Requires a prior successful RunPasses() (uses |pass_state_|,
// |parsed_function_|, |ic_data_array_|).
void TestPipeline::CompileGraphAndAttachFunction() {
  Zone* zone = thread_->zone();
  const bool optimized = true;

  SpeculativeInliningPolicy speculative_policy(/*enable_suppression=*/false);

  // Far branches are unnecessary on X64/IA32; other targets enable them so
  // arbitrarily sized test code assembles safely.
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
  const bool use_far_branches = false;
#else
  const bool use_far_branches = true;
#endif

  // Invariant established by RunPasses(): one caller id per inline id.
  ASSERT(pass_state_->inline_id_to_function.length() ==
         pass_state_->caller_inline_id.length());
  compiler::ObjectPoolBuilder object_pool_builder;
  compiler::Assembler assembler(&object_pool_builder, use_far_branches);
  FlowGraphCompiler graph_compiler(
      &assembler, flow_graph_, *parsed_function_, optimized,
      &speculative_policy, pass_state_->inline_id_to_function,
      pass_state_->inline_id_to_token_pos, pass_state_->caller_inline_id,
      ic_data_array_);

  graph_compiler.CompileGraph();

  const auto& deopt_info_array =
      Array::Handle(zone, graph_compiler.CreateDeoptInfo(&assembler));
  const auto pool_attachment = Code::PoolAttachment::kAttachPool;
  const auto& code = Code::Handle(Code::FinalizeCode(
      &graph_compiler, &assembler, pool_attachment, optimized, nullptr));
  code.set_is_optimized(optimized);
  code.set_owner(function_);

  graph_compiler.FinalizePcDescriptors(code);
  code.set_deopt_info_array(deopt_info_array);

  graph_compiler.FinalizeStackMaps(code);
  graph_compiler.FinalizeVarDescriptors(code);
  graph_compiler.FinalizeExceptionHandlers(code);
  graph_compiler.FinalizeCatchEntryMovesMap(code);
  graph_compiler.FinalizeStaticCallTargetsTable(code);
  graph_compiler.FinalizeCodeSourceMap(code);

  // |optimized| is always true here; the else-branch is kept for symmetry
  // with the regular compiler's install path.
  if (optimized) {
    function_.InstallOptimizedCode(code);
  } else {
    function_.set_unoptimized_code(code);
    function_.AttachCode(code);
  }

  // We expect there to be no deoptimizations.
  if (mode_ == CompilerPass::kAOT) {
    // TODO(kustermann): Enable this once we get rid of [CheckedSmiSlowPath]s.
    // EXPECT(deopt_info_array.IsNull() || deopt_info_array.Length() == 0);
  }
}
217 | |
// Matches |match_codes| sequentially against the instruction stream starting
// at |cursor_|. If |insert_before| is given, it is inserted before every
// original entry (e.g. kMoveGlob, to tolerate arbitrary instructions between
// matches). On success |cursor_| is advanced past the matched sequence and
// true is returned; on failure |cursor_| is left unchanged and false is
// returned.
bool ILMatcher::TryMatch(std::initializer_list<MatchCode> match_codes,
                         MatchOpCode insert_before) {
  std::vector<MatchCode> qcodes = match_codes;

  if (insert_before != kInvalidMatchOpCode) {
    // Interleave: [a, b] -> [insert_before, a, insert_before, b].
    // insert() invalidates iterators, so |pos| is re-derived from its
    // return value each round (+1 skips over the just-inserted element).
    for (auto pos = qcodes.begin(); pos < qcodes.end(); pos++) {
      pos = qcodes.insert(pos, insert_before) + 1;
    }
  }

  if (trace_) {
    OS::PrintErr("ILMatcher: Matching the following graph\n");
    FlowGraphPrinter::PrintGraph("ILMatcher", flow_graph_);
    OS::PrintErr("ILMatcher: Starting match at %s:\n", cursor_->ToCString());
  }

  // Work on a local cursor so |cursor_| stays untouched on failure.
  Instruction* cursor = cursor_;
  for (size_t i = 0; i < qcodes.size(); ++i) {
    Instruction** capture = qcodes[i].capture_;
    if (parallel_moves_handling_ == ParallelMovesHandling::kSkip) {
      // Register-allocator-inserted ParallelMoves are transparent to the
      // matcher in this mode.
      while (cursor->IsParallelMove()) {
        cursor = cursor->next();
      }
    }
    if (trace_) {
      OS::PrintErr("  matching %30s @ %s\n",
                   MatchOpCodeToCString(qcodes[i].opcode()),
                   cursor->ToCString());
    }

    auto next = MatchInternal(qcodes, i, cursor);
    if (next == nullptr) {
      // Match failed: null out |cursor| so the success check below fails.
      if (trace_) {
        OS::PrintErr("    -> Match failed\n");
      }
      cursor = next;
      break;
    }
    // Captures record the instruction where the match occurred (before
    // advancing), not the successor.
    if (capture != nullptr) {
      *capture = cursor;
    }
    cursor = next;
  }
  if (cursor != nullptr) {
    cursor_ = cursor;
    return true;
  }
  return false;
}
267 | |
// Matches the single opcode match_codes[i] at |cursor|. Returns the cursor
// from which matching should continue (the same instruction for pure-match
// opcodes, the successor for match-and-move opcodes), or nullptr if the
// opcode does not match.
// NOTE(review): |match_codes| is taken by value and this function recurses
// for kMoveGlob, copying the vector each step — signature must stay in sync
// with the header declaration, so it is left as-is here.
Instruction* ILMatcher::MatchInternal(std::vector<MatchCode> match_codes,
                                      size_t i,
                                      Instruction* cursor) {
  const MatchOpCode opcode = match_codes[i].opcode();
  // Branch opcodes follow the requested successor edge.
  if (opcode == kMatchAndMoveBranchTrue) {
    auto branch = cursor->AsBranch();
    if (branch == nullptr) return nullptr;
    return branch->true_successor();
  }
  if (opcode == kMatchAndMoveBranchFalse) {
    auto branch = cursor->AsBranch();
    if (branch == nullptr) return nullptr;
    return branch->false_successor();
  }
  // kNop matches anything without consuming it.
  if (opcode == kNop) {
    return cursor;
  }
  // kMoveAny consumes exactly one instruction, whatever it is.
  if (opcode == kMoveAny) {
    return cursor->next();
  }
  if (opcode == kMoveParallelMoves) {
    while (cursor != nullptr && cursor->IsParallelMove()) {
      cursor = cursor->next();
    }
    return cursor;
  }

  if (opcode == kMoveGlob) {
    // Greedily skip instructions (following Goto edges across blocks) until
    // the NEXT match code matches; requires a following code to anchor on.
    ASSERT((i + 1) < match_codes.size());
    while (true) {
      if (cursor == nullptr) return nullptr;
      if (MatchInternal(match_codes, i + 1, cursor) != nullptr) {
        return cursor;
      }
      if (auto as_goto = cursor->AsGoto()) {
        cursor = as_goto->successor();
      } else {
        cursor = cursor->next();
      }
    }
  }

  if (opcode == kMatchAndMoveGoto) {
    if (auto goto_instr = cursor->AsGoto()) {
      return goto_instr->successor();
    }
  }

  // Per-instruction opcodes, generated for every IL instruction kind:
  //   kMatchX           - match, don't consume.
  //   kMatchAndMoveX    - match and consume.
  //   kMatchAndMoveOptionalX - consume if present, otherwise no-op.
  switch (opcode) {
#define EMIT_CASE(Instruction, _)                                              \
  case kMatch##Instruction: {                                                  \
    if (cursor->Is##Instruction()) {                                           \
      return cursor;                                                           \
    }                                                                          \
    return nullptr;                                                            \
  }                                                                            \
  case kMatchAndMove##Instruction: {                                           \
    if (cursor->Is##Instruction()) {                                           \
      return cursor->next();                                                   \
    }                                                                          \
    return nullptr;                                                            \
  }                                                                            \
  case kMatchAndMoveOptional##Instruction: {                                   \
    if (cursor->Is##Instruction()) {                                           \
      return cursor->next();                                                   \
    }                                                                          \
    return cursor;                                                             \
  }
    FOR_EACH_INSTRUCTION(EMIT_CASE)
#undef EMIT_CASE
    default:
      UNREACHABLE();
  }

  UNREACHABLE();
  return nullptr;
}
345 | |
346 | const char* ILMatcher::MatchOpCodeToCString(MatchOpCode opcode) { |
347 | if (opcode == kMatchAndMoveBranchTrue) { |
348 | return "kMatchAndMoveBranchTrue" ; |
349 | } |
350 | if (opcode == kMatchAndMoveBranchFalse) { |
351 | return "kMatchAndMoveBranchFalse" ; |
352 | } |
353 | if (opcode == kNop) { |
354 | return "kNop" ; |
355 | } |
356 | if (opcode == kMoveAny) { |
357 | return "kMoveAny" ; |
358 | } |
359 | if (opcode == kMoveParallelMoves) { |
360 | return "kMoveParallelMoves" ; |
361 | } |
362 | if (opcode == kMoveGlob) { |
363 | return "kMoveGlob" ; |
364 | } |
365 | |
366 | switch (opcode) { |
367 | #define EMIT_CASE(Instruction, _) \ |
368 | case kMatch##Instruction: \ |
369 | return "kMatch" #Instruction; \ |
370 | case kMatchAndMove##Instruction: \ |
371 | return "kMatchAndMove" #Instruction; \ |
372 | case kMatchAndMoveOptional##Instruction: \ |
373 | return "kMatchAndMoveOptional" #Instruction; |
374 | FOR_EACH_INSTRUCTION(EMIT_CASE) |
375 | #undef EMIT_CASE |
376 | default: |
377 | UNREACHABLE(); |
378 | } |
379 | |
380 | UNREACHABLE(); |
381 | return nullptr; |
382 | } |
383 | |
384 | } // namespace dart |
385 | |