1 | // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 | // for details. All rights reserved. Use of this source code is governed by a |
3 | // BSD-style license that can be found in the LICENSE file. |
4 | |
5 | #include "vm/compiler/jit/compiler.h" |
6 | |
7 | #if !defined(DART_PRECOMPILED_RUNTIME) |
8 | #include "vm/code_patcher.h" |
9 | #include "vm/compiler/assembler/assembler.h" |
10 | #include "vm/compiler/assembler/disassembler.h" |
11 | #include "vm/compiler/backend/block_scheduler.h" |
12 | #include "vm/compiler/backend/branch_optimizer.h" |
13 | #include "vm/compiler/backend/constant_propagator.h" |
14 | #include "vm/compiler/backend/flow_graph.h" |
15 | #include "vm/compiler/backend/flow_graph_compiler.h" |
16 | #include "vm/compiler/backend/il_printer.h" |
17 | #include "vm/compiler/backend/inliner.h" |
18 | #include "vm/compiler/backend/linearscan.h" |
19 | #include "vm/compiler/backend/range_analysis.h" |
20 | #include "vm/compiler/backend/redundancy_elimination.h" |
21 | #include "vm/compiler/backend/type_propagator.h" |
22 | #include "vm/compiler/cha.h" |
23 | #include "vm/compiler/compiler_pass.h" |
24 | #include "vm/compiler/compiler_state.h" |
25 | #include "vm/compiler/frontend/bytecode_reader.h" |
26 | #include "vm/compiler/frontend/flow_graph_builder.h" |
27 | #include "vm/compiler/frontend/kernel_to_il.h" |
28 | #include "vm/compiler/jit/jit_call_specializer.h" |
29 | #include "vm/dart_entry.h" |
30 | #include "vm/debugger.h" |
31 | #include "vm/deopt_instructions.h" |
32 | #include "vm/exceptions.h" |
33 | #include "vm/flags.h" |
34 | #include "vm/kernel.h" |
35 | #include "vm/longjump.h" |
36 | #include "vm/object.h" |
37 | #include "vm/object_store.h" |
38 | #include "vm/os.h" |
39 | #include "vm/parser.h" |
40 | #include "vm/regexp_assembler.h" |
41 | #include "vm/regexp_parser.h" |
42 | #include "vm/runtime_entry.h" |
43 | #include "vm/symbols.h" |
44 | #include "vm/tags.h" |
45 | #include "vm/thread_registry.h" |
46 | #include "vm/timeline.h" |
47 | #include "vm/timer.h" |
48 | #endif |
49 | |
50 | namespace dart { |
51 | |
DEFINE_FLAG(
    int,
    max_deoptimization_counter_threshold,
    16,
    "How many times we allow deoptimization before we disallow optimization.");
DEFINE_FLAG(charp, optimization_filter, NULL, "Optimize only named function");
DEFINE_FLAG(bool, print_flow_graph, false, "Print the IR flow graph.");
DEFINE_FLAG(bool,
            print_flow_graph_optimized,
            false,
            "Print the IR flow graph when optimizing.");
DEFINE_FLAG(bool,
            print_ic_data_map,
            false,
            "Print the deopt-id to ICData map in optimizing compiler.");
DEFINE_FLAG(bool, print_code_source_map, false, "Print code source map.");
DEFINE_FLAG(bool,
            stress_test_background_compilation,
            false,
            "Keep background compiler running all the time");
DEFINE_FLAG(bool,
            stop_on_excessive_deoptimization,
            false,
            "Debugging: stops program if deoptimizing same function too often");
DEFINE_FLAG(bool, trace_compiler, false, "Trace compiler operations.");
DEFINE_FLAG(bool,
            trace_failed_optimization_attempts,
            false,
            "Traces all failed optimization attempts");
DEFINE_FLAG(bool,
            trace_optimizing_compiler,
            false,
            "Trace only optimizing compiler operations.");
DEFINE_FLAG(bool, trace_bailout, false, "Print bailout from ssa compiler.");
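
// For example (hypothetical invocation), running the VM with --trace_compiler
// and --print_flow_graph_optimized traces compiler activity and prints the
// optimized flow graph for every function FlowGraphPrinter::ShouldPrint
// selects.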
86 | |
87 | DECLARE_FLAG(bool, enable_interpreter); |
DECLARE_FLAG(int, huge_method_cutoff_in_code_size);
89 | DECLARE_FLAG(bool, trace_failed_optimization_attempts); |
90 | |
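// Handler for the --precompilation flag (registered below via
// DEFINE_FLAG_HANDLER): it forces the flags below into an AOT-compatible
// configuration.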
91 | static void PrecompilationModeHandler(bool value) { |
92 | if (value) { |
93 | #if defined(TARGET_ARCH_IA32) |
94 | FATAL("Precompilation not supported on IA32" ); |
95 | #endif |
96 | |
97 | FLAG_background_compilation = false; |
98 | FLAG_enable_mirrors = false; |
99 | FLAG_fields_may_be_reset = true; |
100 | FLAG_interpret_irregexp = true; |
101 | FLAG_lazy_dispatchers = false; |
102 | FLAG_link_natives_lazily = true; |
103 | FLAG_optimization_counter_threshold = -1; |
104 | FLAG_polymorphic_with_deopt = false; |
105 | FLAG_precompiled_mode = true; |
106 | FLAG_reorder_basic_blocks = true; |
107 | FLAG_use_field_guards = false; |
108 | FLAG_use_cha_deopt = false; |
109 | FLAG_causal_async_stacks = false; |
110 | FLAG_lazy_async_stacks = true; |
111 | |
112 | #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) |
113 | // Set flags affecting runtime accordingly for gen_snapshot. |
114 | // These flags are constants with PRODUCT and DART_PRECOMPILED_RUNTIME. |
115 | FLAG_deoptimize_alot = false; // Used in some tests. |
116 | FLAG_deoptimize_every = 0; // Used in some tests. |
117 | FLAG_use_osr = false; |
118 | #endif |
119 | } |
120 | } |
121 | |
122 | DEFINE_FLAG_HANDLER(PrecompilationModeHandler, |
123 | precompilation, |
124 | "Precompilation mode" ); |
125 | |
126 | #ifndef DART_PRECOMPILED_RUNTIME |
127 | |
128 | void DartCompilationPipeline::ParseFunction(ParsedFunction* parsed_function) { |
129 | // Nothing to do here. |
130 | } |
131 | |
132 | FlowGraph* DartCompilationPipeline::BuildFlowGraph( |
133 | Zone* zone, |
134 | ParsedFunction* parsed_function, |
135 | ZoneGrowableArray<const ICData*>* ic_data_array, |
136 | intptr_t osr_id, |
137 | bool optimized) { |
138 | kernel::FlowGraphBuilder builder(parsed_function, ic_data_array, |
139 | /* not building var desc */ NULL, |
140 | /* not inlining */ NULL, optimized, osr_id); |
141 | FlowGraph* graph = builder.BuildGraph(); |
142 | ASSERT(graph != NULL); |
143 | return graph; |
144 | } |
145 | |
146 | void IrregexpCompilationPipeline::ParseFunction( |
147 | ParsedFunction* parsed_function) { |
148 | VMTagScope tagScope(parsed_function->thread(), |
149 | VMTag::kCompileParseRegExpTagId); |
150 | Zone* zone = parsed_function->zone(); |
151 | RegExp& regexp = RegExp::Handle(parsed_function->function().regexp()); |
152 | |
153 | const String& pattern = String::Handle(regexp.pattern()); |
154 | |
155 | RegExpCompileData* compile_data = new (zone) RegExpCompileData(); |
156 | // Parsing failures are handled in the RegExp factory constructor. |
157 | RegExpParser::ParseRegExp(pattern, regexp.flags(), compile_data); |
158 | |
159 | regexp.set_num_bracket_expressions(compile_data->capture_count); |
160 | regexp.set_capture_name_map(compile_data->capture_name_map); |
161 | if (compile_data->simple) { |
162 | regexp.set_is_simple(); |
163 | } else { |
164 | regexp.set_is_complex(); |
165 | } |
166 | |
167 | parsed_function->SetRegExpCompileData(compile_data); |
168 | |
169 | // Variables are allocated after compilation. |
170 | } |
171 | |
172 | FlowGraph* IrregexpCompilationPipeline::BuildFlowGraph( |
173 | Zone* zone, |
174 | ParsedFunction* parsed_function, |
175 | ZoneGrowableArray<const ICData*>* ic_data_array, |
176 | intptr_t osr_id, |
177 | bool optimized) { |
  // Compile to the Dart IR.
179 | RegExpEngine::CompilationResult result = |
180 | RegExpEngine::CompileIR(parsed_function->regexp_compile_data(), |
181 | parsed_function, *ic_data_array, osr_id); |
182 | backtrack_goto_ = result.backtrack_goto; |
183 | |
184 | // Allocate variables now that we know the number of locals. |
185 | parsed_function->AllocateIrregexpVariables(result.num_stack_locals); |
186 | |
  // When compiling for OSR, use a depth-first search to find the OSR
  // entry and make the graph entry jump to it instead of the normal
  // entry. Catch entries are always considered reachable, even if
  // they become unreachable after OSR.
191 | if (osr_id != Compiler::kNoOSRDeoptId) { |
192 | result.graph_entry->RelinkToOsrEntry(zone, result.num_blocks); |
193 | } |
194 | PrologueInfo prologue_info(-1, -1); |
195 | return new (zone) FlowGraph(*parsed_function, result.graph_entry, |
196 | result.num_blocks, prologue_info); |
197 | } |
198 | |
199 | CompilationPipeline* CompilationPipeline::New(Zone* zone, |
200 | const Function& function) { |
201 | if (function.IsIrregexpFunction()) { |
202 | return new (zone) IrregexpCompilationPipeline(); |
203 | } else { |
204 | return new (zone) DartCompilationPipeline(); |
205 | } |
206 | } |
207 | |
// Compiles a function. Should be called only if the function has not been
// compiled.
209 | // Arg0: function object. |
210 | DEFINE_RUNTIME_ENTRY(CompileFunction, 1) { |
211 | ASSERT(thread->IsMutatorThread()); |
212 | const Function& function = Function::CheckedHandle(zone, arguments.ArgAt(0)); |
213 | Object& result = Object::Handle(zone); |
214 | |
215 | if (FLAG_enable_interpreter && function.IsBytecodeAllowed(zone)) { |
216 | if (!function.HasBytecode()) { |
217 | result = kernel::BytecodeReader::ReadFunctionBytecode(thread, function); |
218 | if (!result.IsNull()) { |
219 | Exceptions::PropagateError(Error::Cast(result)); |
220 | } |
221 | } |
222 | if (function.HasBytecode() && (FLAG_compilation_counter_threshold != 0)) { |
      // If the interpreter is enabled and there is bytecode, the LazyCompile
      // stub (which calls CompileFunction) should proceed to InterpretCall in
      // order to enter the interpreter. In that case, compilation is
      // postponed and triggered later by the interpreter via
      // CompileInterpretedFunction.
227 | return; |
228 | } |
229 | // Fall back to compilation. |
230 | } else { |
231 | ASSERT(!function.HasCode()); |
232 | } |
233 | |
234 | result = Compiler::CompileFunction(thread, function); |
235 | if (result.IsError()) { |
236 | if (result.IsLanguageError()) { |
237 | Exceptions::ThrowCompileTimeError(LanguageError::Cast(result)); |
238 | UNREACHABLE(); |
239 | } |
240 | Exceptions::PropagateError(Error::Cast(result)); |
241 | } |
242 | } |
243 | |
244 | bool Compiler::CanOptimizeFunction(Thread* thread, const Function& function) { |
245 | #if !defined(PRODUCT) |
246 | if (Debugger::IsDebugging(thread, function)) { |
247 | // We cannot set breakpoints and single step in optimized code, |
248 | // so do not optimize the function. Bump usage counter down to avoid |
249 | // repeatedly entering the runtime for an optimization attempt. |
250 | function.SetUsageCounter(0); |
251 | |
    // If the optimization counter is 1, the unoptimized code will come back
    // here immediately, causing an infinite compilation loop. The compiler
    // raises the threshold for functions with breakpoints, so we drop the
    // unoptimized code to force it to be recompiled.
256 | if (thread->isolate()->CanOptimizeImmediately()) { |
257 | function.ClearCode(); |
258 | } |
259 | return false; |
260 | } |
261 | #endif |
262 | if (function.deoptimization_counter() >= |
263 | FLAG_max_deoptimization_counter_threshold) { |
264 | if (FLAG_trace_failed_optimization_attempts || |
265 | FLAG_stop_on_excessive_deoptimization) { |
266 | THR_Print("Too many deoptimizations: %s\n" , |
267 | function.ToFullyQualifiedCString()); |
268 | if (FLAG_stop_on_excessive_deoptimization) { |
269 | FATAL("Stop on excessive deoptimization" ); |
270 | } |
271 | } |
    // The function will no longer be optimized. This situation occurs mostly
    // with small optimization counter thresholds.
274 | function.SetIsOptimizable(false); |
275 | function.SetUsageCounter(INT32_MIN); |
276 | return false; |
277 | } |
278 | if (FLAG_optimization_filter != NULL) { |
279 | // FLAG_optimization_filter is a comma-separated list of strings that are |
280 | // matched against the fully-qualified function name. |
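    // For example, --optimization_filter=MyClass.foo,bar (hypothetical names)
    // limits optimization to functions whose fully-qualified name contains
    // "MyClass.foo" or "bar"; matching is by substring (strstr) below.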
281 | char* save_ptr; // Needed for strtok_r. |
282 | const char* function_name = function.ToFullyQualifiedCString(); |
283 | intptr_t len = strlen(FLAG_optimization_filter) + 1; // Length with \0. |
284 | char* filter = new char[len]; |
285 | strncpy(filter, FLAG_optimization_filter, len); // strtok modifies arg 1. |
    char* token = strtok_r(filter, ",", &save_ptr);
287 | bool found = false; |
288 | while (token != NULL) { |
289 | if (strstr(function_name, token) != NULL) { |
290 | found = true; |
291 | break; |
292 | } |
      token = strtok_r(NULL, ",", &save_ptr);
294 | } |
295 | delete[] filter; |
296 | if (!found) { |
297 | function.SetUsageCounter(INT32_MIN); |
298 | return false; |
299 | } |
300 | } |
301 | if (!function.IsOptimizable()) { |
302 | // Huge methods (code size above --huge_method_cutoff_in_code_size) become |
303 | // non-optimizable only after the code has been generated. |
304 | if (FLAG_trace_failed_optimization_attempts) { |
305 | THR_Print("Not optimizable: %s\n" , function.ToFullyQualifiedCString()); |
306 | } |
307 | function.SetUsageCounter(INT32_MIN); |
308 | return false; |
309 | } |
310 | return true; |
311 | } |
312 | |
313 | bool Compiler::IsBackgroundCompilation() { |
  // For now, compilation on a non-mutator thread is background compilation.
315 | return !Thread::Current()->IsMutatorThread(); |
316 | } |
317 | |
318 | class CompileParsedFunctionHelper : public ValueObject { |
319 | public: |
320 | CompileParsedFunctionHelper(ParsedFunction* parsed_function, |
321 | bool optimized, |
322 | intptr_t osr_id) |
323 | : parsed_function_(parsed_function), |
324 | optimized_(optimized), |
325 | osr_id_(osr_id), |
326 | thread_(Thread::Current()) {} |
327 | |
328 | CodePtr Compile(CompilationPipeline* pipeline); |
329 | |
330 | private: |
331 | ParsedFunction* parsed_function() const { return parsed_function_; } |
332 | bool optimized() const { return optimized_; } |
333 | intptr_t osr_id() const { return osr_id_; } |
334 | Thread* thread() const { return thread_; } |
335 | Isolate* isolate() const { return thread_->isolate(); } |
336 | CodePtr FinalizeCompilation(compiler::Assembler* assembler, |
337 | FlowGraphCompiler* graph_compiler, |
338 | FlowGraph* flow_graph); |
339 | void CheckIfBackgroundCompilerIsBeingStopped(bool optimizing_compiler); |
340 | |
341 | ParsedFunction* parsed_function_; |
342 | const bool optimized_; |
343 | const intptr_t osr_id_; |
344 | Thread* const thread_; |
345 | |
346 | DISALLOW_COPY_AND_ASSIGN(CompileParsedFunctionHelper); |
347 | }; |
348 | |
349 | CodePtr CompileParsedFunctionHelper::FinalizeCompilation( |
350 | compiler::Assembler* assembler, |
351 | FlowGraphCompiler* graph_compiler, |
352 | FlowGraph* flow_graph) { |
353 | ASSERT(!CompilerState::Current().is_aot()); |
354 | const Function& function = parsed_function()->function(); |
355 | Zone* const zone = thread()->zone(); |
356 | |
357 | // CreateDeoptInfo uses the object pool and needs to be done before |
358 | // FinalizeCode. |
359 | Array& deopt_info_array = Array::Handle(zone, Object::empty_array().raw()); |
360 | deopt_info_array = graph_compiler->CreateDeoptInfo(assembler); |
361 | |
362 | // Allocates instruction object. Since this occurs only at safepoint, |
363 | // there can be no concurrent access to the instruction page. |
364 | Code& code = Code::Handle(Code::FinalizeCode( |
365 | graph_compiler, assembler, Code::PoolAttachment::kAttachPool, optimized(), |
366 | /*stats=*/nullptr)); |
367 | code.set_is_optimized(optimized()); |
368 | code.set_owner(function); |
369 | |
370 | if (!function.IsOptimizable()) { |
371 | // A function with huge unoptimized code can become non-optimizable |
372 | // after generating unoptimized code. |
373 | function.SetUsageCounter(INT32_MIN); |
374 | } |
375 | |
376 | graph_compiler->FinalizePcDescriptors(code); |
377 | code.set_deopt_info_array(deopt_info_array); |
378 | |
379 | graph_compiler->FinalizeStackMaps(code); |
380 | graph_compiler->FinalizeVarDescriptors(code); |
381 | graph_compiler->FinalizeExceptionHandlers(code); |
382 | graph_compiler->FinalizeCatchEntryMovesMap(code); |
383 | graph_compiler->FinalizeStaticCallTargetsTable(code); |
384 | graph_compiler->FinalizeCodeSourceMap(code); |
385 | |
386 | if (function.ForceOptimize()) { |
387 | ASSERT(optimized() && thread()->IsMutatorThread()); |
388 | code.set_is_force_optimized(true); |
389 | function.AttachCode(code); |
390 | function.SetWasCompiled(true); |
391 | } else if (optimized()) { |
392 | // Installs code while at safepoint. |
393 | if (thread()->IsMutatorThread()) { |
394 | const bool is_osr = osr_id() != Compiler::kNoOSRDeoptId; |
395 | if (!is_osr) { |
396 | function.InstallOptimizedCode(code); |
397 | } |
398 | ASSERT(code.owner() == function.raw()); |
399 | } else { |
400 | // Background compilation. |
      // Before installing the code, check generation counts to see whether
      // the code may have become invalid.
403 | const bool trace_compiler = |
404 | FLAG_trace_compiler || FLAG_trace_optimizing_compiler; |
405 | bool code_is_valid = true; |
406 | if (!flow_graph->parsed_function().guarded_fields()->is_empty()) { |
407 | const ZoneGrowableArray<const Field*>& guarded_fields = |
408 | *flow_graph->parsed_function().guarded_fields(); |
409 | Field& original = Field::Handle(); |
410 | for (intptr_t i = 0; i < guarded_fields.length(); i++) { |
411 | const Field& field = *guarded_fields[i]; |
412 | ASSERT(!field.IsOriginal()); |
413 | original = field.Original(); |
414 | if (!field.IsConsistentWith(original)) { |
415 | code_is_valid = false; |
416 | if (trace_compiler) { |
417 | THR_Print("--> FAIL: Field %s guarded state changed." , |
418 | field.ToCString()); |
419 | } |
420 | break; |
421 | } |
422 | } |
423 | } |
424 | if (!thread() |
425 | ->compiler_state() |
426 | .cha() |
427 | .IsConsistentWithCurrentHierarchy()) { |
428 | code_is_valid = false; |
429 | if (trace_compiler) { |
430 | THR_Print("--> FAIL: Class hierarchy has new subclasses." ); |
431 | } |
432 | } |
433 | |
434 | // Setting breakpoints at runtime could make a function non-optimizable. |
435 | if (code_is_valid && Compiler::CanOptimizeFunction(thread(), function)) { |
436 | const bool is_osr = osr_id() != Compiler::kNoOSRDeoptId; |
437 | ASSERT(!is_osr); // OSR is not compiled in background. |
438 | function.InstallOptimizedCode(code); |
439 | } else { |
440 | code = Code::null(); |
441 | } |
442 | if (function.usage_counter() < 0) { |
443 | // Reset to 0 so that it can be recompiled if needed. |
444 | if (code_is_valid) { |
445 | function.SetUsageCounter(0); |
446 | } else { |
447 | // Trigger another optimization pass soon. |
448 | function.SetUsageCounter(FLAG_optimization_counter_threshold - 100); |
449 | } |
450 | } |
451 | } |
452 | |
453 | if (!code.IsNull()) { |
454 | // The generated code was compiled under certain assumptions about |
455 | // class hierarchy and field types. Register these dependencies |
456 | // to ensure that the code will be deoptimized if they are violated. |
457 | thread()->compiler_state().cha().RegisterDependencies(code); |
458 | |
459 | const ZoneGrowableArray<const Field*>& guarded_fields = |
460 | *flow_graph->parsed_function().guarded_fields(); |
461 | Field& field = Field::Handle(); |
462 | for (intptr_t i = 0; i < guarded_fields.length(); i++) { |
463 | field = guarded_fields[i]->Original(); |
464 | field.RegisterDependentCode(code); |
465 | } |
466 | } |
467 | } else { // not optimized. |
468 | if (function.ic_data_array() == Array::null()) { |
469 | function.SaveICDataMap( |
470 | graph_compiler->deopt_id_to_ic_data(), |
471 | Array::Handle(zone, graph_compiler->edge_counters_array())); |
472 | } |
473 | function.set_unoptimized_code(code); |
474 | function.AttachCode(code); |
475 | function.SetWasCompiled(true); |
476 | if (function.IsOptimizable() && (function.usage_counter() < 0)) { |
      // While compiling in the background, the usage counter is set to
      // INT32_MIN. Reset it so that the function can be optimized further.
479 | function.SetUsageCounter(0); |
480 | } |
481 | } |
482 | return code.raw(); |
483 | } |
484 | |
485 | void CompileParsedFunctionHelper::CheckIfBackgroundCompilerIsBeingStopped( |
486 | bool optimizing_compiler) { |
487 | ASSERT(Compiler::IsBackgroundCompilation()); |
488 | if (optimizing_compiler) { |
489 | if (!isolate()->optimizing_background_compiler()->is_running()) { |
490 | // The background compiler is being stopped. |
491 | Compiler::AbortBackgroundCompilation( |
492 | DeoptId::kNone, "Optimizing Background compilation is being stopped" ); |
493 | } |
494 | } else { |
495 | if (FLAG_enable_interpreter && |
496 | !isolate()->background_compiler()->is_running()) { |
497 | // The background compiler is being stopped. |
498 | Compiler::AbortBackgroundCompilation( |
499 | DeoptId::kNone, "Background compilation is being stopped" ); |
500 | } |
501 | } |
502 | } |
503 | |
// Returns null if compilation bailed out.
505 | CodePtr CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) { |
506 | ASSERT(!FLAG_precompiled_mode); |
507 | const Function& function = parsed_function()->function(); |
508 | if (optimized() && !function.IsOptimizable()) { |
509 | return Code::null(); |
510 | } |
511 | Zone* const zone = thread()->zone(); |
512 | HANDLESCOPE(thread()); |
513 | |
514 | // We may reattempt compilation if the function needs to be assembled using |
515 | // far branches on ARM. In the else branch of the setjmp call, done is set to |
516 | // false, and use_far_branches is set to true if there is a longjmp from the |
517 | // ARM assembler. In all other paths through this while loop, done is set to |
518 | // true. use_far_branches is always false on ia32 and x64. |
519 | volatile bool done = false; |
520 | // volatile because the variable may be clobbered by a longjmp. |
521 | volatile bool use_far_branches = false; |
522 | |
523 | // In the JIT case we allow speculative inlining and have no need for a |
524 | // suppression, since we don't restart optimization. |
525 | SpeculativeInliningPolicy speculative_policy(/*enable_suppression=*/false); |
526 | |
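  // The result lives in a zone handle; the pointer itself is volatile so a
  // longjmp from the assembler cannot clobber it.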
527 | Code* volatile result = &Code::ZoneHandle(zone); |
528 | while (!done) { |
529 | *result = Code::null(); |
530 | LongJumpScope jump; |
531 | if (setjmp(*jump.Set()) == 0) { |
532 | FlowGraph* flow_graph = nullptr; |
533 | ZoneGrowableArray<const ICData*>* ic_data_array = nullptr; |
534 | |
535 | CompilerState compiler_state(thread(), /*is_aot=*/false, |
536 | CompilerState::ShouldTrace(function)); |
537 | |
538 | { |
539 | if (optimized()) { |
540 | // In background compilation the deoptimization counter may have |
541 | // already reached the limit. |
542 | ASSERT(Compiler::IsBackgroundCompilation() || |
543 | (function.deoptimization_counter() < |
544 | FLAG_max_deoptimization_counter_threshold)); |
545 | } |
546 | |
      // Extract type feedback before the graph is built, as the graph
      // builder attaches it to the graph nodes.
549 | ic_data_array = new (zone) ZoneGrowableArray<const ICData*>(); |
550 | |
551 | // Clone ICData for background compilation so that it does not |
552 | // change while compiling. |
553 | const bool clone_ic_data = Compiler::IsBackgroundCompilation(); |
554 | function.RestoreICDataMap(ic_data_array, clone_ic_data); |
555 | |
556 | if (optimized()) { |
557 | if (Compiler::IsBackgroundCompilation() && |
558 | (function.ic_data_array() == Array::null())) { |
559 | Compiler::AbortBackgroundCompilation( |
560 | DeoptId::kNone, "RestoreICDataMap: ICData array cleared." ); |
561 | } |
562 | } |
563 | |
564 | if (FLAG_print_ic_data_map) { |
565 | for (intptr_t i = 0; i < ic_data_array->length(); i++) { |
566 | if ((*ic_data_array)[i] != NULL) { |
567 | THR_Print("%" Pd " " , i); |
568 | FlowGraphPrinter::PrintICData(*(*ic_data_array)[i]); |
569 | } |
570 | } |
571 | } |
572 | |
573 | TIMELINE_DURATION(thread(), CompilerVerbose, "BuildFlowGraph" ); |
574 | flow_graph = pipeline->BuildFlowGraph( |
575 | zone, parsed_function(), ic_data_array, osr_id(), optimized()); |
576 | } |
577 | |
578 | const bool print_flow_graph = |
579 | (FLAG_print_flow_graph || |
580 | (optimized() && FLAG_print_flow_graph_optimized)) && |
581 | FlowGraphPrinter::ShouldPrint(function); |
582 | |
583 | if (print_flow_graph && !optimized()) { |
        FlowGraphPrinter::PrintGraph("Unoptimized Compilation", flow_graph);
585 | } |
586 | |
587 | const bool reorder_blocks = |
588 | FlowGraph::ShouldReorderBlocks(function, optimized()); |
589 | if (reorder_blocks) { |
590 | TIMELINE_DURATION(thread(), CompilerVerbose, |
591 | "BlockScheduler::AssignEdgeWeights" ); |
592 | BlockScheduler::AssignEdgeWeights(flow_graph); |
593 | } |
594 | |
595 | CompilerPassState pass_state(thread(), flow_graph, &speculative_policy); |
596 | pass_state.reorder_blocks = reorder_blocks; |
597 | |
598 | if (function.ForceOptimize()) { |
599 | ASSERT(optimized()); |
600 | TIMELINE_DURATION(thread(), CompilerVerbose, "OptimizationPasses" ); |
601 | flow_graph = CompilerPass::RunForceOptimizedPipeline(CompilerPass::kJIT, |
602 | &pass_state); |
603 | } else if (optimized()) { |
604 | TIMELINE_DURATION(thread(), CompilerVerbose, "OptimizationPasses" ); |
605 | |
606 | pass_state.inline_id_to_function.Add(&function); |
607 | // We do not add the token position now because we don't know the |
608 | // position of the inlined call until later. A side effect of this |
609 | // is that the length of |inline_id_to_function| is always larger |
610 | // than the length of |inline_id_to_token_pos| by one. |
611 | // Top scope function has no caller (-1). We do this because we expect |
612 | // all token positions to be at an inlined call. |
613 | pass_state.caller_inline_id.Add(-1); |
614 | |
615 | JitCallSpecializer call_specializer(flow_graph, &speculative_policy); |
616 | pass_state.call_specializer = &call_specializer; |
617 | |
618 | flow_graph = CompilerPass::RunPipeline(CompilerPass::kJIT, &pass_state); |
619 | } |
620 | |
621 | ASSERT(pass_state.inline_id_to_function.length() == |
622 | pass_state.caller_inline_id.length()); |
623 | compiler::ObjectPoolBuilder object_pool_builder; |
624 | compiler::Assembler assembler(&object_pool_builder, use_far_branches); |
625 | FlowGraphCompiler graph_compiler( |
626 | &assembler, flow_graph, *parsed_function(), optimized(), |
627 | &speculative_policy, pass_state.inline_id_to_function, |
628 | pass_state.inline_id_to_token_pos, pass_state.caller_inline_id, |
629 | ic_data_array); |
630 | { |
631 | TIMELINE_DURATION(thread(), CompilerVerbose, "CompileGraph" ); |
632 | graph_compiler.CompileGraph(); |
633 | } |
634 | { |
635 | TIMELINE_DURATION(thread(), CompilerVerbose, "FinalizeCompilation" ); |
636 | |
637 | auto install_code_fun = [&]() { |
638 | *result = |
639 | FinalizeCompilation(&assembler, &graph_compiler, flow_graph); |
640 | }; |
641 | |
642 | if (Compiler::IsBackgroundCompilation()) { |
643 | CheckIfBackgroundCompilerIsBeingStopped(optimized()); |
644 | } |
645 | |
646 | // We have to ensure no mutators are running, because: |
647 | // |
648 | // a) We allocate an instructions object, which might cause us to |
649 | // temporarily flip page protections (RX -> RW -> RX). |
650 | // |
651 | // b) We have to ensure the code generated does not violate |
652 | // assumptions (e.g. CHA, field guards), the validation has to |
653 | // happen while mutator is stopped. |
654 | // |
      // c) We update the [Function] object with a new [Code] which
656 | // requires updating several pointers: We have to ensure all of |
657 | // those writes are observed atomically. |
658 | // |
659 | thread()->isolate_group()->RunWithStoppedMutators( |
660 | install_code_fun, /*use_force_growth=*/true); |
661 | } |
662 | if (!result->IsNull()) { |
663 | // Must be called outside of safepoint. |
664 | Code::NotifyCodeObservers(function, *result, optimized()); |
665 | |
666 | #if !defined(PRODUCT) |
667 | if (!function.HasOptimizedCode()) { |
668 | isolate()->debugger()->NotifyCompilation(function); |
669 | } |
670 | #endif |
671 | if (FLAG_disassemble && FlowGraphPrinter::ShouldPrint(function)) { |
672 | Disassembler::DisassembleCode(function, *result, optimized()); |
673 | } else if (FLAG_disassemble_optimized && optimized() && |
674 | FlowGraphPrinter::ShouldPrint(function)) { |
675 | Disassembler::DisassembleCode(function, *result, true); |
676 | } |
677 | } |
678 | // Exit the loop and the function with the correct result value. |
679 | done = true; |
680 | } else { |
681 | // We bailed out or we encountered an error. |
682 | const Error& error = Error::Handle(thread()->StealStickyError()); |
683 | |
684 | if (error.raw() == Object::branch_offset_error().raw()) { |
685 | // Compilation failed due to an out of range branch offset in the |
686 | // assembler. We try again (done = false) with far branches enabled. |
687 | done = false; |
688 | ASSERT(!use_far_branches); |
689 | use_far_branches = true; |
690 | } else if (error.raw() == Object::speculative_inlining_error().raw()) { |
691 | // Can only happen with precompilation. |
692 | UNREACHABLE(); |
693 | } else { |
694 | // If the error isn't due to an out of range branch offset, we don't |
695 | // try again (done = true). |
696 | if (FLAG_trace_bailout) { |
697 | THR_Print("%s\n" , error.ToErrorCString()); |
698 | } |
699 | if (!Compiler::IsBackgroundCompilation() && error.IsLanguageError() && |
700 | (LanguageError::Cast(error).kind() == Report::kBailout)) { |
          // If it is not a background compilation, discard the error if it
          // was not a real error but just a bailout. If it is a background
          // compilation, this will be dealt with in the caller.
704 | } else { |
705 | // Otherwise, continue propagating unless we will try again. |
706 | thread()->set_sticky_error(error); |
707 | } |
708 | done = true; |
709 | } |
710 | } |
711 | } |
712 | return result->raw(); |
713 | } |
714 | |
715 | static ObjectPtr CompileFunctionHelper(CompilationPipeline* pipeline, |
716 | const Function& function, |
717 | volatile bool optimized, |
718 | intptr_t osr_id) { |
719 | ASSERT(!FLAG_precompiled_mode); |
720 | ASSERT(!optimized || function.WasCompiled() || function.ForceOptimize()); |
721 | ASSERT(function.is_background_optimizable() || |
722 | !Compiler::IsBackgroundCompilation()); |
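  // Force-optimized functions always go through the optimizing pipeline,
  // even on their first compilation.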
723 | if (function.ForceOptimize()) optimized = true; |
724 | LongJumpScope jump; |
725 | if (setjmp(*jump.Set()) == 0) { |
726 | Thread* const thread = Thread::Current(); |
727 | StackZone stack_zone(thread); |
728 | Zone* const zone = stack_zone.GetZone(); |
729 | const bool trace_compiler = |
730 | FLAG_trace_compiler || (FLAG_trace_optimizing_compiler && optimized); |
    Timer per_compile_timer(trace_compiler, "Compilation time");
732 | per_compile_timer.Start(); |
733 | |
734 | ParsedFunction* parsed_function = new (zone) |
735 | ParsedFunction(thread, Function::ZoneHandle(zone, function.raw())); |
736 | if (trace_compiler) { |
737 | const intptr_t token_size = |
738 | function.end_token_pos().Pos() - function.token_pos().Pos(); |
739 | THR_Print("Compiling %s%sfunction %s: '%s' @ token %s, size %" Pd "\n" , |
740 | (osr_id == Compiler::kNoOSRDeoptId ? "" : "osr " ), |
741 | (optimized ? "optimized " : "" ), |
742 | (Compiler::IsBackgroundCompilation() ? "(background)" : "" ), |
743 | function.ToFullyQualifiedCString(), |
744 | function.token_pos().ToCString(), token_size); |
745 | } |
746 | // Makes sure no classes are loaded during parsing in background. |
747 | { |
748 | HANDLESCOPE(thread); |
749 | pipeline->ParseFunction(parsed_function); |
750 | } |
751 | |
752 | CompileParsedFunctionHelper helper(parsed_function, optimized, osr_id); |
753 | |
754 | const Code& result = Code::Handle(helper.Compile(pipeline)); |
755 | |
756 | if (result.IsNull()) { |
757 | const Error& error = Error::Handle(thread->StealStickyError()); |
758 | |
759 | if (Compiler::IsBackgroundCompilation()) { |
760 | // Try again later, background compilation may abort because of |
761 | // state change during compilation. |
762 | if (FLAG_trace_compiler) { |
763 | THR_Print("Aborted background compilation: %s\n" , |
764 | function.ToFullyQualifiedCString()); |
765 | } |
766 | |
767 | // We got an error during compilation. |
768 | // If it was a bailout, then disable optimization. |
769 | if (error.raw() == Object::background_compilation_error().raw()) { |
770 | if (FLAG_trace_compiler) { |
771 | THR_Print( |
772 | "--> disabling background optimizations for '%s' (will " |
773 | "try to re-compile on isolate thread again)\n" , |
774 | function.ToFullyQualifiedCString()); |
775 | } |
776 | |
777 | // Ensure we don't attempt to re-compile the function on the |
778 | // background compiler. |
779 | function.set_is_background_optimizable(false); |
780 | |
781 | // Trigger another optimization soon on the main thread. |
782 | function.SetUsageCounter(optimized |
783 | ? FLAG_optimization_counter_threshold |
784 | : FLAG_compilation_counter_threshold); |
785 | return Error::null(); |
786 | } else if (error.IsLanguageError() && |
787 | LanguageError::Cast(error).kind() == Report::kBailout) { |
788 | if (FLAG_trace_compiler) { |
789 | THR_Print("--> disabling optimizations for '%s'\n" , |
790 | function.ToFullyQualifiedCString()); |
791 | } |
792 | function.SetIsOptimizable(false); |
793 | return Error::null(); |
794 | } else { |
795 | // The background compiler does not execute Dart code or handle |
796 | // isolate messages. |
797 | ASSERT(!error.IsUnwindError()); |
798 | return error.raw(); |
799 | } |
800 | } |
801 | if (optimized) { |
802 | if (error.IsLanguageError() && |
803 | LanguageError::Cast(error).kind() == Report::kBailout) { |
804 | // Functions which cannot deoptimize should never bail out. |
805 | ASSERT(!function.ForceOptimize()); |
806 | // Optimizer bailed out. Disable optimizations and never try again. |
807 | if (trace_compiler) { |
808 | THR_Print("--> disabling optimizations for '%s'\n" , |
809 | function.ToFullyQualifiedCString()); |
810 | } else if (FLAG_trace_failed_optimization_attempts) { |
811 | THR_Print("Cannot optimize: %s\n" , |
812 | function.ToFullyQualifiedCString()); |
813 | } |
814 | function.SetIsOptimizable(false); |
815 | return Error::null(); |
816 | } |
817 | return error.raw(); |
818 | } else { |
819 | ASSERT(!optimized); |
      // The non-optimizing compiler can get an unhandled exception due to
      // OOM or stack overflow errors; it should not, however, bail out.
823 | ASSERT(error.IsUnhandledException() || error.IsUnwindError() || |
824 | (error.IsLanguageError() && |
825 | LanguageError::Cast(error).kind() != Report::kBailout)); |
826 | return error.raw(); |
827 | } |
828 | UNREACHABLE(); |
829 | } |
830 | |
831 | per_compile_timer.Stop(); |
832 | |
833 | if (trace_compiler) { |
834 | const auto& code = Code::Handle(function.CurrentCode()); |
835 | THR_Print("--> '%s' entry: %#" Px " size: %" Pd " time: %" Pd64 " us\n" , |
836 | function.ToFullyQualifiedCString(), code.PayloadStart(), |
837 | code.Size(), per_compile_timer.TotalElapsedTime()); |
838 | } |
839 | |
840 | return result.raw(); |
841 | } else { |
842 | Thread* const thread = Thread::Current(); |
843 | StackZone stack_zone(thread); |
844 | // We got an error during compilation or it is a bailout from background |
845 | // compilation (e.g., during parsing with EnsureIsFinalized). |
846 | const Error& error = Error::Handle(thread->StealStickyError()); |
847 | if (error.raw() == Object::background_compilation_error().raw()) { |
848 | // Exit compilation, retry it later. |
849 | if (FLAG_trace_bailout) { |
850 | THR_Print("Aborted background compilation: %s\n" , |
851 | function.ToFullyQualifiedCString()); |
852 | } |
853 | return Object::null(); |
854 | } |
855 | // Do not attempt to optimize functions that can cause errors. |
856 | function.set_is_optimizable(false); |
857 | return error.raw(); |
858 | } |
859 | UNREACHABLE(); |
860 | return Object::null(); |
861 | } |
862 | |
863 | ObjectPtr Compiler::CompileFunction(Thread* thread, const Function& function) { |
864 | #if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32) |
865 | RELEASE_ASSERT(!FLAG_precompiled_mode); |
866 | #endif |
867 | |
868 | #if defined(DART_PRECOMPILED_RUNTIME) |
869 | FATAL3("Precompilation missed function %s (%s, %s)\n" , |
870 | function.ToLibNamePrefixedQualifiedCString(), |
871 | function.token_pos().ToCString(), |
872 | Function::KindToCString(function.kind())); |
873 | #endif // defined(DART_PRECOMPILED_RUNTIME) |
874 | |
875 | VMTagScope tagScope(thread, VMTag::kCompileUnoptimizedTagId); |
876 | #if defined(SUPPORT_TIMELINE) |
  const char* event_name;
  if (IsBackgroundCompilation()) {
    event_name = "CompileFunctionUnoptimizedBackground";
  } else {
    event_name = "CompileFunction";
  }
883 | TIMELINE_FUNCTION_COMPILATION_DURATION(thread, event_name, function); |
884 | #endif // defined(SUPPORT_TIMELINE) |
885 | |
886 | CompilationPipeline* pipeline = |
887 | CompilationPipeline::New(thread->zone(), function); |
888 | |
889 | const bool optimized = function.ForceOptimize(); |
890 | return CompileFunctionHelper(pipeline, function, optimized, kNoOSRDeoptId); |
891 | } |
892 | |
893 | ErrorPtr Compiler::EnsureUnoptimizedCode(Thread* thread, |
894 | const Function& function) { |
895 | ASSERT(!function.ForceOptimize()); |
896 | if (function.unoptimized_code() != Object::null()) { |
897 | return Error::null(); |
898 | } |
899 | Code& original_code = Code::ZoneHandle(thread->zone()); |
900 | if (function.HasCode()) { |
901 | original_code = function.CurrentCode(); |
902 | } |
903 | CompilationPipeline* pipeline = |
904 | CompilationPipeline::New(thread->zone(), function); |
905 | const Object& result = Object::Handle( |
906 | CompileFunctionHelper(pipeline, function, false, /* not optimized */ |
907 | kNoOSRDeoptId)); |
908 | if (result.IsError()) { |
909 | return Error::Cast(result).raw(); |
910 | } |
  // Since CompileFunctionHelper replaces the current code, re-attach the
  // original code if the function was already compiled.
913 | if (!original_code.IsNull() && result.raw() == function.CurrentCode() && |
914 | !original_code.IsDisabled()) { |
915 | function.AttachCode(original_code); |
916 | } |
917 | ASSERT(function.unoptimized_code() != Object::null()); |
918 | ASSERT(function.unoptimized_code() == result.raw()); |
919 | if (FLAG_trace_compiler) { |
920 | THR_Print("Ensure unoptimized code for %s\n" , function.ToCString()); |
921 | } |
922 | return Error::null(); |
923 | } |
924 | |
925 | ObjectPtr Compiler::CompileOptimizedFunction(Thread* thread, |
926 | const Function& function, |
927 | intptr_t osr_id) { |
928 | VMTagScope tagScope(thread, VMTag::kCompileOptimizedTagId); |
929 | #if defined(SUPPORT_TIMELINE) |
  const char* event_name;
  if (osr_id != kNoOSRDeoptId) {
    event_name = "CompileFunctionOptimizedOSR";
  } else if (IsBackgroundCompilation()) {
    event_name = "CompileFunctionOptimizedBackground";
  } else {
    event_name = "CompileFunctionOptimized";
  }
938 | TIMELINE_FUNCTION_COMPILATION_DURATION(thread, event_name, function); |
939 | #endif // defined(SUPPORT_TIMELINE) |
940 | |
941 | ASSERT(function.ShouldCompilerOptimize()); |
942 | |
943 | CompilationPipeline* pipeline = |
944 | CompilationPipeline::New(thread->zone(), function); |
945 | return CompileFunctionHelper(pipeline, function, /* optimized = */ true, |
946 | osr_id); |
947 | } |
948 | |
949 | void Compiler::ComputeLocalVarDescriptors(const Code& code) { |
950 | ASSERT(!code.is_optimized()); |
951 | ASSERT(!FLAG_precompiled_mode); |
952 | const Function& function = Function::Handle(code.function()); |
953 | ASSERT(code.var_descriptors() == Object::null()); |
  // Irregexp functions generate their var descriptors eagerly.
955 | ASSERT(!function.IsIrregexpFunction()); |
  // In background compilation, the parser can produce 'errors': bailouts
  // if the state changed while compiling in the background.
958 | Thread* thread = Thread::Current(); |
959 | Zone* zone = thread->zone(); |
960 | CompilerState state(thread, /*is_aot=*/false); |
961 | LongJumpScope jump; |
962 | if (setjmp(*jump.Set()) == 0) { |
963 | ParsedFunction* parsed_function = |
964 | new ParsedFunction(thread, Function::ZoneHandle(zone, function.raw())); |
965 | ZoneGrowableArray<const ICData*>* ic_data_array = |
966 | new ZoneGrowableArray<const ICData*>(); |
967 | ZoneGrowableArray<intptr_t>* context_level_array = |
968 | new ZoneGrowableArray<intptr_t>(); |
969 | |
970 | kernel::FlowGraphBuilder builder( |
971 | parsed_function, ic_data_array, context_level_array, |
972 | /* not inlining */ NULL, false, Compiler::kNoOSRDeoptId); |
973 | builder.BuildGraph(); |
974 | |
975 | auto& var_descs = LocalVarDescriptors::Handle(zone); |
976 | |
977 | if (function.is_declared_in_bytecode()) { |
978 | if (function.HasBytecode()) { |
979 | const auto& bytecode = Bytecode::Handle(zone, function.bytecode()); |
980 | var_descs = bytecode.GetLocalVarDescriptors(); |
981 | LocalVarDescriptorsBuilder builder; |
982 | builder.AddDeoptIdToContextLevelMappings(context_level_array); |
983 | builder.AddAll(zone, var_descs); |
984 | var_descs = builder.Done(); |
985 | } else { |
986 | var_descs = Object::empty_var_descriptors().raw(); |
987 | } |
988 | } else { |
989 | var_descs = parsed_function->scope()->GetVarDescriptors( |
990 | function, context_level_array); |
991 | } |
992 | |
993 | ASSERT(!var_descs.IsNull()); |
994 | code.set_var_descriptors(var_descs); |
995 | } else { |
996 | // Only possible with background compilation. |
997 | ASSERT(Compiler::IsBackgroundCompilation()); |
998 | } |
999 | } |
1000 | |
1001 | ErrorPtr Compiler::CompileAllFunctions(const Class& cls) { |
1002 | Thread* thread = Thread::Current(); |
1003 | Zone* zone = thread->zone(); |
1004 | Object& result = Object::Handle(zone); |
1005 | Array& functions = Array::Handle(zone, cls.functions()); |
1006 | Function& func = Function::Handle(zone); |
1007 | // Compile all the regular functions. |
1008 | for (int i = 0; i < functions.Length(); i++) { |
1009 | func ^= functions.At(i); |
1010 | ASSERT(!func.IsNull()); |
1011 | if (!func.HasCode() && !func.is_abstract() && |
1012 | !func.IsRedirectingFactory()) { |
1013 | result = CompileFunction(thread, func); |
1014 | if (result.IsError()) { |
1015 | return Error::Cast(result).raw(); |
1016 | } |
1017 | ASSERT(!result.IsNull()); |
1018 | } |
1019 | } |
1020 | return Error::null(); |
1021 | } |
1022 | |
1023 | ErrorPtr Compiler::ReadAllBytecode(const Class& cls) { |
1024 | Thread* thread = Thread::Current(); |
1025 | ASSERT(thread->IsMutatorThread()); |
1026 | Zone* zone = thread->zone(); |
1027 | Error& error = Error::Handle(zone, cls.EnsureIsFinalized(thread)); |
1028 | ASSERT(error.IsNull()); |
1029 | Array& functions = Array::Handle(zone, cls.functions()); |
1030 | Function& func = Function::Handle(zone); |
  // Read the bytecode of all the regular functions.
1032 | for (int i = 0; i < functions.Length(); i++) { |
1033 | func ^= functions.At(i); |
1034 | ASSERT(!func.IsNull()); |
1035 | if (func.IsBytecodeAllowed(zone) && !func.HasBytecode() && |
1036 | !func.HasCode()) { |
1037 | ErrorPtr error = |
1038 | kernel::BytecodeReader::ReadFunctionBytecode(thread, func); |
1039 | if (error != Error::null()) { |
1040 | return error; |
1041 | } |
1042 | } |
1043 | } |
1044 | return Error::null(); |
1045 | } |
1046 | |
1047 | void Compiler::AbortBackgroundCompilation(intptr_t deopt_id, const char* msg) { |
1048 | if (FLAG_trace_compiler) { |
1049 | THR_Print("ABORT background compilation: %s\n" , msg); |
1050 | } |
1051 | #if !defined(PRODUCT) |
1052 | TimelineStream* stream = Timeline::GetCompilerStream(); |
1053 | ASSERT(stream != NULL); |
1054 | TimelineEvent* event = stream->StartEvent(); |
1055 | if (event != NULL) { |
1056 | event->Instant("AbortBackgroundCompilation" ); |
1057 | event->SetNumArguments(1); |
1058 | event->CopyArgument(0, "reason" , msg); |
1059 | event->Complete(); |
1060 | } |
1061 | #endif // !defined(PRODUCT) |
1062 | ASSERT(Compiler::IsBackgroundCompilation()); |
1063 | Thread::Current()->long_jump_base()->Jump( |
1064 | deopt_id, Object::background_compilation_error()); |
1065 | } |
1066 | |
1067 | // C-heap allocated background compilation queue element. |
1068 | class QueueElement { |
1069 | public: |
1070 | explicit QueueElement(const Function& function) |
1071 | : next_(NULL), function_(function.raw()) {} |
1072 | |
1073 | virtual ~QueueElement() { |
1074 | next_ = NULL; |
1075 | function_ = Function::null(); |
1076 | } |
1077 | |
1078 | FunctionPtr Function() const { return function_; } |
1079 | |
1080 | void set_next(QueueElement* elem) { next_ = elem; } |
1081 | QueueElement* next() const { return next_; } |
1082 | |
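  // Raw accessors: function_ptr() is visited by
  // BackgroundCompilationQueue::VisitObjectPointers so the queued function
  // stays alive across GCs.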
1083 | ObjectPtr function() const { return function_; } |
1084 | ObjectPtr* function_ptr() { return reinterpret_cast<ObjectPtr*>(&function_); } |
1085 | |
1086 | private: |
1087 | QueueElement* next_; |
1088 | FunctionPtr function_; |
1089 | |
1090 | DISALLOW_COPY_AND_ASSIGN(QueueElement); |
1091 | }; |
1092 | |
1093 | // Allocated in C-heap. Handles both input and output of background compilation. |
1094 | // It implements a FIFO queue, using Peek, Add, Remove operations. |
1095 | class BackgroundCompilationQueue { |
1096 | public: |
1097 | BackgroundCompilationQueue() : first_(NULL), last_(NULL) {} |
1098 | virtual ~BackgroundCompilationQueue() { Clear(); } |
1099 | |
1100 | void VisitObjectPointers(ObjectPointerVisitor* visitor) { |
1101 | ASSERT(visitor != NULL); |
1102 | QueueElement* p = first_; |
1103 | while (p != NULL) { |
1104 | visitor->VisitPointer(p->function_ptr()); |
1105 | p = p->next(); |
1106 | } |
1107 | } |
1108 | |
1109 | bool IsEmpty() const { return first_ == NULL; } |
1110 | |
1111 | void Add(QueueElement* value) { |
1112 | ASSERT(value != NULL); |
1113 | ASSERT(value->next() == NULL); |
1114 | if (first_ == NULL) { |
1115 | first_ = value; |
1116 | ASSERT(last_ == NULL); |
1117 | } else { |
1118 | ASSERT(last_ != NULL); |
1119 | last_->set_next(value); |
1120 | } |
1121 | last_ = value; |
1122 | ASSERT(first_ != NULL && last_ != NULL); |
1123 | } |
1124 | |
1125 | QueueElement* Peek() const { return first_; } |
1126 | |
1127 | FunctionPtr PeekFunction() const { |
1128 | QueueElement* e = Peek(); |
1129 | if (e == NULL) { |
1130 | return Function::null(); |
1131 | } else { |
1132 | return e->Function(); |
1133 | } |
1134 | } |
1135 | |
1136 | QueueElement* Remove() { |
1137 | ASSERT(first_ != NULL); |
1138 | QueueElement* result = first_; |
1139 | first_ = first_->next(); |
1140 | if (first_ == NULL) { |
1141 | last_ = NULL; |
1142 | } |
1143 | return result; |
1144 | } |
1145 | |
1146 | bool ContainsObj(const Object& obj) const { |
1147 | QueueElement* p = first_; |
1148 | while (p != NULL) { |
1149 | if (p->function() == obj.raw()) { |
1150 | return true; |
1151 | } |
1152 | p = p->next(); |
1153 | } |
1154 | return false; |
1155 | } |
1156 | |
1157 | void Clear() { |
1158 | while (!IsEmpty()) { |
1159 | QueueElement* e = Remove(); |
1160 | delete e; |
1161 | } |
1162 | ASSERT((first_ == NULL) && (last_ == NULL)); |
1163 | } |
1164 | |
1165 | private: |
1166 | QueueElement* first_; |
1167 | QueueElement* last_; |
1168 | |
1169 | DISALLOW_COPY_AND_ASSIGN(BackgroundCompilationQueue); |
1170 | }; |
1171 | |
1172 | BackgroundCompiler::BackgroundCompiler(Isolate* isolate, bool optimizing) |
1173 | : isolate_(isolate), |
1174 | queue_monitor_(), |
1175 | function_queue_(new BackgroundCompilationQueue()), |
1176 | done_monitor_(), |
1177 | running_(false), |
1178 | done_(true), |
1179 | optimizing_(optimizing), |
1180 | disabled_depth_(0) {} |
1181 | |
// Compilation is stopped in ::Stop(); the destructor only frees the queue.
1183 | BackgroundCompiler::~BackgroundCompiler() { |
1184 | delete function_queue_; |
1185 | } |
1186 | |
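// Worker loop of the background compiler: enter the isolate as a helper
// thread, drain the queue (re-queueing optimizable functions that still lack
// optimized code), then wait on the queue monitor until more work arrives or
// the compiler is stopped.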
1187 | void BackgroundCompiler::Run() { |
1188 | while (running_) { |
1189 | // Maybe something is already in the queue, check first before waiting |
1190 | // to be notified. |
1191 | bool result = Thread::EnterIsolateAsHelper(isolate_, Thread::kCompilerTask); |
1192 | ASSERT(result); |
1193 | { |
1194 | Thread* thread = Thread::Current(); |
1195 | StackZone stack_zone(thread); |
1196 | Zone* zone = stack_zone.GetZone(); |
1197 | HANDLESCOPE(thread); |
1198 | Function& function = Function::Handle(zone); |
1199 | { |
1200 | MonitorLocker ml(&queue_monitor_); |
1201 | if (running_) { |
1202 | function = function_queue()->PeekFunction(); |
1203 | } |
1204 | } |
1205 | while (!function.IsNull()) { |
1206 | if (is_optimizing()) { |
1207 | Compiler::CompileOptimizedFunction(thread, function, |
1208 | Compiler::kNoOSRDeoptId); |
1209 | } else { |
1210 | ASSERT(FLAG_enable_interpreter); |
1211 | Compiler::CompileFunction(thread, function); |
1212 | } |
1213 | |
1214 | QueueElement* qelem = NULL; |
1215 | { |
1216 | MonitorLocker ml(&queue_monitor_); |
1217 | if (!running_ || function_queue()->IsEmpty()) { |
1218 | // We are shutting down, queue was cleared. |
1219 | function = Function::null(); |
1220 | } else { |
1221 | qelem = function_queue()->Remove(); |
1222 | const Function& old = Function::Handle(qelem->Function()); |
1223 | // If an optimizable method is not optimized, put it back on |
1224 | // the background queue (unless it was passed to foreground). |
1225 | if ((is_optimizing() && !old.HasOptimizedCode() && |
1226 | old.IsOptimizable()) || |
1227 | FLAG_stress_test_background_compilation) { |
1228 | if (old.is_background_optimizable() && |
1229 | Compiler::CanOptimizeFunction(thread, old)) { |
1230 | QueueElement* repeat_qelem = new QueueElement(old); |
1231 | function_queue()->Add(repeat_qelem); |
1232 | } |
1233 | } |
1234 | function = function_queue()->PeekFunction(); |
1235 | } |
1236 | } |
1237 | if (qelem != NULL) { |
1238 | delete qelem; |
1239 | } |
1240 | } |
1241 | } |
1242 | Thread::ExitIsolateAsHelper(); |
1243 | { |
1244 | // Wait to be notified when the work queue is not empty. |
1245 | MonitorLocker ml(&queue_monitor_); |
1246 | while (function_queue()->IsEmpty() && running_) { |
1247 | ml.Wait(); |
1248 | } |
1249 | } |
1250 | } // while running |
1251 | |
1252 | { |
1253 | // Notify that the thread is done. |
1254 | MonitorLocker ml_done(&done_monitor_); |
1255 | done_ = true; |
1256 | ml_done.Notify(); |
1257 | } |
1258 | } |
1259 | |
1260 | void BackgroundCompiler::Compile(const Function& function) { |
1261 | ASSERT(Thread::Current()->IsMutatorThread()); |
1262 | MonitorLocker ml(&queue_monitor_); |
1263 | ASSERT(running_); |
1264 | if (function_queue()->ContainsObj(function)) { |
1265 | return; |
1266 | } |
1267 | QueueElement* elem = new QueueElement(function); |
1268 | function_queue()->Add(elem); |
1269 | ml.Notify(); |
1270 | } |
1271 | |
1272 | void BackgroundCompiler::VisitPointers(ObjectPointerVisitor* visitor) { |
1273 | function_queue_->VisitObjectPointers(visitor); |
1274 | } |
1275 | |
1276 | class BackgroundCompilerTask : public ThreadPool::Task { |
1277 | public: |
1278 | explicit BackgroundCompilerTask(BackgroundCompiler* background_compiler) |
1279 | : background_compiler_(background_compiler) {} |
1280 | virtual ~BackgroundCompilerTask() {} |
1281 | |
1282 | private: |
1283 | virtual void Run() { background_compiler_->Run(); } |
1284 | |
1285 | BackgroundCompiler* background_compiler_; |
1286 | |
1287 | DISALLOW_COPY_AND_ASSIGN(BackgroundCompilerTask); |
1288 | }; |
1289 | |
1290 | void BackgroundCompiler::Start() { |
1291 | Thread* thread = Thread::Current(); |
1292 | ASSERT(thread->IsMutatorThread()); |
1293 | ASSERT(!thread->IsAtSafepoint()); |
1294 | |
1295 | MonitorLocker ml(&done_monitor_); |
1296 | if (running_ || !done_) return; |
1297 | running_ = true; |
1298 | done_ = false; |
1299 | // If we ever wanted to run the BG compiler on the |
1300 | // `IsolateGroup::mutator_pool()` we would need to ensure the BG compiler |
1301 | // stops when it's idle - otherwise the [MutatorThreadPool]-based idle |
1302 | // notification would not work anymore. |
1303 | bool task_started = Dart::thread_pool()->Run<BackgroundCompilerTask>(this); |
1304 | if (!task_started) { |
1305 | running_ = false; |
1306 | done_ = true; |
1307 | } |
1308 | } |
1309 | |
1310 | void BackgroundCompiler::Stop() { |
1311 | Thread* thread = Thread::Current(); |
1312 | ASSERT(thread->IsMutatorThread()); |
1313 | ASSERT(!thread->IsAtSafepoint()); |
1314 | |
1315 | { |
1316 | MonitorLocker ml(&queue_monitor_); |
1317 | running_ = false; |
1318 | function_queue_->Clear(); |
1319 | ml.Notify(); // Stop waiting for the queue. |
1320 | } |
1321 | |
1322 | { |
1323 | MonitorLocker ml_done(&done_monitor_); |
1324 | while (!done_) { |
1325 | ml_done.WaitWithSafepointCheck(thread); |
1326 | } |
1327 | } |
1328 | } |
1329 | |
1330 | void BackgroundCompiler::Enable() { |
1331 | disabled_depth_--; |
1332 | if (disabled_depth_ < 0) { |
1333 | FATAL("Mismatched number of calls to BackgroundCompiler::Enable/Disable." ); |
1334 | } |
1335 | } |
1336 | |
1337 | void BackgroundCompiler::Disable() { |
1338 | Stop(); |
1339 | disabled_depth_++; |
1340 | } |
1341 | |
1342 | bool BackgroundCompiler::IsDisabled() { |
1343 | return disabled_depth_ > 0; |
1344 | } |
1345 | |
1346 | #else // DART_PRECOMPILED_RUNTIME |
1347 | |
1348 | CompilationPipeline* CompilationPipeline::New(Zone* zone, |
1349 | const Function& function) { |
1350 | UNREACHABLE(); |
1351 | return NULL; |
1352 | } |
1353 | |
1354 | DEFINE_RUNTIME_ENTRY(CompileFunction, 1) { |
1355 | const Function& function = Function::CheckedHandle(zone, arguments.ArgAt(0)); |
1356 | FATAL3("Precompilation missed function %s (%" Pd ", %s)\n" , |
1357 | function.ToLibNamePrefixedQualifiedCString(), |
1358 | function.token_pos().value(), |
1359 | Function::KindToCString(function.kind())); |
1360 | } |
1361 | |
1362 | bool Compiler::IsBackgroundCompilation() { |
1363 | return false; |
1364 | } |
1365 | |
1366 | bool Compiler::CanOptimizeFunction(Thread* thread, const Function& function) { |
1367 | UNREACHABLE(); |
1368 | return false; |
1369 | } |
1370 | |
1371 | ObjectPtr Compiler::CompileFunction(Thread* thread, const Function& function) { |
1372 | FATAL1("Attempt to compile function %s" , function.ToCString()); |
1373 | return Error::null(); |
1374 | } |
1375 | |
1376 | ErrorPtr Compiler::EnsureUnoptimizedCode(Thread* thread, |
1377 | const Function& function) { |
1378 | FATAL1("Attempt to compile function %s" , function.ToCString()); |
1379 | return Error::null(); |
1380 | } |
1381 | |
1382 | ObjectPtr Compiler::CompileOptimizedFunction(Thread* thread, |
1383 | const Function& function, |
1384 | intptr_t osr_id) { |
1385 | FATAL1("Attempt to compile function %s" , function.ToCString()); |
1386 | return Error::null(); |
1387 | } |
1388 | |
1389 | void Compiler::ComputeLocalVarDescriptors(const Code& code) { |
1390 | UNREACHABLE(); |
1391 | } |
1392 | |
1393 | ErrorPtr Compiler::CompileAllFunctions(const Class& cls) { |
1394 | FATAL1("Attempt to compile class %s" , cls.ToCString()); |
1395 | return Error::null(); |
1396 | } |
1397 | |
1398 | void Compiler::AbortBackgroundCompilation(intptr_t deopt_id, const char* msg) { |
1399 | UNREACHABLE(); |
1400 | } |
1401 | |
1402 | void BackgroundCompiler::Compile(const Function& function) { |
1403 | UNREACHABLE(); |
1404 | } |
1405 | |
1406 | void BackgroundCompiler::VisitPointers(ObjectPointerVisitor* visitor) { |
1407 | UNREACHABLE(); |
1408 | } |
1409 | |
1410 | void BackgroundCompiler::Start() { |
1411 | UNREACHABLE(); |
1412 | } |
1413 | |
1414 | void BackgroundCompiler::Stop() { |
1415 | UNREACHABLE(); |
1416 | } |
1417 | |
1418 | void BackgroundCompiler::Enable() { |
1419 | UNREACHABLE(); |
1420 | } |
1421 | |
1422 | void BackgroundCompiler::Disable() { |
1423 | UNREACHABLE(); |
1424 | } |
1425 | |
1426 | bool BackgroundCompiler::IsDisabled() { |
1427 | UNREACHABLE(); |
1428 | return true; |
1429 | } |
1430 | |
1431 | #endif // DART_PRECOMPILED_RUNTIME |
1432 | |
1433 | } // namespace dart |
1434 | |