/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/rframe.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/tieredThresholdPolicy.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"

#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
CompilationPolicy* CompilationPolicy::_policy;

// Determine compilation policy based on command line argument
void compilationPolicy_init() {
  switch (CompilationPolicyChoice) {
  case 0:
    CompilationPolicy::set_policy(new SimpleCompPolicy());
    break;

  case 1:
#ifdef COMPILER2
    CompilationPolicy::set_policy(new StackWalkCompPolicy());
#else
    Unimplemented();
#endif
    break;
  case 2:
#ifdef TIERED
    CompilationPolicy::set_policy(new TieredThresholdPolicy());
#else
    Unimplemented();
#endif
    break;
  default:
    fatal("CompilationPolicyChoice must be in the range: [0-2]");
  }
  CompilationPolicy::policy()->initialize();
}
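
// For reference, a sketch of how the choices map to command lines (this
// assumes the usual argument ergonomics, which select CompilationPolicyChoice=2
// whenever TieredCompilation is enabled):
//
//   java -XX:-TieredCompilation -XX:CompilationPolicyChoice=0 ...  // SimpleCompPolicy
//   java -XX:-TieredCompilation -XX:CompilationPolicyChoice=1 ...  // StackWalkCompPolicy (COMPILER2 builds only)
//   java -XX:+TieredCompilation ...                                // TieredThresholdPolicy (TIERED builds only)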

// Returns true if m must be compiled before executing it.
// This is intended to force compiles for methods (usually for
// debugging) that would otherwise be interpreted for some reason.
bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  // Don't allow -Xcomp to cause compiles in replay mode
  if (ReplayCompiles) return false;

  if (m->has_compiled_code()) return false;       // already compiled
  if (!can_be_compiled(m, comp_level)) return false;

  return !UseInterpreter ||                                                                        // must compile all methods
         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}
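
// Note: -Xcomp turns UseInterpreter off, so under -Xcomp the first clause
// above makes every compilable method subject to forced compilation before
// its first execution.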

void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
  if (must_be_compiled(selected_method)) {
    // This path is unusual, mostly used by the '-Xcomp' stress test mode.

    // Note: with several active threads, must_be_compiled may be true
    // while can_be_compiled is false, so the assert below had to be removed:
    // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
    if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
      // don't force compilation, resolve was on behalf of compiler
      return;
    }
    if (selected_method->method_holder()->is_not_initialized()) {
      // 'is_not_initialized' means not only '!is_initialized', but also that
      // initialization has not been started yet ('!being_initialized').
      // Do not force compilation of methods in uninitialized classes;
      // doing so would trip an assert later, in CompileBroker::compile_method.
      // We sometimes use the link resolver to do reflective lookups
      // even before classes are initialized.
      return;
    }
    CompileBroker::compile_method(selected_method, InvocationEntryBci,
                                  CompilationPolicy::policy()->initial_compile_level(),
                                  methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK);
  }
}

// Returns true if m is allowed to be compiled
bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
  // allow any levels for WhiteBox
  assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");

  if (m->is_abstract()) return false;
  if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;

  // Math intrinsics should never be compiled, as this can lead to
  // monotonicity problems because the interpreter will prefer the
  // compiled code to the intrinsic version. This can't happen in
  // production because the invocation counter can't be incremented,
  // but we shouldn't expose the system to this problem in testing
  // modes.
  if (!AbstractInterpreter::can_be_compiled(m)) {
    return false;
  }
  if (comp_level == CompLevel_all) {
    if (TieredCompilation) {
      // enough to be compilable at any level for tiered
      return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
    } else {
      // must be compilable at available level for non-tiered
      return !m->is_not_compilable(CompLevel_highest_tier);
    }
  } else if (is_compile(comp_level)) {
    return !m->is_not_compilable(comp_level);
  }
  return false;
}

// Returns true if m is allowed to be osr compiled
bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
  bool result = false;
  if (comp_level == CompLevel_all) {
    if (TieredCompilation) {
      // enough to be osr compilable at any level for tiered
      result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
    } else {
      // must be osr compilable at available level for non-tiered
      result = !m->is_not_osr_compilable(CompLevel_highest_tier);
    }
  } else if (is_compile(comp_level)) {
    result = !m->is_not_osr_compilable(comp_level);
  }
  return (result && can_be_compiled(m, comp_level));
}

bool CompilationPolicy::is_compilation_enabled() {
  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  return CompileBroker::should_compile_new_jobs();
}

CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
  // Remove unloaded methods from the queue
  for (CompileTask* task = compile_queue->first(); task != NULL; ) {
    CompileTask* next = task->next();
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
    }
    task = next;
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler && !BackgroundCompilation) {
    /*
     * In blocking compilation mode, the CompileBroker will make
     * compilations submitted by a JVMCI compiler thread non-blocking. These
     * compilations should be scheduled after all blocking compilations
     * to service non-compiler related compilations sooner and reduce the
     * chance of such compilations timing out.
     */
    for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) {
      if (task->is_blocking()) {
        return task;
      }
    }
  }
#endif
  return compile_queue->first();
}

#ifndef PRODUCT
void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
  if (TraceOnStackReplacement) {
    if (osr_nm == NULL) tty->print_cr("compilation failed");
    else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
  }
}
#endif // !PRODUCT

void NonTieredCompPolicy::initialize() {
  // Setup the compiler thread numbers
  if (CICompilerCountPerCPU) {
    // Example: if CICompilerCountPerCPU is true, then we get
    // max(log2(8)-1, 1) = 2 compiler threads on an 8-way machine.
    // May help big-app startup time.
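    // (For a larger box the same formula gives max(log2(64)-1, 1) = 5
    // compiler threads on a 64-way machine.)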
    _compiler_count = MAX2(log2_int(os::active_processor_count())-1, 1);
    // Make sure there is enough space in the code cache to hold all the compiler buffers
    size_t buffer_size = 1;
#ifdef COMPILER1
    buffer_size = is_client_compilation_mode_vm() ? Compiler::code_buffer_size() : buffer_size;
#endif
#ifdef COMPILER2
    buffer_size = is_server_compilation_mode_vm() ? C2Compiler::initial_code_buffer_size() : buffer_size;
#endif
    int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
    if (_compiler_count > max_count) {
      // Lower the compiler count such that all buffers fit into the code cache
      _compiler_count = MAX2(max_count, 1);
    }
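    // Rationale: each compiler thread needs roughly one initial code buffer's
    // worth of code cache, so the thread count is capped by what fits after
    // reserving CodeCacheMinimumUseSpace (tripled above in debug builds).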
    FLAG_SET_ERGO(CICompilerCount, _compiler_count);
  } else {
    _compiler_count = CICompilerCount;
  }
}

// Note: this policy is used ONLY if TieredCompilation is off.
// compiler_count() behaves the following way:
// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return
//   zero for the c1 compilation levels in server compilation mode runs
//   and c2 compilation levels in client compilation mode runs.
// - with COMPILER2 not defined it should return zero for c2 compilation levels.
// - with COMPILER1 not defined it should return zero for c1 compilation levels.
// - if neither is defined - always return zero.
int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
  assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
  if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||)
      is_client_compilation_mode_vm() && is_c1_compile(comp_level)) {
    return _compiler_count;
  }
  return 0;
}

void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
  // Make sure the invocation and backedge counters don't overflow again right away
  // as would be the case for native methods.

  // BUT also make sure the method doesn't look like it was never executed.
  // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  mcs->invocation_counter()->set_carry();
  mcs->backedge_counter()->set_carry();

  assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
}

void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
  // Delay the next back-branch event but pump up the invocation counter to trigger
  // whole-method compilation.
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  InvocationCounter* i = mcs->invocation_counter();
  InvocationCounter* b = mcs->backedge_counter();

  // Don't set the invocation counter's value too low, otherwise the method will
  // look immature (ic < ~5300), which prevents inlining based on type profiling.
  i->set(i->state(), CompileThreshold);
  // Don't reset the backedge counter too low - it is used to check if an OSR method is ready.
  b->set(b->state(), CompileThreshold / 2);
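  // Net effect (a sketch of the intent): with the invocation counter parked
  // at CompileThreshold, the next method entry overflows it and requests a
  // whole-method compile, while the halved backedge counter pushes the next
  // OSR request roughly half a threshold into the future.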
}

//
// CounterDecay
//
// Iterates through invocation counters and decrements them. This
// is done at each safepoint.
//
class CounterDecay : public AllStatic {
  static jlong _last_timestamp;
  static void do_method(Method* m) {
    MethodCounters* mcs = m->method_counters();
    if (mcs != NULL) {
      mcs->invocation_counter()->decay();
    }
  }
public:
  static void decay();
  static bool is_decay_needed() {
    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
  }
};

jlong CounterDecay::_last_timestamp = 0;

void CounterDecay::decay() {
  _last_timestamp = os::javaTimeMillis();

  // This operation is going to be performed only at the end of a safepoint
  // and hence GCs will not be going on; all Java mutators are suspended
  // at this point and hence the SystemDictionary_lock is also not needed.
  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
  size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
  size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
                                        CounterHalfLifeTime);
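  // Derivation: classes_per_tick = nclasses * (tick interval in seconds /
  // half-life in seconds). Over one CounterHalfLifeTime there are
  // half-life/interval ticks, so the loop below visits every class about
  // once per half-life, i.e. each counter is decayed (halved) roughly once
  // per CounterHalfLifeTime.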
  for (size_t i = 0; i < classes_per_tick; i++) {
    InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
    if (k != NULL) {
      k->methods_do(do_method);
    }
  }
}

// Called at the end of the safepoint
void NonTieredCompPolicy::do_safepoint_work() {
  if (UseCounterDecay && CounterDecay::is_decay_needed()) {
    CounterDecay::decay();
  }
}

void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  ScopeDesc* sd = trap_scope;
  MethodCounters* mcs;
  InvocationCounter* c;
  for (; !sd->is_top(); sd = sd->sender()) {
    mcs = sd->method()->method_counters();
    if (mcs != NULL) {
      // Reset ICs of inlined methods, since they can trigger compilations also.
      mcs->invocation_counter()->reset();
    }
  }
  mcs = sd->method()->method_counters();
  if (mcs != NULL) {
    c = mcs->invocation_counter();
    if (is_osr) {
      // It was an OSR method, so bump the count higher.
      c->set(c->state(), CompileThreshold);
    } else {
      c->reset();
    }
    mcs->backedge_counter()->reset();
  }
}

// This method can be called by any component of the runtime to notify the policy
// that it's recommended to delay the compilation of this method.
void NonTieredCompPolicy::delay_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->decay();
    mcs->backedge_counter()->decay();
  }
}

void NonTieredCompPolicy::disable_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
    mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
  }
}

CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
  return select_task_helper(compile_queue);
}

bool NonTieredCompPolicy::is_mature(Method* method) {
  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "Should be");
  uint current = mdo->mileage_of(method);
  uint initial = mdo->creation_mileage();
  if (current < initial)
    return true;  // some sort of overflow
  uint target;
  if (ProfileMaturityPercentage <= 0)
    target = (uint) -ProfileMaturityPercentage;  // absolute value
  else
    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
  return (current >= initial + target);
}
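
// Worked example (a sketch, assuming the common defaults
// ProfileMaturityPercentage=20 and CompileThreshold=10000): the profile is
// considered mature once the method has accumulated
// 20 * 10000 / 100 = 2000 invocation-plus-backedge counts ("mileage")
// beyond the mileage recorded when the MethodData was created.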

nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
                                    int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
    // If certain JVMTI events (e.g. frame pop event) are requested then the
    // thread is forced to remain in interpreted code. This is
    // implemented partly by a check in the run_compiled_code
    // section of the interpreter whether we should skip running
    // compiled code, and partly by skipping OSR compiles for
    // interpreted-only threads.
    if (bci != InvocationEntryBci) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
  }
  if (ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    if (bci == InvocationEntryBci) {
      reset_counter_for_invocation_event(method);
    } else {
      reset_counter_for_back_branch_event(method);
    }
    return NULL;
  }

  if (bci == InvocationEntryBci) {
    // When the code cache is full, compilation gets switched off and
    // UseCompiler is set to false.
    if (!method->has_compiled_code() && UseCompiler) {
      method_invocation_event(method, thread);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened. (The method_invocation_event call does this also.)
      reset_counter_for_invocation_event(method);
    }
    // Compilation at an invocation overflow no longer goes back and retries
    // the test for a compiled method: we always run the loser of the race as
    // interpreted, so return NULL.
    return NULL;
  } else {
    // counter overflow in a loop => try to do on-stack-replacement
    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
    // When the code cache is full, we should not compile any more...
    if (osr_nm == NULL && UseCompiler) {
      method_back_branch_event(method, bci, thread);
      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    }
    if (osr_nm == NULL) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
    return osr_nm;
  }
  return NULL;
}

#ifndef PRODUCT
void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
  if (TraceInvocationCounterOverflow) {
    MethodCounters* mcs = m->method_counters();
    assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
    InvocationCounter* ic = mcs->invocation_counter();
    InvocationCounter* bc = mcs->backedge_counter();
    ResourceMark rm;
    if (bci == InvocationEntryBci) {
      tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
    } else {
      tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
    }
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (bci != InvocationEntryBci) {
        MethodData* mdo = m->method_data();
        if (mdo != NULL) {
          ProfileData* pd = mdo->bci_to_data(branch_bci);
          if (pd == NULL) {
            tty->print_cr("back branch count = N/A (missing ProfileData)");
          } else {
            tty->print_cr("back branch count = %d", pd->as_JumpData()->taken());
          }
        }
      }
    }
  }
}

void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
  if (TraceOnStackReplacement) {
    ResourceMark rm;
    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
    method->print_short_name(tty);
    tty->print_cr(" at bci %d", bci);
  }
}
#endif // !PRODUCT

// SimpleCompPolicy - compile current method

void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);

  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
    CompiledMethod* nm = m->code();
    if (nm == NULL) {
      CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread);
    }
  }
}

void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->backedge_count();

  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  }
}
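
// Note the contrast between the two non-tiered policies: SimpleCompPolicy
// compiles the method whose counter overflowed, while StackWalkCompPolicy
// (below) walks up the interpreted call chain looking for the outermost
// caller into which the overflowing method would likely be inlined, and
// compiles that caller instead.
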
// StackWalkCompPolicy - walk up stack to find a suitable method to compile

#ifdef COMPILER2
const char* StackWalkCompPolicy::_msg = NULL;

// Consider m for compilation
void StackWalkCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);

  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
    ResourceMark rm(thread);
    frame fr = thread->last_frame();
    assert(fr.is_interpreted_frame(), "must be interpreted");
    assert(fr.interpreter_frame_method() == m(), "bad method");

    RegisterMap reg_map(thread, false);
    javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
    // triggerVF is the frame that triggered its counter
    RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m());

    if (first->top_method()->code() != NULL) {
      // called obsolete method/nmethod -- no need to recompile
    } else {
      GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
      stack->push(first);
      RFrame* top = findTopInlinableFrame(stack);
      assert(top != NULL, "findTopInlinableFrame returned null");
      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
                                    m, hot_count, CompileTask::Reason_InvocationCount, thread);
    }
  }
}

void StackWalkCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->backedge_count();

  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  }
}

RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
  // go up the stack until finding a frame that (probably) won't be inlined
  // into its caller
  RFrame* current = stack->at(0); // current choice for stopping
  assert(current != NULL && !current->is_compiled(), "must start at an interpreted frame");
  const char* msg = NULL;

  while (1) {
    // before going up the stack further, check if doing so would get us into
    // compiled code
    RFrame* next = senderOf(current, stack);
    if (next == NULL) {  // No next frame up the stack?
      break;             // Then compile with current frame
    }

    Method* m = current->top_method();
    Method* next_m = next->top_method();

    if (!Inline) {  // Inlining turned off
      msg = "Inlining turned off";
      break;
    }
    if (next_m->is_not_compilable()) {  // Did we fail to compile this before?
      msg = "caller not compilable";
      break;
    }
    if (next->num() > MaxRecompilationSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: > MaxRecompilationSearchLength";
      break;
    }
    if (next->distance() > MaxInterpretedSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: next > MaxInterpretedSearchLength";
      break;
    }
    // Compiled frame above already decided not to inline;
    // do not recompile it.
    if (next->is_compiled()) {
      msg = "not going up into optimized code";
      break;
    }

    // Interpreted frame above us was already compiled. Do not force
    // a recompile, although if the frame above us runs long enough an
    // OSR might still happen.
    if (current->is_interpreted() && next_m->has_compiled_code()) {
      msg = "not going up -- already compiled caller";
      break;
    }

    // Compute how frequent this call site is. We have current method 'm'.
    // We know next method 'next_m' is interpreted. Find the call site and
    // check the various invocation counts.
    int invcnt = 0;  // Caller counts
    if (ProfileInterpreter) {
      invcnt = next_m->interpreter_invocation_count();
    }
    int cnt = 0;  // Call site counts
    if (ProfileInterpreter && next_m->method_data() != NULL) {
      ResourceMark rm;
      int bci = next->top_vframe()->bci();
      ProfileData* data = next_m->method_data()->bci_to_data(bci);
      if (data != NULL && data->is_CounterData())
        cnt = data->as_CounterData()->count();
    }

    // Ratio of call-site count to caller invocations; i.e. is this call site
    // a hot call site within method next_m?
    int freq = (invcnt) ? cnt/invcnt : cnt;

    // Check size and frequency limits
    if ((msg = shouldInline(m, freq, cnt)) != NULL) {
      break;
    }
    // Check inlining negative tests
    if ((msg = shouldNotInline(m)) != NULL) {
      break;
    }

    // If the caller method is too big or something then we do not want to
    // compile it just to inline a method
    if (!can_be_compiled(next_m, CompLevel_any)) {
      msg = "caller cannot be compiled";
      break;
    }

    if (next_m->name() == vmSymbols::class_initializer_name()) {
      msg = "do not compile class initializer (OSR ok)";
      break;
    }

    current = next;
  }

  assert(current == NULL || !current->is_compiled(), "must not pick a compiled frame");

  return current;
}
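
// Illustration of the walk (a sketch): suppose interpreted method A overflows
// its counter, A's caller B is interpreted, and B's caller C is compiled. The
// search starts at A's frame; if A passes the inlining filters it moves up to
// B, then stops at B because C's frame is compiled ("not going up into
// optimized code"), so B is submitted for compilation and can inline A.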

RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
  RFrame* sender = rf->caller();
  if (sender != NULL && sender->num() == stack->length()) stack->push(sender);
  return sender;
}

const char* StackWalkCompPolicy::shouldInline(const methodHandle& m, float freq, int cnt) {
  // Allows targeted inlining.
  // Positive filter: should the send be inlined? Returns NULL (--> yes)
  // or a rejection msg.
  int max_size = MaxInlineSize;
  int cost = m->code_size();

  // Check for too many throws (and not too huge)
  if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize) {
    return NULL;
  }

  // bump the max size if the call is frequent
  if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
    if (TraceFrequencyInlining) {
      tty->print("(Inlined frequent method)\n");
      m->print();
    }
    max_size = FreqInlineSize;
  }
  if (cost > max_size) {
    return (_msg = "too big");
  }
  return NULL;
}
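
// Sketch with typical defaults (assuming MaxInlineSize=35, FreqInlineSize=325):
// a 60-byte callee is rejected as "too big" at an ordinary call site, but if
// the site is frequent (freq >= InlineFrequencyRatio or
// cnt >= InlineFrequencyCount) the 325-byte budget applies and it passes.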

const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
  // Negative filter: should the send NOT be inlined? Returns NULL (--> inline) or a rejection msg.
  if (m->is_abstract()) return (_msg = "abstract method");
  // note: we allow ik->is_abstract()
  if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
  if (m->is_native()) return (_msg = "native method");
  CompiledMethod* m_code = m->code();
  if (m_code != NULL && m_code->code_size() > InlineSmallCode)
    return (_msg = "already compiled into a big method");

  // use frequency-based objections only for non-trivial methods
  if (m->code_size() <= MaxTrivialSize) return NULL;
  if (UseInterpreter) {  // don't use counts with -Xcomp
    if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
    if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
  }
  if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");

  return NULL;
}
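
// Note on the execution-count test above: MIN2(MinInliningThreshold,
// CompileThreshold >> 1) means a candidate normally needs
// MinInliningThreshold executions (250 by default, an assumption), but never
// more than half of CompileThreshold, so low-threshold runs can still inline.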

#endif // COMPILER2