/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

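// A call site counts as an "inlined method handle intrinsic" when the method
// named by the bytecode (the symbolic info) is a MethodHandle intrinsic such
// as MH.linkTo*/invokeBasic, but the method actually being called is not:
// the linker call has been resolved and replaced by a direct call.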
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
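  // Not-yet-resolved direct calls go through a SharedRuntime resolution stub;
  // plain static calls and optimized (statically bindable) virtual calls
  // each have their own stub.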
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
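  // So fall back to an explicit null check when inline caches or implicit
  // null checks are disabled, when the zero page is not read-protected, or
  // when this caller has already trapped on null checks too often.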
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
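// A late-inline generator first emits an ordinary out-of-line CallStaticJava
// during parsing; after the main parse has finished, do_late_inline() may
// replace that call node with the body produced by _inline_cg.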
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call is dead (unreachable or has no uses).
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
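  // If one of the call's own projections has been wired back into the call's
  // inputs, the call sits on a dead (unreachable) loop; leave it alone.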
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  if (_is_pure_call && result_not_used) {
    // The call is marked as pure (no important side effects), but its result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // This check is done here because the for_method_handle_inline() method
    // needs jvms for the inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
    C->env()->notice_inlined_method(_inline_cg->method());
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

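  // Retry method-handle-specific inlining: by now the MethodHandle receiver
  // or MemberName argument may have become a constant, which can let
  // for_method_handle_inline() produce an inlineable generator.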
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls with a predicted receiver type:
// a type check selects between an inlined "hit" path and an out-of-line
// "miss" path.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
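  // Merge the JVM state: for each live local/stack slot where the fast and
  // slow maps disagree, insert a Phi on the merge region.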
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

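  // Immediate inlining failed.  Defer as a method handle late inline when
  // incremental inlining is enabled and the site is ever reached, unless we
  // are already inlining incrementally with a constant input and still under
  // the inlining budget (then just emit the out-of-line call).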
  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //        if (predicate(1))
  //            do_intrinsic(1)
  //        ...
  //        else
  //            do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
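  // For each live local/stack slot, build a Phi whenever the surviving result
  // maps disagree on the slot's value.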
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call with an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
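  // Worked example (hypothetical flag values): with min_size = 10 and
  // max_size = 500, a method of size 59 gives method_size = 49/490 = 0.1,
  // so size_factor = 2 and the heat is twice count()*profit().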
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

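// Keep the warm-call work list sorted hottest-first: walk past the entries
// warmer than this one, then splice this in and return the (new) head.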
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

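// Unlink this entry from the singly-linked list headed by 'head' and
// return the (possibly new) head.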
WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT