1/*
2 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "opto/loopnode.hpp"
27#include "opto/addnode.hpp"
28#include "opto/callnode.hpp"
29#include "opto/connode.hpp"
30#include "opto/convertnode.hpp"
31#include "opto/loopnode.hpp"
32#include "opto/matcher.hpp"
33#include "opto/mulnode.hpp"
34#include "opto/opaquenode.hpp"
35#include "opto/rootnode.hpp"
36#include "opto/subnode.hpp"
37#include <fenv.h>
38#include <math.h>
39
/*
 * The general idea of Loop Predication is to insert a predicate on the entry
 * path to a loop, and raise an uncommon trap if the check of the condition fails.
 * The condition checks are promoted from inside the loop body, so the
 * checks inside the loop can be eliminated. Currently, loop predication
 * is applied to remove array range checks and loop invariant checks
 * (such as null checks).
 */
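//
// For illustration only, a hypothetical Java loop and the effect of predication
// (the exact predicate shapes are built by rc_predicate() further below):
//
//   for (int i = 0; i < n; i++) {
//     sum += a[i];              // implicit checks: a != null, i u< a.length
//   }
//
// conceptually becomes
//
//   if (a == null)            uncommon_trap();  // hoisted invariant (null) check
//   if (!(n - 1 u< a.length)) uncommon_trap();  // hoisted upper-bound range check
//   for (int i = 0; i < n; i++) {
//     sum += a[i];              // no checks left inside the loop
//   }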
48
49//-------------------------------register_control-------------------------
50void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) {
51 assert(n->is_CFG(), "must be control node");
52 _igvn.register_new_node_with_optimizer(n);
53 loop->_body.push(n);
54 set_loop(n, loop);
55 // When called from beautify_loops() idom is not constructed yet.
56 if (_idom != NULL) {
57 set_idom(n, pred, dom_depth(pred));
58 }
59}
60
61//------------------------------create_new_if_for_predicate------------------------
62// create a new if above the uct_if_pattern for the predicate to be promoted.
63//
64// before after
65// ---------- ----------
66// ctrl ctrl
67// | |
68// | |
69// v v
70// iff new_iff
71// / \ / \
72// / \ / \
73// v v v v
74// uncommon_proj cont_proj if_uct if_cont
75// \ | | | |
76// \ | | | |
77// v v v | v
78// rgn loop | iff
79// | | / \
80// | | / \
81// v | v v
82// uncommon_trap | uncommon_proj cont_proj
83// \ \ | |
84// \ \ | |
85// v v v v
86// rgn loop
87// |
88// |
89// v
90// uncommon_trap
91//
92//
// We will create a region to guard the uct call if there is none already.
94// The true projection (if_cont) of the new_iff is returned.
95// This code is also used to clone predicates to cloned loops.
96ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
97 Deoptimization::DeoptReason reason,
98 int opcode) {
99 assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
100 IfNode* iff = cont_proj->in(0)->as_If();
101
102 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
103 Node *rgn = uncommon_proj->unique_ctrl_out();
104 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
105
106 uint proj_index = 1; // region's edge corresponding to uncommon_proj
107 if (!rgn->is_Region()) { // create a region to guard the call
108 assert(rgn->is_Call(), "must be call uct");
109 CallNode* call = rgn->as_Call();
110 IdealLoopTree* loop = get_loop(call);
111 rgn = new RegionNode(1);
112 rgn->add_req(uncommon_proj);
113 register_control(rgn, loop, uncommon_proj);
114 _igvn.replace_input_of(call, 0, rgn);
115 // When called from beautify_loops() idom is not constructed yet.
116 if (_idom != NULL) {
117 set_idom(call, rgn, dom_depth(rgn));
118 }
119 for (DUIterator_Fast imax, i = uncommon_proj->fast_outs(imax); i < imax; i++) {
120 Node* n = uncommon_proj->fast_out(i);
121 if (n->is_Load() || n->is_Store()) {
122 _igvn.replace_input_of(n, 0, rgn);
123 --i; --imax;
124 }
125 }
126 } else {
127 // Find region's edge corresponding to uncommon_proj
128 for (; proj_index < rgn->req(); proj_index++)
129 if (rgn->in(proj_index) == uncommon_proj) break;
130 assert(proj_index < rgn->req(), "sanity");
131 }
132
133 Node* entry = iff->in(0);
134 if (new_entry != NULL) {
    // Cloning the predicate to a new location.
136 entry = new_entry;
137 }
138 // Create new_iff
139 IdealLoopTree* lp = get_loop(entry);
140 IfNode* new_iff = NULL;
141 if (opcode == Op_If) {
142 new_iff = new IfNode(entry, iff->in(1), iff->_prob, iff->_fcnt);
143 } else {
144 assert(opcode == Op_RangeCheck, "no other if variant here");
145 new_iff = new RangeCheckNode(entry, iff->in(1), iff->_prob, iff->_fcnt);
146 }
147 register_control(new_iff, lp, entry);
148 Node *if_cont = new IfTrueNode(new_iff);
149 Node *if_uct = new IfFalseNode(new_iff);
150 if (cont_proj->is_IfFalse()) {
151 // Swap
152 Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
153 }
154 register_control(if_cont, lp, new_iff);
155 register_control(if_uct, get_loop(rgn), new_iff);
156
157 // if_uct to rgn
158 _igvn.hash_delete(rgn);
159 rgn->add_req(if_uct);
160 // When called from beautify_loops() idom is not constructed yet.
161 if (_idom != NULL) {
162 Node* ridom = idom(rgn);
163 Node* nrdom = dom_lca_internal(ridom, new_iff);
164 set_idom(rgn, nrdom, dom_depth(rgn));
165 }
166
  // If rgn has phis, add new edges which have the same
  // value as on the original uncommon_proj path.
169 assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last");
170 bool has_phi = false;
171 for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) {
172 Node* use = rgn->fast_out(i);
173 if (use->is_Phi() && use->outcnt() > 0) {
174 assert(use->in(0) == rgn, "");
175 _igvn.rehash_node_delayed(use);
176 use->add_req(use->in(proj_index));
177 has_phi = true;
178 }
179 }
180 assert(!has_phi || rgn->req() > 3, "no phis when region is created");
181
182 if (new_entry == NULL) {
183 // Attach if_cont to iff
184 _igvn.replace_input_of(iff, 0, if_cont);
185 if (_idom != NULL) {
186 set_idom(iff, if_cont, dom_depth(iff));
187 }
188 }
189 return if_cont->as_Proj();
190}
191
192//------------------------------create_new_if_for_predicate------------------------
193// Create a new if below new_entry for the predicate to be cloned (IGVN optimization)
194ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
195 Deoptimization::DeoptReason reason,
196 int opcode) {
197 assert(new_entry != 0, "only used for clone predicate");
198 assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
199 IfNode* iff = cont_proj->in(0)->as_If();
200
201 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
202 Node *rgn = uncommon_proj->unique_ctrl_out();
203 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
204
205 uint proj_index = 1; // region's edge corresponding to uncommon_proj
206 if (!rgn->is_Region()) { // create a region to guard the call
207 assert(rgn->is_Call(), "must be call uct");
208 CallNode* call = rgn->as_Call();
209 rgn = new RegionNode(1);
210 register_new_node_with_optimizer(rgn);
211 rgn->add_req(uncommon_proj);
212 replace_input_of(call, 0, rgn);
213 } else {
214 // Find region's edge corresponding to uncommon_proj
215 for (; proj_index < rgn->req(); proj_index++)
216 if (rgn->in(proj_index) == uncommon_proj) break;
217 assert(proj_index < rgn->req(), "sanity");
218 }
219
220 // Create new_iff in new location.
221 IfNode* new_iff = NULL;
222 if (opcode == Op_If) {
223 new_iff = new IfNode(new_entry, iff->in(1), iff->_prob, iff->_fcnt);
224 } else {
225 assert(opcode == Op_RangeCheck, "no other if variant here");
226 new_iff = new RangeCheckNode(new_entry, iff->in(1), iff->_prob, iff->_fcnt);
227 }
228
229 register_new_node_with_optimizer(new_iff);
230 Node *if_cont = new IfTrueNode(new_iff);
231 Node *if_uct = new IfFalseNode(new_iff);
232 if (cont_proj->is_IfFalse()) {
233 // Swap
234 Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
235 }
236 register_new_node_with_optimizer(if_cont);
237 register_new_node_with_optimizer(if_uct);
238
239 // if_uct to rgn
240 hash_delete(rgn);
241 rgn->add_req(if_uct);
242
  // If rgn has phis, add corresponding new edges which have the same
  // value as on the original uncommon_proj path.
245 assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last");
246 bool has_phi = false;
247 for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) {
248 Node* use = rgn->fast_out(i);
249 if (use->is_Phi() && use->outcnt() > 0) {
250 rehash_node_delayed(use);
251 use->add_req(use->in(proj_index));
252 has_phi = true;
253 }
254 }
255 assert(!has_phi || rgn->req() > 3, "no phis when region is created");
256
257 return if_cont->as_Proj();
258}
259
260//--------------------------clone_predicate-----------------------
261ProjNode* PhaseIdealLoop::clone_predicate(ProjNode* predicate_proj, Node* new_entry,
262 Deoptimization::DeoptReason reason,
263 PhaseIdealLoop* loop_phase,
264 PhaseIterGVN* igvn) {
265 ProjNode* new_predicate_proj;
266 if (loop_phase != NULL) {
267 new_predicate_proj = loop_phase->create_new_if_for_predicate(predicate_proj, new_entry, reason, Op_If);
268 } else {
269 new_predicate_proj = igvn->create_new_if_for_predicate(predicate_proj, new_entry, reason, Op_If);
270 }
271 IfNode* iff = new_predicate_proj->in(0)->as_If();
272 Node* ctrl = iff->in(0);
273
274 // Match original condition since predicate's projections could be swapped.
275 assert(predicate_proj->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
276 Node* opq = new Opaque1Node(igvn->C, predicate_proj->in(0)->in(1)->in(1)->in(1));
277 igvn->C->add_predicate_opaq(opq);
278
279 Node* bol = new Conv2BNode(opq);
280 if (loop_phase != NULL) {
281 loop_phase->register_new_node(opq, ctrl);
282 loop_phase->register_new_node(bol, ctrl);
283 } else {
284 igvn->register_new_node_with_optimizer(opq);
285 igvn->register_new_node_with_optimizer(bol);
286 }
287 igvn->hash_delete(iff);
288 iff->set_req(1, bol);
289 return new_predicate_proj;
290}
291
292
293//--------------------------clone_loop_predicates-----------------------
294// Interface from IGVN
295Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
296 return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, clone_limit_check, NULL, this);
297}
298
299// Interface from PhaseIdealLoop
300Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) {
301 return clone_loop_predicates(old_entry, new_entry, clone_limit_check, this, &this->_igvn);
302}
303
304void PhaseIdealLoop::clone_loop_predicates_fix_mem(ProjNode* dom_proj , ProjNode* proj,
305 PhaseIdealLoop* loop_phase,
306 PhaseIterGVN* igvn) {
307 Compile* C = NULL;
308 if (loop_phase != NULL) {
309 igvn = &loop_phase->igvn();
310 }
311 C = igvn->C;
312 ProjNode* other_dom_proj = dom_proj->in(0)->as_Multi()->proj_out(1-dom_proj->_con);
313 Node* dom_r = other_dom_proj->unique_ctrl_out();
314 if (dom_r->is_Region()) {
315 assert(dom_r->unique_ctrl_out()->is_Call(), "unc expected");
316 ProjNode* other_proj = proj->in(0)->as_Multi()->proj_out(1-proj->_con);
317 Node* r = other_proj->unique_ctrl_out();
318 assert(r->is_Region() && r->unique_ctrl_out()->is_Call(), "cloned predicate should have caused region to be added");
319 for (DUIterator_Fast imax, i = dom_r->fast_outs(imax); i < imax; i++) {
320 Node* dom_use = dom_r->fast_out(i);
321 if (dom_use->is_Phi() && dom_use->bottom_type() == Type::MEMORY) {
322 assert(dom_use->in(0) == dom_r, "");
323 Node* phi = NULL;
324 for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
325 Node* use = r->fast_out(j);
326 if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
327 use->adr_type() == dom_use->adr_type()) {
328 assert(use->in(0) == r, "");
329 assert(phi == NULL, "only one phi");
330 phi = use;
331 }
332 }
333 if (phi == NULL) {
334 const TypePtr* adr_type = dom_use->adr_type();
335 int alias = C->get_alias_index(adr_type);
336 Node* call = r->unique_ctrl_out();
337 Node* mem = call->in(TypeFunc::Memory);
338 MergeMemNode* mm = NULL;
339 if (mem->is_MergeMem()) {
340 mm = mem->clone()->as_MergeMem();
341 if (adr_type == TypePtr::BOTTOM) {
342 mem = mem->as_MergeMem()->base_memory();
343 } else {
344 mem = mem->as_MergeMem()->memory_at(alias);
345 }
346 } else {
347 mm = MergeMemNode::make(mem);
348 }
349 phi = PhiNode::make(r, mem, Type::MEMORY, adr_type);
350 if (adr_type == TypePtr::BOTTOM) {
351 mm->set_base_memory(phi);
352 } else {
353 mm->set_memory_at(alias, phi);
354 }
355 if (loop_phase != NULL) {
356 loop_phase->register_new_node(mm, r);
357 loop_phase->register_new_node(phi, r);
358 } else {
359 igvn->register_new_node_with_optimizer(mm);
360 igvn->register_new_node_with_optimizer(phi);
361 }
362 igvn->replace_input_of(call, TypeFunc::Memory, mm);
363 }
364 igvn->replace_input_of(phi, r->find_edge(other_proj), dom_use->in(dom_r->find_edge(other_dom_proj)));
365 }
366 }
367 }
368}
369
370
371// Clone loop predicates to cloned loops (peeled, unswitched, split_if).
372Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry,
373 bool clone_limit_check,
374 PhaseIdealLoop* loop_phase,
375 PhaseIterGVN* igvn) {
376#ifdef ASSERT
377 if (new_entry == NULL || !(new_entry->is_Proj() || new_entry->is_Region() || new_entry->is_SafePoint())) {
378 if (new_entry != NULL)
379 new_entry->dump();
380 assert(false, "not IfTrue, IfFalse, Region or SafePoint");
381 }
382#endif
383 // Search original predicates
384 Node* entry = old_entry;
385 ProjNode* limit_check_proj = NULL;
386 limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
387 if (limit_check_proj != NULL) {
388 entry = skip_loop_predicates(entry);
389 }
390 ProjNode* profile_predicate_proj = NULL;
391 ProjNode* predicate_proj = NULL;
392 if (UseProfiledLoopPredicate) {
393 profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
394 if (profile_predicate_proj != NULL) {
395 entry = skip_loop_predicates(entry);
396 }
397 }
398 if (UseLoopPredicate) {
399 predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
400 }
401 if (predicate_proj != NULL) { // right pattern that can be used by loop predication
402 // clone predicate
403 ProjNode* proj = clone_predicate(predicate_proj, new_entry,
404 Deoptimization::Reason_predicate,
405 loop_phase, igvn);
406 assert(proj != NULL, "IfTrue or IfFalse after clone predicate");
407 new_entry = proj;
408 if (TraceLoopPredicate) {
409 tty->print("Loop Predicate cloned: ");
410 debug_only( new_entry->in(0)->dump(); );
411 }
412 if (profile_predicate_proj != NULL) {
      // A node that produces memory may be out of the loop and depend on
      // a profiled predicate. In that case the memory states at the
      // end of the profiled predicates and at the end of the regular
      // predicates are not the same. The cloned predicates are dominated
      // by the profiled predicates but may have the wrong memory
      // state. Update it.
419 clone_loop_predicates_fix_mem(profile_predicate_proj, proj, loop_phase, igvn);
420 }
421 }
422 if (profile_predicate_proj != NULL) { // right pattern that can be used by loop predication
423 // clone predicate
424 new_entry = clone_predicate(profile_predicate_proj, new_entry,
425 Deoptimization::Reason_profile_predicate,
426 loop_phase, igvn);
427 assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone predicate");
428 if (TraceLoopPredicate) {
429 tty->print("Loop Predicate cloned: ");
430 debug_only( new_entry->in(0)->dump(); );
431 }
432 }
433 if (limit_check_proj != NULL && clone_limit_check) {
434 // Clone loop limit check last to insert it before loop.
435 // Don't clone a limit check which was already finalized
436 // for this counted loop (only one limit check is needed).
437 new_entry = clone_predicate(limit_check_proj, new_entry,
438 Deoptimization::Reason_loop_limit_check,
439 loop_phase, igvn);
440 assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone limit check");
441 if (TraceLoopLimitCheck) {
442 tty->print("Loop Limit Check cloned: ");
443 debug_only( new_entry->in(0)->dump(); )
444 }
445 }
446 return new_entry;
447}
448
449//--------------------------skip_loop_predicates------------------------------
450// Skip related predicates.
451Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) {
452 IfNode* iff = entry->in(0)->as_If();
453 ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con);
454 Node* rgn = uncommon_proj->unique_ctrl_out();
455 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
456 entry = entry->in(0)->in(0);
457 while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
458 uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con);
459 if (uncommon_proj->unique_ctrl_out() != rgn)
460 break;
461 entry = entry->in(0)->in(0);
462 }
463 return entry;
464}
465
466Node* PhaseIdealLoop::skip_all_loop_predicates(Node* entry) {
467 Node* predicate = NULL;
468 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
469 if (predicate != NULL) {
470 entry = skip_loop_predicates(entry);
471 }
472 if (UseProfiledLoopPredicate) {
473 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
474 if (predicate != NULL) { // right pattern that can be used by loop predication
475 entry = skip_loop_predicates(entry);
476 }
477 }
478 if (UseLoopPredicate) {
479 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
480 if (predicate != NULL) { // right pattern that can be used by loop predication
481 entry = skip_loop_predicates(entry);
482 }
483 }
484 return entry;
485}
486
487//--------------------------find_predicate_insertion_point-------------------
488// Find a good location to insert a predicate
489ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
490 if (start_c == NULL || !start_c->is_Proj())
491 return NULL;
492 if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) {
493 return start_c->as_Proj();
494 }
495 return NULL;
496}
497
498//--------------------------find_predicate------------------------------------
499// Find a predicate
500Node* PhaseIdealLoop::find_predicate(Node* entry) {
501 Node* predicate = NULL;
502 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
503 if (predicate != NULL) { // right pattern that can be used by loop predication
504 return entry;
505 }
506 if (UseLoopPredicate) {
507 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
508 if (predicate != NULL) { // right pattern that can be used by loop predication
509 return entry;
510 }
511 }
512 if (UseProfiledLoopPredicate) {
513 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
514 if (predicate != NULL) { // right pattern that can be used by loop predication
515 return entry;
516 }
517 }
518 return NULL;
519}
520
521//------------------------------Invariance-----------------------------------
522// Helper class for loop_predication_impl to compute invariance on the fly and
523// clone invariants.
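//
// Typical use (a sketch of how loop_predication_impl_helper() below drives it):
//
//   Invariance invar(area, loop);
//   if (invar.is_invariant(bol)) {                  // can this test be hoisted?
//     BoolNode* new_bol = invar.clone(bol, ctrl)->as_Bool(); // copy it above the loop
//     ...
//     invar.map_ctrl(proj, new_predicate_proj);     // hoisted proj stands in for proj
//   }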
524class Invariance : public StackObj {
525 VectorSet _visited, _invariant;
526 Node_Stack _stack;
527 VectorSet _clone_visited;
528 Node_List _old_new; // map of old to new (clone)
529 IdealLoopTree* _lpt;
530 PhaseIdealLoop* _phase;
531
  // Helper function to set up the invariance for invariance computation.
  // If n is a known invariant, set it up directly. Otherwise, check
  // whether n can be pushed onto the stack for further processing.
535 void visit(Node* use, Node* n) {
536 if (_lpt->is_invariant(n)) { // known invariant
537 _invariant.set(n->_idx);
538 } else if (!n->is_CFG()) {
539 Node *n_ctrl = _phase->ctrl_or_self(n);
540 Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
541 if (_phase->is_dominator(n_ctrl, u_ctrl)) {
542 _stack.push(n, n->in(0) == NULL ? 1 : 0);
543 }
544 }
545 }
546
  // Compute invariance for "n" and (possibly) all its inputs recursively
  // on the fly
549 void compute_invariance(Node* n) {
550 assert(_visited.test(n->_idx), "must be");
551 visit(n, n);
552 while (_stack.is_nonempty()) {
553 Node* n = _stack.node();
554 uint idx = _stack.index();
555 if (idx == n->req()) { // all inputs are processed
556 _stack.pop();
        // n is invariant if its inputs are all invariant
558 bool all_inputs_invariant = true;
559 for (uint i = 0; i < n->req(); i++) {
560 Node* in = n->in(i);
561 if (in == NULL) continue;
562 assert(_visited.test(in->_idx), "must have visited input");
563 if (!_invariant.test(in->_idx)) { // bad guy
564 all_inputs_invariant = false;
565 break;
566 }
567 }
568 if (all_inputs_invariant) {
569 // If n's control is a predicate that was moved out of the
570 // loop, it was marked invariant but n is only invariant if
571 // it depends only on that test. Otherwise, unless that test
572 // is out of the loop, it's not invariant.
573 if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) {
            _invariant.set(n->_idx); // I am an invariant too
575 }
576 }
577 } else { // process next input
578 _stack.set_index(idx + 1);
579 Node* m = n->in(idx);
580 if (m != NULL && !_visited.test_set(m->_idx)) {
581 visit(n, m);
582 }
583 }
584 }
585 }
586
587 // Helper function to set up _old_new map for clone_nodes.
588 // If n is a known invariant, set up directly ("clone" of n == n).
589 // Otherwise, push n onto the stack for real cloning.
590 void clone_visit(Node* n) {
591 assert(_invariant.test(n->_idx), "must be invariant");
592 if (_lpt->is_invariant(n)) { // known invariant
593 _old_new.map(n->_idx, n);
594 } else { // to be cloned
595 assert(!n->is_CFG(), "should not see CFG here");
596 _stack.push(n, n->in(0) == NULL ? 1 : 0);
597 }
598 }
599
600 // Clone "n" and (possibly) all its inputs recursively
601 void clone_nodes(Node* n, Node* ctrl) {
602 clone_visit(n);
603 while (_stack.is_nonempty()) {
604 Node* n = _stack.node();
605 uint idx = _stack.index();
606 if (idx == n->req()) { // all inputs processed, clone n!
607 _stack.pop();
608 // clone invariant node
609 Node* n_cl = n->clone();
610 _old_new.map(n->_idx, n_cl);
611 _phase->register_new_node(n_cl, ctrl);
612 for (uint i = 0; i < n->req(); i++) {
613 Node* in = n_cl->in(i);
614 if (in == NULL) continue;
615 n_cl->set_req(i, _old_new[in->_idx]);
616 }
617 } else { // process next input
618 _stack.set_index(idx + 1);
619 Node* m = n->in(idx);
620 if (m != NULL && !_clone_visited.test_set(m->_idx)) {
621 clone_visit(m); // visit the input
622 }
623 }
624 }
625 }
626
627 public:
628 Invariance(Arena* area, IdealLoopTree* lpt) :
629 _visited(area), _invariant(area),
630 _stack(area, 10 /* guess */),
631 _clone_visited(area), _old_new(area),
632 _lpt(lpt), _phase(lpt->_phase)
633 {
634 LoopNode* head = _lpt->_head->as_Loop();
635 Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
636 if (entry->outcnt() != 1) {
      // If a node is pinned between the predicates and the loop
      // entry, we won't be able to move any node in the loop that
      // depends on it above it in a predicate. Mark all those nodes
      // as non-loop-invariant.
641 Unique_Node_List wq;
642 wq.push(entry);
643 for (uint next = 0; next < wq.size(); ++next) {
644 Node *n = wq.at(next);
645 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
646 Node* u = n->fast_out(i);
647 if (!u->is_CFG()) {
648 Node* c = _phase->get_ctrl(u);
649 if (_lpt->is_member(_phase->get_loop(c)) || _phase->is_dominator(c, head)) {
650 _visited.set(u->_idx);
651 wq.push(u);
652 }
653 }
654 }
655 }
656 }
657 }
658
659 // Map old to n for invariance computation and clone
660 void map_ctrl(Node* old, Node* n) {
661 assert(old->is_CFG() && n->is_CFG(), "must be");
662 _old_new.map(old->_idx, n); // "clone" of old is n
663 _invariant.set(old->_idx); // old is invariant
664 _clone_visited.set(old->_idx);
665 }
666
667 // Driver function to compute invariance
668 bool is_invariant(Node* n) {
669 if (!_visited.test_set(n->_idx))
670 compute_invariance(n);
671 return (_invariant.test(n->_idx) != 0);
672 }
673
674 // Driver function to clone invariant
675 Node* clone(Node* n, Node* ctrl) {
676 assert(ctrl->is_CFG(), "must be");
677 assert(_invariant.test(n->_idx), "must be an invariant");
678 if (!_clone_visited.test(n->_idx))
679 clone_nodes(n, ctrl);
680 return _old_new[n->_idx];
681 }
682};
683
684//------------------------------is_range_check_if -----------------------------------
685// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format
// Note: this function is specifically designed for loop predication. We require load_range
// and offset to be loop invariant, computed on the fly by "invar".
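//
// As a hypothetical example (for illustration only), for a counted loop with
// induction variable i and an access a[2*i + 3], the check inside the loop has
// roughly the shape
//
//   Bool[lt]( CmpU( AddI( MulI(i, 2), 3 ), LoadRange(a) ) )
//
// i.e. "2*i + 3 u< a.length", where the range (a.length) and the offset (3)
// must be loop invariant for this function to return true.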
688bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const {
689 if (!is_loop_exit(iff)) {
690 return false;
691 }
692 if (!iff->in(1)->is_Bool()) {
693 return false;
694 }
695 const BoolNode *bol = iff->in(1)->as_Bool();
696 if (bol->_test._test != BoolTest::lt) {
697 return false;
698 }
699 if (!bol->in(1)->is_Cmp()) {
700 return false;
701 }
702 const CmpNode *cmp = bol->in(1)->as_Cmp();
703 if (cmp->Opcode() != Op_CmpU) {
704 return false;
705 }
706 Node* range = cmp->in(2);
707 if (range->Opcode() != Op_LoadRange && !iff->is_RangeCheck()) {
708 const TypeInt* tint = phase->_igvn.type(range)->isa_int();
709 if (tint == NULL || tint->empty() || tint->_lo < 0) {
710 // Allow predication on positive values that aren't LoadRanges.
711 // This allows optimization of loops where the length of the
712 // array is a known value and doesn't need to be loaded back
713 // from the array.
714 return false;
715 }
716 }
717 if (!invar.is_invariant(range)) {
718 return false;
719 }
720 Node *iv = _head->as_CountedLoop()->phi();
721 int scale = 0;
722 Node *offset = NULL;
723 if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) {
724 return false;
725 }
726 if (offset && !invar.is_invariant(offset)) { // offset must be invariant
727 return false;
728 }
729 return true;
730}
731
732//------------------------------rc_predicate-----------------------------------
733// Create a range check predicate
734//
735// for (i = init; i < limit; i += stride) {
736// a[scale*i+offset]
737// }
738//
739// Compute max(scale*i + offset) for init <= i < limit and build the predicate
740// as "max(scale*i + offset) u< a.length".
741//
742// There are two cases for max(scale*i + offset):
743// (1) stride*scale > 0
744// max(scale*i + offset) = scale*(limit-stride) + offset
745// (2) stride*scale < 0
746// max(scale*i + offset) = scale*init + offset
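//
// A small worked example (numbers made up for illustration): with init = 0,
// limit = 100, stride = 1, scale = 2 and offset = 3, case (1) applies since
// stride*scale > 0, so
//
//   max(2*i + 3) = 2*(100 - 1) + 3 = 201
//
// and the hoisted upper-bound predicate is "201 u< a.length". With scale = 2,
// stride = -1 and init = 99 (counting down), case (2) applies and
// max(2*i + 3) = 2*99 + 3 = 201 as well.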
747BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl,
748 int scale, Node* offset,
749 Node* init, Node* limit, jint stride,
750 Node* range, bool upper, bool &overflow) {
751 jint con_limit = (limit != NULL && limit->is_Con()) ? limit->get_int() : 0;
752 jint con_init = init->is_Con() ? init->get_int() : 0;
753 jint con_offset = offset->is_Con() ? offset->get_int() : 0;
754
755 stringStream* predString = NULL;
756 if (TraceLoopPredicate) {
757 predString = new stringStream();
758 predString->print("rc_predicate ");
759 }
760
761 overflow = false;
762 Node* max_idx_expr = NULL;
763 const TypeInt* idx_type = TypeInt::INT;
764 if ((stride > 0) == (scale > 0) == upper) {
765 guarantee(limit != NULL, "sanity");
766 if (TraceLoopPredicate) {
767 if (limit->is_Con()) {
768 predString->print("(%d ", con_limit);
769 } else {
770 predString->print("(limit ");
771 }
772 predString->print("- %d) ", stride);
773 }
774 // Check if (limit - stride) may overflow
775 const TypeInt* limit_type = _igvn.type(limit)->isa_int();
776 jint limit_lo = limit_type->_lo;
777 jint limit_hi = limit_type->_hi;
778 if ((stride > 0 && (java_subtract(limit_lo, stride) < limit_lo)) ||
779 (stride < 0 && (java_subtract(limit_hi, stride) > limit_hi))) {
780 // No overflow possible
781 ConINode* con_stride = _igvn.intcon(stride);
782 set_ctrl(con_stride, C->root());
783 max_idx_expr = new SubINode(limit, con_stride);
784 idx_type = TypeInt::make(limit_lo - stride, limit_hi - stride, limit_type->_widen);
785 } else {
786 // May overflow
787 overflow = true;
788 limit = new ConvI2LNode(limit);
789 register_new_node(limit, ctrl);
790 ConLNode* con_stride = _igvn.longcon(stride);
791 set_ctrl(con_stride, C->root());
792 max_idx_expr = new SubLNode(limit, con_stride);
793 }
794 register_new_node(max_idx_expr, ctrl);
795 } else {
796 if (TraceLoopPredicate) {
797 if (init->is_Con()) {
798 predString->print("%d ", con_init);
799 } else {
800 predString->print("init ");
801 }
802 }
803 idx_type = _igvn.type(init)->isa_int();
804 max_idx_expr = init;
805 }
806
807 if (scale != 1) {
808 ConNode* con_scale = _igvn.intcon(scale);
809 set_ctrl(con_scale, C->root());
810 if (TraceLoopPredicate) {
811 predString->print("* %d ", scale);
812 }
813 // Check if (scale * max_idx_expr) may overflow
814 const TypeInt* scale_type = TypeInt::make(scale);
815 MulINode* mul = new MulINode(max_idx_expr, con_scale);
816 idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type);
817 if (overflow || TypeInt::INT->higher_equal(idx_type)) {
818 // May overflow
819 mul->destruct();
820 if (!overflow) {
821 max_idx_expr = new ConvI2LNode(max_idx_expr);
822 register_new_node(max_idx_expr, ctrl);
823 }
824 overflow = true;
825 con_scale = _igvn.longcon(scale);
826 set_ctrl(con_scale, C->root());
827 max_idx_expr = new MulLNode(max_idx_expr, con_scale);
828 } else {
829 // No overflow possible
830 max_idx_expr = mul;
831 }
832 register_new_node(max_idx_expr, ctrl);
833 }
834
835 if (offset && (!offset->is_Con() || con_offset != 0)){
836 if (TraceLoopPredicate) {
837 if (offset->is_Con()) {
838 predString->print("+ %d ", con_offset);
839 } else {
840 predString->print("+ offset");
841 }
842 }
843 // Check if (max_idx_expr + offset) may overflow
844 const TypeInt* offset_type = _igvn.type(offset)->isa_int();
845 jint lo = java_add(idx_type->_lo, offset_type->_lo);
846 jint hi = java_add(idx_type->_hi, offset_type->_hi);
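  // A note on the wrap checks below (example values for illustration only):
  // (idx_type->_lo & offset_type->_lo) < 0 means both low bounds are negative,
  // so a wrapped sum 'lo' >= 0 signals underflow; ~(idx_type->_hi | offset_type->_hi) < 0
  // means both high bounds are non-negative, so a wrapped sum 'hi' < 0 signals
  // overflow. E.g. idx_type->_hi = 0x7fffffff and offset_type->_hi = 1 give
  // hi < 0 and force the long (overflow) path.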
847 if (overflow || (lo > hi) ||
848 ((idx_type->_lo & offset_type->_lo) < 0 && lo >= 0) ||
849 ((~(idx_type->_hi | offset_type->_hi)) < 0 && hi < 0)) {
850 // May overflow
851 if (!overflow) {
852 max_idx_expr = new ConvI2LNode(max_idx_expr);
853 register_new_node(max_idx_expr, ctrl);
854 }
855 overflow = true;
856 offset = new ConvI2LNode(offset);
857 register_new_node(offset, ctrl);
858 max_idx_expr = new AddLNode(max_idx_expr, offset);
859 } else {
860 // No overflow possible
861 max_idx_expr = new AddINode(max_idx_expr, offset);
862 }
863 register_new_node(max_idx_expr, ctrl);
864 }
865
866 CmpNode* cmp = NULL;
867 if (overflow) {
868 // Integer expressions may overflow, do long comparison
869 range = new ConvI2LNode(range);
870 register_new_node(range, ctrl);
871 cmp = new CmpULNode(max_idx_expr, range);
872 } else {
873 cmp = new CmpUNode(max_idx_expr, range);
874 }
875 register_new_node(cmp, ctrl);
876 BoolNode* bol = new BoolNode(cmp, BoolTest::lt);
877 register_new_node(bol, ctrl);
878
879 if (TraceLoopPredicate) {
880 predString->print_cr("<u range");
881 tty->print("%s", predString->as_string());
882 }
883 return bol;
884}
885
886// Should loop predication look not only in the path from tail to head
887// but also in branches of the loop body?
888bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop, ProjNode *predicate_proj, float& loop_trip_cnt) {
889 if (!UseProfiledLoopPredicate) {
890 return false;
891 }
892
893 if (predicate_proj == NULL) {
894 return false;
895 }
896
897 LoopNode* head = loop->_head->as_Loop();
898 bool follow_branches = true;
899 IdealLoopTree* l = loop->_child;
900 // For leaf loops and loops with a single inner loop
901 while (l != NULL && follow_branches) {
902 IdealLoopTree* child = l;
903 if (child->_child != NULL &&
904 child->_head->is_OuterStripMinedLoop()) {
905 assert(child->_child->_next == NULL, "only one inner loop for strip mined loop");
906 assert(child->_child->_head->is_CountedLoop() && child->_child->_head->as_CountedLoop()->is_strip_mined(), "inner loop should be strip mined");
907 child = child->_child;
908 }
909 if (child->_child != NULL || child->_irreducible) {
910 follow_branches = false;
911 }
912 l = l->_next;
913 }
914 if (follow_branches) {
915 loop->compute_profile_trip_cnt(this);
916 if (head->is_profile_trip_failed()) {
917 follow_branches = false;
918 } else {
919 loop_trip_cnt = head->profile_trip_cnt();
920 if (head->is_CountedLoop()) {
921 CountedLoopNode* cl = head->as_CountedLoop();
922 if (cl->phi() != NULL) {
923 const TypeInt* t = _igvn.type(cl->phi())->is_int();
924 float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS(cl->stride_con());
925 if (worst_case_trip_cnt < loop_trip_cnt) {
926 loop_trip_cnt = worst_case_trip_cnt;
927 }
928 }
929 }
930 }
931 }
932 return follow_branches;
933}
934
935// Compute probability of reaching some CFG node from a fixed
936// dominating CFG node
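//
// For instance (hypothetical probabilities), with a diamond below _dom:
//
//        _dom
//          |
//         If            (taken probability 0.3)
//        /    \
//   IfTrue   IfFalse
//        \    /
//        Region
//
// to(IfTrue) returns 0.3, to(IfFalse) returns 0.7 and to(Region) their sum 1.0;
// paths through inner loops are additionally weighted by how often each
// inner-loop exit is taken relative to all exits of that loop.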
937class PathFrequency {
938private:
939 Node* _dom; // frequencies are computed relative to this node
940 Node_Stack _stack;
941 GrowableArray<float> _freqs_stack; // keep track of intermediate result at regions
942 GrowableArray<float> _freqs; // cache frequencies
943 PhaseIdealLoop* _phase;
944
945 void set_rounding(int mode) {
946 // fesetround is broken on windows
947 NOT_WINDOWS(fesetround(mode);)
948 }
949
950 void check_frequency(float f) {
951 NOT_WINDOWS(assert(f <= 1 && f >= 0, "Incorrect frequency");)
952 }
953
954public:
955 PathFrequency(Node* dom, PhaseIdealLoop* phase)
956 : _dom(dom), _stack(0), _phase(phase) {
957 }
958
959 float to(Node* n) {
960 // post order walk on the CFG graph from n to _dom
961 set_rounding(FE_TOWARDZERO); // make sure rounding doesn't push frequency above 1
962 IdealLoopTree* loop = _phase->get_loop(_dom);
963 Node* c = n;
964 for (;;) {
965 assert(_phase->get_loop(c) == loop, "have to be in the same loop");
966 if (c == _dom || _freqs.at_grow(c->_idx, -1) >= 0) {
967 float f = c == _dom ? 1 : _freqs.at(c->_idx);
968 Node* prev = c;
969 while (_stack.size() > 0 && prev == c) {
970 Node* n = _stack.node();
971 if (!n->is_Region()) {
972 if (_phase->get_loop(n) != _phase->get_loop(n->in(0))) {
973 // Found an inner loop: compute frequency of reaching this
974 // exit from the loop head by looking at the number of
975 // times each loop exit was taken
976 IdealLoopTree* inner_loop = _phase->get_loop(n->in(0));
977 LoopNode* inner_head = inner_loop->_head->as_Loop();
978 assert(_phase->get_loop(n) == loop, "only 1 inner loop");
979 if (inner_head->is_OuterStripMinedLoop()) {
980 inner_head->verify_strip_mined(1);
981 if (n->in(0) == inner_head->in(LoopNode::LoopBackControl)->in(0)) {
982 n = n->in(0)->in(0)->in(0);
983 }
984 inner_loop = inner_loop->_child;
985 inner_head = inner_loop->_head->as_Loop();
986 inner_head->verify_strip_mined(1);
987 }
988 set_rounding(FE_UPWARD); // make sure rounding doesn't push frequency above 1
989 float loop_exit_cnt = 0.0f;
990 for (uint i = 0; i < inner_loop->_body.size(); i++) {
991 Node *n = inner_loop->_body[i];
992 float c = inner_loop->compute_profile_trip_cnt_helper(n);
993 loop_exit_cnt += c;
994 }
995 set_rounding(FE_TOWARDZERO);
996 float cnt = -1;
997 if (n->in(0)->is_If()) {
998 IfNode* iff = n->in(0)->as_If();
999 float p = n->in(0)->as_If()->_prob;
1000 if (n->Opcode() == Op_IfFalse) {
1001 p = 1 - p;
1002 }
1003 if (p > PROB_MIN) {
1004 cnt = p * iff->_fcnt;
1005 } else {
1006 cnt = 0;
1007 }
1008 } else {
1009 assert(n->in(0)->is_Jump(), "unsupported node kind");
1010 JumpNode* jmp = n->in(0)->as_Jump();
1011 float p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
1012 cnt = p * jmp->_fcnt;
1013 }
1014 float this_exit_f = cnt > 0 ? cnt / loop_exit_cnt : 0;
1015 check_frequency(this_exit_f);
1016 f = f * this_exit_f;
1017 check_frequency(f);
1018 } else {
1019 float p = -1;
1020 if (n->in(0)->is_If()) {
1021 p = n->in(0)->as_If()->_prob;
1022 if (n->Opcode() == Op_IfFalse) {
1023 p = 1 - p;
1024 }
1025 } else {
1026 assert(n->in(0)->is_Jump(), "unsupported node kind");
1027 p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
1028 }
1029 f = f * p;
1030 check_frequency(f);
1031 }
1032 _freqs.at_put_grow(n->_idx, (float)f, -1);
1033 _stack.pop();
1034 } else {
1035 float prev_f = _freqs_stack.pop();
1036 float new_f = f;
1037 f = new_f + prev_f;
1038 check_frequency(f);
1039 uint i = _stack.index();
1040 if (i < n->req()) {
1041 c = n->in(i);
1042 _stack.set_index(i+1);
1043 _freqs_stack.push(f);
1044 } else {
1045 _freqs.at_put_grow(n->_idx, f, -1);
1046 _stack.pop();
1047 }
1048 }
1049 }
1050 if (_stack.size() == 0) {
1051 set_rounding(FE_TONEAREST);
1052 check_frequency(f);
1053 return f;
1054 }
1055 } else if (c->is_Loop()) {
1056 ShouldNotReachHere();
1057 c = c->in(LoopNode::EntryControl);
1058 } else if (c->is_Region()) {
1059 _freqs_stack.push(0);
1060 _stack.push(c, 2);
1061 c = c->in(1);
1062 } else {
1063 if (c->is_IfProj()) {
1064 IfNode* iff = c->in(0)->as_If();
1065 if (iff->_prob == PROB_UNKNOWN) {
1066 // assume never taken
1067 _freqs.at_put_grow(c->_idx, 0, -1);
1068 } else if (_phase->get_loop(c) != _phase->get_loop(iff)) {
1069 if (iff->_fcnt == COUNT_UNKNOWN) {
1070 // assume never taken
1071 _freqs.at_put_grow(c->_idx, 0, -1);
1072 } else {
1073 // skip over loop
1074 _stack.push(c, 1);
1075 c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1076 }
1077 } else {
1078 _stack.push(c, 1);
1079 c = iff;
1080 }
1081 } else if (c->is_JumpProj()) {
1082 JumpNode* jmp = c->in(0)->as_Jump();
1083 if (_phase->get_loop(c) != _phase->get_loop(jmp)) {
1084 if (jmp->_fcnt == COUNT_UNKNOWN) {
1085 // assume never taken
1086 _freqs.at_put_grow(c->_idx, 0, -1);
1087 } else {
1088 // skip over loop
1089 _stack.push(c, 1);
1090 c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1091 }
1092 } else {
1093 _stack.push(c, 1);
1094 c = jmp;
1095 }
1096 } else if (c->Opcode() == Op_CatchProj &&
1097 c->in(0)->Opcode() == Op_Catch &&
1098 c->in(0)->in(0)->is_Proj() &&
1099 c->in(0)->in(0)->in(0)->is_Call()) {
1100 // assume exceptions are never thrown
1101 uint con = c->as_Proj()->_con;
1102 if (con == CatchProjNode::fall_through_index) {
1103 Node* call = c->in(0)->in(0)->in(0)->in(0);
1104 if (_phase->get_loop(call) != _phase->get_loop(c)) {
1105 _freqs.at_put_grow(c->_idx, 0, -1);
1106 } else {
1107 c = call;
1108 }
1109 } else {
1110 assert(con >= CatchProjNode::catch_all_index, "what else?");
1111 _freqs.at_put_grow(c->_idx, 0, -1);
1112 }
1113 } else if (c->unique_ctrl_out() == NULL && !c->is_If() && !c->is_Jump()) {
1114 ShouldNotReachHere();
1115 } else {
1116 c = c->in(0);
1117 }
1118 }
1119 }
1120 ShouldNotReachHere();
1121 return -1;
1122 }
1123};
1124
1125void PhaseIdealLoop::loop_predication_follow_branches(Node *n, IdealLoopTree *loop, float loop_trip_cnt,
1126 PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
1127 Node_List& if_proj_list) {
1128 assert(n->is_Region(), "start from a region");
1129 Node* tail = loop->tail();
1130 stack.push(n, 1);
1131 do {
1132 Node* c = stack.node();
1133 assert(c->is_Region() || c->is_IfProj(), "only region here");
1134 uint i = stack.index();
1135
1136 if (i < c->req()) {
1137 stack.set_index(i+1);
1138 Node* in = c->in(i);
1139 while (!is_dominator(in, tail) && !seen.test_set(in->_idx)) {
1140 IdealLoopTree* in_loop = get_loop(in);
1141 if (in_loop != loop) {
1142 in = in_loop->_head->in(LoopNode::EntryControl);
1143 } else if (in->is_Region()) {
1144 stack.push(in, 1);
1145 break;
1146 } else if (in->is_IfProj() &&
1147 in->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1148 (in->in(0)->Opcode() == Op_If ||
1149 in->in(0)->Opcode() == Op_RangeCheck)) {
1150 if (pf.to(in) * loop_trip_cnt >= 1) {
1151 stack.push(in, 1);
1152 }
1153 in = in->in(0);
1154 } else {
1155 in = in->in(0);
1156 }
1157 }
1158 } else {
1159 if (c->is_IfProj()) {
1160 if_proj_list.push(c);
1161 }
1162 stack.pop();
1163 }
1164
1165 } while (stack.size() > 0);
1166}
1167
1168
1169bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* proj, ProjNode *predicate_proj,
1170 CountedLoopNode *cl, ConNode* zero, Invariance& invar,
1171 Deoptimization::DeoptReason reason) {
  // Set to a non-NULL value when a predicate can be hoisted
1173 ProjNode* new_predicate_proj = NULL;
1174 IfNode* iff = proj->in(0)->as_If();
1175 Node* test = iff->in(1);
1176 if (!test->is_Bool()){ //Conv2B, ...
1177 return false;
1178 }
1179 BoolNode* bol = test->as_Bool();
1180 if (invar.is_invariant(bol)) {
1181 // Invariant test
1182 new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
1183 reason,
1184 iff->Opcode());
1185 Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
1186 BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
1187
1188 // Negate test if necessary
1189 bool negated = false;
1190 if (proj->_con != predicate_proj->_con) {
1191 new_predicate_bol = new BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
1192 register_new_node(new_predicate_bol, ctrl);
1193 negated = true;
1194 }
1195 IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
1196 _igvn.hash_delete(new_predicate_iff);
1197 new_predicate_iff->set_req(1, new_predicate_bol);
1198#ifndef PRODUCT
1199 if (TraceLoopPredicate) {
1200 tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx);
1201 loop->dump_head();
1202 } else if (TraceLoopOpts) {
1203 tty->print("Predicate IC ");
1204 loop->dump_head();
1205 }
1206#endif
1207 } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
1208 // Range check for counted loops
1209 const Node* cmp = bol->in(1)->as_Cmp();
1210 Node* idx = cmp->in(1);
1211 assert(!invar.is_invariant(idx), "index is variant");
1212 Node* rng = cmp->in(2);
1213 assert(rng->Opcode() == Op_LoadRange || iff->is_RangeCheck() || _igvn.type(rng)->is_int()->_lo >= 0, "must be");
1214 assert(invar.is_invariant(rng), "range must be invariant");
1215 int scale = 1;
1216 Node* offset = zero;
1217 bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
1218 assert(ok, "must be index expression");
1219
1220 Node* init = cl->init_trip();
1221 // Limit is not exact.
1222 // Calculate exact limit here.
1223 // Note, counted loop's test is '<' or '>'.
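    // For illustration (made-up numbers): with init = 0, stride = 3 and a
    // declared limit of 10, the loop runs for i = 0, 3, 6, 9, so the exact
    // limit (the first value the induction variable does not take) is 12,
    // and the range check predicates below must be built from that value.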
1224 Node* limit = exact_limit(loop);
1225 int stride = cl->stride()->get_int();
1226
1227 // Build if's for the upper and lower bound tests. The
1228 // lower_bound test will dominate the upper bound test and all
1229 // cloned or created nodes will use the lower bound test as
1230 // their declared control.
1231
1232 // Perform cloning to keep Invariance state correct since the
1233 // late schedule will place invariant things in the loop.
1234 Node *ctrl = predicate_proj->in(0)->as_If()->in(0);
1235 rng = invar.clone(rng, ctrl);
1236 if (offset && offset != zero) {
1237 assert(invar.is_invariant(offset), "offset must be loop invariant");
1238 offset = invar.clone(offset, ctrl);
1239 }
1240 // If predicate expressions may overflow in the integer range, longs are used.
1241 bool overflow = false;
1242
1243 // Test the lower bound
1244 BoolNode* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false, overflow);
1245 // Negate test if necessary
1246 bool negated = false;
1247 if (proj->_con != predicate_proj->_con) {
1248 lower_bound_bol = new BoolNode(lower_bound_bol->in(1), lower_bound_bol->_test.negate());
1249 register_new_node(lower_bound_bol, ctrl);
1250 negated = true;
1251 }
1252 ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
1253 IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
1254 _igvn.hash_delete(lower_bound_iff);
1255 lower_bound_iff->set_req(1, lower_bound_bol);
1256 if (TraceLoopPredicate) tty->print_cr("lower bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);
1257
1258 // Test the upper bound
1259 BoolNode* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true, overflow);
1260 negated = false;
1261 if (proj->_con != predicate_proj->_con) {
1262 upper_bound_bol = new BoolNode(upper_bound_bol->in(1), upper_bound_bol->_test.negate());
1263 register_new_node(upper_bound_bol, ctrl);
1264 negated = true;
1265 }
1266 ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
1267 assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
1268 IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
1269 _igvn.hash_delete(upper_bound_iff);
1270 upper_bound_iff->set_req(1, upper_bound_bol);
1271 if (TraceLoopPredicate) tty->print_cr("upper bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);
1272
1273 // Fall through into rest of the clean up code which will move
1274 // any dependent nodes onto the upper bound test.
1275 new_predicate_proj = upper_bound_proj;
1276
1277 if (iff->is_RangeCheck()) {
1278 new_predicate_proj = insert_skeleton_predicate(iff, loop, proj, predicate_proj, upper_bound_proj, scale, offset, init, limit, stride, rng, overflow, reason);
1279 }
1280
1281#ifndef PRODUCT
1282 if (TraceLoopOpts && !TraceLoopPredicate) {
1283 tty->print("Predicate RC ");
1284 loop->dump_head();
1285 }
1286#endif
1287 } else {
1288 // Loop variant check (for example, range check in non-counted loop)
1289 // with uncommon trap.
1290 return false;
1291 }
1292 assert(new_predicate_proj != NULL, "sanity");
  // Success - attach condition (new_predicate_bol) to predicate if
  invar.map_ctrl(proj, new_predicate_proj); // so that later invariance checks and cloning use the hoisted projection
1295
1296 // Eliminate the old If in the loop body
1297 dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con );
1298
1299 C->set_major_progress();
1300 return true;
1301}
1302
1303
1304// After pre/main/post loops are created, we'll put a copy of some
1305// range checks between the pre and main loop to validate the value
1306// of the main loop induction variable. Make a copy of the predicates
1307// here with an opaque node as a place holder for the value (will be
1308// updated by PhaseIdealLoop::clone_skeleton_predicate()).
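//
// For illustration (a sketch, not the exact graph shape): the copy inserted
// here is a new If above the loop whose condition has the form
//
//   Opaque4( Bool( CmpU( max(scale*Opaque1(init) + offset), range ) ), ConI(1) )
//
// where Opaque1(init) is the placeholder that clone_skeleton_predicate()
// later replaces with the main loop's actual initial value, and the Opaque4
// keeps the test alive until loop opts are over.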
1309ProjNode* PhaseIdealLoop::insert_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
1310 ProjNode* proj, ProjNode *predicate_proj,
1311 ProjNode* upper_bound_proj,
1312 int scale, Node* offset,
1313 Node* init, Node* limit, jint stride,
1314 Node* rng, bool &overflow,
1315 Deoptimization::DeoptReason reason) {
1316 assert(proj->_con && predicate_proj->_con, "not a range check?");
1317 Node* opaque_init = new Opaque1Node(C, init);
1318 register_new_node(opaque_init, upper_bound_proj);
1319 BoolNode* bol = rc_predicate(loop, upper_bound_proj, scale, offset, opaque_init, limit, stride, rng, (stride > 0) != (scale > 0), overflow);
1320 Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over
1321 register_new_node(opaque_bol, upper_bound_proj);
1322 ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
1323 _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
1324 assert(opaque_init->outcnt() > 0, "should be used");
1325 return new_proj;
1326}
1327
1328//------------------------------ loop_predication_impl--------------------------
1329// Insert loop predicates for null checks and range checks
1330bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
1331 if (!UseLoopPredicate) return false;
1332
1333 if (!loop->_head->is_Loop()) {
1334 // Could be a simple region when irreducible loops are present.
1335 return false;
1336 }
1337 LoopNode* head = loop->_head->as_Loop();
1338
1339 if (head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
1340 // do nothing for infinite loops
1341 return false;
1342 }
1343
1344 if (head->is_OuterStripMinedLoop()) {
1345 return false;
1346 }
1347
1348 CountedLoopNode *cl = NULL;
1349 if (head->is_valid_counted_loop()) {
1350 cl = head->as_CountedLoop();
    // do nothing for iteration-split loops
1352 if (!cl->is_normal_loop()) return false;
1353 // Avoid RCE if Counted loop's test is '!='.
1354 BoolTest::mask bt = cl->loopexit()->test_trip();
1355 if (bt != BoolTest::lt && bt != BoolTest::gt)
1356 cl = NULL;
1357 }
1358
1359 Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
1360 ProjNode *loop_limit_proj = NULL;
1361 ProjNode *predicate_proj = NULL;
1362 ProjNode *profile_predicate_proj = NULL;
1363 // Loop limit check predicate should be near the loop.
1364 loop_limit_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
1365 if (loop_limit_proj != NULL) {
1366 entry = skip_loop_predicates(loop_limit_proj);
1367 }
1368 bool has_profile_predicates = false;
1369 profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
1370 if (profile_predicate_proj != NULL) {
1371 Node* n = skip_loop_predicates(entry);
1372 // Check if predicates were already added to the profile predicate
1373 // block
1374 if (n != entry->in(0)->in(0) || n->outcnt() != 1) {
1375 has_profile_predicates = true;
1376 }
1377 entry = n;
1378 }
1379 predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
1380
1381 float loop_trip_cnt = -1;
1382 bool follow_branches = loop_predication_should_follow_branches(loop, profile_predicate_proj, loop_trip_cnt);
1383 assert(!follow_branches || loop_trip_cnt >= 0, "negative trip count?");
1384
1385 if (predicate_proj == NULL && !follow_branches) {
1386#ifndef PRODUCT
1387 if (TraceLoopPredicate) {
1388 tty->print("missing predicate:");
1389 loop->dump_head();
1390 head->dump(1);
1391 }
1392#endif
1393 return false;
1394 }
1395 ConNode* zero = _igvn.intcon(0);
1396 set_ctrl(zero, C->root());
1397
1398 ResourceArea *area = Thread::current()->resource_area();
1399 Invariance invar(area, loop);
1400
1401 // Create list of if-projs such that a newer proj dominates all older
1402 // projs in the list, and they all dominate loop->tail()
1403 Node_List if_proj_list(area);
1404 Node_List regions(area);
1405 Node *current_proj = loop->tail(); //start from tail
1406
1407
1408 Node_List controls(area);
1409 while (current_proj != head) {
1410 if (loop == get_loop(current_proj) && // still in the loop ?
1411 current_proj->is_Proj() && // is a projection ?
1412 (current_proj->in(0)->Opcode() == Op_If ||
         current_proj->in(0)->Opcode() == Op_RangeCheck)) { // is an If projection?
1414 if_proj_list.push(current_proj);
1415 }
1416 if (follow_branches &&
1417 current_proj->Opcode() == Op_Region &&
1418 loop == get_loop(current_proj)) {
1419 regions.push(current_proj);
1420 }
1421 current_proj = idom(current_proj);
1422 }
1423
1424 bool hoisted = false; // true if at least one proj is promoted
1425
1426 if (!has_profile_predicates) {
1427 while (if_proj_list.size() > 0) {
1428 Node* n = if_proj_list.pop();
1429
1430 ProjNode* proj = n->as_Proj();
1431 IfNode* iff = proj->in(0)->as_If();
1432
1433 CallStaticJavaNode* call = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1434 if (call == NULL) {
1435 if (loop->is_loop_exit(iff)) {
          // stop processing the remaining projs in the list because their
          // execution depends on the condition of "iff" (iff->in(1)).
1438 break;
1439 } else {
          // Both arms are inside the loop. There are two cases:
          // (1) there is one backward branch. In this case, any remaining proj
          //     in the if_proj list post-dominates "iff". So, the condition of "iff"
          //     does not directly determine the execution of the remaining projs,
          //     and we can safely continue.
          // (2) both arms are forward branches, i.e. a diamond shape. In this case,
          //     "proj" does not dominate loop->tail(), so it cannot be in the
          //     if_proj list.
1447 continue;
1448 }
1449 }
1450 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(call->uncommon_trap_request());
1451 if (reason == Deoptimization::Reason_predicate) {
1452 break;
1453 }
1454
1455 if (predicate_proj != NULL) {
1456 hoisted = loop_predication_impl_helper(loop, proj, predicate_proj, cl, zero, invar, Deoptimization::Reason_predicate) | hoisted;
1457 }
1458 } // end while
1459 }
1460
1461 Node_List if_proj_list_freq(area);
1462 if (follow_branches) {
1463 PathFrequency pf(loop->_head, this);
1464
1465 // Some projections were skipped by regular predicates because of
1466 // an early loop exit. Try them with profile data.
1467 while (if_proj_list.size() > 0) {
1468 Node* proj = if_proj_list.pop();
1469 float f = pf.to(proj);
1470 if (proj->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1471 f * loop_trip_cnt >= 1) {
1472 hoisted = loop_predication_impl_helper(loop, proj->as_Proj(), profile_predicate_proj, cl, zero, invar, Deoptimization::Reason_profile_predicate) | hoisted;
1473 }
1474 }
1475
1476 // And look into all branches
1477 Node_Stack stack(0);
1478 VectorSet seen(Thread::current()->resource_area());
1479 while (regions.size() > 0) {
1480 Node* c = regions.pop();
1481 loop_predication_follow_branches(c, loop, loop_trip_cnt, pf, stack, seen, if_proj_list_freq);
1482 }
1483
1484 for (uint i = 0; i < if_proj_list_freq.size(); i++) {
1485 ProjNode* proj = if_proj_list_freq.at(i)->as_Proj();
1486 hoisted = loop_predication_impl_helper(loop, proj, profile_predicate_proj, cl, zero, invar, Deoptimization::Reason_profile_predicate) | hoisted;
1487 }
1488 }
1489
1490#ifndef PRODUCT
  // Report that loop predication was actually performed for this loop
1493 if (TraceLoopPredicate && hoisted) {
1494 tty->print("Loop Predication Performed:");
1495 loop->dump_head();
1496 }
1497#endif
1498
1499 head->verify_strip_mined(1);
1500
1501 return hoisted;
1502}
1503
1504//------------------------------loop_predication--------------------------------
1505// driver routine for loop predication optimization
1506bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) {
1507 bool hoisted = false;
1508 // Recursively promote predicates
1509 if (_child) {
1510 hoisted = _child->loop_predication( phase);
1511 }
1512
1513 // self
1514 if (!_irreducible && !tail()->is_top()) {
1515 hoisted |= phase->loop_predication_impl(this);
1516 }
1517
1518 if (_next) { //sibling
1519 hoisted |= _next->loop_predication( phase);
1520 }
1521
1522 return hoisted;
1523}
1524