| 1 | /* |
| 2 | * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "ci/ciMethodData.hpp" |
| 27 | #include "compiler/compileLog.hpp" |
| 28 | #include "gc/shared/barrierSet.hpp" |
| 29 | #include "gc/shared/c2/barrierSetC2.hpp" |
| 30 | #include "libadt/vectset.hpp" |
| 31 | #include "memory/allocation.inline.hpp" |
| 32 | #include "memory/resourceArea.hpp" |
| 33 | #include "opto/addnode.hpp" |
| 34 | #include "opto/callnode.hpp" |
| 35 | #include "opto/connode.hpp" |
| 36 | #include "opto/convertnode.hpp" |
| 37 | #include "opto/divnode.hpp" |
| 38 | #include "opto/idealGraphPrinter.hpp" |
| 39 | #include "opto/loopnode.hpp" |
| 40 | #include "opto/mulnode.hpp" |
| 41 | #include "opto/rootnode.hpp" |
| 42 | #include "opto/superword.hpp" |
| 43 | |
| 44 | //============================================================================= |
| 45 | //--------------------------is_cloop_ind_var----------------------------------- |
| 46 | // Determine if a node is a counted loop induction variable. |
| 47 | // NOTE: The method is declared in "node.hpp". |
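// For example, the trip-counter Phi of a loop already converted to a
// CountedLoop ('for (int i = 0; i < n; i++)') qualifies; a copy Phi or a
// Phi whose region is a plain LoopNode does not.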
| 48 | bool Node::is_cloop_ind_var() const { |
| 49 | return (is_Phi() && !as_Phi()->is_copy() && |
| 50 | as_Phi()->region()->is_CountedLoop() && |
| 51 | as_Phi()->region()->as_CountedLoop()->phi() == this); |
| 52 | } |
| 53 | |
| 54 | //============================================================================= |
| 55 | //------------------------------dump_spec-------------------------------------- |
| 56 | // Dump special per-node info |
| 57 | #ifndef PRODUCT |
| 58 | void LoopNode::dump_spec(outputStream *st) const { |
| 59 | if (is_inner_loop()) st->print( "inner " ); |
| 60 | if (is_partial_peel_loop()) st->print( "partial_peel " ); |
| 61 | if (partial_peel_has_failed()) st->print( "partial_peel_failed " ); |
| 62 | } |
| 63 | #endif |
| 64 | |
| 65 | //------------------------------is_valid_counted_loop------------------------- |
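// A CountedLoop is considered valid when it has its canonical shape: a
// CountedLoopEndNode whose true projection is the loop backedge and whose
// false projection is the loop exit, a trip-counter Phi on the loop head
// whose backedge input is the increment, and a constant stride.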
| 66 | bool LoopNode::is_valid_counted_loop() const { |
| 67 | if (is_CountedLoop()) { |
| 68 | CountedLoopNode* l = as_CountedLoop(); |
| 69 | CountedLoopEndNode* le = l->loopexit_or_null(); |
| 70 | if (le != NULL && |
| 71 | le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) { |
| 72 | Node* phi = l->phi(); |
| 73 | Node* exit = le->proj_out_or_null(0 /* false */); |
| 74 | if (exit != NULL && exit->Opcode() == Op_IfFalse && |
| 75 | phi != NULL && phi->is_Phi() && |
| 76 | phi->in(LoopNode::LoopBackControl) == l->incr() && |
| 77 | le->loopnode() == l && le->stride_is_con()) { |
| 78 | return true; |
| 79 | } |
| 80 | } |
| 81 | } |
| 82 | return false; |
| 83 | } |
| 84 | |
| 85 | //------------------------------get_early_ctrl--------------------------------- |
| 86 | // Compute earliest legal control |
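// The earliest legal control for a data node is the deepest point (in the
// dominator tree) among the controls of its inputs.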
| 87 | Node *PhaseIdealLoop::get_early_ctrl( Node *n ) { |
| 88 | assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" ); |
| 89 | uint i; |
| 90 | Node *early; |
| 91 | if (n->in(0) && !n->is_expensive()) { |
| 92 | early = n->in(0); |
| 93 | if (!early->is_CFG()) // Might be a non-CFG multi-def |
| 94 | early = get_ctrl(early); // So treat input as a straight data input |
| 95 | i = 1; |
| 96 | } else { |
| 97 | early = get_ctrl(n->in(1)); |
| 98 | i = 2; |
| 99 | } |
| 100 | uint e_d = dom_depth(early); |
| 101 | assert( early, "" ); |
| 102 | for (; i < n->req(); i++) { |
| 103 | Node *cin = get_ctrl(n->in(i)); |
| 104 | assert( cin, "" ); |
| 105 | // Keep deepest dominator depth |
| 106 | uint c_d = dom_depth(cin); |
| 107 | if (c_d > e_d) { // Deeper guy? |
| 108 | early = cin; // Keep deepest found so far |
| 109 | e_d = c_d; |
| 110 | } else if (c_d == e_d && // Same depth? |
| 111 | early != cin) { // If not equal, must use slower algorithm |
| 112 | // If same depth but not equal, one _must_ dominate the other |
| 113 | // and we want the deeper (i.e., dominated) guy. |
| 114 | Node *n1 = early; |
| 115 | Node *n2 = cin; |
| 116 | while (1) { |
| 117 | n1 = idom(n1); // Walk up until break cycle |
| 118 | n2 = idom(n2); |
| 119 | if (n1 == cin || // Walked early up to cin |
| 120 | dom_depth(n2) < c_d) |
| 121 | break; // early is deeper; keep him |
| 122 | if (n2 == early || // Walked cin up to early |
| 123 | dom_depth(n1) < c_d) { |
| 124 | early = cin; // cin is deeper; keep him |
| 125 | break; |
| 126 | } |
| 127 | } |
| 128 | e_d = dom_depth(early); // Reset depth register cache |
| 129 | } |
| 130 | } |
| 131 | |
| 132 | // Return earliest legal location |
| 133 | assert(early == find_non_split_ctrl(early), "unexpected early control" ); |
| 134 | |
| 135 | if (n->is_expensive() && !_verify_only && !_verify_me) { |
| 136 | assert(n->in(0), "should have control input" ); |
| 137 | early = get_early_ctrl_for_expensive(n, early); |
| 138 | } |
| 139 | |
| 140 | return early; |
| 141 | } |
| 142 | |
| 143 | //------------------------------get_early_ctrl_for_expensive--------------------------------- |
| 144 | // Move node up the dominator tree as high as legal while still beneficial |
| 145 | Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) { |
| 146 | assert(n->in(0) && n->is_expensive(), "expensive node with control input here" ); |
| 147 | assert(OptimizeExpensiveOps, "optimization off?" ); |
| 148 | |
| 149 | Node* ctl = n->in(0); |
| 150 | assert(ctl->is_CFG(), "expensive input 0 must be cfg" ); |
| 151 | uint min_dom_depth = dom_depth(earliest); |
| 152 | #ifdef ASSERT |
| 153 | if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) { |
| 154 | dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive" , n, earliest, ctl); |
| 155 | assert(false, "Bad graph detected in get_early_ctrl_for_expensive" ); |
| 156 | } |
| 157 | #endif |
| 158 | if (dom_depth(ctl) < min_dom_depth) { |
| 159 | return earliest; |
| 160 | } |
| 161 | |
| 162 | while (1) { |
| 163 | Node *next = ctl; |
    // Moving the node out of a loop on the projection of an If
    // confuses loop predication. So once we hit a Loop in an If branch
    // that doesn't branch to an UNC, we stop. The code that processes
    // expensive nodes will notice the loop and skip over it to try to
    // move the node further up.
| 169 | if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) { |
| 170 | if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { |
| 171 | break; |
| 172 | } |
| 173 | next = idom(ctl->in(1)->in(0)); |
| 174 | } else if (ctl->is_Proj()) { |
      // We only move it up along a projection if the projection is
      // the single control projection for its parent: same code path,
      // whether it's an If with an UNC or the fallthrough of a call.
| 178 | Node* parent_ctl = ctl->in(0); |
| 179 | if (parent_ctl == NULL) { |
| 180 | break; |
| 181 | } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) { |
| 182 | next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control(); |
| 183 | } else if (parent_ctl->is_If()) { |
| 184 | if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { |
| 185 | break; |
| 186 | } |
| 187 | assert(idom(ctl) == parent_ctl, "strange" ); |
| 188 | next = idom(parent_ctl); |
| 189 | } else if (ctl->is_CatchProj()) { |
| 190 | if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) { |
| 191 | break; |
| 192 | } |
| 193 | assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph" ); |
| 194 | next = parent_ctl->in(0)->in(0)->in(0); |
| 195 | } else { |
| 196 | // Check if parent control has a single projection (this |
| 197 | // control is the only possible successor of the parent |
| 198 | // control). If so, we can try to move the node above the |
| 199 | // parent control. |
| 200 | int nb_ctl_proj = 0; |
| 201 | for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) { |
| 202 | Node *p = parent_ctl->fast_out(i); |
| 203 | if (p->is_Proj() && p->is_CFG()) { |
| 204 | nb_ctl_proj++; |
| 205 | if (nb_ctl_proj > 1) { |
| 206 | break; |
| 207 | } |
| 208 | } |
| 209 | } |
| 210 | |
| 211 | if (nb_ctl_proj > 1) { |
| 212 | break; |
| 213 | } |
| 214 | assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() || |
| 215 | BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node" ); |
| 216 | assert(idom(ctl) == parent_ctl, "strange" ); |
| 217 | next = idom(parent_ctl); |
| 218 | } |
| 219 | } else { |
| 220 | next = idom(ctl); |
| 221 | } |
| 222 | if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) { |
| 223 | break; |
| 224 | } |
| 225 | ctl = next; |
| 226 | } |
| 227 | |
| 228 | if (ctl != n->in(0)) { |
| 229 | _igvn.replace_input_of(n, 0, ctl); |
| 230 | _igvn.hash_insert(n); |
| 231 | } |
| 232 | |
| 233 | return ctl; |
| 234 | } |
| 235 | |
| 236 | |
| 237 | //------------------------------set_early_ctrl--------------------------------- |
| 238 | // Set earliest legal control |
| 239 | void PhaseIdealLoop::set_early_ctrl( Node *n ) { |
| 240 | Node *early = get_early_ctrl(n); |
| 241 | |
| 242 | // Record earliest legal location |
| 243 | set_ctrl(n, early); |
| 244 | } |
| 245 | |
| 246 | //------------------------------set_subtree_ctrl------------------------------- |
// Set missing _ctrl entries on new nodes
| 248 | void PhaseIdealLoop::set_subtree_ctrl( Node *n ) { |
| 249 | // Already set? Get out. |
| 250 | if( _nodes[n->_idx] ) return; |
| 251 | // Recursively set _nodes array to indicate where the Node goes |
| 252 | uint i; |
| 253 | for( i = 0; i < n->req(); ++i ) { |
| 254 | Node *m = n->in(i); |
| 255 | if( m && m != C->root() ) |
| 256 | set_subtree_ctrl( m ); |
| 257 | } |
| 258 | |
| 259 | // Fixup self |
| 260 | set_early_ctrl( n ); |
| 261 | } |
| 262 | |
| 263 | // Create a skeleton strip mined outer loop: a Loop head before the |
| 264 | // inner strip mined loop, a safepoint and an exit condition guarded |
| 265 | // by an opaque node after the inner strip mined loop with a backedge |
| 266 | // to the loop head. The inner strip mined loop is left as it is. Only |
| 267 | // once loop optimizations are over, do we adjust the inner loop exit |
| 268 | // condition to limit its number of iterations, set the outer loop |
| 269 | // exit condition and add Phis to the outer loop head. Some loop |
| 270 | // optimizations that operate on the inner strip mined loop need to be |
| 271 | // aware of the outer strip mined loop: loop unswitching needs to |
| 272 | // clone the outer loop as well as the inner, unrolling needs to only |
| 273 | // clone the inner loop etc. No optimizations need to change the outer |
| 274 | // strip mined loop as it is only a skeleton. |
| 275 | IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control, |
| 276 | IdealLoopTree* loop, float cl_prob, float le_fcnt, |
| 277 | Node*& entry_control, Node*& iffalse) { |
| 278 | Node* outer_test = _igvn.intcon(0); |
| 279 | set_ctrl(outer_test, C->root()); |
| 280 | Node *orig = iffalse; |
| 281 | iffalse = iffalse->clone(); |
| 282 | _igvn.register_new_node_with_optimizer(iffalse); |
| 283 | set_idom(iffalse, idom(orig), dom_depth(orig)); |
| 284 | |
| 285 | IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt); |
| 286 | Node *outer_ift = new IfTrueNode (outer_le); |
| 287 | Node* outer_iff = orig; |
| 288 | _igvn.replace_input_of(outer_iff, 0, outer_le); |
| 289 | |
| 290 | LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift); |
| 291 | entry_control = outer_l; |
| 292 | |
| 293 | IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift); |
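  // Splice the new outer loop tree into the parent's child list in place of
  // the inner loop; the inner loop becomes its only child, one nesting level
  // deeper than before.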
| 294 | IdealLoopTree* parent = loop->_parent; |
| 295 | IdealLoopTree* sibling = parent->_child; |
| 296 | if (sibling == loop) { |
| 297 | parent->_child = outer_ilt; |
| 298 | } else { |
| 299 | while (sibling->_next != loop) { |
| 300 | sibling = sibling->_next; |
| 301 | } |
| 302 | sibling->_next = outer_ilt; |
| 303 | } |
| 304 | outer_ilt->_next = loop->_next; |
| 305 | outer_ilt->_parent = parent; |
| 306 | outer_ilt->_child = loop; |
| 307 | outer_ilt->_nest = loop->_nest; |
| 308 | loop->_parent = outer_ilt; |
| 309 | loop->_next = NULL; |
| 310 | loop->_nest++; |
| 311 | |
| 312 | set_loop(iffalse, outer_ilt); |
| 313 | register_control(outer_le, outer_ilt, iffalse); |
| 314 | register_control(outer_ift, outer_ilt, outer_le); |
| 315 | set_idom(outer_iff, outer_le, dom_depth(outer_le)); |
| 316 | _igvn.register_new_node_with_optimizer(outer_l); |
| 317 | set_loop(outer_l, outer_ilt); |
| 318 | set_idom(outer_l, init_control, dom_depth(init_control)+1); |
| 319 | |
| 320 | return outer_ilt; |
| 321 | } |
| 322 | |
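//------------------------------insert_loop_limit_check------------------------
// Attach the given 'cmp_limit'/'bol' test to the loop limit check predicate:
// create a new If for the Reason_loop_limit_check predicate and replace its
// Opaque1/Conv2B placeholder condition with the real limit test.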
| 323 | void PhaseIdealLoop::insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol) { |
| 324 | Node* new_predicate_proj = create_new_if_for_predicate(limit_check_proj, NULL, |
| 325 | Deoptimization::Reason_loop_limit_check, |
| 326 | Op_If); |
| 327 | Node* iff = new_predicate_proj->in(0); |
| 328 | assert(iff->Opcode() == Op_If, "bad graph shape" ); |
| 329 | Node* conv = iff->in(1); |
| 330 | assert(conv->Opcode() == Op_Conv2B, "bad graph shape" ); |
| 331 | Node* opaq = conv->in(1); |
| 332 | assert(opaq->Opcode() == Op_Opaque1, "bad graph shape" ); |
| 333 | cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit); |
| 334 | bol = _igvn.register_new_node_with_optimizer(bol); |
| 335 | set_subtree_ctrl(bol); |
| 336 | _igvn.replace_input_of(iff, 1, bol); |
| 337 | |
| 338 | #ifndef PRODUCT |
  // Report that the loop limit check has actually been generated
  // for this loop
| 341 | if (TraceLoopLimitCheck) { |
| 342 | tty->print_cr("Counted Loop Limit Check generated:" ); |
| 343 | debug_only( bol->dump(2); ) |
| 344 | } |
| 345 | #endif |
| 346 | } |
| 347 | |
| 348 | //------------------------------is_counted_loop-------------------------------- |
| 349 | bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop) { |
| 350 | PhaseGVN *gvn = &_igvn; |
| 351 | |
  // Counted loop head must be a good RegionNode with only 3 non-NULL
| 353 | // control input edges: Self, Entry, LoopBack. |
| 354 | if (x->in(LoopNode::Self) == NULL || x->req() != 3 || loop->_irreducible) { |
| 355 | return false; |
| 356 | } |
| 357 | Node *init_control = x->in(LoopNode::EntryControl); |
| 358 | Node *back_control = x->in(LoopNode::LoopBackControl); |
| 359 | if (init_control == NULL || back_control == NULL) // Partially dead |
| 360 | return false; |
| 361 | // Must also check for TOP when looking for a dead loop |
| 362 | if (init_control->is_top() || back_control->is_top()) |
| 363 | return false; |
| 364 | |
| 365 | // Allow funny placement of Safepoint |
| 366 | if (back_control->Opcode() == Op_SafePoint) { |
| 367 | if (LoopStripMiningIter != 0) { |
| 368 | // Leaving the safepoint on the backedge and creating a |
| 369 | // CountedLoop will confuse optimizations. We can't move the |
| 370 | // safepoint around because its jvm state wouldn't match a new |
| 371 | // location. Give up on that loop. |
| 372 | return false; |
| 373 | } |
| 374 | back_control = back_control->in(TypeFunc::Control); |
| 375 | } |
| 376 | |
| 377 | // Controlling test for loop |
| 378 | Node *iftrue = back_control; |
| 379 | uint iftrue_op = iftrue->Opcode(); |
| 380 | if (iftrue_op != Op_IfTrue && |
| 381 | iftrue_op != Op_IfFalse) |
| 382 | // I have a weird back-control. Probably the loop-exit test is in |
| 383 | // the middle of the loop and I am looking at some trailing control-flow |
| 384 | // merge point. To fix this I would have to partially peel the loop. |
| 385 | return false; // Obscure back-control |
| 386 | |
| 387 | // Get boolean guarding loop-back test |
| 388 | Node *iff = iftrue->in(0); |
| 389 | if (get_loop(iff) != loop || !iff->in(1)->is_Bool()) |
| 390 | return false; |
| 391 | BoolNode *test = iff->in(1)->as_Bool(); |
| 392 | BoolTest::mask bt = test->_test._test; |
| 393 | float cl_prob = iff->as_If()->_prob; |
| 394 | if (iftrue_op == Op_IfFalse) { |
| 395 | bt = BoolTest(bt).negate(); |
| 396 | cl_prob = 1.0 - cl_prob; |
| 397 | } |
| 398 | // Get backedge compare |
| 399 | Node *cmp = test->in(1); |
| 400 | int cmp_op = cmp->Opcode(); |
| 401 | if (cmp_op != Op_CmpI) |
| 402 | return false; // Avoid pointer & float compares |
| 403 | |
| 404 | // Find the trip-counter increment & limit. Limit must be loop invariant. |
| 405 | Node *incr = cmp->in(1); |
| 406 | Node *limit = cmp->in(2); |
| 407 | |
| 408 | // --------- |
| 409 | // need 'loop()' test to tell if limit is loop invariant |
| 410 | // --------- |
| 411 | |
| 412 | if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit? |
| 413 | Node *tmp = incr; // Then reverse order into the CmpI |
| 414 | incr = limit; |
| 415 | limit = tmp; |
| 416 | bt = BoolTest(bt).commute(); // And commute the exit test |
| 417 | } |
| 418 | if (is_member(loop, get_ctrl(limit))) // Limit must be loop-invariant |
| 419 | return false; |
| 420 | if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant |
| 421 | return false; |
| 422 | |
| 423 | Node* phi_incr = NULL; |
| 424 | // Trip-counter increment must be commutative & associative. |
| 425 | if (incr->Opcode() == Op_CastII) { |
| 426 | incr = incr->in(1); |
| 427 | } |
| 428 | if (incr->is_Phi()) { |
| 429 | if (incr->as_Phi()->region() != x || incr->req() != 3) |
| 430 | return false; // Not simple trip counter expression |
| 431 | phi_incr = incr; |
| 432 | incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi |
| 433 | if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant |
| 434 | return false; |
| 435 | } |
| 436 | |
| 437 | Node* trunc1 = NULL; |
| 438 | Node* trunc2 = NULL; |
| 439 | const TypeInt* iv_trunc_t = NULL; |
| 440 | Node* orig_incr = incr; |
| 441 | if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) { |
| 442 | return false; // Funny increment opcode |
| 443 | } |
| 444 | assert(incr->Opcode() == Op_AddI, "wrong increment code" ); |
| 445 | |
| 446 | const TypeInt* limit_t = gvn->type(limit)->is_int(); |
| 447 | if (trunc1 != NULL) { |
| 448 | // When there is a truncation, we must be sure that after the truncation |
| 449 | // the trip counter will end up higher than the limit, otherwise we are looking |
| 450 | // at an endless loop. Can happen with range checks. |
| 451 | |
| 452 | // Example: |
| 453 | // int i = 0; |
    // while (true) {
    //   sum += array[i];
    //   i++;
    //   i = i & 0x7fff;
    // }
    //
    // If the array is shorter than 0x8000 this exits through an AIOOB
    //  - Counted loop transformation is ok
    // If the array is longer, then this is an endless loop
| 463 | // - No transformation can be done. |
| 464 | |
| 465 | const TypeInt* incr_t = gvn->type(orig_incr)->is_int(); |
| 466 | if (limit_t->_hi > incr_t->_hi) { |
| 467 | // if the limit can have a higher value than the increment (before the phi) |
| 468 | return false; |
| 469 | } |
| 470 | } |
| 471 | |
| 472 | // Get merge point |
| 473 | Node *xphi = incr->in(1); |
| 474 | Node *stride = incr->in(2); |
| 475 | if (!stride->is_Con()) { // Oops, swap these |
| 476 | if (!xphi->is_Con()) // Is the other guy a constant? |
| 477 | return false; // Nope, unknown stride, bail out |
| 478 | Node *tmp = xphi; // 'incr' is commutative, so ok to swap |
| 479 | xphi = stride; |
| 480 | stride = tmp; |
| 481 | } |
| 482 | if (xphi->Opcode() == Op_CastII) { |
| 483 | xphi = xphi->in(1); |
| 484 | } |
| 485 | // Stride must be constant |
| 486 | int stride_con = stride->get_int(); |
| 487 | if (stride_con == 0) |
| 488 | return false; // missed some peephole opt |
| 489 | |
| 490 | if (!xphi->is_Phi()) |
| 491 | return false; // Too much math on the trip counter |
| 492 | if (phi_incr != NULL && phi_incr != xphi) |
| 493 | return false; |
| 494 | PhiNode *phi = xphi->as_Phi(); |
| 495 | |
| 496 | // Phi must be of loop header; backedge must wrap to increment |
| 497 | if (phi->region() != x) |
| 498 | return false; |
| 499 | if ((trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr) || |
| 500 | (trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1)) { |
| 501 | return false; |
| 502 | } |
| 503 | Node *init_trip = phi->in(LoopNode::EntryControl); |
| 504 | |
| 505 | // If iv trunc type is smaller than int, check for possible wrap. |
| 506 | if (!TypeInt::INT->higher_equal(iv_trunc_t)) { |
| 507 | assert(trunc1 != NULL, "must have found some truncation" ); |
| 508 | |
| 509 | // Get a better type for the phi (filtered thru if's) |
| 510 | const TypeInt* phi_ft = filtered_type(phi); |
| 511 | |
| 512 | // Can iv take on a value that will wrap? |
| 513 | // |
| 514 | // Ensure iv's limit is not within "stride" of the wrap value. |
| 515 | // |
| 516 | // Example for "short" type |
| 517 | // Truncation ensures value is in the range -32768..32767 (iv_trunc_t) |
| 518 | // If the stride is +10, then the last value of the induction |
| 519 | // variable before the increment (phi_ft->_hi) must be |
| 520 | // <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to |
| 521 | // ensure no truncation occurs after the increment. |
| 522 | |
| 523 | if (stride_con > 0) { |
| 524 | if (iv_trunc_t->_hi - phi_ft->_hi < stride_con || |
| 525 | iv_trunc_t->_lo > phi_ft->_lo) { |
| 526 | return false; // truncation may occur |
| 527 | } |
| 528 | } else if (stride_con < 0) { |
| 529 | if (iv_trunc_t->_lo - phi_ft->_lo > stride_con || |
| 530 | iv_trunc_t->_hi < phi_ft->_hi) { |
| 531 | return false; // truncation may occur |
| 532 | } |
| 533 | } |
| 534 | // No possibility of wrap so truncation can be discarded |
| 535 | // Promote iv type to Int |
| 536 | } else { |
| 537 | assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int" ); |
| 538 | } |
| 539 | |
| 540 | // If the condition is inverted and we will be rolling |
| 541 | // through MININT to MAXINT, then bail out. |
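  // E.g., 'for (i = 0; i >= limit; i++)' can only terminate by wrapping past
  // max_jint, and 'i != limit' with stride 2 may step over the limit entirely,
  // so such loops are not converted.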
| 542 | if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice! |
| 543 | // Odd stride |
| 544 | (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) || |
| 545 | // Count down loop rolls through MAXINT |
| 546 | ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) || |
| 547 | // Count up loop rolls through MININT |
| 548 | ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) { |
| 549 | return false; // Bail out |
| 550 | } |
| 551 | |
| 552 | const TypeInt* init_t = gvn->type(init_trip)->is_int(); |
| 553 | |
| 554 | if (stride_con > 0) { |
| 555 | jlong init_p = (jlong)init_t->_lo + stride_con; |
| 556 | if (init_p > (jlong)max_jint || init_p > (jlong)limit_t->_hi) |
| 557 | return false; // cyclic loop or this loop trips only once |
| 558 | } else { |
| 559 | jlong init_p = (jlong)init_t->_hi + stride_con; |
| 560 | if (init_p < (jlong)min_jint || init_p < (jlong)limit_t->_lo) |
| 561 | return false; // cyclic loop or this loop trips only once |
| 562 | } |
| 563 | |
| 564 | if (phi_incr != NULL && bt != BoolTest::ne) { |
    // Check if there is a possibility of IV overflowing after the first increment.
| 566 | if (stride_con > 0) { |
| 567 | if (init_t->_hi > max_jint - stride_con) { |
| 568 | return false; |
| 569 | } |
| 570 | } else { |
| 571 | if (init_t->_lo < min_jint - stride_con) { |
| 572 | return false; |
| 573 | } |
| 574 | } |
| 575 | } |
| 576 | |
| 577 | // ================================================= |
| 578 | // ---- SUCCESS! Found A Trip-Counted Loop! ----- |
| 579 | // |
| 580 | assert(x->Opcode() == Op_Loop, "regular loops only" ); |
| 581 | C->print_method(PHASE_BEFORE_CLOOPS, 3); |
| 582 | |
| 583 | Node *hook = new Node(6); |
| 584 | |
| 585 | // =================================================== |
  // Generate loop limit check to avoid integer overflow
  // in cases like the following (cyclic loops):
| 588 | // |
| 589 | // for (i=0; i <= max_jint; i++) {} |
| 590 | // for (i=0; i < max_jint; i+=2) {} |
| 591 | // |
| 592 | // |
| 593 | // Limit check predicate depends on the loop test: |
| 594 | // |
| 595 | // for(;i != limit; i++) --> limit <= (max_jint) |
| 596 | // for(;i < limit; i+=stride) --> limit <= (max_jint - stride + 1) |
| 597 | // for(;i <= limit; i+=stride) --> limit <= (max_jint - stride ) |
| 598 | // |
| 599 | |
  // Check if the limit is excluded to do a more precise int overflow check.
| 601 | bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge); |
| 602 | int stride_m = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1)); |
| 603 | |
  // If the compare points directly to the phi we need to adjust
  // the compare so that it points to the incr. The limit has
  // to be adjusted to keep the trip count the same and the
  // adjusted limit should be checked for int overflow.
| 608 | if (phi_incr != NULL) { |
| 609 | stride_m += stride_con; |
| 610 | } |
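  // For example, with a loop test 'i < limit' and stride 4 (compare already on
  // the incremented value, so phi_incr == NULL), stride_m == 3 and the check
  // below requires limit <= max_jint - 3, matching the table above.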
| 611 | |
| 612 | if (limit->is_Con()) { |
| 613 | int limit_con = limit->get_int(); |
| 614 | if ((stride_con > 0 && limit_con > (max_jint - stride_m)) || |
| 615 | (stride_con < 0 && limit_con < (min_jint - stride_m))) { |
| 616 | // Bailout: it could be integer overflow. |
| 617 | return false; |
| 618 | } |
| 619 | } else if ((stride_con > 0 && limit_t->_hi <= (max_jint - stride_m)) || |
| 620 | (stride_con < 0 && limit_t->_lo >= (min_jint - stride_m))) { |
| 621 | // Limit's type may satisfy the condition, for example, |
| 622 | // when it is an array length. |
| 623 | } else { |
| 624 | // Generate loop's limit check. |
| 625 | // Loop limit check predicate should be near the loop. |
| 626 | ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check); |
| 627 | if (!limit_check_proj) { |
| 628 | // The limit check predicate is not generated if this method trapped here before. |
| 629 | #ifdef ASSERT |
| 630 | if (TraceLoopLimitCheck) { |
| 631 | tty->print("missing loop limit check:" ); |
| 632 | loop->dump_head(); |
| 633 | x->dump(1); |
| 634 | } |
| 635 | #endif |
| 636 | return false; |
| 637 | } |
| 638 | |
| 639 | IfNode* check_iff = limit_check_proj->in(0)->as_If(); |
| 640 | |
| 641 | if (!is_dominator(get_ctrl(limit), check_iff->in(0))) { |
| 642 | return false; |
| 643 | } |
| 644 | |
| 645 | Node* cmp_limit; |
| 646 | Node* bol; |
| 647 | |
| 648 | if (stride_con > 0) { |
| 649 | cmp_limit = new CmpINode(limit, _igvn.intcon(max_jint - stride_m)); |
| 650 | bol = new BoolNode(cmp_limit, BoolTest::le); |
| 651 | } else { |
| 652 | cmp_limit = new CmpINode(limit, _igvn.intcon(min_jint - stride_m)); |
| 653 | bol = new BoolNode(cmp_limit, BoolTest::ge); |
| 654 | } |
| 655 | |
| 656 | insert_loop_limit_check(limit_check_proj, cmp_limit, bol); |
| 657 | } |
| 658 | |
| 659 | // Now we need to canonicalize loop condition. |
| 660 | if (bt == BoolTest::ne) { |
| 661 | assert(stride_con == 1 || stride_con == -1, "simple increment only" ); |
| 662 | if (stride_con > 0 && init_t->_hi < limit_t->_lo) { |
| 663 | // 'ne' can be replaced with 'lt' only when init < limit. |
| 664 | bt = BoolTest::lt; |
| 665 | } else if (stride_con < 0 && init_t->_lo > limit_t->_hi) { |
| 666 | // 'ne' can be replaced with 'gt' only when init > limit. |
| 667 | bt = BoolTest::gt; |
| 668 | } else { |
| 669 | ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check); |
| 670 | if (!limit_check_proj) { |
| 671 | // The limit check predicate is not generated if this method trapped here before. |
| 672 | #ifdef ASSERT |
| 673 | if (TraceLoopLimitCheck) { |
| 674 | tty->print("missing loop limit check:" ); |
| 675 | loop->dump_head(); |
| 676 | x->dump(1); |
| 677 | } |
| 678 | #endif |
| 679 | return false; |
| 680 | } |
| 681 | IfNode* check_iff = limit_check_proj->in(0)->as_If(); |
| 682 | |
| 683 | if (!is_dominator(get_ctrl(limit), check_iff->in(0)) || |
| 684 | !is_dominator(get_ctrl(init_trip), check_iff->in(0))) { |
| 685 | return false; |
| 686 | } |
| 687 | |
| 688 | Node* cmp_limit; |
| 689 | Node* bol; |
| 690 | |
| 691 | if (stride_con > 0) { |
| 692 | cmp_limit = new CmpINode(init_trip, limit); |
| 693 | bol = new BoolNode(cmp_limit, BoolTest::lt); |
| 694 | } else { |
| 695 | cmp_limit = new CmpINode(init_trip, limit); |
| 696 | bol = new BoolNode(cmp_limit, BoolTest::gt); |
| 697 | } |
| 698 | |
| 699 | insert_loop_limit_check(limit_check_proj, cmp_limit, bol); |
| 700 | |
| 701 | if (stride_con > 0) { |
| 702 | // 'ne' can be replaced with 'lt' only when init < limit. |
| 703 | bt = BoolTest::lt; |
| 704 | } else if (stride_con < 0) { |
| 705 | // 'ne' can be replaced with 'gt' only when init > limit. |
| 706 | bt = BoolTest::gt; |
| 707 | } |
| 708 | } |
| 709 | } |
| 710 | |
| 711 | if (phi_incr != NULL) { |
    // If the compare points directly to the phi we need to adjust
    // the compare so that it points to the incr. The limit has
    // to be adjusted to keep the trip count the same and we
    // should avoid int overflow.
| 716 | // |
| 717 | // i = init; do {} while(i++ < limit); |
| 718 | // is converted to |
| 719 | // i = init; do {} while(++i < limit+1); |
| 720 | // |
| 721 | limit = gvn->transform(new AddINode(limit, stride)); |
| 722 | } |
| 723 | |
| 724 | if (incl_limit) { |
    // The limit check guarantees that 'limit <= (max_jint - stride)' so
| 726 | // we can convert 'i <= limit' to 'i < limit+1' since stride != 0. |
| 727 | // |
| 728 | Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1); |
| 729 | limit = gvn->transform(new AddINode(limit, one)); |
| 730 | if (bt == BoolTest::le) |
| 731 | bt = BoolTest::lt; |
| 732 | else if (bt == BoolTest::ge) |
| 733 | bt = BoolTest::gt; |
| 734 | else |
| 735 | ShouldNotReachHere(); |
| 736 | } |
| 737 | set_subtree_ctrl( limit ); |
| 738 | |
| 739 | if (LoopStripMiningIter == 0) { |
| 740 | // Check for SafePoint on backedge and remove |
| 741 | Node *sfpt = x->in(LoopNode::LoopBackControl); |
| 742 | if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) { |
| 743 | lazy_replace( sfpt, iftrue ); |
| 744 | if (loop->_safepts != NULL) { |
| 745 | loop->_safepts->yank(sfpt); |
| 746 | } |
| 747 | loop->_tail = iftrue; |
| 748 | } |
| 749 | } |
| 750 | |
| 751 | // Build a canonical trip test. |
| 752 | // Clone code, as old values may be in use. |
| 753 | incr = incr->clone(); |
| 754 | incr->set_req(1,phi); |
| 755 | incr->set_req(2,stride); |
| 756 | incr = _igvn.register_new_node_with_optimizer(incr); |
| 757 | set_early_ctrl( incr ); |
| 758 | _igvn.rehash_node_delayed(phi); |
| 759 | phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn ); |
| 760 | |
| 761 | // If phi type is more restrictive than Int, raise to |
| 762 | // Int to prevent (almost) infinite recursion in igvn |
| 763 | // which can only handle integer types for constants or minint..maxint. |
| 764 | if (!TypeInt::INT->higher_equal(phi->bottom_type())) { |
| 765 | Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT); |
| 766 | nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl)); |
| 767 | nphi = _igvn.register_new_node_with_optimizer(nphi); |
| 768 | set_ctrl(nphi, get_ctrl(phi)); |
| 769 | _igvn.replace_node(phi, nphi); |
| 770 | phi = nphi->as_Phi(); |
| 771 | } |
| 772 | cmp = cmp->clone(); |
| 773 | cmp->set_req(1,incr); |
| 774 | cmp->set_req(2,limit); |
| 775 | cmp = _igvn.register_new_node_with_optimizer(cmp); |
| 776 | set_ctrl(cmp, iff->in(0)); |
| 777 | |
| 778 | test = test->clone()->as_Bool(); |
| 779 | (*(BoolTest*)&test->_test)._test = bt; |
| 780 | test->set_req(1,cmp); |
| 781 | _igvn.register_new_node_with_optimizer(test); |
| 782 | set_ctrl(test, iff->in(0)); |
| 783 | |
| 784 | // Replace the old IfNode with a new LoopEndNode |
| 785 | Node *lex = _igvn.register_new_node_with_optimizer(new CountedLoopEndNode( iff->in(0), test, cl_prob, iff->as_If()->_fcnt )); |
| 786 | IfNode *le = lex->as_If(); |
| 787 | uint dd = dom_depth(iff); |
| 788 | set_idom(le, le->in(0), dd); // Update dominance for loop exit |
| 789 | set_loop(le, loop); |
| 790 | |
| 791 | // Get the loop-exit control |
| 792 | Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue)); |
| 793 | |
| 794 | // Need to swap loop-exit and loop-back control? |
| 795 | if (iftrue_op == Op_IfFalse) { |
| 796 | Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le)); |
| 797 | Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le)); |
| 798 | |
| 799 | loop->_tail = back_control = ift2; |
| 800 | set_loop(ift2, loop); |
| 801 | set_loop(iff2, get_loop(iffalse)); |
| 802 | |
| 803 | // Lazy update of 'get_ctrl' mechanism. |
| 804 | lazy_replace(iffalse, iff2); |
| 805 | lazy_replace(iftrue, ift2); |
| 806 | |
| 807 | // Swap names |
| 808 | iffalse = iff2; |
| 809 | iftrue = ift2; |
| 810 | } else { |
| 811 | _igvn.rehash_node_delayed(iffalse); |
| 812 | _igvn.rehash_node_delayed(iftrue); |
| 813 | iffalse->set_req_X( 0, le, &_igvn ); |
| 814 | iftrue ->set_req_X( 0, le, &_igvn ); |
| 815 | } |
| 816 | |
| 817 | set_idom(iftrue, le, dd+1); |
| 818 | set_idom(iffalse, le, dd+1); |
| 819 | assert(iff->outcnt() == 0, "should be dead now" ); |
| 820 | lazy_replace( iff, le ); // fix 'get_ctrl' |
| 821 | |
| 822 | Node *sfpt2 = le->in(0); |
| 823 | |
| 824 | Node* entry_control = init_control; |
| 825 | bool strip_mine_loop = LoopStripMiningIter > 1 && loop->_child == NULL && |
| 826 | sfpt2->Opcode() == Op_SafePoint && !loop->_has_call; |
| 827 | IdealLoopTree* outer_ilt = NULL; |
| 828 | if (strip_mine_loop) { |
| 829 | outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop, |
| 830 | cl_prob, le->_fcnt, entry_control, |
| 831 | iffalse); |
| 832 | } |
| 833 | |
| 834 | // Now setup a new CountedLoopNode to replace the existing LoopNode |
| 835 | CountedLoopNode *l = new CountedLoopNode(entry_control, back_control); |
| 836 | l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve |
| 837 | // The following assert is approximately true, and defines the intention |
| 838 | // of can_be_counted_loop. It fails, however, because phase->type |
| 839 | // is not yet initialized for this loop and its parts. |
| 840 | //assert(l->can_be_counted_loop(this), "sanity"); |
| 841 | _igvn.register_new_node_with_optimizer(l); |
| 842 | set_loop(l, loop); |
| 843 | loop->_head = l; |
| 844 | // Fix all data nodes placed at the old loop head. |
| 845 | // Uses the lazy-update mechanism of 'get_ctrl'. |
| 846 | lazy_replace( x, l ); |
| 847 | set_idom(l, entry_control, dom_depth(entry_control) + 1); |
| 848 | |
| 849 | if (LoopStripMiningIter == 0 || strip_mine_loop) { |
| 850 | // Check for immediately preceding SafePoint and remove |
| 851 | if (sfpt2->Opcode() == Op_SafePoint && (LoopStripMiningIter != 0 || is_deleteable_safept(sfpt2))) { |
| 852 | if (strip_mine_loop) { |
| 853 | Node* outer_le = outer_ilt->_tail->in(0); |
| 854 | Node* sfpt = sfpt2->clone(); |
| 855 | sfpt->set_req(0, iffalse); |
| 856 | outer_le->set_req(0, sfpt); |
| 857 | register_control(sfpt, outer_ilt, iffalse); |
| 858 | set_idom(outer_le, sfpt, dom_depth(sfpt)); |
| 859 | } |
| 860 | lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control)); |
| 861 | if (loop->_safepts != NULL) { |
| 862 | loop->_safepts->yank(sfpt2); |
| 863 | } |
| 864 | } |
| 865 | } |
| 866 | |
| 867 | // Free up intermediate goo |
| 868 | _igvn.remove_dead_node(hook); |
| 869 | |
| 870 | #ifdef ASSERT |
| 871 | assert(l->is_valid_counted_loop(), "counted loop shape is messed up" ); |
| 872 | assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" ); |
| 873 | #endif |
| 874 | #ifndef PRODUCT |
| 875 | if (TraceLoopOpts) { |
| 876 | tty->print("Counted " ); |
| 877 | loop->dump_head(); |
| 878 | } |
| 879 | #endif |
| 880 | |
| 881 | C->print_method(PHASE_AFTER_CLOOPS, 3); |
| 882 | |
| 883 | // Capture bounds of the loop in the induction variable Phi before |
| 884 | // subsequent transformation (iteration splitting) obscures the |
| 885 | // bounds |
| 886 | l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn)); |
| 887 | |
| 888 | if (strip_mine_loop) { |
| 889 | l->mark_strip_mined(); |
| 890 | l->verify_strip_mined(1); |
| 891 | outer_ilt->_head->as_Loop()->verify_strip_mined(1); |
| 892 | loop = outer_ilt; |
| 893 | } |
| 894 | |
| 895 | return true; |
| 896 | } |
| 897 | |
| 898 | //----------------------exact_limit------------------------------------------- |
| 899 | Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) { |
| 900 | assert(loop->_head->is_CountedLoop(), "" ); |
| 901 | CountedLoopNode *cl = loop->_head->as_CountedLoop(); |
| 902 | assert(cl->is_valid_counted_loop(), "" ); |
| 903 | |
| 904 | if (ABS(cl->stride_con()) == 1 || |
| 905 | cl->limit()->Opcode() == Op_LoopLimit) { |
| 906 | // Old code has exact limit (it could be incorrect in case of int overflow). |
    // The loop limit is exact when stride == 1, and the loop may already have an exact limit.
| 908 | return cl->limit(); |
| 909 | } |
| 910 | Node *limit = NULL; |
| 911 | #ifdef ASSERT |
| 912 | BoolTest::mask bt = cl->loopexit()->test_trip(); |
| 913 | assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected" ); |
| 914 | #endif |
| 915 | if (cl->has_exact_trip_count()) { |
| 916 | // Simple case: loop has constant boundaries. |
| 917 | // Use jlongs to avoid integer overflow. |
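    // E.g., init == 0, limit == 10, stride == 4 gives trip_cnt == 3 and an
    // exact limit (final iv value) of 12, even though the original limit is 10.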
| 918 | int stride_con = cl->stride_con(); |
| 919 | jlong init_con = cl->init_trip()->get_int(); |
| 920 | jlong limit_con = cl->limit()->get_int(); |
| 921 | julong trip_cnt = cl->trip_count(); |
| 922 | jlong final_con = init_con + trip_cnt*stride_con; |
| 923 | int final_int = (int)final_con; |
| 924 | // The final value should be in integer range since the loop |
| 925 | // is counted and the limit was checked for overflow. |
| 926 | assert(final_con == (jlong)final_int, "final value should be integer" ); |
| 927 | limit = _igvn.intcon(final_int); |
| 928 | } else { |
| 929 | // Create new LoopLimit node to get exact limit (final iv value). |
| 930 | limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride()); |
| 931 | register_new_node(limit, cl->in(LoopNode::EntryControl)); |
| 932 | } |
| 933 | assert(limit != NULL, "sanity" ); |
| 934 | return limit; |
| 935 | } |
| 936 | |
| 937 | //------------------------------Ideal------------------------------------------ |
| 938 | // Return a node which is more "ideal" than the current node. |
| 939 | // Attempt to convert into a counted-loop. |
| 940 | Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 941 | if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) { |
| 942 | phase->C->set_major_progress(); |
| 943 | } |
| 944 | return RegionNode::Ideal(phase, can_reshape); |
| 945 | } |
| 946 | |
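//------------------------------verify_strip_mined-----------------------------
// Verify the shape of a strip mined loop nest: an inner CountedLoop whose
// entry control is an OuterStripMinedLoop, with a SafePoint and an
// OuterStripMinedLoopEnd between the inner loop exit and the outer backedge.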
| 947 | void LoopNode::verify_strip_mined(int expect_skeleton) const { |
| 948 | #ifdef ASSERT |
| 949 | const OuterStripMinedLoopNode* outer = NULL; |
| 950 | const CountedLoopNode* inner = NULL; |
| 951 | if (is_strip_mined()) { |
| 952 | assert(is_CountedLoop(), "no Loop should be marked strip mined" ); |
| 953 | inner = as_CountedLoop(); |
| 954 | outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop(); |
| 955 | } else if (is_OuterStripMinedLoop()) { |
| 956 | outer = this->as_OuterStripMinedLoop(); |
| 957 | inner = outer->unique_ctrl_out()->as_CountedLoop(); |
| 958 | assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined" ); |
| 959 | } |
| 960 | if (inner != NULL || outer != NULL) { |
| 961 | assert(inner != NULL && outer != NULL, "missing loop in strip mined nest" ); |
| 962 | Node* outer_tail = outer->in(LoopNode::LoopBackControl); |
| 963 | Node* outer_le = outer_tail->in(0); |
| 964 | assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If" ); |
| 965 | Node* sfpt = outer_le->in(0); |
| 966 | assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?" ); |
| 967 | Node* inner_out = sfpt->in(0); |
| 968 | if (inner_out->outcnt() != 1) { |
| 969 | ResourceMark rm; |
| 970 | Unique_Node_List wq; |
| 971 | |
| 972 | for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) { |
| 973 | Node* u = inner_out->fast_out(i); |
| 974 | if (u == sfpt) { |
| 975 | continue; |
| 976 | } |
| 977 | wq.clear(); |
| 978 | wq.push(u); |
| 979 | bool found_sfpt = false; |
| 980 | for (uint next = 0; next < wq.size() && !found_sfpt; next++) { |
| 981 | Node* n = wq.at(next); |
| 982 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !found_sfpt; i++) { |
| 983 | Node* u = n->fast_out(i); |
| 984 | if (u == sfpt) { |
| 985 | found_sfpt = true; |
| 986 | } |
| 987 | if (!u->is_CFG()) { |
| 988 | wq.push(u); |
| 989 | } |
| 990 | } |
| 991 | } |
| 992 | assert(found_sfpt, "no node in loop that's not input to safepoint" ); |
| 993 | } |
| 994 | } |
| 995 | |
| 996 | if (UseZGC && !inner_out->in(0)->is_CountedLoopEnd()) { |
      // In some very special cases there can be a load that has no other uses than
      // the counted loop safepoint. Then its load barrier will be placed between the
      // inner loop exit and the safepoint. This is very rare.
| 1000 | |
| 1001 | Node* ifnode = inner_out->in(1)->in(0); |
| 1002 | // Region->IfTrue->If == Region->Iffalse->If |
| 1003 | if (ifnode == inner_out->in(2)->in(0)) { |
| 1004 | inner_out = ifnode->in(0); |
| 1005 | } |
| 1006 | } |
| 1007 | |
| 1008 | CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd(); |
| 1009 | assert(cle == inner->loopexit_or_null(), "mismatch" ); |
| 1010 | bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0; |
| 1011 | if (has_skeleton) { |
| 1012 | assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node" ); |
| 1013 | assert(outer->outcnt() == 2, "only phis" ); |
| 1014 | } else { |
| 1015 | assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?" ); |
| 1016 | uint phis = 0; |
| 1017 | for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) { |
| 1018 | Node* u = inner->fast_out(i); |
| 1019 | if (u->is_Phi()) { |
| 1020 | phis++; |
| 1021 | } |
| 1022 | } |
| 1023 | for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) { |
| 1024 | Node* u = outer->fast_out(i); |
| 1025 | assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop" ); |
| 1026 | } |
| 1027 | uint stores = 0; |
| 1028 | for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) { |
| 1029 | Node* u = inner_out->fast_out(i); |
| 1030 | if (u->is_Store()) { |
| 1031 | stores++; |
| 1032 | } |
| 1033 | } |
| 1034 | assert(outer->outcnt() >= phis + 2 && outer->outcnt() <= phis + 2 + stores + 1, "only phis" ); |
| 1035 | } |
| 1036 | assert(sfpt->outcnt() == 1, "no data node" ); |
| 1037 | assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node" ); |
| 1038 | } |
| 1039 | #endif |
| 1040 | } |
| 1041 | |
| 1042 | //============================================================================= |
| 1043 | //------------------------------Ideal------------------------------------------ |
| 1044 | // Return a node which is more "ideal" than the current node. |
| 1045 | // Attempt to convert into a counted-loop. |
| 1046 | Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 1047 | return RegionNode::Ideal(phase, can_reshape); |
| 1048 | } |
| 1049 | |
| 1050 | //------------------------------dump_spec-------------------------------------- |
| 1051 | // Dump special per-node info |
| 1052 | #ifndef PRODUCT |
| 1053 | void CountedLoopNode::dump_spec(outputStream *st) const { |
| 1054 | LoopNode::dump_spec(st); |
| 1055 | if (stride_is_con()) { |
| 1056 | st->print("stride: %d " ,stride_con()); |
| 1057 | } |
| 1058 | if (is_pre_loop ()) st->print("pre of N%d" , _main_idx); |
| 1059 | if (is_main_loop()) st->print("main of N%d" , _idx); |
| 1060 | if (is_post_loop()) st->print("post of N%d" , _main_idx); |
| 1061 | if (is_strip_mined()) st->print(" strip mined" ); |
| 1062 | } |
| 1063 | #endif |
| 1064 | |
| 1065 | //============================================================================= |
| 1066 | int CountedLoopEndNode::stride_con() const { |
| 1067 | return stride()->bottom_type()->is_int()->get_con(); |
| 1068 | } |
| 1069 | |
| 1070 | //============================================================================= |
| 1071 | //------------------------------Value----------------------------------------- |
| 1072 | const Type* LoopLimitNode::Value(PhaseGVN* phase) const { |
| 1073 | const Type* init_t = phase->type(in(Init)); |
| 1074 | const Type* limit_t = phase->type(in(Limit)); |
| 1075 | const Type* stride_t = phase->type(in(Stride)); |
| 1076 | // Either input is TOP ==> the result is TOP |
| 1077 | if (init_t == Type::TOP) return Type::TOP; |
| 1078 | if (limit_t == Type::TOP) return Type::TOP; |
| 1079 | if (stride_t == Type::TOP) return Type::TOP; |
| 1080 | |
| 1081 | int stride_con = stride_t->is_int()->get_con(); |
| 1082 | if (stride_con == 1) |
| 1083 | return NULL; // Identity |
| 1084 | |
| 1085 | if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) { |
| 1086 | // Use jlongs to avoid integer overflow. |
| 1087 | jlong init_con = init_t->is_int()->get_con(); |
| 1088 | jlong limit_con = limit_t->is_int()->get_con(); |
| 1089 | int stride_m = stride_con - (stride_con > 0 ? 1 : -1); |
| 1090 | jlong trip_count = (limit_con - init_con + stride_m)/stride_con; |
| 1091 | jlong final_con = init_con + stride_con*trip_count; |
| 1092 | int final_int = (int)final_con; |
| 1093 | // The final value should be in integer range since the loop |
| 1094 | // is counted and the limit was checked for overflow. |
| 1095 | assert(final_con == (jlong)final_int, "final value should be integer" ); |
| 1096 | return TypeInt::make(final_int); |
| 1097 | } |
| 1098 | |
| 1099 | return bottom_type(); // TypeInt::INT |
| 1100 | } |
| 1101 | |
| 1102 | //------------------------------Ideal------------------------------------------ |
| 1103 | // Return a node which is more "ideal" than the current node. |
| 1104 | Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 1105 | if (phase->type(in(Init)) == Type::TOP || |
| 1106 | phase->type(in(Limit)) == Type::TOP || |
| 1107 | phase->type(in(Stride)) == Type::TOP) |
| 1108 | return NULL; // Dead |
| 1109 | |
| 1110 | int stride_con = phase->type(in(Stride))->is_int()->get_con(); |
| 1111 | if (stride_con == 1) |
| 1112 | return NULL; // Identity |
| 1113 | |
| 1114 | if (in(Init)->is_Con() && in(Limit)->is_Con()) |
| 1115 | return NULL; // Value |
| 1116 | |
  // Delay the following optimizations until all loop optimizations are
  // done, to keep the Ideal graph simple.
| 1119 | if (!can_reshape || phase->C->major_progress()) |
| 1120 | return NULL; |
| 1121 | |
| 1122 | const TypeInt* init_t = phase->type(in(Init) )->is_int(); |
| 1123 | const TypeInt* limit_t = phase->type(in(Limit))->is_int(); |
| 1124 | int stride_p; |
| 1125 | jlong lim, ini; |
| 1126 | julong max; |
| 1127 | if (stride_con > 0) { |
| 1128 | stride_p = stride_con; |
| 1129 | lim = limit_t->_hi; |
| 1130 | ini = init_t->_lo; |
| 1131 | max = (julong)max_jint; |
| 1132 | } else { |
| 1133 | stride_p = -stride_con; |
| 1134 | lim = init_t->_hi; |
| 1135 | ini = limit_t->_lo; |
| 1136 | max = (julong)min_jint; |
| 1137 | } |
| 1138 | julong range = lim - ini + stride_p; |
| 1139 | if (range <= max) { |
    // Convert to an integer expression if it cannot overflow.
| 1141 | Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1)); |
| 1142 | Node *range = phase->transform(new SubINode(in(Limit), in(Init))); |
| 1143 | Node *bias = phase->transform(new AddINode(range, stride_m)); |
| 1144 | Node *trip = phase->transform(new DivINode(0, bias, in(Stride))); |
| 1145 | Node *span = phase->transform(new MulINode(trip, in(Stride))); |
| 1146 | return new AddINode(span, in(Init)); // exact limit |
| 1147 | } |
| 1148 | |
| 1149 | if (is_power_of_2(stride_p) || // divisor is 2^n |
| 1150 | !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node? |
| 1151 | // Convert to long expression to avoid integer overflow |
| 1152 | // and let igvn optimizer convert this division. |
| 1153 | // |
| 1154 | Node* init = phase->transform( new ConvI2LNode(in(Init))); |
| 1155 | Node* limit = phase->transform( new ConvI2LNode(in(Limit))); |
| 1156 | Node* stride = phase->longcon(stride_con); |
| 1157 | Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1)); |
| 1158 | |
| 1159 | Node *range = phase->transform(new SubLNode(limit, init)); |
| 1160 | Node *bias = phase->transform(new AddLNode(range, stride_m)); |
| 1161 | Node *span; |
| 1162 | if (stride_con > 0 && is_power_of_2(stride_p)) { |
      // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
      // and avoid generating rounding for the division. The zero trip guard should
      // guarantee that init < limit, but sometimes the guard is missing and
      // we can get a situation where init > limit. Note, for the empty loop
      // optimization the zero trip guard is generated explicitly, which leaves
      // only the RCE predicate where the exact limit is used and the predicate
      // will simply fail, forcing recompilation.
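      // E.g., for stride 4, 'bias & -4' rounds bias down to a multiple of 4,
      // which equals trip * stride (the span).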
| 1170 | Node* neg_stride = phase->longcon(-stride_con); |
| 1171 | span = phase->transform(new AndLNode(bias, neg_stride)); |
| 1172 | } else { |
| 1173 | Node *trip = phase->transform(new DivLNode(0, bias, stride)); |
| 1174 | span = phase->transform(new MulLNode(trip, stride)); |
| 1175 | } |
| 1176 | // Convert back to int |
| 1177 | Node *span_int = phase->transform(new ConvL2INode(span)); |
| 1178 | return new AddINode(span_int, in(Init)); // exact limit |
| 1179 | } |
| 1180 | |
| 1181 | return NULL; // No progress |
| 1182 | } |
| 1183 | |
| 1184 | //------------------------------Identity--------------------------------------- |
| 1185 | // If stride == 1 return limit node. |
| 1186 | Node* LoopLimitNode::Identity(PhaseGVN* phase) { |
| 1187 | int stride_con = phase->type(in(Stride))->is_int()->get_con(); |
| 1188 | if (stride_con == 1 || stride_con == -1) |
| 1189 | return in(Limit); |
| 1190 | return this; |
| 1191 | } |
| 1192 | |
| 1193 | //============================================================================= |
| 1194 | //----------------------match_incr_with_optional_truncation-------------------- |
| 1195 | // Match increment with optional truncation: |
| 1196 | // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16 |
| 1197 | // Return NULL for failure. Success returns the increment node. |
| 1198 | Node* CountedLoopNode::match_incr_with_optional_truncation( |
| 1199 | Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type) { |
| 1200 | // Quick cutouts: |
| 1201 | if (expr == NULL || expr->req() != 3) return NULL; |
| 1202 | |
| 1203 | Node *t1 = NULL; |
| 1204 | Node *t2 = NULL; |
| 1205 | const TypeInt* trunc_t = TypeInt::INT; |
| 1206 | Node* n1 = expr; |
| 1207 | int n1op = n1->Opcode(); |
| 1208 | |
| 1209 | // Try to strip (n1 & M) or (n1 << N >> N) from n1. |
| 1210 | if (n1op == Op_AndI && |
| 1211 | n1->in(2)->is_Con() && |
| 1212 | n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) { |
| 1213 | // %%% This check should match any mask of 2**K-1. |
| 1214 | t1 = n1; |
| 1215 | n1 = t1->in(1); |
| 1216 | n1op = n1->Opcode(); |
| 1217 | trunc_t = TypeInt::CHAR; |
| 1218 | } else if (n1op == Op_RShiftI && |
| 1219 | n1->in(1) != NULL && |
| 1220 | n1->in(1)->Opcode() == Op_LShiftI && |
| 1221 | n1->in(2) == n1->in(1)->in(2) && |
| 1222 | n1->in(2)->is_Con()) { |
| 1223 | jint shift = n1->in(2)->bottom_type()->is_int()->get_con(); |
| 1224 | // %%% This check should match any shift in [1..31]. |
| 1225 | if (shift == 16 || shift == 8) { |
| 1226 | t1 = n1; |
| 1227 | t2 = t1->in(1); |
| 1228 | n1 = t2->in(1); |
| 1229 | n1op = n1->Opcode(); |
| 1230 | if (shift == 16) { |
| 1231 | trunc_t = TypeInt::SHORT; |
| 1232 | } else if (shift == 8) { |
| 1233 | trunc_t = TypeInt::BYTE; |
| 1234 | } |
| 1235 | } |
| 1236 | } |
| 1237 | |
| 1238 | // If (maybe after stripping) it is an AddI, we won: |
| 1239 | if (n1op == Op_AddI) { |
| 1240 | *trunc1 = t1; |
| 1241 | *trunc2 = t2; |
| 1242 | *trunc_type = trunc_t; |
| 1243 | return n1; |
| 1244 | } |
| 1245 | |
| 1246 | // failed |
| 1247 | return NULL; |
| 1248 | } |
| 1249 | |
| 1250 | LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) { |
| 1251 | if (is_strip_mined()) { |
| 1252 | verify_strip_mined(expect_skeleton); |
| 1253 | return in(EntryControl)->as_Loop(); |
| 1254 | } |
| 1255 | return this; |
| 1256 | } |
| 1257 | |
| 1258 | OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const { |
| 1259 | assert(is_strip_mined(), "not a strip mined loop" ); |
| 1260 | Node* c = in(EntryControl); |
| 1261 | if (c == NULL || c->is_top() || !c->is_OuterStripMinedLoop()) { |
| 1262 | return NULL; |
| 1263 | } |
| 1264 | return c->as_OuterStripMinedLoop(); |
| 1265 | } |
| 1266 | |
| 1267 | IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const { |
| 1268 | Node* c = in(LoopBackControl); |
| 1269 | if (c == NULL || c->is_top()) { |
| 1270 | return NULL; |
| 1271 | } |
| 1272 | return c->as_IfTrue(); |
| 1273 | } |
| 1274 | |
| 1275 | IfTrueNode* CountedLoopNode::outer_loop_tail() const { |
| 1276 | LoopNode* l = outer_loop(); |
| 1277 | if (l == NULL) { |
| 1278 | return NULL; |
| 1279 | } |
| 1280 | return l->outer_loop_tail(); |
| 1281 | } |
| 1282 | |
| 1283 | OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const { |
| 1284 | IfTrueNode* proj = outer_loop_tail(); |
| 1285 | if (proj == NULL) { |
| 1286 | return NULL; |
| 1287 | } |
| 1288 | Node* c = proj->in(0); |
| 1289 | if (c == NULL || c->is_top() || c->outcnt() != 2) { |
| 1290 | return NULL; |
| 1291 | } |
| 1292 | return c->as_OuterStripMinedLoopEnd(); |
| 1293 | } |
| 1294 | |
| 1295 | OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const { |
| 1296 | LoopNode* l = outer_loop(); |
| 1297 | if (l == NULL) { |
| 1298 | return NULL; |
| 1299 | } |
| 1300 | return l->outer_loop_end(); |
| 1301 | } |
| 1302 | |
| 1303 | IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const { |
| 1304 | IfNode* le = outer_loop_end(); |
| 1305 | if (le == NULL) { |
| 1306 | return NULL; |
| 1307 | } |
| 1308 | Node* c = le->proj_out_or_null(false); |
| 1309 | if (c == NULL) { |
| 1310 | return NULL; |
| 1311 | } |
| 1312 | return c->as_IfFalse(); |
| 1313 | } |
| 1314 | |
| 1315 | IfFalseNode* CountedLoopNode::outer_loop_exit() const { |
| 1316 | LoopNode* l = outer_loop(); |
| 1317 | if (l == NULL) { |
| 1318 | return NULL; |
| 1319 | } |
| 1320 | return l->outer_loop_exit(); |
| 1321 | } |
| 1322 | |
| 1323 | SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const { |
| 1324 | IfNode* le = outer_loop_end(); |
| 1325 | if (le == NULL) { |
| 1326 | return NULL; |
| 1327 | } |
| 1328 | Node* c = le->in(0); |
| 1329 | if (c == NULL || c->is_top()) { |
| 1330 | return NULL; |
| 1331 | } |
| 1332 | assert(c->Opcode() == Op_SafePoint, "broken outer loop" ); |
| 1333 | return c->as_SafePoint(); |
| 1334 | } |
| 1335 | |
| 1336 | SafePointNode* CountedLoopNode::outer_safepoint() const { |
| 1337 | LoopNode* l = outer_loop(); |
| 1338 | if (l == NULL) { |
| 1339 | return NULL; |
| 1340 | } |
| 1341 | return l->outer_safepoint(); |
| 1342 | } |
| 1343 | |
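| | // Walk up the control chain from 'ctrl', skipping projections of If nodes |
| | // whose other projection has a single use that is a Halt node (the shape of |
| | // the predicates added above main loops, see skip_predicates() below). |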
| 1344 | Node* CountedLoopNode::skip_predicates_from_entry(Node* ctrl) { |
| 1345 | while (ctrl != NULL && ctrl->is_Proj() && ctrl->in(0)->is_If() && |
| 1346 | ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->outcnt() == 1 && |
| 1347 | ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->unique_out()->Opcode() == Op_Halt) { |
| 1348 | ctrl = ctrl->in(0)->in(0); |
| 1349 | } |
| 1350 | |
| 1351 | return ctrl; |
| 1352 | } |
| 1353 | |
| 1354 | Node* CountedLoopNode::skip_predicates() { |
| 1355 | if (is_main_loop()) { |
| 1356 | Node* ctrl = skip_strip_mined()->in(LoopNode::EntryControl); |
| 1357 | |
| 1358 | return skip_predicates_from_entry(ctrl); |
| 1359 | } |
| 1360 | return in(LoopNode::EntryControl); |
| 1361 | } |
| 1362 | |
| 1363 | void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) { |
| 1364 | // Look for the outer & inner strip mined loop, reduce number of |
| 1365 | // iterations of the inner loop, set exit condition of outer loop, |
| 1366 | // construct required phi nodes for outer loop. |
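| | // Simple cases below drop the outer loop instead of adjusting it: a dead |
| | // inner loop (no iv phi), an iteration estimate too small to be worth strip |
| | // mining, or an estimate that fits within a single strip. |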
| 1367 | CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop(); |
| 1368 | assert(inner_cl->is_strip_mined(), "inner loop should be strip mined" ); |
| 1369 | Node* inner_iv_phi = inner_cl->phi(); |
| 1370 | if (inner_iv_phi == NULL) { |
| 1371 | IfNode* outer_le = outer_loop_end(); |
| 1372 | Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt)); |
| 1373 | igvn->replace_node(outer_le, iff); |
| 1374 | inner_cl->clear_strip_mined(); |
| 1375 | return; |
| 1376 | } |
| 1377 | CountedLoopEndNode* inner_cle = inner_cl->loopexit(); |
| 1378 | |
| 1379 | int stride = inner_cl->stride_con(); |
| 1380 | jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS(stride); |
| 1381 | int scaled_iters = (int)scaled_iters_long; |
| 1382 | int short_scaled_iters = LoopStripMiningIterShortLoop* ABS(stride); |
| 1383 | const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int(); |
| 1384 | jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo; |
| 1385 | assert(iter_estimate > 0, "broken" ); |
| 1386 | if ((jlong)scaled_iters != scaled_iters_long || iter_estimate <= short_scaled_iters) { |
| 1387 | // Remove outer loop and safepoint (too few iterations) |
| 1388 | Node* outer_sfpt = outer_safepoint(); |
| 1389 | Node* outer_out = outer_loop_exit(); |
| 1390 | igvn->replace_node(outer_out, outer_sfpt->in(0)); |
| 1391 | igvn->replace_input_of(outer_sfpt, 0, igvn->C->top()); |
| 1392 | inner_cl->clear_strip_mined(); |
| 1393 | return; |
| 1394 | } |
| 1395 | if (iter_estimate <= scaled_iters_long) { |
| 1396 | // We would only go through one iteration of |
| 1397 | // the outer loop: drop the outer loop but |
| 1398 | // keep the safepoint so we don't run for |
| 1399 | // too long without a safepoint |
| 1400 | IfNode* outer_le = outer_loop_end(); |
| 1401 | Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt)); |
| 1402 | igvn->replace_node(outer_le, iff); |
| 1403 | inner_cl->clear_strip_mined(); |
| 1404 | return; |
| 1405 | } |
| 1406 | |
| 1407 | Node* cle_tail = inner_cle->proj_out(true); |
| 1408 | ResourceMark rm; |
| 1409 | Node_List old_new; |
| 1410 | if (cle_tail->outcnt() > 1) { |
| 1411 | // Look for nodes on backedge of inner loop and clone them |
| 1412 | Unique_Node_List backedge_nodes; |
| 1413 | for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) { |
| 1414 | Node* u = cle_tail->fast_out(i); |
| 1415 | if (u != inner_cl) { |
| 1416 | assert(!u->is_CFG(), "control flow on the backedge?" ); |
| 1417 | backedge_nodes.push(u); |
| 1418 | } |
| 1419 | } |
| 1420 | uint last = igvn->C->unique(); |
| 1421 | for (uint next = 0; next < backedge_nodes.size(); next++) { |
| 1422 | Node* n = backedge_nodes.at(next); |
| 1423 | old_new.map(n->_idx, n->clone()); |
| 1424 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { |
| 1425 | Node* u = n->fast_out(i); |
| 1426 | assert(!u->is_CFG(), "broken" ); |
| 1427 | if (u->_idx >= last) { |
| 1428 | continue; |
| 1429 | } |
| 1430 | if (!u->is_Phi()) { |
| 1431 | backedge_nodes.push(u); |
| 1432 | } else { |
| 1433 | assert(u->in(0) == inner_cl, "strange phi on the backedge" ); |
| 1434 | } |
| 1435 | } |
| 1436 | } |
| 1437 | // Put the clones on the outer loop backedge |
| 1438 | Node* le_tail = outer_loop_tail(); |
| 1439 | for (uint next = 0; next < backedge_nodes.size(); next++) { |
| 1440 | Node *n = old_new[backedge_nodes.at(next)->_idx]; |
| 1441 | for (uint i = 1; i < n->req(); i++) { |
| 1442 | if (n->in(i) != NULL && old_new[n->in(i)->_idx] != NULL) { |
| 1443 | n->set_req(i, old_new[n->in(i)->_idx]); |
| 1444 | } |
| 1445 | } |
| 1446 | if (n->in(0) != NULL && n->in(0) == cle_tail) { |
| 1447 | n->set_req(0, le_tail); |
| 1448 | } |
| 1449 | igvn->register_new_node_with_optimizer(n); |
| 1450 | } |
| 1451 | } |
| 1452 | |
| 1453 | Node* iv_phi = NULL; |
| 1454 | // Make a clone of each phi in the inner loop |
| 1455 | // for the outer loop |
| 1456 | for (uint i = 0; i < inner_cl->outcnt(); i++) { |
| 1457 | Node* u = inner_cl->raw_out(i); |
| 1458 | if (u->is_Phi()) { |
| 1459 | assert(u->in(0) == inner_cl, "inconsistent" ); |
| 1460 | Node* phi = u->clone(); |
| 1461 | phi->set_req(0, this); |
| 1462 | Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx]; |
| 1463 | if (be != NULL) { |
| 1464 | phi->set_req(LoopNode::LoopBackControl, be); |
| 1465 | } |
| 1466 | phi = igvn->transform(phi); |
| 1467 | igvn->replace_input_of(u, LoopNode::EntryControl, phi); |
| 1468 | if (u == inner_iv_phi) { |
| 1469 | iv_phi = phi; |
| 1470 | } |
| 1471 | } |
| 1472 | } |
| 1473 | Node* cle_out = inner_cle->proj_out(false); |
| 1474 | if (cle_out->outcnt() > 1) { |
| 1475 | // Look for chains of stores that were sunk |
| 1476 | // out of the inner loop and are in the outer loop |
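| | // For each such chain, find its first and last store, then either hook the |
| | // chain into an existing outer loop memory phi or create a new one below. |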
| 1477 | for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) { |
| 1478 | Node* u = cle_out->fast_out(i); |
| 1479 | if (u->is_Store()) { |
| 1480 | Node* first = u; |
| 1481 | for(;;) { |
| 1482 | Node* next = first->in(MemNode::Memory); |
| 1483 | if (!next->is_Store() || next->in(0) != cle_out) { |
| 1484 | break; |
| 1485 | } |
| 1486 | first = next; |
| 1487 | } |
| 1488 | Node* last = u; |
| 1489 | for(;;) { |
| 1490 | Node* next = NULL; |
| 1491 | for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) { |
| 1492 | Node* uu = last->fast_out(j); |
| 1493 | if (uu->is_Store() && uu->in(0) == cle_out) { |
| 1494 | assert(next == NULL, "only one in the outer loop" ); |
| 1495 | next = uu; |
| 1496 | } |
| 1497 | } |
| 1498 | if (next == NULL) { |
| 1499 | break; |
| 1500 | } |
| 1501 | last = next; |
| 1502 | } |
| 1503 | Node* phi = NULL; |
| 1504 | for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) { |
| 1505 | Node* uu = fast_out(j); |
| 1506 | if (uu->is_Phi()) { |
| 1507 | Node* be = uu->in(LoopNode::LoopBackControl); |
| 1508 | if (be->is_Store() && old_new[be->_idx] != NULL) { |
| 1509 | assert(false, "store on the backedge + sunk stores: unsupported" ); |
| 1510 | // drop outer loop |
| 1511 | IfNode* outer_le = outer_loop_end(); |
| 1512 | Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt)); |
| 1513 | igvn->replace_node(outer_le, iff); |
| 1514 | inner_cl->clear_strip_mined(); |
| 1515 | return; |
| 1516 | } |
| 1517 | if (be == last || be == first->in(MemNode::Memory)) { |
| 1518 | assert(phi == NULL, "only one phi" ); |
| 1519 | phi = uu; |
| 1520 | } |
| 1521 | } |
| 1522 | } |
| 1523 | #ifdef ASSERT |
| 1524 | for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) { |
| 1525 | Node* uu = fast_out(j); |
| 1526 | if (uu->is_Phi() && uu->bottom_type() == Type::MEMORY) { |
| 1527 | if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) { |
| 1528 | assert(phi == uu, "what's that phi?" ); |
| 1529 | } else if (uu->adr_type() == TypePtr::BOTTOM) { |
| 1530 | Node* n = uu->in(LoopNode::LoopBackControl); |
| 1531 | uint limit = igvn->C->live_nodes(); |
| 1532 | uint i = 0; |
| 1533 | while (n != uu) { |
| 1534 | i++; |
| 1535 | assert(i < limit, "infinite loop" ); |
| 1536 | if (n->is_Proj()) { |
| 1537 | n = n->in(0); |
| 1538 | } else if (n->is_SafePoint() || n->is_MemBar()) { |
| 1539 | n = n->in(TypeFunc::Memory); |
| 1540 | } else if (n->is_Phi()) { |
| 1541 | n = n->in(1); |
| 1542 | } else if (n->is_MergeMem()) { |
| 1543 | n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type())); |
| 1544 | } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) { |
| 1545 | n = n->in(MemNode::Memory); |
| 1546 | } else { |
| 1547 | n->dump(); |
| 1548 | ShouldNotReachHere(); |
| 1549 | } |
| 1550 | } |
| 1551 | } |
| 1552 | } |
| 1553 | } |
| 1554 | #endif |
| 1555 | if (phi == NULL) { |
| 1556 | // If an entire chain of stores was sunk, the |
| 1557 | // inner loop has no phi for that memory |
| 1558 | // slice: create one for the outer loop |
| 1559 | phi = PhiNode::make(this, first->in(MemNode::Memory), Type::MEMORY, |
| 1560 | igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))); |
| 1561 | phi->set_req(LoopNode::LoopBackControl, last); |
| 1562 | phi = igvn->transform(phi); |
| 1563 | igvn->replace_input_of(first, MemNode::Memory, phi); |
| 1564 | } else { |
| 1565 | // Otherwise fix the existing outer loop phi to include |
| 1566 | // that chain of stores. |
| 1567 | Node* be = phi->in(LoopNode::LoopBackControl); |
| 1568 | assert(!(be->is_Store() && old_new[be->_idx] != NULL), "store on the backedge + sunk stores: unsupported" ); |
| 1569 | if (be == first->in(MemNode::Memory)) { |
| 1570 | if (be == phi->in(LoopNode::LoopBackControl)) { |
| 1571 | igvn->replace_input_of(phi, LoopNode::LoopBackControl, last); |
| 1572 | } else { |
| 1573 | igvn->replace_input_of(be, MemNode::Memory, last); |
| 1574 | } |
| 1575 | } else { |
| 1576 | #ifdef ASSERT |
| 1577 | if (be == phi->in(LoopNode::LoopBackControl)) { |
| 1578 | assert(phi->in(LoopNode::LoopBackControl) == last, "" ); |
| 1579 | } else { |
| 1580 | assert(be->in(MemNode::Memory) == last, "" ); |
| 1581 | } |
| 1582 | #endif |
| 1583 | } |
| 1584 | } |
| 1585 | } |
| 1586 | } |
| 1587 | } |
| 1588 | |
| 1589 | if (iv_phi != NULL) { |
| 1590 | // Now adjust the inner loop's exit condition |
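| | // For a positive stride the new inner limit becomes |
| | //   iv_phi + MIN(limit - iv_phi, LoopStripMiningIter * stride) |
| | // (and symmetrically for a negative stride), so each outer iteration runs |
| | // at most LoopStripMiningIter inner iterations. |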
| 1591 | Node* limit = inner_cl->limit(); |
| 1592 | Node* sub = NULL; |
| 1593 | if (stride > 0) { |
| 1594 | sub = igvn->transform(new SubINode(limit, iv_phi)); |
| 1595 | } else { |
| 1596 | sub = igvn->transform(new SubINode(iv_phi, limit)); |
| 1597 | } |
| 1598 | Node* min = igvn->transform(new MinINode(sub, igvn->intcon(scaled_iters))); |
| 1599 | Node* new_limit = NULL; |
| 1600 | if (stride > 0) { |
| 1601 | new_limit = igvn->transform(new AddINode(min, iv_phi)); |
| 1602 | } else { |
| 1603 | new_limit = igvn->transform(new SubINode(iv_phi, min)); |
| 1604 | } |
| 1605 | Node* inner_cmp = inner_cle->cmp_node(); |
| 1606 | Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue); |
| 1607 | Node* outer_bol = inner_bol; |
| 1608 | // cmp node for inner loop may be shared |
| 1609 | inner_cmp = inner_cmp->clone(); |
| 1610 | inner_cmp->set_req(2, new_limit); |
| 1611 | inner_bol = inner_bol->clone(); |
| 1612 | inner_bol->set_req(1, igvn->transform(inner_cmp)); |
| 1613 | igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol)); |
| 1614 | // Set the outer loop's exit condition too |
| 1615 | igvn->replace_input_of(outer_loop_end(), 1, outer_bol); |
| 1616 | } else { |
| 1617 | assert(false, "should be able to adjust outer loop" ); |
| 1618 | IfNode* outer_le = outer_loop_end(); |
| 1619 | Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt)); |
| 1620 | igvn->replace_node(outer_le, iff); |
| 1621 | inner_cl->clear_strip_mined(); |
| 1622 | } |
| 1623 | } |
| 1624 | |
| 1625 | const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const { |
| 1626 | if (!in(0)) return Type::TOP; |
| 1627 | if (phase->type(in(0)) == Type::TOP) |
| 1628 | return Type::TOP; |
| 1629 | |
| 1630 | return TypeTuple::IFBOTH; |
| 1631 | } |
| 1632 | |
| 1633 | Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 1634 | if (remove_dead_region(phase, can_reshape)) return this; |
| 1635 | |
| 1636 | return NULL; |
| 1637 | } |
| 1638 | |
| 1639 | //------------------------------filtered_type-------------------------------- |
| 1640 | // Return a type based on condition control flow |
| 1641 | // A successful return will be a type that is restricted due |
| 1642 | // to a series of dominating if-tests, such as: |
| 1643 | // if (i < 10) { |
| 1644 | // if (i > 0) { |
| 1645 | // here: "i" type is [1..10) |
| 1646 | // } |
| 1647 | // } |
| 1648 | // or a control flow merge |
| 1649 | // if (i < 10) { |
| 1650 | // do { |
| 1651 | // phi( , ) -- at top of loop type is [min_int..10) |
| 1652 | // i = ? |
| 1653 | // } while ( i < 10) |
| 1654 | // |
| 1655 | const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { |
| 1656 | assert(n && n->bottom_type()->is_int(), "must be int" ); |
| 1657 | const TypeInt* filtered_t = NULL; |
| 1658 | if (!n->is_Phi()) { |
| 1659 | assert(n_ctrl != NULL || n_ctrl == C->top(), "valid control" ); |
| 1660 | filtered_t = filtered_type_from_dominators(n, n_ctrl); |
| 1661 | |
| 1662 | } else { |
| 1663 | Node* phi = n->as_Phi(); |
| 1664 | Node* region = phi->in(0); |
| 1665 | assert(n_ctrl == NULL || n_ctrl == region, "ctrl parameter must be region" ); |
| 1666 | if (region && region != C->top()) { |
| 1667 | for (uint i = 1; i < phi->req(); i++) { |
| 1668 | Node* val = phi->in(i); |
| 1669 | Node* use_c = region->in(i); |
| 1670 | const TypeInt* val_t = filtered_type_from_dominators(val, use_c); |
| 1671 | if (val_t != NULL) { |
| 1672 | if (filtered_t == NULL) { |
| 1673 | filtered_t = val_t; |
| 1674 | } else { |
| 1675 | filtered_t = filtered_t->meet(val_t)->is_int(); |
| 1676 | } |
| 1677 | } |
| 1678 | } |
| 1679 | } |
| 1680 | } |
| 1681 | const TypeInt* n_t = _igvn.type(n)->is_int(); |
| 1682 | if (filtered_t != NULL) { |
| 1683 | n_t = n_t->join(filtered_t)->is_int(); |
| 1684 | } |
| 1685 | return n_t; |
| 1686 | } |
| 1687 | |
| 1688 | |
| 1689 | //------------------------------filtered_type_from_dominators-------------------------------- |
| 1690 | // Return a possibly more restrictive type for val based on condition control flow of dominators |
| 1691 | const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) { |
| 1692 | if (val->is_Con()) { |
| 1693 | return val->bottom_type()->is_int(); |
| 1694 | } |
| 1695 | uint if_limit = 10; // Max number of dominating if's visited |
| 1696 | const TypeInt* rtn_t = NULL; |
| 1697 | |
| 1698 | if (use_ctrl && use_ctrl != C->top()) { |
| 1699 | Node* val_ctrl = get_ctrl(val); |
| 1700 | uint val_dom_depth = dom_depth(val_ctrl); |
| 1701 | Node* pred = use_ctrl; |
| 1702 | uint if_cnt = 0; |
| 1703 | while (if_cnt < if_limit) { |
| 1704 | if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) { |
| 1705 | if_cnt++; |
| 1706 | const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred); |
| 1707 | if (if_t != NULL) { |
| 1708 | if (rtn_t == NULL) { |
| 1709 | rtn_t = if_t; |
| 1710 | } else { |
| 1711 | rtn_t = rtn_t->join(if_t)->is_int(); |
| 1712 | } |
| 1713 | } |
| 1714 | } |
| 1715 | pred = idom(pred); |
| 1716 | if (pred == NULL || pred == C->top()) { |
| 1717 | break; |
| 1718 | } |
| 1719 | // Stop if going beyond definition block of val |
| 1720 | if (dom_depth(pred) < val_dom_depth) { |
| 1721 | break; |
| 1722 | } |
| 1723 | } |
| 1724 | } |
| 1725 | return rtn_t; |
| 1726 | } |
| 1727 | |
| 1728 | |
| 1729 | //------------------------------dump_spec-------------------------------------- |
| 1730 | // Dump special per-node info |
| 1731 | #ifndef PRODUCT |
| 1732 | void CountedLoopEndNode::dump_spec(outputStream *st) const { |
| 1733 | if( in(TestValue) != NULL && in(TestValue)->is_Bool() ) { |
| 1734 | BoolTest bt( test_trip()); // Added this for g++. |
| 1735 | |
| 1736 | st->print("[" ); |
| 1737 | bt.dump_on(st); |
| 1738 | st->print("]" ); |
| 1739 | } |
| 1740 | st->print(" " ); |
| 1741 | IfNode::dump_spec(st); |
| 1742 | } |
| 1743 | #endif |
| 1744 | |
| 1745 | //============================================================================= |
| 1746 | //------------------------------is_member-------------------------------------- |
| 1747 | // Is 'l' a member of 'this'? |
| 1748 | bool IdealLoopTree::is_member(const IdealLoopTree *l) const { |
| 1749 | while( l->_nest > _nest ) l = l->_parent; |
| 1750 | return l == this; |
| 1751 | } |
| 1752 | |
| 1753 | //------------------------------set_nest--------------------------------------- |
| 1754 | // Set loop tree nesting depth. Accumulate _has_call bits. |
| 1755 | int IdealLoopTree::set_nest( uint depth ) { |
| 1756 | _nest = depth; |
| 1757 | int bits = _has_call; |
| 1758 | if( _child ) bits |= _child->set_nest(depth+1); |
| 1759 | if( bits ) _has_call = 1; |
| 1760 | if( _next ) bits |= _next ->set_nest(depth ); |
| 1761 | return bits; |
| 1762 | } |
| 1763 | |
| 1764 | //------------------------------split_fall_in---------------------------------- |
| 1765 | // Split out multiple fall-in edges from the loop header. Move them to a |
| 1766 | // private RegionNode before the loop. This becomes the loop landing pad. |
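| | // For example (sketch): a head Region with inputs { fall_in1, fall_in2, |
| | // backedge } keeps the backedge and gains a new landing pad |
| | // Region(fall_in1, fall_in2) as its remaining non-loop input; the Phis on |
| | // the head are split the same way. |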
| 1767 | void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) { |
| 1768 | PhaseIterGVN &igvn = phase->_igvn; |
| 1769 | uint i; |
| 1770 | |
| 1771 | // Make a new RegionNode to be the landing pad. |
| 1772 | Node *landing_pad = new RegionNode( fall_in_cnt+1 ); |
| 1773 | phase->set_loop(landing_pad,_parent); |
| 1774 | // Gather all the fall-in control paths into the landing pad |
| 1775 | uint icnt = fall_in_cnt; |
| 1776 | uint oreq = _head->req(); |
| 1777 | for( i = oreq-1; i>0; i-- ) |
| 1778 | if( !phase->is_member( this, _head->in(i) ) ) |
| 1779 | landing_pad->set_req(icnt--,_head->in(i)); |
| 1780 | |
| 1781 | // Peel off PhiNode edges as well |
| 1782 | for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { |
| 1783 | Node *oj = _head->fast_out(j); |
| 1784 | if( oj->is_Phi() ) { |
| 1785 | PhiNode* old_phi = oj->as_Phi(); |
| 1786 | assert( old_phi->region() == _head, "" ); |
| 1787 | igvn.hash_delete(old_phi); // Yank from hash before hacking edges |
| 1788 | Node *p = PhiNode::make_blank(landing_pad, old_phi); |
| 1789 | uint icnt = fall_in_cnt; |
| 1790 | for( i = oreq-1; i>0; i-- ) { |
| 1791 | if( !phase->is_member( this, _head->in(i) ) ) { |
| 1792 | p->init_req(icnt--, old_phi->in(i)); |
| 1793 | // Go ahead and clean out old edges from old phi |
| 1794 | old_phi->del_req(i); |
| 1795 | } |
| 1796 | } |
| 1797 | // Search for CSE's here, because ZKM.jar does a lot of |
| 1798 | // loop hackery and we need to be a little incremental |
| 1799 | // with the CSE to avoid O(N^2) node blow-up. |
| 1800 | Node *p2 = igvn.hash_find_insert(p); // Look for a CSE |
| 1801 | if( p2 ) { // Found CSE |
| 1802 | p->destruct(); // Recover useless new node |
| 1803 | p = p2; // Use old node |
| 1804 | } else { |
| 1805 | igvn.register_new_node_with_optimizer(p, old_phi); |
| 1806 | } |
| 1807 | // Make old Phi refer to new Phi. |
| 1808 | old_phi->add_req(p); |
| 1809 | // Check for the special case of making the old phi useless and |
| 1810 | // disappear it. In JavaGrande I have a case where this useless |
| 1811 | // Phi is the loop limit and prevents recognizing a CountedLoop |
| 1812 | // which in turn prevents removing an empty loop. |
| 1813 | Node *id_old_phi = igvn.apply_identity(old_phi); |
| 1814 | if( id_old_phi != old_phi ) { // Found a simple identity? |
| 1815 | // Note that I cannot call 'replace_node' here, because |
| 1816 | // that will yank the edge from old_phi to the Region and |
| 1817 | // I'm mid-iteration over the Region's uses. |
| 1818 | for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) { |
| 1819 | Node* use = old_phi->last_out(i); |
| 1820 | igvn.rehash_node_delayed(use); |
| 1821 | uint uses_found = 0; |
| 1822 | for (uint j = 0; j < use->len(); j++) { |
| 1823 | if (use->in(j) == old_phi) { |
| 1824 | if (j < use->req()) use->set_req (j, id_old_phi); |
| 1825 | else use->set_prec(j, id_old_phi); |
| 1826 | uses_found++; |
| 1827 | } |
| 1828 | } |
| 1829 | i -= uses_found; // we deleted 1 or more copies of this edge |
| 1830 | } |
| 1831 | } |
| 1832 | igvn._worklist.push(old_phi); |
| 1833 | } |
| 1834 | } |
| 1835 | // Finally clean out the fall-in edges from the RegionNode |
| 1836 | for( i = oreq-1; i>0; i-- ) { |
| 1837 | if( !phase->is_member( this, _head->in(i) ) ) { |
| 1838 | _head->del_req(i); |
| 1839 | } |
| 1840 | } |
| 1841 | igvn.rehash_node_delayed(_head); |
| 1842 | // Transform landing pad |
| 1843 | igvn.register_new_node_with_optimizer(landing_pad, _head); |
| 1844 | // Insert landing pad into the header |
| 1845 | _head->add_req(landing_pad); |
| 1846 | } |
| 1847 | |
| 1848 | //------------------------------split_outer_loop------------------------------- |
| 1849 | // Split out the outermost loop from this shared header. |
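| | // For example (sketch): a shared head with inputs { entry, inner_backedge, |
| | // outer_backedge } gets a new outer LoopNode(entry, outer_backedge), and the |
| | // old head keeps only the inner backedge with 'outer' as its fall-in; the |
| | // Phis are split accordingly. |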
| 1850 | void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) { |
| 1851 | PhaseIterGVN &igvn = phase->_igvn; |
| 1852 | |
| 1853 | // Find index of outermost loop; it should also be my tail. |
| 1854 | uint outer_idx = 1; |
| 1855 | while( _head->in(outer_idx) != _tail ) outer_idx++; |
| 1856 | |
| 1857 | // Make a LoopNode for the outermost loop. |
| 1858 | Node *ctl = _head->in(LoopNode::EntryControl); |
| 1859 | Node *outer = new LoopNode( ctl, _head->in(outer_idx) ); |
| 1860 | outer = igvn.register_new_node_with_optimizer(outer, _head); |
| 1861 | phase->set_created_loop_node(); |
| 1862 | |
| 1863 | // Outermost loop falls into '_head' loop |
| 1864 | _head->set_req(LoopNode::EntryControl, outer); |
| 1865 | _head->del_req(outer_idx); |
| 1866 | // Split all the Phis up between '_head' loop and 'outer' loop. |
| 1867 | for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { |
| 1868 | Node *out = _head->fast_out(j); |
| 1869 | if( out->is_Phi() ) { |
| 1870 | PhiNode *old_phi = out->as_Phi(); |
| 1871 | assert( old_phi->region() == _head, "" ); |
| 1872 | Node *phi = PhiNode::make_blank(outer, old_phi); |
| 1873 | phi->init_req(LoopNode::EntryControl, old_phi->in(LoopNode::EntryControl)); |
| 1874 | phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx)); |
| 1875 | phi = igvn.register_new_node_with_optimizer(phi, old_phi); |
| 1876 | // Make old Phi point to new Phi on the fall-in path |
| 1877 | igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi); |
| 1878 | old_phi->del_req(outer_idx); |
| 1879 | } |
| 1880 | } |
| 1881 | |
| 1882 | // Use the new loop head instead of the old shared one |
| 1883 | _head = outer; |
| 1884 | phase->set_loop(_head, this); |
| 1885 | } |
| 1886 | |
| 1887 | //------------------------------fix_parent------------------------------------- |
| 1888 | static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) { |
| 1889 | loop->_parent = parent; |
| 1890 | if( loop->_child ) fix_parent( loop->_child, loop ); |
| 1891 | if( loop->_next ) fix_parent( loop->_next , parent ); |
| 1892 | } |
| 1893 | |
| 1894 | //------------------------------estimate_path_freq----------------------------- |
| 1895 | static float estimate_path_freq( Node *n ) { |
| 1896 | // Try to extract some path frequency info |
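| | // Walk up the control chain (a bounded number of steps), skipping safepoints |
| | // and uncommon-trap-like branches, and return either a call-site profile |
| | // count or a branch frequency derived from If counts; 0.0f means no estimate. |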
| 1897 | IfNode *iff; |
| 1898 | for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests |
| 1899 | uint nop = n->Opcode(); |
| 1900 | if( nop == Op_SafePoint ) { // Skip any safepoint |
| 1901 | n = n->in(0); |
| 1902 | continue; |
| 1903 | } |
| 1904 | if( nop == Op_CatchProj ) { // Get count from a prior call |
| 1905 | // Assume call does not always throw exceptions: means the call-site |
| 1906 | // count is also the frequency of the fall-through path. |
| 1907 | assert( n->is_CatchProj(), "" ); |
| 1908 | if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index ) |
| 1909 | return 0.0f; // Assume call exception path is rare |
| 1910 | Node *call = n->in(0)->in(0)->in(0); |
| 1911 | assert( call->is_Call(), "expect a call here" ); |
| 1912 | const JVMState *jvms = ((CallNode*)call)->jvms(); |
| 1913 | ciMethodData* methodData = jvms->method()->method_data(); |
| 1914 | if (!methodData->is_mature()) return 0.0f; // No call-site data |
| 1915 | ciProfileData* data = methodData->bci_to_data(jvms->bci()); |
| 1916 | if ((data == NULL) || !data->is_CounterData()) { |
| 1917 | // no call profile available, try call's control input |
| 1918 | n = n->in(0); |
| 1919 | continue; |
| 1920 | } |
| 1921 | return data->as_CounterData()->count()/FreqCountInvocations; |
| 1922 | } |
| 1923 | // See if there's a gating IF test |
| 1924 | Node *n_c = n->in(0); |
| 1925 | if( !n_c->is_If() ) break; // No estimate available |
| 1926 | iff = n_c->as_If(); |
| 1927 | if( iff->_fcnt != COUNT_UNKNOWN ) // Have a valid count? |
| 1928 | // Compute how much count comes on this path |
| 1929 | return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt; |
| 1930 | // Have no count info. Skip dull uncommon-trap like branches. |
| 1931 | if( (nop == Op_IfTrue && iff->_prob < PROB_LIKELY_MAG(5)) || |
| 1932 | (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) ) |
| 1933 | break; |
| 1934 | // Skip through never-taken branch; look for a real loop exit. |
| 1935 | n = iff->in(0); |
| 1936 | } |
| 1937 | return 0.0f; // No estimate available |
| 1938 | } |
| 1939 | |
| 1940 | //------------------------------merge_many_backedges--------------------------- |
| 1941 | // Merge all the backedges from the shared header into a private Region. |
| 1942 | // Feed that region as the one backedge to this loop. |
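| | // For example (sketch): head(fall_in, be1, be2, be3) becomes |
| | // head(fall_in, Region(be1, be2, be3)), or head(fall_in, Region(be1, be2), be3) |
| | // if be3 is much hotter than the other backedges. |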
| 1943 | void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) { |
| 1944 | uint i; |
| 1945 | |
| 1946 | // Scan for the top 2 hottest backedges |
| 1947 | float hotcnt = 0.0f; |
| 1948 | float warmcnt = 0.0f; |
| 1949 | uint hot_idx = 0; |
| 1950 | // Loop starts at 2 because slot 1 is the fall-in path |
| 1951 | for( i = 2; i < _head->req(); i++ ) { |
| 1952 | float cnt = estimate_path_freq(_head->in(i)); |
| 1953 | if( cnt > hotcnt ) { // Grab hottest path |
| 1954 | warmcnt = hotcnt; |
| 1955 | hotcnt = cnt; |
| 1956 | hot_idx = i; |
| 1957 | } else if( cnt > warmcnt ) { // And 2nd hottest path |
| 1958 | warmcnt = cnt; |
| 1959 | } |
| 1960 | } |
| 1961 | |
| 1962 | // See if the hottest backedge is worthy of being an inner loop |
| 1963 | // by being much hotter than the next hottest backedge. |
| 1964 | if( hotcnt <= 0.0001 || |
| 1965 | hotcnt < 2.0*warmcnt ) hot_idx = 0; // No hot backedge |
| 1966 | |
| 1967 | // Peel out the backedges into a private merge point; peel |
| 1968 | // them all except optionally hot_idx. |
| 1969 | PhaseIterGVN &igvn = phase->_igvn; |
| 1970 | |
| 1971 | Node *hot_tail = NULL; |
| 1972 | // Make a Region for the merge point |
| 1973 | Node *r = new RegionNode(1); |
| 1974 | for( i = 2; i < _head->req(); i++ ) { |
| 1975 | if( i != hot_idx ) |
| 1976 | r->add_req( _head->in(i) ); |
| 1977 | else hot_tail = _head->in(i); |
| 1978 | } |
| 1979 | igvn.register_new_node_with_optimizer(r, _head); |
| 1980 | // Plug region into end of loop _head, followed by hot_tail |
| 1981 | while( _head->req() > 3 ) _head->del_req( _head->req()-1 ); |
| 1982 | igvn.replace_input_of(_head, 2, r); |
| 1983 | if( hot_idx ) _head->add_req(hot_tail); |
| 1984 | |
| 1985 | // Split all the Phis up between '_head' loop and the Region 'r' |
| 1986 | for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { |
| 1987 | Node *out = _head->fast_out(j); |
| 1988 | if( out->is_Phi() ) { |
| 1989 | PhiNode* n = out->as_Phi(); |
| 1990 | igvn.hash_delete(n); // Delete from hash before hacking edges |
| 1991 | Node *hot_phi = NULL; |
| 1992 | Node *phi = new PhiNode(r, n->type(), n->adr_type()); |
| 1993 | // Check all inputs for the ones to peel out |
| 1994 | uint j = 1; |
| 1995 | for( uint i = 2; i < n->req(); i++ ) { |
| 1996 | if( i != hot_idx ) |
| 1997 | phi->set_req( j++, n->in(i) ); |
| 1998 | else hot_phi = n->in(i); |
| 1999 | } |
| 2000 | // Register the phi but do not transform until whole place transforms |
| 2001 | igvn.register_new_node_with_optimizer(phi, n); |
| 2002 | // Add the merge phi to the old Phi |
| 2003 | while( n->req() > 3 ) n->del_req( n->req()-1 ); |
| 2004 | igvn.replace_input_of(n, 2, phi); |
| 2005 | if( hot_idx ) n->add_req(hot_phi); |
| 2006 | } |
| 2007 | } |
| 2008 | |
| 2009 | |
| 2010 | // Insert a new IdealLoopTree below me; turn it into a clone |
| 2011 | // of my loop tree. Turn self into a loop headed by _head and with |
| 2012 | // tail being the new merge point. |
| 2013 | IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail ); |
| 2014 | phase->set_loop(_tail,ilt); // Adjust tail |
| 2015 | _tail = r; // Self's tail is new merge point |
| 2016 | phase->set_loop(r,this); |
| 2017 | ilt->_child = _child; // New guy has my children |
| 2018 | _child = ilt; // Self has new guy as only child |
| 2019 | ilt->_parent = this; // new guy has self for parent |
| 2020 | ilt->_nest = _nest; // Same nesting depth (for now) |
| 2021 | |
| 2022 | // Starting with 'ilt', look for child loop trees using the same shared |
| 2023 | // header. Flatten these out; they will no longer be loops in the end. |
| 2024 | IdealLoopTree **pilt = &_child; |
| 2025 | while( ilt ) { |
| 2026 | if( ilt->_head == _head ) { |
| 2027 | uint i; |
| 2028 | for( i = 2; i < _head->req(); i++ ) |
| 2029 | if( _head->in(i) == ilt->_tail ) |
| 2030 | break; // Still a loop |
| 2031 | if( i == _head->req() ) { // No longer a loop |
| 2032 | // Flatten ilt. Hang ilt's "_next" list from the end of |
| 2033 | // ilt's '_child' list. Move the ilt's _child up to replace ilt. |
| 2034 | IdealLoopTree **cp = &ilt->_child; |
| 2035 | while( *cp ) cp = &(*cp)->_next; // Find end of child list |
| 2036 | *cp = ilt->_next; // Hang next list at end of child list |
| 2037 | *pilt = ilt->_child; // Move child up to replace ilt |
| 2038 | ilt->_head = NULL; // Flag as a loop UNIONED into parent |
| 2039 | ilt = ilt->_child; // Repeat using new ilt |
| 2040 | continue; // do not advance over ilt->_child |
| 2041 | } |
| 2042 | assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" ); |
| 2043 | phase->set_loop(_head,ilt); |
| 2044 | } |
| 2045 | pilt = &ilt->_child; // Advance to next |
| 2046 | ilt = *pilt; |
| 2047 | } |
| 2048 | |
| 2049 | if( _child ) fix_parent( _child, this ); |
| 2050 | } |
| 2051 | |
| 2052 | //------------------------------beautify_loops--------------------------------- |
| 2053 | // Split shared headers and insert loop landing pads. |
| 2054 | // Insert a LoopNode to replace the RegionNode. |
| 2055 | // Return TRUE if loop tree is structurally changed. |
| 2056 | bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) { |
| 2057 | bool result = false; |
| 2058 | // Cache parts in locals for easy access |
| 2059 | PhaseIterGVN &igvn = phase->_igvn; |
| 2060 | |
| 2061 | igvn.hash_delete(_head); // Yank from hash before hacking edges |
| 2062 | |
| 2063 | // Check for multiple fall-in paths. Peel off a landing pad if need be. |
| 2064 | int fall_in_cnt = 0; |
| 2065 | for( uint i = 1; i < _head->req(); i++ ) |
| 2066 | if( !phase->is_member( this, _head->in(i) ) ) |
| 2067 | fall_in_cnt++; |
| 2068 | assert( fall_in_cnt, "at least 1 fall-in path" ); |
| 2069 | if( fall_in_cnt > 1 ) // Need a loop landing pad to merge fall-ins |
| 2070 | split_fall_in( phase, fall_in_cnt ); |
| 2071 | |
| 2072 | // Swap inputs to the _head and all Phis to move the fall-in edge to |
| 2073 | // the left. |
| 2074 | fall_in_cnt = 1; |
| 2075 | while( phase->is_member( this, _head->in(fall_in_cnt) ) ) |
| 2076 | fall_in_cnt++; |
| 2077 | if( fall_in_cnt > 1 ) { |
| 2078 | // Since I am just swapping inputs I do not need to update def-use info |
| 2079 | Node *tmp = _head->in(1); |
| 2080 | igvn.rehash_node_delayed(_head); |
| 2081 | _head->set_req( 1, _head->in(fall_in_cnt) ); |
| 2082 | _head->set_req( fall_in_cnt, tmp ); |
| 2083 | // Swap also all Phis |
| 2084 | for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) { |
| 2085 | Node* phi = _head->fast_out(i); |
| 2086 | if( phi->is_Phi() ) { |
| 2087 | igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges |
| 2088 | tmp = phi->in(1); |
| 2089 | phi->set_req( 1, phi->in(fall_in_cnt) ); |
| 2090 | phi->set_req( fall_in_cnt, tmp ); |
| 2091 | } |
| 2092 | } |
| 2093 | } |
| 2094 | assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" ); |
| 2095 | assert( phase->is_member( this, _head->in(2) ), "right edge is loop" ); |
| 2096 | |
| 2097 | // If I am a shared header (multiple backedges), peel off the many |
| 2098 | // backedges into a private merge point and use the merge point as |
| 2099 | // the one true backedge. |
| 2100 | if( _head->req() > 3 ) { |
| 2101 | // Merge the many backedges into a single backedge but leave |
| 2102 | // the hottest backedge as separate edge for the following peel. |
| 2103 | merge_many_backedges( phase ); |
| 2104 | result = true; |
| 2105 | } |
| 2106 | |
| 2107 | // If I have one hot backedge, split off the outermost loop from myself. |
| 2108 | // I had better be the outermost loop. |
| 2109 | if (_head->req() > 3 && !_irreducible) { |
| 2110 | split_outer_loop( phase ); |
| 2111 | result = true; |
| 2112 | |
| 2113 | } else if (!_head->is_Loop() && !_irreducible) { |
| 2114 | // Make a new LoopNode to replace the old loop head |
| 2115 | Node *l = new LoopNode( _head->in(1), _head->in(2) ); |
| 2116 | l = igvn.register_new_node_with_optimizer(l, _head); |
| 2117 | phase->set_created_loop_node(); |
| 2118 | // Go ahead and replace _head |
| 2119 | phase->_igvn.replace_node( _head, l ); |
| 2120 | _head = l; |
| 2121 | phase->set_loop(_head, this); |
| 2122 | } |
| 2123 | |
| 2124 | // Now recursively beautify nested loops |
| 2125 | if( _child ) result |= _child->beautify_loops( phase ); |
| 2126 | if( _next ) result |= _next ->beautify_loops( phase ); |
| 2127 | return result; |
| 2128 | } |
| 2129 | |
| 2130 | //------------------------------allpaths_check_safepts---------------------------- |
| 2131 | // Allpaths backwards scan from loop tail, terminating each path at first safepoint |
| 2132 | // encountered. Helper for check_safepts. |
| 2133 | void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) { |
| 2134 | assert(stack.size() == 0, "empty stack" ); |
| 2135 | stack.push(_tail); |
| 2136 | visited.Clear(); |
| 2137 | visited.set(_tail->_idx); |
| 2138 | while (stack.size() > 0) { |
| 2139 | Node* n = stack.pop(); |
| 2140 | if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { |
| 2141 | // Terminate this path |
| 2142 | } else if (n->Opcode() == Op_SafePoint) { |
| 2143 | if (_phase->get_loop(n) != this) { |
| 2144 | if (_required_safept == NULL) _required_safept = new Node_List(); |
| 2145 | _required_safept->push(n); // save the one closest to the tail |
| 2146 | } |
| 2147 | // Terminate this path |
| 2148 | } else { |
| 2149 | uint start = n->is_Region() ? 1 : 0; |
| 2150 | uint end = n->is_Region() && !n->is_Loop() ? n->req() : start + 1; |
| 2151 | for (uint i = start; i < end; i++) { |
| 2152 | Node* in = n->in(i); |
| 2153 | assert(in->is_CFG(), "must be" ); |
| 2154 | if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) { |
| 2155 | stack.push(in); |
| 2156 | } |
| 2157 | } |
| 2158 | } |
| 2159 | } |
| 2160 | } |
| 2161 | |
| 2162 | //------------------------------check_safepts---------------------------- |
| 2163 | // Given dominators, try to find loops with calls that must always be |
| 2164 | // executed (call dominates loop tail). These loops do not need non-call |
| 2165 | // safepoints (ncsfpt). |
| 2166 | // |
| 2167 | // A complication is that a safepoint in an inner loop may be needed |
| 2168 | // by an outer loop. In the following, the inner loop sees it has a |
| 2169 | // call (block 3) on every path from the head (block 2) to the |
| 2170 | // backedge (arc 3->2). So it deletes the ncsfpt (non-call safepoint) |
| 2171 | // in block 2, _but_ this leaves the outer loop without a safepoint. |
| 2172 | // |
| 2173 | // entry 0 |
| 2174 | // | |
| 2175 | // v |
| 2176 | // outer 1,2 +->1 |
| 2177 | // | | |
| 2178 | // | v |
| 2179 | // | 2<---+ ncsfpt in 2 |
| 2180 | // |_/|\ | |
| 2181 | // | v | |
| 2182 | // inner 2,3 / 3 | call in 3 |
| 2183 | // / | | |
| 2184 | // v +--+ |
| 2185 | // exit 4 |
| 2186 | // |
| 2187 | // |
| 2188 | // This method creates, for each loop, a list (_required_safept) of ncsfpt |
| 2189 | // nodes that must be protected. When an ncsfpt may be deleted, it is first |
| 2190 | // looked for in the lists of the outer loops of the current loop. |
| 2191 | // |
| 2192 | // The insights into the problem: |
| 2193 | // A) counted loops are okay |
| 2194 | // B) innermost loops are okay (only an inner loop can delete |
| 2195 | // a ncsfpt needed by an outer loop) |
| 2196 | // C) a loop is immune from an inner loop deleting a safepoint |
| 2197 | // if the loop has a call on the idom-path |
| 2198 | // D) a loop is also immune if it has a ncsfpt (non-call safepoint) on the |
| 2199 | // idom-path that is not in a nested loop |
| 2200 | // E) otherwise, an ncsfpt on the idom-path that is nested in an inner |
| 2201 | // loop needs to be prevented from deletion by an inner loop |
| 2202 | // |
| 2203 | // There are two analyses: |
| 2204 | // 1) The first, and cheaper one, scans the loop body from |
| 2205 | // tail to head following the idom (immediate dominator) |
| 2206 | // chain, looking for the cases (C,D,E) above. |
| 2207 | // Since inner loops are scanned before outer loops, there is summary |
| 2208 | // information about inner loops. Inner loops can be skipped over |
| 2209 | // when the tail of an inner loop is encountered. |
| 2210 | // |
| 2211 | // 2) The second, invoked if the first fails to find a call or ncsfpt on |
| 2212 | // the idom path (which is rare), scans all predecessor control paths |
| 2213 | // from the tail to the head, terminating a path when a call or sfpt |
| 2214 | // is encountered, to find the ncsfpt's that are closest to the tail. |
| 2215 | // |
| 2216 | void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { |
| 2217 | // Bottom up traversal |
| 2218 | IdealLoopTree* ch = _child; |
| 2219 | if (_child) _child->check_safepts(visited, stack); |
| 2220 | if (_next) _next ->check_safepts(visited, stack); |
| 2221 | |
| 2222 | if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) { |
| 2223 | bool has_call = false; // call on dom-path |
| 2224 | bool has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth |
| 2225 | Node* nonlocal_ncsfpt = NULL; // ncsfpt on dom-path at a deeper depth |
| 2226 | // Scan the dom-path nodes from tail to head |
| 2227 | for (Node* n = tail(); n != _head; n = _phase->idom(n)) { |
| 2228 | if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { |
| 2229 | has_call = true; |
| 2230 | _has_sfpt = 1; // Then no need for a safept! |
| 2231 | break; |
| 2232 | } else if (n->Opcode() == Op_SafePoint) { |
| 2233 | if (_phase->get_loop(n) == this) { |
| 2234 | has_local_ncsfpt = true; |
| 2235 | break; |
| 2236 | } |
| 2237 | if (nonlocal_ncsfpt == NULL) { |
| 2238 | nonlocal_ncsfpt = n; // save the one closest to the tail |
| 2239 | } |
| 2240 | } else { |
| 2241 | IdealLoopTree* nlpt = _phase->get_loop(n); |
| 2242 | if (this != nlpt) { |
| 2243 | // If at an inner loop tail, see if the inner loop has already |
| 2244 | // recorded seeing a call on the dom-path (and stop.) If not, |
| 2245 | // jump to the head of the inner loop. |
| 2246 | assert(is_member(nlpt), "nested loop" ); |
| 2247 | Node* tail = nlpt->_tail; |
| 2248 | if (tail->in(0)->is_If()) tail = tail->in(0); |
| 2249 | if (n == tail) { |
| 2250 | // If inner loop has call on dom-path, so does outer loop |
| 2251 | if (nlpt->_has_sfpt) { |
| 2252 | has_call = true; |
| 2253 | _has_sfpt = 1; |
| 2254 | break; |
| 2255 | } |
| 2256 | // Skip to head of inner loop |
| 2257 | assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head" ); |
| 2258 | n = nlpt->_head; |
| 2259 | } |
| 2260 | } |
| 2261 | } |
| 2262 | } |
| 2263 | // Record safepts that this loop needs preserved when an |
| 2264 | // inner loop attempts to delete its safepoints. |
| 2265 | if (_child != NULL && !has_call && !has_local_ncsfpt) { |
| 2266 | if (nonlocal_ncsfpt != NULL) { |
| 2267 | if (_required_safept == NULL) _required_safept = new Node_List(); |
| 2268 | _required_safept->push(nonlocal_ncsfpt); |
| 2269 | } else { |
| 2270 | // Failed to find a suitable safept on the dom-path. Now use |
| 2271 | // an all paths walk from tail to head, looking for safepoints to preserve. |
| 2272 | allpaths_check_safepts(visited, stack); |
| 2273 | } |
| 2274 | } |
| 2275 | } |
| 2276 | } |
| 2277 | |
| 2278 | //---------------------------is_deleteable_safept---------------------------- |
| 2279 | // Is safept not required by an outer loop? |
| 2280 | bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) { |
| 2281 | assert(sfpt->Opcode() == Op_SafePoint, "" ); |
| 2282 | IdealLoopTree* lp = get_loop(sfpt)->_parent; |
| 2283 | while (lp != NULL) { |
| 2284 | Node_List* sfpts = lp->_required_safept; |
| 2285 | if (sfpts != NULL) { |
| 2286 | for (uint i = 0; i < sfpts->size(); i++) { |
| 2287 | if (sfpt == sfpts->at(i)) |
| 2288 | return false; |
| 2289 | } |
| 2290 | } |
| 2291 | lp = lp->_parent; |
| 2292 | } |
| 2293 | return true; |
| 2294 | } |
| 2295 | |
| 2296 | //---------------------------replace_parallel_iv------------------------------- |
| 2297 | // Replace parallel induction variable (parallel to trip counter) |
| 2298 | void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) { |
| 2299 | assert(loop->_head->is_CountedLoop(), "" ); |
| 2300 | CountedLoopNode *cl = loop->_head->as_CountedLoop(); |
| 2301 | if (!cl->is_valid_counted_loop()) |
| 2302 | return; // skip malformed counted loop |
| 2303 | Node *incr = cl->incr(); |
| 2304 | if (incr == NULL) |
| 2305 | return; // Dead loop? |
| 2306 | Node *init = cl->init_trip(); |
| 2307 | Node *phi = cl->phi(); |
| 2308 | int stride_con = cl->stride_con(); |
| 2309 | |
| 2310 | // Visit all children, looking for Phis |
| 2311 | for (DUIterator i = cl->outs(); cl->has_out(i); i++) { |
| 2312 | Node *out = cl->out(i); |
| 2313 | // Look for other phis (secondary IVs). Skip dead ones |
| 2314 | if (!out->is_Phi() || out == phi || !has_node(out)) |
| 2315 | continue; |
| 2316 | PhiNode* phi2 = out->as_Phi(); |
| 2317 | Node *incr2 = phi2->in( LoopNode::LoopBackControl ); |
| 2318 | // Look for induction variables of the form: X += constant |
| 2319 | if (phi2->region() != loop->_head || |
| 2320 | incr2->req() != 3 || |
| 2321 | incr2->in(1) != phi2 || |
| 2322 | incr2 == incr || |
| 2323 | incr2->Opcode() != Op_AddI || |
| 2324 | !incr2->in(2)->is_Con()) |
| 2325 | continue; |
| 2326 | |
| 2327 | // Check for parallel induction variable (parallel to trip counter) |
| 2328 | // via an affine function. In particular, count-down loops with |
| 2329 | // count-up array indices are common. We only RCE references off |
| 2330 | // the trip-counter, so we need to convert all these to trip-counter |
| 2331 | // expressions. |
| 2332 | Node *init2 = phi2->in( LoopNode::EntryControl ); |
| 2333 | int stride_con2 = incr2->in(2)->get_int(); |
| 2334 | |
| 2335 | // The ratio of the two strides cannot be represented as an int |
| 2336 | // if stride_con2 is min_int and stride_con is -1. |
| 2337 | if (stride_con2 == min_jint && stride_con == -1) { |
| 2338 | continue; |
| 2339 | } |
| 2340 | |
| 2341 | // The general case here gets a little tricky. We want to find the |
| 2342 | // GCD of all possible parallel IV's and make a new IV using this |
| 2343 | // GCD for the loop. Then all possible IVs are simple multiples of |
| 2344 | // the GCD. In practice, this will cover very few extra loops. |
| 2345 | // Instead we require 'stride_con2' to be a multiple of 'stride_con', |
| 2346 | // where +/-1 is the common case, but other integer multiples are |
| 2347 | // also easy to handle. |
| 2348 | int ratio_con = stride_con2/stride_con; |
| 2349 | |
| 2350 | if ((ratio_con * stride_con) == stride_con2) { // Check for exact |
| 2351 | #ifndef PRODUCT |
| 2352 | if (TraceLoopOpts) { |
| 2353 | tty->print("Parallel IV: %d " , phi2->_idx); |
| 2354 | loop->dump_head(); |
| 2355 | } |
| 2356 | #endif |
| 2357 | // Convert to using the trip counter. The parallel induction |
| 2358 | // variable differs from the trip counter by a loop-invariant |
| 2359 | // amount, the difference between their respective initial values. |
| 2360 | // It is scaled by the 'ratio_con'. |
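| | // That is, phi2 is replaced below by |
| | //   ratio_con * phi + (init2 - ratio_con * init) |
| | // built out of MulI/SubI/AddI nodes. |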
| 2361 | Node* ratio = _igvn.intcon(ratio_con); |
| 2362 | set_ctrl(ratio, C->root()); |
| 2363 | Node* ratio_init = new MulINode(init, ratio); |
| 2364 | _igvn.register_new_node_with_optimizer(ratio_init, init); |
| 2365 | set_early_ctrl(ratio_init); |
| 2366 | Node* diff = new SubINode(init2, ratio_init); |
| 2367 | _igvn.register_new_node_with_optimizer(diff, init2); |
| 2368 | set_early_ctrl(diff); |
| 2369 | Node* ratio_idx = new MulINode(phi, ratio); |
| 2370 | _igvn.register_new_node_with_optimizer(ratio_idx, phi); |
| 2371 | set_ctrl(ratio_idx, cl); |
| 2372 | Node* add = new AddINode(ratio_idx, diff); |
| 2373 | _igvn.register_new_node_with_optimizer(add); |
| 2374 | set_ctrl(add, cl); |
| 2375 | _igvn.replace_node( phi2, add ); |
| 2376 | // Sometimes an induction variable is unused |
| 2377 | if (add->outcnt() == 0) { |
| 2378 | _igvn.remove_dead_node(add); |
| 2379 | } |
| 2380 | --i; // deleted this phi; rescan starting with next position |
| 2381 | continue; |
| 2382 | } |
| 2383 | } |
| 2384 | } |
| 2385 | |
| 2386 | void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) { |
| 2387 | Node* keep = NULL; |
| 2388 | if (keep_one) { |
| 2389 | // Look for a safepoint on the idom-path. |
| 2390 | for (Node* i = tail(); i != _head; i = phase->idom(i)) { |
| 2391 | if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) { |
| 2392 | keep = i; |
| 2393 | break; // Found one |
| 2394 | } |
| 2395 | } |
| 2396 | } |
| 2397 | |
| 2398 | // Don't remove any safepoints if it is requested to keep a single safepoint and |
| 2399 | // no safepoint was found on idom-path. It is not safe to remove any safepoint |
| 2400 | // in this case since there's no safepoint dominating all paths in the loop body. |
| 2401 | bool prune = !keep_one || keep != NULL; |
| 2402 | |
| 2403 | // Delete other safepoints in this loop. |
| 2404 | Node_List* sfpts = _safepts; |
| 2405 | if (prune && sfpts != NULL) { |
| 2406 | assert(keep == NULL || keep->Opcode() == Op_SafePoint, "not safepoint" ); |
| 2407 | for (uint i = 0; i < sfpts->size(); i++) { |
| 2408 | Node* n = sfpts->at(i); |
| 2409 | assert(phase->get_loop(n) == this, "" ); |
| 2410 | if (n != keep && phase->is_deleteable_safept(n)) { |
| 2411 | phase->lazy_replace(n, n->in(TypeFunc::Control)); |
| 2412 | } |
| 2413 | } |
| 2414 | } |
| 2415 | } |
| 2416 | |
| 2417 | //------------------------------counted_loop----------------------------------- |
| 2418 | // Convert to counted loops where possible |
| 2419 | void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { |
| 2420 | |
| 2421 | // For grins, set the inner-loop flag here |
| 2422 | if (!_child) { |
| 2423 | if (_head->is_Loop()) _head->as_Loop()->set_inner_loop(); |
| 2424 | } |
| 2425 | |
| 2426 | IdealLoopTree* loop = this; |
| 2427 | if (_head->is_CountedLoop() || |
| 2428 | phase->is_counted_loop(_head, loop)) { |
| 2429 | |
| 2430 | if (LoopStripMiningIter == 0 || (LoopStripMiningIter > 1 && _child == NULL)) { |
| 2431 | // Indicate we do not need a safepoint here |
| 2432 | _has_sfpt = 1; |
| 2433 | } |
| 2434 | |
| 2435 | // Remove safepoints |
| 2436 | bool keep_one_sfpt = !(_has_call || _has_sfpt); |
| 2437 | remove_safepoints(phase, keep_one_sfpt); |
| 2438 | |
| 2439 | // Look for induction variables |
| 2440 | phase->replace_parallel_iv(this); |
| 2441 | |
| 2442 | } else if (_parent != NULL && !_irreducible) { |
| 2443 | // Not a counted loop. Keep one safepoint. |
| 2444 | bool keep_one_sfpt = true; |
| 2445 | remove_safepoints(phase, keep_one_sfpt); |
| 2446 | } |
| 2447 | |
| 2448 | // Recursively process child and sibling loops |
| 2449 | assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?" ); |
| 2450 | assert(loop->_child != this || (loop->_child->_child == NULL && loop->_child->_next == NULL), "would miss some loops" ); |
| 2451 | if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase); |
| 2452 | if (loop->_next) loop->_next ->counted_loop(phase); |
| 2453 | } |
| 2454 | |
| 2455 | |
| 2456 | // The Estimated Loop Clone Size: |
| 2457 | // CloneFactor * (~112% * BodySize + BC) + CC + FanOutTerm, |
| 2458 | // where BC and CC are totally ad-hoc/magic "body" and "clone" constants, |
| 2459 | // respectively, used to ensure that the node usage estimates made are on the |
| 2460 | // safe side, for the most part. The FanOutTerm is an attempt to estimate the |
| 2461 | // possible additional/excessive nodes generated due to data and control flow |
| 2462 | // merging, for edges reaching outside the loop. |
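| | // For example (hypothetical numbers): with factor = 2 and a body of 100 |
| | // nodes, sz = 100 + (100 + 7) / 8 = 113, giving a base estimate of |
| | // 2 * (113 + 13) + 17 = 269 nodes before the fan-out term. |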
| 2463 | uint IdealLoopTree::est_loop_clone_sz(uint factor) const { |
| 2464 | |
| 2465 | precond(0 < factor && factor < 16); |
| 2466 | |
| 2467 | uint const bc = 13; |
| 2468 | uint const cc = 17; |
| 2469 | uint const sz = _body.size() + (_body.size() + 7) / 8; |
| 2470 | uint estimate = factor * (sz + bc) + cc; |
| 2471 | |
| 2472 | assert((estimate - cc) / factor == sz + bc, "overflow" ); |
| 2473 | |
| 2474 | uint ctrl_edge_out_cnt = 0; |
| 2475 | uint data_edge_out_cnt = 0; |
| 2476 | |
| 2477 | for (uint i = 0; i < _body.size(); i++) { |
| 2478 | Node* node = _body.at(i); |
| 2479 | uint outcnt = node->outcnt(); |
| 2480 | |
| 2481 | for (uint k = 0; k < outcnt; k++) { |
| 2482 | Node* out = node->raw_out(k); |
| 2483 | |
| 2484 | if (out->is_CFG()) { |
| 2485 | if (!is_member(_phase->get_loop(out))) { |
| 2486 | ctrl_edge_out_cnt++; |
| 2487 | } |
| 2488 | } else { |
| 2489 | Node* ctrl = _phase->get_ctrl(out); |
| 2490 | assert(ctrl->is_CFG(), "must be" ); |
| 2491 | if (!is_member(_phase->get_loop(ctrl))) { |
| 2492 | data_edge_out_cnt++; |
| 2493 | } |
| 2494 | } |
| 2495 | } |
| 2496 | } |
| 2497 | // Add data and control count (x2.0) to estimate iff both are > 0. This is |
| 2498 | // a rather pessimistic estimate for the most part, in particular for some |
| 2499 | // complex loops, but still not enough to capture all loops. |
| 2500 | if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) { |
| 2501 | estimate += 2 * (ctrl_edge_out_cnt + data_edge_out_cnt); |
| 2502 | } |
| 2503 | |
| 2504 | return estimate; |
| 2505 | } |
| 2506 | |
| 2507 | #ifndef PRODUCT |
| 2508 | //------------------------------dump_head-------------------------------------- |
| 2509 | // Dump 1 liner for loop header info |
| 2510 | void IdealLoopTree::dump_head() const { |
| 2511 | for (uint i = 0; i < _nest; i++) { |
| 2512 | tty->print(" " ); |
| 2513 | } |
| 2514 | tty->print("Loop: N%d/N%d " ,_head->_idx,_tail->_idx); |
| 2515 | if (_irreducible) tty->print(" IRREDUCIBLE" ); |
| 2516 | Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl); |
| 2517 | Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); |
| 2518 | if (predicate != NULL ) { |
| 2519 | tty->print(" limit_check" ); |
| 2520 | entry = PhaseIdealLoop::skip_loop_predicates(entry); |
| 2521 | } |
| 2522 | if (UseLoopPredicate) { |
| 2523 | entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); |
| 2524 | if (entry != NULL) { |
| 2525 | tty->print(" predicated" ); |
| 2526 | entry = PhaseIdealLoop::skip_loop_predicates(entry); |
| 2527 | } |
| 2528 | } |
| 2529 | if (UseProfiledLoopPredicate) { |
| 2530 | entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); |
| 2531 | if (entry != NULL) { |
| 2532 | tty->print(" profile_predicated" ); |
| 2533 | } |
| 2534 | } |
| 2535 | if (_head->is_CountedLoop()) { |
| 2536 | CountedLoopNode *cl = _head->as_CountedLoop(); |
| 2537 | tty->print(" counted" ); |
| 2538 | |
| 2539 | Node* init_n = cl->init_trip(); |
| 2540 | if (init_n != NULL && init_n->is_Con()) |
| 2541 | tty->print(" [%d," , cl->init_trip()->get_int()); |
| 2542 | else |
| 2543 | tty->print(" [int," ); |
| 2544 | Node* limit_n = cl->limit(); |
| 2545 | if (limit_n != NULL && limit_n->is_Con()) |
| 2546 | tty->print("%d)," , cl->limit()->get_int()); |
| 2547 | else |
| 2548 | tty->print("int)," ); |
| 2549 | int stride_con = cl->stride_con(); |
| 2550 | if (stride_con > 0) tty->print("+" ); |
| 2551 | tty->print("%d" , stride_con); |
| 2552 | |
| 2553 | tty->print(" (%0.f iters) " , cl->profile_trip_cnt()); |
| 2554 | |
| 2555 | if (cl->is_pre_loop ()) tty->print(" pre" ); |
| 2556 | if (cl->is_main_loop()) tty->print(" main" ); |
| 2557 | if (cl->is_post_loop()) tty->print(" post" ); |
| 2558 | if (cl->is_vectorized_loop()) tty->print(" vector" ); |
| 2559 | if (cl->range_checks_present()) tty->print(" rc " ); |
| 2560 | if (cl->is_multiversioned()) tty->print(" multi " ); |
| 2561 | } |
| 2562 | if (_has_call) tty->print(" has_call" ); |
| 2563 | if (_has_sfpt) tty->print(" has_sfpt" ); |
| 2564 | if (_rce_candidate) tty->print(" rce" ); |
| 2565 | if (_safepts != NULL && _safepts->size() > 0) { |
| 2566 | tty->print(" sfpts={" ); _safepts->dump_simple(); tty->print(" }" ); |
| 2567 | } |
| 2568 | if (_required_safept != NULL && _required_safept->size() > 0) { |
| 2569 | tty->print(" req={" ); _required_safept->dump_simple(); tty->print(" }" ); |
| 2570 | } |
| 2571 | if (Verbose) { |
| 2572 | tty->print(" body={" ); _body.dump_simple(); tty->print(" }" ); |
| 2573 | } |
| 2574 | if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) { |
| 2575 | tty->print(" strip_mined" ); |
| 2576 | } |
| 2577 | tty->cr(); |
| 2578 | } |
| 2579 | |
| 2580 | //------------------------------dump------------------------------------------- |
| 2581 | // Dump loops by loop tree |
| 2582 | void IdealLoopTree::dump() const { |
| 2583 | dump_head(); |
| 2584 | if (_child) _child->dump(); |
| 2585 | if (_next) _next ->dump(); |
| 2586 | } |
| 2587 | |
| 2588 | #endif |
| 2589 | |
| 2590 | static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) { |
| 2591 | if (loop == root) { |
| 2592 | if (loop->_child != NULL) { |
| 2593 | log->begin_head("loop_tree" ); |
| 2594 | log->end_head(); |
| 2595 | if( loop->_child ) log_loop_tree(root, loop->_child, log); |
| 2596 | log->tail("loop_tree" ); |
| 2597 | assert(loop->_next == NULL, "what?" ); |
| 2598 | } |
| 2599 | } else { |
| 2600 | Node* head = loop->_head; |
| 2601 | log->begin_head("loop" ); |
| 2602 | log->print(" idx='%d' " , head->_idx); |
| 2603 | if (loop->_irreducible) log->print("irreducible='1' " ); |
| 2604 | if (head->is_Loop()) { |
| 2605 | if (head->as_Loop()->is_inner_loop()) log->print("inner_loop='1' " ); |
| 2606 | if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' " ); |
| 2607 | } |
| 2608 | if (head->is_CountedLoop()) { |
| 2609 | CountedLoopNode* cl = head->as_CountedLoop(); |
| 2610 | if (cl->is_pre_loop()) log->print("pre_loop='%d' " , cl->main_idx()); |
| 2611 | if (cl->is_main_loop()) log->print("main_loop='%d' " , cl->_idx); |
| 2612 | if (cl->is_post_loop()) log->print("post_loop='%d' " , cl->main_idx()); |
| 2613 | } |
| 2614 | log->end_head(); |
| 2615 | if( loop->_child ) log_loop_tree(root, loop->_child, log); |
| 2616 | log->tail("loop" ); |
| 2617 | if( loop->_next ) log_loop_tree(root, loop->_next, log); |
| 2618 | } |
| 2619 | } |
| 2620 | |
| 2621 | //---------------------collect_potentially_useful_predicates----------------------- |
| 2622 | // Helper function to collect potentially useful predicates to prevent them from |
| 2623 | // being eliminated by PhaseIdealLoop::eliminate_useless_predicates |
| 2624 | void PhaseIdealLoop::collect_potentially_useful_predicates( |
| 2625 | IdealLoopTree * loop, Unique_Node_List &useful_predicates) { |
| 2626 | if (loop->_child) { // child |
| 2627 | collect_potentially_useful_predicates(loop->_child, useful_predicates); |
| 2628 | } |
| 2629 | |
| 2630 | // self (only loops that we can apply loop predication may use their predicates) |
| 2631 | if (loop->_head->is_Loop() && |
| 2632 | !loop->_irreducible && |
| 2633 | !loop->tail()->is_top()) { |
| 2634 | LoopNode* lpn = loop->_head->as_Loop(); |
| 2635 | Node* entry = lpn->in(LoopNode::EntryControl); |
| 2636 | Node* predicate_proj = find_predicate(entry); // loop_limit_check first |
| 2637 | if (predicate_proj != NULL ) { // right pattern that can be used by loop predication |
| 2638 | assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be" ); |
| 2639 | useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one |
| 2640 | entry = skip_loop_predicates(entry); |
| 2641 | } |
| 2642 | predicate_proj = find_predicate(entry); // Predicate |
| 2643 | if (predicate_proj != NULL ) { |
| 2644 | useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one |
| 2645 | entry = skip_loop_predicates(entry); |
| 2646 | } |
| 2647 | if (UseProfiledLoopPredicate) { |
| 2648 | predicate_proj = find_predicate(entry); // Predicate |
| 2649 | if (predicate_proj != NULL ) { |
| 2650 | useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one |
| 2651 | } |
| 2652 | } |
| 2653 | } |
| 2654 | |
| 2655 | if (loop->_next) { // sibling |
| 2656 | collect_potentially_useful_predicates(loop->_next, useful_predicates); |
| 2657 | } |
| 2658 | } |
| 2659 | |
| 2660 | //------------------------eliminate_useless_predicates----------------------------- |
| 2661 | // Eliminate all inserted predicates if they could not be used by loop predication. |
| 2662 | // Note: it will also eliminate the loop limit check predicate, since it also
| 2663 | // uses an Opaque1 node (see Parse::add_predicate()).
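|      | // Sketch of the mechanism used below: each parser-inserted predicate is
|      | // keyed by its Opaque1 node. Any Opaque1 that was not collected as useful
|      | // is replaced by its input, so the guarding If can then constant-fold
|      | // away during the next round of IGVN.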
| 2664 | void PhaseIdealLoop::eliminate_useless_predicates() { |
| 2665 | if (C->predicate_count() == 0) |
| 2666 | return; // no predicate left |
| 2667 | |
| 2668 | Unique_Node_List useful_predicates; // to store useful predicates |
| 2669 | if (C->has_loops()) { |
| 2670 | collect_potentially_useful_predicates(_ltree_root->_child, useful_predicates); |
| 2671 | } |
| 2672 | |
| 2673 | for (int i = C->predicate_count(); i > 0; i--) { |
| 2674 | Node * n = C->predicate_opaque1_node(i-1); |
| 2675 | assert(n->Opcode() == Op_Opaque1, "must be" ); |
| 2676 | if (!useful_predicates.member(n)) { // not in the useful list |
| 2677 | _igvn.replace_node(n, n->in(1)); |
| 2678 | } |
| 2679 | } |
| 2680 | } |
| 2681 | |
| 2682 | //------------------------process_expensive_nodes----------------------------- |
| 2683 | // Expensive nodes have their control input set to prevent the GVN
| 2684 | // from commoning them, since commoning would force the resulting node
| 2685 | // onto a more frequently executed path. Use CFG information here to
| 2686 | // change the control inputs so that some expensive nodes can be
| 2687 | // commoned while still not being executed more frequently.
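|      | // Illustrative case handled below: if two identical expensive nodes end
|      | // up with their controls on the two projections of the same If, both
|      | // controls are moved to the If's immediate dominator, after which IGVN
|      | // can common the two nodes without executing the survivor more often.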
| 2688 | bool PhaseIdealLoop::process_expensive_nodes() { |
| 2689 | assert(OptimizeExpensiveOps, "optimization off?" ); |
| 2690 | |
| 2691 | // Sort nodes to bring similar nodes together |
| 2692 | C->sort_expensive_nodes(); |
| 2693 | |
| 2694 | bool progress = false; |
| 2695 | |
| 2696 | for (int i = 0; i < C->expensive_count(); ) { |
| 2697 | Node* n = C->expensive_node(i); |
| 2698 | int start = i; |
| 2699 | // Find nodes similar to n |
| 2700 | i++; |
| 2701 | for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++); |
| 2702 | int end = i; |
| 2703 | // And compare them two by two |
| 2704 | for (int j = start; j < end; j++) { |
| 2705 | Node* n1 = C->expensive_node(j); |
| 2706 | if (is_node_unreachable(n1)) { |
| 2707 | continue; |
| 2708 | } |
| 2709 | for (int k = j+1; k < end; k++) { |
| 2710 | Node* n2 = C->expensive_node(k); |
| 2711 | if (is_node_unreachable(n2)) { |
| 2712 | continue; |
| 2713 | } |
| 2714 | |
| 2715 | assert(n1 != n2, "should be pair of nodes" ); |
| 2716 | |
| 2717 | Node* c1 = n1->in(0); |
| 2718 | Node* c2 = n2->in(0); |
| 2719 | |
| 2720 | Node* parent_c1 = c1; |
| 2721 | Node* parent_c2 = c2; |
| 2722 | |
| 2723 | // The call to get_early_ctrl_for_expensive() moves the |
| 2724 | // expensive nodes up but stops at loops that are in an if
| 2725 | // branch. See whether we can exit the loop and move above the |
| 2726 | // If. |
| 2727 | if (c1->is_Loop()) { |
| 2728 | parent_c1 = c1->in(1); |
| 2729 | } |
| 2730 | if (c2->is_Loop()) { |
| 2731 | parent_c2 = c2->in(1); |
| 2732 | } |
| 2733 | |
| 2734 | if (parent_c1 == parent_c2) { |
| 2735 | _igvn._worklist.push(n1); |
| 2736 | _igvn._worklist.push(n2); |
| 2737 | continue; |
| 2738 | } |
| 2739 | |
| 2740 | // Look for identical expensive node up the dominator chain. |
| 2741 | if (is_dominator(c1, c2)) { |
| 2742 | c2 = c1; |
| 2743 | } else if (is_dominator(c2, c1)) { |
| 2744 | c1 = c2; |
| 2745 | } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() && |
| 2746 | parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) { |
| 2747 | // Both branches have the same expensive node so move it up |
| 2748 | // before the if. |
| 2749 | c1 = c2 = idom(parent_c1->in(0)); |
| 2750 | } |
| 2751 | // Do the actual moves |
| 2752 | if (n1->in(0) != c1) { |
| 2753 | _igvn.hash_delete(n1); |
| 2754 | n1->set_req(0, c1); |
| 2755 | _igvn.hash_insert(n1); |
| 2756 | _igvn._worklist.push(n1); |
| 2757 | progress = true; |
| 2758 | } |
| 2759 | if (n2->in(0) != c2) { |
| 2760 | _igvn.hash_delete(n2); |
| 2761 | n2->set_req(0, c2); |
| 2762 | _igvn.hash_insert(n2); |
| 2763 | _igvn._worklist.push(n2); |
| 2764 | progress = true; |
| 2765 | } |
| 2766 | } |
| 2767 | } |
| 2768 | } |
| 2769 | |
| 2770 | return progress; |
| 2771 | } |
| 2772 | |
| 2773 | |
| 2774 | //============================================================================= |
| 2775 | //----------------------------build_and_optimize------------------------------- |
| 2776 | // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to |
| 2777 | // its corresponding LoopNode. Depending on 'mode', do some loop cleanups.
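|      | // Rough order of work below: build the loop tree, beautify it (split
|      | // shared headers), build dominators, place data nodes early, detect
|      | // counted loops, place data nodes late, then run the loop transforms
|      | // proper (predicate cleanup, reassociation, split-if, loop predication,
|      | // iteration splitting and finally SuperWord), with IGVN cleanups along
|      | // the way. Verify-only passes and loop-free methods bail out early.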
| 2778 | void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) { |
| 2779 | bool do_split_ifs = (mode == LoopOptsDefault); |
| 2780 | bool skip_loop_opts = (mode == LoopOptsNone); |
| 2781 | |
| 2782 | int old_progress = C->major_progress(); |
| 2783 | uint orig_worklist_size = _igvn._worklist.size(); |
| 2784 | |
| 2785 | // Reset major-progress flag for the driver's heuristics |
| 2786 | C->clear_major_progress(); |
| 2787 | |
| 2788 | #ifndef PRODUCT |
| 2789 | // Capture for later assert |
| 2790 | uint unique = C->unique(); |
| 2791 | _loop_invokes++; |
| 2792 | _loop_work += unique; |
| 2793 | #endif |
| 2794 | |
| 2795 | // True if the method has at least 1 irreducible loop |
| 2796 | _has_irreducible_loops = false; |
| 2797 | |
| 2798 | _created_loop_node = false; |
| 2799 | |
| 2800 | Arena *a = Thread::current()->resource_area(); |
| 2801 | VectorSet visited(a); |
| 2802 | // Pre-grow the mapping from Nodes to IdealLoopTrees. |
| 2803 | _nodes.map(C->unique(), NULL); |
| 2804 | memset(_nodes.adr(), 0, wordSize * C->unique()); |
| 2805 | |
| 2806 | // Pre-build the top-level outermost loop tree entry |
| 2807 | _ltree_root = new IdealLoopTree( this, C->root(), C->root() ); |
| 2808 | // Do not need a safepoint at the top level |
| 2809 | _ltree_root->_has_sfpt = 1; |
| 2810 | |
| 2811 | // Initialize Dominators. |
| 2812 | // Checked in clone_loop_predicate() during beautify_loops(). |
| 2813 | _idom_size = 0; |
| 2814 | _idom = NULL; |
| 2815 | _dom_depth = NULL; |
| 2816 | _dom_stk = NULL; |
| 2817 | |
| 2818 | // Empty pre-order array |
| 2819 | allocate_preorders(); |
| 2820 | |
| 2821 | // Build a loop tree on the fly. Build a mapping from CFG nodes to |
| 2822 | // IdealLoopTree entries. Data nodes are NOT walked. |
| 2823 | build_loop_tree(); |
| 2824 | // Check for bailout, and return |
| 2825 | if (C->failing()) { |
| 2826 | return; |
| 2827 | } |
| 2828 | |
| 2829 | // No loops after all |
| 2830 | if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false); |
| 2831 | |
| 2832 | // There should always be an outer loop containing the Root and Return nodes. |
| 2833 | // If not, we have a degenerate empty program. Bail out in this case. |
| 2834 | if (!has_node(C->root())) { |
| 2835 | if (!_verify_only) { |
| 2836 | C->clear_major_progress(); |
| 2837 | C->record_method_not_compilable("empty program detected during loop optimization" ); |
| 2838 | } |
| 2839 | return; |
| 2840 | } |
| 2841 | |
| 2842 | BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); |
| 2843 | // Nothing to do, so get out |
| 2844 | bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only && |
| 2845 | !bs->is_gc_specific_loop_opts_pass(mode); |
| 2846 | bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn); |
| 2847 | bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(mode); |
| 2848 | if (stop_early && !do_expensive_nodes) { |
| 2849 | _igvn.optimize(); // Cleanup NeverBranches |
| 2850 | return; |
| 2851 | } |
| 2852 | |
| 2853 | // Set loop nesting depth |
| 2854 | _ltree_root->set_nest( 0 ); |
| 2855 | |
| 2856 | // Split shared headers and insert loop landing pads. |
| 2857 | // Do not bother doing this on the Root loop of course. |
| 2858 | if( !_verify_me && !_verify_only && _ltree_root->_child ) { |
| 2859 | C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3); |
| 2860 | if( _ltree_root->_child->beautify_loops( this ) ) { |
| 2861 | // Re-build loop tree! |
| 2862 | _ltree_root->_child = NULL; |
| 2863 | _nodes.clear(); |
| 2864 | reallocate_preorders(); |
| 2865 | build_loop_tree(); |
| 2866 | // Check for bailout, and return |
| 2867 | if (C->failing()) { |
| 2868 | return; |
| 2869 | } |
| 2870 | // Reset loop nesting depth |
| 2871 | _ltree_root->set_nest( 0 ); |
| 2872 | |
| 2873 | C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3); |
| 2874 | } |
| 2875 | } |
| 2876 | |
| 2877 | // Build Dominators for elision of NULL checks & loop finding. |
| 2878 | // Since nodes do not have a slot for immediate dominator, make |
| 2879 | // a persistent side array for that info indexed on node->_idx. |
| 2880 | _idom_size = C->unique(); |
| 2881 | _idom = NEW_RESOURCE_ARRAY( Node*, _idom_size ); |
| 2882 | _dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size ); |
| 2883 | _dom_stk = NULL; // Allocated on demand in recompute_dom_depth |
| 2884 | memset( _dom_depth, 0, _idom_size * sizeof(uint) ); |
| 2885 | |
| 2886 | Dominators(); |
| 2887 | |
| 2888 | if (!_verify_only) { |
| 2889 | // As a side effect, Dominators removed any unreachable CFG paths |
| 2890 | // into RegionNodes. It doesn't do this test against Root, so |
| 2891 | // we do it here. |
| 2892 | for( uint i = 1; i < C->root()->req(); i++ ) { |
| 2893 | if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root? |
| 2894 | _igvn.delete_input_of(C->root(), i); |
| 2895 | i--; // Rerun same iteration on compressed edges |
| 2896 | } |
| 2897 | } |
| 2898 | |
| 2899 | // Given dominators, try to find inner loops with calls that must |
| 2900 | // always be executed (call dominates loop tail). These loops do |
| 2901 | // not need a separate safepoint. |
| 2902 | Node_List cisstack(a); |
| 2903 | _ltree_root->check_safepts(visited, cisstack); |
| 2904 | } |
| 2905 | |
| 2906 | // Walk the DATA nodes and place into loops. Find earliest control |
| 2907 | // node. For CFG nodes, the _nodes array starts out and remains |
| 2908 | // holding the associated IdealLoopTree pointer. For DATA nodes, the |
| 2909 | // _nodes array holds the earliest legal controlling CFG node. |
| 2910 | |
| 2911 | // Allocate stack with enough space to avoid frequent realloc |
| 2912 | int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats |
| 2913 | Node_Stack nstack( a, stack_size ); |
| 2914 | |
| 2915 | visited.Clear(); |
| 2916 | Node_List worklist(a); |
| 2917 | // Don't need C->root() on worklist since |
| 2918 | // it will be processed among C->top() inputs |
| 2919 | worklist.push( C->top() ); |
| 2920 | visited.set( C->top()->_idx ); // Set C->top() as visited now |
| 2921 | build_loop_early( visited, worklist, nstack ); |
| 2922 | |
| 2923 | // Given early legal placement, try finding counted loops. This placement |
| 2924 | // is good enough to discover most loop invariants. |
| 2925 | if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) { |
| 2926 | _ltree_root->counted_loop( this ); |
| 2927 | } |
| 2928 | |
| 2929 | // Find latest loop placement. Find ideal loop placement. |
| 2930 | visited.Clear(); |
| 2931 | init_dom_lca_tags(); |
| 2932 | // Need C->root() on worklist when processing outs |
| 2933 | worklist.push( C->root() ); |
| 2934 | NOT_PRODUCT( C->verify_graph_edges(); ) |
| 2935 | worklist.push( C->top() ); |
| 2936 | build_loop_late( visited, worklist, nstack ); |
| 2937 | |
| 2938 | if (_verify_only) { |
| 2939 | C->restore_major_progress(old_progress); |
| 2940 | assert(C->unique() == unique, "verification mode made Nodes? ? ?" ); |
| 2941 | assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything" ); |
| 2942 | return; |
| 2943 | } |
| 2944 | |
| 2945 | // clear out the dead code after build_loop_late |
| 2946 | while (_deadlist.size()) { |
| 2947 | _igvn.remove_globally_dead_node(_deadlist.pop()); |
| 2948 | } |
| 2949 | |
| 2950 | if (stop_early) { |
| 2951 | assert(do_expensive_nodes, "why are we here?" ); |
| 2952 | if (process_expensive_nodes()) { |
| 2953 | // If we made some progress when processing expensive nodes then |
| 2954 | // the IGVN may modify the graph in a way that will allow us to |
| 2955 | // make some more progress: we need to try processing expensive |
| 2956 | // nodes again. |
| 2957 | C->set_major_progress(); |
| 2958 | } |
| 2959 | _igvn.optimize(); |
| 2960 | return; |
| 2961 | } |
| 2962 | |
| 2963 | // Some parser-inserted loop predicates could never be used by loop |
| 2964 | // predication or they were moved away from loop during some optimizations. |
| 2965 | // For example, peeling. Eliminate them before next loop optimizations. |
| 2966 | eliminate_useless_predicates(); |
| 2967 | |
| 2968 | #ifndef PRODUCT |
| 2969 | C->verify_graph_edges(); |
| 2970 | if (_verify_me) { // Nested verify pass? |
| 2971 | // Check to see if the verify mode is broken |
| 2972 | assert(C->unique() == unique, "non-optimize mode made Nodes? ? ?" ); |
| 2973 | return; |
| 2974 | } |
| 2975 | if (VerifyLoopOptimizations) verify(); |
| 2976 | if (TraceLoopOpts && C->has_loops()) { |
| 2977 | _ltree_root->dump(); |
| 2978 | } |
| 2979 | #endif |
| 2980 | |
| 2981 | if (skip_loop_opts) { |
| 2982 | // restore major progress flag |
| 2983 | C->restore_major_progress(old_progress); |
| 2984 | |
| 2985 | // Cleanup any modified bits |
| 2986 | _igvn.optimize(); |
| 2987 | |
| 2988 | if (C->log() != NULL) { |
| 2989 | log_loop_tree(_ltree_root, _ltree_root, C->log()); |
| 2990 | } |
| 2991 | return; |
| 2992 | } |
| 2993 | |
| 2994 | if (bs->optimize_loops(this, mode, visited, nstack, worklist)) { |
| 2995 | _igvn.optimize(); |
| 2996 | if (C->log() != NULL) { |
| 2997 | log_loop_tree(_ltree_root, _ltree_root, C->log()); |
| 2998 | } |
| 2999 | return; |
| 3000 | } |
| 3001 | |
| 3002 | if (ReassociateInvariants) { |
| 3003 | // Reassociate invariants and prep for split_thru_phi |
| 3004 | for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { |
| 3005 | IdealLoopTree* lpt = iter.current(); |
| 3006 | bool is_counted = lpt->is_counted(); |
| 3007 | if (!is_counted || !lpt->is_innermost()) continue; |
| 3008 | |
| 3009 | // check for vectorized loops, any reassociation of invariants was already done |
| 3010 | if (is_counted && lpt->_head->as_CountedLoop()->is_unroll_only()) { |
| 3011 | continue; |
| 3012 | } else { |
| 3013 | AutoNodeBudget node_budget(this); |
| 3014 | lpt->reassociate_invariants(this); |
| 3015 | } |
| 3016 | // Because RCE opportunities can be masked by split_thru_phi, |
| 3017 | // look for RCE candidates and inhibit split_thru_phi |
| 3018 | // on just their loop-phi's for this pass of loop opts |
| 3019 | if (SplitIfBlocks && do_split_ifs) { |
| 3020 | AutoNodeBudget node_budget(this, AutoNodeBudget::NO_BUDGET_CHECK); |
| 3021 | if (lpt->policy_range_check(this)) { |
| 3022 | lpt->_rce_candidate = 1; // = true |
| 3023 | } |
| 3024 | } |
| 3025 | } |
| 3026 | } |
| 3027 | |
| 3028 | // Check for aggressive application of split-if and other transforms |
| 3029 | // that require basic-block info (like cloning through Phi's) |
| 3030 | if( SplitIfBlocks && do_split_ifs ) { |
| 3031 | visited.Clear(); |
| 3032 | split_if_with_blocks( visited, nstack); |
| 3033 | NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); ); |
| 3034 | } |
| 3035 | |
| 3036 | if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) { |
| 3037 | C->set_major_progress(); |
| 3038 | } |
| 3039 | |
| 3040 | // Perform loop predication before iteration splitting |
| 3041 | if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) { |
| 3042 | _ltree_root->_child->loop_predication(this); |
| 3043 | } |
| 3044 | |
| 3045 | if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) { |
| 3046 | if (do_intrinsify_fill()) { |
| 3047 | C->set_major_progress(); |
| 3048 | } |
| 3049 | } |
| 3050 | |
| 3051 | // Perform iteration-splitting on inner loops. Split iterations to avoid |
| 3052 | // range checks or one-shot null checks. |
| 3053 | |
| 3054 | // If split-if's didn't hack the graph too bad (no CFG changes) |
| 3055 | // then do loop opts. |
| 3056 | if (C->has_loops() && !C->major_progress()) { |
| 3057 | memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) ); |
| 3058 | _ltree_root->_child->iteration_split( this, worklist ); |
| 3059 | // No verify after peeling! GCM has hoisted code out of the loop. |
| 3060 | // After peeling, the hoisted code could sink inside the peeled area. |
| 3061 | // The peeling code does not try to recompute the best location for |
| 3062 | // all the code before the peeled area, so the verify pass will always |
| 3063 | // complain about it. |
| 3064 | } |
| 3065 | // Do verify graph edges in any case |
| 3066 | NOT_PRODUCT( C->verify_graph_edges(); ); |
| 3067 | |
| 3068 | if (!do_split_ifs) { |
| 3069 | // We saw major progress in Split-If to get here. We forced a |
| 3070 | // pass with unrolling and not split-if, however more split-if's |
| 3071 | // might make progress. If the unrolling didn't make progress |
| 3072 | // then the major-progress flag got cleared and we won't try |
| 3073 | // another round of Split-If. In particular the ever-common |
| 3074 | // instance-of/check-cast pattern requires at least 2 rounds of |
| 3075 | // Split-If to clear out. |
| 3076 | C->set_major_progress(); |
| 3077 | } |
| 3078 | |
| 3079 | // Repeat loop optimizations if new loops were seen |
| 3080 | if (created_loop_node()) { |
| 3081 | C->set_major_progress(); |
| 3082 | } |
| 3083 | |
| 3084 | // Keep loop predicates and perform optimizations with them |
| 3085 | // until no more loop optimizations could be done. |
| 3086 | // After that switch predicates off and do more loop optimizations. |
| 3087 | if (!C->major_progress() && (C->predicate_count() > 0)) { |
| 3088 | C->cleanup_loop_predicates(_igvn); |
| 3089 | if (TraceLoopOpts) { |
| 3090 | tty->print_cr("PredicatesOff" ); |
| 3091 | } |
| 3092 | C->set_major_progress(); |
| 3093 | } |
| 3094 | |
| 3095 | // Convert scalar to superword operations at the end of all loop opts. |
| 3096 | if (UseSuperWord && C->has_loops() && !C->major_progress()) { |
| 3097 | // SuperWord transform |
| 3098 | SuperWord sw(this); |
| 3099 | for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { |
| 3100 | IdealLoopTree* lpt = iter.current(); |
| 3101 | if (lpt->is_counted()) { |
| 3102 | CountedLoopNode *cl = lpt->_head->as_CountedLoop(); |
| 3103 | |
| 3104 | if (PostLoopMultiversioning && cl->is_rce_post_loop() && !cl->is_vectorized_loop()) { |
| 3105 | // Check that the rce'd post loop is encountered first, multiversion after all |
| 3106 | // major main loop optimizations are concluded
| 3107 | if (!C->major_progress()) { |
| 3108 | IdealLoopTree *lpt_next = lpt->_next; |
| 3109 | if (lpt_next && lpt_next->is_counted()) { |
| 3110 | CountedLoopNode *cl = lpt_next->_head->as_CountedLoop(); |
| 3111 | has_range_checks(lpt_next); |
| 3112 | if (cl->is_post_loop() && cl->range_checks_present()) { |
| 3113 | if (!cl->is_multiversioned()) { |
| 3114 | if (multi_version_post_loops(lpt, lpt_next) == false) { |
| 3115 | // Cause the rce loop to be optimized away if we fail |
| 3116 | cl->mark_is_multiversioned(); |
| 3117 | cl->set_slp_max_unroll(0); |
| 3118 | poison_rce_post_loop(lpt); |
| 3119 | } |
| 3120 | } |
| 3121 | } |
| 3122 | } |
| 3123 | sw.transform_loop(lpt, true); |
| 3124 | } |
| 3125 | } else if (cl->is_main_loop()) { |
| 3126 | sw.transform_loop(lpt, true); |
| 3127 | } |
| 3128 | } |
| 3129 | } |
| 3130 | } |
| 3131 | |
| 3132 | // Cleanup any modified bits |
| 3133 | _igvn.optimize(); |
| 3134 | |
| 3135 | // disable assert until issue with split_flow_path is resolved (6742111) |
| 3136 | // assert(!_has_irreducible_loops || C->parsed_irreducible_loop() || C->is_osr_compilation(), |
| 3137 | // "shouldn't introduce irreducible loops"); |
| 3138 | |
| 3139 | if (C->log() != NULL) { |
| 3140 | log_loop_tree(_ltree_root, _ltree_root, C->log()); |
| 3141 | } |
| 3142 | } |
| 3143 | |
| 3144 | #ifndef PRODUCT |
| 3145 | //------------------------------print_statistics------------------------------- |
| 3146 | int PhaseIdealLoop::_loop_invokes=0; // Count of PhaseIdealLoop invokes
| 3147 | int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique |
| 3148 | void PhaseIdealLoop::print_statistics() { |
| 3149 | tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d" , _loop_invokes, _loop_work); |
| 3150 | } |
| 3151 | |
| 3152 | //------------------------------verify----------------------------------------- |
| 3153 | // Build a verify-only PhaseIdealLoop, and see that it agrees with me. |
| 3154 | static int fail; // debug only, so we don't care about multi-threading
| 3155 | void PhaseIdealLoop::verify() const { |
| 3156 | int old_progress = C->major_progress(); |
| 3157 | ResourceMark rm; |
| 3158 | PhaseIdealLoop loop_verify( _igvn, this ); |
| 3159 | VectorSet visited(Thread::current()->resource_area()); |
| 3160 | |
| 3161 | fail = 0; |
| 3162 | verify_compare( C->root(), &loop_verify, visited ); |
| 3163 | assert( fail == 0, "verify loops failed" ); |
| 3164 | // Verify loop structure is the same |
| 3165 | _ltree_root->verify_tree(loop_verify._ltree_root, NULL); |
| 3166 | // Reset major-progress. It was cleared by creating a verify version of |
| 3167 | // PhaseIdealLoop. |
| 3168 | C->restore_major_progress(old_progress); |
| 3169 | } |
| 3170 | |
| 3171 | //------------------------------verify_compare--------------------------------- |
| 3172 | // Make sure me and the given PhaseIdealLoop agree on key data structures |
| 3173 | void PhaseIdealLoop::verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const { |
| 3174 | if( !n ) return; |
| 3175 | if( visited.test_set( n->_idx ) ) return; |
| 3176 | if( !_nodes[n->_idx] ) { // Unreachable |
| 3177 | assert( !loop_verify->_nodes[n->_idx], "both should be unreachable" ); |
| 3178 | return; |
| 3179 | } |
| 3180 | |
| 3181 | uint i; |
| 3182 | for( i = 0; i < n->req(); i++ ) |
| 3183 | verify_compare( n->in(i), loop_verify, visited ); |
| 3184 | |
| 3185 | // Check the '_nodes' block/loop structure |
| 3186 | i = n->_idx; |
| 3187 | if( has_ctrl(n) ) { // We have control; verify has loop or ctrl |
| 3188 | if( _nodes[i] != loop_verify->_nodes[i] && |
| 3189 | get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) { |
| 3190 | tty->print("Mismatched control setting for: " ); |
| 3191 | n->dump(); |
| 3192 | if( fail++ > 10 ) return; |
| 3193 | Node *c = get_ctrl_no_update(n); |
| 3194 | tty->print("We have it as: " ); |
| 3195 | if( c->in(0) ) c->dump(); |
| 3196 | else tty->print_cr("N%d" ,c->_idx); |
| 3197 | tty->print("Verify thinks: " ); |
| 3198 | if( loop_verify->has_ctrl(n) ) |
| 3199 | loop_verify->get_ctrl_no_update(n)->dump(); |
| 3200 | else |
| 3201 | loop_verify->get_loop_idx(n)->dump(); |
| 3202 | tty->cr(); |
| 3203 | } |
| 3204 | } else { // We have a loop |
| 3205 | IdealLoopTree *us = get_loop_idx(n); |
| 3206 | if( loop_verify->has_ctrl(n) ) { |
| 3207 | tty->print("Mismatched loop setting for: " ); |
| 3208 | n->dump(); |
| 3209 | if( fail++ > 10 ) return; |
| 3210 | tty->print("We have it as: " ); |
| 3211 | us->dump(); |
| 3212 | tty->print("Verify thinks: " ); |
| 3213 | loop_verify->get_ctrl_no_update(n)->dump(); |
| 3214 | tty->cr(); |
| 3215 | } else if (!C->major_progress()) { |
| 3216 | // Loop selection can be messed up if we did a major progress |
| 3217 | // operation, like split-if. Do not verify in that case. |
| 3218 | IdealLoopTree *them = loop_verify->get_loop_idx(n); |
| 3219 | if( us->_head != them->_head || us->_tail != them->_tail ) { |
| 3220 | tty->print("Unequal loops for: " );
| 3221 | n->dump(); |
| 3222 | if( fail++ > 10 ) return; |
| 3223 | tty->print("We have it as: " ); |
| 3224 | us->dump(); |
| 3225 | tty->print("Verify thinks: " ); |
| 3226 | them->dump(); |
| 3227 | tty->cr(); |
| 3228 | } |
| 3229 | } |
| 3230 | } |
| 3231 | |
| 3232 | // Check for immediate dominators being equal |
| 3233 | if( i >= _idom_size ) { |
| 3234 | if( !n->is_CFG() ) return; |
| 3235 | tty->print("CFG Node with no idom: " ); |
| 3236 | n->dump(); |
| 3237 | return; |
| 3238 | } |
| 3239 | if( !n->is_CFG() ) return; |
| 3240 | if( n == C->root() ) return; // No IDOM here |
| 3241 | |
| 3242 | assert(n->_idx == i, "sanity" ); |
| 3243 | Node *id = idom_no_update(n); |
| 3244 | if( id != loop_verify->idom_no_update(n) ) { |
| 3245 | tty->print("Unequal idoms for: " );
| 3246 | n->dump(); |
| 3247 | if( fail++ > 10 ) return; |
| 3248 | tty->print("We have it as: " ); |
| 3249 | id->dump(); |
| 3250 | tty->print("Verify thinks: " ); |
| 3251 | loop_verify->idom_no_update(n)->dump(); |
| 3252 | tty->cr(); |
| 3253 | } |
| 3254 | |
| 3255 | } |
| 3256 | |
| 3257 | //------------------------------verify_tree------------------------------------ |
| 3258 | // Verify that tree structures match. Because the CFG can change, siblings |
| 3259 | // within the loop tree can be reordered. We attempt to deal with that by |
| 3260 | // reordering the verify's loop tree if possible. |
| 3261 | void IdealLoopTree::verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const { |
| 3262 | assert( _parent == parent, "Badly formed loop tree" ); |
| 3263 | |
| 3264 | // Siblings not in same order? Attempt to re-order. |
| 3265 | if( _head != loop->_head ) { |
| 3266 | // Find _next pointer to update |
| 3267 | IdealLoopTree **pp = &loop->_parent->_child; |
| 3268 | while( *pp != loop ) |
| 3269 | pp = &((*pp)->_next); |
| 3270 | // Find proper sibling to be next |
| 3271 | IdealLoopTree **nn = &loop->_next; |
| 3272 | while( (*nn) && (*nn)->_head != _head ) |
| 3273 | nn = &((*nn)->_next); |
| 3274 | |
| 3275 | // Check for no match. |
| 3276 | if( !(*nn) ) { |
| 3277 | // Annoyingly, irreducible loops can pick different headers |
| 3278 | // after a major_progress operation, so the rest of the loop |
| 3279 | // tree cannot be matched. |
| 3280 | if (_irreducible && Compile::current()->major_progress()) return; |
| 3281 | assert( 0, "failed to match loop tree" ); |
| 3282 | } |
| 3283 | |
| 3284 | // Move (*nn) to (*pp) |
| 3285 | IdealLoopTree *hit = *nn; |
| 3286 | *nn = hit->_next; |
| 3287 | hit->_next = loop; |
| 3288 | *pp = loop; |
| 3289 | loop = hit; |
| 3290 | // Now try again to verify |
| 3291 | } |
| 3292 | |
| 3293 | assert( _head == loop->_head , "mismatched loop head" ); |
| 3294 | Node *tail = _tail; // Inline a non-updating version of |
| 3295 | while( !tail->in(0) ) // the 'tail()' call. |
| 3296 | tail = tail->in(1); |
| 3297 | assert( tail == loop->_tail, "mismatched loop tail" ); |
| 3298 | |
| 3299 | // Counted loops that are guarded should be able to find their guards |
| 3300 | if( _head->is_CountedLoop() && _head->as_CountedLoop()->is_main_loop() ) { |
| 3301 | CountedLoopNode *cl = _head->as_CountedLoop(); |
| 3302 | Node *init = cl->init_trip(); |
| 3303 | Node *ctrl = cl->in(LoopNode::EntryControl); |
| 3304 | assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); |
| 3305 | Node *iff = ctrl->in(0); |
| 3306 | assert( iff->Opcode() == Op_If, "" ); |
| 3307 | Node *bol = iff->in(1); |
| 3308 | assert( bol->Opcode() == Op_Bool, "" ); |
| 3309 | Node *cmp = bol->in(1); |
| 3310 | assert( cmp->Opcode() == Op_CmpI, "" ); |
| 3311 | Node *add = cmp->in(1); |
| 3312 | Node *opaq; |
| 3313 | if( add->Opcode() == Op_Opaque1 ) { |
| 3314 | opaq = add; |
| 3315 | } else { |
| 3316 | assert( add->Opcode() == Op_AddI || add->Opcode() == Op_ConI , "" ); |
| 3317 | assert( add == init, "" ); |
| 3318 | opaq = cmp->in(2); |
| 3319 | } |
| 3320 | assert( opaq->Opcode() == Op_Opaque1, "" ); |
| 3321 | |
| 3322 | } |
| 3323 | |
| 3324 | if (_child != NULL) _child->verify_tree(loop->_child, this); |
| 3325 | if (_next != NULL) _next ->verify_tree(loop->_next, parent); |
| 3326 | // Innermost loops need to verify loop bodies, |
| 3327 | // but only if no 'major_progress' |
| 3328 | int fail = 0; |
| 3329 | if (!Compile::current()->major_progress() && _child == NULL) { |
| 3330 | for( uint i = 0; i < _body.size(); i++ ) { |
| 3331 | Node *n = _body.at(i); |
| 3332 | if (n->outcnt() == 0) continue; // Ignore dead |
| 3333 | uint j; |
| 3334 | for( j = 0; j < loop->_body.size(); j++ ) |
| 3335 | if( loop->_body.at(j) == n ) |
| 3336 | break; |
| 3337 | if( j == loop->_body.size() ) { // Not found in loop body |
| 3338 | // Last ditch effort to avoid assertion: It's possible that we
| 3339 | // have some users (so outcnt is not zero) but are still dead.
| 3340 | // Try to find from root. |
| 3341 | if (Compile::current()->root()->find(n->_idx)) { |
| 3342 | fail++; |
| 3343 | tty->print("We have that verify does not: " ); |
| 3344 | n->dump(); |
| 3345 | } |
| 3346 | } |
| 3347 | } |
| 3348 | for( uint i2 = 0; i2 < loop->_body.size(); i2++ ) { |
| 3349 | Node *n = loop->_body.at(i2); |
| 3350 | if (n->outcnt() == 0) continue; // Ignore dead |
| 3351 | uint j; |
| 3352 | for( j = 0; j < _body.size(); j++ ) |
| 3353 | if( _body.at(j) == n ) |
| 3354 | break; |
| 3355 | if( j == _body.size() ) { // Not found in loop body |
| 3356 | // Last ditch effort to avoid assertion: It's possible that we
| 3357 | // have some users (so outcnt is not zero) but are still dead.
| 3358 | // Try to find from root. |
| 3359 | if (Compile::current()->root()->find(n->_idx)) { |
| 3360 | fail++; |
| 3361 | tty->print("Verify has that we do not: " ); |
| 3362 | n->dump(); |
| 3363 | } |
| 3364 | } |
| 3365 | } |
| 3366 | assert( !fail, "loop body mismatch" ); |
| 3367 | } |
| 3368 | } |
| 3369 | |
| 3370 | #endif |
| 3371 | |
| 3372 | //------------------------------set_idom--------------------------------------- |
| 3373 | void PhaseIdealLoop::set_idom(Node* d, Node* n, uint dom_depth) { |
| 3374 | uint idx = d->_idx; |
| 3375 | if (idx >= _idom_size) { |
| 3376 | uint newsize = _idom_size<<1; |
| 3377 | while( idx >= newsize ) { |
| 3378 | newsize <<= 1; |
| 3379 | } |
| 3380 | _idom = REALLOC_RESOURCE_ARRAY( Node*, _idom,_idom_size,newsize); |
| 3381 | _dom_depth = REALLOC_RESOURCE_ARRAY( uint, _dom_depth,_idom_size,newsize); |
| 3382 | memset( _dom_depth + _idom_size, 0, (newsize - _idom_size) * sizeof(uint) ); |
| 3383 | _idom_size = newsize; |
| 3384 | } |
| 3385 | _idom[idx] = n; |
| 3386 | _dom_depth[idx] = dom_depth; |
| 3387 | } |
| 3388 | |
| 3389 | //------------------------------recompute_dom_depth--------------------------------------- |
| 3390 | // The dominator tree is constructed with only parent pointers. |
| 3391 | // This recomputes the depth in the tree by first tagging all |
| 3392 | // nodes as "no depth yet" marker. The next pass then runs up |
| 3393 | // the dom tree from each node marked "no depth yet", and computes |
| 3394 | // the depth on the way back down. |
| 3395 | void PhaseIdealLoop::recompute_dom_depth() { |
| 3396 | uint no_depth_marker = C->unique(); |
| 3397 | uint i; |
| 3398 | // Initialize depth to "no depth yet" and realize all lazy updates |
| 3399 | for (i = 0; i < _idom_size; i++) { |
| 3400 | // Only indices with a _dom_depth have a Node* or NULL (otherwise uninitialized).
| 3401 | if (_dom_depth[i] > 0 && _idom[i] != NULL) { |
| 3402 | _dom_depth[i] = no_depth_marker; |
| 3403 | |
| 3404 | // heal _idom if it has a fwd mapping in _nodes |
| 3405 | if (_idom[i]->in(0) == NULL) { |
| 3406 | idom(i); |
| 3407 | } |
| 3408 | } |
| 3409 | } |
| 3410 | if (_dom_stk == NULL) { |
| 3411 | uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size. |
| 3412 | if (init_size < 10) init_size = 10; |
| 3413 | _dom_stk = new GrowableArray<uint>(init_size); |
| 3414 | } |
| 3415 | // Compute new depth for each node. |
| 3416 | for (i = 0; i < _idom_size; i++) { |
| 3417 | uint j = i; |
| 3418 | // Run up the dom tree to find a node with a depth |
| 3419 | while (_dom_depth[j] == no_depth_marker) { |
| 3420 | _dom_stk->push(j); |
| 3421 | j = _idom[j]->_idx; |
| 3422 | } |
| 3423 | // Compute the depth on the way back down this tree branch |
| 3424 | uint dd = _dom_depth[j] + 1; |
| 3425 | while (_dom_stk->length() > 0) { |
| 3426 | uint j = _dom_stk->pop(); |
| 3427 | _dom_depth[j] = dd; |
| 3428 | dd++; |
| 3429 | } |
| 3430 | } |
| 3431 | } |
| 3432 | |
| 3433 | //------------------------------sort------------------------------------------- |
| 3434 | // Insert 'loop' into the existing loop tree. 'innermost' is a leaf of the |
| 3435 | // loop tree, not the root. |
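|      | // Note on the resulting order: the chain runs through the _parent
|      | // pointers and is kept sorted by header pre-order number (ties broken
|      | // by tail pre-order), with larger pre-order numbers, i.e. more deeply
|      | // nested headers, appearing first.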
| 3436 | IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermost ) { |
| 3437 | if( !innermost ) return loop; // New innermost loop |
| 3438 | |
| 3439 | int loop_preorder = get_preorder(loop->_head); // Cache pre-order number |
| 3440 | assert( loop_preorder, "not yet post-walked loop" ); |
| 3441 | IdealLoopTree **pp = &innermost; // Pointer to previous next-pointer |
| 3442 | IdealLoopTree *l = *pp; // Do I go before or after 'l'? |
| 3443 | |
| 3444 | // Insert at start of list |
| 3445 | while( l ) { // Insertion sort based on pre-order |
| 3446 | if( l == loop ) return innermost; // Already on list! |
| 3447 | int l_preorder = get_preorder(l->_head); // Cache pre-order number |
| 3448 | assert( l_preorder, "not yet post-walked l" ); |
| 3449 | // Check header pre-order number to figure proper nesting |
| 3450 | if( loop_preorder > l_preorder ) |
| 3451 | break; // End of insertion |
| 3452 | // If headers tie (e.g., shared headers) check tail pre-order numbers. |
| 3453 | // Since I split shared headers, you'd think this could not happen. |
| 3454 | // BUT: I must first do the preorder numbering before I can discover I |
| 3455 | // have shared headers, so the split headers all get the same preorder |
| 3456 | // number as the RegionNode they split from. |
| 3457 | if( loop_preorder == l_preorder && |
| 3458 | get_preorder(loop->_tail) < get_preorder(l->_tail) ) |
| 3459 | break; // Also check for shared headers (same pre#) |
| 3460 | pp = &l->_parent; // Chain up list |
| 3461 | l = *pp; |
| 3462 | } |
| 3463 | // Link into list |
| 3464 | // Point predecessor to me |
| 3465 | *pp = loop; |
| 3466 | // Point me to successor |
| 3467 | IdealLoopTree *p = loop->_parent; |
| 3468 | loop->_parent = l; // Point me to successor |
| 3469 | if( p ) sort( p, innermost ); // Insert my parents into list as well |
| 3470 | return innermost; |
| 3471 | } |
| 3472 | |
| 3473 | //------------------------------build_loop_tree-------------------------------- |
| 3474 | // I use a modified Vick/Tarjan algorithm. I need pre- and a post- visit |
| 3475 | // bits. The _nodes[] array is mapped by Node index and holds a NULL for |
| 3476 | // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the |
| 3477 | // tightest enclosing IdealLoopTree for post-walked. |
| 3478 | // |
| 3479 | // During my forward walk I do a short 1-layer lookahead to see if I can find |
| 3480 | // a loop backedge that doesn't have any work on the backedge. This
| 3481 | // helps me construct nested loops with shared headers better. |
| 3482 | // |
| 3483 | // Once I've done the forward recursion, I do the post-work. For each child |
| 3484 | // I check to see if there is a backedge. Backedges define a loop! I |
| 3485 | // insert an IdealLoopTree at the target of the backedge. |
| 3486 | // |
| 3487 | // During the post-work I also check to see if I have several children |
| 3488 | // belonging to different loops. If so, then this Node is a decision point |
| 3489 | // where control flow can choose to change loop nests. It is at this |
| 3490 | // decision point where I can figure out how loops are nested. At this |
| 3491 | // time I can properly order the different loop nests from my children. |
| 3492 | // Note that there may not be any backedges at the decision point! |
| 3493 | // |
| 3494 | // Since the decision point can be far removed from the backedges, I can't |
| 3495 | // order my loops at the time I discover them. Thus at the decision point |
| 3496 | // I need to inspect loop header pre-order numbers to properly nest my |
| 3497 | // loops. This means I need to sort my children's loops by pre-order.
| 3498 | // The sort is of size number-of-control-children, which generally limits |
| 3499 | // it to size 2 (i.e., I just choose between my 2 target loops). |
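|      | // The walk below is an explicit-stack DFS rather than a recursion: a
|      | // node is pre-visited the first time it is seen, stays on 'bltstack'
|      | // until all of its CFG children have been visited, and is then popped
|      | // and post-processed by build_loop_tree_impl().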
| 3500 | void PhaseIdealLoop::build_loop_tree() { |
| 3501 | // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc |
| 3502 | GrowableArray <Node *> bltstack(C->live_nodes() >> 1); |
| 3503 | Node *n = C->root(); |
| 3504 | bltstack.push(n); |
| 3505 | int pre_order = 1; |
| 3506 | int stack_size; |
| 3507 | |
| 3508 | while ( ( stack_size = bltstack.length() ) != 0 ) { |
| 3509 | n = bltstack.top(); // Leave node on stack |
| 3510 | if ( !is_visited(n) ) { |
| 3511 | // ---- Pre-pass Work ---- |
| 3512 | // Pre-walked but not post-walked nodes need a pre_order number. |
| 3513 | |
| 3514 | set_preorder_visited( n, pre_order ); // set as visited |
| 3515 | |
| 3516 | // ---- Scan over children ---- |
| 3517 | // Scan first over control projections that lead to loop headers. |
| 3518 | // This helps us find inner-to-outer loops with shared headers better. |
| 3519 | |
| 3520 | // Scan children's children for loop headers. |
| 3521 | for ( int i = n->outcnt() - 1; i >= 0; --i ) { |
| 3522 | Node* m = n->raw_out(i); // Child |
| 3523 | if( m->is_CFG() && !is_visited(m) ) { // Only for CFG children |
| 3524 | // Scan over children's children to find loop |
| 3525 | for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { |
| 3526 | Node* l = m->fast_out(j); |
| 3527 | if( is_visited(l) && // Been visited? |
| 3528 | !is_postvisited(l) && // But not post-visited |
| 3529 | get_preorder(l) < pre_order ) { // And smaller pre-order |
| 3530 | // Found! Scan the DFS down this path before doing other paths |
| 3531 | bltstack.push(m); |
| 3532 | break; |
| 3533 | } |
| 3534 | } |
| 3535 | } |
| 3536 | } |
| 3537 | pre_order++; |
| 3538 | } |
| 3539 | else if ( !is_postvisited(n) ) { |
| 3540 | // Note: build_loop_tree_impl() adds out edges on rare occasions, |
| 3541 | // such as com.sun.rsasign.am::a. |
| 3542 | // For non-recursive version, first, process current children. |
| 3543 | // On next iteration, check if additional children were added. |
| 3544 | for ( int k = n->outcnt() - 1; k >= 0; --k ) { |
| 3545 | Node* u = n->raw_out(k); |
| 3546 | if ( u->is_CFG() && !is_visited(u) ) { |
| 3547 | bltstack.push(u); |
| 3548 | } |
| 3549 | } |
| 3550 | if ( bltstack.length() == stack_size ) { |
| 3551 | // There were no additional children, post visit node now |
| 3552 | (void)bltstack.pop(); // Remove node from stack |
| 3553 | pre_order = build_loop_tree_impl( n, pre_order ); |
| 3554 | // Check for bailout |
| 3555 | if (C->failing()) { |
| 3556 | return; |
| 3557 | } |
| 3558 | // Check to grow _preorders[] array for the case when |
| 3559 | // build_loop_tree_impl() adds new nodes. |
| 3560 | check_grow_preorders(); |
| 3561 | } |
| 3562 | } |
| 3563 | else { |
| 3564 | (void)bltstack.pop(); // Remove post-visited node from stack |
| 3565 | } |
| 3566 | } |
| 3567 | } |
| 3568 | |
| 3569 | //------------------------------build_loop_tree_impl--------------------------- |
| 3570 | int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) { |
| 3571 | // ---- Post-pass Work ---- |
| 3572 | // Pre-walked but not post-walked nodes need a pre_order number. |
| 3573 | |
| 3574 | // Tightest enclosing loop for this Node |
| 3575 | IdealLoopTree *innermost = NULL; |
| 3576 | |
| 3577 | // For all children, see if any edge is a backedge. If so, make a loop |
| 3578 | // for it. Then find the tightest enclosing loop for the self Node. |
| 3579 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { |
| 3580 | Node* m = n->fast_out(i); // Child |
| 3581 | if( n == m ) continue; // Ignore control self-cycles |
| 3582 | if( !m->is_CFG() ) continue;// Ignore non-CFG edges |
| 3583 | |
| 3584 | IdealLoopTree *l; // Child's loop |
| 3585 | if( !is_postvisited(m) ) { // Child visited but not post-visited? |
| 3586 | // Found a backedge |
| 3587 | assert( get_preorder(m) < pre_order, "should be backedge" ); |
| 3588 | // Check for the RootNode, which is already a LoopNode and is allowed |
| 3589 | // to have multiple "backedges". |
| 3590 | if( m == C->root()) { // Found the root? |
| 3591 | l = _ltree_root; // Root is the outermost LoopNode |
| 3592 | } else { // Else found a nested loop |
| 3593 | // Insert a LoopNode to mark this loop. |
| 3594 | l = new IdealLoopTree(this, m, n); |
| 3595 | } // End of Else found a nested loop |
| 3596 | if( !has_loop(m) ) // If 'm' does not already have a loop set |
| 3597 | set_loop(m, l); // Set loop header to loop now |
| 3598 | |
| 3599 | } else { // Else not a nested loop |
| 3600 | if( !_nodes[m->_idx] ) continue; // Dead code has no loop |
| 3601 | l = get_loop(m); // Get previously determined loop |
| 3602 | // If successor is header of a loop (nest), move up-loop till it |
| 3603 | // is a member of some outer enclosing loop. Since there are no |
| 3604 | // shared headers (I've split them already) I only need to go up |
| 3605 | // at most 1 level. |
| 3606 | while( l && l->_head == m ) // Successor heads loop? |
| 3607 | l = l->_parent; // Move up 1 for me |
| 3608 | // If this loop is not properly parented, then this loop |
| 3609 | // has no exit path out, i.e. it is an infinite loop.
| 3610 | if( !l ) { |
| 3611 | // Make loop "reachable" from root so the CFG is reachable. Basically |
| 3612 | // insert a bogus loop exit that is never taken. 'm', the loop head, |
| 3613 | // points to 'n', one (of possibly many) fall-in paths. There may be |
| 3614 | // many backedges as well. |
| 3615 | |
| 3616 | // Here I set the loop to be the root loop. I could have, after |
| 3617 | // inserting a bogus loop exit, restarted the recursion and found my |
| 3618 | // new loop exit. This would make the infinite loop a first-class |
| 3619 | // loop and it would then get properly optimized. What's the use of |
| 3620 | // optimizing an infinite loop? |
| 3621 | l = _ltree_root; // Oops, found infinite loop |
| 3622 | |
| 3623 | if (!_verify_only) { |
| 3624 | // Insert the NeverBranch between 'm' and its control user.
| 3625 | NeverBranchNode *iff = new NeverBranchNode( m ); |
| 3626 | _igvn.register_new_node_with_optimizer(iff); |
| 3627 | set_loop(iff, l); |
| 3628 | Node *if_t = new CProjNode( iff, 0 ); |
| 3629 | _igvn.register_new_node_with_optimizer(if_t); |
| 3630 | set_loop(if_t, l); |
| 3631 | |
| 3632 | Node* cfg = NULL; // Find the One True Control User of m |
| 3633 | for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { |
| 3634 | Node* x = m->fast_out(j); |
| 3635 | if (x->is_CFG() && x != m && x != iff) |
| 3636 | { cfg = x; break; } |
| 3637 | } |
| 3638 | assert(cfg != NULL, "must find the control user of m" ); |
| 3639 | uint k = 0; // Probably cfg->in(0) |
| 3640 | while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
| 3641 | cfg->set_req( k, if_t ); // Now point to NeverBranch |
| 3642 | _igvn._worklist.push(cfg); |
| 3643 | |
| 3644 | // Now create the never-taken loop exit |
| 3645 | Node *if_f = new CProjNode( iff, 1 ); |
| 3646 | _igvn.register_new_node_with_optimizer(if_f); |
| 3647 | set_loop(if_f, l); |
| 3648 | // Find frame ptr for Halt. Relies on the optimizer |
| 3649 | // V-N'ing. Easier and quicker than searching through |
| 3650 | // the program structure. |
| 3651 | Node *frame = new ParmNode( C->start(), TypeFunc::FramePtr ); |
| 3652 | _igvn.register_new_node_with_optimizer(frame); |
| 3653 | // Halt & Catch Fire |
| 3654 | Node *halt = new HaltNode( if_f, frame ); |
| 3655 | _igvn.register_new_node_with_optimizer(halt); |
| 3656 | set_loop(halt, l); |
| 3657 | C->root()->add_req(halt); |
| 3658 | } |
| 3659 | set_loop(C->root(), _ltree_root); |
| 3660 | } |
| 3661 | } |
| 3662 | // Weeny check for irreducible. This child was already visited (this |
| 3663 | // IS the post-work phase). Is this child's loop header post-visited |
| 3664 | // as well? If so, then I found another entry into the loop. |
| 3665 | if (!_verify_only) { |
| 3666 | while( is_postvisited(l->_head) ) { |
| 3667 | // found irreducible |
| 3668 | l->_irreducible = 1; // = true |
| 3669 | l = l->_parent; |
| 3670 | _has_irreducible_loops = true; |
| 3671 | // Check for bad CFG here to prevent crash, and bailout of compile |
| 3672 | if (l == NULL) { |
| 3673 | C->record_method_not_compilable("unhandled CFG detected during loop optimization" ); |
| 3674 | return pre_order; |
| 3675 | } |
| 3676 | } |
| 3677 | C->set_has_irreducible_loop(_has_irreducible_loops); |
| 3678 | } |
| 3679 | |
| 3680 | // This Node might be a decision point for loops. It is only if |
| 3681 | // its children belong to several different loops. The sort call
| 3682 | // does a trivial amount of work if there is only 1 child or all |
| 3683 | // children belong to the same loop. If however, the children |
| 3684 | // belong to different loops, the sort call will properly set the |
| 3685 | // _parent pointers to show how the loops nest. |
| 3686 | // |
| 3687 | // In any case, it returns the tightest enclosing loop. |
| 3688 | innermost = sort( l, innermost ); |
| 3689 | } |
| 3690 | |
| 3691 | // Def-use info will have some dead stuff; dead stuff will have no |
| 3692 | // loop decided on. |
| 3693 | |
| 3694 | // Am I a loop header? If so fix up my parent's child and next ptrs. |
| 3695 | if( innermost && innermost->_head == n ) { |
| 3696 | assert( get_loop(n) == innermost, "" ); |
| 3697 | IdealLoopTree *p = innermost->_parent; |
| 3698 | IdealLoopTree *l = innermost; |
| 3699 | while( p && l->_head == n ) { |
| 3700 | l->_next = p->_child; // Put self on parent's 'next child' list
| 3701 | p->_child = l; // Make self as first child of parent |
| 3702 | l = p; // Now walk up the parent chain |
| 3703 | p = l->_parent; |
| 3704 | } |
| 3705 | } else { |
| 3706 | // Note that it is possible for a LoopNode to reach here, if the |
| 3707 | // backedge has been made unreachable (hence the LoopNode no longer |
| 3708 | // denotes a Loop, and will eventually be removed). |
| 3709 | |
| 3710 | // Record tightest enclosing loop for self. Mark as post-visited. |
| 3711 | set_loop(n, innermost); |
| 3712 | // Also record has_call flag early on |
| 3713 | if( innermost ) { |
| 3714 | if( n->is_Call() && !n->is_CallLeaf() && !n->is_macro() ) { |
| 3715 | // Do not count uncommon calls |
| 3716 | if( !n->is_CallStaticJava() || !n->as_CallStaticJava()->_name ) { |
| 3717 | Node *iff = n->in(0)->in(0); |
| 3718 | // No calls are allowed in vectorized loops.
| 3719 | if( UseSuperWord || !iff->is_If() || |
| 3720 | (n->in(0)->Opcode() == Op_IfFalse && |
| 3721 | (1.0 - iff->as_If()->_prob) >= 0.01) || |
| 3722 | (iff->as_If()->_prob >= 0.01) ) |
| 3723 | innermost->_has_call = 1; |
| 3724 | } |
| 3725 | } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) { |
| 3726 | // Disable loop optimizations if the loop has a scalar replaceable |
| 3727 | // allocation. This disabling may cause a potential performance loss
| 3728 | // if the allocation is not eliminated for some reason. |
| 3729 | innermost->_allow_optimizations = false; |
| 3730 | innermost->_has_call = 1; // = true |
| 3731 | } else if (n->Opcode() == Op_SafePoint) { |
| 3732 | // Record all safepoints in this loop. |
| 3733 | if (innermost->_safepts == NULL) innermost->_safepts = new Node_List(); |
| 3734 | innermost->_safepts->push(n); |
| 3735 | } |
| 3736 | } |
| 3737 | } |
| 3738 | |
| 3739 | // Flag as post-visited now |
| 3740 | set_postvisited(n); |
| 3741 | return pre_order; |
| 3742 | } |
| 3743 | |
| 3744 | |
| 3745 | //------------------------------build_loop_early------------------------------- |
| 3746 | // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. |
| 3747 | // First pass computes the earliest controlling node possible. This is the |
| 3748 | // controlling input with the deepest dominating depth. |
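|      | // In other words: for a data node the earliest legal control is the
|      | // deepest (in the dominator tree) of the controls already assigned to
|      | // its inputs; set_early_ctrl() records that choice in _nodes[].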
| 3749 | void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { |
| 3750 | while (worklist.size() != 0) { |
| 3751 | // Use local variables nstack_top_n & nstack_top_i to cache values |
| 3752 | // on nstack's top. |
| 3753 | Node *nstack_top_n = worklist.pop(); |
| 3754 | uint nstack_top_i = 0; |
| 3755 | //while_nstack_nonempty: |
| 3756 | while (true) { |
| 3757 | // Get parent node and next input's index from stack's top. |
| 3758 | Node *n = nstack_top_n; |
| 3759 | uint i = nstack_top_i; |
| 3760 | uint cnt = n->req(); // Count of inputs |
| 3761 | if (i == 0) { // Pre-process the node. |
| 3762 | if( has_node(n) && // Have either loop or control already? |
| 3763 | !has_ctrl(n) ) { // Have loop picked out already? |
| 3764 | // During "merge_many_backedges" we fold up several nested loops |
| 3765 | // into a single loop. This makes the members of the original |
| 3766 | // loop bodies point to dead loops; they need to move up
| 3767 | // to the new UNION'd larger loop. I set the _head field of these |
| 3768 | // dead loops to NULL and the _parent field points to the owning |
| 3769 | // loop. Shades of UNION-FIND algorithm. |
| 3770 | IdealLoopTree *ilt; |
| 3771 | while( !(ilt = get_loop(n))->_head ) { |
| 3772 | // Normally I would use a set_loop here. But in this one special |
| 3773 | // case, it is legal (and expected) to change what loop a Node |
| 3774 | // belongs to. |
| 3775 | _nodes.map(n->_idx, (Node*)(ilt->_parent) ); |
| 3776 | } |
| 3777 | // Remove safepoints ONLY if I've already seen I don't need one. |
| 3778 | // (the old code here would yank a 2nd safepoint after seeing a |
| 3779 | // first one, even though the 1st did not dominate in the loop body |
| 3780 | // and thus could be avoided indefinitely) |
| 3781 | if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint && |
| 3782 | is_deleteable_safept(n)) { |
| 3783 | Node *in = n->in(TypeFunc::Control); |
| 3784 | lazy_replace(n,in); // Pull safepoint now |
| 3785 | if (ilt->_safepts != NULL) { |
| 3786 | ilt->_safepts->yank(n); |
| 3787 | } |
| 3788 | // Carry on with the recursion "as if" we are walking |
| 3789 | // only the control input |
| 3790 | if( !visited.test_set( in->_idx ) ) { |
| 3791 | worklist.push(in); // Visit this guy later, using worklist |
| 3792 | } |
| 3793 | // Get next node from nstack: |
| 3794 | // - skip n's inputs processing by setting i > cnt; |
| 3795 | // - we also will not call set_early_ctrl(n) since |
| 3796 | // has_node(n) == true (see the condition above). |
| 3797 | i = cnt + 1; |
| 3798 | } |
| 3799 | } |
| 3800 | } // if (i == 0) |
| 3801 | |
| 3802 | // Visit all inputs |
| 3803 | bool done = true; // Assume all n's inputs will be processed |
| 3804 | while (i < cnt) { |
| 3805 | Node *in = n->in(i); |
| 3806 | ++i; |
| 3807 | if (in == NULL) continue; |
| 3808 | if (in->pinned() && !in->is_CFG()) |
| 3809 | set_ctrl(in, in->in(0)); |
| 3810 | int is_visited = visited.test_set( in->_idx ); |
| 3811 | if (!has_node(in)) { // No controlling input yet? |
| 3812 | assert( !in->is_CFG(), "CFG Node with no controlling input?" ); |
| 3813 | assert( !is_visited, "visit only once" ); |
| 3814 | nstack.push(n, i); // Save parent node and next input's index. |
| 3815 | nstack_top_n = in; // Process current input now. |
| 3816 | nstack_top_i = 0; |
| 3817 | done = false; // Not all n's inputs processed. |
| 3818 | break; // continue while_nstack_nonempty; |
| 3819 | } else if (!is_visited) { |
| 3820 | // This guy has a location picked out for him, but has not yet |
| 3821 | // been visited. Happens to all CFG nodes, for instance. |
| 3822 | // Visit him using the worklist instead of recursion, to break |
| 3823 | // cycles. Since he has a location already we do not need to |
| 3824 | // find his location before proceeding with the current Node. |
| 3825 | worklist.push(in); // Visit this guy later, using worklist |
| 3826 | } |
| 3827 | } |
| 3828 | if (done) { |
| 3829 | // All of n's inputs have been processed, complete post-processing. |
| 3830 | |
| 3831 | // Compute earliest point this Node can go. |
| 3832 | // CFG, Phi, pinned nodes already know their controlling input. |
| 3833 | if (!has_node(n)) { |
| 3834 | // Record earliest legal location |
| 3835 | set_early_ctrl( n ); |
| 3836 | } |
| 3837 | if (nstack.is_empty()) { |
| 3838 | // Finished all nodes on stack. |
| 3839 | // Process next node on the worklist. |
| 3840 | break; |
| 3841 | } |
| 3842 | // Get saved parent node and next input's index. |
| 3843 | nstack_top_n = nstack.node(); |
| 3844 | nstack_top_i = nstack.index(); |
| 3845 | nstack.pop(); |
| 3846 | } |
| 3847 | } // while (true) |
| 3848 | } |
| 3849 | } |
| 3850 | |
| 3851 | //------------------------------dom_lca_internal-------------------------------- |
| 3852 | // Pair-wise LCA |
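|      | // The loop below repeatedly lifts whichever node is deeper up to its
|      | // idom. When both sit at the same depth (possible after edits of the
|      | // dominator tree), each node's run of equal-depth ancestors is scanned
|      | // for the other node before lifting both past that run.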
| 3853 | Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const { |
| 3854 | if( !n1 ) return n2; // Handle NULL original LCA |
| 3855 | assert( n1->is_CFG(), "" ); |
| 3856 | assert( n2->is_CFG(), "" ); |
| 3857 | // find LCA of all uses |
| 3858 | uint d1 = dom_depth(n1); |
| 3859 | uint d2 = dom_depth(n2); |
| 3860 | while (n1 != n2) { |
| 3861 | if (d1 > d2) { |
| 3862 | n1 = idom(n1); |
| 3863 | d1 = dom_depth(n1); |
| 3864 | } else if (d1 < d2) { |
| 3865 | n2 = idom(n2); |
| 3866 | d2 = dom_depth(n2); |
| 3867 | } else { |
| 3868 | // Here d1 == d2. Due to edits of the dominator-tree, sections |
| 3869 | // of the tree might have the same depth. These sections have |
| 3870 | // to be searched more carefully. |
| 3871 | |
| 3872 | // Scan up all the n1's with equal depth, looking for n2. |
| 3873 | Node *t1 = idom(n1); |
| 3874 | while (dom_depth(t1) == d1) { |
| 3875 | if (t1 == n2) return n2; |
| 3876 | t1 = idom(t1); |
| 3877 | } |
| 3878 | // Scan up all the n2's with equal depth, looking for n1. |
| 3879 | Node *t2 = idom(n2); |
| 3880 | while (dom_depth(t2) == d2) { |
| 3881 | if (t2 == n1) return n1; |
| 3882 | t2 = idom(t2); |
| 3883 | } |
| 3884 | // Move up to a new dominator-depth value as well as up the dom-tree. |
| 3885 | n1 = t1; |
| 3886 | n2 = t2; |
| 3887 | d1 = dom_depth(n1); |
| 3888 | d2 = dom_depth(n2); |
| 3889 | } |
| 3890 | } |
| 3891 | return n1; |
| 3892 | } |
| 3893 | |
| 3894 | //------------------------------compute_idom----------------------------------- |
| 3895 | // Locally compute IDOM using dom_lca call. Correct only if the incoming |
| 3896 | // IDOMs are correct. |
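|      | // For example (hypothetical shape): for a Region that merges the two
|      | // projections of an If, the LCA of its inputs, and hence the recomputed
|      | // idom of the Region, is the If node itself.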
| 3897 | Node *PhaseIdealLoop::compute_idom( Node *region ) const { |
| 3898 | assert( region->is_Region(), "" ); |
| 3899 | Node *LCA = NULL; |
| 3900 | for( uint i = 1; i < region->req(); i++ ) { |
| 3901 | if( region->in(i) != C->top() ) |
| 3902 | LCA = dom_lca( LCA, region->in(i) ); |
| 3903 | } |
| 3904 | return LCA; |
| 3905 | } |
| 3906 | |
| 3907 | bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) { |
| 3908 | bool had_error = false; |
| 3909 | #ifdef ASSERT |
| 3910 | if (early != C->root()) { |
| 3911 | // Make sure that there's a dominance path from LCA to early |
| 3912 | Node* d = LCA; |
| 3913 | while (d != early) { |
| 3914 | if (d == C->root()) { |
| 3915 | dump_bad_graph("Bad graph detected in compute_lca_of_uses" , n, early, LCA); |
| 3916 | tty->print_cr("*** Use %d isn't dominated by def %d ***" , use->_idx, n->_idx); |
| 3917 | had_error = true; |
| 3918 | break; |
| 3919 | } |
| 3920 | d = idom(d); |
| 3921 | } |
| 3922 | } |
| 3923 | #endif |
| 3924 | return had_error; |
| 3925 | } |
| 3926 | |
| 3927 | |
| 3928 | Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) { |
| 3929 | // Compute LCA over list of uses |
| 3930 | bool had_error = false; |
| 3931 | Node *LCA = NULL; |
| 3932 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) { |
| 3933 | Node* c = n->fast_out(i); |
| 3934 | if (_nodes[c->_idx] == NULL) |
| 3935 | continue; // Skip the occasional dead node |
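// A Phi does not "use" its input at the Phi itself but on the control path
// feeding the matching Region input, so the LCA is taken with that
// predecessor block (c->in(0)->in(j)) rather than with the Phi's block.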
| 3936 | if( c->is_Phi() ) { // For Phis, we must land above on the path |
| 3937 | for( uint j=1; j<c->req(); j++ ) {// For all inputs |
| 3938 | if( c->in(j) == n ) { // Found matching input? |
| 3939 | Node *use = c->in(0)->in(j); |
| 3940 | if (_verify_only && use->is_top()) continue; |
| 3941 | LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); |
| 3942 | if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; |
| 3943 | } |
| 3944 | } |
| 3945 | } else { |
| 3946 | // For CFG data-users, use is in the block just prior |
| 3947 | Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0); |
| 3948 | LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); |
| 3949 | if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; |
| 3950 | } |
| 3951 | } |
assert(!had_error, "bad dominance");
| 3953 | return LCA; |
| 3954 | } |
| 3955 | |
| 3956 | // Check the shape of the graph at the loop entry. In some cases, |
| 3957 | // the shape of the graph does not match the shape outlined below. |
// That happens when the Opaque1 node that "protects" the shape of
// the graph has been removed by, for example, the IGVN performed
// in PhaseIdealLoop::build_and_optimize().
| 3961 | // |
| 3962 | // After the Opaque1 node has been removed, optimizations (e.g., split-if, |
| 3963 | // loop unswitching, and IGVN, or a combination of them) can freely change |
| 3964 | // the graph's shape. As a result, the graph shape outlined below cannot |
| 3965 | // be guaranteed anymore. |
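// Roughly, the shape checked for below (for main and post loops) is:
//
//   cl->skip_predicates()  - IfTrue/IfFalse projection of the zero-trip guard
//     iffm  (If)           - the zero-trip guard test itself
//       bolzm (Bool)
//         cmpzm (Cmp)      - one input still wrapped in an Opaque1 node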
| 3966 | bool PhaseIdealLoop::is_canonical_loop_entry(CountedLoopNode* cl) { |
| 3967 | if (!cl->is_main_loop() && !cl->is_post_loop()) { |
| 3968 | return false; |
| 3969 | } |
| 3970 | Node* ctrl = cl->skip_predicates(); |
| 3971 | |
| 3972 | if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) { |
| 3973 | return false; |
| 3974 | } |
| 3975 | Node* iffm = ctrl->in(0); |
| 3976 | if (iffm == NULL || !iffm->is_If()) { |
| 3977 | return false; |
| 3978 | } |
| 3979 | Node* bolzm = iffm->in(1); |
| 3980 | if (bolzm == NULL || !bolzm->is_Bool()) { |
| 3981 | return false; |
| 3982 | } |
| 3983 | Node* cmpzm = bolzm->in(1); |
| 3984 | if (cmpzm == NULL || !cmpzm->is_Cmp()) { |
| 3985 | return false; |
| 3986 | } |
| 3987 | // compares can get conditionally flipped |
| 3988 | bool found_opaque = false; |
| 3989 | for (uint i = 1; i < cmpzm->req(); i++) { |
| 3990 | Node* opnd = cmpzm->in(i); |
| 3991 | if (opnd && opnd->Opcode() == Op_Opaque1) { |
| 3992 | found_opaque = true; |
| 3993 | break; |
| 3994 | } |
| 3995 | } |
| 3996 | if (!found_opaque) { |
| 3997 | return false; |
| 3998 | } |
| 3999 | return true; |
| 4000 | } |
| 4001 | |
| 4002 | //------------------------------get_late_ctrl---------------------------------- |
| 4003 | // Compute latest legal control. |
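// The latest legal control is the LCA (in the dominator tree) of the
// controls of all of n's uses, possibly pulled further up for Loads so that
// they stay above anti-dependent stores. Callers such as
// build_loop_late_post_work() then pick the least-nested point on the
// dominator chain between the early and the late position.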
| 4004 | Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) { |
assert(early != NULL, "early control should not be NULL");
| 4006 | |
| 4007 | Node* LCA = compute_lca_of_uses(n, early); |
| 4008 | #ifdef ASSERT |
| 4009 | if (LCA == C->root() && LCA != early) { |
| 4010 | // def doesn't dominate uses so print some useful debugging output |
| 4011 | compute_lca_of_uses(n, early, true); |
| 4012 | } |
| 4013 | #endif |
| 4014 | |
| 4015 | // if this is a load, check for anti-dependent stores |
| 4016 | // We use a conservative algorithm to identify potential interfering |
| 4017 | // instructions and for rescheduling the load. The users of the memory |
| 4018 | // input of this load are examined. Any use which is not a load and is |
| 4019 | // dominated by early is considered a potentially interfering store. |
| 4020 | // This can produce false positives. |
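// Concretely: seed a worklist with the users of the load's memory input,
// look through MergeMems, ignore other loads, safepoints and uncommon
// traps, and raise the LCA so that it also dominates the control of any
// remaining user that is itself dominated by early.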
| 4021 | if (n->is_Load() && LCA != early) { |
| 4022 | Node_List worklist; |
| 4023 | |
| 4024 | Node *mem = n->in(MemNode::Memory); |
| 4025 | for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { |
| 4026 | Node* s = mem->fast_out(i); |
| 4027 | worklist.push(s); |
| 4028 | } |
| 4029 | while(worklist.size() != 0 && LCA != early) { |
| 4030 | Node* s = worklist.pop(); |
| 4031 | if (s->is_Load() || s->Opcode() == Op_SafePoint || |
| 4032 | (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) { |
| 4033 | continue; |
| 4034 | } else if (s->is_MergeMem()) { |
| 4035 | for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) { |
| 4036 | Node* s1 = s->fast_out(i); |
| 4037 | worklist.push(s1); |
| 4038 | } |
| 4039 | } else { |
| 4040 | Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0); |
assert(sctrl != NULL || s->outcnt() == 0, "must have control");
| 4042 | if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) { |
| 4043 | LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n); |
| 4044 | } |
| 4045 | } |
| 4046 | } |
| 4047 | } |
| 4048 | |
assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
| 4050 | return LCA; |
| 4051 | } |
| 4052 | |
| 4053 | // true if CFG node d dominates CFG node n |
| 4054 | bool PhaseIdealLoop::is_dominator(Node *d, Node *n) { |
| 4055 | if (d == n) |
| 4056 | return true; |
assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes");
| 4058 | uint dd = dom_depth(d); |
| 4059 | while (dom_depth(n) >= dd) { |
| 4060 | if (n == d) |
| 4061 | return true; |
| 4062 | n = idom(n); |
| 4063 | } |
| 4064 | return false; |
| 4065 | } |
| 4066 | |
| 4067 | //------------------------------dom_lca_for_get_late_ctrl_internal------------- |
| 4068 | // Pair-wise LCA with tags. |
| 4069 | // Tag each index with the node 'tag' currently being processed |
| 4070 | // before advancing up the dominator chain using idom(). |
| 4071 | // Later calls that find a match to 'tag' know that this path has already |
| 4072 | // been considered in the current LCA (which is input 'n1' by convention). |
| 4073 | // Since get_late_ctrl() is only called once for each node, the tag array |
| 4074 | // does not need to be cleared between calls to get_late_ctrl(). |
| 4075 | // Algorithm trades a larger constant factor for better asymptotic behavior |
| 4076 | // |
| 4077 | Node *PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal( Node *n1, Node *n2, Node *tag ) { |
| 4078 | uint d1 = dom_depth(n1); |
| 4079 | uint d2 = dom_depth(n2); |
| 4080 | |
| 4081 | do { |
| 4082 | if (d1 > d2) { |
| 4083 | // current lca is deeper than n2 |
| 4084 | _dom_lca_tags.map(n1->_idx, tag); |
| 4085 | n1 = idom(n1); |
| 4086 | d1 = dom_depth(n1); |
| 4087 | } else if (d1 < d2) { |
| 4088 | // n2 is deeper than current lca |
| 4089 | Node *memo = _dom_lca_tags[n2->_idx]; |
| 4090 | if( memo == tag ) { |
| 4091 | return n1; // Return the current LCA |
| 4092 | } |
| 4093 | _dom_lca_tags.map(n2->_idx, tag); |
| 4094 | n2 = idom(n2); |
| 4095 | d2 = dom_depth(n2); |
| 4096 | } else { |
| 4097 | // Here d1 == d2. Due to edits of the dominator-tree, sections |
| 4098 | // of the tree might have the same depth. These sections have |
| 4099 | // to be searched more carefully. |
| 4100 | |
| 4101 | // Scan up all the n1's with equal depth, looking for n2. |
| 4102 | _dom_lca_tags.map(n1->_idx, tag); |
| 4103 | Node *t1 = idom(n1); |
| 4104 | while (dom_depth(t1) == d1) { |
| 4105 | if (t1 == n2) return n2; |
| 4106 | _dom_lca_tags.map(t1->_idx, tag); |
| 4107 | t1 = idom(t1); |
| 4108 | } |
| 4109 | // Scan up all the n2's with equal depth, looking for n1. |
| 4110 | _dom_lca_tags.map(n2->_idx, tag); |
| 4111 | Node *t2 = idom(n2); |
| 4112 | while (dom_depth(t2) == d2) { |
| 4113 | if (t2 == n1) return n1; |
| 4114 | _dom_lca_tags.map(t2->_idx, tag); |
| 4115 | t2 = idom(t2); |
| 4116 | } |
| 4117 | // Move up to a new dominator-depth value as well as up the dom-tree. |
| 4118 | n1 = t1; |
| 4119 | n2 = t2; |
| 4120 | d1 = dom_depth(n1); |
| 4121 | d2 = dom_depth(n2); |
| 4122 | } |
| 4123 | } while (n1 != n2); |
| 4124 | return n1; |
| 4125 | } |
| 4126 | |
| 4127 | //------------------------------init_dom_lca_tags------------------------------ |
// The tag could be a node's integer index (32 bits instead of a 64-bit
// pointer in some cases). The intended use involves no growth of the
// array, so it could be of fixed size.
| 4131 | void PhaseIdealLoop::init_dom_lca_tags() { |
| 4132 | uint limit = C->unique() + 1; |
| 4133 | _dom_lca_tags.map( limit, NULL ); |
| 4134 | #ifdef ASSERT |
| 4135 | for( uint i = 0; i < limit; ++i ) { |
assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer");
| 4137 | } |
| 4138 | #endif // ASSERT |
| 4139 | } |
| 4140 | |
| 4141 | //------------------------------clear_dom_lca_tags------------------------------ |
// The tag could be a node's integer index (32 bits instead of a 64-bit
// pointer in some cases). The intended use involves no growth of the
// array, so it could be of fixed size.
| 4145 | void PhaseIdealLoop::clear_dom_lca_tags() { |
| 4146 | uint limit = C->unique() + 1; |
| 4147 | _dom_lca_tags.map( limit, NULL ); |
| 4148 | _dom_lca_tags.clear(); |
| 4149 | #ifdef ASSERT |
| 4150 | for( uint i = 0; i < limit; ++i ) { |
assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer");
| 4152 | } |
| 4153 | #endif // ASSERT |
| 4154 | } |
| 4155 | |
| 4156 | //------------------------------build_loop_late-------------------------------- |
| 4157 | // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. |
| 4158 | // Second pass finds latest legal placement, and ideal loop placement. |
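// The walk is an iterative depth-first traversal of the use (out) edges,
// using the explicit Node_Stack instead of recursion so that deep graphs
// cannot overflow the native stack; build_loop_late_post() performs the
// actual placement once all of a node's uses have been visited.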
| 4159 | void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { |
| 4160 | while (worklist.size() != 0) { |
| 4161 | Node *n = worklist.pop(); |
| 4162 | // Only visit once |
| 4163 | if (visited.test_set(n->_idx)) continue; |
| 4164 | uint cnt = n->outcnt(); |
| 4165 | uint i = 0; |
| 4166 | while (true) { |
| 4167 | assert( _nodes[n->_idx], "no dead nodes" ); |
| 4168 | // Visit all children |
| 4169 | if (i < cnt) { |
| 4170 | Node* use = n->raw_out(i); |
| 4171 | ++i; |
| 4172 | // Check for dead uses. Aggressively prune such junk. It might be |
| 4173 | // dead in the global sense, but still have local uses so I cannot |
| 4174 | // easily call 'remove_dead_node'. |
| 4175 | if( _nodes[use->_idx] != NULL || use->is_top() ) { // Not dead? |
| 4176 | // Due to cycles, we might not hit the same fixed point in the verify |
| 4177 | // pass as we do in the regular pass. Instead, visit such phis as |
| 4178 | // simple uses of the loop head. |
| 4179 | if( use->in(0) && (use->is_CFG() || use->is_Phi()) ) { |
| 4180 | if( !visited.test(use->_idx) ) |
| 4181 | worklist.push(use); |
| 4182 | } else if( !visited.test_set(use->_idx) ) { |
| 4183 | nstack.push(n, i); // Save parent and next use's index. |
| 4184 | n = use; // Process all children of current use. |
| 4185 | cnt = use->outcnt(); |
| 4186 | i = 0; |
| 4187 | } |
| 4188 | } else { |
| 4189 | // Do not visit around the backedge of loops via data edges. |
| 4190 | // push dead code onto a worklist |
| 4191 | _deadlist.push(use); |
| 4192 | } |
| 4193 | } else { |
| 4194 | // All of n's children have been processed, complete post-processing. |
| 4195 | build_loop_late_post(n); |
| 4196 | if (nstack.is_empty()) { |
| 4197 | // Finished all nodes on stack. |
| 4198 | // Process next node on the worklist. |
| 4199 | break; |
| 4200 | } |
| 4201 | // Get saved parent node and next use's index. Visit the rest of uses. |
| 4202 | n = nstack.node(); |
| 4203 | cnt = n->outcnt(); |
| 4204 | i = nstack.index(); |
| 4205 | nstack.pop(); |
| 4206 | } |
| 4207 | } |
| 4208 | } |
| 4209 | } |
| 4210 | |
| 4211 | // Verify that no data node is scheduled in the outer loop of a strip |
| 4212 | // mined loop. |
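// A data node placed in the outer strip mined loop is only expected to feed
// the outer loop's safepoint: the walk below starts from that safepoint's
// inputs, stays within the outer loop, and must reach n, otherwise the
// scheduling is broken.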
| 4213 | void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) { |
| 4214 | #ifdef ASSERT |
| 4215 | if (get_loop(least)->_nest == 0) { |
| 4216 | return; |
| 4217 | } |
| 4218 | IdealLoopTree* loop = get_loop(least); |
| 4219 | Node* head = loop->_head; |
| 4220 | if (head->is_OuterStripMinedLoop() && |
| 4221 | // Verification can't be applied to fully built strip mined loops |
| 4222 | head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) { |
| 4223 | Node* sfpt = head->as_Loop()->outer_safepoint(); |
| 4224 | ResourceMark rm; |
| 4225 | Unique_Node_List wq; |
| 4226 | wq.push(sfpt); |
| 4227 | for (uint i = 0; i < wq.size(); i++) { |
| 4228 | Node *m = wq.at(i); |
for (uint j = 1; j < m->req(); j++) {
Node* nn = m->in(j);
| 4231 | if (nn == n) { |
| 4232 | return; |
| 4233 | } |
| 4234 | if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) { |
| 4235 | wq.push(nn); |
| 4236 | } |
| 4237 | } |
| 4238 | } |
| 4239 | ShouldNotReachHere(); |
| 4240 | } |
| 4241 | #endif |
| 4242 | } |
| 4243 | |
| 4244 | |
| 4245 | //------------------------------build_loop_late_post--------------------------- |
| 4246 | // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. |
| 4247 | // Second pass finds latest legal placement, and ideal loop placement. |
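// The GC barrier set gets the first chance to place its own nodes; anything
// it does not handle goes through build_loop_late_post_work() below.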
| 4248 | void PhaseIdealLoop::build_loop_late_post(Node *n) { |
| 4249 | BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); |
| 4250 | |
| 4251 | if (bs->build_loop_late_post(this, n)) { |
| 4252 | return; |
| 4253 | } |
| 4254 | |
| 4255 | build_loop_late_post_work(n, true); |
| 4256 | } |
| 4257 | |
| 4258 | void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) { |
| 4259 | |
| 4260 | if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) { |
| 4261 | _igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops. |
| 4262 | } |
| 4263 | |
| 4264 | #ifdef ASSERT |
| 4265 | if (_verify_only && !n->is_CFG()) { |
| 4266 | // Check def-use domination. |
| 4267 | compute_lca_of_uses(n, get_ctrl(n), true /* verify */); |
| 4268 | } |
| 4269 | #endif |
| 4270 | |
| 4271 | // CFG and pinned nodes already handled |
| 4272 | if( n->in(0) ) { |
| 4273 | if( n->in(0)->is_top() ) return; // Dead? |
| 4274 | |
// We'd like +VerifyLoopOptimizations to not believe that Mods/Loads
| 4276 | // _must_ be pinned (they have to observe their control edge of course). |
| 4277 | // Unlike Stores (which modify an unallocable resource, the memory |
| 4278 | // state), Mods/Loads can float around. So free them up. |
| 4279 | switch( n->Opcode() ) { |
| 4280 | case Op_DivI: |
| 4281 | case Op_DivF: |
| 4282 | case Op_DivD: |
| 4283 | case Op_ModI: |
| 4284 | case Op_ModF: |
| 4285 | case Op_ModD: |
| 4286 | case Op_LoadB: // Same with Loads; they can sink |
| 4287 | case Op_LoadUB: // during loop optimizations. |
| 4288 | case Op_LoadUS: |
| 4289 | case Op_LoadD: |
| 4290 | case Op_LoadF: |
| 4291 | case Op_LoadI: |
| 4292 | case Op_LoadKlass: |
| 4293 | case Op_LoadNKlass: |
| 4294 | case Op_LoadL: |
| 4295 | case Op_LoadS: |
| 4296 | case Op_LoadP: |
| 4297 | case Op_LoadN: |
| 4298 | case Op_LoadRange: |
| 4299 | case Op_LoadD_unaligned: |
| 4300 | case Op_LoadL_unaligned: |
| 4301 | case Op_StrComp: // Does a bunch of load-like effects |
| 4302 | case Op_StrEquals: |
| 4303 | case Op_StrIndexOf: |
| 4304 | case Op_StrIndexOfChar: |
| 4305 | case Op_AryEq: |
| 4306 | case Op_HasNegatives: |
| 4307 | pinned = false; |
| 4308 | } |
| 4309 | if( pinned ) { |
| 4310 | IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n)); |
| 4311 | if( !chosen_loop->_child ) // Inner loop? |
| 4312 | chosen_loop->_body.push(n); // Collect inner loops |
| 4313 | return; |
| 4314 | } |
| 4315 | } else { // No slot zero |
| 4316 | if( n->is_CFG() ) { // CFG with no slot 0 is dead |
| 4317 | _nodes.map(n->_idx,0); // No block setting, it's globally dead |
| 4318 | return; |
| 4319 | } |
assert(!n->is_CFG() || n->outcnt() == 0, "");
| 4321 | } |
| 4322 | |
| 4323 | // Do I have a "safe range" I can select over? |
| 4324 | Node *early = get_ctrl(n);// Early location already computed |
| 4325 | |
| 4326 | // Compute latest point this Node can go |
| 4327 | Node *LCA = get_late_ctrl( n, early ); |
// LCA is NULL only when all of n's uses are dead
| 4329 | if( LCA == NULL ) { |
| 4330 | #ifdef ASSERT |
| 4331 | for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) { |
| 4332 | assert( _nodes[n->out(i1)->_idx] == NULL, "all uses must also be dead" ); |
| 4333 | } |
| 4334 | #endif |
| 4335 | _nodes.map(n->_idx, 0); // This node is useless |
| 4336 | _deadlist.push(n); |
| 4337 | return; |
| 4338 | } |
assert(LCA != NULL && !LCA->is_top(), "no dead nodes");
| 4340 | |
| 4341 | Node *legal = LCA; // Walk 'legal' up the IDOM chain |
| 4342 | Node *least = legal; // Best legal position so far |
| 4343 | while( early != legal ) { // While not at earliest legal |
| 4344 | #ifdef ASSERT |
| 4345 | if (legal->is_Start() && !early->is_Root()) { |
| 4346 | // Bad graph. Print idom path and fail. |
| 4347 | dump_bad_graph("Bad graph detected in build_loop_late" , n, early, LCA); |
| 4348 | assert(false, "Bad graph detected in build_loop_late" ); |
| 4349 | } |
| 4350 | #endif |
| 4351 | // Find least loop nesting depth |
| 4352 | legal = idom(legal); // Bump up the IDOM tree |
| 4353 | // Check for lower nesting depth |
| 4354 | if( get_loop(legal)->_nest < get_loop(least)->_nest ) |
| 4355 | least = legal; |
| 4356 | } |
assert(early == legal || legal != C->root(), "bad dominance of inputs");
| 4358 | |
// Try not to place code on a loop entry projection,
// which can inhibit range check elimination.
| 4361 | if (least != early) { |
| 4362 | Node* ctrl_out = least->unique_ctrl_out(); |
| 4363 | if (ctrl_out && ctrl_out->is_Loop() && |
| 4364 | least == ctrl_out->in(LoopNode::EntryControl)) { |
| 4365 | // Move the node above predicates as far up as possible so a |
| 4366 | // following pass of loop predication doesn't hoist a predicate |
| 4367 | // that depends on it above that node. |
| 4368 | Node* new_ctrl = least; |
| 4369 | for (;;) { |
| 4370 | if (!new_ctrl->is_Proj()) { |
| 4371 | break; |
| 4372 | } |
| 4373 | CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); |
| 4374 | if (call == NULL) { |
| 4375 | break; |
| 4376 | } |
| 4377 | int req = call->uncommon_trap_request(); |
| 4378 | Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); |
| 4379 | if (trap_reason != Deoptimization::Reason_loop_limit_check && |
| 4380 | trap_reason != Deoptimization::Reason_predicate && |
| 4381 | trap_reason != Deoptimization::Reason_profile_predicate) { |
| 4382 | break; |
| 4383 | } |
| 4384 | Node* c = new_ctrl->in(0)->in(0); |
| 4385 | if (is_dominator(c, early) && c != early) { |
| 4386 | break; |
| 4387 | } |
| 4388 | new_ctrl = c; |
| 4389 | } |
| 4390 | least = new_ctrl; |
| 4391 | } |
| 4392 | } |
| 4393 | |
| 4394 | #ifdef ASSERT |
| 4395 | // If verifying, verify that 'verify_me' has a legal location |
| 4396 | // and choose it as our location. |
| 4397 | if( _verify_me ) { |
| 4398 | Node *v_ctrl = _verify_me->get_ctrl_no_update(n); |
| 4399 | Node *legal = LCA; |
| 4400 | while( early != legal ) { // While not at earliest legal |
| 4401 | if( legal == v_ctrl ) break; // Check for prior good location |
legal = idom(legal); // Bump up the IDOM tree
| 4403 | } |
| 4404 | // Check for prior good location |
| 4405 | if( legal == v_ctrl ) least = legal; // Keep prior if found |
| 4406 | } |
| 4407 | #endif |
| 4408 | |
| 4409 | // Assign discovered "here or above" point |
| 4410 | least = find_non_split_ctrl(least); |
| 4411 | verify_strip_mined_scheduling(n, least); |
| 4412 | set_ctrl(n, least); |
| 4413 | |
| 4414 | // Collect inner loop bodies |
| 4415 | IdealLoopTree *chosen_loop = get_loop(least); |
| 4416 | if( !chosen_loop->_child ) // Inner loop? |
| 4417 | chosen_loop->_body.push(n);// Collect inner loops |
| 4418 | } |
| 4419 | |
| 4420 | #ifdef ASSERT |
| 4421 | void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) { |
| 4422 | tty->print_cr("%s" , msg); |
| 4423 | tty->print("n: " ); n->dump(); |
| 4424 | tty->print("early(n): " ); early->dump(); |
| 4425 | if (n->in(0) != NULL && !n->in(0)->is_top() && |
| 4426 | n->in(0) != early && !n->in(0)->is_Root()) { |
| 4427 | tty->print("n->in(0): " ); n->in(0)->dump(); |
| 4428 | } |
| 4429 | for (uint i = 1; i < n->req(); i++) { |
| 4430 | Node* in1 = n->in(i); |
| 4431 | if (in1 != NULL && in1 != n && !in1->is_top()) { |
| 4432 | tty->print("n->in(%d): " , i); in1->dump(); |
| 4433 | Node* in1_early = get_ctrl(in1); |
| 4434 | tty->print("early(n->in(%d)): " , i); in1_early->dump(); |
| 4435 | if (in1->in(0) != NULL && !in1->in(0)->is_top() && |
| 4436 | in1->in(0) != in1_early && !in1->in(0)->is_Root()) { |
| 4437 | tty->print("n->in(%d)->in(0): " , i); in1->in(0)->dump(); |
| 4438 | } |
| 4439 | for (uint j = 1; j < in1->req(); j++) { |
| 4440 | Node* in2 = in1->in(j); |
| 4441 | if (in2 != NULL && in2 != n && in2 != in1 && !in2->is_top()) { |
| 4442 | tty->print("n->in(%d)->in(%d): " , i, j); in2->dump(); |
| 4443 | Node* in2_early = get_ctrl(in2); |
| 4444 | tty->print("early(n->in(%d)->in(%d)): " , i, j); in2_early->dump(); |
| 4445 | if (in2->in(0) != NULL && !in2->in(0)->is_top() && |
| 4446 | in2->in(0) != in2_early && !in2->in(0)->is_Root()) { |
| 4447 | tty->print("n->in(%d)->in(%d)->in(0): " , i, j); in2->in(0)->dump(); |
| 4448 | } |
| 4449 | } |
| 4450 | } |
| 4451 | } |
| 4452 | } |
| 4453 | tty->cr(); |
| 4454 | tty->print("LCA(n): " ); LCA->dump(); |
| 4455 | for (uint i = 0; i < n->outcnt(); i++) { |
| 4456 | Node* u1 = n->raw_out(i); |
| 4457 | if (u1 == n) |
| 4458 | continue; |
| 4459 | tty->print("n->out(%d): " , i); u1->dump(); |
| 4460 | if (u1->is_CFG()) { |
| 4461 | for (uint j = 0; j < u1->outcnt(); j++) { |
| 4462 | Node* u2 = u1->raw_out(j); |
| 4463 | if (u2 != u1 && u2 != n && u2->is_CFG()) { |
| 4464 | tty->print("n->out(%d)->out(%d): " , i, j); u2->dump(); |
| 4465 | } |
| 4466 | } |
| 4467 | } else { |
| 4468 | Node* u1_later = get_ctrl(u1); |
| 4469 | tty->print("later(n->out(%d)): " , i); u1_later->dump(); |
| 4470 | if (u1->in(0) != NULL && !u1->in(0)->is_top() && |
| 4471 | u1->in(0) != u1_later && !u1->in(0)->is_Root()) { |
| 4472 | tty->print("n->out(%d)->in(0): " , i); u1->in(0)->dump(); |
| 4473 | } |
| 4474 | for (uint j = 0; j < u1->outcnt(); j++) { |
| 4475 | Node* u2 = u1->raw_out(j); |
| 4476 | if (u2 == n || u2 == u1) |
| 4477 | continue; |
| 4478 | tty->print("n->out(%d)->out(%d): " , i, j); u2->dump(); |
| 4479 | if (!u2->is_CFG()) { |
| 4480 | Node* u2_later = get_ctrl(u2); |
| 4481 | tty->print("later(n->out(%d)->out(%d)): " , i, j); u2_later->dump(); |
| 4482 | if (u2->in(0) != NULL && !u2->in(0)->is_top() && |
| 4483 | u2->in(0) != u2_later && !u2->in(0)->is_Root()) { |
| 4484 | tty->print("n->out(%d)->in(0): " , i); u2->in(0)->dump(); |
| 4485 | } |
| 4486 | } |
| 4487 | } |
| 4488 | } |
| 4489 | } |
| 4490 | tty->cr(); |
| 4491 | int ct = 0; |
| 4492 | Node *dbg_legal = LCA; |
| 4493 | while(!dbg_legal->is_Start() && ct < 100) { |
| 4494 | tty->print("idom[%d] " ,ct); dbg_legal->dump(); |
| 4495 | ct++; |
| 4496 | dbg_legal = idom(dbg_legal); |
| 4497 | } |
| 4498 | tty->cr(); |
| 4499 | } |
| 4500 | #endif |
| 4501 | |
| 4502 | #ifndef PRODUCT |
| 4503 | //------------------------------dump------------------------------------------- |
| 4504 | void PhaseIdealLoop::dump( ) const { |
| 4505 | ResourceMark rm; |
| 4506 | Arena* arena = Thread::current()->resource_area(); |
| 4507 | Node_Stack stack(arena, C->live_nodes() >> 2); |
| 4508 | Node_List rpo_list; |
| 4509 | VectorSet visited(arena); |
| 4510 | visited.set(C->top()->_idx); |
| 4511 | rpo( C->root(), stack, visited, rpo_list ); |
| 4512 | // Dump root loop indexed by last element in PO order |
| 4513 | dump( _ltree_root, rpo_list.size(), rpo_list ); |
| 4514 | } |
| 4515 | |
| 4516 | void PhaseIdealLoop::dump( IdealLoopTree *loop, uint idx, Node_List &rpo_list ) const { |
| 4517 | loop->dump_head(); |
| 4518 | |
| 4519 | // Now scan for CFG nodes in the same loop |
| 4520 | for( uint j=idx; j > 0; j-- ) { |
| 4521 | Node *n = rpo_list[j-1]; |
| 4522 | if( !_nodes[n->_idx] ) // Skip dead nodes |
| 4523 | continue; |
| 4524 | if( get_loop(n) != loop ) { // Wrong loop nest |
| 4525 | if( get_loop(n)->_head == n && // Found nested loop? |
| 4526 | get_loop(n)->_parent == loop ) |
| 4527 | dump(get_loop(n),rpo_list.size(),rpo_list); // Print it nested-ly |
| 4528 | continue; |
| 4529 | } |
| 4530 | |
| 4531 | // Dump controlling node |
| 4532 | for( uint x = 0; x < loop->_nest; x++ ) |
| 4533 | tty->print(" " ); |
| 4534 | tty->print("C" ); |
| 4535 | if( n == C->root() ) { |
| 4536 | n->dump(); |
| 4537 | } else { |
| 4538 | Node* cached_idom = idom_no_update(n); |
| 4539 | Node *computed_idom = n->in(0); |
| 4540 | if( n->is_Region() ) { |
| 4541 | computed_idom = compute_idom(n); |
// compute_idom() will return n->in(0) when idom(n) is an IfNode (or
| 4543 | // any MultiBranch ctrl node), so apply a similar transform to |
| 4544 | // the cached idom returned from idom_no_update. |
| 4545 | cached_idom = find_non_split_ctrl(cached_idom); |
| 4546 | } |
| 4547 | tty->print(" ID:%d" ,computed_idom->_idx); |
| 4548 | n->dump(); |
| 4549 | if( cached_idom != computed_idom ) { |
| 4550 | tty->print_cr("*** BROKEN IDOM! Computed as: %d, cached as: %d" , |
| 4551 | computed_idom->_idx, cached_idom->_idx); |
| 4552 | } |
| 4553 | } |
| 4554 | // Dump nodes it controls |
| 4555 | for( uint k = 0; k < _nodes.Size(); k++ ) { |
| 4556 | // (k < C->unique() && get_ctrl(find(k)) == n) |
| 4557 | if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) { |
| 4558 | Node *m = C->root()->find(k); |
| 4559 | if( m && m->outcnt() > 0 ) { |
| 4560 | if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) { |
| 4561 | tty->print_cr("*** BROKEN CTRL ACCESSOR! _nodes[k] is %p, ctrl is %p" , |
| 4562 | _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL); |
| 4563 | } |
| 4564 | for( uint j = 0; j < loop->_nest; j++ ) |
| 4565 | tty->print(" " ); |
| 4566 | tty->print(" " ); |
| 4567 | m->dump(); |
| 4568 | } |
| 4569 | } |
| 4570 | } |
| 4571 | } |
| 4572 | } |
| 4573 | #endif |
| 4574 | |
| 4575 | // Collect a R-P-O for the whole CFG. |
| 4576 | // Result list is in post-order (scan backwards for RPO) |
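// The walk is an iterative DFS over CFG out-edges; callers iterate rpo_list
// from the last element toward the first to visit nodes in RPO (as dump()
// above does).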
| 4577 | void PhaseIdealLoop::rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const { |
| 4578 | stk.push(start, 0); |
| 4579 | visited.set(start->_idx); |
| 4580 | |
| 4581 | while (stk.is_nonempty()) { |
| 4582 | Node* m = stk.node(); |
| 4583 | uint idx = stk.index(); |
| 4584 | if (idx < m->outcnt()) { |
| 4585 | stk.set_index(idx + 1); |
| 4586 | Node* n = m->raw_out(idx); |
| 4587 | if (n->is_CFG() && !visited.test_set(n->_idx)) { |
| 4588 | stk.push(n, 0); |
| 4589 | } |
| 4590 | } else { |
| 4591 | rpo_list.push(m); |
| 4592 | stk.pop(); |
| 4593 | } |
| 4594 | } |
| 4595 | } |
| 4596 | |
| 4597 | |
| 4598 | //============================================================================= |
| 4599 | //------------------------------LoopTreeIterator----------------------------------- |
| 4600 | |
| 4601 | // Advance to next loop tree using a preorder, left-to-right traversal. |
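// A typical traversal (a sketch, assuming the done()/current() accessors
// declared with this class in loopnode.hpp):
//   for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//     IdealLoopTree* lpt = iter.current();
//     ...
//   }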
| 4602 | void LoopTreeIterator::next() { |
| 4603 | assert(!done(), "must not be done." ); |
| 4604 | if (_curnt->_child != NULL) { |
| 4605 | _curnt = _curnt->_child; |
| 4606 | } else if (_curnt->_next != NULL) { |
| 4607 | _curnt = _curnt->_next; |
| 4608 | } else { |
| 4609 | while (_curnt != _root && _curnt->_next == NULL) { |
| 4610 | _curnt = _curnt->_parent; |
| 4611 | } |
| 4612 | if (_curnt == _root) { |
| 4613 | _curnt = NULL; |
| 4614 | assert(done(), "must be done." ); |
| 4615 | } else { |
| 4616 | assert(_curnt->_next != NULL, "must be more to do" ); |
| 4617 | _curnt = _curnt->_next; |
| 4618 | } |
| 4619 | } |
| 4620 | } |
| 4621 | |