| 1 | /* |
| 2 | * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_OPTO_LOOPNODE_HPP |
| 26 | #define SHARE_OPTO_LOOPNODE_HPP |
| 27 | |
| 28 | #include "opto/cfgnode.hpp" |
| 29 | #include "opto/multnode.hpp" |
| 30 | #include "opto/phaseX.hpp" |
| 31 | #include "opto/subnode.hpp" |
| 32 | #include "opto/type.hpp" |
| 33 | |
| 34 | class CmpNode; |
| 35 | class CountedLoopEndNode; |
| 36 | class CountedLoopNode; |
| 37 | class IdealLoopTree; |
| 38 | class LoopNode; |
| 39 | class Node; |
| 40 | class OuterStripMinedLoopEndNode; |
| 41 | class PathFrequency; |
| 42 | class PhaseIdealLoop; |
| 43 | class CountedLoopReserveKit; |
| 44 | class VectorSet; |
| 45 | class Invariance; |
| 46 | struct small_cache; |
| 47 | |
| 48 | // |
| 49 | // I D E A L I Z E D L O O P S |
| 50 | // |
| 51 | // Idealized loops are the set of loops I perform more interesting |
| 52 | // transformations on, beyond simple hoisting. |
| 53 | |
| 54 | //------------------------------LoopNode--------------------------------------- |
// Simple loop header.  Fall-in path on the left, loop-back path on the right.
| 56 | class LoopNode : public RegionNode { |
  // Size is bigger to hold the flags.  However, the flags do not change
  // the semantics so they do not appear in the hash & cmp functions.
| 59 | virtual uint size_of() const { return sizeof(*this); } |
| 60 | protected: |
| 61 | uint _loop_flags; |
| 62 | // Names for flag bitfields |
| 63 | enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3, |
| 64 | MainHasNoPreLoop=4, |
| 65 | HasExactTripCount=8, |
| 66 | InnerLoop=16, |
| 67 | PartialPeelLoop=32, |
| 68 | PartialPeelFailed=64, |
| 69 | HasReductions=128, |
| 70 | WasSlpAnalyzed=256, |
| 71 | PassedSlpAnalysis=512, |
| 72 | DoUnrollOnly=1024, |
| 73 | VectorizedLoop=2048, |
| 74 | HasAtomicPostLoop=4096, |
| 75 | HasRangeChecks=8192, |
| 76 | IsMultiversioned=16384, |
| 77 | StripMined=32768, |
| 78 | SubwordLoop=65536, |
| 79 | ProfileTripFailed=131072}; |
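  // The low two bits of _loop_flags encode the pre/main/post role, so e.g.
  //   (_loop_flags & PreMainPostFlagsMask) == Main
  // tests for a main loop, while the remaining flags are independent single
  // bits that are OR'ed in and tested individually.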
| 80 | char _unswitch_count; |
| 81 | enum { _unswitch_max=3 }; |
| 82 | char _postloop_flags; |
| 83 | enum { LoopNotRCEChecked = 0, LoopRCEChecked = 1, RCEPostLoop = 2 }; |
| 84 | |
| 85 | // Expected trip count from profile data |
| 86 | float _profile_trip_cnt; |
| 87 | |
| 88 | public: |
| 89 | // Names for edge indices |
| 90 | enum { Self=0, EntryControl, LoopBackControl }; |
| 91 | |
| 92 | bool is_inner_loop() const { return _loop_flags & InnerLoop; } |
| 93 | void set_inner_loop() { _loop_flags |= InnerLoop; } |
| 94 | |
| 95 | bool range_checks_present() const { return _loop_flags & HasRangeChecks; } |
| 96 | bool is_multiversioned() const { return _loop_flags & IsMultiversioned; } |
| 97 | bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; } |
| 98 | bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; } |
| 99 | void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; } |
| 100 | bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; } |
| 101 | bool is_strip_mined() const { return _loop_flags & StripMined; } |
| 102 | bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; } |
| 103 | bool is_subword_loop() const { return _loop_flags & SubwordLoop; } |
| 104 | |
| 105 | void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; } |
| 106 | void mark_has_reductions() { _loop_flags |= HasReductions; } |
| 107 | void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; } |
| 108 | void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; } |
| 109 | void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; } |
| 110 | void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; } |
| 111 | void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; } |
| 112 | void mark_has_range_checks() { _loop_flags |= HasRangeChecks; } |
| 113 | void mark_is_multiversioned() { _loop_flags |= IsMultiversioned; } |
| 114 | void mark_strip_mined() { _loop_flags |= StripMined; } |
| 115 | void clear_strip_mined() { _loop_flags &= ~StripMined; } |
| 116 | void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; } |
| 117 | void mark_subword_loop() { _loop_flags |= SubwordLoop; } |
| 118 | |
| 119 | int unswitch_max() { return _unswitch_max; } |
| 120 | int unswitch_count() { return _unswitch_count; } |
| 121 | |
| 122 | int has_been_range_checked() const { return _postloop_flags & LoopRCEChecked; } |
| 123 | void set_has_been_range_checked() { _postloop_flags |= LoopRCEChecked; } |
| 124 | int is_rce_post_loop() const { return _postloop_flags & RCEPostLoop; } |
| 125 | void set_is_rce_post_loop() { _postloop_flags |= RCEPostLoop; } |
| 126 | |
| 127 | void set_unswitch_count(int val) { |
| 128 | assert (val <= unswitch_max(), "too many unswitches" ); |
| 129 | _unswitch_count = val; |
| 130 | } |
| 131 | |
| 132 | void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; } |
| 133 | float profile_trip_cnt() { return _profile_trip_cnt; } |
| 134 | |
| 135 | LoopNode(Node *entry, Node *backedge) |
| 136 | : RegionNode(3), _loop_flags(0), _unswitch_count(0), |
| 137 | _postloop_flags(0), _profile_trip_cnt(COUNT_UNKNOWN) { |
| 138 | init_class_id(Class_Loop); |
| 139 | init_req(EntryControl, entry); |
| 140 | init_req(LoopBackControl, backedge); |
| 141 | } |
| 142 | |
| 143 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
| 144 | virtual int Opcode() const; |
| 145 | bool can_be_counted_loop(PhaseTransform* phase) const { |
| 146 | return req() == 3 && in(0) != NULL && |
| 147 | in(1) != NULL && phase->type(in(1)) != Type::TOP && |
| 148 | in(2) != NULL && phase->type(in(2)) != Type::TOP; |
| 149 | } |
| 150 | bool is_valid_counted_loop() const; |
| 151 | #ifndef PRODUCT |
| 152 | virtual void dump_spec(outputStream *st) const; |
| 153 | #endif |
| 154 | |
| 155 | void verify_strip_mined(int expect_skeleton) const; |
| 156 | virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; } |
| 157 | virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return NULL; } |
| 158 | virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return NULL; } |
| 159 | virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return NULL; } |
| 160 | virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return NULL; } |
| 161 | }; |
| 162 | |
| 163 | //------------------------------Counted Loops---------------------------------- |
| 164 | // Counted loops are all trip-counted loops, with exactly 1 trip-counter exit |
| 165 | // path (and maybe some other exit paths). The trip-counter exit is always |
// last in the loop.  The trip-counter has to stride by a constant;
| 167 | // the exit value is also loop invariant. |
| 168 | |
| 169 | // CountedLoopNodes and CountedLoopEndNodes come in matched pairs. The |
| 170 | // CountedLoopNode has the incoming loop control and the loop-back-control |
| 171 | // which is always the IfTrue before the matching CountedLoopEndNode. The |
| 172 | // CountedLoopEndNode has an incoming control (possibly not the |
| 173 | // CountedLoopNode if there is control flow in the loop), the post-increment |
| 174 | // trip-counter value, and the limit. The trip-counter value is always of |
| 175 | // the form (Op old-trip-counter stride). The old-trip-counter is produced |
| 176 | // by a Phi connected to the CountedLoopNode. The stride is constant. |
| 177 | // The Op is any commutable opcode, including Add, Mul, Xor. The |
| 178 | // CountedLoopEndNode also takes in the loop-invariant limit value. |
| 179 | |
| 180 | // From a CountedLoopNode I can reach the matching CountedLoopEndNode via the |
| 181 | // loop-back control. From CountedLoopEndNodes I can reach CountedLoopNodes |
| 182 | // via the old-trip-counter from the Op node. |
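
// For illustration, the canonical shape described above, drawn with the
// trip-counter update as an AddI (any control flow inside the loop body is
// omitted; the vertical edges are the data edges named in the text):
//
//          entry
//            |
//       CountedLoop <------------------+
//            |                         |
//           Phi (old-trip-counter)     |
//            |                         |
//           AddI (+ stride)            |
//            |                         |
//           CmpI (against limit)       |
//            |                         |
//           Bool                       |
//            |                         |
//       CountedLoopEnd                 |
//         /         \                  |
//     IfFalse     IfTrue (loop-back) --+
//     (exit)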
| 183 | |
| 184 | //------------------------------CountedLoopNode-------------------------------- |
| 185 | // CountedLoopNodes head simple counted loops. CountedLoopNodes have as |
| 186 | // inputs the incoming loop-start control and the loop-back control, so they |
| 187 | // act like RegionNodes. They also take in the initial trip counter, the |
| 188 | // loop-invariant stride and the loop-invariant limit value. CountedLoopNodes |
| 189 | // produce a loop-body control and the trip counter value. Since |
| 190 | // CountedLoopNodes behave like RegionNodes I still have a standard CFG model. |
| 191 | |
| 192 | class CountedLoopNode : public LoopNode { |
| 193 | // Size is bigger to hold _main_idx. However, _main_idx does not change |
| 194 | // the semantics so it does not appear in the hash & cmp functions. |
| 195 | virtual uint size_of() const { return sizeof(*this); } |
| 196 | |
| 197 | // For Pre- and Post-loops during debugging ONLY, this holds the index of |
| 198 | // the Main CountedLoop. Used to assert that we understand the graph shape. |
| 199 | node_idx_t _main_idx; |
| 200 | |
| 201 | // Known trip count calculated by compute_exact_trip_count() |
| 202 | uint _trip_count; |
| 203 | |
  // Log2 of the number of original loop bodies in the unrolled loop
| 205 | int _unrolled_count_log2; |
| 206 | |
| 207 | // Node count prior to last unrolling - used to decide if |
| 208 | // unroll,optimize,unroll,optimize,... is making progress |
| 209 | int _node_count_before_unroll; |
| 210 | |
  // If SLP analysis is performed, we record the maximum
  // vector-mapped unroll factor here
| 213 | int _slp_maximum_unroll_factor; |
| 214 | |
| 215 | public: |
| 216 | CountedLoopNode( Node *entry, Node *backedge ) |
| 217 | : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint), |
| 218 | _unrolled_count_log2(0), _node_count_before_unroll(0), |
| 219 | _slp_maximum_unroll_factor(0) { |
| 220 | init_class_id(Class_CountedLoop); |
| 221 | // Initialize _trip_count to the largest possible value. |
| 222 | // Will be reset (lower) if the loop's trip count is known. |
| 223 | } |
| 224 | |
| 225 | virtual int Opcode() const; |
| 226 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
| 227 | |
| 228 | Node *init_control() const { return in(EntryControl); } |
| 229 | Node *back_control() const { return in(LoopBackControl); } |
| 230 | CountedLoopEndNode *loopexit_or_null() const; |
| 231 | CountedLoopEndNode *loopexit() const; |
| 232 | Node *init_trip() const; |
| 233 | Node *stride() const; |
| 234 | int stride_con() const; |
| 235 | bool stride_is_con() const; |
| 236 | Node *limit() const; |
| 237 | Node *incr() const; |
| 238 | Node *phi() const; |
| 239 | |
| 240 | // Match increment with optional truncation |
| 241 | static Node* match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type); |
| 242 | |
| 243 | // A 'main' loop has a pre-loop and a post-loop. The 'main' loop |
| 244 | // can run short a few iterations and may start a few iterations in. |
| 245 | // It will be RCE'd and unrolled and aligned. |
| 246 | |
| 247 | // A following 'post' loop will run any remaining iterations. Used |
| 248 | // during Range Check Elimination, the 'post' loop will do any final |
| 249 | // iterations with full checks. Also used by Loop Unrolling, where |
| 250 | // the 'post' loop will do any epilog iterations needed. Basically, |
  // a 'post' loop cannot profitably be further unrolled or RCE'd.
| 252 | |
  // A preceding 'pre' loop will run at least 1 iteration (to do peeling);
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.
| 257 | |
  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // aligned, may be missing its pre-loop.
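  // For illustration, the loop nest those three flavors form, in pseudocode
  // (the compiler computes pre_limit and main_limit so the main loop body
  // needs no checks):
  //
  //   for (i = init; i < pre_limit;  i += stride)  body;  // pre: full checks
  //   for (        ; i < main_limit; i += stride)  body;  // main: RCE'd, unrolled
  //   for (        ; i < limit;      i += stride)  body;  // post: full checks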
| 260 | bool is_normal_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; } |
| 261 | bool is_pre_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Pre; } |
| 262 | bool is_main_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Main; } |
| 263 | bool is_post_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Post; } |
| 264 | bool is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; } |
| 265 | bool was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; } |
| 266 | bool has_passed_slp () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; } |
| 267 | bool is_unroll_only () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; } |
| 268 | bool is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; } |
| 269 | bool has_atomic_post_loop () const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; } |
| 270 | void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; } |
| 271 | |
| 272 | int main_idx() const { return _main_idx; } |
| 273 | |
| 274 | |
| 275 | void set_pre_loop (CountedLoopNode *main) { assert(is_normal_loop(),"" ); _loop_flags |= Pre ; _main_idx = main->_idx; } |
| 276 | void set_main_loop ( ) { assert(is_normal_loop(),"" ); _loop_flags |= Main; } |
| 277 | void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),"" ); _loop_flags |= Post; _main_idx = main->_idx; } |
| 278 | void set_normal_loop( ) { _loop_flags &= ~PreMainPostFlagsMask; } |
| 279 | |
| 280 | void set_trip_count(uint tc) { _trip_count = tc; } |
| 281 | uint trip_count() { return _trip_count; } |
| 282 | |
| 283 | bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; } |
| 284 | void set_exact_trip_count(uint tc) { |
| 285 | _trip_count = tc; |
| 286 | _loop_flags |= HasExactTripCount; |
| 287 | } |
| 288 | void set_nonexact_trip_count() { |
| 289 | _loop_flags &= ~HasExactTripCount; |
| 290 | } |
| 291 | void set_notpassed_slp() { |
| 292 | _loop_flags &= ~PassedSlpAnalysis; |
| 293 | } |
| 294 | |
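  // The unroll count doubles with each unroll; unrolled_count() clamps the
  // shift at BitsPerInt-3 so the result cannot overflow an int even if
  // _unrolled_count_log2 keeps growing.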
| 295 | void double_unrolled_count() { _unrolled_count_log2++; } |
| 296 | int unrolled_count() { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); } |
| 297 | |
| 298 | void set_node_count_before_unroll(int ct) { _node_count_before_unroll = ct; } |
| 299 | int node_count_before_unroll() { return _node_count_before_unroll; } |
| 300 | void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; } |
| 301 | int slp_max_unroll() const { return _slp_maximum_unroll_factor; } |
| 302 | |
| 303 | virtual LoopNode* skip_strip_mined(int expect_skeleton = 1); |
| 304 | OuterStripMinedLoopNode* outer_loop() const; |
| 305 | virtual IfTrueNode* outer_loop_tail() const; |
| 306 | virtual OuterStripMinedLoopEndNode* outer_loop_end() const; |
| 307 | virtual IfFalseNode* outer_loop_exit() const; |
| 308 | virtual SafePointNode* outer_safepoint() const; |
| 309 | |
| 310 | // If this is a main loop in a pre/main/post loop nest, walk over |
| 311 | // the predicates that were inserted by |
| 312 | // duplicate_predicates()/add_range_check_predicate() |
| 313 | static Node* skip_predicates_from_entry(Node* ctrl); |
| 314 | Node* skip_predicates(); |
| 315 | |
| 316 | #ifndef PRODUCT |
| 317 | virtual void dump_spec(outputStream *st) const; |
| 318 | #endif |
| 319 | }; |
| 320 | |
| 321 | //------------------------------CountedLoopEndNode----------------------------- |
| 322 | // CountedLoopEndNodes end simple trip counted loops. They act much like |
| 323 | // IfNodes. |
| 324 | class CountedLoopEndNode : public IfNode { |
| 325 | public: |
| 326 | enum { TestControl, TestValue }; |
| 327 | |
| 328 | CountedLoopEndNode( Node *control, Node *test, float prob, float cnt ) |
| 329 | : IfNode( control, test, prob, cnt) { |
| 330 | init_class_id(Class_CountedLoopEnd); |
| 331 | } |
| 332 | virtual int Opcode() const; |
| 333 | |
| 334 | Node *cmp_node() const { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : NULL; } |
| 335 | Node *incr() const { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; } |
| 336 | Node *limit() const { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; } |
| 337 | Node *stride() const { Node *tmp = incr (); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; } |
| 338 | Node *init_trip() const { Node *tmp = phi (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; } |
| 339 | int stride_con() const; |
| 340 | bool stride_is_con() const { Node *tmp = stride (); return (tmp != NULL && tmp->is_Con()); } |
| 341 | BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; } |
| 342 | PhiNode *phi() const { |
| 343 | Node *tmp = incr(); |
| 344 | if (tmp && tmp->req() == 3) { |
| 345 | Node* phi = tmp->in(1); |
| 346 | if (phi->is_Phi()) { |
| 347 | return phi->as_Phi(); |
| 348 | } |
| 349 | } |
| 350 | return NULL; |
| 351 | } |
| 352 | CountedLoopNode *loopnode() const { |
| 353 | // The CountedLoopNode that goes with this CountedLoopEndNode may |
| 354 | // have been optimized out by the IGVN so be cautious with the |
| 355 | // pattern matching on the graph |
| 356 | PhiNode* iv_phi = phi(); |
| 357 | if (iv_phi == NULL) { |
| 358 | return NULL; |
| 359 | } |
| 360 | Node *ln = iv_phi->in(0); |
| 361 | if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit_or_null() == this) { |
| 362 | return (CountedLoopNode*)ln; |
| 363 | } |
| 364 | return NULL; |
| 365 | } |
| 366 | |
| 367 | #ifndef PRODUCT |
| 368 | virtual void dump_spec(outputStream *st) const; |
| 369 | #endif |
| 370 | }; |
| 371 | |
| 372 | |
| 373 | inline CountedLoopEndNode* CountedLoopNode::loopexit_or_null() const { |
| 374 | Node* bctrl = back_control(); |
| 375 | if (bctrl == NULL) return NULL; |
| 376 | |
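  // In the canonical shape the loop-back control is the IfTrue projection
  // of the CountedLoopEnd, so its in(0) is the CountedLoopEnd itself.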
| 377 | Node* lexit = bctrl->in(0); |
| 378 | return (CountedLoopEndNode*) |
| 379 | (lexit->Opcode() == Op_CountedLoopEnd ? lexit : NULL); |
| 380 | } |
| 381 | |
| 382 | inline CountedLoopEndNode* CountedLoopNode::loopexit() const { |
| 383 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 384 | assert(cle != NULL, "loopexit is NULL" ); |
| 385 | return cle; |
| 386 | } |
| 387 | |
| 388 | inline Node* CountedLoopNode::init_trip() const { |
| 389 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 390 | return cle != NULL ? cle->init_trip() : NULL; |
| 391 | } |
| 392 | inline Node* CountedLoopNode::stride() const { |
| 393 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 394 | return cle != NULL ? cle->stride() : NULL; |
| 395 | } |
| 396 | inline int CountedLoopNode::stride_con() const { |
| 397 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 398 | return cle != NULL ? cle->stride_con() : 0; |
| 399 | } |
| 400 | inline bool CountedLoopNode::stride_is_con() const { |
| 401 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 402 | return cle != NULL && cle->stride_is_con(); |
| 403 | } |
| 404 | inline Node* CountedLoopNode::limit() const { |
| 405 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 406 | return cle != NULL ? cle->limit() : NULL; |
| 407 | } |
| 408 | inline Node* CountedLoopNode::incr() const { |
| 409 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 410 | return cle != NULL ? cle->incr() : NULL; |
| 411 | } |
| 412 | inline Node* CountedLoopNode::phi() const { |
| 413 | CountedLoopEndNode* cle = loopexit_or_null(); |
| 414 | return cle != NULL ? cle->phi() : NULL; |
| 415 | } |
| 416 | |
| 417 | //------------------------------LoopLimitNode----------------------------- |
// Counted Loop limit node which represents the exact final iterator value:
//   trip_count  = (limit - init_trip + stride - 1) / stride
//   final_value = trip_count * stride + init_trip
// Use HW instructions to calculate it when the computation can overflow
// the integer range.  Note that final_value should fit into an integer
// since the counted loop has the limit check: limit <= max_int - stride.
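// A worked example (positive stride): with init_trip = 0, limit = 10 and
// stride = 3:
//   trip_count  = (10 - 0 + 3 - 1) / 3 = 4     (i takes 0, 3, 6, 9)
//   final_value = 4 * 3 + 0 = 12               (first value past the limit)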
| 424 | class LoopLimitNode : public Node { |
| 425 | enum { Init=1, Limit=2, Stride=3 }; |
| 426 | public: |
| 427 | LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) { |
| 428 | // Put it on the Macro nodes list to optimize during macro nodes expansion. |
| 429 | init_flags(Flag_is_macro); |
| 430 | C->add_macro_node(this); |
| 431 | } |
| 432 | virtual int Opcode() const; |
| 433 | virtual const Type *bottom_type() const { return TypeInt::INT; } |
| 434 | virtual uint ideal_reg() const { return Op_RegI; } |
| 435 | virtual const Type* Value(PhaseGVN* phase) const; |
| 436 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
| 437 | virtual Node* Identity(PhaseGVN* phase); |
| 438 | }; |
| 439 | |
| 440 | // Support for strip mining |
| 441 | class OuterStripMinedLoopNode : public LoopNode { |
| 442 | private: |
| 443 | CountedLoopNode* inner_loop() const; |
| 444 | public: |
| 445 | OuterStripMinedLoopNode(Compile* C, Node *entry, Node *backedge) |
| 446 | : LoopNode(entry, backedge) { |
| 447 | init_class_id(Class_OuterStripMinedLoop); |
| 448 | init_flags(Flag_is_macro); |
| 449 | C->add_macro_node(this); |
| 450 | } |
| 451 | |
| 452 | virtual int Opcode() const; |
| 453 | |
| 454 | virtual IfTrueNode* outer_loop_tail() const; |
| 455 | virtual OuterStripMinedLoopEndNode* outer_loop_end() const; |
| 456 | virtual IfFalseNode* outer_loop_exit() const; |
| 457 | virtual SafePointNode* outer_safepoint() const; |
| 458 | void adjust_strip_mined_loop(PhaseIterGVN* igvn); |
| 459 | }; |
| 460 | |
| 461 | class OuterStripMinedLoopEndNode : public IfNode { |
| 462 | public: |
| 463 | OuterStripMinedLoopEndNode(Node *control, Node *test, float prob, float cnt) |
| 464 | : IfNode(control, test, prob, cnt) { |
| 465 | init_class_id(Class_OuterStripMinedLoopEnd); |
| 466 | } |
| 467 | |
| 468 | virtual int Opcode() const; |
| 469 | |
| 470 | virtual const Type* Value(PhaseGVN* phase) const; |
| 471 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
| 472 | }; |
| 473 | |
| 474 | // -----------------------------IdealLoopTree---------------------------------- |
| 475 | class IdealLoopTree : public ResourceObj { |
| 476 | public: |
| 477 | IdealLoopTree *_parent; // Parent in loop tree |
| 478 | IdealLoopTree *_next; // Next sibling in loop tree |
| 479 | IdealLoopTree *_child; // First child in loop tree |
| 480 | |
| 481 | // The head-tail backedge defines the loop. |
  // If a loop has multiple backedges, this is addressed during cleanup where
  // we peel off the multiple backedges, merging all edges at the bottom and
  // ensuring that one proper backedge flows into the loop.
| 485 | Node *_head; // Head of loop |
| 486 | Node *_tail; // Tail of loop |
| 487 | inline Node *tail(); // Handle lazy update of _tail field |
| 488 | PhaseIdealLoop* _phase; |
| 489 | int _local_loop_unroll_limit; |
| 490 | int _local_loop_unroll_factor; |
| 491 | |
| 492 | Node_List _body; // Loop body for inner loops |
| 493 | |
| 494 | uint8_t _nest; // Nesting depth |
| 495 | uint8_t _irreducible:1, // True if irreducible |
| 496 | _has_call:1, // True if has call safepoint |
| 497 | _has_sfpt:1, // True if has non-call safepoint |
| 498 | _rce_candidate:1; // True if candidate for range check elimination |
| 499 | |
| 500 | Node_List* _safepts; // List of safepoints in this loop |
  Node_List* _required_safept;      // An inner loop cannot delete these safepts
| 502 | bool _allow_optimizations; // Allow loop optimizations |
| 503 | |
| 504 | IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail ) |
| 505 | : _parent(0), _next(0), _child(0), |
| 506 | _head(head), _tail(tail), |
| 507 | _phase(phase), |
| 508 | _local_loop_unroll_limit(0), _local_loop_unroll_factor(0), |
| 509 | _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0), |
| 510 | _safepts(NULL), |
| 511 | _required_safept(NULL), |
| 512 | _allow_optimizations(true) |
| 513 | { |
| 514 | precond(_head != NULL); |
| 515 | precond(_tail != NULL); |
| 516 | } |
| 517 | |
| 518 | // Is 'l' a member of 'this'? |
| 519 | bool is_member(const IdealLoopTree *l) const; // Test for nested membership |
| 520 | |
| 521 | // Set loop nesting depth. Accumulate has_call bits. |
| 522 | int set_nest( uint depth ); |
| 523 | |
| 524 | // Split out multiple fall-in edges from the loop header. Move them to a |
| 525 | // private RegionNode before the loop. This becomes the loop landing pad. |
| 526 | void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ); |
| 527 | |
| 528 | // Split out the outermost loop from this shared header. |
| 529 | void split_outer_loop( PhaseIdealLoop *phase ); |
| 530 | |
| 531 | // Merge all the backedges from the shared header into a private Region. |
| 532 | // Feed that region as the one backedge to this loop. |
| 533 | void merge_many_backedges( PhaseIdealLoop *phase ); |
| 534 | |
| 535 | // Split shared headers and insert loop landing pads. |
| 536 | // Insert a LoopNode to replace the RegionNode. |
| 537 | // Returns TRUE if loop tree is structurally changed. |
| 538 | bool beautify_loops( PhaseIdealLoop *phase ); |
| 539 | |
| 540 | // Perform optimization to use the loop predicates for null checks and range checks. |
| 541 | // Applies to any loop level (not just the innermost one) |
| 542 | bool loop_predication( PhaseIdealLoop *phase); |
| 543 | |
| 544 | // Perform iteration-splitting on inner loops. Split iterations to |
| 545 | // avoid range checks or one-shot null checks. Returns false if the |
| 546 | // current round of loop opts should stop. |
| 547 | bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new ); |
| 548 | |
| 549 | // Driver for various flavors of iteration splitting. Returns false |
| 550 | // if the current round of loop opts should stop. |
| 551 | bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ); |
| 552 | |
| 553 | // Given dominators, try to find loops with calls that must always be |
| 554 | // executed (call dominates loop tail). These loops do not need non-call |
| 555 | // safepoints (ncsfpt). |
| 556 | void check_safepts(VectorSet &visited, Node_List &stack); |
| 557 | |
| 558 | // Allpaths backwards scan from loop tail, terminating each path at first safepoint |
| 559 | // encountered. |
| 560 | void allpaths_check_safepts(VectorSet &visited, Node_List &stack); |
| 561 | |
  // Remove safepoints from the loop, optionally keeping one.
| 563 | void remove_safepoints(PhaseIdealLoop* phase, bool keep_one); |
| 564 | |
| 565 | // Convert to counted loops where possible |
| 566 | void counted_loop( PhaseIdealLoop *phase ); |
| 567 | |
| 568 | // Check for Node being a loop-breaking test |
| 569 | Node *is_loop_exit(Node *iff) const; |
| 570 | |
| 571 | // Remove simplistic dead code from loop body |
| 572 | void DCE_loop_body(); |
| 573 | |
| 574 | // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. |
| 575 | // Replace with a 1-in-10 exit guess. |
| 576 | void adjust_loop_exit_prob( PhaseIdealLoop *phase ); |
| 577 | |
| 578 | // Return TRUE or FALSE if the loop should never be RCE'd or aligned. |
| 579 | // Useful for unrolling loops with NO array accesses. |
| 580 | bool policy_peel_only( PhaseIdealLoop *phase ) const; |
| 581 | |
| 582 | // Return TRUE or FALSE if the loop should be unswitched -- clone |
| 583 | // loop with an invariant test |
| 584 | bool policy_unswitching( PhaseIdealLoop *phase ) const; |
| 585 | |
| 586 | // Micro-benchmark spamming. Remove empty loops. |
| 587 | bool do_remove_empty_loop( PhaseIdealLoop *phase ); |
| 588 | |
| 589 | // Convert one iteration loop into normal code. |
| 590 | bool do_one_iteration_loop( PhaseIdealLoop *phase ); |
| 591 | |
| 592 | // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can |
| 593 | // move some loop-invariant test (usually a null-check) before the loop. |
| 594 | bool policy_peeling(PhaseIdealLoop *phase); |
| 595 | |
| 596 | uint estimate_peeling(PhaseIdealLoop *phase); |
| 597 | |
| 598 | // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any |
| 599 | // known trip count in the counted loop node. |
| 600 | bool policy_maximally_unroll(PhaseIdealLoop *phase) const; |
| 601 | |
| 602 | // Return TRUE or FALSE if the loop should be unrolled or not. Apply unroll |
| 603 | // if the loop is a counted loop and the loop body is small enough. |
| 604 | bool policy_unroll(PhaseIdealLoop *phase); |
| 605 | |
| 606 | // Loop analyses to map to a maximal superword unrolling for vectorization. |
| 607 | void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct); |
| 608 | |
| 609 | // Return TRUE or FALSE if the loop should be range-check-eliminated. |
| 610 | // Gather a list of IF tests that are dominated by iteration splitting; |
| 611 | // also gather the end of the first split and the start of the 2nd split. |
| 612 | bool policy_range_check( PhaseIdealLoop *phase ) const; |
| 613 | |
| 614 | // Return TRUE or FALSE if the loop should be cache-line aligned. |
| 615 | // Gather the expression that does the alignment. Note that only |
| 616 | // one array base can be aligned in a loop (unless the VM guarantees |
| 617 | // mutual alignment). Note that if we vectorize short memory ops |
| 618 | // into longer memory ops, we may want to increase alignment. |
| 619 | bool policy_align( PhaseIdealLoop *phase ) const; |
| 620 | |
| 621 | // Return TRUE if "iff" is a range check. |
| 622 | bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const; |
| 623 | |
| 624 | // Estimate the number of nodes required when cloning a loop (body). |
| 625 | uint est_loop_clone_sz(uint factor) const; |
| 626 | |
| 627 | // Compute loop trip count if possible |
| 628 | void compute_trip_count(PhaseIdealLoop* phase); |
| 629 | |
| 630 | // Compute loop trip count from profile data |
| 631 | float compute_profile_trip_cnt_helper(Node* n); |
| 632 | void compute_profile_trip_cnt( PhaseIdealLoop *phase ); |
| 633 | |
| 634 | // Reassociate invariant expressions. |
| 635 | void reassociate_invariants(PhaseIdealLoop *phase); |
| 636 | // Reassociate invariant add and subtract expressions. |
| 637 | Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase); |
| 638 | // Return nonzero index of invariant operand if invariant and variant |
| 639 | // are combined with an Add or Sub. Helper for reassociate_invariants. |
| 640 | int is_invariant_addition(Node* n, PhaseIdealLoop *phase); |
| 641 | |
| 642 | // Return true if n is invariant |
| 643 | bool is_invariant(Node* n) const; |
| 644 | |
| 645 | // Put loop body on igvn work list |
| 646 | void record_for_igvn(); |
| 647 | |
| 648 | bool is_root() { return _parent == NULL; } |
| 649 | // A proper/reducible loop w/o any (occasional) dead back-edge. |
| 650 | bool is_loop() { return !_irreducible && !tail()->is_top(); } |
| 651 | bool is_counted() { return is_loop() && _head->is_CountedLoop(); } |
| 652 | bool is_innermost() { return is_loop() && _child == NULL; } |
| 653 | |
| 654 | void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase); |
| 655 | |
| 656 | #ifndef PRODUCT |
| 657 | void dump_head( ) const; // Dump loop head only |
| 658 | void dump() const; // Dump this loop recursively |
| 659 | void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const; |
| 660 | #endif |
| 661 | |
| 662 | }; |
| 663 | |
| 664 | // -----------------------------PhaseIdealLoop--------------------------------- |
| 665 | // Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees |
| 666 | // into a loop tree. Drives the loop-based transformations on the ideal graph. |
| 667 | class PhaseIdealLoop : public PhaseTransform { |
| 668 | friend class IdealLoopTree; |
| 669 | friend class SuperWord; |
| 670 | friend class CountedLoopReserveKit; |
| 671 | friend class ShenandoahBarrierC2Support; |
| 672 | friend class AutoNodeBudget; |
| 673 | |
| 674 | // Pre-computed def-use info |
| 675 | PhaseIterGVN &_igvn; |
| 676 | |
| 677 | // Head of loop tree |
| 678 | IdealLoopTree *_ltree_root; |
| 679 | |
| 680 | // Array of pre-order numbers, plus post-visited bit. |
| 681 | // ZERO for not pre-visited. EVEN for pre-visited but not post-visited. |
| 682 | // ODD for post-visited. Other bits are the pre-order number. |
| 683 | uint *_preorders; |
| 684 | uint _max_preorder; |
| 685 | |
| 686 | const PhaseIdealLoop* _verify_me; |
| 687 | bool _verify_only; |
| 688 | |
| 689 | // Allocate _preorders[] array |
| 690 | void allocate_preorders() { |
| 691 | _max_preorder = C->unique()+8; |
| 692 | _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder); |
| 693 | memset(_preorders, 0, sizeof(uint) * _max_preorder); |
| 694 | } |
| 695 | |
  // Reallocate and clear the _preorders[] array
| 697 | void reallocate_preorders() { |
| 698 | if ( _max_preorder < C->unique() ) { |
| 699 | _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique()); |
| 700 | _max_preorder = C->unique(); |
| 701 | } |
| 702 | memset(_preorders, 0, sizeof(uint) * _max_preorder); |
| 703 | } |
| 704 | |
| 705 | // Check to grow _preorders[] array for the case when build_loop_tree_impl() |
| 706 | // adds new nodes. |
| 707 | void check_grow_preorders( ) { |
| 708 | if ( _max_preorder < C->unique() ) { |
| 709 | uint newsize = _max_preorder<<1; // double size of array |
| 710 | _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize); |
| 711 | memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder)); |
| 712 | _max_preorder = newsize; |
| 713 | } |
| 714 | } |
| 715 | // Check for pre-visited. Zero for NOT visited; non-zero for visited. |
| 716 | int is_visited( Node *n ) const { return _preorders[n->_idx]; } |
| 717 | // Pre-order numbers are written to the Nodes array as low-bit-set values. |
| 718 | void set_preorder_visited( Node *n, int pre_order ) { |
| 719 | assert( !is_visited( n ), "already set" ); |
| 720 | _preorders[n->_idx] = (pre_order<<1); |
| 721 | }; |
| 722 | // Return pre-order number. |
| 723 | int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; } |
| 724 | |
| 725 | // Check for being post-visited. |
| 726 | // Should be previsited already (checked with assert(is_visited(n))). |
| 727 | int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; } |
| 728 | |
| 729 | // Mark as post visited |
| 730 | void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; } |
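  // Worked example of the encoding: a node visited with pre-order number 5
  // is stored as (5 << 1) == 10 (even: pre- but not post-visited); marking
  // it post-visited sets the low bit, giving 11 (odd), and get_preorder()
  // still recovers 11 >> 1 == 5.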
| 731 | |
| 732 | public: |
  // Set/get control node out.  The lower bit is set to distinguish a control
  // Node from an IdealLoopTree* in the _nodes side array.
  // Returns true if "n" is a data node, false if it's a control node.
| 735 | bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; } |
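  // Encoding example: set_ctrl(n, c) below stores (Node*)((intptr_t)c + 1)
  // in _nodes[n->_idx], so has_ctrl(n) sees the low bit set; set_loop(n, l)
  // stores the IdealLoopTree* directly, leaving the low bit clear.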
| 736 | |
| 737 | private: |
| 738 | // clear out dead code after build_loop_late |
| 739 | Node_List _deadlist; |
| 740 | |
| 741 | // Support for faster execution of get_late_ctrl()/dom_lca() |
| 742 | // when a node has many uses and dominator depth is deep. |
| 743 | Node_Array _dom_lca_tags; |
| 744 | void init_dom_lca_tags(); |
| 745 | void clear_dom_lca_tags(); |
| 746 | |
| 747 | // Helper for debugging bad dominance relationships |
| 748 | bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early); |
| 749 | |
| 750 | Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false); |
| 751 | |
| 752 | // Inline wrapper for frequent cases: |
| 753 | // 1) only one use |
| 754 | // 2) a use is the same as the current LCA passed as 'n1' |
| 755 | Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) { |
| 756 | assert( n->is_CFG(), "" ); |
| 757 | // Fast-path NULL lca |
| 758 | if( lca != NULL && lca != n ) { |
| 759 | assert( lca->is_CFG(), "" ); |
| 760 | // find LCA of all uses |
| 761 | n = dom_lca_for_get_late_ctrl_internal( lca, n, tag ); |
| 762 | } |
| 763 | return find_non_split_ctrl(n); |
| 764 | } |
| 765 | Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag ); |
| 766 | |
| 767 | // Helper function for directing control inputs away from CFG split points. |
| 768 | Node *find_non_split_ctrl( Node *ctrl ) const { |
| 769 | if (ctrl != NULL) { |
| 770 | if (ctrl->is_MultiBranch()) { |
| 771 | ctrl = ctrl->in(0); |
| 772 | } |
| 773 | assert(ctrl->is_CFG(), "CFG" ); |
| 774 | } |
| 775 | return ctrl; |
| 776 | } |
| 777 | |
| 778 | Node* cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop); |
| 779 | void duplicate_predicates_helper(Node* predicate, Node* start, Node* end, IdealLoopTree* outer_loop, |
| 780 | LoopNode* outer_main_head, uint dd_main_head); |
| 781 | void duplicate_predicates(CountedLoopNode* pre_head, Node* start, Node* end, IdealLoopTree* outer_loop, |
| 782 | LoopNode* outer_main_head, uint dd_main_head); |
| 783 | Node* clone_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj, |
| 784 | Node* current_proj, IdealLoopTree* outer_loop, Node* prev_proj); |
| 785 | bool skeleton_predicate_has_opaque(IfNode* iff); |
| 786 | void update_skeleton_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con); |
| 787 | void insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol); |
| 788 | |
| 789 | public: |
| 790 | |
| 791 | PhaseIterGVN &igvn() const { return _igvn; } |
| 792 | |
| 793 | static bool is_canonical_loop_entry(CountedLoopNode* cl); |
| 794 | |
| 795 | bool has_node( Node* n ) const { |
| 796 | guarantee(n != NULL, "No Node." ); |
| 797 | return _nodes[n->_idx] != NULL; |
| 798 | } |
| 799 | // check if transform created new nodes that need _ctrl recorded |
| 800 | Node *get_late_ctrl( Node *n, Node *early ); |
| 801 | Node *get_early_ctrl( Node *n ); |
| 802 | Node *get_early_ctrl_for_expensive(Node *n, Node* earliest); |
| 803 | void set_early_ctrl( Node *n ); |
| 804 | void set_subtree_ctrl( Node *root ); |
| 805 | void set_ctrl( Node *n, Node *ctrl ) { |
| 806 | assert( !has_node(n) || has_ctrl(n), "" ); |
| 807 | assert( ctrl->in(0), "cannot set dead control node" ); |
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
| 809 | _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) ); |
| 810 | } |
| 811 | // Set control and update loop membership |
| 812 | void set_ctrl_and_loop(Node* n, Node* ctrl) { |
| 813 | IdealLoopTree* old_loop = get_loop(get_ctrl(n)); |
| 814 | IdealLoopTree* new_loop = get_loop(ctrl); |
| 815 | if (old_loop != new_loop) { |
| 816 | if (old_loop->_child == NULL) old_loop->_body.yank(n); |
| 817 | if (new_loop->_child == NULL) new_loop->_body.push(n); |
| 818 | } |
| 819 | set_ctrl(n, ctrl); |
| 820 | } |
| 821 | // Control nodes can be replaced or subsumed. During this pass they |
| 822 | // get their replacement Node in slot 1. Instead of updating the block |
| 823 | // location of all Nodes in the subsumed block, we lazily do it. As we |
| 824 | // pull such a subsumed block out of the array, we write back the final |
| 825 | // correct block. |
| 826 | Node *get_ctrl( Node *i ) { |
| 827 | |
| 828 | assert(has_node(i), "" ); |
| 829 | Node *n = get_ctrl_no_update(i); |
| 830 | _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) ); |
| 831 | assert(has_node(i) && has_ctrl(i), "" ); |
| 832 | assert(n == find_non_split_ctrl(n), "must return legal ctrl" ); |
| 833 | return n; |
| 834 | } |
| 835 | // true if CFG node d dominates CFG node n |
| 836 | bool is_dominator(Node *d, Node *n); |
| 837 | // return get_ctrl for a data node and self(n) for a CFG node |
| 838 | Node* ctrl_or_self(Node* n) { |
| 839 | if (has_ctrl(n)) |
| 840 | return get_ctrl(n); |
| 841 | else { |
| 842 | assert (n->is_CFG(), "must be a CFG node" ); |
| 843 | return n; |
| 844 | } |
| 845 | } |
| 846 | |
| 847 | Node *get_ctrl_no_update_helper(Node *i) const { |
| 848 | assert(has_ctrl(i), "should be control, not loop" ); |
| 849 | return (Node*)(((intptr_t)_nodes[i->_idx]) & ~1); |
| 850 | } |
| 851 | |
| 852 | Node *get_ctrl_no_update(Node *i) const { |
| 853 | assert( has_ctrl(i), "" ); |
| 854 | Node *n = get_ctrl_no_update_helper(i); |
| 855 | if (!n->in(0)) { |
| 856 | // Skip dead CFG nodes |
| 857 | do { |
| 858 | n = get_ctrl_no_update_helper(n); |
| 859 | } while (!n->in(0)); |
| 860 | n = find_non_split_ctrl(n); |
| 861 | } |
| 862 | return n; |
| 863 | } |
| 864 | |
| 865 | // Check for loop being set |
| 866 | // "n" must be a control node. Returns true if "n" is known to be in a loop. |
| 867 | bool has_loop( Node *n ) const { |
| 868 | assert(!has_node(n) || !has_ctrl(n), "" ); |
| 869 | return has_node(n); |
| 870 | } |
| 871 | // Set loop |
| 872 | void set_loop( Node *n, IdealLoopTree *loop ) { |
| 873 | _nodes.map(n->_idx, (Node*)loop); |
| 874 | } |
| 875 | // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms. Replace |
| 876 | // the 'old_node' with 'new_node'. Kill old-node. Add a reference |
| 877 | // from old_node to new_node to support the lazy update. Reference |
  // replaces the loop reference, since that is not needed for a dead node.
| 879 | void lazy_update(Node *old_node, Node *new_node) { |
| 880 | assert(old_node != new_node, "no cycles please" ); |
| 881 | // Re-use the side array slot for this node to provide the |
| 882 | // forwarding pointer. |
| 883 | _nodes.map(old_node->_idx, (Node*)((intptr_t)new_node + 1)); |
| 884 | } |
| 885 | void lazy_replace(Node *old_node, Node *new_node) { |
| 886 | _igvn.replace_node(old_node, new_node); |
| 887 | lazy_update(old_node, new_node); |
| 888 | } |
| 889 | |
| 890 | private: |
| 891 | |
| 892 | // Place 'n' in some loop nest, where 'n' is a CFG node |
| 893 | void build_loop_tree(); |
| 894 | int build_loop_tree_impl( Node *n, int pre_order ); |
| 895 | // Insert loop into the existing loop tree. 'innermost' is a leaf of the |
| 896 | // loop tree, not the root. |
| 897 | IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost ); |
| 898 | |
| 899 | // Place Data nodes in some loop nest |
| 900 | void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ); |
| 901 | void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ); |
| 902 | void build_loop_late_post_work(Node* n, bool pinned); |
| 903 | void build_loop_late_post(Node* n); |
| 904 | void verify_strip_mined_scheduling(Node *n, Node* least); |
| 905 | |
| 906 | // Array of immediate dominance info for each CFG node indexed by node idx |
| 907 | private: |
| 908 | uint _idom_size; |
| 909 | Node **_idom; // Array of immediate dominators |
| 910 | uint *_dom_depth; // Used for fast LCA test |
| 911 | GrowableArray<uint>* _dom_stk; // For recomputation of dom depth |
| 912 | |
| 913 | // Perform verification that the graph is valid. |
| 914 | PhaseIdealLoop( PhaseIterGVN &igvn) : |
| 915 | PhaseTransform(Ideal_Loop), |
| 916 | _igvn(igvn), |
| 917 | _verify_me(NULL), |
| 918 | _verify_only(true), |
| 919 | _dom_lca_tags(arena()), // Thread::resource_area |
| 920 | _nodes_required(UINT_MAX) { |
| 921 | build_and_optimize(LoopOptsVerify); |
| 922 | } |
| 923 | |
| 924 | // build the loop tree and perform any requested optimizations |
| 925 | void build_and_optimize(LoopOptsMode mode); |
| 926 | |
| 927 | // Dominators for the sea of nodes |
| 928 | void Dominators(); |
| 929 | |
| 930 | // Compute the Ideal Node to Loop mapping |
| 931 | PhaseIdealLoop(PhaseIterGVN &igvn, LoopOptsMode mode) : |
| 932 | PhaseTransform(Ideal_Loop), |
| 933 | _igvn(igvn), |
| 934 | _verify_me(NULL), |
| 935 | _verify_only(false), |
| 936 | _dom_lca_tags(arena()), // Thread::resource_area |
| 937 | _nodes_required(UINT_MAX) { |
| 938 | build_and_optimize(mode); |
| 939 | } |
| 940 | |
| 941 | // Verify that verify_me made the same decisions as a fresh run. |
| 942 | PhaseIdealLoop(PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) : |
| 943 | PhaseTransform(Ideal_Loop), |
| 944 | _igvn(igvn), |
| 945 | _verify_me(verify_me), |
| 946 | _verify_only(false), |
| 947 | _dom_lca_tags(arena()), // Thread::resource_area |
| 948 | _nodes_required(UINT_MAX) { |
| 949 | build_and_optimize(LoopOptsVerify); |
| 950 | } |
| 951 | |
| 952 | public: |
| 953 | Node* idom_no_update(Node* d) const { |
| 954 | return idom_no_update(d->_idx); |
| 955 | } |
| 956 | |
| 957 | Node* idom_no_update(uint didx) const { |
| 958 | assert(didx < _idom_size, "oob" ); |
| 959 | Node* n = _idom[didx]; |
| 960 | assert(n != NULL,"Bad immediate dominator info." ); |
| 961 | while (n->in(0) == NULL) { // Skip dead CFG nodes |
| 962 | n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1); |
| 963 | assert(n != NULL,"Bad immediate dominator info." ); |
| 964 | } |
| 965 | return n; |
| 966 | } |
| 967 | |
| 968 | Node *idom(Node* d) const { |
| 969 | return idom(d->_idx); |
| 970 | } |
| 971 | |
| 972 | Node *idom(uint didx) const { |
| 973 | Node *n = idom_no_update(didx); |
| 974 | _idom[didx] = n; // Lazily remove dead CFG nodes from table. |
| 975 | return n; |
| 976 | } |
| 977 | |
| 978 | uint dom_depth(Node* d) const { |
| 979 | guarantee(d != NULL, "Null dominator info." ); |
| 980 | guarantee(d->_idx < _idom_size, "" ); |
| 981 | return _dom_depth[d->_idx]; |
| 982 | } |
| 983 | void set_idom(Node* d, Node* n, uint dom_depth); |
| 984 | // Locally compute IDOM using dom_lca call |
| 985 | Node *compute_idom( Node *region ) const; |
| 986 | // Recompute dom_depth |
| 987 | void recompute_dom_depth(); |
| 988 | |
| 989 | // Is safept not required by an outer loop? |
| 990 | bool is_deleteable_safept(Node* sfpt); |
| 991 | |
| 992 | // Replace parallel induction variable (parallel to trip counter) |
| 993 | void replace_parallel_iv(IdealLoopTree *loop); |
| 994 | |
| 995 | Node *dom_lca( Node *n1, Node *n2 ) const { |
| 996 | return find_non_split_ctrl(dom_lca_internal(n1, n2)); |
| 997 | } |
| 998 | Node *dom_lca_internal( Node *n1, Node *n2 ) const; |
| 999 | |
| 1000 | // Build and verify the loop tree without modifying the graph. This |
| 1001 | // is useful to verify that all inputs properly dominate their uses. |
| 1002 | static void verify(PhaseIterGVN& igvn) { |
| 1003 | #ifdef ASSERT |
| 1004 | ResourceMark rm; |
| 1005 | PhaseIdealLoop v(igvn); |
| 1006 | #endif |
| 1007 | } |
| 1008 | |
| 1009 | // Recommended way to use PhaseIdealLoop. |
  // Run PhaseIdealLoop in some mode and allocate a local scope for memory allocations.
| 1011 | static void optimize(PhaseIterGVN &igvn, LoopOptsMode mode) { |
| 1012 | ResourceMark rm; |
| 1013 | PhaseIdealLoop v(igvn, mode); |
| 1014 | } |
| 1015 | |
| 1016 | // True if the method has at least 1 irreducible loop |
| 1017 | bool _has_irreducible_loops; |
| 1018 | |
| 1019 | // Per-Node transform |
| 1020 | virtual Node *transform( Node *a_node ) { return 0; } |
| 1021 | |
| 1022 | bool is_counted_loop(Node* x, IdealLoopTree*& loop); |
| 1023 | IdealLoopTree* create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control, |
| 1024 | IdealLoopTree* loop, float cl_prob, float le_fcnt, |
| 1025 | Node*& entry_control, Node*& iffalse); |
| 1026 | |
| 1027 | Node* exact_limit( IdealLoopTree *loop ); |
| 1028 | |
  // Return the loop tree (IdealLoopTree) for a post-walked Node
| 1030 | IdealLoopTree *get_loop( Node *n ) const { |
| 1031 | // Dead nodes have no loop, so return the top level loop instead |
| 1032 | if (!has_node(n)) return _ltree_root; |
| 1033 | assert(!has_ctrl(n), "" ); |
| 1034 | return (IdealLoopTree*)_nodes[n->_idx]; |
| 1035 | } |
| 1036 | |
| 1037 | IdealLoopTree *ltree_root() const { return _ltree_root; } |
| 1038 | |
| 1039 | // Is 'n' a (nested) member of 'loop'? |
| 1040 | int is_member( const IdealLoopTree *loop, Node *n ) const { |
| 1041 | return loop->is_member(get_loop(n)); } |
| 1042 | |
| 1043 | // This is the basic building block of the loop optimizations. It clones an |
| 1044 | // entire loop body. It makes an old_new loop body mapping; with this |
| 1045 | // mapping you can find the new-loop equivalent to an old-loop node. All |
| 1046 | // new-loop nodes are exactly equal to their old-loop counterparts, all |
| 1047 | // edges are the same. All exits from the old-loop now have a RegionNode |
| 1048 | // that merges the equivalent new-loop path. This is true even for the |
| 1049 | // normal "loop-exit" condition. All uses of loop-invariant old-loop values |
| 1050 | // now come from (one or more) Phis that merge their new-loop equivalents. |
| 1051 | // Parameter side_by_side_idom: |
  // When side_by_side_idom is NULL, the dominator tree is constructed for
| 1053 | // the clone loop to dominate the original. Used in construction of |
| 1054 | // pre-main-post loop sequence. |
| 1055 | // When nonnull, the clone and original are side-by-side, both are |
| 1056 | // dominated by the passed in side_by_side_idom node. Used in |
| 1057 | // construction of unswitched loops. |
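  // Usage sketch (illustrative): after
  //   clone_loop(loop, old_new, dom_depth(loop->_head), IgnoreStripMined);
  // the clone of any old-loop node n can be looked up as old_new[n->_idx].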
| 1058 | enum CloneLoopMode { |
| 1059 | IgnoreStripMined = 0, // Only clone inner strip mined loop |
| 1060 | CloneIncludesStripMined = 1, // clone both inner and outer strip mined loops |
| 1061 | ControlAroundStripMined = 2 // Only clone inner strip mined loop, |
| 1062 | // result control flow branches |
| 1063 | // either to inner clone or outer |
| 1064 | // strip mined loop. |
| 1065 | }; |
| 1066 | void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth, |
| 1067 | CloneLoopMode mode, Node* side_by_side_idom = NULL); |
| 1068 | void clone_loop_handle_data_uses(Node* old, Node_List &old_new, |
| 1069 | IdealLoopTree* loop, IdealLoopTree* companion_loop, |
| 1070 | Node_List*& split_if_set, Node_List*& split_bool_set, |
| 1071 | Node_List*& split_cex_set, Node_List& worklist, |
| 1072 | uint new_counter, CloneLoopMode mode); |
| 1073 | void clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop, |
| 1074 | IdealLoopTree* outer_loop, int dd, Node_List &old_new, |
| 1075 | Node_List& ); |
| 1076 | |
| 1077 | // If we got the effect of peeling, either by actually peeling or by |
| 1078 | // making a pre-loop which must execute at least once, we can remove |
| 1079 | // all loop-invariant dominated tests in the main body. |
| 1080 | void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ); |
| 1081 | |
| 1082 | // Generate code to do a loop peel for the given loop (and body). |
| 1083 | // old_new is a temp array. |
| 1084 | void do_peeling( IdealLoopTree *loop, Node_List &old_new ); |
| 1085 | |
| 1086 | // Add pre and post loops around the given loop. These loops are used |
| 1087 | // during RCE, unrolling and aligning loops. |
| 1088 | void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ); |
| 1089 | |
| 1090 | // Add post loop after the given loop. |
| 1091 | Node *insert_post_loop(IdealLoopTree *loop, Node_List &old_new, |
| 1092 | CountedLoopNode *main_head, CountedLoopEndNode *main_end, |
| 1093 | Node *incr, Node *limit, CountedLoopNode *&post_head); |
| 1094 | |
  // Add an RCE'd post loop which we will multi-version and adapt for run-time test path usage
| 1096 | void insert_scalar_rced_post_loop( IdealLoopTree *loop, Node_List &old_new ); |
| 1097 | |
| 1098 | // Add a vector post loop between a vector main loop and the current post loop |
| 1099 | void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new); |
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in the preheader_ctrl block and return that; otherwise return n.
| 1102 | Node *clone_up_backedge_goo( Node *back_ctrl, Node *, Node *n, VectorSet &visited, Node_Stack &clones ); |
| 1103 | |
| 1104 | // Take steps to maximally unroll the loop. Peel any odd iterations, then |
| 1105 | // unroll to do double iterations. The next round of major loop transforms |
| 1106 | // will repeat till the doubled loop body does all remaining iterations in 1 |
| 1107 | // pass. |
| 1108 | void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ); |
| 1109 | |
| 1110 | // Unroll the loop body one step - make each trip do 2 iterations. |
| 1111 | void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ); |
| 1112 | |
| 1113 | // Mark vector reduction candidates before loop unrolling |
| 1114 | void mark_reductions( IdealLoopTree *loop ); |
| 1115 | |
| 1116 | // Return true if exp is a constant times an induction var |
| 1117 | bool is_scaled_iv(Node* exp, Node* iv, int* p_scale); |
| 1118 | |
| 1119 | // Return true if exp is a scaled induction var plus (or minus) constant |
| 1120 | bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0); |
| 1121 | |
| 1122 | // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted |
| 1123 | ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, |
| 1124 | Deoptimization::DeoptReason reason, |
| 1125 | int opcode); |
| 1126 | void register_control(Node* n, IdealLoopTree *loop, Node* pred); |
| 1127 | |
| 1128 | // Clone loop predicates to cloned loops (peeled, unswitched) |
| 1129 | static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry, |
| 1130 | Deoptimization::DeoptReason reason, |
| 1131 | PhaseIdealLoop* loop_phase, |
| 1132 | PhaseIterGVN* igvn); |
| 1133 | |
| 1134 | static void clone_loop_predicates_fix_mem(ProjNode* dom_proj , ProjNode* proj, |
| 1135 | PhaseIdealLoop* loop_phase, |
| 1136 | PhaseIterGVN* igvn); |
| 1137 | |
| 1138 | static Node* clone_loop_predicates(Node* old_entry, Node* new_entry, |
| 1139 | bool clone_limit_check, |
| 1140 | PhaseIdealLoop* loop_phase, |
| 1141 | PhaseIterGVN* igvn); |
| 1142 | Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check); |
| 1143 | |
| 1144 | static Node* skip_all_loop_predicates(Node* entry); |
| 1145 | static Node* skip_loop_predicates(Node* entry); |
| 1146 | |
| 1147 | // Find a good location to insert a predicate |
| 1148 | static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason); |
| 1149 | // Find a predicate |
| 1150 | static Node* find_predicate(Node* entry); |
| 1151 | // Construct a range check for a predicate if |
| 1152 | BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl, |
| 1153 | int scale, Node* offset, |
| 1154 | Node* init, Node* limit, jint stride, |
| 1155 | Node* range, bool upper, bool &overflow); |
| 1156 | |
| 1157 | // Implementation of the loop predication to promote checks outside the loop |
| 1158 | bool loop_predication_impl(IdealLoopTree *loop); |
| 1159 | bool loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* proj, ProjNode *predicate_proj, |
| 1160 | CountedLoopNode *cl, ConNode* zero, Invariance& invar, |
| 1161 | Deoptimization::DeoptReason reason); |
| 1162 | bool loop_predication_should_follow_branches(IdealLoopTree *loop, ProjNode *predicate_proj, float& loop_trip_cnt); |
| 1163 | void loop_predication_follow_branches(Node *c, IdealLoopTree *loop, float loop_trip_cnt, |
| 1164 | PathFrequency& pf, Node_Stack& stack, VectorSet& seen, |
| 1165 | Node_List& if_proj_list); |
| 1166 | ProjNode* insert_skeleton_predicate(IfNode* iff, IdealLoopTree *loop, |
| 1167 | ProjNode* proj, ProjNode *predicate_proj, |
| 1168 | ProjNode* upper_bound_proj, |
| 1169 | int scale, Node* offset, |
| 1170 | Node* init, Node* limit, jint stride, |
| 1171 | Node* rng, bool& overflow, |
| 1172 | Deoptimization::DeoptReason reason); |
| 1173 | Node* add_range_check_predicate(IdealLoopTree* loop, CountedLoopNode* cl, |
| 1174 | Node* predicate_proj, int scale_con, Node* offset, |
| 1175 | Node* limit, jint stride_con, Node* value); |
| 1176 | |
  // Helper function to collect predicates for eliminating the useless ones
| 1178 | void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1); |
| 1179 | void eliminate_useless_predicates(); |

  // Change the control input of expensive nodes to allow commoning by
  // IGVN when it is guaranteed to not result in a more frequent
  // execution of the expensive node. Return true if progress.
  bool process_expensive_nodes();

  // Check whether node has become unreachable
  bool is_node_unreachable(Node *n) const {
    return !has_node(n) || n->is_unreachable(_igvn);
  }

  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  int do_range_check( IdealLoopTree *loop, Node_List &old_new );

  // Check to see if do_range_check(...) cleaned the main loop of range-checks
  void has_range_checks(IdealLoopTree *loop);

  // Process post loops which have range checks and try to build a multi-version
  // guard to safely determine if we can execute the post loop which was RCE'd.
  bool multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop);

  // Cause the RCE'd post loop to be optimized away; this happens if we cannot
  // complete multiversioning.
  void poison_rce_post_loop(IdealLoopTree *rce_loop);

  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
  ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
                                        Node_List &old_new,
                                        int opcode,
                                        CloneLoopMode mode);

  // Clone a loop and return the clone head (clone_loop_head).
  // Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse.
  // This routine was created for usage in CountedLoopReserveKit.
  //
  //    int(1) -> If -> IfTrue -> original_loop_head
  //                |
  //                V
  //             IfFalse -> clone_loop_head (returned by the function)
  //
  LoopNode* create_reserve_version_of_loop(IdealLoopTree *loop, CountedLoopReserveKit* lk);
  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching (IdealLoopTree *loop, Node_List &old_new);

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;

  // Range Check Elimination uses this function!
  // Constrain the main loop iterations so the affine function:
  //    low_limit <= scale_con * I + offset < upper_limit
  // always holds true. That is, either increase the number of iterations in
  // the pre-loop or the post-loop until the condition holds true in the main
  // loop. Scale_con, offset and limit are all loop invariant.
  void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
  // Helper function for add_constraint().
  Node* adjust_limit(int stride_con, Node *scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl, bool round_up);
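  // For example (an illustrative sketch): to remove the range check for a
  // load "a[i]" from the main loop, we need
  //    0 <= 1 * i + 0 < a.length
  // to hold on every main-loop iteration. add_constraint() adjusts the
  // pre-loop and/or main-loop limits so that i stays inside [0, a.length)
  // in the main loop; iterations outside that window run in the pre- and
  // post-loop, where the check remains.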

  // Partially peel loop up through last_peel node.
  bool partial_peel( IdealLoopTree *loop, Node_List &old_new );

  // Create a scheduled list of nodes control dependent on ctrl set.
  void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
  // Has a use in the vector set
  bool has_use_in_set( Node* n, VectorSet& vset );
  // Has a use internal to the vector set (i.e., not in a phi at the loop head)
  bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
  // Clone "n" for uses that are outside of the loop
  int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
  // Clone "n" for special uses that are in the not_peeled region
  void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                          VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
  // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
  void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
#ifdef ASSERT
  // Validate the loop partition sets: peel and not_peel
  bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
  // Ensure that uses outside of loop are of the right form
  bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                 uint orig_exit_idx, uint clone_exit_idx);
  bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
#endif

  // Returns the nonzero constant stride if the if-node is a possible iv test (otherwise returns zero).
  int stride_of_possible_iv( Node* iff );
  bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
  // Return the (unique) control output node that's in the loop (if it exists).
  Node* stay_in_loop( Node* n, IdealLoopTree *loop);
  // Insert a signed compare loop exit cloned from an unsigned compare.
  IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
  void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
  // Utility to register node "n" with PhaseIdealLoop
  void register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth);
  // Utility to create an if-projection
  ProjNode* proj_clone(ProjNode* p, IfNode* iff);
  // Force the iff control output to be the live_proj
  Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
  // Insert a region before an if projection
  RegionNode* insert_region_before_proj(ProjNode* proj);
  // Insert a new if before an if projection
  ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);

  // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
  // "Nearly" because all Nodes have been cloned from the original in the loop,
  // but the fall-in edges to the Cmp are different. Clone Bool/Cmp pairs
  // through the Phi recursively, and return a Bool.
  Node *clone_iff( PhiNode *phi, IdealLoopTree *loop );
  CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );

  // Rework addressing expressions to get the most loop-invariant stuff
  // moved out. We'd like to do all associative operators, but it's especially
  // important (common) to do address expressions.
  Node *remix_address_expressions( Node *n );
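  // For example (an illustrative sketch): "(i + inv1) + inv2" may be
  // rewritten as "i + (inv1 + inv2)", so the loop-invariant sum can be
  // computed once and hoisted out of the loop.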

  // Convert an add to a muladd in order to generate MulAddS2I under certain criteria
  Node* convert_add_to_muladd(Node* n);

  // Attempt to use a conditional move instead of a phi/branch
  Node *conditional_move( Node *n );
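  // For example (an illustrative sketch): a diamond computing
  // "x = (a < b) ? p : q" may be collapsed into a single CMove node when
  // both arms are cheap enough that branch-free code is profitable.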

  // Reorganize offset computations to lower register pressure.
  // Mostly prevent loop-fallout uses of the pre-incremented trip counter
  // (which are then alive with the post-incremented trip counter
  // forcing an extra register move).
  void reorg_offsets( IdealLoopTree *loop );

  // Check for aggressive application of 'split-if' optimization,
  // using basic block level info.
  void split_if_with_blocks ( VectorSet &visited, Node_Stack &nstack);
  Node *split_if_with_blocks_pre ( Node *n );
  void split_if_with_blocks_post( Node *n );
  Node *has_local_phi_input( Node *n );
  // Mark an IfNode as being dominated by a prior test,
  // without actually altering the CFG (and hence IDOM info).
  void dominated_by( Node *prevdom, Node *iff, bool flip = false, bool exclude_loop_predicate = false );

  // Split Node 'n' through merge point
  Node *split_thru_region( Node *n, Node *region );
  // Split Node 'n' through merge point if there is enough win.
  Node *split_thru_phi( Node *n, Node *region, int policy );
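  // For example (an illustrative sketch): splitting "AddI(Phi(x, y), z)"
  // through the Phi's Region yields "Phi(AddI(x, z), AddI(y, z))" (when the
  // other inputs are available on each path), exposing per-path
  // simplification opportunities.
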
  // Found an If getting its condition-code input from a Phi in the
  // same block. Split thru the Region.
  void do_split_if( Node *iff );

  // Conversion of fill/copy patterns into intrinsic versions
  bool do_intrinsify_fill();
  bool intrinsify_fill(IdealLoopTree* lpt);
  bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
                       Node*& shift, Node*& offset);

private:
  // Return a type based on condition control flow
  const TypeInt* filtered_type( Node *n, Node* n_ctrl);
  const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
  // Helpers for filtered type
  const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);

  // Helper functions
  Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
  Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
  void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
  bool split_up( Node *n, Node *blk1, Node *blk2 );
  void sink_use( Node *use, Node *post_loop );
  Node *place_near_use( Node *useblock ) const;
  Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
  void try_move_store_after_loop(Node* n);
  bool identical_backtoback_ifs(Node *n);
  bool can_split_if(Node *n_ctrl);

  // Determine if a method is too big for a/another round of split-if, based on
  // a magic (approximate) ratio derived from the equally magic constant 35000,
  // previously used for this purpose (but without relating to the node limit).
  // (With the default MaxNodeLimit of 80000, the threshold works out to 32000
  // live nodes, close to the historical 35000.)
  bool must_throttle_split_if() {
    uint threshold = C->max_node_limit() * 2 / 5;
    return C->live_nodes() > threshold;
  }

  // A simplistic node request tracking mechanism, where
  //   = UINT_MAX   Request not valid or made final.
  //   < UINT_MAX   Nodes currently requested (estimate).
  uint _nodes_required;

  enum { REQUIRE_MIN = 70 };

  uint nodes_required() const { return _nodes_required; }

  // Given the _currently_ available number of nodes, check whether there is
  // "room" for an additional request or not, considering the already required
  // number of nodes. Return TRUE if the new request would exceed the node
  // budget limit, otherwise return FALSE. Note that this interpretation acts
  // pessimistically on additional requests when new nodes have already been
  // generated since the 'begin'. This behaviour fits with the intention that
  // node estimates/requests should be made upfront.
  bool exceeding_node_budget(uint required = 0) {
    assert(C->live_nodes() < C->max_node_limit(), "sanity");
    uint available = C->max_node_limit() - C->live_nodes();
    return available < required + _nodes_required;
  }

  uint require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    precond(require > 0);
    _nodes_required += MAX2(require, minreq);
    return _nodes_required;
  }

  bool may_require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    return !exceeding_node_budget(require) && require_nodes(require, minreq) > 0;
  }

  uint require_nodes_begin() {
    assert(_nodes_required == UINT_MAX, "Bad state (begin).");
    _nodes_required = 0;
    return C->live_nodes();
  }

  // When a node request is final, optionally check that the requested number
  // of nodes was reasonably correct with respect to the number of new nodes
  // introduced since the last 'begin'. Always check that we have not exceeded
  // the maximum node limit.
  void require_nodes_final(uint live_at_begin, bool check_estimate) {
    assert(_nodes_required < UINT_MAX, "Bad state (final).");

    if (check_estimate) {
      // Assert that the node budget request was not off by too much (x2).
      // Should this be the case we _surely_ need to improve the estimates
      // used in our budget calculations.
      assert(C->live_nodes() - live_at_begin <= 2 * _nodes_required,
             "Bad node estimate: actual = %d >> request = %d",
             C->live_nodes() - live_at_begin, _nodes_required);
    }
    // Assert that we have stayed within the node budget limit.
    assert(C->live_nodes() < C->max_node_limit(),
           "Exceeding node budget limit: %d + %d > %d (request = %d)",
           C->live_nodes() - live_at_begin, live_at_begin,
           C->max_node_limit(), _nodes_required);

    _nodes_required = UINT_MAX;
  }

  bool _created_loop_node;

public:
  void set_created_loop_node() { _created_loop_node = true; }
  bool created_loop_node()     { return _created_loop_node; }
  void register_new_node( Node *n, Node *blk );

#ifdef ASSERT
  void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
#endif

#ifndef PRODUCT
  void dump( ) const;
  void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
  void verify() const;          // Major slow :-)
  void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
  IdealLoopTree *get_loop_idx(Node* n) const {
    // Dead nodes have no loop, so return the top level loop instead
    return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
  }
  // Print some stats
  static void print_statistics();
  static int _loop_invokes;     // Count of PhaseIdealLoop invokes
  static int _loop_work;        // Sum of PhaseIdealLoop x _unique
#endif
  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
};


class AutoNodeBudget : public StackObj
{
public:
  enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };

  AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
    : _phase(phase),
      _check_at_final(chk == BUDGET_CHECK),
      _nodes_at_begin(0)
  {
    precond(_phase != NULL);

    _nodes_at_begin = _phase->require_nodes_begin();
  }

  ~AutoNodeBudget() {
#ifndef PRODUCT
    if (TraceLoopOpts) {
      uint request = _phase->nodes_required();
      uint delta   = _phase->C->live_nodes() - _nodes_at_begin;

      if (request < delta) {
        tty->print_cr("Exceeding node budget: %d < %d", request, delta);
      } else {
        uint const REQUIRE_MIN = PhaseIdealLoop::REQUIRE_MIN;
        // Identify the worst estimates as "poor" ones.
        if (request > REQUIRE_MIN && delta > 0) {
          if ((delta >  REQUIRE_MIN && request >  3 * delta) ||
              (delta <= REQUIRE_MIN && request > 10 * delta)) {
            tty->print_cr("Poor node estimate: %d >> %d", request, delta);
          }
        }
      }
    }
#endif // PRODUCT
    _phase->require_nodes_final(_nodes_at_begin, _check_at_final);
  }

private:
  PhaseIdealLoop* _phase;
  bool            _check_at_final;
  uint            _nodes_at_begin;
};
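
// A minimal usage sketch (illustrative only; the 200-node estimate and the
// transform body are placeholders), from within a PhaseIdealLoop member:
//
//   {
//     AutoNodeBudget node_budget(this);
//     if (may_require_nodes(200)) {
//       // ...perform a transformation expected to add about 200 nodes...
//     }
//   } // ~AutoNodeBudget finalizes the request and verifies the estimate.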


// This kit may be used to make a reserved copy of a loop before the loop
// undergoes irreversible changes.
//
// Function create_reserve() creates a reserved copy (clone) of the loop.
// The reserved copy is created by calling
// PhaseIdealLoop::create_reserve_version_of_loop - see there how
// the original and reserved loops are connected in the outer graph.
// If create_reserve succeeded, it returns 'true' and _has_reserved is set to 'true'.
//
// By default the reserved copy (clone) of the loop is created as dead code - it is
// dominated in the outer loop by this node chain:
//   intcon(1)->If->IfFalse->reserved_copy.
// The original loop is dominated by the same node chain, but via the IfTrue
// projection:
//   intcon(0)->If->IfTrue->original_loop.
//
// In this implementation of CountedLoopReserveKit the ctor includes create_reserve(),
// and the dtor checks the _use_new value.
// If _use_new == false, the dtor "switches" control to the reserved copy of the loop
// by simply replacing node intcon(1) with node intcon(0).
//
// Here is a proposed example of usage (see also SuperWord::output in superword.cpp).
//
// void CountedLoopReserveKit_example()
// {
//    CountedLoopReserveKit lrk(phase, lpt, DoReserveCopy /* = true */); // create local object
//    if (DoReserveCopy && !lrk.has_reserved()) {
//      return; // failed to create reserved loop copy
//    }
//    ...
//    // something is wrong, switch to original loop
//    if (something_is_wrong) return; // ~CountedLoopReserveKit makes the switch
//    ...
//    // everything worked ok, return with the newly modified loop
//    lrk.use_new();
//    return; // ~CountedLoopReserveKit does nothing once use_new() was called
// }
//
// Keep in mind that, by default, if create_reserve() is not followed by use_new(),
// the dtor will "switch to the original" loop.
// NOTE: If you modify anything outside of the original loop, this class is no help.
//
class CountedLoopReserveKit {
  private:
    PhaseIdealLoop* _phase;
    IdealLoopTree*  _lpt;
    LoopNode*       _lp;
    IfNode*         _iff;
    LoopNode*       _lp_reserved;
    bool            _has_reserved;
    bool            _use_new;
    const bool      _active; // may be set to false in the ctor; the object is then a dummy

  public:
    CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active);
    ~CountedLoopReserveKit();
    void use_new()            { _use_new = true; }
    void set_iff(IfNode* x)   { _iff = x; }
    bool has_reserved() const { return _active && _has_reserved; }
  private:
    bool create_reserve();
}; // class CountedLoopReserveKit

inline Node* IdealLoopTree::tail() {
  // Handle lazy update of _tail field.
  if (_tail->in(0) == NULL) {
    _tail = _phase->get_ctrl(_tail);
  }
  return _tail;
}


// Iterate over the loop tree using a preorder, left-to-right traversal.
//
// Example that visits all counted loops from within PhaseIdealLoop
//
//   for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//     IdealLoopTree* lpt = iter.current();
//     if (!lpt->is_counted()) continue;
//     ...
//   }
class LoopTreeIterator : public StackObj {
private:
  IdealLoopTree* _root;
  IdealLoopTree* _curnt;

public:
  LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}

  bool done() { return _curnt == NULL; }       // Finished iterating?

  void next();                                 // Advance to next loop tree

  IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
};

#endif // SHARE_OPTO_LOOPNODE_HPP